Posted to commits@geode.apache.org by je...@apache.org on 2016/02/11 16:26:25 UTC

[01/62] [abbrv] [partial] incubator-geode git commit: GEODE-773: Extract static methods from DistributedTestCase

Repository: incubator-geode
Updated Branches:
  refs/heads/feature/GEODE-17 3bf38a030 -> c39f8a5f1
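
The pattern across all of the hunks below is the same: helpers that the test classes previously inherited from DistributedTestCase are now invoked through dedicated static utility classes (IgnoredException, LogWriterUtils, NetworkUtils, Wait). As a hedged illustration only — the test class below is hypothetical, but each call on the right-hand side of a comment is taken from the hunks that follow:

    import com.gemstone.gemfire.test.dunit.Host;
    import com.gemstone.gemfire.test.dunit.IgnoredException;
    import com.gemstone.gemfire.test.dunit.LogWriterUtils;
    import com.gemstone.gemfire.test.dunit.NetworkUtils;
    import com.gemstone.gemfire.test.dunit.Wait;

    // Hypothetical example class; it only exists to show the before/after call mapping.
    public class StaticUtilMigrationExampleDUnitTest extends WANTestBase {

      public StaticUtilMigrationExampleDUnitTest(String name) {
        super(name);
      }

      public void testMigratedCalls() throws Exception {
        // testName + "_RR"                    ->  getTestMethodName() + "_RR"
        String regionName = getTestMethodName() + "_RR";

        // addExpectedException("Broken pipe") ->  IgnoredException.addIgnoredException("Broken pipe")
        IgnoredException.addIgnoredException("Broken pipe");

        // getLogWriter().info(...)            ->  LogWriterUtils.getLogWriter().info(...)
        LogWriterUtils.getLogWriter().info("using region " + regionName);

        // pause(PAUSE)                        ->  Wait.pause(PAUSE)
        Wait.pause(1000);

        // getServerHostName(host)             ->  NetworkUtils.getServerHostName(host)
        String hostName = NetworkUtils.getServerHostName(Host.getHost(0));
      }
    }
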


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentWANPropogation_1_DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentWANPropogation_1_DUnitTest.java b/gemfire-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentWANPropogation_1_DUnitTest.java
index 804c3ed..c6123be 100644
--- a/gemfire-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentWANPropogation_1_DUnitTest.java
+++ b/gemfire-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentWANPropogation_1_DUnitTest.java
@@ -24,6 +24,8 @@ import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.internal.cache.wan.BatchException70;
 import com.gemstone.gemfire.internal.cache.wan.WANTestBase;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 
 /**
  * All the test cases are similar to SerialWANPropogationDUnitTest except that
@@ -69,27 +71,27 @@ public class ConcurrentWANPropogation_1_DUnitTest extends WANTestBase {
     vm5.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
 
     vm4.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR", "ln", isOffHeap() });
+        getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm5.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR", "ln", isOffHeap() });
+        getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm6.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR", "ln", isOffHeap() });
+        getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm7.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR", "ln", isOffHeap() });
+        getTestMethodName() + "_RR", "ln", isOffHeap() });
     
-    addExpectedException(BatchException70.class.getName());
-    addExpectedException(ServerOperationException.class.getName());
+    IgnoredException.addIgnoredException(BatchException70.class.getName());
+    IgnoredException.addIgnoredException(ServerOperationException.class.getName());
 
-    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { testName + "_RR",
+    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_RR",
       1000 });
     
     vm2.invoke(WANTestBase.class, "createCache", new Object[] { nyPort });
     vm3.invoke(WANTestBase.class, "createCache", new Object[] { nyPort });
 
     vm2.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-      testName + "_RR", null, isOffHeap() });
+      getTestMethodName() + "_RR", null, isOffHeap() });
     vm3.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-      testName + "_RR", null, isOffHeap() });
+      getTestMethodName() + "_RR", null, isOffHeap() });
   
     vm2.invoke(WANTestBase.class, "createReceiver2",
         new Object[] {nyPort });
@@ -97,11 +99,11 @@ public class ConcurrentWANPropogation_1_DUnitTest extends WANTestBase {
         new Object[] {nyPort });
     
     vm4.invoke(WANTestBase.class, "validateRegionSize", new Object[] {
-        testName + "_RR", 1000 });
+        getTestMethodName() + "_RR", 1000 });
     vm2.invoke(WANTestBase.class, "validateRegionSize", new Object[] {
-        testName + "_RR", 1000 });
+        getTestMethodName() + "_RR", 1000 });
     vm3.invoke(WANTestBase.class, "validateRegionSize", new Object[] {
-        testName + "_RR", 1000 });
+        getTestMethodName() + "_RR", 1000 });
   }
   
   public void testReplicatedSerialPropagation() throws Exception {
@@ -124,36 +126,36 @@ public class ConcurrentWANPropogation_1_DUnitTest extends WANTestBase {
         false, 100, 10, false, false, null, true, 5, OrderPolicy.THREAD });
 
     vm2.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR", null, isOffHeap() });
+        getTestMethodName() + "_RR", null, isOffHeap() });
     vm3.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR", null, isOffHeap() });
+        getTestMethodName() + "_RR", null, isOffHeap() });
 
     vm4.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
     vm5.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
 
     vm4.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR", "ln", isOffHeap() });
+        getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm5.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR", "ln", isOffHeap() });
+        getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm6.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR", "ln", isOffHeap() });
+        getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm7.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR", "ln", isOffHeap() });
+        getTestMethodName() + "_RR", "ln", isOffHeap() });
 
-    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { testName + "_RR",
+    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_RR",
         1000 });
 
     vm2.invoke(WANTestBase.class, "validateRegionSize", new Object[] {
-        testName + "_RR", 1000 });
+        getTestMethodName() + "_RR", 1000 });
     vm3.invoke(WANTestBase.class, "validateRegionSize", new Object[] {
-        testName + "_RR", 1000 });
+        getTestMethodName() + "_RR", 1000 });
   }
   
   
   public void testReplicatedSerialPropagationWithLocalSiteClosedAndRebuilt() throws Exception {
-    addExpectedException("Broken pipe");
-    addExpectedException("Connection reset");
-    addExpectedException("Unexpected IOException");
+    IgnoredException.addIgnoredException("Broken pipe");
+    IgnoredException.addIgnoredException("Connection reset");
+    IgnoredException.addIgnoredException("Unexpected IOException");
     Integer lnPort = (Integer)vm0.invoke(WANTestBase.class,
         "createFirstLocatorWithDSId", new Object[] { 1 });
     Integer nyPort = (Integer)vm1.invoke(WANTestBase.class,
@@ -173,23 +175,23 @@ public class ConcurrentWANPropogation_1_DUnitTest extends WANTestBase {
         false, 100, 10, false, false, null, true, 5, OrderPolicy.THREAD });
 
     vm2.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR", null, isOffHeap() });
+        getTestMethodName() + "_RR", null, isOffHeap() });
     vm3.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR", null, isOffHeap() });
+        getTestMethodName() + "_RR", null, isOffHeap() });
 
     vm4.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
     vm5.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
 
     vm4.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR", "ln", isOffHeap() });
+        getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm5.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR", "ln", isOffHeap() });
+        getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm6.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR", "ln", isOffHeap() });
+        getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm7.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR", "ln", isOffHeap() });
+        getTestMethodName() + "_RR", "ln", isOffHeap() });
     
-    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { testName + "_RR",
+    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_RR",
         1000 });
     
     //---------close local site and build again-----------------------------------------
@@ -199,8 +201,8 @@ public class ConcurrentWANPropogation_1_DUnitTest extends WANTestBase {
     vm7.invoke(WANTestBase.class, "killSender", new Object[] { });
     
     Integer regionSize = 
-      (Integer) vm2.invoke(WANTestBase.class, "getRegionSize", new Object[] {testName + "_RR" });
-    getLogWriter().info("Region size on remote is: " + regionSize);
+      (Integer) vm2.invoke(WANTestBase.class, "getRegionSize", new Object[] {getTestMethodName() + "_RR" });
+    LogWriterUtils.getLogWriter().info("Region size on remote is: " + regionSize);
     
     vm4.invoke(WANTestBase.class, "createCache", new Object[] { lnPort });
     vm5.invoke(WANTestBase.class, "createCache", new Object[] { lnPort });
@@ -216,13 +218,13 @@ public class ConcurrentWANPropogation_1_DUnitTest extends WANTestBase {
     vm5.invoke(WANTestBase.class, "setRemoveFromQueueOnException", new Object[] { "ln", true });
     
     vm4.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-      testName + "_RR", "ln", isOffHeap() });
+      getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm5.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-      testName + "_RR", "ln", isOffHeap() });
+      getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm6.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-      testName + "_RR", "ln", isOffHeap() });
+      getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm7.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-      testName + "_RR", "ln", isOffHeap() });
+      getTestMethodName() + "_RR", "ln", isOffHeap() });
     
     vm4.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
     vm5.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
@@ -230,19 +232,19 @@ public class ConcurrentWANPropogation_1_DUnitTest extends WANTestBase {
     vm4.invoke(WANTestBase.class, "waitForSenderRunningState", new Object[] { "ln" });
     vm5.invoke(WANTestBase.class, "waitForSenderRunningState", new Object[] { "ln" });
     
-    addExpectedException(EntryExistsException.class.getName());
-    addExpectedException(BatchException70.class.getName());
-    addExpectedException(ServerOperationException.class.getName());
+    IgnoredException.addIgnoredException(EntryExistsException.class.getName());
+    IgnoredException.addIgnoredException(BatchException70.class.getName());
+    IgnoredException.addIgnoredException(ServerOperationException.class.getName());
     
-    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { testName + "_RR",
+    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_RR",
       1000 });
     //----------------------------------------------------------------------------------
 
     //verify remote site receives all the events
     vm2.invoke(WANTestBase.class, "validateRegionSize", new Object[] {
-        testName + "_RR", 1000 });
+        getTestMethodName() + "_RR", 1000 });
     vm3.invoke(WANTestBase.class, "validateRegionSize", new Object[] {
-        testName + "_RR", 1000 });
+        getTestMethodName() + "_RR", 1000 });
   }
   
   /**
@@ -276,15 +278,15 @@ public class ConcurrentWANPropogation_1_DUnitTest extends WANTestBase {
 
     //create one RR (RR_1) on remote site
     vm2.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_1", null, isOffHeap() });
+        getTestMethodName() + "_RR_1", null, isOffHeap() });
     vm3.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_1", null, isOffHeap() });
+        getTestMethodName() + "_RR_1", null, isOffHeap() });
 
     //create another RR (RR_2) on remote site
     vm2.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_2", null, isOffHeap() });
+        getTestMethodName() + "_RR_2", null, isOffHeap() });
     vm3.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_2", null, isOffHeap() });
+        getTestMethodName() + "_RR_2", null, isOffHeap() });
     
     //start the senders on local site
     vm4.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
@@ -292,30 +294,30 @@ public class ConcurrentWANPropogation_1_DUnitTest extends WANTestBase {
 
     //create one RR (RR_1) on local site
     vm4.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_1", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_1", "ln", isOffHeap() });
     vm5.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_1", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_1", "ln", isOffHeap() });
     vm6.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_1", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_1", "ln", isOffHeap() });
     vm7.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_1", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_1", "ln", isOffHeap() });
 
     //create another RR (RR_2) on local site
     vm4.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_2", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_2", "ln", isOffHeap() });
     vm5.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_2", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_2", "ln", isOffHeap() });
     vm6.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_2", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_2", "ln", isOffHeap() });
     vm7.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_2", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_2", "ln", isOffHeap() });
     
     //start puts in RR_1 in another thread
-    AsyncInvocation inv1 = vm4.invokeAsync(WANTestBase.class, "doPuts", new Object[] { testName + "_RR_1", 1000 });
+    AsyncInvocation inv1 = vm4.invokeAsync(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_RR_1", 1000 });
     //do puts in RR_2 in main thread
-    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { testName + "_RR_2", 500 });
+    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_RR_2", 500 });
     //destroy RR_2 after above puts are complete
-    vm4.invoke(WANTestBase.class, "destroyRegion", new Object[] { testName + "_RR_2"});
+    vm4.invoke(WANTestBase.class, "destroyRegion", new Object[] { getTestMethodName() + "_RR_2"});
     
     try {
       inv1.join();
@@ -327,9 +329,9 @@ public class ConcurrentWANPropogation_1_DUnitTest extends WANTestBase {
     Thread.sleep(20);
     //vm4.invoke(WANTestBase.class, "verifyQueueSize", new Object[] { "ln", 0 });
     vm2.invoke(WANTestBase.class, "validateRegionSize", new Object[] {
-        testName + "_RR_1", 1000 });
+        getTestMethodName() + "_RR_1", 1000 });
     vm3.invoke(WANTestBase.class, "validateRegionSize", new Object[] {
-        testName + "_RR_2", 500 });
+        getTestMethodName() + "_RR_2", 500 });
   }
 
   /**
@@ -363,9 +365,9 @@ public class ConcurrentWANPropogation_1_DUnitTest extends WANTestBase {
 
     //create one RR (RR_1) on remote site
     vm2.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_1", null, isOffHeap() });
+        getTestMethodName() + "_RR_1", null, isOffHeap() });
     vm3.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_1", null, isOffHeap() });
+        getTestMethodName() + "_RR_1", null, isOffHeap() });
 
     //start the senders on local site
     vm4.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
@@ -373,21 +375,21 @@ public class ConcurrentWANPropogation_1_DUnitTest extends WANTestBase {
 
     //create one RR (RR_1) on local site
     vm4.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_1", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_1", "ln", isOffHeap() });
     vm5.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_1", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_1", "ln", isOffHeap() });
     vm6.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_1", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_1", "ln", isOffHeap() });
     vm7.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_1", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_1", "ln", isOffHeap() });
 
-    addExpectedException(BatchException70.class.getName());
-    addExpectedException(ServerOperationException.class.getName());
+    IgnoredException.addIgnoredException(BatchException70.class.getName());
+    IgnoredException.addIgnoredException(ServerOperationException.class.getName());
     
     //start puts in RR_1 in another thread
-    AsyncInvocation inv1 = vm4.invokeAsync(WANTestBase.class, "doPuts", new Object[] { testName + "_RR_1", 10000 });
+    AsyncInvocation inv1 = vm4.invokeAsync(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_RR_1", 10000 });
     //destroy RR_1 in remote site
-    vm2.invoke(WANTestBase.class, "destroyRegion", new Object[] { testName + "_RR_1"});
+    vm2.invoke(WANTestBase.class, "destroyRegion", new Object[] { getTestMethodName() + "_RR_1"});
     
     try {
       inv1.join();
@@ -398,7 +400,7 @@ public class ConcurrentWANPropogation_1_DUnitTest extends WANTestBase {
 
     //verify that all is well in local site. All the events should be present in local region
     vm4.invoke(WANTestBase.class, "validateRegionSize", new Object[] {
-        testName + "_RR_1", 10000 });
+        getTestMethodName() + "_RR_1", 10000 });
     //assuming some events might have been dispatched before the remote region was destroyed,
     //sender's region queue will have events less than 1000 but the queue will not be empty.
     //NOTE: this much verification might be sufficient in DUnit. Hydra will take care of 
@@ -437,15 +439,15 @@ public class ConcurrentWANPropogation_1_DUnitTest extends WANTestBase {
 
     //create one RR (RR_1) on remote site
     vm2.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_1", null, isOffHeap() });
+        getTestMethodName() + "_RR_1", null, isOffHeap() });
     vm3.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_1", null, isOffHeap() });
+        getTestMethodName() + "_RR_1", null, isOffHeap() });
 
     //create another RR (RR_2) on remote site
     vm2.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_2", null, isOffHeap() });
+        getTestMethodName() + "_RR_2", null, isOffHeap() });
     vm3.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_2", null, isOffHeap() });
+        getTestMethodName() + "_RR_2", null, isOffHeap() });
     
     //start the senders on local site
     vm4.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
@@ -453,35 +455,35 @@ public class ConcurrentWANPropogation_1_DUnitTest extends WANTestBase {
 
     //create one RR (RR_1) on local site
     vm4.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_1", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_1", "ln", isOffHeap() });
     vm5.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_1", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_1", "ln", isOffHeap() });
     vm6.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_1", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_1", "ln", isOffHeap() });
     vm7.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_1", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_1", "ln", isOffHeap() });
 
     //create another RR (RR_2) on local site
     vm4.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_2", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_2", "ln", isOffHeap() });
     vm5.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_2", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_2", "ln", isOffHeap() });
     vm6.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_2", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_2", "ln", isOffHeap() });
     vm7.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_2", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_2", "ln", isOffHeap() });
     //destroy RR_2 on remote site in the middle
-    vm2.invoke(WANTestBase.class, "destroyRegion", new Object[] { testName + "_RR_2"});
+    vm2.invoke(WANTestBase.class, "destroyRegion", new Object[] { getTestMethodName() + "_RR_2"});
     
     //expected exceptions in the logs
-    addExpectedException(BatchException70.class.getName());
-    addExpectedException(ServerOperationException.class.getName());
+    IgnoredException.addIgnoredException(BatchException70.class.getName());
+    IgnoredException.addIgnoredException(ServerOperationException.class.getName());
     
     //start puts in RR_2 in another thread
-    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { testName + "_RR_2", 1000 });
+    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_RR_2", 1000 });
     
     //start puts in RR_1 in another thread
-    AsyncInvocation inv1 = vm4.invokeAsync(WANTestBase.class, "doPuts", new Object[] { testName + "_RR_1", 1000 });
+    AsyncInvocation inv1 = vm4.invokeAsync(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_RR_1", 1000 });
    
     try {
       inv1.join();
@@ -492,7 +494,7 @@ public class ConcurrentWANPropogation_1_DUnitTest extends WANTestBase {
     //though region RR_2 is destroyed, RR_1 should still get all the events put in it 
     //in local site
     vm2.invoke(WANTestBase.class, "validateRegionSize", new Object[] {
-        testName + "_RR_1", 1000 });
+        getTestMethodName() + "_RR_1", 1000 });
 
   }
 
@@ -520,15 +522,15 @@ public class ConcurrentWANPropogation_1_DUnitTest extends WANTestBase {
 
     // create one RR (RR_1) on remote site
     vm2.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_1", null, isOffHeap() });
+        getTestMethodName() + "_RR_1", null, isOffHeap() });
     vm3.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_1", null, isOffHeap() });
+        getTestMethodName() + "_RR_1", null, isOffHeap() });
 
     // create another RR (RR_2) on remote site
     vm2.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_2", null, isOffHeap() });
+        getTestMethodName() + "_RR_2", null, isOffHeap() });
     vm3.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_2", null, isOffHeap() });
+        getTestMethodName() + "_RR_2", null, isOffHeap() });
 
     // start the senders on local site
     vm4.invoke(WANTestBase.class, "setRemoveFromQueueOnException",
@@ -542,35 +544,35 @@ public class ConcurrentWANPropogation_1_DUnitTest extends WANTestBase {
 
     // create one RR (RR_1) on local site
     vm4.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_1", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_1", "ln", isOffHeap() });
     vm5.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_1", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_1", "ln", isOffHeap() });
     vm6.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_1", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_1", "ln", isOffHeap() });
     vm7.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_1", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_1", "ln", isOffHeap() });
 
     // create another RR (RR_2) on local site
     vm4.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_2", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_2", "ln", isOffHeap() });
     vm5.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_2", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_2", "ln", isOffHeap() });
     vm6.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_2", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_2", "ln", isOffHeap() });
     vm7.invoke(WANTestBase.class, "createReplicatedRegion", new Object[] {
-        testName + "_RR_2", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_2", "ln", isOffHeap() });
 
-    addExpectedException(BatchException70.class.getName());
-    addExpectedException(ServerOperationException.class.getName());
+    IgnoredException.addIgnoredException(BatchException70.class.getName());
+    IgnoredException.addIgnoredException(ServerOperationException.class.getName());
 
     // start puts in RR_1 in another thread
     AsyncInvocation inv1 = vm4.invokeAsync(WANTestBase.class, "doPuts",
-        new Object[] { testName + "_RR_1", 1000 });
+        new Object[] { getTestMethodName() + "_RR_1", 1000 });
     // start puts in RR_2 in another thread
     AsyncInvocation inv2 = vm4.invokeAsync(WANTestBase.class, "doPuts",
-        new Object[] { testName + "_RR_2", 1000 });
+        new Object[] { getTestMethodName() + "_RR_2", 1000 });
     // destroy RR_2 on remote site in the middle
-    vm2.invoke(WANTestBase.class, "destroyRegion", new Object[] { testName
+    vm2.invoke(WANTestBase.class, "destroyRegion", new Object[] { getTestMethodName()
         + "_RR_2" });
 
     try {
@@ -585,7 +587,7 @@ public class ConcurrentWANPropogation_1_DUnitTest extends WANTestBase {
     // in local site
     try {
       vm2.invoke(WANTestBase.class, "validateRegionSize", new Object[] {
-          testName + "_RR_1", 1000 });
+          getTestMethodName() + "_RR_1", 1000 });
     } finally {
       System.setProperty(
           "gemfire.GatewaySender.REMOVE_FROM_QUEUE_ON_EXCEPTION", "False");


[24/62] [abbrv] [partial] incubator-geode git commit: GEODE-773: Extract static methods from DistributedTestCase

Posted by je...@apache.org.
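
This part applies the same recipe to the client/server tests and additionally replaces DistributedTestCase.waitForCriterion with Wait.waitForCriterion, fail(String, Throwable) with Assert.fail, and tearDown2() overrides with preTearDown(). A minimal hedged sketch of the polling and teardown idiom as it reads after the change — the class name and predicate below are hypothetical; the dunit API names are the ones used in the hunks that follow:

    import com.gemstone.gemfire.test.dunit.Assert;
    import com.gemstone.gemfire.test.dunit.DistributedTestCase;
    import com.gemstone.gemfire.test.dunit.Wait;
    import com.gemstone.gemfire.test.dunit.WaitCriterion;

    // Hypothetical example class; it only illustrates the refactored idioms.
    public class WaitIdiomExampleDUnitTest extends DistributedTestCase {

      public WaitIdiomExampleDUnitTest(String name) {
        super(name);
      }

      public void testPollsForCondition() {
        try {
          // DistributedTestCase.waitForCriterion(...) -> Wait.waitForCriterion(...)
          WaitCriterion ev = new WaitCriterion() {
            public boolean done() {
              return conditionHolds();          // hypothetical predicate for illustration
            }
            public String description() {
              return "condition was never satisfied";
            }
          };
          // poll for up to 20 s at 200 ms intervals; true = fail the test on timeout
          Wait.waitForCriterion(ev, 20 * 1000, 200, true);
        }
        catch (Exception ex) {
          // fail(msg, ex) -> Assert.fail(msg, ex), which keeps the cause in the failure
          Assert.fail("failed while polling", ex);
        }
      }

      private boolean conditionHolds() {
        return true;                            // placeholder so the sketch stands alone
      }

      // tearDown2() overrides become preTearDown(), which the framework invokes
      // before its own cleanup (see the hunks below).
      @Override
      protected final void preTearDown() throws Exception {
        // close caches in the participating VMs here
      }
    }
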
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/CacheServerTransactionsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/CacheServerTransactionsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/CacheServerTransactionsDUnitTest.java
index fa38741..236076a 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/CacheServerTransactionsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/CacheServerTransactionsDUnitTest.java
@@ -36,9 +36,14 @@ import com.gemstone.gemfire.cache.util.CacheListenerAdapter;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.AvailablePort;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Tests behaviour of transactions in client server model
@@ -111,10 +116,10 @@ public class CacheServerTransactionsDUnitTest extends DistributedTestCase
   {
     Integer port1 = initServerCache(server1);
     client1.invoke(CacheServerTransactionsDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(server1.getHost()), port1 });
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), port1 });
     client2.invoke(CacheServerTransactionsDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(server1.getHost()), port1 });
-    pause(PAUSE);
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), port1 });
+    Wait.pause(PAUSE);
 
     server1.invoke(resetFlags());
     client1.invoke(resetFlags());
@@ -122,14 +127,14 @@ public class CacheServerTransactionsDUnitTest extends DistributedTestCase
 
     server1.invoke(CacheServerTransactionsDUnitTest.class, "putInTransaction",
         new Object[] { "server1" });
-    pause(PAUSE);
+    Wait.pause(PAUSE);
     
     client1.invoke(CacheServerTransactionsDUnitTest.class, "verifyNotUpdated");
     client2.invoke(CacheServerTransactionsDUnitTest.class, "verifyNotUpdated");
 
     server1.invoke(CacheServerTransactionsDUnitTest.class,
         "commitTransactionOnServer1");
-    pause(PAUSE);
+    Wait.pause(PAUSE);
 
     server1.invoke(CacheServerTransactionsDUnitTest.class, "verifyUpdates");
     client1.invoke(CacheServerTransactionsDUnitTest.class, "verifyUpdates");
@@ -148,10 +153,10 @@ public class CacheServerTransactionsDUnitTest extends DistributedTestCase
     Integer port1 = initServerCache(server1);
     Integer port2 = initServerCache(server2);
     client1.invoke(CacheServerTransactionsDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(server1.getHost()), port1, port2 });
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), port1, port2 });
     client2.invoke(CacheServerTransactionsDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(server1.getHost()), port1, port2 });
-    pause(PAUSE);
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), port1, port2 });
+    Wait.pause(PAUSE);
 
     server1.invoke(resetFlags());
     server2.invoke(resetFlags());
@@ -160,7 +165,7 @@ public class CacheServerTransactionsDUnitTest extends DistributedTestCase
 
     server1.invoke(CacheServerTransactionsDUnitTest.class, "putInTransaction",
         new Object[] { "server1" });
-    pause(PAUSE);
+    Wait.pause(PAUSE);
 
     server2.invoke(CacheServerTransactionsDUnitTest.class, "verifyNotUpdated");
     client1.invoke(CacheServerTransactionsDUnitTest.class, "verifyNotUpdated");
@@ -168,7 +173,7 @@ public class CacheServerTransactionsDUnitTest extends DistributedTestCase
 
     server1.invoke(CacheServerTransactionsDUnitTest.class,
         "commitTransactionOnServer1");
-    pause(PAUSE);
+    Wait.pause(PAUSE);
 
     server1.invoke(CacheServerTransactionsDUnitTest.class, "verifyUpdates");
     server2.invoke(CacheServerTransactionsDUnitTest.class, "verifyUpdates");
@@ -186,10 +191,10 @@ public class CacheServerTransactionsDUnitTest extends DistributedTestCase
     Integer port1 = initServerCache(server1);
     Integer port2 = initServerCache(server2);
     client1.invoke(CacheServerTransactionsDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(server1.getHost()), port1 });
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), port1 });
     client2.invoke(CacheServerTransactionsDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(server1.getHost()), port2 });
-    pause(PAUSE);
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), port2 });
+    Wait.pause(PAUSE);
 
     server1.invoke(resetFlags());
     server2.invoke(resetFlags());
@@ -198,7 +203,7 @@ public class CacheServerTransactionsDUnitTest extends DistributedTestCase
 
     server1.invoke(CacheServerTransactionsDUnitTest.class, "putInTransaction",
         new Object[] { "server1" });
-    pause(PAUSE);
+    Wait.pause(PAUSE);
 
     server2.invoke(CacheServerTransactionsDUnitTest.class, "verifyNotUpdated");
     client1.invoke(CacheServerTransactionsDUnitTest.class, "verifyNotUpdated");
@@ -206,7 +211,7 @@ public class CacheServerTransactionsDUnitTest extends DistributedTestCase
 
     server1.invoke(CacheServerTransactionsDUnitTest.class,
         "commitTransactionOnServer1");
-    pause(PAUSE);
+    Wait.pause(PAUSE);
 
     server1.invoke(CacheServerTransactionsDUnitTest.class, "verifyUpdates");
     server2.invoke(CacheServerTransactionsDUnitTest.class, "verifyUpdates");
@@ -222,10 +227,10 @@ public class CacheServerTransactionsDUnitTest extends DistributedTestCase
   {
     Integer port1 = initServerCache(server1);
     client1.invoke(CacheServerTransactionsDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(server1.getHost()), port1 });
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), port1 });
     client2.invoke(CacheServerTransactionsDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(server1.getHost()), port1 });
-    pause(PAUSE);
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), port1 });
+    Wait.pause(PAUSE);
 
     server1.invoke(resetFlags());
     client1.invoke(resetFlags());
@@ -235,7 +240,7 @@ public class CacheServerTransactionsDUnitTest extends DistributedTestCase
         new Object[] { "server1" });
     server1.invoke(CacheServerTransactionsDUnitTest.class,
         "commitTransactionOnServer1");
-    pause(PAUSE);
+    Wait.pause(PAUSE);
 
     server1.invoke(CacheServerTransactionsDUnitTest.class, "verifyInvalidates");
     client1.invoke(CacheServerTransactionsDUnitTest.class, "verifyInvalidates");
@@ -251,10 +256,10 @@ public class CacheServerTransactionsDUnitTest extends DistributedTestCase
     Integer port1 = initServerCache(server1);
     Integer port2 = initServerCache(server2);
     client1.invoke(CacheServerTransactionsDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(server1.getHost()), port1, port2 });
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), port1, port2 });
     client2.invoke(CacheServerTransactionsDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(server1.getHost()), port1, port2 });
-    pause(PAUSE);
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), port1, port2 });
+    Wait.pause(PAUSE);
 
     server1.invoke(resetFlags());
     server2.invoke(resetFlags());
@@ -265,7 +270,7 @@ public class CacheServerTransactionsDUnitTest extends DistributedTestCase
         new Object[] { "server1" });
     server1.invoke(CacheServerTransactionsDUnitTest.class,
         "commitTransactionOnServer1");
-    pause(PAUSE);
+    Wait.pause(PAUSE);
 
     server1.invoke(CacheServerTransactionsDUnitTest.class, "verifyInvalidates");
     server2.invoke(CacheServerTransactionsDUnitTest.class, "verifyInvalidates");
@@ -283,10 +288,10 @@ public class CacheServerTransactionsDUnitTest extends DistributedTestCase
     Integer port1 = initServerCache(server1);
     Integer port2 = initServerCache(server2);
     client1.invoke(CacheServerTransactionsDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(server1.getHost()), port1 });
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), port1 });
     client2.invoke(CacheServerTransactionsDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(server1.getHost()), port2 });
-    pause(PAUSE);
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), port2 });
+    Wait.pause(PAUSE);
 
     server1.invoke(resetFlags());
     server2.invoke(resetFlags());
@@ -297,7 +302,7 @@ public class CacheServerTransactionsDUnitTest extends DistributedTestCase
         new Object[] { "server1" });
     server1.invoke(CacheServerTransactionsDUnitTest.class,
         "commitTransactionOnServer1");
-    pause(PAUSE);
+    Wait.pause(PAUSE);
 
     server1.invoke(CacheServerTransactionsDUnitTest.class, "verifyInvalidates");
     server2.invoke(CacheServerTransactionsDUnitTest.class, "verifyInvalidates");
@@ -315,10 +320,10 @@ public class CacheServerTransactionsDUnitTest extends DistributedTestCase
   {
     Integer port1 = initServerCache(server1);
     client1.invoke(CacheServerTransactionsDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(server1.getHost()), port1 });
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), port1 });
     client2.invoke(CacheServerTransactionsDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(server1.getHost()), port1 });
-    pause(PAUSE);
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), port1 });
+    Wait.pause(PAUSE);
 
     server1.invoke(resetFlags());
     client1.invoke(resetFlags());
@@ -328,7 +333,7 @@ public class CacheServerTransactionsDUnitTest extends DistributedTestCase
         new Object[] { "server1" });
     server1.invoke(CacheServerTransactionsDUnitTest.class,
         "commitTransactionOnServer1");
-    pause(PAUSE);
+    Wait.pause(PAUSE);
 
     server1.invoke(CacheServerTransactionsDUnitTest.class, "verifyDestroys");
     client1.invoke(CacheServerTransactionsDUnitTest.class, "verifyDestroys");
@@ -344,10 +349,10 @@ public class CacheServerTransactionsDUnitTest extends DistributedTestCase
     Integer port1 = initServerCache(server1);
     Integer port2 = initServerCache(server2);
     client1.invoke(CacheServerTransactionsDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(server1.getHost()), port1, port2 });
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), port1, port2 });
     client2.invoke(CacheServerTransactionsDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(server1.getHost()), port1, port2 });
-    pause(PAUSE);
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), port1, port2 });
+    Wait.pause(PAUSE);
     
     server1.invoke(resetFlags());
     server2.invoke(resetFlags());
@@ -358,7 +363,7 @@ public class CacheServerTransactionsDUnitTest extends DistributedTestCase
         new Object[] { "server1" });
     server1.invoke(CacheServerTransactionsDUnitTest.class,
         "commitTransactionOnServer1");
-    pause(PAUSE);
+    Wait.pause(PAUSE);
 
     server1.invoke(CacheServerTransactionsDUnitTest.class, "verifyDestroys");
     server2.invoke(CacheServerTransactionsDUnitTest.class, "verifyDestroys");
@@ -376,10 +381,10 @@ public class CacheServerTransactionsDUnitTest extends DistributedTestCase
     Integer port1 = initServerCache(server1);
     Integer port2 = initServerCache(server2);
     client1.invoke(CacheServerTransactionsDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(server1.getHost()), port1 });
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), port1 });
     client2.invoke(CacheServerTransactionsDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(server1.getHost()), port2 });
-    pause(PAUSE);
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), port2 });
+    Wait.pause(PAUSE);
     
     server1.invoke(resetFlags());
     server2.invoke(resetFlags());
@@ -390,7 +395,7 @@ public class CacheServerTransactionsDUnitTest extends DistributedTestCase
         new Object[] { "server1" });
     server1.invoke(CacheServerTransactionsDUnitTest.class,
         "commitTransactionOnServer1");
-    pause(PAUSE);
+    Wait.pause(PAUSE);
 
     server1.invoke(CacheServerTransactionsDUnitTest.class, "verifyDestroys");
     server2.invoke(CacheServerTransactionsDUnitTest.class, "verifyDestroys");
@@ -409,11 +414,11 @@ public class CacheServerTransactionsDUnitTest extends DistributedTestCase
     Integer port2 = ((Integer)server2.invoke(
         CacheServerTransactionsDUnitTest.class, "createServerCache"));
     client1.invoke(CacheServerTransactionsDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(server1.getHost()), port1 });
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), port1 });
     client2.invoke(CacheServerTransactionsDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(server1.getHost()), port2 });
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), port2 });
     client1.invoke(CacheServerTransactionsDUnitTest.class, "commitTransactionOnClient");
-    pause(PAUSE);
+    Wait.pause(PAUSE);
     
     server1.invoke(CacheServerTransactionsDUnitTest.class, "verifyUpdatesOnServer");
     server2.invoke(CacheServerTransactionsDUnitTest.class, "verifyUpdatesOnServer");
@@ -455,7 +460,7 @@ public class CacheServerTransactionsDUnitTest extends DistributedTestCase
     final Region r1 = cache.getRegion(Region.SEPARATOR + REGION_NAME);
     assertNotNull(r1);
     try {
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "vlaue for the key k1" + r1.getEntry(k1).getValue());
       WaitCriterion ev = new WaitCriterion() {
         public boolean done() {
@@ -466,7 +471,7 @@ public class CacheServerTransactionsDUnitTest extends DistributedTestCase
           return null;
         }
       };
-      DistributedTestCase.waitForCriterion(ev, 120 * 1000, 200, true);
+      Wait.waitForCriterion(ev, 120 * 1000, 200, true);
       
       ev = new WaitCriterion() {
         public boolean done() {
@@ -477,7 +482,7 @@ public class CacheServerTransactionsDUnitTest extends DistributedTestCase
           return null;
         }
       };
-      DistributedTestCase.waitForCriterion(ev, 120 * 1000, 200, true);
+      Wait.waitForCriterion(ev, 120 * 1000, 200, true);
     }
     catch (Exception e) {
       fail("Exception in trying to get due to " + e);
@@ -565,7 +570,7 @@ public class CacheServerTransactionsDUnitTest extends DistributedTestCase
     final Region r1 = cache.getRegion(Region.SEPARATOR + REGION_NAME);
     assertNotNull(r1);
     try {
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "vlaue for the key k1" + r1.getEntry(k1).getValue());
       // wait until
       // condition is
@@ -579,7 +584,7 @@ public class CacheServerTransactionsDUnitTest extends DistributedTestCase
           return null;
         }
       };
-      DistributedTestCase.waitForCriterion(ev, 120 * 1000, 200, true);
+      Wait.waitForCriterion(ev, 120 * 1000, 200, true);
 
       ev = new WaitCriterion() {
         public boolean done() {
@@ -590,7 +595,7 @@ public class CacheServerTransactionsDUnitTest extends DistributedTestCase
           return null;
         }
       };
-      DistributedTestCase.waitForCriterion(ev, 120 * 1000, 200, true);
+      Wait.waitForCriterion(ev, 120 * 1000, 200, true);
     }
     catch (Exception e) {
       fail("Exception in trying to get due to " + e);
@@ -613,7 +618,7 @@ public class CacheServerTransactionsDUnitTest extends DistributedTestCase
               + "; it is still " + r1.getEntry(k1).getValue();
         }
       };
-      DistributedTestCase.waitForCriterion(ev, 120 * 1000, 200, true);
+      Wait.waitForCriterion(ev, 120 * 1000, 200, true);
       
       ev = new WaitCriterion() {
         public boolean done() {
@@ -624,7 +629,7 @@ public class CacheServerTransactionsDUnitTest extends DistributedTestCase
           return null;
         }
       };
-      DistributedTestCase.waitForCriterion(ev, 120 * 1000, 200, true);
+      Wait.waitForCriterion(ev, 120 * 1000, 200, true);
     }
     catch (Exception e) {
       fail("Exception in trying to get due to " + e);
@@ -810,7 +815,7 @@ public class CacheServerTransactionsDUnitTest extends DistributedTestCase
       assertEquals(r.getEntry(k3).getValue(), k3);
     }
     catch (Exception ex) {
-      fail("failed while createEntries()", ex);
+      Assert.fail("failed while createEntries()", ex);
     }
   }
 
@@ -826,7 +831,7 @@ public class CacheServerTransactionsDUnitTest extends DistributedTestCase
       r.registerInterest(keys);
     }
     catch (Exception ex) {
-      fail("failed while registering keys(" + keys + ")", ex);
+      Assert.fail("failed while registering keys(" + keys + ")", ex);
     }
   }
 
@@ -838,9 +843,8 @@ public class CacheServerTransactionsDUnitTest extends DistributedTestCase
     }
   }
 
-  public void tearDown2() throws Exception
-  {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     // close the clients first
     client1.invoke(CacheServerTransactionsDUnitTest.class, "closeCache");
     client2.invoke(CacheServerTransactionsDUnitTest.class, "closeCache");
@@ -848,5 +852,4 @@ public class CacheServerTransactionsDUnitTest extends DistributedTestCase
     server1.invoke(CacheServerTransactionsDUnitTest.class, "closeCache");
     server2.invoke(CacheServerTransactionsDUnitTest.class, "closeCache");
   }
-
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ClearPropagationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ClearPropagationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ClearPropagationDUnitTest.java
index 404d977..15beecd 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ClearPropagationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ClearPropagationDUnitTest.java
@@ -36,8 +36,11 @@ import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.CacheObserverAdapter;
 import com.gemstone.gemfire.internal.cache.CacheObserverHolder;
 import com.gemstone.gemfire.internal.cache.EventID;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 import com.gemstone.gemfire.cache.client.*;
 import com.gemstone.gemfire.cache.client.internal.PoolImpl;
@@ -103,9 +106,9 @@ public class ClearPropagationDUnitTest extends DistributedTestCase
         "createServerCache")).intValue();
 
     client1.invoke(ClearPropagationDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(server1.getHost()), new Integer(PORT1), new Integer(PORT2) });
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), new Integer(PORT1), new Integer(PORT2) });
     client2.invoke(ClearPropagationDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(server1.getHost()), new Integer(PORT1), new Integer(PORT2) });
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), new Integer(PORT1), new Integer(PORT2) });
 
     CacheObserverHolder.setInstance(new CacheObserverAdapter());
 
@@ -160,7 +163,7 @@ public class ClearPropagationDUnitTest extends DistributedTestCase
 
     client1.invoke(ClearPropagationDUnitTest.class,
         "acquireConnectionsAndClear",
-        new Object[] { getServerHostName(client1.getHost())});
+        new Object[] { NetworkUtils.getServerHostName(client1.getHost())});
 
     client1.invoke(checkSizeRegion(2, false/*Do not Block*/));
     client2.invoke(checkSizeRegion(0, true /* block*/));
@@ -211,7 +214,7 @@ public class ClearPropagationDUnitTest extends DistributedTestCase
 
     client1.invoke(ClearPropagationDUnitTest.class,
       "acquireConnectionsAndDestroyRegion",
-      new Object[] { getServerHostName(client1.getHost())});
+      new Object[] { NetworkUtils.getServerHostName(client1.getHost())});
 
     client1.invoke(checkSizeRegion(2, false/*Do not Block*/));
     client2.invoke(checkDestroyRegion(true /* block*/));
@@ -263,7 +266,7 @@ public class ClearPropagationDUnitTest extends DistributedTestCase
       {
         Region region = cache.getRegion(Region.SEPARATOR + REGION_NAME);
         assertNotNull(region);
-        getLogWriter().info("Size of the region " + region.size());
+        LogWriterUtils.getLogWriter().info("Size of the region " + region.size());
 
         if (toBlock) {
           synchronized (ClearPropagationDUnitTest.class) {
@@ -348,7 +351,7 @@ public class ClearPropagationDUnitTest extends DistributedTestCase
       assertEquals(r1.getEntry("key2").getValue(), "key-2");
     }
     catch (Exception ex) {
-      fail("failed while createEntriesK1andK2()", ex);
+      Assert.fail("failed while createEntriesK1andK2()", ex);
     }
   }
 
@@ -431,7 +434,7 @@ public class ClearPropagationDUnitTest extends DistributedTestCase
 
     }
     catch (Exception ex) {
-      fail("failed while registering interest", ex);
+      Assert.fail("failed while registering interest", ex);
     }
   }
 
@@ -445,7 +448,7 @@ public class ClearPropagationDUnitTest extends DistributedTestCase
       assertEquals("key-2", r.getEntry("key2").getValue());
     }
     catch (Exception ex) {
-      fail("failed while verifyNoUpdates()", ex);
+      Assert.fail("failed while verifyNoUpdates()", ex);
     }
   }
 
@@ -461,7 +464,7 @@ public class ClearPropagationDUnitTest extends DistributedTestCase
 
     }
     catch (Exception ex) {
-      fail("failed while region", ex);
+      Assert.fail("failed while region", ex);
     }
   }
 
@@ -473,14 +476,13 @@ public class ClearPropagationDUnitTest extends DistributedTestCase
     }
   }
 
-  public void tearDown2() throws Exception
-  {
+  @Override
+  protected final void preTearDown() throws Exception {
     //close client
     client1.invoke(ClearPropagationDUnitTest.class, "closeCache");
     client2.invoke(ClearPropagationDUnitTest.class, "closeCache");
     //close server
     server1.invoke(ClearPropagationDUnitTest.class, "closeCache");
     server2.invoke(ClearPropagationDUnitTest.class, "closeCache");
-
   }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ClientConflationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ClientConflationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ClientConflationDUnitTest.java
index 3673228..3a7c246 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ClientConflationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ClientConflationDUnitTest.java
@@ -39,9 +39,14 @@ import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.ClientServerObserverAdapter;
 import com.gemstone.gemfire.internal.cache.ClientServerObserverHolder;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.internal.cache.CacheServerImpl;
 
 /**
@@ -105,7 +110,7 @@ public class ClientConflationDUnitTest extends DistributedTestCase
       performSteps(DistributionConfig.CLIENT_CONFLATION_PROP_VALUE_DEFAULT);
     }
     catch( Exception e ) {
-      fail("testConflationDefault failed due to exception", e);
+      Assert.fail("testConflationDefault failed due to exception", e);
     }
   }
   
@@ -114,7 +119,7 @@ public class ClientConflationDUnitTest extends DistributedTestCase
       performSteps(DistributionConfig.CLIENT_CONFLATION_PROP_VALUE_ON);
     }
     catch( Exception e ) {
-      fail("testConflationOn failed due to exception", e);
+      Assert.fail("testConflationOn failed due to exception", e);
     }
   }
   
@@ -123,13 +128,13 @@ public class ClientConflationDUnitTest extends DistributedTestCase
       performSteps(DistributionConfig.CLIENT_CONFLATION_PROP_VALUE_OFF);
     }
     catch( Exception e ) {
-      fail("testConflationOff failed due to exception", e);
+      Assert.fail("testConflationOff failed due to exception", e);
     }
   }
   
   private void performSteps(String conflation) throws Exception {
-    createClientCacheFeeder(getServerHostName(Host.getHost(0)), new Integer(PORT));
-    vm1.invoke(ClientConflationDUnitTest.class, "createClientCache", new Object[] { getServerHostName(vm1.getHost()), new Integer(PORT),
+    createClientCacheFeeder(NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT));
+    vm1.invoke(ClientConflationDUnitTest.class, "createClientCache", new Object[] { NetworkUtils.getServerHostName(vm1.getHost()), new Integer(PORT),
       conflation});
     vm1.invoke(ClientConflationDUnitTest.class, "setClientServerObserverForBeforeInterestRecovery");
     vm1.invoke(ClientConflationDUnitTest.class, "setAllCountersZero");
@@ -334,7 +339,7 @@ public class ClientConflationDUnitTest extends DistributedTestCase
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 60 * 1000, 200, true);
     
     final int u1 = update1;
     ev = new WaitCriterion() {
@@ -346,7 +351,7 @@ public class ClientConflationDUnitTest extends DistributedTestCase
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 60 * 1000, 200, true);
     
     ev = new WaitCriterion() {
       public boolean done() {
@@ -357,7 +362,7 @@ public class ClientConflationDUnitTest extends DistributedTestCase
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 60 * 1000, 200, true);
     
     final int u2 = update2;
     ev = new WaitCriterion() {
@@ -369,7 +374,7 @@ public class ClientConflationDUnitTest extends DistributedTestCase
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 60 * 1000, 200, true);
   }
 
   /**
@@ -500,7 +505,7 @@ public class ClientConflationDUnitTest extends DistributedTestCase
   public static void putEntries()
   {
     try {
-      getLogWriter().info("Putting entries...");
+      LogWriterUtils.getLogWriter().info("Putting entries...");
       Region r1 = cacheFeeder.getRegion(Region.SEPARATOR +REGION_NAME1);
       Region r2 = cacheFeeder.getRegion(Region.SEPARATOR +REGION_NAME2);
       r1.put("key-1", "11");
@@ -516,15 +521,15 @@ public class ClientConflationDUnitTest extends DistributedTestCase
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      fail("failed while region.put()", ex);
+      Assert.fail("failed while region.put()", ex);
     }
   }
 
   /**
    * close the cache in tearDown
    */
-  public void tearDown2() throws Exception
-  {
+  @Override
+  protected final void preTearDown() throws Exception {
     // close client
     closeCacheFeeder();
     vm1.invoke(ClientConflationDUnitTest.class, "closeCacheClient");

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ClientHealthMonitorJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ClientHealthMonitorJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ClientHealthMonitorJUnitTest.java
index 0a8a45a..1a6b038 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ClientHealthMonitorJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ClientHealthMonitorJUnitTest.java
@@ -41,8 +41,8 @@ import com.gemstone.gemfire.cache.server.CacheServer;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.EventID;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
@@ -176,7 +176,7 @@ public class ClientHealthMonitorJUnitTest
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 20 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 20 * 1000, 200, true);
     
     assertEquals(1, s.getInt("currentClients"));
     assertEquals(1, s.getInt("currentClientConnections"));
@@ -193,7 +193,7 @@ public class ClientHealthMonitorJUnitTest
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, TIME_BETWEEN_PINGS * 5, 200, true);
+    Wait.waitForCriterion(ev, TIME_BETWEEN_PINGS * 5, 200, true);
 
     {
       this.system.getLogWriter().info("currentClients="

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ClientInterestNotifyDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ClientInterestNotifyDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ClientInterestNotifyDUnitTest.java
index 2153746..22f36cd 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ClientInterestNotifyDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ClientInterestNotifyDUnitTest.java
@@ -39,9 +39,14 @@ import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.CacheServerImpl;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * This test verifies the per-client notify-by-subscription (NBS) override
@@ -170,7 +175,7 @@ public class ClientInterestNotifyDUnitTest extends DistributedTestCase
       performSteps();
     }
     catch( Exception e ) {
-      fail("testInterestNotify failed due to exception", e);
+      Assert.fail("testInterestNotify failed due to exception", e);
     }
   }
   
@@ -181,14 +186,14 @@ public class ClientInterestNotifyDUnitTest extends DistributedTestCase
     
     // Create a feeder.
     vm0.invoke(ClientInterestNotifyDUnitTest.class, "createClientCacheFeeder",
-        new Object[] {getServerHostName(Host.getHost(0)), new Integer(PORT)});
+        new Object[] {NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT)});
     
     // Client 1 overrides NBS to true.
     // Client 2 "overrides" NSB to false.
     // Client 3 uses the default NBS which is false on the server.
     
     vm1.invoke(ClientInterestNotifyDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(Host.getHost(0)), new Integer(PORT), "ClientOn"});
+        new Object[] { NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT), "ClientOn"});
     /*
     vm2.invoke(ClientInterestNotifyDUnitTest.class, "createClientCache",
         new Object[] { getServerHostName(Host.getHost(0)), new Integer(PORT), 
@@ -460,7 +465,7 @@ public class ClientInterestNotifyDUnitTest extends DistributedTestCase
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+    Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
   }
   
   /**
@@ -567,7 +572,7 @@ public class ClientInterestNotifyDUnitTest extends DistributedTestCase
   public static void doEntryOps()
   {
     try {
-      getLogWriter().info("Putting entries...");
+      LogWriterUtils.getLogWriter().info("Putting entries...");
       Cache cacheClient = GemFireCacheImpl.getInstance();
       Region r1 = cacheClient.getRegion(Region.SEPARATOR +REGION_NAME1);
       Region r2 = cacheClient.getRegion(Region.SEPARATOR +REGION_NAME2);
@@ -587,7 +592,7 @@ public class ClientInterestNotifyDUnitTest extends DistributedTestCase
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      fail("failed while region doing ops", ex);
+      Assert.fail("failed while region doing ops", ex);
     }
   }
   
@@ -597,7 +602,7 @@ public class ClientInterestNotifyDUnitTest extends DistributedTestCase
   public static void doFeed()
   {
     try {
-      getLogWriter().info("Putting entries...");
+      LogWriterUtils.getLogWriter().info("Putting entries...");
       Cache cacheClient = GemFireCacheImpl.getInstance();
       Region r1 = cacheClient.getRegion(Region.SEPARATOR +REGION_NAME1);
       Region r2 = cacheClient.getRegion(Region.SEPARATOR +REGION_NAME2);
@@ -608,7 +613,7 @@ public class ClientInterestNotifyDUnitTest extends DistributedTestCase
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      fail("failed while region doing ops", ex);
+      Assert.fail("failed while region doing ops", ex);
     }
   }
 
@@ -618,22 +623,22 @@ public class ClientInterestNotifyDUnitTest extends DistributedTestCase
   public static void getEntries()
   {
     try {
-      getLogWriter().info("Getting entries...");
+      LogWriterUtils.getLogWriter().info("Getting entries...");
       Cache cacheClient = GemFireCacheImpl.getInstance();
       Region r3 = cacheClient.getRegion(Region.SEPARATOR +REGION_NAME3);
       r3.get("key-1");
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      fail("failed while region doing ops", ex);
+      Assert.fail("failed while region doing ops", ex);
     }
   }
   
   /**
    * close the caches in tearDown
    */
-  public void tearDown2() throws Exception
-  {
+  @Override
+  protected final void preTearDown() throws Exception {
     vm0.invoke(ClientInterestNotifyDUnitTest.class, "closeCache");
     vm1.invoke(ClientInterestNotifyDUnitTest.class, "closeCache");
     /*
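
Note on the tearDown change, which repeats in every dunit test this commit
touches: the public tearDown2() override is replaced by a protected
preTearDown() hook. A rough before/after sketch, illustrative only; it
assumes the base DistributedTestCase.tearDown() now calls preTearDown()
before its own cleanup (a contract implied by the hook name but not shown in
these hunks), and closeCache() stands in for whatever cleanup a given test
needs:

    // before: the override hook was tearDown2()
    public void tearDown2() throws Exception {
      closeCache();
    }

    // after: the subclass only contributes a pre-teardown step and the
    // base class keeps control of the overall teardown sequence
    @Override
    protected final void preTearDown() throws Exception {
      closeCache();
    }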

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ClientServerMiscDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ClientServerMiscDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ClientServerMiscDUnitTest.java
index 8d3b9ab..0f40428 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ClientServerMiscDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ClientServerMiscDUnitTest.java
@@ -41,11 +41,16 @@ import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.CacheServerImpl;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 import java.util.Iterator;
 import java.util.Properties;
@@ -142,13 +147,13 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
   public void testConcurrentOperationsWithDRandPR() throws Exception {
     int port1 = initServerCache(true); // vm0
     int port2 = initServerCache2(true); // vm1
-    String serverName = getServerHostName(Host.getHost(0));
+    String serverName = NetworkUtils.getServerHostName(Host.getHost(0));
     host.getVM(2).invoke(this.getClass(), "createClientCacheV", new Object[]{serverName, port1});
     host.getVM(3).invoke(this.getClass(), "createClientCacheV", new Object[]{serverName, port2});
-    getLogWriter().info("Testing concurrent map operations from a client with a distributed region");
+    LogWriterUtils.getLogWriter().info("Testing concurrent map operations from a client with a distributed region");
     concurrentMapTest(host.getVM(2), "/" + REGION_NAME1);
     // TODO add verification in vm3
-    getLogWriter().info("Testing concurrent map operations from a client with a partitioned region");
+    LogWriterUtils.getLogWriter().info("Testing concurrent map operations from a client with a partitioned region");
     concurrentMapTest(host.getVM(2), "/" + PR_REGION_NAME);
     // TODO add verification in vm3
   }
@@ -156,13 +161,13 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
   public void testConcurrentOperationsWithDRandPRandEmptyClient() throws Exception {
     int port1 = initServerCache(true); // vm0
     int port2 = initServerCache2(true); // vm1
-    String serverName = getServerHostName(Host.getHost(0));
+    String serverName = NetworkUtils.getServerHostName(Host.getHost(0));
     host.getVM(2).invoke(this.getClass(), "createEmptyClientCache", new Object[]{serverName, port1});
     host.getVM(3).invoke(this.getClass(), "createClientCacheV", new Object[]{serverName, port2});
-    getLogWriter().info("Testing concurrent map operations from a client with a distributed region");
+    LogWriterUtils.getLogWriter().info("Testing concurrent map operations from a client with a distributed region");
     concurrentMapTest(host.getVM(2), "/" + REGION_NAME1);
     // TODO add verification in vm3
-    getLogWriter().info("Testing concurrent map operations from a client with a partitioned region");
+    LogWriterUtils.getLogWriter().info("Testing concurrent map operations from a client with a partitioned region");
     concurrentMapTest(host.getVM(2), "/" + PR_REGION_NAME);
     // TODO add verification in vm3
   }
@@ -374,7 +379,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
   {
     // start server first
     PORT1 = initServerCache(true);
-    createClientCache(getServerHostName(Host.getHost(0)), PORT1);
+    createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), PORT1);
     populateCache();
     registerInterest();
     server1.invoke(ClientServerMiscDUnitTest.class, "put");
@@ -407,11 +412,11 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
   {
     // start server first
     PORT1 = initServerCache(true);
-    createClientCache(getServerHostName(Host.getHost(0)), PORT1);
+    createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), PORT1);
     populateCache();
     registerInterestInBothTheRegions();
     closeRegion1();
-    pause(6000);
+    Wait.pause(6000);
     server1.invoke(ClientServerMiscDUnitTest.class,
         "verifyInterestListOnServer");
     server1.invoke(ClientServerMiscDUnitTest.class, "put");
@@ -430,7 +435,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
   {
     // start server first
     PORT1 = initServerCache(true);
-    pool = (PoolImpl)createClientCache(getServerHostName(Host.getHost(0)),PORT1);
+    pool = (PoolImpl)createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)),PORT1);
     populateCache();
     registerInterestInBothTheRegions();
     closeBothRegions();
@@ -457,7 +462,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
   public void testCCPDestroyOnLastDestroyRegion() throws Exception
   {
     PORT1 = initServerCache(true);
-    PoolImpl pool = (PoolImpl)createClientCache(getServerHostName(Host.getHost(0)),PORT1);
+    PoolImpl pool = (PoolImpl)createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)),PORT1);
     destroyRegion1();
     // pause(5000);
     server1.invoke(ClientServerMiscDUnitTest.class,
@@ -494,7 +499,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
   {
     // start server first
     PORT1 = initServerCache(false);
-    createClientCache(getServerHostName(Host.getHost(0)), PORT1);
+    createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), PORT1);
     registerInterestForInvalidatesInBothTheRegions();
     populateCache();
     server1.invoke(ClientServerMiscDUnitTest.class, "put");
@@ -515,7 +520,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
   {
     // start server first
     PORT1 = initServerCache(false);
-    createClientCache(getServerHostName(Host.getHost(0)), PORT1);
+    createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), PORT1);
     registerInterestForInvalidatesInBothTheRegions();
     Region region = static_cache.getRegion(REGION_NAME1);
     populateCache();
@@ -561,7 +566,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
     props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
     props.setProperty(DistributionConfig.LOCATORS_NAME, "");
     new ClientServerMiscDUnitTest("temp").createCache(props);
-    String host = getServerHostName(server1.getHost());
+    String host = NetworkUtils.getServerHostName(server1.getHost());
     PoolImpl p = (PoolImpl)PoolManager.createFactory()
       .addServer(host, PORT1)
       .setSubscriptionEnabled(true)
@@ -601,7 +606,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+    Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
     
     // assertEquals(region1.getEntry(k1).getValue(), k1);
     wc = new WaitCriterion() {
@@ -614,7 +619,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+    Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
     
     wc = new WaitCriterion() {
       String excuse;
@@ -626,7 +631,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+    Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
     
     // assertEquals(region1.getEntry(k2).getValue(), k2);
     // assertEquals(region2.getEntry(k1).getValue(), k1);
@@ -640,7 +645,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+    Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
     
     // assertEquals(region2.getEntry(k2).getValue(), k2);
   }
@@ -664,7 +669,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
     ds.disconnect();
     ds = getSystem(props);
     PORT1 = initServerCache(true);
-    String host = getServerHostName(server1.getHost());
+    String host = NetworkUtils.getServerHostName(server1.getHost());
     Pool p = PoolManager.createFactory()
       .addServer(host, PORT1)
       .setSubscriptionEnabled(true)
@@ -686,7 +691,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
     assertNotNull(region2);
     //region1.registerInterest(CacheClientProxy.ALL_KEYS);
     region2.registerInterest("ALL_KEYS");
-    pause(6000);
+    Wait.pause(6000);
     server1.invoke(ClientServerMiscDUnitTest.class,
         "verifyInterestListOnServer");
 
@@ -707,7 +712,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
   public void testBug35380() throws Exception
   {
     //work around GEODE-477
-    addExpectedException("Connection reset");
+    IgnoredException.addIgnoredException("Connection reset");
     Properties props = new Properties();
     props.setProperty("mcast-port", "0");
     props.setProperty("locators", "");
@@ -715,7 +720,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
     assertNotNull(ds);
     
     PORT1 = initServerCache(true);
-    String host = getServerHostName(server1.getHost());
+    String host = NetworkUtils.getServerHostName(server1.getHost());
     Pool p = PoolManager.createFactory()
       .addServer(host, PORT1)
       .setSubscriptionEnabled(true)
@@ -849,7 +854,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+    Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
     
     return p;
   }
@@ -900,7 +905,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
     }
     catch (CacheWriterException e) {
       e.printStackTrace();
-      fail("Test failed due to CacheWriterException during registerInterest", e);
+      Assert.fail("Test failed due to CacheWriterException during registerInterest", e);
     }
   }
 
@@ -917,7 +922,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
     }
     catch (CacheWriterException e) {
       e.printStackTrace();
-      fail(
+      Assert.fail(
           "Test failed due to CacheWriterException during registerInterestnBothRegions",
           e);
     }
@@ -936,7 +941,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
     }
     catch (CacheWriterException e) {
       e.printStackTrace();
-      fail(
+      Assert.fail(
           "Test failed due to CacheWriterException during registerInterestnBothRegions",
           e);
     }
@@ -952,7 +957,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
     }
     catch (Exception e) {
       e.printStackTrace();
-      fail("Test failed due to Exception during closeRegion1", e);
+      Assert.fail("Test failed due to Exception during closeRegion1", e);
     }
   }
 
@@ -972,7 +977,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
     }
     catch (Exception e) {
       e.printStackTrace();
-      fail("Test failed due to Exception during closeBothRegions", e);
+      Assert.fail("Test failed due to Exception during closeBothRegions", e);
     }
   }
 
@@ -986,7 +991,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
     }
     catch (Exception e) {
       e.printStackTrace();
-      fail("Test failed due to Exception during closeBothRegions", e);
+      Assert.fail("Test failed due to Exception during closeBothRegions", e);
     }
   }
 
@@ -1000,7 +1005,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
     }
     catch (Exception e) {
       e.printStackTrace();
-      fail("Test failed due to Exception during closeBothRegions", e);
+      Assert.fail("Test failed due to Exception during closeBothRegions", e);
     }
   }
 
@@ -1012,7 +1017,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
       r2.destroyRegion();
     } catch (Exception e) {
      // e.printStackTrace();
-      fail("Test failed due to Exception during closeBothRegions", e);
+      Assert.fail("Test failed due to Exception during closeBothRegions", e);
     }
   }
 
@@ -1068,7 +1073,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 40 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 40 * 1000, 1000, true);
     }
     catch (Exception ex) {
       ex.printStackTrace();
@@ -1113,7 +1118,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 40 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 40 * 1000, 1000, true);
     }
     catch (Exception ex) {
       ex.printStackTrace();
@@ -1145,7 +1150,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
       assertEquals(r2.getEntry(k2).getValue(), k2);
     }
     catch (Exception ex) {
-      fail("failed while createEntries()", ex);
+      Assert.fail("failed while createEntries()", ex);
     }
   }
 
@@ -1170,7 +1175,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
       assertEquals(r2.getEntry(k2).getValue(), server_k2);
     }
     catch (Exception ex) {
-      fail("failed while put()", ex);
+      Assert.fail("failed while put()", ex);
     }
   }
 
@@ -1193,7 +1198,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
       
       // assertEquals(k1, r1.getEntry(k1).getValue());
       wc = new WaitCriterion() {
@@ -1206,7 +1211,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
       
       // assertEquals(k2, r1.getEntry(k2).getValue());
       wc = new WaitCriterion() {
@@ -1219,7 +1224,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
       
       // assertEquals(server_k1, r2.getEntry(k1).getValue());
       wc = new WaitCriterion() {
@@ -1232,12 +1237,12 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
       
       // assertEquals(server_k2, r2.getEntry(k2).getValue());
     }
     catch (Exception ex) {
-      fail("failed while verifyUpdates()", ex);
+      Assert.fail("failed while verifyUpdates()", ex);
     }
   }
 
@@ -1260,7 +1265,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 90 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 90 * 1000, 1000, true);
       
       // assertNull(r1.getEntry(k1).getValue());
       wc = new WaitCriterion() {
@@ -1273,7 +1278,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 90 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 90 * 1000, 1000, true);
       
       // assertNull(r1.getEntry(k2).getValue());
       wc = new WaitCriterion() {
@@ -1286,7 +1291,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 90 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 90 * 1000, 1000, true);
       
 
       // assertNull(r2.getEntry(k1).getValue());
@@ -1300,7 +1305,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 90 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 90 * 1000, 1000, true);
       
       // assertNull(r2.getEntry(k2).getValue());
       wc = new WaitCriterion() {
@@ -1313,10 +1318,10 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 90 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 90 * 1000, 1000, true);
     }
     catch (Exception ex) {
-      fail("failed while verifyInvalidatesOnBothRegions()", ex);
+      Assert.fail("failed while verifyInvalidatesOnBothRegions()", ex);
     }
   }
 
@@ -1336,7 +1341,7 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
       
       // assertEquals(server_k1, r2.getEntry(k1).getValue());
       wc = new WaitCriterion() {
@@ -1349,23 +1354,21 @@ public class ClientServerMiscDUnitTest extends CacheTestCase
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
       
       // assertEquals(server_k2, r2.getEntry(k2).getValue());
     }
     catch (Exception ex) {
-      fail("failed while verifyUpdatesOnRegion2()", ex);
+      Assert.fail("failed while verifyUpdatesOnRegion2()", ex);
     }
   }
 
-  public void tearDown2() throws Exception
-  {
-    super.tearDown2();
+  @Override
+  protected final void postTearDownCacheTestCase() throws Exception {
     // close the clients first
     closeCache();
     // then close the servers
     server1.invoke(ClientServerMiscDUnitTest.class, "closeCache");
-
   }
 
   public static void closeCache()
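
Two other inherited helpers migrate to utility classes in this file:
addExpectedException(..) becomes IgnoredException.addIgnoredException(..),
and getServerHostName(..) becomes NetworkUtils.getServerHostName(..). A
small sketch of the new calls, illustrative only; the remove() call on the
returned handle is an assumption based on how such handles are used
elsewhere in the dunit framework and is not shown in these hunks:

    import com.gemstone.gemfire.test.dunit.Host;
    import com.gemstone.gemfire.test.dunit.IgnoredException;
    import com.gemstone.gemfire.test.dunit.NetworkUtils;

    String host = NetworkUtils.getServerHostName(Host.getHost(0));

    // suppress a known, expected log message for the duration of the test
    IgnoredException ignored =
        IgnoredException.addIgnoredException("Connection reset");
    try {
      // exercise the client/server code that is allowed to log the message
    } finally {
      ignored.remove();  // assumed API for clearing the suppression
    }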

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ConflationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ConflationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ConflationDUnitTest.java
index fe5d03f..88bdb20 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ConflationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ConflationDUnitTest.java
@@ -40,9 +40,14 @@ import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.HARegion;
 import com.gemstone.gemfire.internal.cache.ha.HAHelper;
 import com.gemstone.gemfire.internal.cache.ha.HARegionQueue;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.cache.client.*;
 import com.gemstone.gemfire.cache.client.internal.PoolImpl;
 
@@ -136,9 +141,9 @@ public class ConflationDUnitTest extends DistributedTestCase
   {
     try {
       vm0.invoke(ConflationDUnitTest.class, "setIsSlowStart");
-      createClientCache1UniqueWriter ( getServerHostName(Host.getHost(0)), new Integer(PORT));
+      createClientCache1UniqueWriter ( NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT));
       vm2.invoke(ConflationDUnitTest.class, "createClientCache2UniqueWriter",
-          new Object[] { getServerHostName(Host.getHost(0)), new Integer(PORT)});
+          new Object[] { NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT)});
       vm2.invoke(ConflationDUnitTest.class, "setClientServerObserverForBeforeInterestRecovery");
       vm2.invoke(ConflationDUnitTest.class, "setAllCountersZero");
       vm2.invoke(ConflationDUnitTest.class, "assertAllCountersZero");
@@ -155,7 +160,7 @@ public class ConflationDUnitTest extends DistributedTestCase
       vm2.invoke(ConflationDUnitTest.class, "assertCounterSizes");
     }
     catch( Exception e ) {
-      fail("Test failed due to exception", e);
+      Assert.fail("Test failed due to exception", e);
     }
   }
 
@@ -167,9 +172,9 @@ public class ConflationDUnitTest extends DistributedTestCase
   {
     try {
       vm0.invoke(ConflationDUnitTest.class, "setIsSlowStart");
-      createClientCache1CommonWriter( getServerHostName(Host.getHost(0)), new Integer(PORT));
+      createClientCache1CommonWriter( NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT));
       vm2.invoke(ConflationDUnitTest.class, "createClientCache2CommonWriter",
-          new Object[] { getServerHostName(Host.getHost(0)), new Integer(PORT)});
+          new Object[] { NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT)});
       vm2.invoke(ConflationDUnitTest.class, "setClientServerObserverForBeforeInterestRecovery");
       vm2.invoke(ConflationDUnitTest.class, "setAllCountersZero");
       vm2.invoke(ConflationDUnitTest.class, "assertAllCountersZero");
@@ -186,7 +191,7 @@ public class ConflationDUnitTest extends DistributedTestCase
       vm2.invoke(ConflationDUnitTest.class, "assertCounterSizes");
     }
     catch( Exception e ) {
-      fail("Test failed due to exception", e);
+      Assert.fail("Test failed due to exception", e);
     }
   }
 
@@ -199,10 +204,10 @@ public class ConflationDUnitTest extends DistributedTestCase
   {
     try {
       vm0.invoke(ConflationDUnitTest.class, "setIsSlowStart");
-      createClientCache1CommonWriterTest3(getServerHostName(Host.getHost(0)), new Integer(PORT));
+      createClientCache1CommonWriterTest3(NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT));
       vm2.invoke(ConflationDUnitTest.class,
           "createClientCache2CommonWriterTest3", new Object[] {
-        getServerHostName(Host.getHost(0)), new Integer(PORT) });
+        NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT) });
       vm2.invoke(ConflationDUnitTest.class, "setClientServerObserverForBeforeInterestRecovery");
       vm2.invoke(ConflationDUnitTest.class, "setAllCountersZero");
       vm2.invoke(ConflationDUnitTest.class, "assertAllCountersZero");
@@ -221,7 +226,7 @@ public class ConflationDUnitTest extends DistributedTestCase
       vm0.invoke(ConflationDUnitTest.class, "assertConflationStatus");
     }
     catch (Exception e) {
-      fail("Test failed due to exception", e);
+      Assert.fail("Test failed due to exception", e);
     }
   }
   /**
@@ -301,7 +306,7 @@ public class ConflationDUnitTest extends DistributedTestCase
     factory.setPoolName(createPool(host,"p1", port, true).getName());
     factory.addCacheListener(new CacheListenerAdapter() {
       public void afterCreate(EntryEvent event) {
-        getLogWriter().info("Listener received event " + event);
+        LogWriterUtils.getLogWriter().info("Listener received event " + event);
         String val = (String) event.getNewValue();
         synchronized (ConflationDUnitTest.class) {
           if (val.equals(MARKER)) {
@@ -317,7 +322,7 @@ public class ConflationDUnitTest extends DistributedTestCase
       }
 
       public void afterUpdate(EntryEvent event) {
-        getLogWriter().info("Listener received event " + event);
+        LogWriterUtils.getLogWriter().info("Listener received event " + event);
         synchronized (this) {
           counterUpdate++;
         }
@@ -325,7 +330,7 @@ public class ConflationDUnitTest extends DistributedTestCase
 
       public void afterDestroy(EntryEvent event)
       {
-        getLogWriter().info("Listener received event " + event);
+        LogWriterUtils.getLogWriter().info("Listener received event " + event);
         synchronized (this) {
           if(!event.getKey().equals(MARKER)) {
             counterDestroy++;
@@ -348,7 +353,7 @@ public class ConflationDUnitTest extends DistributedTestCase
     factory.setPoolName(createPool(host,"p1", port, true).getName());
     factory.addCacheListener(new CacheListenerAdapter() {
       public void afterCreate(EntryEvent event) {
-        getLogWriter().info("Listener received event " + event);
+        LogWriterUtils.getLogWriter().info("Listener received event " + event);
         String val = (String)event.getNewValue();
         synchronized (ConflationDUnitTest.class) {
           if (val.equals(MARKER)) {
@@ -364,14 +369,14 @@ public class ConflationDUnitTest extends DistributedTestCase
       }
 
       public void afterUpdate(EntryEvent event) {
-        getLogWriter().info("Listener received event " + event);
+        LogWriterUtils.getLogWriter().info("Listener received event " + event);
         synchronized (this) {
           counterUpdate++;
         }
       }
 
       public void afterDestroy(EntryEvent event) {
-        getLogWriter().info("Listener received event " + event);
+        LogWriterUtils.getLogWriter().info("Listener received event " + event);
         synchronized (this) {
           if (!event.getKey().equals(MARKER)) {
             counterDestroy++;
@@ -421,7 +426,7 @@ public class ConflationDUnitTest extends DistributedTestCase
       public void afterCreate(EntryEvent event)
       {
         String val = (String) event.getNewValue();
-        getLogWriter().info("Listener received event " + event);
+        LogWriterUtils.getLogWriter().info("Listener received event " + event);
         synchronized (ConflationDUnitTest.class) {
           if (val.equals(MARKER)) {
             count++;
@@ -518,7 +523,7 @@ public class ConflationDUnitTest extends DistributedTestCase
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 60 * 1000, 200, true);
 
     ev = new WaitCriterion() {
       public boolean done() {
@@ -529,7 +534,7 @@ public class ConflationDUnitTest extends DistributedTestCase
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 60 * 1000, 200, true);
     
     ev = new WaitCriterion() {
       public boolean done() {
@@ -540,7 +545,7 @@ public class ConflationDUnitTest extends DistributedTestCase
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 60 * 1000, 200, true);
   }
 
 
@@ -559,7 +564,7 @@ public class ConflationDUnitTest extends DistributedTestCase
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 60 * 1000, 200, true);
     // assertEquals("creates", 2, counterCreate);
     
     ev = new WaitCriterion() {
@@ -571,7 +576,7 @@ public class ConflationDUnitTest extends DistributedTestCase
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 60 * 1000, 200, true);
     
     // assertEquals("destroys", 2, counterDestroy);
     // assertTrue("updates", 20000 >= counterUpdate);
@@ -584,7 +589,7 @@ public class ConflationDUnitTest extends DistributedTestCase
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 60 * 1000, 200, true);
   }
 
   public static void waitForMarker()
@@ -729,7 +734,7 @@ public class ConflationDUnitTest extends DistributedTestCase
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      fail("failed while region.create()", ex);
+      Assert.fail("failed while region.create()", ex);
     }
   }
 
@@ -751,7 +756,7 @@ public class ConflationDUnitTest extends DistributedTestCase
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      fail("failed while region.put()", ex);
+      Assert.fail("failed while region.put()", ex);
     }
   }
 
@@ -765,7 +770,7 @@ public class ConflationDUnitTest extends DistributedTestCase
    }
    catch (Exception ex) {
      ex.printStackTrace();
-     fail("failed while region.create() marker", ex);
+     Assert.fail("failed while region.create() marker", ex);
    }
  }
 
@@ -788,7 +793,7 @@ public class ConflationDUnitTest extends DistributedTestCase
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      fail("failed while region.put()", ex);
+      Assert.fail("failed while region.put()", ex);
     }
   }
 
@@ -815,7 +820,7 @@ public class ConflationDUnitTest extends DistributedTestCase
         HARegionQueue haRegionQueue = HAHelper.getRegionQueue(region);
         statMap.put("eventsConflated", new Long(HAHelper.getRegionQueueStats(
             haRegionQueue).getEventsConflated()));
-        getLogWriter().info("new Stats Map  : " + statMap.toString());
+        LogWriterUtils.getLogWriter().info("new Stats Map  : " + statMap.toString());
 
       }
     }
@@ -835,7 +840,7 @@ public class ConflationDUnitTest extends DistributedTestCase
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      fail("failed while region.get()", ex);
+      Assert.fail("failed while region.get()", ex);
     }
   }
 
@@ -903,8 +908,8 @@ public class ConflationDUnitTest extends DistributedTestCase
   /**
    * close the cache in tearDown
    */
-  public void tearDown2() throws Exception
-  {
+  @Override
+  protected final void preTearDown() throws Exception {
     // close client
     closeCache();
     vm2.invoke(ConflationDUnitTest.class, "closeCache");
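
The fail(..) replacements above are worth a note: JUnit's own fail(String)
has no Throwable overload, so these tests relied on the two-argument
fail(String, Throwable) inherited from DistributedTestCase, and that
overload now lives on the dunit Assert class. A minimal sketch, illustrative
only (region is a placeholder):

    import com.gemstone.gemfire.test.dunit.Assert;

    try {
      region.put("key-1", "value-1");
    }
    catch (Exception ex) {
      // fails the test while keeping the original exception attached
      Assert.fail("failed while region.put()", ex);
    }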

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ConnectionProxyJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ConnectionProxyJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ConnectionProxyJUnitTest.java
index ae73ed4..4ef6e2f 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ConnectionProxyJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ConnectionProxyJUnitTest.java
@@ -54,8 +54,8 @@ import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.EntryEventImpl;
 import com.gemstone.gemfire.internal.cache.EventID;
 import com.gemstone.gemfire.internal.cache.ha.ThreadIdentifier;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
@@ -260,7 +260,7 @@ public class ConnectionProxyJUnitTest
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 90 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 90 * 1000, 200, true);
    }
    finally {
      if (server != null) {
@@ -321,7 +321,7 @@ public class ConnectionProxyJUnitTest
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 90 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 90 * 1000, 200, true);
    }
    finally {
      if (server != null) {
@@ -462,7 +462,7 @@ public class ConnectionProxyJUnitTest
          return null;
        }
      };
-     DistributedTestCase.waitForCriterion(ev, 20 * 1000, 200, true);
+     Wait.waitForCriterion(ev, 20 * 1000, 200, true);
    }
    catch (Exception ex) {
      ex.printStackTrace();
@@ -838,7 +838,7 @@ public class ConnectionProxyJUnitTest
         return "ack flag never became " + expectedAckSend;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, timeToWait, 1000, true);
+    Wait.waitForCriterion(wc, timeToWait, 1000, true);
   }  
   
   private void verifyExpiry(long timeToWait)
@@ -851,7 +851,7 @@ public class ConnectionProxyJUnitTest
         return "Entry never expired";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, timeToWait * 2, 200, true);
+    Wait.waitForCriterion(wc, timeToWait * 2, 200, true);
   }
  
 }
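
ConnectionProxyJUnitTest, like ClientHealthMonitorJUnitTest above, is a
plain JUnit integration test rather than a dunit test, so after this change
its only remaining tie to the dunit tree is a pair of small utility imports
instead of the whole DistributedTestCase class:

    // before: WaitCriterion was a nested type of the heavyweight test case
    import com.gemstone.gemfire.test.dunit.DistributedTestCase;
    import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;

    // after: two standalone utility types
    import com.gemstone.gemfire.test.dunit.Wait;
    import com.gemstone.gemfire.test.dunit.WaitCriterion;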

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DataSerializerPropogationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DataSerializerPropogationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DataSerializerPropogationDUnitTest.java
index 85bed81..06abd14 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DataSerializerPropogationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DataSerializerPropogationDUnitTest.java
@@ -47,9 +47,16 @@ import com.gemstone.gemfire.internal.cache.ClientServerObserverAdapter;
 import com.gemstone.gemfire.internal.cache.ClientServerObserverHolder;
 import com.gemstone.gemfire.internal.cache.CacheServerImpl;
 import com.gemstone.gemfire.internal.cache.EventID;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
+import com.gemstone.gemfire.test.dunit.StoppableWaitCriterion;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 public class DataSerializerPropogationDUnitTest extends DistributedTestCase {
 
@@ -149,7 +156,7 @@ public class DataSerializerPropogationDUnitTest extends DistributedTestCase {
   }
 
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDown() throws Exception {
     try {
       // close the clients first
       client1.invoke(DataSerializerPropogationDUnitTest.class, "closeCache");
@@ -159,15 +166,14 @@ public class DataSerializerPropogationDUnitTest extends DistributedTestCase {
       server1.invoke(DataSerializerPropogationDUnitTest.class, "closeCache");
       server2.invoke(DataSerializerPropogationDUnitTest.class, "closeCache");
 
-      super.tearDown2();
-
       client1 = null;
       client2 = null;
       server1 = null;
 
     }
     finally {
-      unregisterAllDataSerializersFromAllVms();
+      DataSerializerPropogationDUnitTest.successfullyLoadedTestDataSerializer = false;
+      DistributedTestUtils.unregisterAllDataSerializersFromAllVms();
     }
   }
 
@@ -182,7 +188,7 @@ public class DataSerializerPropogationDUnitTest extends DistributedTestCase {
     verifyDataSerializers(numOfDataSerializers, false);
   }
   public static void verifyDataSerializers(final int numOfDataSerializers, final boolean allowNonLocal) {
-    WaitCriterion wc = new WaitCriterion2() {
+    WaitCriterion wc = new StoppableWaitCriterion() {
       String excuse;
 
       private DataSerializer[] getSerializers() {
@@ -206,7 +212,7 @@ public class DataSerializerPropogationDUnitTest extends DistributedTestCase {
         return getSerializers().length > numOfDataSerializers;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+    Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
   }
 
   public static final ThreadLocal<Boolean> allowNonLocalTL = new ThreadLocal<Boolean>();
@@ -217,7 +223,7 @@ public class DataSerializerPropogationDUnitTest extends DistributedTestCase {
       InternalDataSerializer.register(DSObject1.class, true);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in DSObject1", e);
+      Assert.fail("Test failed due to exception in DSObject1", e);
     }
   }
 
@@ -227,7 +233,7 @@ public class DataSerializerPropogationDUnitTest extends DistributedTestCase {
       InternalDataSerializer.register(DSObject2.class, true);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in DSObject2", e);
+      Assert.fail("Test failed due to exception in DSObject2", e);
     }
   }
 
@@ -237,7 +243,7 @@ public class DataSerializerPropogationDUnitTest extends DistributedTestCase {
       InternalDataSerializer.register(DSObject3.class, true);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in DSObject2", e);
+      Assert.fail("Test failed due to exception in DSObject2", e);
     }
   }
 
@@ -247,7 +253,7 @@ public class DataSerializerPropogationDUnitTest extends DistributedTestCase {
       InternalDataSerializer.register(DSObject4.class, true);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in DSObject4", e);
+      Assert.fail("Test failed due to exception in DSObject4", e);
     }
   }
 
@@ -257,7 +263,7 @@ public class DataSerializerPropogationDUnitTest extends DistributedTestCase {
       InternalDataSerializer.register(DSObject5.class, true);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in DSObject5", e);
+      Assert.fail("Test failed due to exception in DSObject5", e);
     }
   }
 
@@ -267,7 +273,7 @@ public class DataSerializerPropogationDUnitTest extends DistributedTestCase {
       InternalDataSerializer.register(DSObject6.class, true);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in DSObject6", e);
+      Assert.fail("Test failed due to exception in DSObject6", e);
     }
   }
 
@@ -277,7 +283,7 @@ public class DataSerializerPropogationDUnitTest extends DistributedTestCase {
       InternalDataSerializer.register(DSObject7.class, true);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in DSObject7", e);
+      Assert.fail("Test failed due to exception in DSObject7", e);
     }
   }
 
@@ -287,7 +293,7 @@ public class DataSerializerPropogationDUnitTest extends DistributedTestCase {
       InternalDataSerializer.register(DSObject8.class, true);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in DSObject8", e);
+      Assert.fail("Test failed due to exception in DSObject8", e);
     }
   }
 
@@ -297,7 +303,7 @@ public class DataSerializerPropogationDUnitTest extends DistributedTestCase {
       InternalDataSerializer.register(DSObject9.class, true);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in DSObject9", e);
+      Assert.fail("Test failed due to exception in DSObject9", e);
     }
   }
 
@@ -307,7 +313,7 @@ public class DataSerializerPropogationDUnitTest extends DistributedTestCase {
       InternalDataSerializer.register(DSObject10.class, true);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in DSObject10", e);
+      Assert.fail("Test failed due to exception in DSObject10", e);
     }
   }
 
@@ -317,7 +323,7 @@ public class DataSerializerPropogationDUnitTest extends DistributedTestCase {
       InternalDataSerializer.register(DSObject11.class, true);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in DSObject11", e);
+      Assert.fail("Test failed due to exception in DSObject11", e);
     }
   }
 
@@ -327,7 +333,7 @@ public class DataSerializerPropogationDUnitTest extends DistributedTestCase {
       InternalDataSerializer.register(DSObject12.class, true);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in DSObject12", e);
+      Assert.fail("Test failed due to exception in DSObject12", e);
     }
   }
 
@@ -337,7 +343,7 @@ public class DataSerializerPropogationDUnitTest extends DistributedTestCase {
       InternalDataSerializer.register(DSObject13.class, true);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in DSObject13", e);
+      Assert.fail("Test failed due to exception in DSObject13", e);
     }
   }
 
@@ -347,7 +353,7 @@ public class DataSerializerPropogationDUnitTest extends DistributedTestCase {
       InternalDataSerializer._register(new DSObjectLocalOnly(79), true);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in DSObjectLocalOnly", e);
+      Assert.fail("Test failed due to exception in DSObjectLocalOnly", e);
     }
   }
   
@@ -355,7 +361,7 @@ public class DataSerializerPropogationDUnitTest extends DistributedTestCase {
     try {
       InternalDataSerializer.register(TestDataSerializer.class, true);
     } catch (Exception e) {
-      fail("Test failed due to exception in TestDataSerializer", e);
+      Assert.fail("Test failed due to exception in TestDataSerializer", e);
     }
   }
 
@@ -396,7 +402,7 @@ public class DataSerializerPropogationDUnitTest extends DistributedTestCase {
   public void testServerUpFirstClientLater() throws Exception {
     PORT1 = initServerCache(server1);
 
-    pause(3000);
+    Wait.pause(3000);
 
     server1.invoke(DataSerializerPropogationDUnitTest.class,
         "registerDSObject1");
@@ -408,10 +414,10 @@ public class DataSerializerPropogationDUnitTest extends DistributedTestCase {
 
     client1.invoke(DataSerializerPropogationDUnitTest.class,
         "createClientCache", new Object[] {
-            getServerHostName(server1.getHost()), new Integer(PORT1) });
+            NetworkUtils.getServerHostName(server1.getHost()), new Integer(PORT1) });
 
     // wait for client2 to come online
-    pause(3000);
+    Wait.pause(3000);
 
     client1.invoke(DataSerializerPropogationDUnitTest.class,
         "verifyDataSerializers", new Object[] { new Integer(2) });
@@ -447,7 +453,7 @@ public class DataSerializerPropogationDUnitTest extends DistributedTestCase {
       }
     });
 
-    pause(3000);
+    Wait.pause(3000);
     // Run getAll
     client1.invoke(new CacheSerializableRunnable("Get entry from client") {
       @Override
@@ -466,17 +472,17 @@ public class DataSerializerPropogationDUnitTest extends DistributedTestCase {
 
     client1.invoke(DataSerializerPropogationDUnitTest.class,
         "createClientCache", new Object[] {
-            getServerHostName(server1.getHost()), new Integer(PORT1) });
+            NetworkUtils.getServerHostName(server1.getHost()), new Integer(PORT1) });
     client2.invoke(DataSerializerPropogationDUnitTest.class,
         "createClientCache", new Object[] {
-            getServerHostName(server2.getHost()), new Integer(PORT2) });
+            NetworkUtils.getServerHostName(server2.getHost()), new Integer(PORT2) });
 
     // wait for client2 to come online
-    pause(2000);
+    Wait.pause(2000);
 
     client1.invoke(DataSerializerPropogationDUnitTest.class,
         "registerDSObject3");
-    pause(4000);
+    Wait.pause(4000);
 
     client1.invoke(DataSerializerPropogationDUnitTest.class,
         "verifyDataSerializers", new Object[] { new Integer(1) });
@@ -498,18 +504,18 @@ public class DataSerializerPropogationDUnitTest extends DistributedTestCase {
 
     client1.invoke(DataSerializerPropogationDUnitTest.class,
         "createClientCache", new Object[] {
-            getServerHostName(server1.getHost()), new Integer(PORT1) });
+            NetworkUtils.getServerHostName(server1.getHost()), new Integer(PORT1) });
     client2.invoke(DataSerializerPropogationDUnitTest.class,
         "createClientCache", new Object[] {
-            getServerHostName(server2.getHost()), new Integer(PORT2) });
+            NetworkUtils.getServerHostName(server2.getHost()), new Integer(PORT2) });
 
     // wait for client2 to come online
-    pause(2000);
+    Wait.pause(2000);
 
     server1.invoke(DataSerializerPropogationDUnitTest.class,
         "registerDSObjectLocalOnly", new Object[] { });
 
-    pause(4000);
+    Wait.pause(4000);
 
     server1.invoke(DataSerializerPropogationDUnitTest.class,
         "verifyDataSerializers", new Object[] { new Integer(1) });
@@ -530,13 +536,13 @@ public class DataSerializerPropogationDUnitTest extends DistributedTestCase {
 
     client1.invoke(DataSerializerPropogationDUnitTest.class,
         "createClientCache", new Object[] {
-            getServerHostName(server1.getHost()), new Integer(PORT1) });
+            NetworkUtils.getServerHostName(server1.getHost()), new Integer(PORT1) });
     client2.invoke(DataSerializerPropogationDUnitTest.class,
         "createClientCache", new Object[] {
-            getServerHostName(server2.getHost()), new Integer(PORT2) });
+            NetworkUtils.getServerHostName(server2.getHost()), new Integer(PORT2) });
 
     // wait for client2 to come online
-    pause(2000);
+    Wait.pause(2000);
 
     client1.invoke(DataSerializerPropogationDUnitTest.class,
         "registerDSObject4");
@@ -553,7 +559,7 @@ public class DataSerializerPropogationDUnitTest extends DistributedTestCase {
         "verifyDataSerializers", new Object[] { Integer.valueOf(1) });
 
     // can get server connectivity exception
-    final ExpectedException expectedEx = addExpectedException(
+    final IgnoredException expectedEx = IgnoredException.addIgnoredException(
         "Server unreachable", client1);
 
     // stop the cache server
@@ -587,20 +593,20 @@ public class DataSerializerPropogationDUnitTest extends DistributedTestCase {
 
     client1.invoke(DataSerializerPropogationDUnitTest.class,
         "createClientCache", new Object[] {
-            getServerHostName(server1.getHost()), new Integer(PORT1) });
+            NetworkUtils.getServerHostName(server1.getHost()), new Integer(PORT1) });
     client2.invoke(DataSerializerPropogationDUnitTest.class,
         "createClientCache", new Object[] {
-            getServerHostName(server2.getHost()), new Integer(PORT2) });
+            NetworkUtils.getServerHostName(server2.getHost()), new Integer(PORT2) });
 
     // wait for client2 to come online
-    pause(2000);
+    Wait.pause(2000);
 
     client1.invoke(DataSerializerPropogationDUnitTest.class,
         "registerDSObject10");
 
     server1.invoke(DataSerializerPropogationDUnitTest.class,
         "registerDSObject11");
-    pause(4000);
+    Wait.pause(4000);
 
     server2.invoke(DataSerializerPropogationDUnitTest.class,
         "verifyDataSerializers", new Object[] { new Integer(2) });
@@ -620,10 +626,10 @@ public class DataSerializerPropogationDUnitTest extends DistributedTestCase {
     PORT2 = initServerCache(server2);
     client1.invoke(DataSerializerPropogationDUnitTest.class,
         "createClientCache", new Object[] {
-            getServerHostName(server1.getHost()), new Integer(PORT1) });
+            NetworkUtils.getServerHostName(server1.getHost()), new Integer(PORT1) });
     client2.invoke(DataSerializerPropogationDUnitTest.class,
         "createClientCache", new Object[] {
-            getServerHostName(server2.getHost()), new Integer(PORT2) });
+            NetworkUtils.getServerHostName(server2.getHost()), new Integer(PORT2) });
 
     client1.invoke(DataSerializerPropogationDUnitTest.class,
         "registerDSObject7");
@@ -644,7 +650,7 @@ public class DataSerializerPropogationDUnitTest extends DistributedTestCase {
             instanceCountWithOnePut) });
 
     // can get server connectivity exception
-    final ExpectedException expectedEx = addExpectedException(
+    final IgnoredException expectedEx = IgnoredException.addIgnoredException(
         "Server unreachable", client1);
 
     server1.invoke(DataSerializerPropogationDUnitTest.class, "stopServer");
@@ -690,18 +696,18 @@ public class DataSerializerPropogationDUnitTest extends DistributedTestCase {
 
     client1.invoke(DataSerializerPropogationDUnitTest.class,
         "createClientCache", new Object[] {
-            getServerHostName(server1.getHost()), new Integer(PORT1) });
+            NetworkUtils.getServerHostName(server1.getHost()), new Integer(PORT1) });
     client2.invoke(DataSerializerPropogationDUnitTest.class,
         "createClientCache", new Object[] {
-            getServerHostName(server1.getHost()), new Integer(PORT1) });
-    createClientCache(getServerHostName(server2.getHost()), new Integer(PORT2));
+            NetworkUtils.getServerHostName(server1.getHost()), new Integer(PORT1) });
+    createClientCache(NetworkUtils.getServerHostName(server2.getHost()), new Integer(PORT2));
 
     // wait for client2 to come online
-    pause(2000);
+    Wait.pause(2000);
 
     client1.invoke(DataSerializerPropogationDUnitTest.class,
         "registerDSObject12");
-    pause(4000);
+    Wait.pause(4000);
 
     client1.invoke(DataSerializerPropogationDUnitTest.class,
         "verifyDataSerializers", new Object[] { new Integer(1) });
@@ -728,19 +734,19 @@ public class DataSerializerPropogationDUnitTest extends DistributedTestCase {
     PORT1 = initServerCache(server1, 1);
     PORT2 = initServerCache(server2, 2);
 
-    createClientCache(getServerHostName(server1.getHost()),
+    createClientCache(NetworkUtils.getServerHostName(server1.getHost()),
         new Integer(PORT1));
 
     client2.invoke(DataSerializerPropogationDUnitTest.class,
         "createClientCache", new Object[] {
-            getServerHostName(server2.getHost()), new Integer(PORT2) });
+            NetworkUtils.getServerHostName(server2.getHost()), new Integer(PORT2) });
     setClientServerObserver1();
     client2
         .invoke(DataSerializerPropogationDUnitTest.class, "setClientServerObserver2");
 
     registerDSObject13();
 
-    pause(10000);
+    Wait.pause(10000);
 
     Boolean pass = (Boolean)client2.invoke(
         DataSerializerPropogationDUnitTest.class, "verifyResult");
@@ -756,17 +762,17 @@ public class DataSerializerPropogationDUnitTest extends DistributedTestCase {
 
     client1.invoke(DataSerializerPropogationDUnitTest.class,
         "createClientCache", new Object[] {
-            getServerHostName(server1.getHost()), new Integer(PORT1) });
+            NetworkUtils.getServerHostName(server1.getHost()), new Integer(PORT1) });
     client2.invoke(DataSerializerPropogationDUnitTest.class,
         "createClientCache", new Object[] {
-            getServerHostName(server1.getHost()), new Integer(PORT2) });
+            NetworkUtils.getServerHostName(server1.getHost()), new Integer(PORT2) });
 
     // wait for client2 to come online
-    pause(2000);
+    Wait.pause(2000);
 
     client1.invoke(DataSerializerPropogationDUnitTest.class,
         "registerTestDataSerializer");
-    pause(4000);
+    Wait.pause(4000);
 
     client1.invoke(DataSerializerPropogationDUnitTest.class,
         "verifyDataSerializers", new Object[] { new Integer(1) });



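For reference, the refactoring applied throughout these hunks is mechanical: helpers that used to be inherited from DistributedTestCase (addExpectedException, getServerHostName, pause) become static calls on dedicated dunit utility classes (IgnoredException, NetworkUtils, Wait). A minimal sketch of the new call sites, using only the utilities imported in the hunks above (the class and method names of the sketch itself are illustrative, not part of the commit):

  import com.gemstone.gemfire.test.dunit.IgnoredException;
  import com.gemstone.gemfire.test.dunit.LogWriterUtils;
  import com.gemstone.gemfire.test.dunit.NetworkUtils;
  import com.gemstone.gemfire.test.dunit.VM;
  import com.gemstone.gemfire.test.dunit.Wait;

  public class ExampleDUnitUsage {
    void connectClient(VM client1, VM server1) {
      // Suppress the expected connectivity noise on client1 while its server is down
      // (formerly addExpectedException(..) inherited from DistributedTestCase).
      IgnoredException.addIgnoredException("Server unreachable", client1);

      // Resolve the server host through the NetworkUtils utility
      // (formerly the inherited getServerHostName(..)).
      String host = NetworkUtils.getServerHostName(server1.getHost());
      LogWriterUtils.getLogWriter().info("connecting to " + host);

      // Static pause moved to the Wait utility.
      Wait.pause(2000);
    }
  }
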
[07/62] [abbrv] [partial] incubator-geode git commit: GEODE-773: Extract static methods from DistributedTestCase

Posted by je...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqStatsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqStatsDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqStatsDUnitTest.java
index 8f3c260..a006f46 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqStatsDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqStatsDUnitTest.java
@@ -35,8 +35,12 @@ import com.gemstone.gemfire.cache.query.internal.cq.InternalCqQuery;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 /**
  * This class tests the ContiunousQuery mechanism in GemFire.
@@ -58,7 +62,7 @@ public class CqStatsDUnitTest extends CacheTestCase {
     // avoid IllegalStateException from HandShake by connecting all vms to
     // system before creating pool
     getSystem();
-    invokeInEveryVM(new SerializableRunnable("getSystem") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
       public void run() {
         getSystem();
       }
@@ -74,7 +78,7 @@ public class CqStatsDUnitTest extends CacheTestCase {
       final int cqListenerInvocations) {
     vm.invoke(new CacheSerializableRunnable("Validate CQs") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Validating CQ Stats. ### " + cqName);
+        LogWriterUtils.getLogWriter().info("### Validating CQ Stats. ### " + cqName);
 //      Get CQ Service.
         QueryService qService = null;
         try {          
@@ -157,7 +161,7 @@ public class CqStatsDUnitTest extends CacheTestCase {
       final int clientsWithCqs) {
     vm.invoke(new CacheSerializableRunnable("Validate CQ Service Stats") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Validating CQ Service Stats. ### ");
+        LogWriterUtils.getLogWriter().info("### Validating CQ Service Stats. ### ");
 //      Get CQ Service.
         QueryService qService = null;
         try {          
@@ -245,7 +249,7 @@ public class CqStatsDUnitTest extends CacheTestCase {
     /* Init Server and Client */
     cqDUnitTest.createServer(server);
     final int port = server.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     cqDUnitTest.createClient(client, port, host0);
     
     /* Create CQs. */
@@ -272,7 +276,7 @@ public class CqStatsDUnitTest extends CacheTestCase {
     cqDUnitTest.createValues(server, cqDUnitTest.regions[0], 200);
     // Wait for client to Synch.
     cqDUnitTest.waitForCreated(client, "testCQStatistics_0", CqQueryDUnitTest.KEY+200);
-    pause(PAUSE);
+    Wait.pause(PAUSE);
     size = 200;
     
     // validate CQs.
@@ -296,7 +300,7 @@ public class CqStatsDUnitTest extends CacheTestCase {
     // Wait for client to Synch.
     cqDUnitTest.waitForDestroyed(client, "testCQStatistics_0", CqQueryDUnitTest.KEY+100);
     size = 10;
-    pause(PAUSE);
+    Wait.pause(PAUSE);
     
     cqDUnitTest.validateCQ(client, "testCQStatistics_0",
         /* resultSize: */ CqQueryDUnitTest.noTest,
@@ -315,7 +319,7 @@ public class CqStatsDUnitTest extends CacheTestCase {
     
     // Test  CQ Close
     cqDUnitTest.closeCQ(client, "testCQStatistics_0");
-    pause(PAUSE);
+    Wait.pause(PAUSE);
     
     // Close.
     cqDUnitTest.closeClient(client);
@@ -336,7 +340,7 @@ public class CqStatsDUnitTest extends CacheTestCase {
     /* Init Server and Client */
     cqDUnitTest.createServer(server);
     final int port = server.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     cqDUnitTest.createClient(client1, port, host0);
     cqDUnitTest.createClient(client2, port, host0);
     
@@ -345,7 +349,7 @@ public class CqStatsDUnitTest extends CacheTestCase {
     String cqName10 = new String("testCQServiceStatistics_10");   
     cqDUnitTest.createCQ(client1, cqName, cqDUnitTest.cqs[0]);
     cqDUnitTest.createCQ(client2, cqName10, cqDUnitTest.cqs[2]); 
-    pause(PAUSE);
+    Wait.pause(PAUSE);
     // Test CQ Service stats
     getCache().getLogger().info("Validating CQ Service stats on clients: #1");
     validateCQServiceStats(client1, 1, 0, 1, 0, 1, 1, CqQueryDUnitTest.noTest);
@@ -353,7 +357,7 @@ public class CqStatsDUnitTest extends CacheTestCase {
     
     cqDUnitTest.executeCQ(client1, cqName, false, null);
     cqDUnitTest.executeCQ(client2, cqName10, false, null);
-    pause(PAUSE);
+    Wait.pause(PAUSE);
     
     getCache().getLogger().info("Validating CQ Service stats on clients: #2");
     validateCQServiceStats(client1, 1, 1, 0, 0, 1, 1, CqQueryDUnitTest.noTest);
@@ -378,7 +382,7 @@ public class CqStatsDUnitTest extends CacheTestCase {
         /* queryUpdates: */ 0,
         /* queryDeletes: */ 0,
         /* totalEvents: */ size);
-    pause(PAUSE);
+    Wait.pause(PAUSE);
     
     // Test CQ Service stats
     getCache().getLogger().info("Validating CQ Service stats on clients: #3");
@@ -391,7 +395,7 @@ public class CqStatsDUnitTest extends CacheTestCase {
     
     //Create CQs with no name, execute, and close. 
     cqDUnitTest.createAndExecCQNoName(client1, cqDUnitTest.cqs[0]); 
-    pause(PAUSE);      
+    Wait.pause(PAUSE);      
     
     // Test CQ Service stats
     getCache().getLogger().info("Validating CQ Service stats on client: #4");
@@ -402,7 +406,7 @@ public class CqStatsDUnitTest extends CacheTestCase {
     
     // Test  CQ Close
     cqDUnitTest.closeCQ(client1, cqName);
-    pause(PAUSE);      
+    Wait.pause(PAUSE);      
     
     // Test CQ Service stats
     getCache().getLogger().info("Validating CQ Service stats on client: #5");
@@ -413,7 +417,7 @@ public class CqStatsDUnitTest extends CacheTestCase {
     
     //Test stop CQ
     cqDUnitTest.stopCQ(client2, cqName10);
-    pause(PAUSE);
+    Wait.pause(PAUSE);
     
     // Test CQ Service stats
     getCache().getLogger().info("Validating CQ Service stats on client: #6");
@@ -423,7 +427,7 @@ public class CqStatsDUnitTest extends CacheTestCase {
     
     // Test  CQ Close
     cqDUnitTest.closeCQ(client2, cqName10);
-    pause(PAUSE);
+    Wait.pause(PAUSE);
     
     // Test CQ Service stats
     getCache().getLogger().info("Validating CQ Service stats on client: #7");

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqStatsOptimizedExecuteDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqStatsOptimizedExecuteDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqStatsOptimizedExecuteDUnitTest.java
index 987a70a..df0b950 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqStatsOptimizedExecuteDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqStatsOptimizedExecuteDUnitTest.java
@@ -18,6 +18,7 @@ package com.gemstone.gemfire.cache.query.cq.dunit;
 
 import com.gemstone.gemfire.cache.query.internal.cq.CqService;
 import com.gemstone.gemfire.cache.query.internal.cq.CqServiceImpl;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 
 /**
@@ -31,7 +32,7 @@ public class CqStatsOptimizedExecuteDUnitTest extends CqStatsDUnitTest{
 
   public void setUp() throws Exception {
     super.setUp();
-    invokeInEveryVM(new SerializableRunnable("getSystem") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
       public void run() {
         CqServiceImpl.EXECUTE_QUERY_DURING_INIT = false;
       }
@@ -39,12 +40,11 @@ public class CqStatsOptimizedExecuteDUnitTest extends CqStatsDUnitTest{
   }
   
   @Override
-  public void tearDown2() throws Exception {
-    invokeInEveryVM(new SerializableRunnable("getSystem") {
+  protected final void preTearDownCacheTestCase() throws Exception {
+    Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
       public void run() {
         CqServiceImpl.EXECUTE_QUERY_DURING_INIT = true;
       }
     });
-    super.tearDown2();
   }
 }
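
The teardown change above recurs in several test classes in this commit: the old tearDown2() override (which had to remember to call super.tearDown2()) is replaced by the preTearDownCacheTestCase() hook that the base CacheTestCase invokes during its own teardown, and the inherited invokeInEveryVM becomes the static Invoke.invokeInEveryVM. A minimal sketch of the resulting subclass shape, assuming the usual JUnit 3 string-name constructor; the flag is the one shown in the diff:

  import com.gemstone.gemfire.cache.query.internal.cq.CqServiceImpl;
  import com.gemstone.gemfire.test.dunit.Invoke;
  import com.gemstone.gemfire.test.dunit.SerializableRunnable;

  public class ExampleOptimizedExecuteDUnitTest extends CqStatsDUnitTest {

    public ExampleOptimizedExecuteDUnitTest(String name) {
      super(name); // assumed JUnit 3 style constructor
    }

    public void setUp() throws Exception {
      super.setUp();
      // Turn off query execution during CQ initialization in every VM.
      Invoke.invokeInEveryVM(new SerializableRunnable("disable init query") {
        public void run() {
          CqServiceImpl.EXECUTE_QUERY_DURING_INIT = false;
        }
      });
    }

    @Override
    protected final void preTearDownCacheTestCase() throws Exception {
      // Restore the default in every VM; no super.tearDown2() call is needed
      // because the base class drives teardown and invokes this hook itself.
      Invoke.invokeInEveryVM(new SerializableRunnable("restore init query") {
        public void run() {
          CqServiceImpl.EXECUTE_QUERY_DURING_INIT = true;
        }
      });
    }
  }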

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqStatsUsingPoolDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqStatsUsingPoolDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqStatsUsingPoolDUnitTest.java
index 1210426..869ea81 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqStatsUsingPoolDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqStatsUsingPoolDUnitTest.java
@@ -35,8 +35,12 @@ import com.gemstone.gemfire.cache.query.internal.cq.InternalCqQuery;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 /**
  * This class tests the ContiunousQuery mechanism in GemFire.
@@ -58,7 +62,7 @@ public class CqStatsUsingPoolDUnitTest extends CacheTestCase {
     // avoid IllegalStateException from HandShake by connecting all vms to
     // system before creating pool
     getSystem();
-    invokeInEveryVM(new SerializableRunnable("getSystem") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
       public void run() {
         getSystem();
       }
@@ -74,7 +78,7 @@ public class CqStatsUsingPoolDUnitTest extends CacheTestCase {
       final int cqListenerInvocations) {
     vm.invoke(new CacheSerializableRunnable("Validate CQs") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Validating CQ Stats. ### " + cqName);
+        LogWriterUtils.getLogWriter().info("### Validating CQ Stats. ### " + cqName);
 //      Get CQ Service.
         QueryService qService = null;
         try {          
@@ -157,7 +161,7 @@ public class CqStatsUsingPoolDUnitTest extends CacheTestCase {
       final int clientsWithCqs) {
     vm.invoke(new CacheSerializableRunnable("Validate CQ Service Stats") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Validating CQ Service Stats. ### ");
+        LogWriterUtils.getLogWriter().info("### Validating CQ Service Stats. ### ");
 //      Get CQ Service.
         QueryService qService = null;
         try {          
@@ -245,7 +249,7 @@ public class CqStatsUsingPoolDUnitTest extends CacheTestCase {
     /* Init Server and Client */
     cqDUnitTest.createServer(server);
     final int port = server.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     
     String poolName = "testCQStatistics";
     cqDUnitTest.createPool(client, poolName, host0, port);
@@ -276,7 +280,7 @@ public class CqStatsUsingPoolDUnitTest extends CacheTestCase {
     cqDUnitTest.createValues(server, cqDUnitTest.regions[0], 200);
     // Wait for client to Synch.
     cqDUnitTest.waitForCreated(client, "testCQStatistics_0", CqQueryUsingPoolDUnitTest.KEY+200);
-    pause(PAUSE);
+    Wait.pause(PAUSE);
     size = 200;
     
     // validate CQs.
@@ -300,7 +304,7 @@ public class CqStatsUsingPoolDUnitTest extends CacheTestCase {
     // Wait for client to Synch.
     cqDUnitTest.waitForDestroyed(client, "testCQStatistics_0", CqQueryUsingPoolDUnitTest.KEY+100);
     size = 10;
-    pause(PAUSE);
+    Wait.pause(PAUSE);
     
     cqDUnitTest.validateCQ(client, "testCQStatistics_0",
         /* resultSize: */ CqQueryUsingPoolDUnitTest.noTest,
@@ -319,7 +323,7 @@ public class CqStatsUsingPoolDUnitTest extends CacheTestCase {
     
     // Test  CQ Close
     cqDUnitTest.closeCQ(client, "testCQStatistics_0");
-    pause(PAUSE);
+    Wait.pause(PAUSE);
     
     // Close.
     cqDUnitTest.closeClient(client);
@@ -340,7 +344,7 @@ public class CqStatsUsingPoolDUnitTest extends CacheTestCase {
     /* Init Server and Client */
     cqDUnitTest.createServer(server);
     final int port = server.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     
     String poolName1 = "testCQServiceStatistics1";
     String poolName2 = "testCQServiceStatistics2";
@@ -356,7 +360,7 @@ public class CqStatsUsingPoolDUnitTest extends CacheTestCase {
     String cqName10 = new String("testCQServiceStatistics_10");   
     cqDUnitTest.createCQ(client1, poolName1, cqName, cqDUnitTest.cqs[0]);
     cqDUnitTest.createCQ(client2, poolName2, cqName10, cqDUnitTest.cqs[2]); 
-    pause(PAUSE);
+    Wait.pause(PAUSE);
     // Test CQ Service stats
     getCache().getLogger().info("Validating CQ Service stats on clients: #1");
     validateCQServiceStats(client1, 1, 0, 1, 0, 1, 1, CqQueryUsingPoolDUnitTest.noTest);
@@ -364,7 +368,7 @@ public class CqStatsUsingPoolDUnitTest extends CacheTestCase {
     
     cqDUnitTest.executeCQ(client1, cqName, false, null);
     cqDUnitTest.executeCQ(client2, cqName10, false, null);
-    pause(PAUSE);
+    Wait.pause(PAUSE);
     
     getCache().getLogger().info("Validating CQ Service stats on clients: #2");
     validateCQServiceStats(client1, 1, 1, 0, 0, 1, 1, CqQueryUsingPoolDUnitTest.noTest);
@@ -389,7 +393,7 @@ public class CqStatsUsingPoolDUnitTest extends CacheTestCase {
         /* queryUpdates: */ 0,
         /* queryDeletes: */ 0,
         /* totalEvents: */ size);
-    pause(PAUSE);
+    Wait.pause(PAUSE);
     
     // Test CQ Service stats
     getCache().getLogger().info("Validating CQ Service stats on clients: #3");
@@ -402,7 +406,7 @@ public class CqStatsUsingPoolDUnitTest extends CacheTestCase {
     
     //Create CQs with no name, execute, and close. 
     cqDUnitTest.createAndExecCQNoName(client1, poolName1, cqDUnitTest.cqs[0]); 
-    pause(PAUSE);      
+    Wait.pause(PAUSE);      
     
     // Test CQ Service stats
     getCache().getLogger().info("Validating CQ Service stats on client: #4");
@@ -413,7 +417,7 @@ public class CqStatsUsingPoolDUnitTest extends CacheTestCase {
     
     // Test  CQ Close
     cqDUnitTest.closeCQ(client1, cqName);
-    pause(PAUSE);      
+    Wait.pause(PAUSE);      
     
     // Test CQ Service stats
     getCache().getLogger().info("Validating CQ Service stats on client: #5");
@@ -424,7 +428,7 @@ public class CqStatsUsingPoolDUnitTest extends CacheTestCase {
     
     //Test stop CQ
     cqDUnitTest.stopCQ(client2, cqName10);
-    pause(PAUSE);
+    Wait.pause(PAUSE);
     
     // Test CQ Service stats
     getCache().getLogger().info("Validating CQ Service stats on client: #6");
@@ -434,7 +438,7 @@ public class CqStatsUsingPoolDUnitTest extends CacheTestCase {
     
     // Test  CQ Close
     cqDUnitTest.closeCQ(client2, cqName10);
-    pause(PAUSE);
+    Wait.pause(PAUSE);
     
     // Test CQ Service stats
     getCache().getLogger().info("Validating CQ Service stats on client: #7");

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqStatsUsingPoolOptimizedExecuteDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqStatsUsingPoolOptimizedExecuteDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqStatsUsingPoolOptimizedExecuteDUnitTest.java
index 95eb789..a959024 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqStatsUsingPoolOptimizedExecuteDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqStatsUsingPoolOptimizedExecuteDUnitTest.java
@@ -18,6 +18,7 @@ package com.gemstone.gemfire.cache.query.cq.dunit;
 
 import com.gemstone.gemfire.cache.query.internal.cq.CqService;
 import com.gemstone.gemfire.cache.query.internal.cq.CqServiceImpl;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 
 /**
@@ -31,7 +32,7 @@ public class CqStatsUsingPoolOptimizedExecuteDUnitTest extends CqStatsUsingPoolD
 
   public void setUp() throws Exception {
     super.setUp();
-    invokeInEveryVM(new SerializableRunnable("getSystem") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
       public void run() {
         CqServiceImpl.EXECUTE_QUERY_DURING_INIT = false;
       }
@@ -39,12 +40,11 @@ public class CqStatsUsingPoolOptimizedExecuteDUnitTest extends CqStatsUsingPoolD
   }
   
   @Override
-  public void tearDown2() throws Exception {
-    invokeInEveryVM(new SerializableRunnable("getSystem") {
+  protected final void preTearDownCacheTestCase() throws Exception {
+    Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
       public void run() {
         CqServiceImpl.EXECUTE_QUERY_DURING_INIT = true;
       }
     });
-    super.tearDown2();
   }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqTimeTestListener.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqTimeTestListener.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqTimeTestListener.java
index 23d0728..4edc4a8 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqTimeTestListener.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqTimeTestListener.java
@@ -25,8 +25,8 @@ import com.gemstone.gemfire.cache.Operation;
 import com.gemstone.gemfire.cache.query.CqEvent;
 import com.gemstone.gemfire.cache.query.CqListener;
 import com.gemstone.gemfire.cache.query.data.Portfolio;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * @author anil.
@@ -198,7 +198,7 @@ public class CqTimeTestListener implements CqListener {
         return "never got create event for CQ " + CqTimeTestListener.this.cqName;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, MAX_TIME, 200, true);
+    Wait.waitForCriterion(ev, MAX_TIME, 200, true);
     return true;
   }
   
@@ -211,7 +211,7 @@ public class CqTimeTestListener implements CqListener {
         return "never got destroy event for CQ " + CqTimeTestListener.this.cqName;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, MAX_TIME, 200, true);
+    Wait.waitForCriterion(ev, MAX_TIME, 200, true);
     return true;
   }
   
@@ -224,7 +224,7 @@ public class CqTimeTestListener implements CqListener {
         return "never got invalidate event for CQ " + CqTimeTestListener.this.cqName;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, MAX_TIME, 200, true);
+    Wait.waitForCriterion(ev, MAX_TIME, 200, true);
     return true;
   }
   
@@ -237,7 +237,7 @@ public class CqTimeTestListener implements CqListener {
         return "never got update event for CQ " + CqTimeTestListener.this.cqName;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, MAX_TIME, 200, true);
+    Wait.waitForCriterion(ev, MAX_TIME, 200, true);
     return true;
   }
 
@@ -250,7 +250,7 @@ public class CqTimeTestListener implements CqListener {
         return "never got close event for CQ " + CqTimeTestListener.this.cqName;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, MAX_TIME, 200, true);
+    Wait.waitForCriterion(ev, MAX_TIME, 200, true);
     return true;
   }
   

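In the listener above, the blocking waits change only in where the helper lives: DistributedTestCase.waitForCriterion becomes Wait.waitForCriterion, and WaitCriterion is now a top-level type in the dunit package. A small sketch of the polling idiom, assuming WaitCriterion's usual done()/description() contract; the field, timeout, and class name here are illustrative:

  import com.gemstone.gemfire.test.dunit.Wait;
  import com.gemstone.gemfire.test.dunit.WaitCriterion;

  public class WaitExample {
    private volatile int eventCount;

    public void waitForEvents(final int expected) {
      WaitCriterion ev = new WaitCriterion() {
        public boolean done() {
          return eventCount >= expected;
        }
        public String description() {
          return "never received " + expected + " events";
        }
      };
      // Poll every 200 ms for up to 60 seconds; the final 'true' makes the
      // helper fail the test if the criterion is never met.
      Wait.waitForCriterion(ev, 60 * 1000, 200, true);
    }
  }
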
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/PartitionedRegionCqQueryDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/PartitionedRegionCqQueryDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/PartitionedRegionCqQueryDUnitTest.java
index 360c0d9..c9aa564 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/PartitionedRegionCqQueryDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/PartitionedRegionCqQueryDUnitTest.java
@@ -46,9 +46,13 @@ import com.gemstone.gemfire.cache30.ClientServerTestCase;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.internal.cache.LocalRegion;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 /**
  * Test class for Partitioned Region and CQs
  * 
@@ -125,14 +129,14 @@ public class PartitionedRegionCqQueryDUnitTest extends CacheTestCase {
     // create client 
     
     final int port = server1.invokeInt(PartitionedRegionCqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     
     createClient(client, port, host0);
     
     // register cq.
     createCQ(client, "testCQEvents_0", cqs[0]);
     cqHelper.executeCQ(client, "testCQEvents_0", false, null);
-    pause(2 * 1000);
+    Wait.pause(2 * 1000);
     
     // create values
     int size = 40;
@@ -162,7 +166,7 @@ public class PartitionedRegionCqQueryDUnitTest extends CacheTestCase {
     server1.bounce();
     
     cqHelper.closeClient(client);
-    pause(10 * 1000);
+    Wait.pause(10 * 1000);
     //cc1 = server1.invokeInt(PartitionedRegionCqQueryDUnitTest.class, "getCqCountFromRegionProfile");
     cc2 = server2.invokeInt(PartitionedRegionCqQueryDUnitTest.class, "getCqCountFromRegionProfile");
     
@@ -187,14 +191,14 @@ public class PartitionedRegionCqQueryDUnitTest extends CacheTestCase {
     // create client 
     
     final int port = server1.invokeInt(PartitionedRegionCqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     
     createClient(client, port, host0);
     
     // register cq.
     createCQ(client, "testCQEvents_0", cqs[0]);
     cqHelper.executeCQ(client, "testCQEvents_0", false, null);
-    pause(2 * 1000);
+    Wait.pause(2 * 1000);
     
     // create values
     int size = 40;
@@ -300,7 +304,7 @@ public class PartitionedRegionCqQueryDUnitTest extends CacheTestCase {
     // create client 
     
     final int port = server1.invokeInt(PartitionedRegionCqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     
     createClient(client, port, host0);
     
@@ -391,7 +395,7 @@ public class PartitionedRegionCqQueryDUnitTest extends CacheTestCase {
     // creating an accessor vm with Bridge Server installed.
     createServer(server1);
     final int port = server1.invokeInt(PartitionedRegionCqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     
     createClient(client, port, host0);
     
@@ -489,7 +493,7 @@ public class PartitionedRegionCqQueryDUnitTest extends CacheTestCase {
     // create client 
     
     final int port = server2.invokeInt(PartitionedRegionCqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server2.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server2.getHost());
     
     createClient(client, port, host0);
     
@@ -584,7 +588,7 @@ public class PartitionedRegionCqQueryDUnitTest extends CacheTestCase {
     
     
     final int port = server1.invokeInt(PartitionedRegionCqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     
     createClient(client, port, host0);
     
@@ -680,7 +684,7 @@ public class PartitionedRegionCqQueryDUnitTest extends CacheTestCase {
     
     
     final int port = server1.invokeInt(PartitionedRegionCqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     
     createClient(client, port, host0);
     
@@ -776,7 +780,7 @@ public class PartitionedRegionCqQueryDUnitTest extends CacheTestCase {
     
     
     final int port = server1.invokeInt(PartitionedRegionCqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     
     createClient(client, port, host0);
     createClient(client2, port, host0);
@@ -875,10 +879,10 @@ public class PartitionedRegionCqQueryDUnitTest extends CacheTestCase {
     createServer(server2, false , 1);
     
     // Wait for server to initialize.
-    pause(2000);
+    Wait.pause(2000);
     
     final int port = server1.invokeInt(PartitionedRegionCqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     
     createClient(client, port, host0);
     createClient(client2, port, host0);
@@ -1019,7 +1023,7 @@ public class PartitionedRegionCqQueryDUnitTest extends CacheTestCase {
     
     
     final int port = server1.invokeInt(PartitionedRegionCqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     
     createClient(client, port, host0);
     createClient(client2, port, host0);
@@ -1237,7 +1241,7 @@ public class PartitionedRegionCqQueryDUnitTest extends CacheTestCase {
     createServer(server2);
     
     final int port = server1.invokeInt(PartitionedRegionCqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     
     // Initialize Client.
     createClient(client, port, host0);
@@ -1370,7 +1374,7 @@ public class PartitionedRegionCqQueryDUnitTest extends CacheTestCase {
     createServerWithoutRootRegion(server, 0, false, 0);
     
     final int port = server.invokeInt(PartitionedRegionCqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     
     // Initialize Client.
     createCacheClient(client1, port, host0);
@@ -1435,7 +1439,7 @@ public class PartitionedRegionCqQueryDUnitTest extends CacheTestCase {
           // REGION NULL
           Log.getLogWriter().info("Local region is NOT null in client 1");
           
-          pause(5*1000);
+          Wait.pause(5*1000);
           CqQuery[] cqs = getCache().getQueryService().getCqs();
           if (cqs != null && cqs.length > 0) {
             assertTrue(cqs[0].isClosed());
@@ -1495,7 +1499,7 @@ public class PartitionedRegionCqQueryDUnitTest extends CacheTestCase {
         "Create Cache Server") {
       public void run2() throws CacheException
       {
-          getLogWriter().info("### Create Cache Server. ###");
+          LogWriterUtils.getLogWriter().info("### Create Cache Server. ###");
           //AttributesFactory factory = new AttributesFactory();
           //factory.setScope(Scope.DISTRIBUTED_ACK);
           //factory.setMirrorType(MirrorType.KEYS_VALUES);
@@ -1514,14 +1518,14 @@ public class PartitionedRegionCqQueryDUnitTest extends CacheTestCase {
           //assertTrue(getSystem().getDistributionManager().getOtherDistributionManagerIds().size() > 0);
           for (int i = 0; i < regions.length; i++) {
             Region r = createRegion(regions[i], attr.create());
-            getLogWriter().info("Server created the region: "+r);
+            LogWriterUtils.getLogWriter().info("Server created the region: "+r);
           }
 //          pause(2000);
           try {
             startBridgeServer(port, true);
           }
           catch (Exception ex) {
-            fail("While starting CacheServer", ex);
+            Assert.fail("While starting CacheServer", ex);
           }
 //          pause(2000);
        
@@ -1544,7 +1548,7 @@ public class PartitionedRegionCqQueryDUnitTest extends CacheTestCase {
         "Create Cache Server") {
       public void run2() throws CacheException
       {
-          getLogWriter().info("### Create Cache Server. ###");
+          LogWriterUtils.getLogWriter().info("### Create Cache Server. ###");
           //AttributesFactory factory = new AttributesFactory();
           //factory.setScope(Scope.DISTRIBUTED_ACK);
           //factory.setMirrorType(MirrorType.KEYS_VALUES);
@@ -1563,14 +1567,14 @@ public class PartitionedRegionCqQueryDUnitTest extends CacheTestCase {
           //assertTrue(getSystem().getDistributionManager().getOtherDistributionManagerIds().size() > 0);
           for (int i = 0; i < regions.length; i++) {
             Region r = createRegionWithoutRoot(regions[i], attr.create());
-            getLogWriter().info("Server created the region: "+r);
+            LogWriterUtils.getLogWriter().info("Server created the region: "+r);
           }
 //          pause(2000);
           try {
             startBridgeServer(port, true);
           }
           catch (Exception ex) {
-            fail("While starting CacheServer", ex);
+            Assert.fail("While starting CacheServer", ex);
           }
 //          pause(2000);
       }
@@ -1614,8 +1618,8 @@ public class PartitionedRegionCqQueryDUnitTest extends CacheTestCase {
     SerializableRunnable createQService =
       new CacheSerializableRunnable("Create Client") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Create Client. ###");
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info("### Create Client. ###");
+        LogWriterUtils.getLogWriter().info(
             "Will connect to server at por: " + serverPorts[0] + " and at host : "
              + serverHost);
         //Region region1 = null;
@@ -1638,7 +1642,7 @@ public class PartitionedRegionCqQueryDUnitTest extends CacheTestCase {
         
         for (int i=0; i < regions.length; i++) {        
           Region clientRegion = createRegion(regions[i], regionFactory.createRegionAttributes());
-          getLogWriter().info("### Successfully Created Region on Client :" + clientRegion);
+          LogWriterUtils.getLogWriter().info("### Successfully Created Region on Client :" + clientRegion);
           //region1.getAttributesMutator().setCacheListener(new CqListener());
         }
       }
@@ -1654,7 +1658,7 @@ public class PartitionedRegionCqQueryDUnitTest extends CacheTestCase {
         //getLogWriter().info("### DEBUG CREATE CQ START ####");
         //pause(20 * 1000);
         
-        getLogWriter().info("### Create CQ. ###" + cqName);
+        LogWriterUtils.getLogWriter().info("### Create CQ. ###" + cqName);
         // Get CQ Service.
         QueryService cqService = null;
         try {
@@ -1665,7 +1669,7 @@ public class PartitionedRegionCqQueryDUnitTest extends CacheTestCase {
         }
         // Create CQ Attributes.
         CqAttributesFactory cqf = new CqAttributesFactory();
-        CqListener[] cqListeners = {new CqQueryTestListener(getLogWriter())};
+        CqListener[] cqListeners = {new CqQueryTestListener(LogWriterUtils.getLogWriter())};
         ((CqQueryTestListener)cqListeners[0]).cqName = cqName;
         
         cqf.initCqListeners(cqListeners);
@@ -1675,11 +1679,11 @@ public class PartitionedRegionCqQueryDUnitTest extends CacheTestCase {
         try {
           CqQuery cq1 = cqService.newCq(cqName, queryStr, cqa);
           assertTrue("newCq() state mismatch", cq1.getState().isStopped());
-          getLogWriter().info("Created a new CqQuery : "+cq1);
+          LogWriterUtils.getLogWriter().info("Created a new CqQuery : "+cq1);
         } catch (Exception ex){
           AssertionError err = new AssertionError("Failed to create CQ " + cqName + " . ");
           err.initCause(ex);
-          getLogWriter().info("CqService is :" + cqService, err);
+          LogWriterUtils.getLogWriter().info("CqService is :" + cqService, err);
           throw err;
         }
       }
@@ -1718,7 +1722,7 @@ public class PartitionedRegionCqQueryDUnitTest extends CacheTestCase {
         for (int i = 1; i <= size; i++) {
           region1.put(KEY+i, new Portfolio(i));
         }
-        getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
+        LogWriterUtils.getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
       }
     });
   }
@@ -1745,13 +1749,13 @@ public class PartitionedRegionCqQueryDUnitTest extends CacheTestCase {
   public void createCacheClient(VM vm, final String[] serverHosts, final int[] serverPorts, final String redundancyLevel) {
     vm.invoke(new CacheSerializableRunnable("createCacheClient") {
       public void run2() throws CacheException {
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "Will connect to server at por: " + serverPorts[0] + " and at host : "
              + serverHosts[0]);
         ClientCacheFactory ccf = new ClientCacheFactory();
         ccf.addPoolServer(serverHosts[0]/*getServerHostName(Host.getHost(0))*/, serverPorts[0]);
         ccf.setPoolSubscriptionEnabled(true);
-        ccf.set("log-level", getDUnitLogLevel());
+        ccf.set("log-level", LogWriterUtils.getDUnitLogLevel());
         
         // Create Client Cache.
         getClientCache(ccf);
@@ -1777,7 +1781,7 @@ public class PartitionedRegionCqQueryDUnitTest extends CacheTestCase {
         for (int i=0; i < regions.length; i++) {        
           Region clientRegion = ((ClientCache)getCache()).createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY)
           .create(regions[i]);
-          getLogWriter().info("### Successfully Created Region on Client :" + clientRegion);
+          LogWriterUtils.getLogWriter().info("### Successfully Created Region on Client :" + clientRegion);
           //region1.getAttributesMutator().setCacheListener(new CqListener());
         }
       }
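
The server-setup runnables above all make the same two substitutions: logging goes through LogWriterUtils.getLogWriter() instead of the inherited getLogWriter(), and failures that wrap a cause go through Assert.fail(String, Throwable) instead of the inherited two-argument fail. A condensed sketch of one such runnable, relying on the Assert and LogWriterUtils imports added in the diff above (startBridgeServer and port belong to the surrounding test class and are only placeholders here):

  SerializableRunnable createServer = new CacheSerializableRunnable("Create Cache Server") {
    public void run2() throws CacheException {
      LogWriterUtils.getLogWriter().info("### Create Cache Server. ###");
      try {
        startBridgeServer(port, true);
      } catch (Exception ex) {
        // Assert.fail(String, Throwable) keeps the original cause on the failure.
        Assert.fail("While starting CacheServer", ex);
      }
    }
  };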

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/PartitionedRegionCqQueryOptimizedExecuteDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/PartitionedRegionCqQueryOptimizedExecuteDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/PartitionedRegionCqQueryOptimizedExecuteDUnitTest.java
index e5544b7..659bf12 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/PartitionedRegionCqQueryOptimizedExecuteDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/PartitionedRegionCqQueryOptimizedExecuteDUnitTest.java
@@ -24,6 +24,9 @@ import com.gemstone.gemfire.cache.query.internal.cq.CqServiceProvider;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -35,7 +38,7 @@ public class PartitionedRegionCqQueryOptimizedExecuteDUnitTest extends Partition
 
   public void setUp() throws Exception {
     super.setUp();
-    invokeInEveryVM(new SerializableRunnable("getSystem") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
       public void run() {
         CqServiceImpl.EXECUTE_QUERY_DURING_INIT = false;
       }
@@ -43,14 +46,13 @@ public class PartitionedRegionCqQueryOptimizedExecuteDUnitTest extends Partition
   }
   
   @Override
-  public void tearDown2() throws Exception {
-    invokeInEveryVM(new SerializableRunnable("getSystem") {
+  protected final void preTearDownCacheTestCase() throws Exception {
+    Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
       public void run() {
         CqServiceImpl.EXECUTE_QUERY_DURING_INIT = true;
         CqServiceProvider.MAINTAIN_KEYS = true;
       }
     });
-    super.tearDown2();
   }
   
   public void testCqExecuteWithoutQueryExecution() throws Exception {
@@ -66,7 +68,7 @@ public class PartitionedRegionCqQueryOptimizedExecuteDUnitTest extends Partition
 
     final int thePort = server.invokeInt(PartitionedRegionCqQueryOptimizedExecuteDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
 
     // Create client.
     createClient(client, thePort, host0);
@@ -93,7 +95,7 @@ public class PartitionedRegionCqQueryOptimizedExecuteDUnitTest extends Partition
         for (int i = numOfEntries+1; i <= numOfEntries*2; i++) {
           region1.put(KEY+i, new Portfolio(i));
         }
-        getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
+        LogWriterUtils.getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
       }
     });
     
@@ -164,7 +166,7 @@ public class PartitionedRegionCqQueryOptimizedExecuteDUnitTest extends Partition
 
     final int thePort = server.invokeInt(PartitionedRegionCqQueryDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
 
     // Create client.
     createClient(client, thePort, host0);
@@ -192,7 +194,7 @@ public class PartitionedRegionCqQueryOptimizedExecuteDUnitTest extends Partition
         for (int i = numOfEntries+1; i <= numOfEntries*2; i++) {
           region1.put(KEY+i, new Portfolio(i));
         }
-        getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
+        LogWriterUtils.getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
       }
     });
     

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/PrCqUsingPoolDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/PrCqUsingPoolDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/PrCqUsingPoolDUnitTest.java
index 80cd738..273380d 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/PrCqUsingPoolDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/PrCqUsingPoolDUnitTest.java
@@ -40,9 +40,13 @@ import com.gemstone.gemfire.cache.query.Struct;
 import com.gemstone.gemfire.cache.query.data.Portfolio;
 import com.gemstone.gemfire.cache.server.CacheServer;
 import com.gemstone.gemfire.cache30.ClientServerTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 /**
@@ -117,7 +121,7 @@ public class PrCqUsingPoolDUnitTest extends CacheTestCase {
     // create client 
     
     final int port = server1.invokeInt(PrCqUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     
     //createClient(client, port, host0);
     String poolName = "testCQAndPartitionedRegion";
@@ -232,7 +236,7 @@ public class PrCqUsingPoolDUnitTest extends CacheTestCase {
     // create client 
     
     final int port = server1.invokeInt(PrCqUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     
     String poolName = "testPartitionedCqOnAccessorBridgeServer";
     createPool(client, poolName, host0, port);
@@ -326,7 +330,7 @@ public class PrCqUsingPoolDUnitTest extends CacheTestCase {
     // creating an accessor vm with Bridge Server installed.
     createServer(server1);
     final int port = server1.invokeInt(PrCqUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     
     String poolName = "testPartitionedCqOnSingleBridgeServer";
     createPool(client, poolName, host0, port);
@@ -426,7 +430,7 @@ public class PrCqUsingPoolDUnitTest extends CacheTestCase {
     // create client 
     
     final int port = server2.invokeInt(PrCqUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server2.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server2.getHost());
     
     String poolName = "testPRCqOnSingleBridgeServerUpdatesOriginatingAtAccessor";
     createPool(client, poolName, host0, port);
@@ -524,7 +528,7 @@ public class PrCqUsingPoolDUnitTest extends CacheTestCase {
     
     
     final int port = server1.invokeInt(PrCqUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
 
     String poolName = "testPRCqWithInvalidatesOnBridgeServer";
     createPool(client, poolName, host0, port);
@@ -624,7 +628,7 @@ public class PrCqUsingPoolDUnitTest extends CacheTestCase {
     
     
     final int port = server1.invokeInt(PrCqUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     
     String poolName = "testPRCqWithInvalidatesOnAccessorBridgeServer";
     createPool(client, poolName, host0, port);
@@ -725,7 +729,7 @@ public class PrCqUsingPoolDUnitTest extends CacheTestCase {
     
     
     final int port = server1.invokeInt(PrCqUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     
     String poolName1 = "testPRCqWithUpdatesFromClients1";
     createPool(client, poolName1, host0, port);
@@ -831,10 +835,10 @@ public class PrCqUsingPoolDUnitTest extends CacheTestCase {
     createServer(server2, false , 1);
 
     // Wait for server to initialize.
-    pause(2000);
+    Wait.pause(2000);
     
     final int port = server1.invokeInt(PrCqUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
 
     String poolName1 = "testPRCqWithMultipleRegionsOnServer1";
     createPool(client, poolName1, host0, port);
@@ -977,10 +981,10 @@ public class PrCqUsingPoolDUnitTest extends CacheTestCase {
     createServer(server2, false , 1);
 
     // Wait for server to initialize.
-    pause(2000);
+    Wait.pause(2000);
     
     final int port = server1.invokeInt(PrCqUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     
     String poolName1 = "testPRWithCQsAndProfileUpdates1";
     createPool(client, poolName1, host0, port);
@@ -1199,7 +1203,7 @@ public class PrCqUsingPoolDUnitTest extends CacheTestCase {
     createServer(server2);
     
     final int port = server1.invokeInt(PrCqUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     
     String poolName1 = "testEventsDuringQueryExecution";
     createPool(client, poolName1, host0, port);
@@ -1333,7 +1337,7 @@ public class PrCqUsingPoolDUnitTest extends CacheTestCase {
     final int size = 100;
 
     final int port = server1.invokeInt(PrCqUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
 
     String poolName = "testCQsWithPutalls";
     createPool(client, poolName, new String[]{host0}, new int[]{port});
@@ -1427,7 +1431,7 @@ public class PrCqUsingPoolDUnitTest extends CacheTestCase {
     final int size = 100;
 
     final int port = server1.invokeInt(PrCqUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
 
     String poolName = "testCQsWithPutalls";
     createPool(client, poolName, new String[]{host0}, new int[]{port});
@@ -1529,7 +1533,7 @@ public class PrCqUsingPoolDUnitTest extends CacheTestCase {
     final int size = 100;
 
     final int port = server1.invokeInt(PrCqUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
 
     String poolName = "testCQsWithPutallsTx";
     createPool(client, poolName, new String[]{host0}, new int[]{port});
@@ -1651,7 +1655,7 @@ public class PrCqUsingPoolDUnitTest extends CacheTestCase {
     final int size = 100;
 
     final int port = server1.invokeInt(PrCqUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
 
     String poolName = "testCQsWithPutallsTx";
     createPool(client, poolName, new String[]{host0}, new int[]{port});
@@ -1804,7 +1808,7 @@ public class PrCqUsingPoolDUnitTest extends CacheTestCase {
         "Create Cache Server") {
       public void run2() throws CacheException
       {
-          getLogWriter().info("### Create Cache Server. ###");
+          LogWriterUtils.getLogWriter().info("### Create Cache Server. ###");
           //AttributesFactory factory = new AttributesFactory();
           //factory.setScope(Scope.DISTRIBUTED_ACK);
           //factory.setMirrorType(MirrorType.KEYS_VALUES);
@@ -1823,14 +1827,14 @@ public class PrCqUsingPoolDUnitTest extends CacheTestCase {
           //assertTrue(getSystem().getDistributionManager().getOtherDistributionManagerIds().size() > 0);
           for (int i = 0; i < regions.length; i++) {
             Region r = createRegion(regions[i], attr.create());
-            getLogWriter().info("Server created the region: "+r);
+            LogWriterUtils.getLogWriter().info("Server created the region: "+r);
           }
 //          pause(2000);
           try {
             startBridgeServer(port, true);
           }
           catch (Exception ex) {
-            fail("While starting CacheServer", ex);
+            Assert.fail("While starting CacheServer", ex);
           }
 //          pause(2000);
        
@@ -1881,7 +1885,7 @@ public class PrCqUsingPoolDUnitTest extends CacheTestCase {
         } 
         
         for (int i=0; i < servers.length; i++){
-          getLogWriter().info("### Adding to Pool. ### Server : " + servers[i] + " Port : " + ports[i]);
+          LogWriterUtils.getLogWriter().info("### Adding to Pool. ### Server : " + servers[i] + " Port : " + ports[i]);
           cpf.addServer(servers[i], ports[i]);
         }
         
@@ -1902,8 +1906,8 @@ public class PrCqUsingPoolDUnitTest extends CacheTestCase {
     SerializableRunnable createQService =
       new CacheSerializableRunnable("Create Client") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Create Client. ###");
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info("### Create Client. ###");
+        LogWriterUtils.getLogWriter().info(
             "Will connect to server at por: " + serverPorts[0] + " and at host : "
              + serverHost);
         //Region region1 = null;
@@ -1926,7 +1930,7 @@ public class PrCqUsingPoolDUnitTest extends CacheTestCase {
         
         for (int i=0; i < regions.length; i++) {        
           Region clientRegion = createRegion(regions[i], regionFactory.createRegionAttributes());
-          getLogWriter().info("### Successfully Created Region on Client :" + clientRegion);
+          LogWriterUtils.getLogWriter().info("### Successfully Created Region on Client :" + clientRegion);
           //region1.getAttributesMutator().setCacheListener(new CqListener());
         }
       }
@@ -1942,7 +1946,7 @@ public class PrCqUsingPoolDUnitTest extends CacheTestCase {
         //getLogWriter().info("### DEBUG CREATE CQ START ####");
         //pause(20 * 1000);
         
-        getLogWriter().info("### Create CQ. ###" + cqName);
+        LogWriterUtils.getLogWriter().info("### Create CQ. ###" + cqName);
         // Get CQ Service.
         QueryService cqService = null;
         try {
@@ -1953,7 +1957,7 @@ public class PrCqUsingPoolDUnitTest extends CacheTestCase {
         }
         // Create CQ Attributes.
         CqAttributesFactory cqf = new CqAttributesFactory();
-        CqListener[] cqListeners = {new CqQueryTestListener(getLogWriter())};
+        CqListener[] cqListeners = {new CqQueryTestListener(LogWriterUtils.getLogWriter())};
         ((CqQueryTestListener)cqListeners[0]).cqName = cqName;
         
         cqf.initCqListeners(cqListeners);
@@ -1963,11 +1967,11 @@ public class PrCqUsingPoolDUnitTest extends CacheTestCase {
         try {
           CqQuery cq1 = cqService.newCq(cqName, queryStr, cqa);
           assertTrue("newCq() state mismatch", cq1.getState().isStopped());
-          getLogWriter().info("Created a new CqQuery : "+cq1);
+          LogWriterUtils.getLogWriter().info("Created a new CqQuery : "+cq1);
         } catch (Exception ex){
           AssertionError err = new AssertionError("Failed to create CQ " + cqName + " . ");
           err.initCause(ex);
-          getLogWriter().info("CqService is :" + cqService, err);
+          LogWriterUtils.getLogWriter().info("CqService is :" + cqService, err);
           throw err;
         }
       }
@@ -1987,7 +1991,7 @@ public class PrCqUsingPoolDUnitTest extends CacheTestCase {
         for (int i = 1; i <= size; i++) {
           region1.put(KEY+i, new Portfolio(i));
         }
-        getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
+        LogWriterUtils.getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
       }
     });
   }
@@ -2001,7 +2005,7 @@ public class PrCqUsingPoolDUnitTest extends CacheTestCase {
           m.put(KEY+i, new Portfolio(i));
         }
         region1.putAll(m);
-        getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
+        LogWriterUtils.getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
       }
     });
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/PrCqUsingPoolOptimizedExecuteDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/PrCqUsingPoolOptimizedExecuteDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/PrCqUsingPoolOptimizedExecuteDUnitTest.java
index 5db4b22..d71a7be 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/PrCqUsingPoolOptimizedExecuteDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/PrCqUsingPoolOptimizedExecuteDUnitTest.java
@@ -17,6 +17,7 @@
 package com.gemstone.gemfire.cache.query.cq.dunit;
 
 import com.gemstone.gemfire.cache.query.internal.cq.CqServiceImpl;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 
 /**
@@ -31,7 +32,7 @@ public class PrCqUsingPoolOptimizedExecuteDUnitTest extends PrCqUsingPoolDUnitTe
 
   public void setUp() throws Exception {
     super.setUp();
-    invokeInEveryVM(new SerializableRunnable("getSystem") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
       public void run() {
         CqServiceImpl.EXECUTE_QUERY_DURING_INIT = false;
       }
@@ -39,12 +40,11 @@ public class PrCqUsingPoolOptimizedExecuteDUnitTest extends PrCqUsingPoolDUnitTe
   }
   
   @Override
-  public void tearDown2() throws Exception {
-    invokeInEveryVM(new SerializableRunnable("getSystem") {
+  protected final void preTearDownCacheTestCase() throws Exception {
+    Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
       public void run() {
         CqServiceImpl.EXECUTE_QUERY_DURING_INIT = true;
       }
     });
-    super.tearDown2();
   }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/dunit/PdxQueryCQDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/dunit/PdxQueryCQDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/dunit/PdxQueryCQDUnitTest.java
index 8abed40..49a1252 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/dunit/PdxQueryCQDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/dunit/PdxQueryCQDUnitTest.java
@@ -36,9 +36,13 @@ import com.gemstone.gemfire.cache.query.cq.dunit.CqQueryTestListener;
 import com.gemstone.gemfire.cache.query.dunit.PdxQueryCQTestBase.TestObject;
 import com.gemstone.gemfire.cache30.ClientServerTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 
 
@@ -84,7 +88,7 @@ public class PdxQueryCQDUnitTest extends PdxQueryCQTestBase {
     final int port0 = vm0.invokeInt(PdxQueryCQTestBase.class, "getCacheServerPort");
     final int port1 = vm1.invokeInt(PdxQueryCQTestBase.class, "getCacheServerPort");
 
-    final String host0 = getServerHostName(vm0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
 
     // Create client pool.
     final String poolName = "testCqPool"; 
@@ -95,17 +99,17 @@ public class PdxQueryCQDUnitTest extends PdxQueryCQTestBase {
     // Execute CQ
     SerializableRunnable executeCq = new CacheSerializableRunnable("Execute queries") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Create CQ. ###" + cqName);
+        LogWriterUtils.getLogWriter().info("### Create CQ. ###" + cqName);
         // Get CQ Service.
         QueryService qService = null;
         try {
           qService = (PoolManager.find(poolName)).getQueryService();
         } catch (Exception cqe) {
-          fail("Failed to getCQService.", cqe);
+          Assert.fail("Failed to getCQService.", cqe);
         }
         // Create CQ Attributes.
         CqAttributesFactory cqf = new CqAttributesFactory();
-        CqListener[] cqListeners = {new CqQueryTestListener(getLogWriter())};
+        CqListener[] cqListeners = {new CqQueryTestListener(LogWriterUtils.getLogWriter())};
         ((CqQueryTestListener)cqListeners[0]).cqName = cqName;
 
         cqf.initCqListeners(cqListeners);
@@ -125,7 +129,7 @@ public class PdxQueryCQDUnitTest extends PdxQueryCQTestBase {
         } catch (Exception ex){
           AssertionError err = new AssertionError("Failed to create CQ " + cqName + " . ");
           err.initCause(ex);
-          getLogWriter().info("QueryService is :" + qService, err);
+          LogWriterUtils.getLogWriter().info("QueryService is :" + qService, err);
           throw err;
         }
       }
@@ -164,13 +168,13 @@ public class PdxQueryCQDUnitTest extends PdxQueryCQTestBase {
 
     SerializableRunnable validateCq = new CacheSerializableRunnable("Validate CQs") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Validating CQ. ### " + cqName);
+        LogWriterUtils.getLogWriter().info("### Validating CQ. ### " + cqName);
         // Get CQ Service.
         QueryService cqService = null;
         try {          
           cqService = getCache().getQueryService();
         } catch (Exception cqe) {
-          fail("Failed to getCQService.", cqe);
+          Assert.fail("Failed to getCQService.", cqe);
         }
         
         CqQuery cQuery = cqService.getCq(cqName);
@@ -183,7 +187,7 @@ public class PdxQueryCQDUnitTest extends PdxQueryCQTestBase {
         final CqQueryTestListener listener = (CqQueryTestListener) cqListeners[0];
         
         //Wait for the events to show up on the client.
-        waitForCriterion(new WaitCriterion() {
+        Wait.waitForCriterion(new WaitCriterion() {
           
           public boolean done() {
             return listener.getTotalEventCount() >= (numberOfEntries * 2 - queryLimit);
@@ -265,7 +269,7 @@ public class PdxQueryCQDUnitTest extends PdxQueryCQTestBase {
     final int port0 = vm0.invokeInt(PdxQueryCQTestBase.class, "getCacheServerPort");
     final int port1 = vm1.invokeInt(PdxQueryCQTestBase.class, "getCacheServerPort");
 
-    final String host0 = getServerHostName(vm0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
 
     // Create client pool.
     final String poolName = "testCqPool"; 
@@ -314,18 +318,18 @@ public class PdxQueryCQDUnitTest extends PdxQueryCQTestBase {
         }
         region.registerInterest(list);
         
-        getLogWriter().info("### Create CQ. ###" + cqName);
+        LogWriterUtils.getLogWriter().info("### Create CQ. ###" + cqName);
         // Get CQ Service.
         QueryService qService = null;
         try {
           qService = (PoolManager.find(poolName)).getQueryService();
         } catch (Exception cqe) {
-          fail("Failed to getCQService.", cqe);
+          Assert.fail("Failed to getCQService.", cqe);
         }
         // Create CQ Attributes.
         for (int i=0; i < queries.length; i++) {
           CqAttributesFactory cqf = new CqAttributesFactory();
-          CqListener[] cqListeners = {new CqQueryTestListener(getLogWriter())};
+          CqListener[] cqListeners = {new CqQueryTestListener(LogWriterUtils.getLogWriter())};
           ((CqQueryTestListener)cqListeners[0]).cqName = (cqName + i);
 
           cqf.initCqListeners(cqListeners);
@@ -345,7 +349,7 @@ public class PdxQueryCQDUnitTest extends PdxQueryCQTestBase {
           } catch (Exception ex){
             AssertionError err = new AssertionError("Failed to create CQ " + cqName + " . ");
             err.initCause(ex);
-            getLogWriter().info("QueryService is :" + qService, err);
+            LogWriterUtils.getLogWriter().info("QueryService is :" + qService, err);
             throw err;
           }
         }
@@ -469,7 +473,7 @@ public class PdxQueryCQDUnitTest extends PdxQueryCQTestBase {
     final int port1 = vm1.invokeInt(PdxQueryCQTestBase.class, "getCacheServerPort");
     final int port2 = vm2.invokeInt(PdxQueryCQTestBase.class, "getCacheServerPort");
     
-    final String host0 = getServerHostName(vm0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
 
     // Create client pool.
     final String poolName = "testCqPool";     
@@ -505,18 +509,18 @@ public class PdxQueryCQDUnitTest extends PdxQueryCQTestBase {
         }
         region.registerInterest(list);
         
-        getLogWriter().info("### Create CQ. ###" + cqName);
+        LogWriterUtils.getLogWriter().info("### Create CQ. ###" + cqName);
         // Get CQ Service.
         QueryService qService = null;
         try {
           qService = (PoolManager.find(poolName)).getQueryService();
         } catch (Exception cqe) {
-          fail("Failed to getCQService.", cqe);
+          Assert.fail("Failed to getCQService.", cqe);
         }
         // Create CQ Attributes.
         for (int i=0; i < queries.length; i++) {
           CqAttributesFactory cqf = new CqAttributesFactory();
-          CqListener[] cqListeners = {new CqQueryTestListener(getLogWriter())};
+          CqListener[] cqListeners = {new CqQueryTestListener(LogWriterUtils.getLogWriter())};
           ((CqQueryTestListener)cqListeners[0]).cqName = (cqName + i);
 
           cqf.initCqListeners(cqListeners);
@@ -536,7 +540,7 @@ public class PdxQueryCQDUnitTest extends PdxQueryCQTestBase {
           } catch (Exception ex){
             AssertionError err = new AssertionError("Failed to create CQ " + cqName + " . ");
             err.initCause(ex);
-            getLogWriter().info("QueryService is :" + qService, err);
+            LogWriterUtils.getLogWriter().info("QueryService is :" + qService, err);
             throw err;
           }
         }
@@ -654,13 +658,13 @@ public class PdxQueryCQDUnitTest extends PdxQueryCQTestBase {
       final int updateEvents) {
         vm.invoke(new CacheSerializableRunnable("Validate CQs") {
           public void run2() throws CacheException {
-            getLogWriter().info("### Validating CQ. ### " + cqName);
+            LogWriterUtils.getLogWriter().info("### Validating CQ. ### " + cqName);
             // Get CQ Service.
             QueryService cqService = null;
               try {          
                 cqService = getCache().getQueryService();
               } catch (Exception cqe) {
-                fail("Failed to getCQService.", cqe);
+                Assert.fail("Failed to getCQService.", cqe);
               }
       
               CqQuery cQuery = cqService.getCq(cqName);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/dunit/PdxQueryCQTestBase.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/dunit/PdxQueryCQTestBase.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/dunit/PdxQueryCQTestBase.java
index 3637dce..f99a316 100755
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/dunit/PdxQueryCQTestBase.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/dunit/PdxQueryCQTestBase.java
@@ -51,6 +51,7 @@ import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.pdx.PdxReader;
 import com.gemstone.gemfire.pdx.PdxSerializable;
 import com.gemstone.gemfire.pdx.PdxWriter;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
@@ -77,7 +78,8 @@ public abstract class PdxQueryCQTestBase extends CacheTestCase {
     return bridgeServerPort;
   }
 
-  public void tearDown2() throws Exception {
+  @Override
+  protected final void preTearDownCacheTestCase() throws Exception {
     disconnectAllFromDS(); // tests all expect to create a new ds
     // Reset the testObject numinstance for the next test.
     TestObject.numInstance = 0;
@@ -129,7 +131,7 @@ public abstract class PdxQueryCQTestBase extends CacheTestCase {
             cpf.setSubscriptionEnabled(subscriptionEnabled);
             cpf.setSubscriptionRedundancy(redundancy);
             for (int i=0; i < servers.length; i++){
-              getLogWriter().info("### Adding to Pool. ### Server : " + servers[i] + " Port : " + ports[i]);
+              com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("### Adding to Pool. ### Server : " + servers[i] + " Port : " + ports[i]);
               cpf.addServer(servers[i], ports[i]);
             }
             cpf.create(poolName);
@@ -151,18 +153,18 @@ public abstract class PdxQueryCQTestBase extends CacheTestCase {
           remoteQueryService = (PoolManager.find(poolName)).getQueryService();
           localQueryService = getCache().getQueryService();
         } catch (Exception e) {
-          fail("Failed to get QueryService.", e);
+          Assert.fail("Failed to get QueryService.", e);
         }          
   
         try {
-          getLogWriter().info("### Executing Query on server:" + queryStr);
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("### Executing Query on server:" + queryStr);
           Query query = remoteQueryService.newQuery(queryStr);
           rs[0][0] = (SelectResults)query.execute();
           //printResults (rs[0][0], " ### Remote Query Results : ####");
-          getLogWriter().info("### Executing Query locally:" + queryStr);
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("### Executing Query locally:" + queryStr);
           query = localQueryService.newQuery(queryStr);
           rs[0][1] = (SelectResults)query.execute();
-          getLogWriter().info("### Remote Query rs size: " + (rs[0][0]).size() + 
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("### Remote Query rs size: " + (rs[0][0]).size() + 
               "Local Query rs size: " + (rs[0][1]).size());
           //printResults (rs[0][1], " ### Local Query Results : ####");
           // Compare local and remote query results.
@@ -171,7 +173,7 @@ public abstract class PdxQueryCQTestBase extends CacheTestCase {
              fail("Local and Remote Query Results are not matching for query :" + queryStr);  
           }
         } catch (Exception e) {
-          fail("Failed executing " + queryStr, e);
+          Assert.fail("Failed executing " + queryStr, e);
         }
       }
     });
@@ -235,7 +237,7 @@ public abstract class PdxQueryCQTestBase extends CacheTestCase {
         try {
           startBridgeServer(0, false);
         } catch (Exception ex) {
-          fail("While starting CacheServer", ex);
+          Assert.fail("While starting CacheServer", ex);
         }
       }
 
@@ -248,16 +250,16 @@ public abstract class PdxQueryCQTestBase extends CacheTestCase {
     try {
       qService = (PoolManager.find(poolName)).getQueryService();
     } catch (Exception e) {
-      fail("Failed to get QueryService.", e);
+      Assert.fail("Failed to get QueryService.", e);
     }          
   
     for (int i=0; i < queryString.length; i++){
       try {
-        getLogWriter().info("### Executing Query :" + queryString[i]);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("### Executing Query :" + queryString[i]);
         Query query = qService.newQuery(queryString[i]);
         results = (SelectResults)query.execute(params[i]);
       } catch (Exception e) {
-        fail("Failed executing " + queryString[i], e);
+        Assert.fail("Failed executing " + queryString[i], e);
       }
     }        
   }
@@ -292,12 +294,12 @@ public abstract class PdxQueryCQTestBase extends CacheTestCase {
     SerializableRunnable closeCache =
       new CacheSerializableRunnable("Close Client") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Close Client. ###");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("### Close Client. ###");
         try {
           closeCache();
           disconnectFromDS();
         } catch (Exception ex) {
-          getLogWriter().info("### Failed to get close client. ###");
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("### Failed to get close client. ###");
         }
       }
     };
@@ -332,13 +334,13 @@ public abstract class PdxQueryCQTestBase extends CacheTestCase {
     
     @Override
     public boolean equals(Object o){
-      getLogWriter().info("In TestObject2.equals() this: " + this + " other :" + o);
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("In TestObject2.equals() this: " + this + " other :" + o);
       GemFireCacheImpl.getInstance().getLoggerI18n().fine("In TestObject2.equals() this: " + this + " other :" + o);
       TestObject2 other = (TestObject2)o;
       if (_id == other._id) {
         return true;
       } else {
-        getLogWriter().info("NOT EQUALS");  
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("NOT EQUALS");  
         return false;
       }
     }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryIndexUpdateRIDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryIndexUpdateRIDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryIndexUpdateRIDUnitTest.java
index a39f5e8..6e4de65 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryIndexUpdateRIDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryIndexUpdateRIDUnitTest.java
@@ -40,9 +40,13 @@ import com.gemstone.gemfire.cache.query.internal.QueryObserverAdapter;
 import com.gemstone.gemfire.cache.query.internal.QueryObserverHolder;
 import com.gemstone.gemfire.cache.server.CacheServer;
 import com.gemstone.gemfire.cache30.ClientServerTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.cache30.CertifiableTestCacheListener;
@@ -94,7 +98,7 @@ public class QueryIndexUpdateRIDUnitTest extends CacheTestCase{
 
     final int port = server.invokeInt(QueryIndexUpdateRIDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
 
     // Init values at server. 
     final int size = 10;
@@ -112,7 +116,7 @@ public class QueryIndexUpdateRIDUnitTest extends CacheTestCase{
     this.registerInterestList(client, cqDUnitTest.regions[0], 4, KEYS);
 
     //Wait for Index to get updated.
-    pause(5 * 1000);
+    Wait.pause(5 * 1000);
 
     //This query execution should fail as it will run on client index and index are not updated just by registerInterest.
     //Validate query results.
@@ -140,7 +144,7 @@ public class QueryIndexUpdateRIDUnitTest extends CacheTestCase{
 
     final int port = server.invokeInt(QueryIndexUpdateRIDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
 
     String poolName = "testClientIndexUpdateWithRegisterInterest";
     cqDUnitTest.createPool(client, poolName, host0, port);
@@ -155,7 +159,7 @@ public class QueryIndexUpdateRIDUnitTest extends CacheTestCase{
     this.createValues(client, cqDUnitTest.regions[0], size, 1);
 
     //wait for index to get updated.
-    pause(5 * 1000);
+    Wait.pause(5 * 1000);
     //this.validateQueryOnIndex(client, incompleteQ+"p.getID() > 0", 10);
     
     this.validateQueryOnIndex(client, incompleteQ+"p.ID > 0", 10);
@@ -167,7 +171,7 @@ public class QueryIndexUpdateRIDUnitTest extends CacheTestCase{
     this.registerInterestList(client, cqDUnitTest.regions[0], size, KEYS, 4 /*start index*/);
 
     //Wait for Index to get updated.
-    pause(5 * 1000);
+    Wait.pause(5 * 1000);
 
     //This query execution should fail as it will run on client index and index are not updated just by registerInterest.
     //Validate query results.
@@ -191,7 +195,7 @@ public class QueryIndexUpdateRIDUnitTest extends CacheTestCase{
 
     final int port = server.invokeInt(QueryIndexUpdateRIDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
 
     //Init values at server. 
     final int size = 10;
@@ -209,7 +213,7 @@ public class QueryIndexUpdateRIDUnitTest extends CacheTestCase{
     cqDUnitTest.registerInterestListCQ(client, cqDUnitTest.regions[0], size, true);
 
     //Wait for Index to get updated.
-    pause(5 * 1000);
+    Wait.pause(5 * 1000);
 
     //This query execution should fail as it will run on client index and index are not updated just by registerInterest.
     //Validate query results.
@@ -232,7 +236,7 @@ public class QueryIndexUpdateRIDUnitTest extends CacheTestCase{
 
     final int port = server.invokeInt(QueryIndexUpdateRIDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
 
     //Init values at server. 
     final int size = 10;
@@ -250,7 +254,7 @@ public class QueryIndexUpdateRIDUnitTest extends CacheTestCase{
     this.registerInterestList(client, cqDUnitTest.regions[0], 2, REGEX);
 
     //Wait for Index to get updated.
-    pause(5 * 1000);
+    Wait.pause(5 * 1000);
 
     //This query execution should fail as it will run on client index and index are not updated just by registerInterest.
     //Validate query results.
@@ -279,7 +283,7 @@ public class QueryIndexUpdateRIDUnitTest extends CacheTestCase{
 
     final int port = server.invokeInt(QueryIndexUpdateRIDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
 
     //Init values at server. 
     final int size = 1000;
@@ -336,7 +340,7 @@ public class QueryIndexUpdateRIDUnitTest extends CacheTestCase{
 
     final int port = server.invokeInt(QueryIndexUpdateRIDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
 
     // Init values at server. 
     final int size = 10;
@@ -354,7 +358,7 @@ public class QueryIndexUpdateRIDUnitTest extends CacheTestCase{
     this.registerInterestList(client, ROOT, size, 0);
 
     //Wait for Index to get updated.
-    pause(5 * 1000);
+    Wait.pause(5 * 1000);
 
     //This query execution should fail as it will run on client index and index are not updated just by registerInterest.
     //Validate query results.
@@ -377,7 +381,7 @@ public class QueryIndexUpdateRIDUnitTest extends CacheTestCase{
 
     final int port = server.invokeInt(QueryIndexUpdateRIDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
 
     // Init values at server. 
     final int size = 10;
@@ -395,7 +399,7 @@ public class QueryIndexUpdateRIDUnitTest extends CacheTestCase{
     this.registerInterestList(client, ROOT, 4, KEYS);
 
     //Wait for Index to get updated.
-    pause(5 * 1000);
+    Wait.pause(5 * 1000);
 
     //This query execution should fail as it will run on client index and index are not updated just by registerInterest.
     //Validate query results.
@@ -418,7 +422,7 @@ public class QueryIndexUpdateRIDUnitTest extends CacheTestCase{
 
     final int port = server.invokeInt(QueryIndexUpdateRIDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
 
     // Init values at server. 
     final int size = 10;
@@ -436,7 +440,7 @@ public class QueryIndexUpdateRIDUnitTest extends CacheTestCase{
     this.registerInterestList(client, "root", 2, REGEX);
 
     //Wait for Index to get updated.
-    pause(5 * 1000);
+    Wait.pause(5 * 1000);
 
     //This query execution should fail as it will run on client index and index are not updated just by registerInterest.
     //Validate query results.
@@ -465,7 +469,7 @@ public class QueryIndexUpdateRIDUnitTest extends CacheTestCase{
           } else {
             region = getRootRegion().getSubregion(regionName);
           }
-          region.getAttributesMutator().setCacheListener(new CertifiableTestCacheListener(getLogWriter()));
+          region.getAttributesMutator().setCacheListener(new CertifiableTestCacheListener(LogWriterUtils.getLogWriter()));
         } catch (Exception cqe) {
           AssertionError err = new AssertionError("Failed to get Region.");
           err.initCause(cqe);
@@ -508,7 +512,7 @@ public class QueryIndexUpdateRIDUnitTest extends CacheTestCase{
           } else {
             region = getRootRegion().getSubregion(regionName);
           }
-          region.getAttributesMutator().setCacheListener(new CertifiableTestCacheListener(getLogWriter()));
+          region.getAttributesMutator().setCacheListener(new CertifiableTestCacheListener(LogWriterUtils.getLogWriter()));
         } catch (Exception cqe) {
           AssertionError err = new AssertionError("Failed to get Region.");
           err.initCause(cqe);
@@ -544,7 +548,7 @@ public class QueryIndexUpdateRIDUnitTest extends CacheTestCase{
         "Create Cache Server") {
       public void run2() throws CacheException
       {
-        getLogWriter().info("### Create Cache Server. ###");
+        LogWriterUtils.getLogWriter().info("### Create Cache Server. ###");
         AttributesFactory factory = new AttributesFactory();
         factory.setMirrorType(MirrorType.KEYS_VALUES);
 
@@ -559,16 +563,16 @@ public class QueryIndexUpdateRIDUnitTest extends CacheTestCase{
 
         
         
-        pause(2000);
+        Wait.pause(2000);
 
         try {
           startBridgeServer(thePort, true);
         }
 
         catch (Exception ex) {
-          fail("While starting CacheServer", ex);
+          Assert.fail("While starting CacheServer", ex);
         }
-        pause(2000);
+        Wait.pause(2000);
         
       }
     };
@@ -620,7 +624,7 @@ public class QueryIndexUpdateRIDUnitTest extends CacheTestCase{
 //          getLogWriter().info("### puting '"+KEY+i+"' in region " + region1);
           region1.put(KEY+i, new Portfolio((start != 0 ? start : 1) * i, i));
         }
-        getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
+        LogWriterUtils.getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
       }
     });
   }
@@ -642,13 +646,13 @@ public class QueryIndexUpdateRIDUnitTest extends CacheTestCase{
     SerializableRunnable createQService =
       new CacheSerializableRunnable("Create Client") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Create Client. ###");
+        LogWriterUtils.getLogWriter().info("### Create Client. ###");
         //Region region1 = null;
         // Initialize CQ Service.
         try {
           getCache().getQueryService();
         } catch (Exception cqe) {
-          fail("Failed to getCQService.", cqe);
+          Assert.fail("Failed to getCQService.", cqe);
         }
         
         AttributesFactory regionFactory = new AttributesFactory();
@@ -665,7 +669,7 @@ public class QueryIndexUpdateRIDUnitTest extends CacheTestCase{
         }
                
           createRootRegion(regionFactory.createRegionAttributes());
-          getLogWriter().info("### Successfully Created Root Region on Client");
+          LogWriterUtils.getLogWriter().info("### Successfully Created Root Region on Client");
       }
     };
     
@@ -686,7 +690,7 @@ public class QueryIndexUpdateRIDUnitTest extends CacheTestCase{
   public void validateQueryOnIndexWithRegion(VM vm, final String query, final int resultSize, final String region) {
     vm.invoke(new CacheSerializableRunnable("Validate Query") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Validating Query. ###");
+        LogWriterUtils.getLogWriter().info("### Validating Query. ###");
         QueryService qs = getCache().getQueryService();
         
         Query q = qs.newQuery(query);
@@ -697,7 +701,7 @@ public class QueryIndexUpdateRIDUnitTest extends CacheTestCase{
           Object r = q.execute();
           if(r instanceof SelectResults){
             int rSize = ((SelectResults)r).asSet().size();
-            getLogWriter().info("### Result Size is :" + rSize);
+            LogWriterUtils.getLogWriter().info("### Result Size is :" + rSize);
             
             if(region == null) {
               assertEquals(resultSize, rSize);
@@ -715,7 +719,7 @@ public class QueryIndexUpdateRIDUnitTest extends CacheTestCase{
           }
         }
         catch (Exception e) {
-          fail("Failed to execute the query.", e);
+          Assert.fail("Failed to execute the query.", e);
         }
         if(!observer.isIndexesUsed) {
           fail("Index not used for query");
@@ -727,7 +731,7 @@ public class QueryIndexUpdateRIDUnitTest extends CacheTestCase{
   public void asyncClearRegion(VM vm, final String regionName){
     vm.invokeAsync(new CacheSerializableRunnable("Destroy entries") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Clearing Region. ###");
+        LogWriterUtils.getLogWriter().info("### Clearing Region. ###");
         Region region1;
         if(!"root".equals(regionName)){
           region1 = getRootRegion().getSubregion(regionName);
@@ -735,7 +739,7 @@ public class QueryIndexUpdateRIDUnitTest extends CacheTestCase{
           region1 = getRootRegion();
         }
         region1.clear();
-        getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
+        LogWriterUtils.getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
       }
     });
   }
@@ -743,7 +747,7 @@ public class QueryIndexUpdateRIDUnitTest extends CacheTestCase{
   private SerializableRunnable getSRClearRegion(final String regionName) {
     SerializableRunnable sr = new CacheSerializableRunnable("Destroy entries") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Clearing Region. ###");
+        LogWriterUtils.getLogWriter().info("### Clearing Region. ###");
         Region region1;
         if(!"root".equals(regionName)){
           region1 = getRootRegion().getSubregion(regionName);
@@ -751,7 +755,7 @@ public class QueryIndexUpdateRIDUnitTest extends CacheTestCase{
           region1 = getRootRegion();
         }
         region1.clear();
-        getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
+        LogWriterUtils.getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
       }
     };
     return sr;
@@ -770,7 +774,7 @@ public class QueryIndexUpdateRIDUnitTest extends CacheTestCase{
           } else {
             region = getRootRegion().getSubregion(regionName);
           }
-          region.getAttributesMutator().setCacheListener(new CertifiableTestCacheListener(getLogWriter()));
+          region.getAttributesMutator().setCacheListener(new CertifiableTestCacheListener(LogWriterUtils.getLogWriter()));
         } catch (Exception cqe) {
           AssertionError err = new AssertionError("Failed to get Region.");
           err.initCause(cqe);


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/RemoteCQTransactionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/RemoteCQTransactionDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/RemoteCQTransactionDUnitTest.java
index 42d367c..4bca2c4 100755
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/RemoteCQTransactionDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/RemoteCQTransactionDUnitTest.java
@@ -70,8 +70,12 @@ import com.gemstone.gemfire.internal.cache.execute.data.Customer;
 import com.gemstone.gemfire.internal.cache.execute.data.Order;
 import com.gemstone.gemfire.internal.cache.execute.data.OrderId;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * @author sbawaska
@@ -93,7 +97,7 @@ public class RemoteCQTransactionDUnitTest extends CacheTestCase {
       //TXManagerImpl mgr = getGemfireCache().getTxManager();
       //assertEquals(0, mgr.hostedTransactionsInProgressForTest());
       final TXManagerImpl mgr = getGemfireCache().getTxManager();
-      waitForCriterion(new WaitCriterion() {
+      Wait.waitForCriterion(new WaitCriterion() {
         @Override
         public boolean done() {
           return mgr.hostedTransactionsInProgressForTest() == 0;
@@ -119,13 +123,12 @@ public class RemoteCQTransactionDUnitTest extends CacheTestCase {
   }
   
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
 //    try { Thread.sleep(5000); } catch (InterruptedException e) { } // FOR MANUAL TESTING OF STATS - DON"T KEEP THIS
     try {
-      invokeInEveryVM(verifyNoTxState);
+      Invoke.invokeInEveryVM(verifyNoTxState);
     } finally {
       closeAllCache();
-      super.tearDown2();
     }
   }
   
@@ -737,7 +740,7 @@ public class RemoteCQTransactionDUnitTest extends CacheTestCase {
         ClientCacheFactory ccf = new ClientCacheFactory();
         ccf.addPoolServer("localhost"/*getServerHostName(Host.getHost(0))*/, port);
         ccf.setPoolSubscriptionEnabled(true);
-        ccf.set("log-level", getDUnitLogLevel());
+        ccf.set("log-level", LogWriterUtils.getDUnitLogLevel());
         ClientCache cCache = getClientCache(ccf);
         ClientRegionFactory<Integer, String> crf = cCache
             .createClientRegionFactory(isEmpty ? ClientRegionShortcut.PROXY

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/ha/CQListGIIDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/ha/CQListGIIDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/ha/CQListGIIDUnitTest.java
index f5f1fc1..9b8f28f 100755
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/ha/CQListGIIDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/ha/CQListGIIDUnitTest.java
@@ -57,9 +57,10 @@ import com.gemstone.gemfire.internal.cache.tier.sockets.ClientUpdateMessageImpl;
 import com.gemstone.gemfire.internal.cache.tier.sockets.ConflationDUnitTest;
 import com.gemstone.gemfire.internal.cache.InternalRegionArguments;
 import com.gemstone.gemfire.internal.cache.LocalRegion;
-
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -174,8 +175,8 @@ public class CQListGIIDUnitTest extends DistributedTestCase {
   /**
    * Tears down the test.
    */
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     serverVM0.invoke(ConflationDUnitTest.class, "unsetIsSlowStart");
     serverVM1.invoke(ConflationDUnitTest.class, "unsetIsSlowStart");
     closeCache();
@@ -332,7 +333,7 @@ public class CQListGIIDUnitTest extends DistributedTestCase {
   public static void createClientCache(Integer port1, Integer port2,
       Integer port3, String rLevel, Boolean addListener) throws Exception {
     CacheServerTestUtil.disableShufflingOfEndpoints();
-    String host = DistributedTestCase.getIPLiteral();
+    String host = NetworkUtils.getIPLiteral();
 
     Properties props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
@@ -362,7 +363,7 @@ public class CQListGIIDUnitTest extends DistributedTestCase {
       cache.getQueryService();
     }
     catch (Exception cqe) {
-      fail("Failed to getCQService.", cqe);
+      Assert.fail("Failed to getCQService.", cqe);
     }
 
     AttributesFactory factory = new AttributesFactory();
@@ -377,18 +378,18 @@ public class CQListGIIDUnitTest extends DistributedTestCase {
 
   /* Register CQs */
   public static void createCQ(String cqName, String queryStr) {
-    getLogWriter().info("### Create CQ. ###" + cqName);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("### Create CQ. ###" + cqName);
     // Get CQ Service.
     QueryService cqService = null;
     try {
       cqService = cache.getQueryService();
     }
     catch (Exception cqe) {
-      fail("Failed to getCQService.", cqe);
+      Assert.fail("Failed to getCQService.", cqe);
     }
     // Create CQ Attributes.
     CqAttributesFactory cqf = new CqAttributesFactory();
-    CqListener[] cqListeners = { new CqQueryTestListener(getLogWriter()) };
+    CqListener[] cqListeners = { new CqQueryTestListener(com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()) };
     ((CqQueryTestListener)cqListeners[0]).cqName = cqName;
 
     cqf.initCqListeners(cqListeners);
@@ -400,7 +401,7 @@ public class CQListGIIDUnitTest extends DistributedTestCase {
       assertTrue("newCq() state mismatch", cq1.getState().isStopped());
     }
     catch (Exception ex) {
-      getLogWriter().info("CqService is :" + cqService);
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("CqService is :" + cqService);
       ex.printStackTrace();
       AssertionError err = new AssertionError("Failed to create CQ " + cqName
           + " . ");
@@ -410,7 +411,7 @@ public class CQListGIIDUnitTest extends DistributedTestCase {
   }
 
   public static void executeCQ(String cqName, Boolean initialResults) {
-    getLogWriter().info("### DEBUG EXECUTE CQ START ####");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("### DEBUG EXECUTE CQ START ####");
     // Get CQ Service.
     QueryService cqService = null;
     CqQuery cq1 = null;
@@ -420,19 +421,19 @@ public class CQListGIIDUnitTest extends DistributedTestCase {
     try {
       cq1 = cqService.getCq(cqName);
       if (cq1 == null) {
-        getLogWriter().info(
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
             "Failed to get CqQuery object for CQ name: " + cqName);
-        fail("Failed to get CQ " + cqName, new Exception("Failed to get CQ "
+        Assert.fail("Failed to get CQ " + cqName, new Exception("Failed to get CQ "
             + cqName));
       }
       else {
-        getLogWriter().info("Obtained CQ, CQ name: " + cq1.getName());
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Obtained CQ, CQ name: " + cq1.getName());
         assertTrue("newCq() state mismatch", cq1.getState().isStopped());
       }
     }
     catch (Exception ex) {
-      getLogWriter().info("CqService is :" + cqService);
-      getLogWriter().error(ex);
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("CqService is :" + cqService);
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().error(ex);
       AssertionError err = new AssertionError("Failed to execute  CQ " + cqName);
       err.initCause(ex);
       throw err;
@@ -445,14 +446,14 @@ public class CQListGIIDUnitTest extends DistributedTestCase {
         cqResults = cq1.executeWithInitialResults();
       }
       catch (Exception ex) {
-        getLogWriter().info("CqService is :" + cqService);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("CqService is :" + cqService);
         ex.printStackTrace();
         AssertionError err = new AssertionError("Failed to execute  CQ "
             + cqName);
         err.initCause(ex);
         throw err;
       }
-      getLogWriter().info("initial result size = " + cqResults.size());
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("initial result size = " + cqResults.size());
       assertTrue("executeWithInitialResults() state mismatch", cq1.getState()
           .isRunning());
       // if (expectedResultsSize >= 0) {
@@ -466,7 +467,7 @@ public class CQListGIIDUnitTest extends DistributedTestCase {
         cq1.execute();
       }
       catch (Exception ex) {
-        getLogWriter().info("CqService is :" + cqService);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("CqService is :" + cqService);
         ex.printStackTrace();
         AssertionError err = new AssertionError("Failed to execute  CQ "
             + cqName);
@@ -483,7 +484,7 @@ public class CQListGIIDUnitTest extends DistributedTestCase {
     try {
       region = cache.getRegion("root").getSubregion(regionName);
       region.getAttributesMutator().setCacheListener(
-          new CertifiableTestCacheListener(getLogWriter()));
+          new CertifiableTestCacheListener(com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()));
     }
     catch (Exception cqe) {
       AssertionError err = new AssertionError("Failed to get Region.");
@@ -517,12 +518,12 @@ public class CQListGIIDUnitTest extends DistributedTestCase {
     }
     catch (Exception cqe) {
       cqe.printStackTrace();
-      fail("Failed to getCQService.", cqe);
+      Assert.fail("Failed to getCQService.", cqe);
     }
 
     CqQuery cQuery = cqService.getCq(cqName);
     if (cQuery == null) {
-      fail("Failed to get CqQuery for CQ : " + cqName, new Exception(
+      Assert.fail("Failed to get CqQuery for CQ : " + cqName, new Exception(
           "Failed to get CqQuery for CQ : " + cqName));
     }
 
@@ -569,7 +570,7 @@ public class CQListGIIDUnitTest extends DistributedTestCase {
       r.registerInterest("ALL_KEYS");
     }
     catch (Exception ex) {
-      fail("failed in registerInterestListAll", ex);
+      Assert.fail("failed in registerInterestListAll", ex);
     }
   }
 
@@ -582,7 +583,7 @@ public class CQListGIIDUnitTest extends DistributedTestCase {
       r.registerInterest("k5");
     }
     catch (Exception ex) {
-      fail("failed while registering keys", ex);
+      Assert.fail("failed while registering keys", ex);
     }
   }
 
@@ -593,11 +594,11 @@ public class CQListGIIDUnitTest extends DistributedTestCase {
       for (int i = 0; i < num.longValue(); i++) {
         r.put(KEY + i, new Portfolio(i + 1));
       }
-      getLogWriter().info(
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
           "### Number of Entries in Region " + rName + ": " + r.keys().size());
     }
     catch (Exception ex) {
-      fail("failed in putEntries()", ex);
+      Assert.fail("failed in putEntries()", ex);
     }
   }
 
@@ -724,7 +725,7 @@ public class CQListGIIDUnitTest extends DistributedTestCase {
       }
     }
     catch (Exception e) {
-      fail("failed in VerifyCUMCQList()" + e, e);
+      Assert.fail("failed in VerifyCUMCQList()" + e, e);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/ha/HADispatcherDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/ha/HADispatcherDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/ha/HADispatcherDUnitTest.java
index ddc907c..8920afb 100755
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/ha/HADispatcherDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/ha/HADispatcherDUnitTest.java
@@ -56,7 +56,11 @@ import com.gemstone.gemfire.internal.cache.tier.sockets.ConflationDUnitTest;
 import com.gemstone.gemfire.internal.cache.tier.sockets.HAEventWrapper;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * This Dunit test is to verify that when the dispatcher of CS dispatches the
@@ -143,12 +147,12 @@ public class HADispatcherDUnitTest extends DistributedTestCase
     client2.invoke( CacheServerTestUtil.class, "disableShufflingOfEndpoints");
     client1.invoke(HADispatcherDUnitTest.class, "createClientCache",
         new Object[] {
-            getServerHostName(Host.getHost(0)),
+            NetworkUtils.getServerHostName(Host.getHost(0)),
             new Integer(PORT1), new Integer(PORT2),
             new Boolean(false) });
     client2.invoke(HADispatcherDUnitTest.class, "createClientCache",
         new Object[] {
-            getServerHostName(Host.getHost(0)),
+            NetworkUtils.getServerHostName(Host.getHost(0)),
             new Integer(PORT1), new Integer(PORT2),
             new Boolean(true) });
     //createClientCache(new Integer(PORT1), new Integer(PORT2), new Boolean(true) );
@@ -156,16 +160,13 @@ public class HADispatcherDUnitTest extends DistributedTestCase
   }
 
   @Override
-  public void tearDown2() throws Exception
-  {
-    super.tearDown2();
+  protected final void preTearDown() throws Exception {
     client1.invoke(HADispatcherDUnitTest.class, "closeCache");
     client2.invoke(HADispatcherDUnitTest.class, "closeCache");
     // close server
     server1.invoke(HADispatcherDUnitTest.class, "resetQRMslow");
     server1.invoke(HADispatcherDUnitTest.class, "closeCache");
     server2.invoke(HADispatcherDUnitTest.class, "closeCache");
-
   }
 
   public static void closeCache()
@@ -290,7 +291,7 @@ public class HADispatcherDUnitTest extends DistributedTestCase
                     + " for proxy " + proxy;
               }
             };
-            DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+            Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
             cache.getLogger().fine("processed a proxy");
         }
       }
@@ -412,7 +413,7 @@ public class HADispatcherDUnitTest extends DistributedTestCase
           return null;
         }
       };
-      DistributedTestCase.waitForCriterion(ev, 30 * 1000, 200, true);
+      Wait.waitForCriterion(ev, 30 * 1000, 200, true);
       ev = new WaitCriterion() {
         public boolean done() {
           return pool.getRedundants().size() >= 1;
@@ -421,7 +422,7 @@ public class HADispatcherDUnitTest extends DistributedTestCase
           return null;
         }
       };
-      DistributedTestCase.waitForCriterion(ev, 30 * 1000, 200, true);
+      Wait.waitForCriterion(ev, 30 * 1000, 200, true);
       
       assertNotNull(pool.getPrimary());
       assertTrue("backups="+pool.getRedundants() + " expected=" + 1,
@@ -446,7 +447,7 @@ public class HADispatcherDUnitTest extends DistributedTestCase
     
     // Create CQ Attributes.
     CqAttributesFactory cqf = new CqAttributesFactory();
-    CqListener[] cqListeners = {new CqQueryTestListener(getLogWriter())};    
+    CqListener[] cqListeners = {new CqQueryTestListener(LogWriterUtils.getLogWriter())};    
     cqf.initCqListeners(cqListeners);
     CqAttributes cqa = cqf.create();
     
@@ -458,7 +459,7 @@ public class HADispatcherDUnitTest extends DistributedTestCase
       CqQuery cq1 = cqService.newCq(cqName, queryStr, cqa);
       cq1.execute();
     } catch (Exception ex){
-      getLogWriter().info("CQService is :" + cqService);
+      LogWriterUtils.getLogWriter().info("CQService is :" + cqService);
       ex.printStackTrace();
       AssertionError err = new AssertionError("Failed to create/execute CQ " + cqName + " . ");
       err.initCause(ex);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ClientToServerDeltaDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ClientToServerDeltaDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ClientToServerDeltaDUnitTest.java
index d04d249..0449a78 100755
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ClientToServerDeltaDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ClientToServerDeltaDUnitTest.java
@@ -52,7 +52,10 @@ import com.gemstone.gemfire.internal.cache.LocalRegion;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionLocalMaxMemoryDUnitTest.TestObject1;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Test client to server flow for delta propogation
@@ -130,8 +133,8 @@ public class ClientToServerDeltaDUnitTest extends DistributedTestCase {
     client2 = host.getVM(3);
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     // reset all flags
     DeltaTestImpl.resetDeltaInvokationCounters();
     server.invoke(DeltaTestImpl.class, "resetDeltaInvokationCounters");
@@ -161,11 +164,11 @@ public class ClientToServerDeltaDUnitTest extends DistributedTestCase {
             Boolean.TRUE, Boolean.FALSE, clone, enableDelta })).intValue();
 
     client.invoke(ClientToServerDeltaDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(server.getHost()), new Integer(PORT1),
+        new Object[] { NetworkUtils.getServerHostName(server.getHost()), new Integer(PORT1),
             Boolean.FALSE, Boolean.FALSE, Boolean.FALSE });
 
     client2.invoke(ClientToServerDeltaDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(server2.getHost()),
+        new Object[] { NetworkUtils.getServerHostName(server2.getHost()),
             new Integer(PORT2), Boolean.TRUE, Boolean.FALSE, cq, queries, RI });
   }
 
@@ -180,11 +183,11 @@ public class ClientToServerDeltaDUnitTest extends DistributedTestCase {
             Boolean.FALSE, Boolean.FALSE, clone, enableDelta })).intValue();
 
     client.invoke(ClientToServerDeltaDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(server.getHost()), new Integer(PORT1),
+        new Object[] { NetworkUtils.getServerHostName(server.getHost()), new Integer(PORT1),
             Boolean.FALSE, Boolean.FALSE, Boolean.FALSE });
 
     client2.invoke(ClientToServerDeltaDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(server2.getHost()),
+        new Object[] { NetworkUtils.getServerHostName(server2.getHost()),
             new Integer(PORT2), Boolean.TRUE, Boolean.FALSE, cq, queries, RI });
   }
 
@@ -713,7 +716,7 @@ public class ClientToServerDeltaDUnitTest extends DistributedTestCase {
         new Object[] { Boolean.FALSE, Boolean.FALSE });
 
     client.invoke(ClientToServerDeltaDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(server.getHost()), new Integer(PORT1),
+        new Object[] { NetworkUtils.getServerHostName(server.getHost()), new Integer(PORT1),
             Boolean.FALSE, Boolean.TRUE, Boolean.FALSE });
 
 /*    server2.invoke(ClientToServerDeltaDUnitTest.class, "setFirstSecondUpdate",
@@ -746,7 +749,7 @@ public class ClientToServerDeltaDUnitTest extends DistributedTestCase {
         new Object[] { Boolean.FALSE, Boolean.FALSE });
 
     client.invoke(ClientToServerDeltaDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(server.getHost()), new Integer(PORT1),
+        new Object[] { NetworkUtils.getServerHostName(server.getHost()), new Integer(PORT1),
             Boolean.FALSE, Boolean.TRUE, Boolean.FALSE });
 
     client.invoke(ClientToServerDeltaDUnitTest.class, "putDelta",
@@ -778,11 +781,11 @@ public class ClientToServerDeltaDUnitTest extends DistributedTestCase {
         new Object[] { Boolean.FALSE, Boolean.FALSE });
 
     client.invoke(ClientToServerDeltaDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(server.getHost()), new Integer(PORT1),
+        new Object[] { NetworkUtils.getServerHostName(server.getHost()), new Integer(PORT1),
             Boolean.TRUE, Boolean.TRUE, Boolean.FALSE });
 
     client2.invoke(ClientToServerDeltaDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(server.getHost()), new Integer(PORT1),
+        new Object[] { NetworkUtils.getServerHostName(server.getHost()), new Integer(PORT1),
             Boolean.TRUE, Boolean.FALSE, Boolean.FALSE });
     
     int deltaSent =  (Integer)server2.invoke(
@@ -809,7 +812,7 @@ public class ClientToServerDeltaDUnitTest extends DistributedTestCase {
         new Object[] { Boolean.FALSE, Boolean.FALSE });
 
     client.invoke(ClientToServerDeltaDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(server.getHost()), new Integer(PORT1),
+        new Object[] { NetworkUtils.getServerHostName(server.getHost()), new Integer(PORT1),
             Boolean.FALSE, Boolean.FALSE, Boolean.FALSE});
 
     client.invoke(ClientToServerDeltaDUnitTest.class, "putDelta",
@@ -1000,7 +1003,7 @@ public class ClientToServerDeltaDUnitTest extends DistributedTestCase {
         return "Last key NOT received.";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 10*1000, 100, true);
+    Wait.waitForCriterion(wc, 10*1000, 100, true);
   }
   
   static class CSDeltaTestImpl extends DeltaTestImpl {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DeltaPropagationWithCQDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DeltaPropagationWithCQDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DeltaPropagationWithCQDUnitTest.java
index 2e7efe7..8095079 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DeltaPropagationWithCQDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DeltaPropagationWithCQDUnitTest.java
@@ -54,7 +54,10 @@ import com.gemstone.gemfire.internal.cache.CacheServerImpl;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * @author ashetkar
@@ -101,8 +104,8 @@ public class DeltaPropagationWithCQDUnitTest extends DistributedTestCase {
     client2 = host.getVM(3);
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     server1.invoke(DeltaPropagationWithCQDUnitTest.class, "close");
     server2.invoke(DeltaPropagationWithCQDUnitTest.class, "close");
     client1.invoke(DeltaPropagationWithCQDUnitTest.class, "close");
@@ -126,10 +129,10 @@ public class DeltaPropagationWithCQDUnitTest extends DistributedTestCase {
     // 2. setup a client
     client1
         .invoke(DeltaPropagationWithCQDUnitTest.class, "createClientCache",
-            new Object[] {getServerHostName(server1.getHost()), port,
+            new Object[] {NetworkUtils.getServerHostName(server1.getHost()), port,
                 Boolean.TRUE});
     // 3. setup another client with cqs and interest in all keys.
-    createClientCache(getServerHostName(server1.getHost()), port, true);
+    createClientCache(NetworkUtils.getServerHostName(server1.getHost()), port, true);
     registerCQs(1, "CQWithInterestDUnitTest_cq");
     // 4. put a key on client1
     client1.invoke(DeltaPropagationWithCQDUnitTest.class, "doPut", new Object[] {
@@ -147,7 +150,7 @@ public class DeltaPropagationWithCQDUnitTest extends DistributedTestCase {
             + " cqEvents and " + cqErrors + " cqErrors";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 30 * 1000, 100, true);
+    Wait.waitForCriterion(wc, 30 * 1000, 100, true);
 
     // 7. validate that client2 has the new value
     assertEquals("Latest value: ", "NEW_VALUE", cache.getRegion(regionName)
@@ -164,10 +167,10 @@ public class DeltaPropagationWithCQDUnitTest extends DistributedTestCase {
     // 2. setup a client with register interest
     client1
         .invoke(DeltaPropagationWithCQDUnitTest.class, "createClientCache",
-            new Object[] {getServerHostName(server1.getHost()), port,
+            new Object[] {NetworkUtils.getServerHostName(server1.getHost()), port,
                 Boolean.TRUE});
     // 3. setup another client with cqs but without interest.
-    createClientCache(getServerHostName(server1.getHost()), port, false/*RI*/);
+    createClientCache(NetworkUtils.getServerHostName(server1.getHost()), port, false/*RI*/);
     for (int i = 0; i < numOfCQs; i++) {
       registerCQs(numOfListeners, "Query_"+i);
     }
@@ -200,7 +203,7 @@ public class DeltaPropagationWithCQDUnitTest extends DistributedTestCase {
         return (cqEvents + cqErrors) == events;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 10000, 100, true);
+    Wait.waitForCriterion(wc, 10000, 100, true);
   }
 
   public static void verifyFullValueRequestsFromClients(Long expected)
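
The hunks above keep each anonymous WaitCriterion as-is and only move the
static entry point from DistributedTestCase.waitForCriterion to
Wait.waitForCriterion. A minimal, standalone sketch of that polling idiom,
with an illustrative class name and lastKeyReceived flag that are not part
of the patch:

import java.util.concurrent.atomic.AtomicBoolean;

import com.gemstone.gemfire.test.dunit.Wait;
import com.gemstone.gemfire.test.dunit.WaitCriterion;

public class WaitPatternSketch {
  // Hypothetical flag that a cache listener would flip when the expected
  // event arrives; it stands in for whatever condition a test waits on.
  private static final AtomicBoolean lastKeyReceived = new AtomicBoolean(false);

  public static void awaitLastKey() {
    WaitCriterion wc = new WaitCriterion() {
      public boolean done() {
        return lastKeyReceived.get();     // condition the test is waiting for
      }
      public String description() {
        return "Last key NOT received.";  // reported if the wait times out
      }
    };
    // Poll every 100 ms for up to 10 seconds; the final true argument makes
    // a timeout fail the test instead of returning silently.
    Wait.waitForCriterion(wc, 10 * 1000, 100, true);
  }
}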

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DeltaToRegionRelationCQRegistrationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DeltaToRegionRelationCQRegistrationDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DeltaToRegionRelationCQRegistrationDUnitTest.java
index ed7232f..886e671 100755
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DeltaToRegionRelationCQRegistrationDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DeltaToRegionRelationCQRegistrationDUnitTest.java
@@ -44,8 +44,11 @@ import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.CacheServerImpl;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 /**
  * This tests the flag setting for region ( DataPolicy as Empty ) for
  * Delta propogation for a client while registering CQ
@@ -115,7 +118,7 @@ public class DeltaToRegionRelationCQRegistrationDUnitTest extends DistributedTes
   public void setUp() throws Exception
   {
     disconnectAllFromDS();
-    pause(5000);
+    Wait.pause(5000);
     final Host host = Host.getHost(0);
     server = host.getVM(0);
     client = host.getVM(1);
@@ -331,7 +334,7 @@ public class DeltaToRegionRelationCQRegistrationDUnitTest extends DistributedTes
       else  
         cq1.execute();
     } catch (Exception ex){
-      getLogWriter().info("CqService is :" + cqService);
+      LogWriterUtils.getLogWriter().info("CqService is :" + cqService);
       ex.printStackTrace();
       AssertionError err = new AssertionError("Failed to create CQ " + cqName1 + " . ");
       err.initCause(ex);
@@ -372,7 +375,7 @@ public class DeltaToRegionRelationCQRegistrationDUnitTest extends DistributedTes
       else  
         cq1.execute();
     } catch (Exception ex){
-      getLogWriter().info("CqService is :" + cqService);
+      LogWriterUtils.getLogWriter().info("CqService is :" + cqService);
       ex.printStackTrace();
       AssertionError err = new AssertionError("Failed to create CQ " + cqName1 + " . ");
       err.initCause(ex);
@@ -401,7 +404,7 @@ public class DeltaToRegionRelationCQRegistrationDUnitTest extends DistributedTes
         return "Wait Expired";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 5 * 1000, 100, true);
+    Wait.waitForCriterion(wc, 5 * 1000, 100, true);
     
     assertTrue(REGION_NAME1
         + " not present in cache client proxy : Delta is enable", proxy
@@ -442,7 +445,7 @@ public class DeltaToRegionRelationCQRegistrationDUnitTest extends DistributedTes
         return "Wait Expired";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 5 * 1000, 100, true);
+    Wait.waitForCriterion(wc, 5 * 1000, 100, true);
     
     assertTrue("Multiple entries for a region", proxy
         .getRegionsWithEmptyDataPolicy().size() == 2);
@@ -587,11 +590,6 @@ public class DeltaToRegionRelationCQRegistrationDUnitTest extends DistributedTes
     return new Integer(p.getPrimaryPort());
   }
   
-  public void tearDown2() throws Exception
-  {
-    // donot do any thing as we handling closing cache in test case
-  }
-
   /*
    * close cache
    */
@@ -667,7 +665,7 @@ public class DeltaToRegionRelationCQRegistrationDUnitTest extends DistributedTes
         "createServerCache")).intValue();
     client
         .invoke(DeltaToRegionRelationCQRegistrationDUnitTest.class, "createClientCache",
-            new Object[] { getServerHostName(server.getHost()),
+            new Object[] { NetworkUtils.getServerHostName(server.getHost()),
                 new Integer(PORT1) });
   }
   
@@ -679,7 +677,7 @@ public class DeltaToRegionRelationCQRegistrationDUnitTest extends DistributedTes
         "createServerCache")).intValue();
     client
         .invoke(DeltaToRegionRelationCQRegistrationDUnitTest.class, "createClientCacheWithNoRegion",
-            new Object[] { getServerHostName(server.getHost()),
+            new Object[] { NetworkUtils.getServerHostName(server.getHost()),
                 new Integer(PORT1) });
   }
   /*
@@ -702,8 +700,8 @@ public class DeltaToRegionRelationCQRegistrationDUnitTest extends DistributedTes
         "createServerCache")).intValue();
     primary = (Integer)client2.invoke(
         DeltaToRegionRelationCQRegistrationDUnitTest.class, "createClientCache2",
-        new Object[] { getServerHostName(server.getHost()),
-            getServerHostName(server2.getHost()), new Integer(PORT1),
+        new Object[] { NetworkUtils.getServerHostName(server.getHost()),
+            NetworkUtils.getServerHostName(server2.getHost()), new Integer(PORT1),
             new Integer(PORT2) });
   }
   
@@ -718,8 +716,8 @@ public class DeltaToRegionRelationCQRegistrationDUnitTest extends DistributedTes
         "createServerCache")).intValue();
     primary = (Integer)client2.invoke(
         DeltaToRegionRelationCQRegistrationDUnitTest.class, "createClientCache3",
-        new Object[] { getServerHostName(server.getHost()),
-            getServerHostName(server2.getHost()), new Integer(PORT1),
+        new Object[] { NetworkUtils.getServerHostName(server.getHost()),
+            NetworkUtils.getServerHostName(server2.getHost()), new Integer(PORT1),
             new Integer(PORT2) });
   }
   

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientCrashDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientCrashDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientCrashDUnitTest.java
index 13afbf2..753fe67 100755
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientCrashDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientCrashDUnitTest.java
@@ -43,9 +43,9 @@ public class DurableClientCrashDUnitTest extends DurableClientTestCase {
     this.durableClientVM.invoke(CacheServerTestUtil.class, "setClientCrash", new Object[] {new Boolean(true)});    
   }
   
-  public void tearDown2() throws Exception {
+  @Override
+  protected void preTearDownDurableClientTestCase() throws Exception {
     configureClientStop2();
-    super.tearDown2();
   }
   
   public void configureClientStop2()

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientNetDownDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientNetDownDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientNetDownDUnitTest.java
index 4dd41b8..96781f9 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientNetDownDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientNetDownDUnitTest.java
@@ -32,10 +32,9 @@ public class DurableClientNetDownDUnitTest extends DurableClientCrashDUnitTest {
   }
   
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownDurableClientTestCase() throws Exception {
     //ensure that the test flag is no longer set in this vm
     this.durableClientVM.invoke(CacheServerTestUtil.class, "reconnectClient");
-    super.tearDown2();
   }
 
   public void setPrimaryRecoveryCheck() {}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientSimpleDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientSimpleDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientSimpleDUnitTest.java
index 04ce137..ed0e565 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientSimpleDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientSimpleDUnitTest.java
@@ -42,10 +42,15 @@ import com.gemstone.gemfire.internal.cache.ClientServerObserverAdapter;
 import com.gemstone.gemfire.internal.cache.ClientServerObserverHolder;
 import com.gemstone.gemfire.internal.cache.PoolFactoryImpl;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
-import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
+import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 public class DurableClientSimpleDUnitTest extends DurableClientTestCase {
 
@@ -65,7 +70,7 @@ public class DurableClientSimpleDUnitTest extends DurableClientTestCase {
     // normally
     final String durableClientId = getName() + "_client";
     this.durableClientVM.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(durableClientVM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId), Boolean.TRUE});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId), Boolean.TRUE});
 
     // Send clientReady message
     this.durableClientVM.invoke(new CacheSerializableRunnable("Send clientReady") {
@@ -88,7 +93,7 @@ public class DurableClientSimpleDUnitTest extends DurableClientTestCase {
 
     // Start normal publisher client
     this.publisherClientVM.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(publisherClientVM.getHost()), serverPort, false), regionName});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(publisherClientVM.getHost()), serverPort, false), regionName});
 
     // Publish some entries
     final int numberOfEntries = 10;
@@ -147,7 +152,7 @@ public class DurableClientSimpleDUnitTest extends DurableClientTestCase {
     final String regionName1 = regionName + "1";
     final String regionName2 = regionName + "2";
     this.durableClientVM.invoke(CacheServerTestUtil.class, "createCacheClients", 
-        new Object[] {getClientPool(getServerHostName(durableClientVM.getHost()), serverPort, true), regionName1, regionName2, getClientDistributedSystemProperties(durableClientId)});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), serverPort, true), regionName1, regionName2, getClientDistributedSystemProperties(durableClientId)});
 
     // Send clientReady message
     this.durableClientVM.invoke(new CacheSerializableRunnable("Send clientReady") {
@@ -213,7 +218,7 @@ public class DurableClientSimpleDUnitTest extends DurableClientTestCase {
     // stops normally
     final String durableClientId = getName() + "_client";
     this.durableClientVM.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(durableClientVM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId), Boolean.TRUE});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId), Boolean.TRUE});
 
     // Send clientReady message
     this.durableClientVM.invoke(new CacheSerializableRunnable("Send clientReady") {
@@ -239,7 +244,7 @@ public class DurableClientSimpleDUnitTest extends DurableClientTestCase {
       public void run2() throws CacheException {
         getSystem(getClientDistributedSystemProperties(durableClientId));
         PoolFactoryImpl pf = (PoolFactoryImpl)PoolManager.createFactory();
-        pf.init(getClientPool(getServerHostName(publisherClientVM.getHost()), serverPort, true));
+        pf.init(getClientPool(NetworkUtils.getServerHostName(publisherClientVM.getHost()), serverPort, true));
         try {
           pf.create("uncreatablePool");
           fail("Should not have been able to create the pool");
@@ -247,7 +252,7 @@ public class DurableClientSimpleDUnitTest extends DurableClientTestCase {
           // expected exception
           disconnectFromDS();
         } catch (Exception e) {
-          fail("Should not have gotten here", e);
+          Assert.fail("Should not have gotten here", e);
         }
       }
     });
@@ -269,7 +274,7 @@ public class DurableClientSimpleDUnitTest extends DurableClientTestCase {
     
     // Start normal publisher client
     this.publisherClientVM.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(publisherClientVM.getHost()), serverPort, false), regionName});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(publisherClientVM.getHost()), serverPort, false), regionName});
 
     // Publish some entries
     final int numberOfEntries = 10;
@@ -325,7 +330,7 @@ public class DurableClientSimpleDUnitTest extends DurableClientTestCase {
     // stops normally
     final String durableClientId = getName() + "_client";
     this.durableClientVM.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(durableClientVM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId)});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId)});
 
     // Send clientReady message
     this.durableClientVM.invoke(new CacheSerializableRunnable("Send clientReady") {
@@ -339,7 +344,7 @@ public class DurableClientSimpleDUnitTest extends DurableClientTestCase {
     VM durableClient2VM = this.publisherClientVM;
     final String durableClientId2 = getName() + "_client2";
     durableClient2VM.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(durableClient2VM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId2)});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(durableClient2VM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId2)});
     
     // Send clientReady message
     durableClient2VM.invoke(new CacheSerializableRunnable("Send clientReady") {
@@ -407,7 +412,7 @@ public class DurableClientSimpleDUnitTest extends DurableClientTestCase {
     final int durableClientTimeout = 60; // keep the client alive for 60 seconds
     //final boolean durableClientKeepAlive = true; // keep the client alive when it stops normally
     this.durableClientVM.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(durableClientVM.getHost()), server1Port, server2Port, true), regionName, getClientDistributedSystemProperties(durableClientId, durableClientTimeout), Boolean.TRUE});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), server1Port, server2Port, true), regionName, getClientDistributedSystemProperties(durableClientId, durableClientTimeout), Boolean.TRUE});
 
     // Send clientReady message
     this.durableClientVM.invoke(new CacheSerializableRunnable("Send clientReady") {
@@ -445,7 +450,7 @@ public class DurableClientSimpleDUnitTest extends DurableClientTestCase {
 
     // Start normal publisher client
     this.publisherClientVM.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(publisherClientVM.getHost()), server1Port, server2Port, false), regionName});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(publisherClientVM.getHost()), server1Port, server2Port, false), regionName});
 
     // Publish some entries
     final int numberOfEntries = 10;
@@ -514,7 +519,7 @@ public class DurableClientSimpleDUnitTest extends DurableClientTestCase {
     
     // Re-start the durable client
     this.durableClientVM.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(durableClientVM.getHost()), server1Port, server2Port, true), regionName, getClientDistributedSystemProperties(durableClientId), Boolean.TRUE});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), server1Port, server2Port, true), regionName, getClientDistributedSystemProperties(durableClientId), Boolean.TRUE});
 
     // Send clientReady message
     this.durableClientVM.invoke(new CacheSerializableRunnable("Send clientReady") {
@@ -577,7 +582,7 @@ public class DurableClientSimpleDUnitTest extends DurableClientTestCase {
     final int durableClientTimeout = 60; // keep the client alive for 60 seconds
     //final boolean durableClientKeepAlive = true; // keep the client alive when it stops normally
     this.durableClientVM.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(durableClientVM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId, durableClientTimeout), Boolean.TRUE});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId, durableClientTimeout), Boolean.TRUE});
 
     // Send clientReady message
     this.durableClientVM.invoke(new CacheSerializableRunnable("Send clientReady") {
@@ -603,7 +608,7 @@ public class DurableClientSimpleDUnitTest extends DurableClientTestCase {
     VM durableClient2VM = this.server2VM;
     final String durableClientId2 = getName() + "_client2";
     durableClient2VM.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(durableClient2VM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId2, durableClientTimeout), Boolean.TRUE});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(durableClient2VM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId2, durableClientTimeout), Boolean.TRUE});
     
     // Send clientReady message
     durableClient2VM.invoke(new CacheSerializableRunnable("Send clientReady") {
@@ -652,7 +657,7 @@ public class DurableClientSimpleDUnitTest extends DurableClientTestCase {
     
     // Start normal publisher client
     this.publisherClientVM.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(publisherClientVM.getHost()), serverPort, false), regionName});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(publisherClientVM.getHost()), serverPort, false), regionName});
 
     // Publish some entries
     final int numberOfEntries = 10;
@@ -778,7 +783,7 @@ public class DurableClientSimpleDUnitTest extends DurableClientTestCase {
 
     // Re-start durable client 1
     this.durableClientVM.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(durableClientVM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId), Boolean.TRUE});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId), Boolean.TRUE});
 
     // Send clientReady message
     this.durableClientVM.invoke(new CacheSerializableRunnable("Send clientReady") {
@@ -789,7 +794,7 @@ public class DurableClientSimpleDUnitTest extends DurableClientTestCase {
 
     // Re-start durable client 2
     durableClient2VM.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(durableClient2VM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId2), Boolean.TRUE});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(durableClient2VM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId2), Boolean.TRUE});
 
     // Send clientReady message
     durableClient2VM.invoke(new CacheSerializableRunnable("Send clientReady") {
@@ -869,7 +874,7 @@ public class DurableClientSimpleDUnitTest extends DurableClientTestCase {
     final int durableClientTimeout = 60; // keep the client alive for 60 seconds
     //final boolean durableClientKeepAlive = true; // keep the client alive when it stops normally
     this.durableClientVM.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(durableClientVM.getHost()), server1Port, server2Port, true), regionName, getClientDistributedSystemProperties(durableClientId, durableClientTimeout), Boolean.TRUE});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), server1Port, server2Port, true), regionName, getClientDistributedSystemProperties(durableClientId, durableClientTimeout), Boolean.TRUE});
 
     // Send clientReady message
     this.durableClientVM.invoke(new CacheSerializableRunnable("Send clientReady") {
@@ -907,7 +912,7 @@ public class DurableClientSimpleDUnitTest extends DurableClientTestCase {
     });
 
     // VJR: wait for ack to go out
-    pause(5000);
+    Wait.pause(5000);
 
     // Stop the durable client
     this.durableClientVM.invoke(CacheServerTestUtil.class, "closeCache", new Object[] {new Boolean(true)});
@@ -929,7 +934,7 @@ public class DurableClientSimpleDUnitTest extends DurableClientTestCase {
         
     // Start normal publisher client
     this.publisherClientVM.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(publisherClientVM.getHost()), server1Port, server2Port, false), regionName});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(publisherClientVM.getHost()), server1Port, server2Port, false), regionName});
 
     // Publish some entries
     final int numberOfEntries = 10;
@@ -969,7 +974,7 @@ public class DurableClientSimpleDUnitTest extends DurableClientTestCase {
     // Re-start the durable client that is kept alive on the server when it stops
     // normally
     this.durableClientVM.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(durableClientVM.getHost()), server1Port, server2Port, true), regionName, getClientDistributedSystemProperties(durableClientId, durableClientTimeout), Boolean.TRUE});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), server1Port, server2Port, true), regionName, getClientDistributedSystemProperties(durableClientId, durableClientTimeout), Boolean.TRUE});
 
     // Send clientReady message
     this.durableClientVM.invoke(new CacheSerializableRunnable("Send clientReady") {
@@ -1056,7 +1061,7 @@ public class DurableClientSimpleDUnitTest extends DurableClientTestCase {
     final String durableClientId = getName() + "_client";
     // make the client use ClientCacheFactory so it will have a default pool
     this.durableClientVM.invoke(CacheServerTestUtil.class, "createClientCache", 
-        new Object[] {getClientPool(getServerHostName(durableClientVM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId)});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId)});
 
     // verify that readyForEvents has not yet been called on the client's default pool
     this.durableClientVM.invoke(new CacheSerializableRunnable("check readyForEvents not called") {
@@ -1187,7 +1192,7 @@ public class DurableClientSimpleDUnitTest extends DurableClientTestCase {
       
       // Start normal publisher client
       this.publisherClientVM.invoke(CacheServerTestUtil.class, "createCacheClient", 
-          new Object[] {getClientPool(getServerHostName(publisherClientVM.getHost()), serverPort, false), regionName});
+          new Object[] {getClientPool(NetworkUtils.getServerHostName(publisherClientVM.getHost()), serverPort, false), regionName});
   
       // Publish some entries
       final int numberOfEntries = 10;
@@ -1353,7 +1358,7 @@ public class DurableClientSimpleDUnitTest extends DurableClientTestCase {
         ClientServerObserver origObserver = ClientServerObserverHolder.setInstance(new ClientServerObserverAdapter() {
           public void beforeSendingClientAck()
           {
-            getLogWriter().info("beforeSendingClientAck invoked");
+            LogWriterUtils.getLogWriter().info("beforeSendingClientAck invoked");
            
           }
         });
@@ -1430,7 +1435,7 @@ public class DurableClientSimpleDUnitTest extends DurableClientTestCase {
     
     // Start normal publisher client
     this.publisherClientVM.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(publisherClientVM.getHost()), serverPort, false), regionName});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(publisherClientVM.getHost()), serverPort, false), regionName});
 
     // Publish some entries
     final int numberOfEntries = 10;
@@ -2877,8 +2882,8 @@ public class DurableClientSimpleDUnitTest extends DurableClientTestCase {
    */
   public void testRejectClientWhenDrainingCq() throws Exception {
     try {
-      DistributedTestCase.addExpectedException(LocalizedStrings.CacheClientNotifier_COULD_NOT_CONNECT_DUE_TO_CQ_BEING_DRAINED.toLocalizedString());
-      DistributedTestCase.addExpectedException("Could not initialize a primary queue on startup. No queue servers available.");
+      IgnoredException.addIgnoredException(LocalizedStrings.CacheClientNotifier_COULD_NOT_CONNECT_DUE_TO_CQ_BEING_DRAINED.toLocalizedString());
+      IgnoredException.addIgnoredException("Could not initialize a primary queue on startup. No queue servers available.");
       
       String greaterThan5Query = "select * from /" + regionName + " p where p.ID > 5";
       String allQuery = "select * from /" + regionName + " p where p.ID > -1";
@@ -2949,7 +2954,7 @@ public class DurableClientSimpleDUnitTest extends DurableClientTestCase {
               return null;
             }
           };
-          DistributedTestCase.waitForCriterion(ev, 10 * 1000, 200, true);
+          Wait.waitForCriterion(ev, 10 * 1000, 200, true);
           assertTrue(((RejectClientReconnectTestHook) CacheClientProxy.testHook).wasClientRejected());
         }
       });
@@ -3281,7 +3286,7 @@ public class DurableClientSimpleDUnitTest extends DurableClientTestCase {
     //final boolean durableClientKeepAlive = true; // keep the client alive when it stops normally
     final String durableClientId = getName() + "_client";
     this.durableClientVM.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(durableClientVM.getHost()), server1Port, server2Port, true), regionName, getClientDistributedSystemProperties(durableClientId, durableClientTimeout), Boolean.TRUE});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), server1Port, server2Port, true), regionName, getClientDistributedSystemProperties(durableClientId, durableClientTimeout), Boolean.TRUE});
 
     // Send clientReady message
     this.durableClientVM.invoke(new CacheSerializableRunnable("Send clientReady") {
@@ -3346,7 +3351,7 @@ public class DurableClientSimpleDUnitTest extends DurableClientTestCase {
     // Start up the client again. This time initialize it so that it is not kept
     // alive on the servers when it stops normally.
     this.durableClientVM.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(durableClientVM.getHost()), server1Port, server2Port, true), regionName, getClientDistributedSystemProperties(durableClientId), Boolean.TRUE});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), server1Port, server2Port, true), regionName, getClientDistributedSystemProperties(durableClientId), Boolean.TRUE});
 
     // Send clientReady message
     this.durableClientVM.invoke(new CacheSerializableRunnable("Send clientReady") {
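
The addExpectedException calls in this test become static
IgnoredException.addIgnoredException calls. A small sketch of how a caller
might scope such a suppression; the hunks only show the add call, so the
handle and the remove() at the end are an assumption about the
IgnoredException API rather than something this patch demonstrates:

import com.gemstone.gemfire.test.dunit.IgnoredException;

public class IgnoredExceptionSketch {
  public static void runExpectingQueueDrainError(Runnable testBody) {
    // Suppress the error text the test expects the servers to log.
    IgnoredException ignored = IgnoredException.addIgnoredException(
        "Could not initialize a primary queue on startup. No queue servers available.");
    try {
      testBody.run();   // exercise the path that is expected to log the error
    } finally {
      ignored.remove(); // assumption: addIgnoredException returns a removable handle
    }
  }
}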

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientTestCase.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientTestCase.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientTestCase.java
index c3a3482..f9d73e0 100755
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientTestCase.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientTestCase.java
@@ -64,7 +64,11 @@ import com.gemstone.gemfire.internal.cache.PoolFactoryImpl;
 import com.gemstone.gemfire.internal.cache.ha.HARegionQueue;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Class <code>DurableClientTestCase</code> tests durable client
@@ -96,17 +100,24 @@ public class DurableClientTestCase extends DistributedTestCase {
     this.publisherClientVM = host.getVM(3);
     this.regionName = getName() + "_region";
     //Clients see this when the servers disconnect
-    addExpectedException("Could not find any server");
-    testName = getName();
-    System.out.println("\n\n[setup] START TEST " + getClass().getSimpleName()+"."+ testName+"\n\n");
+    IgnoredException.addIgnoredException("Could not find any server");
+    setTestMethodName(getName());
+    assertEquals(getName(), getTestMethodName());
+    System.out.println("\n\n[setup] START TEST " + getClass().getSimpleName()+"."+ getTestMethodName()+"\n\n");
   }
   
-  public void tearDown2() throws Exception {
+  @Override
+  protected final void preTearDown() throws Exception {
+    preTearDownDurableClientTestCase();
+    
     this.durableClientVM.invoke(CacheServerTestUtil.class, "closeCache");
     this.publisherClientVM.invoke(CacheServerTestUtil.class, "closeCache");
     this.server1VM.invoke(CacheServerTestUtil.class, "closeCache");
     this.server2VM.invoke(CacheServerTestUtil.class, "closeCache");
   }
+  
+  protected void preTearDownDurableClientTestCase() throws Exception {
+  }
 
   /**
    * Test that starting a durable client is correctly processed by the server.
@@ -121,7 +132,7 @@ public class DurableClientTestCase extends DistributedTestCase {
     // stops normally
     final String durableClientId = getName() + "_client";
     this.durableClientVM.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(durableClientVM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId)});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId)});
 
     // Send clientReady message
     this.durableClientVM.invoke(new CacheSerializableRunnable("Send clientReady") {
@@ -177,7 +188,7 @@ public class DurableClientTestCase extends DistributedTestCase {
       final String durableClientId = getName() + "_client";
 
       this.durableClientVM.invoke(CacheServerTestUtil.class, "createCacheClient",
-          new Object[] { getClientPool(getServerHostName(durableClientVM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId), new Boolean(false), jp });
+          new Object[] { getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId), new Boolean(false), jp });
 
       // Send clientReady message
       this.durableClientVM.invoke(new CacheSerializableRunnable("Send clientReady") {
@@ -241,7 +252,7 @@ public class DurableClientTestCase extends DistributedTestCase {
   {
     printClientProxyState("Before");
     this.durableClientVM.invoke(CacheServerTestUtil.class, "closeCache",new Object[] {new Boolean(keepAlive)});
-    pause(1000);
+    Wait.pause(1000);
     printClientProxyState("after");
   }
 
@@ -310,7 +321,7 @@ public class DurableClientTestCase extends DistributedTestCase {
     final int durableClientTimeout = 60; // keep the client alive for 60 seconds
     //final boolean durableClientKeepAlive = true; // keep the client alive when it stops normally
     this.durableClientVM.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(durableClientVM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId, durableClientTimeout)});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId, durableClientTimeout)});
 
     // Send clientReady message
     this.durableClientVM.invoke(new CacheSerializableRunnable("Send clientReady") {
@@ -349,7 +360,7 @@ public class DurableClientTestCase extends DistributedTestCase {
 
     // Re-start the durable client
     this.restartDurableClient(new Object[] {
-        getClientPool(getServerHostName(durableClientVM.getHost()),serverPort, true),
+        getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()),serverPort, true),
         regionName,
         getClientDistributedSystemProperties(durableClientId,
             durableClientTimeout) });
@@ -393,7 +404,7 @@ public class DurableClientTestCase extends DistributedTestCase {
     final int durableClientTimeout = 60; // keep the client alive for 60 seconds
     //final boolean durableClientKeepAlive = true; // keep the client alive when it stops normally
     this.durableClientVM.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(durableClientVM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId, durableClientTimeout)});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId, durableClientTimeout)});
 
 //    // Send clientReady message
 //    this.durableClientVM.invoke(new CacheSerializableRunnable("Send clientReady") {
@@ -442,7 +453,7 @@ public class DurableClientTestCase extends DistributedTestCase {
     // Re-start the durable client (this is necessary so the
     //netDown test will set the appropriate system properties.
     this.restartDurableClient(new Object[] {
-        getClientPool(getServerHostName(durableClientVM.getHost()), serverPort, true),
+        getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), serverPort, true),
         regionName,
         getClientDistributedSystemProperties(durableClientId,
             durableClientTimeout) });
@@ -482,7 +493,7 @@ public class DurableClientTestCase extends DistributedTestCase {
     final int durableClientTimeout = 5; // keep the client alive for 5 seconds
     //final boolean durableClientKeepAlive = true; // keep the client alive when it stops normally
     this.durableClientVM.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(durableClientVM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId, durableClientTimeout)});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId, durableClientTimeout)});
 
     // Send clientReady message
     this.durableClientVM.invoke(new CacheSerializableRunnable("Send clientReady") {
@@ -538,7 +549,7 @@ public class DurableClientTestCase extends DistributedTestCase {
     });
     
     this.restartDurableClient(new Object[] {
-        getClientPool(getServerHostName(Host.getHost(0)), serverPort, true),
+        getClientPool(NetworkUtils.getServerHostName(Host.getHost(0)), serverPort, true),
         regionName,
         getClientDistributedSystemProperties(durableClientId,
             durableClientTimeout) });
@@ -565,7 +576,7 @@ public class DurableClientTestCase extends DistributedTestCase {
     final int durableClientTimeout = 120; // keep the client alive for 60 seconds
     //final boolean durableClientKeepAlive = true; // keep the client alive when it stops normally
     this.durableClientVM.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(durableClientVM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId, durableClientTimeout), Boolean.TRUE});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId, durableClientTimeout), Boolean.TRUE});
 
     // Send clientReady message
     this.durableClientVM.invoke(new CacheSerializableRunnable("Send clientReady") {
@@ -588,7 +599,7 @@ public class DurableClientTestCase extends DistributedTestCase {
 
     // Start normal publisher client
     this.publisherClientVM.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(publisherClientVM.getHost()), serverPort, false), regionName});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(publisherClientVM.getHost()), serverPort, false), regionName});
 
     // Publish some entries
     final int numberOfEntries = 1;
@@ -632,7 +643,7 @@ public class DurableClientTestCase extends DistributedTestCase {
         };
         //If we wait too long, the durable queue will be gone, because
         //the timeout is 120 seconds.
-        DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+        Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
       }
     });
 
@@ -682,7 +693,7 @@ public class DurableClientTestCase extends DistributedTestCase {
         };
         //If we wait too long, the durable queue will be gone, because
         //the timeout is 120 seconds.
-        DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+        Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
       }
     });
 
@@ -691,7 +702,7 @@ public class DurableClientTestCase extends DistributedTestCase {
     
     // Re-start the durable client
     this.restartDurableClient(new Object[] {
-        getClientPool(getServerHostName(durableClientVM.getHost()), serverPort, true), regionName,
+        getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), serverPort, true), regionName,
         getClientDistributedSystemProperties(durableClientId), Boolean.TRUE });
 
     // Verify durable client on server
@@ -736,7 +747,7 @@ public class DurableClientTestCase extends DistributedTestCase {
     final int durableClientTimeout = 60; // keep the client alive for 60 seconds
     //final boolean durableClientKeepAlive = true; // keep the client alive when it stops normally
     this.durableClientVM.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(durableClientVM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId, durableClientTimeout), Boolean.TRUE});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId, durableClientTimeout), Boolean.TRUE});
 
     // Send clientReady message
     this.durableClientVM.invoke(new CacheSerializableRunnable("Send clientReady") {
@@ -759,7 +770,7 @@ public class DurableClientTestCase extends DistributedTestCase {
 
     // Start normal publisher client
     this.publisherClientVM.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(publisherClientVM.getHost()), serverPort, false), regionName});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(publisherClientVM.getHost()), serverPort, false), regionName});
 
     // Publish some entries
     final int numberOfEntries = 1;
@@ -804,7 +815,7 @@ public class DurableClientTestCase extends DistributedTestCase {
             return null;
           }
         };
-        DistributedTestCase.waitForCriterion(ev, 1000, 200, true);
+        Wait.waitForCriterion(ev, 1000, 200, true);
         assertTrue(proxy.isPaused());
       }
     });
@@ -847,7 +858,7 @@ public class DurableClientTestCase extends DistributedTestCase {
     
     // Re-start the durable client
     this.restartDurableClient(new Object[] {
-        getClientPool(getServerHostName(durableClientVM.getHost()), serverPort, true), regionName,
+        getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), serverPort, true), regionName,
         getClientDistributedSystemProperties(durableClientId), Boolean.TRUE });
 
     // Verify durable client on server
@@ -951,7 +962,7 @@ public class DurableClientTestCase extends DistributedTestCase {
     // stops normally
     final String durableClientId = getName() + "_client";
     this.durableClientVM.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(durableClientVM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId), Boolean.TRUE});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), serverPort, true), regionName, getClientDistributedSystemProperties(durableClientId), Boolean.TRUE});
 
     // Send clientReady message
     this.durableClientVM.invoke(new CacheSerializableRunnable("Send clientReady") {
@@ -1017,7 +1028,7 @@ public class DurableClientTestCase extends DistributedTestCase {
     
     // Start a publisher
     this.publisherClientVM.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(publisherClientVM.getHost()), serverPort, false), regionName});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(publisherClientVM.getHost()), serverPort, false), regionName});
 
     // Publish some messages
     // Publish some entries
@@ -1072,7 +1083,7 @@ public class DurableClientTestCase extends DistributedTestCase {
   public void DISABLED_testDurableHAFailover() throws InterruptedException
   {
     //Clients see this when the servers disconnect
-    addExpectedException("Could not find any server");
+    IgnoredException.addIgnoredException("Could not find any server");
     durableFailover(1);
     durableFailoverAfterReconnect(1);
   }
@@ -1102,10 +1113,10 @@ public class DurableClientTestCase extends DistributedTestCase {
     final int durableClientTimeout = 60; // keep the client alive for 60 seconds
     Pool clientPool;
     if (redundancyLevel == 1) {
-      clientPool = getClientPool(getServerHostName(Host.getHost(0)), server1Port, server2Port, true); 
+      clientPool = getClientPool(NetworkUtils.getServerHostName(Host.getHost(0)), server1Port, server2Port, true); 
     }
     else {
-      clientPool = getClientPool(getServerHostName(Host.getHost(0)), server1Port, server2Port, true, 0);
+      clientPool = getClientPool(NetworkUtils.getServerHostName(Host.getHost(0)), server1Port, server2Port, true, 0);
     }
     
     this.durableClientVM.invoke(CacheServerTestUtil.class, "disableShufflingOfEndpoints");
@@ -1139,7 +1150,7 @@ public class DurableClientTestCase extends DistributedTestCase {
         
     // Start normal publisher client
     this.publisherClientVM.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(publisherClientVM.getHost()), server1Port, server2Port, false), regionName});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(publisherClientVM.getHost()), server1Port, server2Port, false), regionName});
 
     // Publish some entries
     final int numberOfEntries = 1;
@@ -1296,10 +1307,10 @@ public class DurableClientTestCase extends DistributedTestCase {
     final int durableClientTimeout = 60; // keep the client alive for 60 seconds
     Pool clientPool;
     if (redundancyLevel == 1) {
-      clientPool = getClientPool(getServerHostName(Host.getHost(0)), server1Port, server2Port, true); 
+      clientPool = getClientPool(NetworkUtils.getServerHostName(Host.getHost(0)), server1Port, server2Port, true); 
     }
     else {
-      clientPool = getClientPool(getServerHostName(Host.getHost(0)), server1Port, server2Port, true, 0);
+      clientPool = getClientPool(NetworkUtils.getServerHostName(Host.getHost(0)), server1Port, server2Port, true, 0);
     }
     
     this.durableClientVM.invoke(CacheServerTestUtil.class, "disableShufflingOfEndpoints");
@@ -1328,7 +1339,7 @@ public class DurableClientTestCase extends DistributedTestCase {
     
     // Start normal publisher client
     this.publisherClientVM.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(publisherClientVM.getHost()), server1Port, server2Port, false), regionName});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(publisherClientVM.getHost()), server1Port, server2Port, false), regionName});
 
     // Publish some entries
     final int numberOfEntries = 1;
@@ -1667,7 +1678,7 @@ public class DurableClientTestCase extends DistributedTestCase {
         return getAllClientProxyState();
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 50 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 50 * 1000, 200, true);
   }
   
   protected static void checkProxyIsAlive(final CacheClientProxy proxy) {
@@ -1679,7 +1690,7 @@ public class DurableClientTestCase extends DistributedTestCase {
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 15 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 15 * 1000, 200, true);
   }
   
   protected static int getNumberOfClientProxies() {
@@ -1728,7 +1739,7 @@ public class DurableClientTestCase extends DistributedTestCase {
         return "never received marker ack";
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 3 * 60 * 1000, 200/*0*/, true);
+    Wait.waitForCriterion(ev, 3 * 60 * 1000, 200/*0*/, true);
   }
 
   /**
@@ -1783,7 +1794,7 @@ public class DurableClientTestCase extends DistributedTestCase {
         
         // wait for primary (and interest) recovery
         // recovery satisfier task currently uses ping interval value
-        DistributedTestCase.waitForCriterion(waitForPrimaryRecovery, 30000, 1000, true);
+        Wait.waitForCriterion(waitForPrimaryRecovery, 30000, 1000, true);
       }
     });
   }
@@ -1905,7 +1916,7 @@ public class DurableClientTestCase extends DistributedTestCase {
             return "cq numHAQueuedEvents stat was expected to be " + expectedNumber + " but was instead " + cqQuery.getVsdStats().getNumHAQueuedEvents();
           }
         };
-        DistributedTestCase.waitForCriterion(ev, 10 * 1000, 200, true);
+        Wait.waitForCriterion(ev, 10 * 1000, 200, true);
         
         assertEquals(expectedNumber, cqQuery.getVsdStats().getNumHAQueuedEvents());
       }
@@ -1936,7 +1947,7 @@ public class DurableClientTestCase extends DistributedTestCase {
             return "queue size stat was expected to be " + expectedNumber + " but was instead " + clientProxy.getQueueSizeStat();
           }
         };
-        DistributedTestCase.waitForCriterion(ev, 10 * 1000, 200, true);
+        Wait.waitForCriterion(ev, 10 * 1000, 200, true);
         
         assertTrue(clientProxy.getQueueSizeStat() == expectedNumber || clientProxy.getQueueSizeStat() == remaining);
       }
@@ -2005,7 +2016,7 @@ public class DurableClientTestCase extends DistributedTestCase {
         CacheServerTestUtil.class,
         "createCacheClient",
         new Object[] {
-            getClientPool(getServerHostName(durableClientVM.getHost()),
+            getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()),
                 serverPort1, true), regionName,
             getClientDistributedSystemProperties(durableClientId, durableTimeoutInSeconds),
             Boolean.TRUE });
@@ -2016,7 +2027,7 @@ public class DurableClientTestCase extends DistributedTestCase {
         CacheServerTestUtil.class,
         "createCacheClient",
         new Object[] {
-            getClientPool(getServerHostName(durableClientVM.getHost()),
+            getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()),
                 serverPort1, true), regionName,
             getClientDistributedSystemProperties(durableClientId),
             Boolean.TRUE });
@@ -2027,7 +2038,7 @@ public class DurableClientTestCase extends DistributedTestCase {
         CacheServerTestUtil.class,
         "createCacheClient",
         new Object[] {
-            getClientPool(getServerHostName(vm.getHost()),
+            getClientPool(NetworkUtils.getServerHostName(vm.getHost()),
                 serverPort1, serverPort2, true), regionName,
             getClientDistributedSystemProperties(durableClientId),
             Boolean.TRUE });
@@ -2038,7 +2049,7 @@ public class DurableClientTestCase extends DistributedTestCase {
         CacheServerTestUtil.class,
         "createCacheClient",
         new Object[] {
-            getClientPool(getServerHostName(vm.getHost()),
+            getClientPool(NetworkUtils.getServerHostName(vm.getHost()),
                 serverPort1, false), regionName });
   }
   
@@ -2047,7 +2058,7 @@ public class DurableClientTestCase extends DistributedTestCase {
         CacheServerTestUtil.class,
         "createCacheClient",
         new Object[] {
-            getClientPool(getServerHostName(vm.getHost()),
+            getClientPool(NetworkUtils.getServerHostName(vm.getHost()),
                 serverPort1, serverPort2, false), regionName });
   }
   
@@ -2077,7 +2088,7 @@ public class DurableClientTestCase extends DistributedTestCase {
             return "No primary updater";
           }
         };
-        DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+        Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
         assertTrue(CacheServerTestUtil.getPool().isPrimaryUpdaterAlive());
       }
     });
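
The tear-down hunks above all follow one template-method shape: the base test
case keeps a final preTearDown() that always runs the shared cache cleanup,
and subclasses contribute through the overridable
preTearDownDurableClientTestCase() hook instead of overriding tearDown2() and
having to remember super.tearDown2(). A self-contained sketch of that shape;
all names other than preTearDown and the hook are illustrative:

abstract class BaseDurableClientTearDownSketch {
  // In the real base class this runs before the framework's own tear-down.
  protected final void preTearDown() throws Exception {
    preTearDownDurableClientTestCase();   // subclass-specific cleanup first
    closeAllCaches();                     // shared cleanup can no longer be skipped
  }

  // Hook for subclasses; the default is a no-op.
  protected void preTearDownDurableClientTestCase() throws Exception {
  }

  private void closeAllCaches() {
    // e.g. close the client and server caches in every test VM
  }
}

class CrashTestTearDownSketch extends BaseDurableClientTearDownSketch {
  @Override
  protected void preTearDownDurableClientTestCase() throws Exception {
    // crash-specific cleanup, with no super call to forget
  }
}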

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/management/CacheServerManagementDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/management/CacheServerManagementDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/management/CacheServerManagementDUnitTest.java
index 58c682b..b4e589f 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/management/CacheServerManagementDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/management/CacheServerManagementDUnitTest.java
@@ -49,10 +49,14 @@ import com.gemstone.gemfire.management.internal.JmxManagerLocatorRequest;
 import com.gemstone.gemfire.management.internal.JmxManagerLocatorResponse;
 import com.gemstone.gemfire.management.internal.MBeanJMXAdapter;
 import com.gemstone.gemfire.management.internal.SystemManagementService;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Cache Server related management test cases
@@ -89,8 +93,8 @@ public class CacheServerManagementDUnitTest extends LocatorTestBase {
     super.setUp();
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  @Override
+  protected final void postTearDownLocatorTestBase() throws Exception {
     disconnectAllFromDS();
   }
 
@@ -118,7 +122,7 @@ public class CacheServerManagementDUnitTest extends LocatorTestBase {
 
     final int port = server.invokeInt(CqQueryDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
 
     // Create client.
     cqDUnitTest.createClient(client, port, host0);
@@ -142,7 +146,7 @@ public class CacheServerManagementDUnitTest extends LocatorTestBase {
 
     // Close.
 
-    pause(2000);
+    Wait.pause(2000);
     checkNavigation(managingNode,member,serverPort);
     verifyIndex(server,serverPort);
     // This will test all CQs and will close the cq in its final step
@@ -174,7 +178,7 @@ public class CacheServerManagementDUnitTest extends LocatorTestBase {
     int locatorPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
     startLocatorInVM(locator, locatorPort, "");
     
-    String locators = getServerHostName(locator.getHost())+ "[" + locatorPort + "]";
+    String locators = NetworkUtils.getServerHostName(locator.getHost())+ "[" + locatorPort + "]";
     
    
     int serverPort = startBridgeServerInVM(server, null, locators);
@@ -182,7 +186,7 @@ public class CacheServerManagementDUnitTest extends LocatorTestBase {
     addClientNotifListener(server,serverPort);
 
     // Start a client and make sure that proper notification is received
-    startBridgeClientInVM(client, null, getServerHostName(locator.getHost()), locatorPort);
+    startBridgeClientInVM(client, null, NetworkUtils.getServerHostName(locator.getHost()), locatorPort);
     
     //stop the client and make sure the bridge server notifies
     stopBridgeMemberVM(client);
@@ -211,7 +215,7 @@ public class CacheServerManagementDUnitTest extends LocatorTestBase {
     final int locatorPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
     startLocator(locator, locatorPort, "");
 
-    String locators = getServerHostName(locator.getHost())+ "[" + locatorPort + "]";
+    String locators = NetworkUtils.getServerHostName(locator.getHost())+ "[" + locatorPort + "]";
     
     //Step 2:
     int serverPort = startBridgeServerInVM(server, null, locators);
@@ -275,7 +279,7 @@ public class CacheServerManagementDUnitTest extends LocatorTestBase {
         Properties props = new Properties();
         props.setProperty(DistributionConfig.MCAST_PORT_NAME, String.valueOf(0));
         props.setProperty(DistributionConfig.LOCATORS_NAME, otherLocators);
-        props.setProperty(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+        props.setProperty(DistributionConfig.LOG_LEVEL_NAME, LogWriterUtils.getDUnitLogLevel());
         props.setProperty(DistributionConfig.JMX_MANAGER_HTTP_PORT_NAME, "0");
         props.setProperty(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "false");
         try {
@@ -283,14 +287,14 @@ public class CacheServerManagementDUnitTest extends LocatorTestBase {
               + ".log");
           InetAddress bindAddr = null;
           try {
-            bindAddr = InetAddress.getByName(getServerHostName(vm.getHost()));
+            bindAddr = InetAddress.getByName(NetworkUtils.getServerHostName(vm.getHost()));
           } catch (UnknownHostException uhe) {
-            fail("While resolving bind address ", uhe);
+            Assert.fail("While resolving bind address ", uhe);
           }
           Locator locator = Locator.startLocatorAndDS(locatorPort, logFile, bindAddr, props);
           remoteObjects.put(LOCATOR_KEY, locator);
         } catch (IOException ex) {
-          fail("While starting locator on port " + locatorPort, ex);
+          Assert.fail("While starting locator on port " + locatorPort, ex);
         }
       }
     });
@@ -352,7 +356,7 @@ public class CacheServerManagementDUnitTest extends LocatorTestBase {
             return null;
           }
         };
-        DistributedTestCase.waitForCriterion(ev, 10 * 1000, 200, true);
+        Wait.waitForCriterion(ev, 10 * 1000, 200, true);
         assertTrue(bean.isRunning());
         TestCacheServerNotif nt = new TestCacheServerNotif();
         try {
@@ -396,7 +400,7 @@ public class CacheServerManagementDUnitTest extends LocatorTestBase {
         CacheServerMXBean bean = service
             .getLocalCacheServerMXBean(serverPort);
         assertEquals(bean.getIndexCount(), 1);
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "<ExpectedString> Index is   " + bean.getIndexList()[0]
                 + "</ExpectedString> ");
         try {
@@ -461,7 +465,7 @@ public class CacheServerManagementDUnitTest extends LocatorTestBase {
             return null;
           }
         };
-        DistributedTestCase.waitForCriterion(ev, 10 * 1000, 200, true);
+        Wait.waitForCriterion(ev, 10 * 1000, 200, true);
         assertTrue(bean.isRunning());
         assertCacheServerConfig(bean);
 
@@ -487,7 +491,7 @@ public class CacheServerManagementDUnitTest extends LocatorTestBase {
         .getMessageTimeToLive());
     assertEquals(CacheServer.DEFAULT_LOAD_POLL_INTERVAL, bean
         .getLoadPollInterval());
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "<ExpectedString> LoadProbe of the Server is  "
             + bean.fetchLoadProbe().toString() + "</ExpectedString> ");
   }
@@ -514,14 +518,14 @@ public class CacheServerManagementDUnitTest extends LocatorTestBase {
 
           String clientId = bean.getClientIds()[0];
           assertNotNull(clientId);
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "<ExpectedString> ClientId of the Server is  " + clientId
                   + "</ExpectedString> ");
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "<ExpectedString> Active Query Count  "
                   + bean.getActiveCQCount() + "</ExpectedString> ");
           
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "<ExpectedString> Registered Query Count  "
                   + bean.getRegisteredCQCount() + "</ExpectedString> ");
 
@@ -563,7 +567,7 @@ public class CacheServerManagementDUnitTest extends LocatorTestBase {
     @Override
     public void handleNotification(Notification notification, Object handback) {
       assertNotNull(notification);
-      getLogWriter().info("Expected String :" + notification.toString());
+      LogWriterUtils.getLogWriter().info("Expected String :" + notification.toString());
     }
 
   }


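Taken together, the hunks in this file (and throughout the patch) reduce to one substitution pattern: each static helper inherited from DistributedTestCase moves to a dedicated dunit utility class, with the call arguments left untouched. Roughly, as the replacements above show:

    getServerHostName(host)        ->  NetworkUtils.getServerHostName(host)
    pause(ms)                      ->  Wait.pause(ms)
    waitForCriterion(wc, ...)      ->  Wait.waitForCriterion(wc, ...)
    getLogWriter()                 ->  LogWriterUtils.getLogWriter()
    getDUnitLogLevel()             ->  LogWriterUtils.getDUnitLogLevel()
    fail(message, throwable)       ->  Assert.fail(message, throwable)
    addExpectedException(name)     ->  IgnoredException.addIgnoredException(name)
    tearDown2()                    ->  preTearDown()/postTearDown*() template hooks
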
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestResultPolicyDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestResultPolicyDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestResultPolicyDUnitTest.java
index 5e74812..597da3d 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestResultPolicyDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestResultPolicyDUnitTest.java
@@ -33,9 +33,13 @@ import com.gemstone.gemfire.cache.server.CacheServer;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.AvailablePort;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 import com.gemstone.gemfire.cache.client.*;
 
 /**
@@ -59,7 +63,7 @@ public class InterestResultPolicyDUnitTest extends DistributedTestCase
 
   private static final String REGION_NAME = "InterestResultPolicyDUnitTest_region" ;
 
-  private ExpectedException expectedEx;
+  private IgnoredException expectedEx;
 
   /**
    * Creates a test instance with the given name
@@ -78,7 +82,7 @@ public class InterestResultPolicyDUnitTest extends DistributedTestCase
   public void setUp() throws Exception {
     super.setUp();
     disconnectAllFromDS();
-    pause(5000);
+    Wait.pause(5000);
     final Host host = Host.getHost(0);
     vm0 = host.getVM(0);
     vm1 = host.getVM(1);
@@ -90,10 +94,9 @@ public class InterestResultPolicyDUnitTest extends DistributedTestCase
    * Closes the cache on server and client
    */
   @Override
-  public void tearDown2() throws Exception
-  {
+  protected final void preTearDown() throws Exception {
     // might get ServerConnectivityExceptions during shutdown
-    this.expectedEx = addExpectedException(ServerConnectivityException.class
+    this.expectedEx = IgnoredException.addIgnoredException(ServerConnectivityException.class
         .getName());
     // close server
     vm0.invoke(InterestResultPolicyDUnitTest.class, "closeCache");
@@ -102,8 +105,7 @@ public class InterestResultPolicyDUnitTest extends DistributedTestCase
   }
 
   @Override
-  protected void realTearDown() throws Exception {
-    super.realTearDown();
+  protected void postTearDown() throws Exception {
     if (this.expectedEx != null) {
       this.expectedEx.remove();
     }
@@ -125,7 +127,7 @@ public class InterestResultPolicyDUnitTest extends DistributedTestCase
     objArr[0] = InterestResultPolicy.NONE;
     objArr[1] = new Integer(PREPOPULATED_ENTRIES);
     vm1.invoke(InterestResultPolicyDUnitTest.class, "createClientCache", new Object[] {
-      getServerHostName(Host.getHost(0)), new Integer(PORT)});
+      NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT)});
     vm1.invoke(InterestResultPolicyDUnitTest.class, "registerInterest", objArr);
     vm1.invoke(InterestResultPolicyDUnitTest.class, "verifyResult", objArr);
     logger.fine("testPolicyNone END");
@@ -147,7 +149,7 @@ public class InterestResultPolicyDUnitTest extends DistributedTestCase
     objArr[0] = InterestResultPolicy.KEYS;
     objArr[1] = new Integer(PREPOPULATED_ENTRIES);
     vm1.invoke(InterestResultPolicyDUnitTest.class, "createClientCache", new Object[] {
-      getServerHostName(Host.getHost(0)), new Integer(PORT)});;
+      NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT)});;
     vm1.invoke(InterestResultPolicyDUnitTest.class, "registerInterest", objArr);
     vm1.invoke(InterestResultPolicyDUnitTest.class, "verifyResult", objArr);
     logger.fine("testPolicyKeys END");
@@ -169,7 +171,7 @@ public class InterestResultPolicyDUnitTest extends DistributedTestCase
     objArr[0] = InterestResultPolicy.KEYS_VALUES;
     objArr[1] = new Integer(PREPOPULATED_ENTRIES);
     vm1.invoke(InterestResultPolicyDUnitTest.class, "createClientCache", new Object[] {
-      getServerHostName(Host.getHost(0)), new Integer(PORT)});
+      NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT)});
     vm1.invoke(InterestResultPolicyDUnitTest.class, "registerInterest", objArr);
     vm1.invoke(InterestResultPolicyDUnitTest.class, "verifyResult", objArr);
     logger.fine("testPolicyKeyValues END");
@@ -194,7 +196,7 @@ public class InterestResultPolicyDUnitTest extends DistributedTestCase
     /* registering for 5 extra keys */
     objArr[1] = new Integer(PREPOPULATED_ENTRIES + 5);
     vm1.invoke(InterestResultPolicyDUnitTest.class, "createClientCache", new Object[] {
-      getServerHostName(Host.getHost(0)), new Integer(PORT)});
+      NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT)});
     vm1.invoke(InterestResultPolicyDUnitTest.class, "registerInterest", objArr);
     vm1.invoke(InterestResultPolicyDUnitTest.class, "verifyResult", objArr);
     Integer cnt = (Integer)vm0.invoke(InterestResultPolicyDUnitTest.class,
@@ -334,7 +336,7 @@ public class InterestResultPolicyDUnitTest extends DistributedTestCase
       region1.registerInterest(keylist, policy);
     }
     catch (CacheWriterException e) {
-      fail("failed to register interestlist for the client", e);
+      Assert.fail("failed to register interestlist for the client", e);
     }
   }
 

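Besides the host-name and fail(...) substitutions, this file shows the new IgnoredException lifecycle that replaces addExpectedException: the filter is registered in preTearDown() before the caches are closed and removed again in postTearDown(). A condensed sketch, using the field and hook names from the hunks above with the cache-closing invocations elided:

    private IgnoredException expectedEx;

    @Override
    protected final void preTearDown() throws Exception {
      // might get ServerConnectivityExceptions during shutdown
      this.expectedEx = IgnoredException.addIgnoredException(
          ServerConnectivityException.class.getName());
      // close server and client caches, as before
    }

    @Override
    protected void postTearDown() throws Exception {
      if (this.expectedEx != null) {
        this.expectedEx.remove();
      }
    }
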
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/NewRegionAttributesDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/NewRegionAttributesDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/NewRegionAttributesDUnitTest.java
index 74fde05..97bef89 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/NewRegionAttributesDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/NewRegionAttributesDUnitTest.java
@@ -32,6 +32,7 @@ import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 /**
  * This tests that basic entry operations work properly when regions are
@@ -73,7 +74,7 @@ public class NewRegionAttributesDUnitTest extends DistributedTestCase
   public void setUp() throws Exception {
     super.setUp();
     disconnectAllFromDS();
-    pause(5000);
+    Wait.pause(5000);
     final Host host = Host.getHost(0);
     vm0 = host.getVM(0);
     vm1 = host.getVM(1);
@@ -98,8 +99,8 @@ public class NewRegionAttributesDUnitTest extends DistributedTestCase
    * @throws Exception
    *           thrown if any problem occurs while closing the cache
    */
-  public void tearDown2() throws Exception
-  {
+  @Override
+  protected final void preTearDown() throws Exception {
     vm0.invoke(NewRegionAttributesDUnitTest.class, "closeCache");
     vm1.invoke(NewRegionAttributesDUnitTest.class, "closeCache");
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RedundancyLevelPart1DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RedundancyLevelPart1DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RedundancyLevelPart1DUnitTest.java
index 879d878..704a82a 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RedundancyLevelPart1DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RedundancyLevelPart1DUnitTest.java
@@ -16,8 +16,12 @@
  */
 package com.gemstone.gemfire.internal.cache.tier.sockets;
 
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Tests Redundancy Level Functionality
@@ -46,7 +50,7 @@ public class RedundancyLevelPart1DUnitTest extends RedundancyLevelTestBase
         + ") never became " + expected;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+    Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
   }
   
   /*
@@ -56,7 +60,7 @@ public class RedundancyLevelPart1DUnitTest extends RedundancyLevelTestBase
   public void testRedundancyNotSpecifiedNonPrimaryServerFail()
   {    
     try {
-      createClientCache(getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 0);
+      createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 0);
       verifyOrderOfEndpoints();
       server2.invoke(RedundancyLevelTestBase.class, "stopServer");
       //pause(5000);      
@@ -74,10 +78,10 @@ public class RedundancyLevelPart1DUnitTest extends RedundancyLevelTestBase
           return "pool still contains " + SERVER3;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 30 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 30 * 1000, 1000, true);
     }
     catch (Exception ex) {
-      fail(
+      Assert.fail(
           "test failed due to exception in test RedundancyNotSpecifiedNonPrimaryServerFail ",
           ex);
     }
@@ -104,7 +108,7 @@ public class RedundancyLevelPart1DUnitTest extends RedundancyLevelTestBase
       //Asif: Increased the socket read timeout to 3000 sec becoz the registering 
       // of keys was timing out sometimes causing fail over to EP4 cozing 
       // below assertion to fail
-      createClientCache(getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 0, 3000, 100);
+      createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 0, 3000, 100);
       assertTrue(pool.getPrimaryName().equals(SERVER1));
       verifyOrderOfEndpoints();
       server0.invoke(RedundancyLevelTestBase.class, "stopServer");
@@ -123,12 +127,12 @@ public class RedundancyLevelPart1DUnitTest extends RedundancyLevelTestBase
           return "pool still contains " + SERVER1;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 30 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 30 * 1000, 1000, true);
       assertFalse(pool.getPrimaryName().equals(SERVER1));
       assertEquals(SERVER2, pool.getPrimaryName());
     }
     catch (Exception ex) {
-      fail(
+      Assert.fail(
           "test failed due to exception in test RedundancyNotSpecifiedPrimaryServerFails ",
           ex);
     }/*finally {
@@ -145,7 +149,7 @@ public class RedundancyLevelPart1DUnitTest extends RedundancyLevelTestBase
   public void testRedundancySpecifiedNonFailoverEPFails()
   {
     try {
-      createClientCache(getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1);
+      createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1);
       waitConnectedServers(4);
       assertEquals(1, pool.getRedundantNames().size());
       //assertTrue(pool.getRedundantNames().contains(SERVER1));      
@@ -166,7 +170,7 @@ public class RedundancyLevelPart1DUnitTest extends RedundancyLevelTestBase
       //assertEquals(1, proxy.getDeadServers().size());
     }
     catch (Exception ex) {
-      fail(
+      Assert.fail(
           "test failed due to exception in test testRedundancySpecifiedNonFailoverEPFails ",
           ex);
     }
@@ -184,7 +188,7 @@ public class RedundancyLevelPart1DUnitTest extends RedundancyLevelTestBase
     try {
       
       FailOverDetectionByCCU = true;
-      createClientCache(getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1, 250, 500);
+      createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1, 250, 500);
       waitConnectedServers(4);
       assertEquals(1, pool.getRedundantNames().size());
       // assertTrue(pool.getRedundantNames()
@@ -206,7 +210,7 @@ public class RedundancyLevelPart1DUnitTest extends RedundancyLevelTestBase
       //assertEquals(1, proxy.getDeadServers().size());
     }
     catch (Exception ex) {
-      fail(
+      Assert.fail(
           "test failed due to exception in test testRedundancySpecifiedNonFailoverEPFailsDetectionByCCU ",
           ex);
     }
@@ -221,7 +225,7 @@ public class RedundancyLevelPart1DUnitTest extends RedundancyLevelTestBase
   public void _testRedundancySpecifiedNonFailoverEPFailsDetectionByRegisterInterest()
   {
     try {
-      createClientCache(getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1,250, 500);
+      createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1,250, 500);
       waitConnectedServers(4);
       assertEquals(1, pool.getRedundantNames().size());
       // assertTrue(pool.getRedundantNames()
@@ -245,7 +249,7 @@ public class RedundancyLevelPart1DUnitTest extends RedundancyLevelTestBase
       //assertEquals(1, proxy.getDeadServers().size());
     }
     catch (Exception ex) {
-      fail(
+      Assert.fail(
           "test failed due to exception in test testRedundancySpecifiedNonFailoverEPFailsDetectionByRegisterInterest ",
           ex);
     }
@@ -261,7 +265,7 @@ public class RedundancyLevelPart1DUnitTest extends RedundancyLevelTestBase
   public void _testRedundancySpecifiedNonFailoverEPFailsDetectionByUnregisterInterest()
   {
     try {
-      createClientCache(getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1,250,500);
+      createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1,250,500);
       waitConnectedServers(4);
       assertEquals(1, pool.getRedundantNames().size());
       // assertTrue(pool.getRedundantNames()
@@ -284,7 +288,7 @@ public class RedundancyLevelPart1DUnitTest extends RedundancyLevelTestBase
       //assertEquals(1, proxy.getDeadServers().size());
     }
     catch (Exception ex) {
-      fail(
+      Assert.fail(
           "test failed due to exception in test testRedundancySpecifiedNonFailoverEPFailsDetectionByUnregisterInterest ",
           ex);
     }    
@@ -299,7 +303,7 @@ public class RedundancyLevelPart1DUnitTest extends RedundancyLevelTestBase
   public void testRedundancySpecifiedNonFailoverEPFailsDetectionByPut()
   {
     try {
-      createClientCache(getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1,500,1000);
+      createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1,500,1000);
       waitConnectedServers(4);
       assertEquals(1, pool.getRedundantNames().size());
       // assertTrue(pool.getRedundantNames()
@@ -322,7 +326,7 @@ public class RedundancyLevelPart1DUnitTest extends RedundancyLevelTestBase
      // assertEquals(1, proxy.getDeadServers().size());
     }
     catch (Exception ex) {
-      fail(
+      Assert.fail(
           "test failed due to exception in test testRedundancySpecifiedNonFailoverEPFailsDetectionByPut ",
           ex);
     }
@@ -339,7 +343,7 @@ public class RedundancyLevelPart1DUnitTest extends RedundancyLevelTestBase
   public void testRedundancySpecifiedNonPrimaryEPFails()
   {
     try {
-      createClientCache(getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1);
+      createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1);
       waitConnectedServers(4);
       assertEquals(1, pool.getRedundantNames().size());
       assertTrue(pool.getPrimaryName().equals(SERVER1));
@@ -361,7 +365,7 @@ public class RedundancyLevelPart1DUnitTest extends RedundancyLevelTestBase
       //assertEquals(1, proxy.getDeadServers().size());
     }
     catch (Exception ex) {
-      fail(
+      Assert.fail(
           "test failed due to exception in test testRedundancySpecifiedNonFailoverEPFails ",
           ex);
     }
@@ -380,7 +384,7 @@ public class RedundancyLevelPart1DUnitTest extends RedundancyLevelTestBase
     try {
       
       FailOverDetectionByCCU = true;
-      createClientCache(getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1, 250, 500);
+      createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1, 250, 500);
       waitConnectedServers(4);
       assertEquals(1, pool.getRedundantNames().size());
       assertTrue(pool.getPrimaryName().equals(SERVER1));
@@ -402,7 +406,7 @@ public class RedundancyLevelPart1DUnitTest extends RedundancyLevelTestBase
       //assertEquals(1, proxy.getDeadServers().size());
     }
     catch (Exception ex) {
-      fail(
+      Assert.fail(
           "test failed due to exception in test testRedundancySpecifiedNonPrimaryEPFailsDetectionByCCU ",
           ex);
     }
@@ -419,7 +423,7 @@ public class RedundancyLevelPart1DUnitTest extends RedundancyLevelTestBase
   public void testRedundancySpecifiedNonPrimaryEPFailsDetectionByRegisterInterest()
   {
     try {
-      createClientCache(getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1,250, 500);
+      createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1,250, 500);
       waitConnectedServers(4);
       assertEquals(1, pool.getRedundantNames().size());
       assertTrue(pool.getPrimaryName().equals(SERVER1));
@@ -443,7 +447,7 @@ public class RedundancyLevelPart1DUnitTest extends RedundancyLevelTestBase
       //assertEquals(1, proxy.getDeadServers().size());
     }
     catch (Exception ex) {
-      fail(
+      Assert.fail(
           "test failed due to exception in test testRedundancySpecifiedNonPrimaryEPFailsDetectionByRegisterInterest ",
           ex);
     }
@@ -460,7 +464,7 @@ public class RedundancyLevelPart1DUnitTest extends RedundancyLevelTestBase
   public void testRedundancySpecifiedNonPrimaryEPFailsDetectionByUnregisterInterest()
   {
     try {
-      createClientCache(getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1,250,500);
+      createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1,250,500);
       waitConnectedServers(4);
       assertEquals(1, pool.getRedundantNames().size());
       assertTrue(pool.getPrimaryName().equals(SERVER1));
@@ -483,7 +487,7 @@ public class RedundancyLevelPart1DUnitTest extends RedundancyLevelTestBase
       //assertEquals(1, proxy.getDeadServers().size());
     }
     catch (Exception ex) {
-      fail(
+      Assert.fail(
           "test failed due to exception in test testRedundancySpecifiedNonPrimaryEPFailsDetectionByUnregisterInterest ",
           ex);
     }
@@ -500,7 +504,7 @@ public class RedundancyLevelPart1DUnitTest extends RedundancyLevelTestBase
   public void testRedundancySpecifiedNonPrimaryEPFailsDetectionByPut()
   {
     try {
-      createClientCache(getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1,250,500);
+      createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1,250,500);
       waitConnectedServers(4);
       assertEquals(1, pool.getRedundantNames().size());
       assertTrue(pool.getPrimaryName().equals(SERVER1));
@@ -527,7 +531,7 @@ public class RedundancyLevelPart1DUnitTest extends RedundancyLevelTestBase
       //assertEquals(1, proxy.getDeadServers().size());
     }
     catch (Exception ex) {
-      fail(
+      Assert.fail(
           "test failed due to exception in test testRedundancySpecifiedNonPrimaryEPFailsDetectionByPut ",
           ex);
     }

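Every fail(message, ex) in this test class becomes Assert.fail(message, ex); the two-argument overload carries the caught exception along with the message, matching the inherited fail(String, Throwable) it replaces. Sketch of the migrated catch block, reusing the message from the first test above:

    catch (Exception ex) {
      // Assert.fail(String, Throwable) reports the message together with the original cause
      Assert.fail(
          "test failed due to exception in test RedundancyNotSpecifiedNonPrimaryServerFail ",
          ex);
    }
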
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RedundancyLevelPart2DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RedundancyLevelPart2DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RedundancyLevelPart2DUnitTest.java
index 1c83206..6f31d3b 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RedundancyLevelPart2DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RedundancyLevelPart2DUnitTest.java
@@ -16,8 +16,12 @@
  */
 package com.gemstone.gemfire.internal.cache.tier.sockets;
 
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 public class RedundancyLevelPart2DUnitTest extends RedundancyLevelTestBase
 {
@@ -42,7 +46,7 @@ public class RedundancyLevelPart2DUnitTest extends RedundancyLevelTestBase
         + ") never became " + expected;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 2 * 60 * 1000, 1000, true);
+    Wait.waitForCriterion(wc, 2 * 60 * 1000, 1000, true);
   }
   
   /*
@@ -58,7 +62,7 @@ public class RedundancyLevelPart2DUnitTest extends RedundancyLevelTestBase
   public void testRedundancySpecifiedPrimaryEPFails()
   {
     try {
-      createClientCache(getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1);
+      createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1);
       waitConnectedServers(4);
       assertTrue(pool.getPrimaryName().equals(SERVER1));
       assertTrue(pool.getRedundantNames().contains(SERVER2));
@@ -79,7 +83,7 @@ public class RedundancyLevelPart2DUnitTest extends RedundancyLevelTestBase
       //assertEquals(1, proxy.getDeadServers().size());
     }
     catch (Exception ex) {
-      fail(
+      Assert.fail(
           "test failed due to exception in test testRedundancySpecifiedPrimaryEPFails ",
           ex);
     }
@@ -100,7 +104,7 @@ public class RedundancyLevelPart2DUnitTest extends RedundancyLevelTestBase
     try {
       
       FailOverDetectionByCCU = true;
-      createClientCache(getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1,3000,100);
+      createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1,3000,100);
       waitConnectedServers(4);
       assertTrue(pool.getPrimaryName().equals(SERVER1));
       assertTrue(pool.getRedundantNames().contains(SERVER2));
@@ -120,7 +124,7 @@ public class RedundancyLevelPart2DUnitTest extends RedundancyLevelTestBase
       //assertEquals(1, proxy.getDeadServers().size());
     }
     catch (Exception ex) {
-      fail(
+      Assert.fail(
           "test failed due to exception in test testRedundancySpecifiedPrimaryEPFailsDetectionByCCU ",
           ex);
     }
@@ -139,7 +143,7 @@ public class RedundancyLevelPart2DUnitTest extends RedundancyLevelTestBase
   public void testRedundancySpecifiedPrimaryEPFailsDetectionByRegisterInterest()
   {
     try {
-      createClientCache(getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1,3000, 100);
+      createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1,3000, 100);
       waitConnectedServers(4);
       assertTrue(pool.getPrimaryName().equals(SERVER1));
       assertTrue(pool.getRedundantNames().contains(SERVER2));
@@ -161,7 +165,7 @@ public class RedundancyLevelPart2DUnitTest extends RedundancyLevelTestBase
       //assertEquals(1, proxy.getDeadServers().size());
     }
     catch (Exception ex) {
-      fail(
+      Assert.fail(
           "test failed due to exception in test testRedundancySpecifiedPrimaryEPFailsDetectionByRegisterInterest ",
           ex);
     }
@@ -180,7 +184,7 @@ public class RedundancyLevelPart2DUnitTest extends RedundancyLevelTestBase
   public void testRedundancySpecifiedPrimaryEPFailsDetectionByUnregisterInterest()
   {
     try {
-      createClientCache(getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1,3000,100);
+      createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1,3000,100);
       waitConnectedServers(4);
       assertTrue(pool.getPrimaryName().equals(SERVER1));
       assertTrue(pool.getRedundantNames().contains(SERVER2));
@@ -201,7 +205,7 @@ public class RedundancyLevelPart2DUnitTest extends RedundancyLevelTestBase
       //assertEquals(1, proxy.getDeadServers().size());
     }
     catch (Exception ex) {
-      fail(
+      Assert.fail(
           "test failed due to exception in test testRedundancySpecifiedPrimaryEPFailsDetectionByUnregisterInterest ",
           ex);
     }
@@ -220,7 +224,7 @@ public class RedundancyLevelPart2DUnitTest extends RedundancyLevelTestBase
   public void testRedundancySpecifiedPrimaryEPFailsDetectionByPut()
   {
     try {
-      createClientCache(getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1,3000, 100);
+      createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1,3000, 100);
       waitConnectedServers(4);
       assertTrue(pool.getPrimaryName().equals(SERVER1));
       assertTrue(pool.getRedundantNames().contains(SERVER2));
@@ -241,7 +245,7 @@ public class RedundancyLevelPart2DUnitTest extends RedundancyLevelTestBase
       //assertEquals(1, proxy.getDeadServers().size());
     }
     catch (Exception ex) {
-      fail(
+      Assert.fail(
           "test failed due to exception in test testRedundancySpecifiedPrimaryEPFailsDetectionByPut ",
           ex);
     }
@@ -256,7 +260,7 @@ public class RedundancyLevelPart2DUnitTest extends RedundancyLevelTestBase
   public void testRedundancySpecifiedPrimarySecondaryEPFails()
   {
     try {
-      createClientCache(getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1);
+      createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1);
       waitConnectedServers(4);
       assertEquals(1, pool.getRedundantNames().size());
       assertTrue(pool.getPrimaryName().equals(SERVER1));
@@ -279,7 +283,7 @@ public class RedundancyLevelPart2DUnitTest extends RedundancyLevelTestBase
           "verifyInterestRegistration");
     }
     catch (Exception ex) {
-      fail(
+      Assert.fail(
           "test failed due to exception in test testRedundancySpecifiedPrimarySecondaryEPFails ",
           ex);
     }
@@ -297,7 +301,7 @@ public class RedundancyLevelPart2DUnitTest extends RedundancyLevelTestBase
   public void testRedundancySpecifiedEPFails()
   {
     try {
-      createClientCache(getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 2);
+      createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 2);
       waitConnectedServers(4);
       assertEquals(2, pool.getRedundantNames().size());
       assertTrue(pool.getPrimaryName().equals(SERVER1));
@@ -331,14 +335,14 @@ public class RedundancyLevelPart2DUnitTest extends RedundancyLevelTestBase
       // not to the active server as redundancy level is satisfied.
       server2.invoke(RedundancyLevelTestBase.class, "startServer");
       //pause(10000);
-      pause(1000);
+      Wait.pause(1000);
       verifyOrderOfEndpoints();
       //assertEquals(3, pool.getRedundantNames().size());
       //assertEquals(4, pool.getConnectedServerCount());
       server2.invoke(RedundancyLevelTestBase.class, "verifyNoCCP");
     }
     catch (Exception ex) {
-      fail(
+      Assert.fail(
           "test failed due to exception in test testRedundancySpecifiedEPFails ",
           ex);
     }
@@ -358,7 +362,7 @@ public class RedundancyLevelPart2DUnitTest extends RedundancyLevelTestBase
       // make sure that the client connects to only two servers and
       // redundancyLevel
       // unsatisfied with one
-      createClientCache(getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 2);
+      createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 2);
       // let the client connect to servers
       //pause(10000);      
       verifyLiveAndRedundantServers(2, 1);
@@ -385,7 +389,7 @@ public class RedundancyLevelPart2DUnitTest extends RedundancyLevelTestBase
       // verify that redundancy level is satisfied
       server1.invoke(RedundancyLevelTestBase.class, "startServer");
       //pause(10000);
-      pause(1000);
+      Wait.pause(1000);
       verifyOrderOfEndpoints();
       //assertEquals(3, pool.getRedundantNames().size());
       //assertEquals(4, pool.getConnectedServerCount());      
@@ -397,7 +401,7 @@ public class RedundancyLevelPart2DUnitTest extends RedundancyLevelTestBase
 
     }
     catch (Exception ex) {
-      fail("test failed due to exception in test noRedundancyLevelServerFail ",
+      Assert.fail("test failed due to exception in test noRedundancyLevelServerFail ",
           ex);
     }
 
@@ -413,7 +417,7 @@ public class RedundancyLevelPart2DUnitTest extends RedundancyLevelTestBase
     try {
       // TODO: Yogesh
       server1.invoke(RedundancyLevelTestBase.class, "stopServer");
-      createClientCache(getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 2);
+      createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 2);
       // let the client connect to servers
       //pause(10000);      
       verifyLiveAndRedundantServers(3, 2);
@@ -426,7 +430,7 @@ public class RedundancyLevelPart2DUnitTest extends RedundancyLevelTestBase
       assertFalse(pool.getRedundantNames().contains(SERVER2));
       // start server
       server1.invoke(RedundancyLevelTestBase.class, "startServer");
-      pause(1000);
+      Wait.pause(1000);
       verifyOrderOfEndpoints();
       //assertEquals(3, pool.getRedundantNames().size());
       //assertEquals(4, pool.getConnectedServerCount());      
@@ -438,7 +442,7 @@ public class RedundancyLevelPart2DUnitTest extends RedundancyLevelTestBase
 
     }
     catch (Exception ex) {
-      fail("test failed due to exception in test noRedundancyLevelServerFail ",
+      Assert.fail("test failed due to exception in test noRedundancyLevelServerFail ",
           ex);
     }
   }
@@ -453,7 +457,7 @@ public class RedundancyLevelPart2DUnitTest extends RedundancyLevelTestBase
     try {
       // TODO: Yogesh
       server2.invoke(RedundancyLevelTestBase.class, "stopServer");
-      createClientCache(getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, -1/* not specified */);
+      createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, -1/* not specified */);
       // let the client connect to servers
       //pause(10000);
       verifyLiveAndRedundantServers(3, 2);
@@ -482,7 +486,7 @@ public class RedundancyLevelPart2DUnitTest extends RedundancyLevelTestBase
           "verifyInterestRegistration");
     }
     catch (Exception ex) {
-      fail("test failed due to exception in test noRedundancyLevelServerFail ",
+      Assert.fail("test failed due to exception in test noRedundancyLevelServerFail ",
           ex);
     }
   }
@@ -518,7 +522,7 @@ public class RedundancyLevelPart2DUnitTest extends RedundancyLevelTestBase
   public void testRedundancySpecifiedMoreThanEPs()
   {
     try {
-      createClientCache(getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 5);
+      createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 5);
       assertEquals(3, pool.getRedundantNames().size());
       server0.invoke(RedundancyLevelTestBase.class, "verifyCCP");
       server1.invoke(RedundancyLevelTestBase.class, "verifyCCP");
@@ -527,7 +531,7 @@ public class RedundancyLevelPart2DUnitTest extends RedundancyLevelTestBase
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      fail(
+      Assert.fail(
           "test failed due to exception in test testRedundancySpecifiedMoreThanEPs ",
           ex);
     }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RedundancyLevelPart3DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RedundancyLevelPart3DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RedundancyLevelPart3DUnitTest.java
index c1266e7..ac5d8b7 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RedundancyLevelPart3DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RedundancyLevelPart3DUnitTest.java
@@ -18,8 +18,10 @@ package com.gemstone.gemfire.internal.cache.tier.sockets;
 
 import com.gemstone.gemfire.internal.cache.ClientServerObserverAdapter;
 import com.gemstone.gemfire.internal.cache.ClientServerObserverHolder;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.cache.client.internal.PoolImpl;
 
 /**
@@ -50,7 +52,7 @@ public class RedundancyLevelPart3DUnitTest extends RedundancyLevelTestBase
   {
     try {
       CacheServerTestUtil.disableShufflingOfEndpoints();
-      createClientCache(getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 3);
+      createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 3);
       createEntriesK1andK2();
       registerK1AndK2();
       assertEquals(3, pool.getRedundantNames().size());
@@ -136,7 +138,7 @@ public class RedundancyLevelPart3DUnitTest extends RedundancyLevelTestBase
     }
      catch (Exception ex) {
         ex.printStackTrace();
-        fail(
+        Assert.fail(
             "test failed due to exception in test testRedundancySpecifiedMoreThanEPs ",
             ex);
      }
@@ -154,7 +156,7 @@ public class RedundancyLevelPart3DUnitTest extends RedundancyLevelTestBase
   {
     try {
       CacheServerTestUtil.disableShufflingOfEndpoints();
-      createClientCache(getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 0);
+      createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 0);
       createEntriesK1andK2();
       registerK1AndK2();
       assertEquals(0, pool.getRedundantNames().size());
@@ -188,7 +190,7 @@ public class RedundancyLevelPart3DUnitTest extends RedundancyLevelTestBase
     }
      catch (Exception ex) {
         ex.printStackTrace();
-        fail(
+        Assert.fail(
             "test failed due to exception in test testRedundancySpecifiedMoreThanEPs ",
             ex);
      }
@@ -206,7 +208,7 @@ public class RedundancyLevelPart3DUnitTest extends RedundancyLevelTestBase
     try {
 //      long maxWaitTime = 60000;
       CacheServerTestUtil.disableShufflingOfEndpoints();
-      createClientCache(getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1);
+      createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), PORT1, PORT2, PORT3, PORT4, 1);
       createEntriesK1andK2();
       registerK1AndK2();
       assertEquals(1, pool.getRedundantNames().size());
@@ -237,7 +239,7 @@ public class RedundancyLevelPart3DUnitTest extends RedundancyLevelTestBase
      }
     catch (Exception ex) {
       ex.printStackTrace();
-      fail(
+      Assert.fail(
           "test failed due to exception in test testRedundancySpecifiedMoreThanEPs ",
           ex);
     }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RedundancyLevelTestBase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RedundancyLevelTestBase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RedundancyLevelTestBase.java
index 7f95f8b..03ed215 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RedundancyLevelTestBase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RedundancyLevelTestBase.java
@@ -40,9 +40,14 @@ import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.ClientServerObserver;
 import com.gemstone.gemfire.internal.cache.ClientServerObserverAdapter;
 import com.gemstone.gemfire.internal.cache.ClientServerObserverHolder;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.internal.cache.CacheServerImpl;
 
 /**
@@ -107,7 +112,7 @@ public class RedundancyLevelTestBase extends DistributedTestCase
     server2 = host.getVM(2);
     server3 = host.getVM(3);
 
-    addExpectedException("java.net.SocketException||java.net.ConnectException");
+    IgnoredException.addIgnoredException("java.net.SocketException||java.net.ConnectException");
 
     // start servers first
     PORT1 = ((Integer)server0.invoke(RedundancyLevelTestBase.class,
@@ -119,7 +124,7 @@ public class RedundancyLevelTestBase extends DistributedTestCase
     PORT4 = ((Integer)server3.invoke(RedundancyLevelTestBase.class,
         "createServerCache")).intValue();
 
-    String hostName = getServerHostName(Host.getHost(0));
+    String hostName = NetworkUtils.getServerHostName(Host.getHost(0));
     SERVER1 = hostName + PORT1;
     SERVER2 = hostName + PORT2;
     SERVER3 = hostName + PORT3;
@@ -169,7 +174,7 @@ public class RedundancyLevelTestBase extends DistributedTestCase
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 3 * 60 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 3 * 60 * 1000, 1000, true);
 
       CacheServerImpl bs = (CacheServerImpl)cache.getCacheServers()
           .iterator().next();
@@ -186,7 +191,7 @@ public class RedundancyLevelTestBase extends DistributedTestCase
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
 
 
       Iterator iter_prox = ccn.getClientProxies().iterator();
@@ -204,13 +209,13 @@ public class RedundancyLevelTestBase extends DistributedTestCase
             return excuse;
           }
         };
-        DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+        Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
         // assertTrue("Dispatcher on primary should be alive",   proxy._messageDispatcher.isAlive());
       }
 
     }
     catch (Exception ex) {
-      fail("while setting verifyDispatcherIsAlive  ", ex);
+      Assert.fail("while setting verifyDispatcherIsAlive  ", ex);
     }
   }
 
@@ -228,7 +233,7 @@ public class RedundancyLevelTestBase extends DistributedTestCase
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 3 * 60 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 3 * 60 * 1000, 1000, true);
 
       CacheServerImpl bs = (CacheServerImpl)cache.getCacheServers()
           .iterator().next();
@@ -245,7 +250,7 @@ public class RedundancyLevelTestBase extends DistributedTestCase
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 3 * 60 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 3 * 60 * 1000, 1000, true);
 
       Iterator iter_prox = ccn.getClientProxies().iterator();
       if (iter_prox.hasNext()) {
@@ -256,7 +261,7 @@ public class RedundancyLevelTestBase extends DistributedTestCase
 
     }
     catch (Exception ex) {
-      fail("while setting verifyDispatcherIsNotAlive  ", ex);
+      Assert.fail("while setting verifyDispatcherIsNotAlive  ", ex);
     }
   }
   
@@ -271,7 +276,7 @@ public class RedundancyLevelTestBase extends DistributedTestCase
         + pool.getRedundantNames() + ") does not contain " + server;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60 * 1000, 2000, true);
+    Wait.waitForCriterion(wc, 60 * 1000, 2000, true);
   }
 
   public static void verifyLiveAndRedundantServers(final int liveServers,
@@ -289,7 +294,7 @@ public class RedundancyLevelTestBase extends DistributedTestCase
             + ") to become " + redundantServers;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 120 * 1000, 2 * 1000, true);
+    Wait.waitForCriterion(wc, 120 * 1000, 2 * 1000, true);
   }
   
   public static void verifyDeadServers(int deadServers )
@@ -326,7 +331,7 @@ public class RedundancyLevelTestBase extends DistributedTestCase
       assertEquals(r1.getEntry(k2).getValue(), k2);
     }
     catch (Exception ex) {
-      fail("failed while createEntries()", ex);
+      Assert.fail("failed while createEntries()", ex);
     }
   }
 
@@ -342,7 +347,7 @@ public class RedundancyLevelTestBase extends DistributedTestCase
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      fail("failed while region.registerK1AndK2()", ex);
+      Assert.fail("failed while region.registerK1AndK2()", ex);
     }
   }
   public static void unregisterInterest()
@@ -352,7 +357,7 @@ public class RedundancyLevelTestBase extends DistributedTestCase
       r.unregisterInterest("k1");      
     }
     catch (Exception e) {
-      fail("test failed due to ", e);
+      Assert.fail("test failed due to ", e);
     }
   }
   
@@ -382,7 +387,7 @@ public class RedundancyLevelTestBase extends DistributedTestCase
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 3 * 60 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 3 * 60 * 1000, 1000, true);
       
       CacheServerImpl bs = (CacheServerImpl)cache.getCacheServers()
           .iterator().next();
@@ -401,10 +406,10 @@ public class RedundancyLevelTestBase extends DistributedTestCase
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 3 * 60 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 3 * 60 * 1000, 1000, true);
     }
     catch (Exception ex) {
-      fail("exception in verifyCCP()", ex);
+      Assert.fail("exception in verifyCCP()", ex);
     }
   }  
 
@@ -420,7 +425,7 @@ public class RedundancyLevelTestBase extends DistributedTestCase
               + cache.getCacheServers().size() + ") never became 1";
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 180 * 1000, 2000, true);
+      Wait.waitForCriterion(wc, 180 * 1000, 2000, true);
 
       CacheServerImpl bs = (CacheServerImpl)cache.getCacheServers()
           .iterator().next();
@@ -436,7 +441,7 @@ public class RedundancyLevelTestBase extends DistributedTestCase
           return "Notifier's proxies is empty";
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 180 * 1000, 2000, true);
+      Wait.waitForCriterion(wc, 180 * 1000, 2000, true);
 
       Iterator iter_prox = ccn.getClientProxies().iterator();
 
@@ -462,7 +467,7 @@ public class RedundancyLevelTestBase extends DistributedTestCase
             return excuse;
           }
         };
-        DistributedTestCase.waitForCriterion(wc, 180 * 1000, 2 * 1000, true);
+        Wait.waitForCriterion(wc, 180 * 1000, 2 * 1000, true);
         
         Set keysMap = ccp.cils[RegisterInterestTracker.interestListIndex]
           .getProfile(Region.SEPARATOR + REGION_NAME)
@@ -489,7 +494,7 @@ public class RedundancyLevelTestBase extends DistributedTestCase
       }
     }
     catch (Exception e) {
-      fail("failed while stopServer()", e);
+      Assert.fail("failed while stopServer()", e);
     }
   }
 
@@ -503,7 +508,7 @@ public class RedundancyLevelTestBase extends DistributedTestCase
       bs.start();
     }
     catch (Exception ex) {
-      fail("while startServer()  ", ex);
+      Assert.fail("while startServer()  ", ex);
     }
   }
 
@@ -619,11 +624,9 @@ public class RedundancyLevelTestBase extends DistributedTestCase
 //     }
   }
 
-  public void tearDown2() throws Exception
-  {
+  @Override
+  protected final void preTearDown() throws Exception {
     try {
-      super.tearDown2();
-    
       if(!FailOverDetectionByCCU)
         ClientServerObserverHolder.setInstance(oldBo);   
     

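The tearDown2 override above becomes a preTearDown hook, and the explicit super.tearDown2() call disappears; the hunks suggest the base class now drives teardown itself and invokes these hooks at fixed points. The resulting subclass shape, in outline with the bodies abbreviated:

    @Override
    protected final void preTearDown() throws Exception {
      // test-specific cleanup that must run before the framework tears down the caches
    }

    @Override
    protected void postTearDown() throws Exception {
      // test-specific cleanup that runs after the framework teardown
    }

Some base classes expose their own hook instead, e.g. the postTearDownLocatorTestBase() override in the CacheServerManagementDUnitTest hunk earlier.
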
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RegionCloseDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RegionCloseDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RegionCloseDUnitTest.java
index 88b684c..a6ee5ba 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RegionCloseDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RegionCloseDUnitTest.java
@@ -32,9 +32,13 @@ import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.CacheServerImpl;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheClientProxy;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.cache.client.*;
 
 /**
@@ -75,7 +79,7 @@ public class RegionCloseDUnitTest extends DistributedTestCase
 
     PORT1 =  ((Integer)server1.invoke(RegionCloseDUnitTest.class, "createServerCache" )).intValue();
     client1.invoke(RegionCloseDUnitTest.class, "createClientCache", new Object[] {
-      getServerHostName(host), new Integer(PORT1)});
+      NetworkUtils.getServerHostName(host), new Integer(PORT1)});
 
   }
 
@@ -157,7 +161,7 @@ public class RegionCloseDUnitTest extends DistributedTestCase
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 15 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 15 * 1000, 200, true);
     assertEquals(1, bs.getAcceptor().getCacheClientNotifier().getClientProxies().size());
 
     Iterator iter = bs.getAcceptor().getCacheClientNotifier().getClientProxies().iterator();
@@ -181,7 +185,7 @@ public class RegionCloseDUnitTest extends DistributedTestCase
       pool.destroy();
     }
     catch (Exception ex) {
-      fail("failed while region close", ex);
+      Assert.fail("failed while region close", ex);
     }
   }
 
@@ -196,7 +200,7 @@ public class RegionCloseDUnitTest extends DistributedTestCase
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 40 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 40 * 1000, 200, true);
 
     final CacheServerImpl bs = (CacheServerImpl)c.getCacheServers().iterator()
         .next();
@@ -208,7 +212,7 @@ public class RegionCloseDUnitTest extends DistributedTestCase
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 40 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 40 * 1000, 200, true);
     
     ev = new WaitCriterion() {
       public boolean done() {
@@ -218,7 +222,7 @@ public class RegionCloseDUnitTest extends DistributedTestCase
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 40 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 40 * 1000, 200, true);
     // assertNull(c.getRegion("/"+clientMembershipId));
     assertEquals(0, bs.getAcceptor().getCacheClientNotifier()
         .getClientProxies().size());
@@ -232,13 +236,11 @@ public class RegionCloseDUnitTest extends DistributedTestCase
     }
   }
 
-  public void tearDown2() throws Exception
-  {
-	super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     //close client
     client1.invoke(RegionCloseDUnitTest.class, "closeCache");
     //close server
     server1.invoke(RegionCloseDUnitTest.class, "closeCache");
   }
-
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RegisterInterestBeforeRegionCreationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RegisterInterestBeforeRegionCreationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RegisterInterestBeforeRegionCreationDUnitTest.java
index 6d71b43..fd50d6e 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RegisterInterestBeforeRegionCreationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RegisterInterestBeforeRegionCreationDUnitTest.java
@@ -33,7 +33,10 @@ import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.CacheServerImpl;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.internal.cache.CacheObserverAdapter;
 import com.gemstone.gemfire.internal.cache.CacheObserverHolder;
 import com.gemstone.gemfire.cache.client.*;
@@ -102,9 +105,8 @@ public class RegisterInterestBeforeRegionCreationDUnitTest extends DistributedTe
    * close the cache on all the vms
    * @throws Exception
    */
-  public void tearDown2() throws Exception
-  {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     client1.invoke(RegisterInterestBeforeRegionCreationDUnitTest.class, "closeCache");
     client2.invoke(RegisterInterestBeforeRegionCreationDUnitTest.class, "closeCache");
     server1.invoke(RegisterInterestBeforeRegionCreationDUnitTest.class, "closeCache");
@@ -168,7 +170,7 @@ public class RegisterInterestBeforeRegionCreationDUnitTest extends DistributedTe
             return null;
           }
         };
-        DistributedTestCase.waitForCriterion(ev, 5 * 1000, 200, true);
+        Wait.waitForCriterion(ev, 5 * 1000, 200, true);
       }
     };
     return putFromServer;
@@ -204,11 +206,11 @@ public class RegisterInterestBeforeRegionCreationDUnitTest extends DistributedTe
     
     //client1 connected to server1
     client1.invoke(RegisterInterestBeforeRegionCreationDUnitTest.class, "createClient",
-        new Object[] { getServerHostName(server1.getHost()), new Integer(PORT1) });
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), new Integer(PORT1) });
     
     //client2 connected to server2
     client2.invoke(RegisterInterestBeforeRegionCreationDUnitTest.class, "createClient",
-        new Object[] { getServerHostName(server1.getHost()), new Integer(PORT2) });
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), new Integer(PORT2) });
   }
 
   public static Integer createServer(Boolean createRegion) throws Exception

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RegisterInterestKeysDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RegisterInterestKeysDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RegisterInterestKeysDUnitTest.java
index 691f98d..203a742 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RegisterInterestKeysDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/RegisterInterestKeysDUnitTest.java
@@ -23,9 +23,13 @@ import com.gemstone.gemfire.cache.server.CacheServer;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.AvailablePort;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 import com.gemstone.gemfire.cache.client.*;
 
 /**
@@ -65,7 +69,7 @@ public class RegisterInterestKeysDUnitTest extends DistributedTestCase
   public void setUp() throws Exception {
     super.setUp();
     disconnectAllFromDS();
-    pause(5000);
+    Wait.pause(5000);
 
     final Host host = Host.getHost(0);
     //Server1 VM
@@ -85,15 +89,15 @@ public class RegisterInterestKeysDUnitTest extends DistributedTestCase
       host.getVM(i).invoke(getClass(), "createImpl", null);
     }
 
-    getLogWriter().info("implementation class is " + impl.getClass());
+    LogWriterUtils.getLogWriter().info("implementation class is " + impl.getClass());
 
     PORT1 =  ((Integer)server1.invoke(impl.getClass(), "createServerCache" )).intValue();
     PORT2 =  ((Integer)server2.invoke(impl.getClass(), "createServerCache" )).intValue();
     
     client1.invoke(impl.getClass(), "createClientCache", new Object[] { 
-      getServerHostName(server1.getHost()), new Integer(PORT1),new Integer(PORT2)});
+      NetworkUtils.getServerHostName(server1.getHost()), new Integer(PORT1),new Integer(PORT2)});
     client2.invoke(impl.getClass(), "createClientCache", new Object[] {
-      getServerHostName(server1.getHost()), new Integer(PORT1),new Integer(PORT2)});
+      NetworkUtils.getServerHostName(server1.getHost()), new Integer(PORT1),new Integer(PORT2)});
 
   }
 
@@ -136,7 +140,7 @@ public class RegisterInterestKeysDUnitTest extends DistributedTestCase
       assertEquals(r1.getEntry("key1").getValue(), "key-1");
     }
     catch (Exception ex) {
-      fail("failed while createEntriesK1()", ex);
+      Assert.fail("failed while createEntriesK1()", ex);
     }
   }
 
@@ -220,7 +224,7 @@ public class RegisterInterestKeysDUnitTest extends DistributedTestCase
 
     }
     catch (Exception ex) {
-      fail("failed while registering interest", ex);
+      Assert.fail("failed while registering interest", ex);
     }
   }
 
@@ -232,15 +236,13 @@ public class RegisterInterestKeysDUnitTest extends DistributedTestCase
     }
   }
 
-  public void tearDown2() throws Exception
-  {
+  @Override
+  protected final void preTearDown() throws Exception {
     //close client
     client1.invoke(impl.getClass(), "closeCache");
     client2.invoke(impl.getClass(), "closeCache");
     //close server
     server1.invoke(impl.getClass(), "closeCache");
     server2.invoke(impl.getClass(), "closeCache");
-
   }
-
 }
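
The two-argument fail(String, Throwable) these tests call is not provided by JUnit's own assertions, so the hunks above route it through the dunit Assert utility instead. A hedged sketch of that pattern; the guarded operation is illustrative:

    import com.gemstone.gemfire.test.dunit.Assert;

    public class FailWithCauseSketch {

      static void createEntriesStep() {
        try {
          riskyCacheOperation();
        } catch (Exception ex) {
          // old form: fail("failed while createEntriesK1()", ex);
          Assert.fail("failed while createEntriesK1()", ex);
        }
      }

      private static void riskyCacheOperation() throws Exception {
        // stand-in for the region creation and puts done by the real test
      }
    }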

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ReliableMessagingDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ReliableMessagingDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ReliableMessagingDUnitTest.java
index ffab9b9..ece92d7 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ReliableMessagingDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ReliableMessagingDUnitTest.java
@@ -40,9 +40,15 @@ import com.gemstone.gemfire.internal.cache.ClientServerObserverHolder;
 import com.gemstone.gemfire.internal.cache.ha.HAHelper;
 import com.gemstone.gemfire.internal.cache.ha.HARegionQueue;
 import com.gemstone.gemfire.internal.cache.ha.ThreadIdentifier;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * 
@@ -107,15 +113,15 @@ public class ReliableMessagingDUnitTest extends DistributedTestCase
    * QRM to other redundant servers.    
    */
   public void testPeriodicAckSendByClientPrimaryFailover() throws Exception {    
-    addExpectedException("java.net.ConnectException");
+    IgnoredException.addIgnoredException("java.net.ConnectException");
     createEntries();
     setClientServerObserverForBeforeSendingClientAck();    
     server1.invoke(ReliableMessagingDUnitTest.class, "putOnServer");
-    getLogWriter().info("Entering waitForServerUpdate");
+    LogWriterUtils.getLogWriter().info("Entering waitForServerUpdate");
     waitForServerUpdate();    
-    getLogWriter().info("Entering waitForCallback");
+    LogWriterUtils.getLogWriter().info("Entering waitForCallback");
     waitForCallback();
-    getLogWriter().info("Entering waitForClientAck");
+    LogWriterUtils.getLogWriter().info("Entering waitForClientAck");
     waitForClientAck();
     server2.invoke(ReliableMessagingDUnitTest.class, "checkTidAndSeq");
   }
@@ -145,7 +151,7 @@ public class ReliableMessagingDUnitTest extends DistributedTestCase
           (System.currentTimeMillis() - start) < maxWaitTime);
       sleep(1000);
     }
-    getLogWriter().info("seo = " + seo);
+    LogWriterUtils.getLogWriter().info("seo = " + seo);
     assertTrue("Creation time " + creationTime + " supposed to be same as seo " 
         + seo.getCreationTime(), creationTime == seo.getCreationTime());
   }
@@ -163,7 +169,7 @@ public class ReliableMessagingDUnitTest extends DistributedTestCase
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 10 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 10 * 1000, 200, true);
     Map.Entry entry;
     synchronized (map) {
       Iterator iter = map.entrySet().iterator();
@@ -174,7 +180,7 @@ public class ReliableMessagingDUnitTest extends DistributedTestCase
           .getValue();
       assertFalse(seo.getAckSend());
       creationTime = seo.getCreationTime();
-      getLogWriter().info("seo is " + seo.toString());
+      LogWriterUtils.getLogWriter().info("seo is " + seo.toString());
       assertTrue("Creation time not set", creationTime != 0);
       
       Object args[] =
@@ -235,7 +241,7 @@ public class ReliableMessagingDUnitTest extends DistributedTestCase
       Thread.sleep(ms);
     }
     catch (InterruptedException e) {
-      fail("Interrupted", e);
+      Assert.fail("Interrupted", e);
     }
   }
   
@@ -264,7 +270,7 @@ public class ReliableMessagingDUnitTest extends DistributedTestCase
       }
     }
     catch (Exception e) {
-      fail("failed while stopServer()", e);
+      Assert.fail("failed while stopServer()", e);
     }
   }
   
@@ -293,13 +299,13 @@ public class ReliableMessagingDUnitTest extends DistributedTestCase
     origObserver = ClientServerObserverHolder.setInstance(new ClientServerObserverAdapter() {
       public void beforeSendingClientAck()
       {
-        getLogWriter().info("beforeSendingClientAck invoked");
+        LogWriterUtils.getLogWriter().info("beforeSendingClientAck invoked");
         setCreationTimeTidAndSeq();   
         server1.invoke(ReliableMessagingDUnitTest.class, "stopServer");
         checkServerCount(1,1);
         server2.invoke(ReliableMessagingDUnitTest.class, "checkEmptyDispatchedMsgs");        
         PoolImpl.BEFORE_SENDING_CLIENT_ACK_CALLBACK_FLAG = false;       
-        getLogWriter().info("end of beforeSendingClientAck");
+        LogWriterUtils.getLogWriter().info("end of beforeSendingClientAck");
             }
     });
   }
@@ -370,7 +376,7 @@ public class ReliableMessagingDUnitTest extends DistributedTestCase
     server.setPort(port);
     server.setNotifyBySubscription(true);
     server.start();
-    getLogWriter().info("Server started at PORT = " + port);
+    LogWriterUtils.getLogWriter().info("Server started at PORT = " + port);
 
     return new Integer(server.getPort());
   }
@@ -382,7 +388,7 @@ public class ReliableMessagingDUnitTest extends DistributedTestCase
     props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
     props.setProperty(DistributionConfig.LOCATORS_NAME, "");
     cache = test.createCache(props);
-    String host = getServerHostName(Host.getHost(0));
+    String host = NetworkUtils.getServerHostName(Host.getHost(0));
     PoolImpl p = (PoolImpl)PoolManager.createFactory()
       .addServer(host, PORT1)
       .addServer(host, PORT2)
@@ -408,10 +414,9 @@ public class ReliableMessagingDUnitTest extends DistributedTestCase
     pool = p;
   }
 
-  public void tearDown2() throws Exception
-  {
+  @Override
+  protected final void preTearDown() throws Exception {
     creationTime = 0;
-    super.tearDown2();   
     closeCache();
     server1.invoke(ReliableMessagingDUnitTest.class, "closeCache");
     server2.invoke(ReliableMessagingDUnitTest.class, "closeCache");
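
Two more of the extracted utilities appear in this file: addExpectedException becomes IgnoredException.addIgnoredException, and the inherited getLogWriter() becomes LogWriterUtils.getLogWriter(). A short sketch of a test body using both; the exception text and log message come from the hunks above, everything else is illustrative:

    import com.gemstone.gemfire.test.dunit.IgnoredException;
    import com.gemstone.gemfire.test.dunit.LogWriterUtils;

    public class IgnoredExceptionSketch {

      public void testPeriodicAckWithFailover() {
        // old form: addExpectedException("java.net.ConnectException");
        IgnoredException.addIgnoredException("java.net.ConnectException");

        // old form: getLogWriter().info("Entering waitForServerUpdate");
        LogWriterUtils.getLogWriter().info("Entering waitForServerUpdate");
      }
    }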

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/UnregisterInterestDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/UnregisterInterestDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/UnregisterInterestDUnitTest.java
index c4ebead..dedd915 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/UnregisterInterestDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/UnregisterInterestDUnitTest.java
@@ -37,9 +37,13 @@ import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.FilterProfile;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * @author ashetkar
@@ -78,7 +82,8 @@ public class UnregisterInterestDUnitTest extends DistributedTestCase {
     client2.invoke(UnregisterInterestDUnitTest.class, "createClientCache", new Object[]{client2.getHost(), port});
   }
 
-  public void tearDown2() throws Exception {
+  @Override
+  protected final void preTearDown() throws Exception {
     closeCache();
     server0.invoke(UnregisterInterestDUnitTest.class, "closeCache");
     client1.invoke(UnregisterInterestDUnitTest.class, "closeCache");
@@ -214,7 +219,7 @@ public class UnregisterInterestDUnitTest extends DistributedTestCase {
       checkFilters(value, valueInv);
       break;
     default:
-      fail("Invalid interest type: " + interestType, new IllegalArgumentException("Invalid interest type: " + interestType));
+      Assert.fail("Invalid interest type: " + interestType, new IllegalArgumentException("Invalid interest type: " + interestType));
     }
   }
 
@@ -258,7 +263,7 @@ public class UnregisterInterestDUnitTest extends DistributedTestCase {
     case filter:
       break;
     default:
-      fail("Invalid interest type: " + interestType, new IllegalArgumentException("Invalid interest type: " + interestType));
+      Assert.fail("Invalid interest type: " + interestType, new IllegalArgumentException("Invalid interest type: " + interestType));
     }
   }
 
@@ -302,14 +307,14 @@ public class UnregisterInterestDUnitTest extends DistributedTestCase {
         return "Expected to receive " + inv + " invalidates but received " + pool.getInvalidateCount();
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 10000, 100, true);
+    Wait.waitForCriterion(wc, 10000, 100, true);
   }
 
   public static Integer createCacheAndStartServer() throws Exception {
     DistributedSystem ds = new UnregisterInterestDUnitTest("UnregisterInterestDUnitTest").getSystem();
     ds.disconnect();
     Properties props = new Properties();
-    props.setProperty("locators", "localhost["+getDUnitLocatorPort()+"]");
+    props.setProperty("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
     CacheFactory cf = new CacheFactory(props);
     cache = cf.create();
     RegionFactory rf = ((GemFireCacheImpl)cache).createRegionFactory(RegionShortcut.REPLICATE);
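
The locator port for the dunit run is now read from DistributedTestUtils rather than an inherited helper. A minimal sketch of building the locators property the way the hunk above does; the surrounding Properties handling is illustrative:

    import java.util.Properties;

    import com.gemstone.gemfire.test.dunit.DistributedTestUtils;

    public class LocatorPropsSketch {

      static Properties dunitLocatorProps() {
        Properties props = new Properties();
        // old form: "localhost[" + getDUnitLocatorPort() + "]"
        props.setProperty("locators",
            "localhost[" + DistributedTestUtils.getDUnitLocatorPort() + "]");
        return props;
      }
    }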

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/UpdatePropagationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/UpdatePropagationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/UpdatePropagationDUnitTest.java
index c4dca37..0c60c46 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/UpdatePropagationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/UpdatePropagationDUnitTest.java
@@ -44,9 +44,14 @@ import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.distributed.internal.ServerLocation;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.EventID;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  *
@@ -123,12 +128,12 @@ public class UpdatePropagationDUnitTest extends DistributedTestCase
     PORT2 =  ((Integer)server2.invoke(getClass(), "createServerCache" )).intValue();
 
     client1.invoke(getClass(), "createClientCache", new Object[] {
-      getServerHostName(server1.getHost()), new Integer(PORT1),new Integer(PORT2)});
+      NetworkUtils.getServerHostName(server1.getHost()), new Integer(PORT1),new Integer(PORT2)});
     client2.invoke(getClass(), "createClientCache", new Object[] {
-      getServerHostName(server1.getHost()), new Integer(PORT1),new Integer(PORT2)});
+      NetworkUtils.getServerHostName(server1.getHost()), new Integer(PORT1),new Integer(PORT2)});
     
-    addExpectedException("java.net.SocketException");
-    addExpectedException("Unexpected IOException");
+    IgnoredException.addIgnoredException("java.net.SocketException");
+    IgnoredException.addIgnoredException("Unexpected IOException");
 
   }
   
@@ -191,7 +196,7 @@ public class UpdatePropagationDUnitTest extends DistributedTestCase
             return null;
           }
         };
-        DistributedTestCase.waitForCriterion(ev, maxWaitTime, 200, true);
+        Wait.waitForCriterion(ev, maxWaitTime, 200, true);
       }
     });
 
@@ -216,7 +221,7 @@ public class UpdatePropagationDUnitTest extends DistributedTestCase
             return null;
           }
         };
-        DistributedTestCase.waitForCriterion(ev, maxWaitTime, 200, true);
+        Wait.waitForCriterion(ev, maxWaitTime, 200, true);
       }
     });
 
@@ -244,7 +249,7 @@ public class UpdatePropagationDUnitTest extends DistributedTestCase
             return null;
           }
         };
-        DistributedTestCase.waitForCriterion(ev, maxWaitTime, 200, true);
+        Wait.waitForCriterion(ev, maxWaitTime, 200, true);
       }
     });
 
@@ -252,7 +257,7 @@ public class UpdatePropagationDUnitTest extends DistributedTestCase
     // Client1 should not receive updated value while client2 should receive
     client1.invoke(impl.getClass(),
         "acquireConnectionsAndPutonK1andK2",
-        new Object[] { getServerHostName(client1.getHost())});
+        new Object[] { NetworkUtils.getServerHostName(client1.getHost())});
     //pause(5000);
     //Check if both the puts ( on key1 & key2 ) have reached the servers
     server1.invoke(impl.getClass(), "verifyUpdates");
@@ -301,7 +306,7 @@ public class UpdatePropagationDUnitTest extends DistributedTestCase
             return null;
           }
         };
-        DistributedTestCase.waitForCriterion(ev, maxWaitTime, 200, true);
+        Wait.waitForCriterion(ev, maxWaitTime, 200, true);
       }
     });
     client2.invoke(new CacheSerializableRunnable("Wait for server on port1 to be dead") {
@@ -325,7 +330,7 @@ public class UpdatePropagationDUnitTest extends DistributedTestCase
             return null;
           }
         };
-        DistributedTestCase.waitForCriterion(ev, maxWaitTime, 200, true);
+        Wait.waitForCriterion(ev, maxWaitTime, 200, true);
       }
     });
 
@@ -348,7 +353,7 @@ public class UpdatePropagationDUnitTest extends DistributedTestCase
             return null;
           }
         };
-        DistributedTestCase.waitForCriterion(ev, maxWaitTime, 200, true);
+        Wait.waitForCriterion(ev, maxWaitTime, 200, true);
       }
     });
 
@@ -368,18 +373,18 @@ public class UpdatePropagationDUnitTest extends DistributedTestCase
             return null;
           }
         };
-        DistributedTestCase.waitForCriterion(ev, maxWaitTime, 200, true);
+        Wait.waitForCriterion(ev, maxWaitTime, 200, true);
       }
     });
     
-    pause(5000);
+    Wait.pause(5000);
 
     //Do a put on Server1 via Connection object from client1.
     // Client1 should not receive updated value while client2 should receive
     client1.invoke(impl.getClass(),
         "acquireConnectionsAndPutonK1andK2",
-        new Object[] { getServerHostName(client1.getHost())});
-    pause(5000);
+        new Object[] { NetworkUtils.getServerHostName(client1.getHost())});
+    Wait.pause(5000);
     //Check if both the puts ( on key1 & key2 ) have reached the servers
     server1.invoke(impl.getClass(), "verifyUpdates");
     server2.invoke(impl.getClass(), "verifyUpdates");
@@ -465,7 +470,7 @@ public class UpdatePropagationDUnitTest extends DistributedTestCase
       }
     }
     catch (Exception ex) {
-      fail("failed while createEntriesK1andK2()", ex);
+      Assert.fail("failed while createEntriesK1andK2()", ex);
     }
   }
 
@@ -537,7 +542,7 @@ public class UpdatePropagationDUnitTest extends DistributedTestCase
 
     }
     catch (Exception ex) {
-      fail("failed while registering interest", ex);
+      Assert.fail("failed while registering interest", ex);
     }
   }
 
@@ -551,7 +556,7 @@ public class UpdatePropagationDUnitTest extends DistributedTestCase
       assertEquals("key-2", r.getEntry("key2").getValue());
     }
     catch (Exception ex) {
-      fail("failed while verifyNoUpdates()", ex);
+      Assert.fail("failed while verifyNoUpdates()", ex);
     }
   }
 
@@ -571,7 +576,7 @@ public class UpdatePropagationDUnitTest extends DistributedTestCase
       }
     }
     catch (Exception ex) {
-      fail("failed while region", ex);
+      Assert.fail("failed while region", ex);
     }
   }
 
@@ -583,17 +588,15 @@ public class UpdatePropagationDUnitTest extends DistributedTestCase
     }
   }
 
-  public void tearDown2() throws Exception
-  {
+  @Override
+  protected final void preTearDown() throws Exception {
     //close client
     client1.invoke(getClass(), "closeCache");
     client2.invoke(getClass(), "closeCache");
     //close server
     server1.invoke(getClass(), "closeCache");
     server2.invoke(getClass(), "closeCache");
-
   }
-
 }
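
Alongside the static-method moves, each test here replaces its tearDown2 override with the new preTearDown hook; the super.tearDown2() call disappears, which indicates the base class now drives the overall teardown and invokes the hook itself. A hedged sketch of the new shape; the VM fields and closeCache helper are illustrative:

    import com.gemstone.gemfire.test.dunit.DistributedTestCase;
    import com.gemstone.gemfire.test.dunit.VM;

    public abstract class PreTearDownSketch extends DistributedTestCase {

      protected VM client1;
      protected VM server1;

      public PreTearDownSketch(String name) {
        super(name);
      }

      @Override
      protected final void preTearDown() throws Exception {
        // old form: public void tearDown2() { super.tearDown2(); ... }
        client1.invoke(PreTearDownSketch.class, "closeCache");
        server1.invoke(PreTearDownSketch.class, "closeCache");
      }

      public static void closeCache() {
        // close the cache held by this VM; omitted in the sketch
      }
    }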
 
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/VerifyEventIDGenerationInP2PDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/VerifyEventIDGenerationInP2PDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/VerifyEventIDGenerationInP2PDUnitTest.java
index 16fb199..83df124 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/VerifyEventIDGenerationInP2PDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/VerifyEventIDGenerationInP2PDUnitTest.java
@@ -30,6 +30,7 @@ import com.gemstone.gemfire.cache.util.CacheListenerAdapter;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.cache.EntryEventImpl;
 import com.gemstone.gemfire.internal.cache.EventID;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.VM;
@@ -148,7 +149,7 @@ public class VerifyEventIDGenerationInP2PDUnitTest extends DistributedTestCase
       assertEquals(r.getEntry("key-1").getValue(), "key-1");
     }
     catch (Exception ex) {
-      fail("failed while createEntries()", ex);
+      Assert.fail("failed while createEntries()", ex);
     }
   }
 
@@ -160,7 +161,7 @@ public class VerifyEventIDGenerationInP2PDUnitTest extends DistributedTestCase
       r.get("key-1");
     }
     catch (Exception ex) {
-      fail("failed while r.put()", ex);
+      Assert.fail("failed while r.put()", ex);
     }
   }
 
@@ -171,9 +172,8 @@ public class VerifyEventIDGenerationInP2PDUnitTest extends DistributedTestCase
     return new Boolean(temp);
   }
 
-  public void tearDown2() throws Exception
-  {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     closeCache();
     vm0.invoke(VerifyEventIDGenerationInP2PDUnitTest.class, "closeCache");
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/VerifyUpdatesFromNonInterestEndPointDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/VerifyUpdatesFromNonInterestEndPointDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/VerifyUpdatesFromNonInterestEndPointDUnitTest.java
index d1c3da7..c2a0016 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/VerifyUpdatesFromNonInterestEndPointDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/VerifyUpdatesFromNonInterestEndPointDUnitTest.java
@@ -33,9 +33,12 @@ import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.EntryEventImpl;
 import com.gemstone.gemfire.internal.cache.EventID;
 import com.gemstone.gemfire.internal.cache.LocalRegion;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 import com.gemstone.gemfire.cache.client.*;
 import com.gemstone.gemfire.cache.client.internal.PoolImpl;
 import com.gemstone.gemfire.cache.client.internal.Connection;
@@ -73,7 +76,7 @@ public class VerifyUpdatesFromNonInterestEndPointDUnitTest extends DistributedTe
   public void setUp() throws Exception {
     super.setUp();
     disconnectAllFromDS();
-    pause(5000);
+    Wait.pause(5000);
     final Host host = Host.getHost(0);
     vm0 = host.getVM(0);
     vm1 = host.getVM(1);
@@ -84,7 +87,7 @@ public class VerifyUpdatesFromNonInterestEndPointDUnitTest extends DistributedTe
     PORT2 =  ((Integer)vm1.invoke(VerifyUpdatesFromNonInterestEndPointDUnitTest.class, "createServerCache" )).intValue();
 
     vm2.invoke(VerifyUpdatesFromNonInterestEndPointDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(vm0.getHost()), new Integer(PORT1),new Integer(PORT2)});
+        new Object[] { NetworkUtils.getServerHostName(vm0.getHost()), new Integer(PORT1),new Integer(PORT2)});
 
 
   }
@@ -110,7 +113,7 @@ public class VerifyUpdatesFromNonInterestEndPointDUnitTest extends DistributedTe
     vm2.invoke(VerifyUpdatesFromNonInterestEndPointDUnitTest.class, "registerKey");
 
     vm2.invoke(VerifyUpdatesFromNonInterestEndPointDUnitTest.class, "acquireConnectionsAndPut", new Object[] { new Integer(PORT2)});
-    pause(30000);
+    Wait.pause(30000);
     vm2.invoke(VerifyUpdatesFromNonInterestEndPointDUnitTest.class, "verifyPut");
   }
 
@@ -155,7 +158,7 @@ public class VerifyUpdatesFromNonInterestEndPointDUnitTest extends DistributedTe
       assertEquals(r1.getEntry("key-2").getValue(), "key-2");
     }
     catch (Exception ex) {
-      fail("failed while createEntries()", ex);
+      Assert.fail("failed while createEntries()", ex);
     }
   }
 
@@ -217,7 +220,7 @@ public class VerifyUpdatesFromNonInterestEndPointDUnitTest extends DistributedTe
 
     }
     catch (Exception ex) {
-      fail("failed while registerKey()", ex);
+      Assert.fail("failed while registerKey()", ex);
     }
   }
 
@@ -231,7 +234,7 @@ public class VerifyUpdatesFromNonInterestEndPointDUnitTest extends DistributedTe
       assertEquals("key-2", r.getEntry("key-2").getValue());
     }
     catch (Exception ex) {
-      fail("failed while verifyPut()", ex);
+      Assert.fail("failed while verifyPut()", ex);
     }
   }
 
@@ -243,14 +246,12 @@ public class VerifyUpdatesFromNonInterestEndPointDUnitTest extends DistributedTe
     }
   }
 
-  public void tearDown2() throws Exception
-  {
+  @Override
+  protected final void preTearDown() throws Exception {
     //close client
     vm2.invoke(VerifyUpdatesFromNonInterestEndPointDUnitTest.class, "closeCache");
     //close server
     vm0.invoke(VerifyUpdatesFromNonInterestEndPointDUnitTest.class, "closeCache");
     vm1.invoke(VerifyUpdatesFromNonInterestEndPointDUnitTest.class, "closeCache");
-
   }
-
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/versions/RegionVersionVectorJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/versions/RegionVersionVectorJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/versions/RegionVersionVectorJUnitTest.java
index 0efea5a..df85998 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/versions/RegionVersionVectorJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/versions/RegionVersionVectorJUnitTest.java
@@ -35,7 +35,7 @@ import com.gemstone.gemfire.internal.HeapDataOutputStream;
 import com.gemstone.gemfire.internal.InternalDataSerializer;
 import com.gemstone.gemfire.internal.Version;
 import com.gemstone.gemfire.internal.cache.persistence.DiskStoreID;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.junit.categories.UnitTest;
 
 @Category(UnitTest.class)
@@ -56,7 +56,7 @@ public class RegionVersionVectorJUnitTest extends TestCase {
   public void testRegionVersionVectors() throws Exception {
     // this is just a quick set of unit tests for basic RVV functionality
     
-    final String local = DistributedTestCase.getIPLiteral();
+    final String local = NetworkUtils.getIPLiteral();
     InternalDistributedMember server1 = new InternalDistributedMember(local, 101);
     InternalDistributedMember server2 = new InternalDistributedMember(local, 102);
     InternalDistributedMember server3 = new InternalDistributedMember(local, 103);
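
Host and address lookups move onto NetworkUtils, which is what lets a plain JUnit test such as RegionVersionVectorJUnitTest drop its DistributedTestCase import entirely. A small sketch of the two lookups seen in this patch; the wrapper methods are illustrative:

    import com.gemstone.gemfire.test.dunit.Host;
    import com.gemstone.gemfire.test.dunit.NetworkUtils;

    public class NetworkUtilsSketch {

      // old form: DistributedTestCase.getIPLiteral()
      static String localIpLiteral() {
        return NetworkUtils.getIPLiteral();
      }

      // old form: getServerHostName(Host.getHost(0))
      static String serverHostForClients() {
        return NetworkUtils.getServerHostName(Host.getHost(0));
      }
    }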


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerRegionFunctionExecutionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerRegionFunctionExecutionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerRegionFunctionExecutionDUnitTest.java
index 72780f1..be5b082 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerRegionFunctionExecutionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerRegionFunctionExecutionDUnitTest.java
@@ -56,9 +56,13 @@ import com.gemstone.gemfire.internal.cache.PartitionedRegion;
 import com.gemstone.gemfire.internal.cache.functions.TestFunction;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheServerTestUtil;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 public class PRClientServerRegionFunctionExecutionDUnitTest extends PRClientServerTestBase {
   private static final String TEST_FUNCTION7 = TestFunction.TEST_FUNCTION7;
@@ -211,7 +215,7 @@ public class PRClientServerRegionFunctionExecutionDUnitTest extends PRClientServ
     isByName = new Boolean(true);
     toRegister = new Boolean(true);
 
-    final ExpectedException ex = addExpectedException("did not send last result");
+    final IgnoredException ex = IgnoredException.addIgnoredException("did not send last result");
     client.invoke(PRClientServerRegionFunctionExecutionDUnitTest.class,
         "serverSingleKeyExecution_NoLastResult", new Object[] { isByName,
             toRegister });
@@ -485,7 +489,7 @@ public class PRClientServerRegionFunctionExecutionDUnitTest extends PRClientServ
       if (!(ex.getCause() instanceof ServerConnectivityException)
           && !(ex.getCause() instanceof FunctionInvocationTargetException)) {
         ex.printStackTrace();
-        getLogWriter().info("Exception : ", ex);
+        LogWriterUtils.getLogWriter().info("Exception : ", ex);
         fail("Test failed after the execute operation");
       }
     }
@@ -521,8 +525,8 @@ public class PRClientServerRegionFunctionExecutionDUnitTest extends PRClientServ
       assertEquals(1, ((List)rs.getResult()).size());
     } catch (Exception ex) {
         ex.printStackTrace();
-        getLogWriter().info("Exception : ", ex);
-        fail("Test failed after the execute operation", ex);
+        LogWriterUtils.getLogWriter().info("Exception : ", ex);
+        Assert.fail("Test failed after the execute operation", ex);
     }
   }
   
@@ -556,7 +560,7 @@ public class PRClientServerRegionFunctionExecutionDUnitTest extends PRClientServ
       assertEquals(1, ((List)rs.getResult()).size());
     } catch (Exception ex) {
         ex.printStackTrace();
-        getLogWriter().info("Exception : ", ex);
+        LogWriterUtils.getLogWriter().info("Exception : ", ex);
         fail("Test failed after the execute operation");
     }
   }
@@ -668,7 +672,7 @@ public class PRClientServerRegionFunctionExecutionDUnitTest extends PRClientServ
       String excuse;
       public boolean done() {
         int sz = pool.getConnectedServerCount();
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "Checking for the Live Servers : Expected  : " + expectedLiveServers
                 + " Available :" + sz);
         if (sz == expectedLiveServers.intValue()) {
@@ -681,7 +685,7 @@ public class PRClientServerRegionFunctionExecutionDUnitTest extends PRClientServ
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 3 * 60 * 1000, 1000, true);
+    Wait.waitForCriterion(wc, 3 * 60 * 1000, 1000, true);
   }
   
   public static void executeFunction() throws ServerException,
@@ -701,7 +705,7 @@ public class PRClientServerRegionFunctionExecutionDUnitTest extends PRClientServ
       ResultCollector rc1 = dataSet.withFilter(testKeysSet).withArgs(Boolean.TRUE).execute(
           function.getId());
       List l = ((List)rc1.getResult());
-      getLogWriter().info("Result size : " + l.size());
+      LogWriterUtils.getLogWriter().info("Result size : " + l.size());
       assertEquals(3, l.size());
 
       for (Iterator i = l.iterator(); i.hasNext();) {
@@ -709,7 +713,7 @@ public class PRClientServerRegionFunctionExecutionDUnitTest extends PRClientServ
       }
     }
     catch (Exception e) {
-      getLogWriter().info("Got an exception : " + e.getMessage());
+      LogWriterUtils.getLogWriter().info("Got an exception : " + e.getMessage());
       assertTrue(e instanceof EOFException || e instanceof SocketException
           || e instanceof SocketTimeoutException
           || e instanceof ServerException || e instanceof IOException
@@ -730,7 +734,7 @@ public class PRClientServerRegionFunctionExecutionDUnitTest extends PRClientServ
     ResultCollector rc1 = dataSet.withFilter(testKeysSet).withArgs(Boolean.TRUE).execute(
         function.getId());
     List l = ((List)rc1.getResult());
-    getLogWriter().info("Result size : " + l.size());
+    LogWriterUtils.getLogWriter().info("Result size : " + l.size());
     return l;
   }
   
@@ -778,7 +782,7 @@ public class PRClientServerRegionFunctionExecutionDUnitTest extends PRClientServ
     PartitionedRegion region = (PartitionedRegion)cache.getRegion(PartitionedRegionName);
     HashMap localBucket2RegionMap = (HashMap)region
     .getDataStore().getSizeLocally();
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
     "Size of the " + PartitionedRegionName + " in this VM :- "
         + localBucket2RegionMap.size());
     Set entrySet = localBucket2RegionMap.entrySet();
@@ -821,7 +825,7 @@ public class PRClientServerRegionFunctionExecutionDUnitTest extends PRClientServ
             .iterator().next());
       }
       catch (Exception expected) {
-        getLogWriter().info("Exception : " + expected.getMessage());
+        LogWriterUtils.getLogWriter().info("Exception : " + expected.getMessage());
         expected.printStackTrace();
         fail("Test failed after the put operation");
       }
@@ -850,7 +854,7 @@ public class PRClientServerRegionFunctionExecutionDUnitTest extends PRClientServ
       List l = null;
       ResultCollector rc1 = execute(dataSet, testKeysSet, Boolean.TRUE,  function, isByName);
       l = ((List)rc1.getResult());
-      getLogWriter().info("Result size : " + l.size());
+      LogWriterUtils.getLogWriter().info("Result size : " + l.size());
       assertEquals(3, l.size());
       for (Iterator i = l.iterator(); i.hasNext();) {
         assertEquals(Boolean.TRUE, i.next());
@@ -872,7 +876,7 @@ public class PRClientServerRegionFunctionExecutionDUnitTest extends PRClientServ
       assertEquals(origVals, foundVals);
       
     }catch(Exception e){
-      fail("Test failed after the put operation", e);
+      Assert.fail("Test failed after the put operation", e);
       
     }
   }
@@ -902,7 +906,7 @@ public class PRClientServerRegionFunctionExecutionDUnitTest extends PRClientServ
       ResultCollector rc1 = execute(dataSet, testKeysSet, Boolean.TRUE,
           function, isByName);
       l = ((List)rc1.getResult());
-      getLogWriter().info("Result size : " + l.size());
+      LogWriterUtils.getLogWriter().info("Result size : " + l.size());
       assertEquals(3, l.size());
       for (Iterator i = l.iterator(); i.hasNext();) {
         assertTrue(i.next() instanceof MyFunctionExecutionException);
@@ -987,14 +991,14 @@ public class PRClientServerRegionFunctionExecutionDUnitTest extends PRClientServ
       ResultCollector rc1 = execute(dataSet, testKeysSet, Boolean.TRUE,
           function, isByName);
       l = ((List)rc1.getResult());
-      getLogWriter().info("Result size : " + l.size());
+      LogWriterUtils.getLogWriter().info("Result size : " + l.size());
       assertEquals(3, l.size());
       for (Iterator i = l.iterator(); i.hasNext();) {
         assertEquals(Boolean.TRUE, i.next());
       }
 
     }catch(Exception e){
-      fail("Test failed after the put operation", e);
+      Assert.fail("Test failed after the put operation", e);
       
     }
   }
@@ -1023,8 +1027,8 @@ public class PRClientServerRegionFunctionExecutionDUnitTest extends PRClientServ
 
     }catch (Exception ex) {
       ex.printStackTrace();
-      getLogWriter().info("Exception : " , ex);
-      fail("Test failed after the put operation",ex);
+      LogWriterUtils.getLogWriter().info("Exception : " , ex);
+      Assert.fail("Test failed after the put operation",ex);
     }
   }
   
@@ -1067,13 +1071,13 @@ public class PRClientServerRegionFunctionExecutionDUnitTest extends PRClientServ
         }
       });
       l = ((List)rc1.getResult());
-      getLogWriter().info("Result size : " + l.size());
+      LogWriterUtils.getLogWriter().info("Result size : " + l.size());
       assertEquals(3, l.size());
       for (Iterator i = l.iterator(); i.hasNext();) {
         assertEquals(Boolean.TRUE, i.next());
       }
     }catch(Exception e){
-      getLogWriter().info("Exception : " + e.getMessage());
+      LogWriterUtils.getLogWriter().info("Exception : " + e.getMessage());
       e.printStackTrace();
       fail("Test failed after the put operation");
       
@@ -1127,7 +1131,7 @@ public class PRClientServerRegionFunctionExecutionDUnitTest extends PRClientServ
     }
     catch (Throwable e) {
       e.printStackTrace();
-      fail("This is not expected Exception", e);
+      Assert.fail("This is not expected Exception", e);
     }
 
   }
@@ -1163,7 +1167,7 @@ public class PRClientServerRegionFunctionExecutionDUnitTest extends PRClientServ
           .toLocalizedString("return any"))));
     }
     catch (Exception notexpected) {
-      fail("Test failed during execute or sleeping", notexpected);
+      Assert.fail("Test failed during execute or sleeping", notexpected);
     } finally {
       cache.getLogger().info("<ExpectedException action=remove>" +
           "FunctionException" +
@@ -1245,8 +1249,8 @@ public class PRClientServerRegionFunctionExecutionDUnitTest extends PRClientServ
 
     }catch (Exception ex) {
       ex.printStackTrace();
-      getLogWriter().info("Exception : " , ex);
-      fail("Test failed after the put operation",ex);
+      LogWriterUtils.getLogWriter().info("Exception : " , ex);
+      Assert.fail("Test failed after the put operation",ex);
     }
   }
   
@@ -1300,8 +1304,8 @@ public class PRClientServerRegionFunctionExecutionDUnitTest extends PRClientServ
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      getLogWriter().info("Exception : ", ex);
-      fail("Test failed after the put operation", ex);
+      LogWriterUtils.getLogWriter().info("Exception : ", ex);
+      Assert.fail("Test failed after the put operation", ex);
     }
     
     try {
@@ -1320,8 +1324,8 @@ public class PRClientServerRegionFunctionExecutionDUnitTest extends PRClientServ
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      getLogWriter().info("Exception : ", ex);
-      fail("Test failed after the put operation", ex);
+      LogWriterUtils.getLogWriter().info("Exception : ", ex);
+      Assert.fail("Test failed after the put operation", ex);
     }
   }
   
@@ -1382,8 +1386,8 @@ public class PRClientServerRegionFunctionExecutionDUnitTest extends PRClientServ
 
     }catch (Exception ex) {
       ex.printStackTrace();
-      getLogWriter().info("Exception : " , ex);
-      fail("Test failed after the put operation",ex);
+      LogWriterUtils.getLogWriter().info("Exception : " , ex);
+      Assert.fail("Test failed after the put operation",ex);
     }
     
     Region region2 = cache.getRegion(PartitionedRegionName+"2");
@@ -1454,7 +1458,7 @@ public class PRClientServerRegionFunctionExecutionDUnitTest extends PRClientServ
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      fail("This is not expected Exception", ex);
+      Assert.fail("This is not expected Exception", ex);
     }
   }
   
@@ -1494,7 +1498,7 @@ public class PRClientServerRegionFunctionExecutionDUnitTest extends PRClientServ
       });
     }
     catch (Exception expected) {
-      getLogWriter().fine("Exception occured : " + expected.getMessage());
+      LogWriterUtils.getLogWriter().fine("Exception occured : " + expected.getMessage());
       assertTrue(expected.getMessage().contains(
           "No target node found for KEY = " + testKey)
           || expected.getMessage()
@@ -1556,8 +1560,8 @@ public class PRClientServerRegionFunctionExecutionDUnitTest extends PRClientServ
 
     }catch (Exception ex) {
       ex.printStackTrace();
-      getLogWriter().info("Exception : " , ex);
-      fail("Test failed after the put operation",ex);
+      LogWriterUtils.getLogWriter().info("Exception : " , ex);
+      Assert.fail("Test failed after the put operation",ex);
     }
   }
   
@@ -1612,7 +1616,7 @@ public class PRClientServerRegionFunctionExecutionDUnitTest extends PRClientServ
       });
     }
     catch (Exception expected) {
-      getLogWriter().fine("Exception occured : " + expected.getMessage());
+      LogWriterUtils.getLogWriter().fine("Exception occured : " + expected.getMessage());
       assertTrue(expected.getCause().getMessage().contains(
           "Could not create an instance of  com.gemstone.gemfire.internal.cache.execute.PRClientServerRegionFunctionExecutionDUnitTest$UnDeserializable"));
     } finally {
@@ -1633,11 +1637,6 @@ public class PRClientServerRegionFunctionExecutionDUnitTest extends PRClientServ
     }
   }
   
-  @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-  }
-
   /**
    * Attempt to do a client server function execution with an arg that fail deserialization
    * on the server. The client should see an exception instead of a hang if bug 43430 is fixed.

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerRegionFunctionExecutionFailoverDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerRegionFunctionExecutionFailoverDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerRegionFunctionExecutionFailoverDUnitTest.java
index 5ee1472..3615ce4 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerRegionFunctionExecutionFailoverDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerRegionFunctionExecutionFailoverDUnitTest.java
@@ -44,10 +44,15 @@ import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.LocalRegion;
 import com.gemstone.gemfire.internal.cache.functions.TestFunction;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheServerTestUtil;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 public class PRClientServerRegionFunctionExecutionFailoverDUnitTest extends
@@ -64,15 +69,10 @@ public class PRClientServerRegionFunctionExecutionFailoverDUnitTest extends
   @Override
   public void setUp() throws Exception {
     super.setUp();
-    addExpectedException("Connection reset");
-    addExpectedException("SocketTimeoutException");
-    addExpectedException("ServerConnectivityException");
-    addExpectedException("Socket Closed");
-  }
-  
-  @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+    IgnoredException.addIgnoredException("Connection reset");
+    IgnoredException.addIgnoredException("SocketTimeoutException");
+    IgnoredException.addIgnoredException("ServerConnectivityException");
+    IgnoredException.addIgnoredException("Socket Closed");
   }
   
   public void testserverMultiKeyExecution_SocektTimeOut() {
@@ -90,7 +90,7 @@ public class PRClientServerRegionFunctionExecutionFailoverDUnitTest extends
    */
   public void testServerFailoverWithTwoServerAliveHA()
       throws InterruptedException {
-    addExpectedException("FunctionInvocationTargetException");
+    IgnoredException.addIgnoredException("FunctionInvocationTargetException");
     ArrayList commonAttributes = createCommonServerAttributes(
         "TestPartitionedRegion", null, 1, 13, null);
     createClientServerScenarion(commonAttributes, 20, 20, 20);
@@ -116,9 +116,9 @@ public class PRClientServerRegionFunctionExecutionFailoverDUnitTest extends
     client.invoke(PRClientServerRegionFunctionExecutionDUnitTest.class,
         "verifyDeadAndLiveServers", new Object[] { new Integer(1),
             new Integer(2) });
-    DistributedTestCase.join(async[0], 6 * 60 * 1000, getLogWriter());
+    ThreadUtils.join(async[0], 6 * 60 * 1000);
     if (async[0].getException() != null) {
-      fail("UnExpected Exception Occured : ", async[0].getException());
+      Assert.fail("UnExpected Exception Occured : ", async[0].getException());
     }
     List l = (List)async[0].getReturnValue();
     assertEquals(2, l.size());
@@ -130,7 +130,7 @@ public class PRClientServerRegionFunctionExecutionFailoverDUnitTest extends
    */
   public void testServerCacheClosedFailoverWithTwoServerAliveHA()
       throws InterruptedException {
-    addExpectedException("FunctionInvocationTargetException");
+    IgnoredException.addIgnoredException("FunctionInvocationTargetException");
     ArrayList commonAttributes = createCommonServerAttributes(
         "TestPartitionedRegion", null, 1, 13, null);
     createClientServerScenarion(commonAttributes, 20, 20, 20);
@@ -156,9 +156,9 @@ public class PRClientServerRegionFunctionExecutionFailoverDUnitTest extends
     client.invoke(PRClientServerRegionFunctionExecutionDUnitTest.class,
         "verifyDeadAndLiveServers", new Object[] { new Integer(1),
             new Integer(2) });
-    DistributedTestCase.join(async[0], 5 * 60 * 1000, getLogWriter());
+    ThreadUtils.join(async[0], 5 * 60 * 1000);
     if (async[0].getException() != null) {
-      fail("UnExpected Exception Occured : ", async[0].getException());
+      Assert.fail("UnExpected Exception Occured : ", async[0].getException());
     }
     List l = (List)async[0].getReturnValue();
     assertEquals(2, l.size());
@@ -180,7 +180,7 @@ public class PRClientServerRegionFunctionExecutionFailoverDUnitTest extends
 
   public void testOnRegionFailoverWithTwoServerDownHA()
       throws InterruptedException {
-    addExpectedException("FunctionInvocationTargetException");
+    IgnoredException.addIgnoredException("FunctionInvocationTargetException");
     createScenario();
 
     server1.invoke(PRClientServerRegionFunctionExecutionDUnitTest.class,
@@ -194,7 +194,7 @@ public class PRClientServerRegionFunctionExecutionFailoverDUnitTest extends
 
     client.invoke(PRClientServerRegionFunctionExecutionDUnitTest.class,
         "createProxyRegion",
-        new Object[] { getServerHostName(server1.getHost()) });
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()) });
 
     Function function = new TestFunction(true,
         TestFunction.TEST_FUNCTION_HA_REGION);
@@ -211,7 +211,7 @@ public class PRClientServerRegionFunctionExecutionFailoverDUnitTest extends
   // retry attempts is 2
   public void testOnRegionFailoverWithOneServerDownHA()
       throws InterruptedException {
-    addExpectedException("FunctionInvocationTargetException");
+    IgnoredException.addIgnoredException("FunctionInvocationTargetException");
     createScenario();
 
     server1.invokeAsync(PRClientServerRegionFunctionExecutionDUnitTest.class,
@@ -225,7 +225,7 @@ public class PRClientServerRegionFunctionExecutionFailoverDUnitTest extends
 
     client.invoke(PRClientServerRegionFunctionExecutionDUnitTest.class,
         "createProxyRegion",
-        new Object[] { getServerHostName(server1.getHost()) });
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()) });
 
     Function function = new TestFunction(true,
         TestFunction.TEST_FUNCTION_HA_REGION);
@@ -245,7 +245,7 @@ public class PRClientServerRegionFunctionExecutionFailoverDUnitTest extends
    */
   public void testOnRegionFailoverNonHA() throws InterruptedException { // See #47489 before enabling it
     createScenario();
-    addExpectedException("FunctionInvocationTargetException");
+    IgnoredException.addIgnoredException("FunctionInvocationTargetException");
     server1.invoke(PRClientServerRegionFunctionExecutionDUnitTest.class,
         "createReplicatedRegion");
 
@@ -257,7 +257,7 @@ public class PRClientServerRegionFunctionExecutionFailoverDUnitTest extends
 
     client.invoke(PRClientServerRegionFunctionExecutionDUnitTest.class,
         "createProxyRegion",
-        new Object[] { getServerHostName(server1.getHost()) });
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()) });
 
     Function function = new TestFunction(true,
         TestFunction.TEST_FUNCTION_NONHA_REGION);
@@ -290,7 +290,7 @@ public class PRClientServerRegionFunctionExecutionFailoverDUnitTest extends
 
     client.invoke(PRClientServerRegionFunctionExecutionDUnitTest.class,
         "createProxyRegion",
-        new Object[] { getServerHostName(server1.getHost()) });
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()) });
 
     //Make sure the buckets are created.
     client.invoke(new SerializableRunnable() {
@@ -341,7 +341,7 @@ public class PRClientServerRegionFunctionExecutionFailoverDUnitTest extends
 
   public void testServerBucketMovedException() throws InterruptedException {
 
-    addExpectedException("BucketMovedException");
+    IgnoredException.addIgnoredException("BucketMovedException");
     final Host host = Host.getHost(0);
     VM server1 = host.getVM(0);
     VM server2 = host.getVM(1);
@@ -355,7 +355,7 @@ public class PRClientServerRegionFunctionExecutionFailoverDUnitTest extends
 
     final int portLocator = AvailablePort
         .getRandomAvailablePort(AvailablePort.SOCKET);
-    final String hostLocator = getServerHostName(server1.getHost());
+    final String hostLocator = NetworkUtils.getServerHostName(server1.getHost());
     final String locator = hostLocator + "[" + portLocator + "]";
 
     startLocatorInVM(portLocator);
@@ -400,7 +400,7 @@ public class PRClientServerRegionFunctionExecutionFailoverDUnitTest extends
 
   public void testServerBucketMovedException_LocalServer()
       throws InterruptedException {
-    addExpectedException("BucketMovedException");
+    IgnoredException.addIgnoredException("BucketMovedException");
 
     final Host host = Host.getHost(0);
     VM server1 = host.getVM(0);
@@ -413,7 +413,7 @@ public class PRClientServerRegionFunctionExecutionFailoverDUnitTest extends
 
     final int portLocator = AvailablePort
         .getRandomAvailablePort(AvailablePort.SOCKET);
-    final String hostLocator = getServerHostName(server1.getHost());
+    final String hostLocator = NetworkUtils.getServerHostName(server1.getHost());
     final String locator = hostLocator + "[" + portLocator + "]";
 
     startLocatorInVM(portLocator);
@@ -460,14 +460,14 @@ public class PRClientServerRegionFunctionExecutionFailoverDUnitTest extends
     File logFile = new File("locator-" + locatorPort + ".log");
 
     Properties props = new Properties();
-    props = getAllDistributedSystemProperties(props);
+    props = DistributedTestUtils.getAllDistributedSystemProperties(props);
     props.setProperty(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "false");
     
     try {
       locator = Locator.startLocatorAndDS(locatorPort, logFile, null, props);
     }
     catch (IOException e) {
-      fail("Unable to start locator ", e);
+      Assert.fail("Unable to start locator ", e);
     }
   }
   
@@ -492,7 +492,7 @@ public class PRClientServerRegionFunctionExecutionFailoverDUnitTest extends
       server.start();
     }
     catch (IOException e) {
-      fail("Failed to start server ", e);
+      Assert.fail("Failed to start server ", e);
     }
     PartitionAttributesFactory paf = new PartitionAttributesFactory();
     if (isAccessor) {
@@ -505,7 +505,7 @@ public class PRClientServerRegionFunctionExecutionFailoverDUnitTest extends
     attr.setPartitionAttributes(paf.create());
     region = cache.createRegion(regionName, attr.create());
     assertNotNull(region);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region " + regionName + " created Successfully :"
             + region.toString());
     return port;
@@ -538,7 +538,7 @@ public class PRClientServerRegionFunctionExecutionFailoverDUnitTest extends
     RegionAttributes attrs = factory.create();
     region = cache.createRegion(regionName, attrs);
     assertNotNull(region);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Distributed Region " + regionName + " created Successfully :"
             + region.toString());
   }
@@ -547,7 +547,7 @@ public class PRClientServerRegionFunctionExecutionFailoverDUnitTest extends
     for(int i = 0 ; i < 113; i++){
       region.put(i, "KB_"+i);
     }
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Distributed Region " + regionName + " Have size :"
             + region.size());
   }
@@ -556,7 +556,7 @@ public class PRClientServerRegionFunctionExecutionFailoverDUnitTest extends
     Execution execute = FunctionService.onRegion(region);
     ResultCollector rc = execute.withArgs(Boolean.TRUE).execute(
         new TestFunction(true, TestFunction.TEST_FUNCTION_LASTRESULT));
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Exeuction Result :"
             + rc.getResult());
     List l = ((List)rc.getResult());
@@ -564,7 +564,7 @@ public class PRClientServerRegionFunctionExecutionFailoverDUnitTest extends
   }
   
   public static void checkSize(){
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region " + regionName + " Have size :"
             + region.size());
   }

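The hunks above all apply the same mechanical substitution: statics formerly inherited from DistributedTestCase are replaced with calls on the new dunit utility classes. The condensed sketch below is illustrative only; the test class and its connectClient() helper are invented, while the utility calls themselves (IgnoredException.addIgnoredException, NetworkUtils.getServerHostName, Assert.fail, LogWriterUtils.getLogWriter) appear verbatim in the diff above.

import com.gemstone.gemfire.test.dunit.Assert;
import com.gemstone.gemfire.test.dunit.Host;
import com.gemstone.gemfire.test.dunit.IgnoredException;
import com.gemstone.gemfire.test.dunit.LogWriterUtils;
import com.gemstone.gemfire.test.dunit.NetworkUtils;

// Hypothetical test class; only the dunit utility calls are taken from the diff above.
public class ExampleFailoverDUnitTest {

  public void testFailoverStyle() throws Exception {
    // was: addExpectedException("FunctionInvocationTargetException")
    IgnoredException.addIgnoredException("FunctionInvocationTargetException");

    // was: getServerHostName(host), inherited from DistributedTestCase
    Host host = Host.getHost(0);
    String serverHost = NetworkUtils.getServerHostName(host);

    try {
      connectClient(serverHost); // hypothetical helper
    } catch (Exception e) {
      // was: fail("...", e); the (String, Throwable) overload now lives on dunit Assert
      Assert.fail("Unable to connect client", e);
    }

    // was: getLogWriter().info(...), inherited from DistributedTestCase
    LogWriterUtils.getLogWriter().info("Client connected to " + serverHost);
  }

  private void connectClient(String host) throws Exception {
    // placeholder: client/pool setup against the given server host
  }
}

The net effect, as the commit title suggests, is that the helpers move off the inherited DistributedTestCase API onto small, focused utility classes that any test can call statically; the same substitution recurs in the files that follow.
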
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest.java
index 0344a55..f1d354b 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest.java
@@ -47,8 +47,13 @@ import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.cache.PartitionedRegion;
 import com.gemstone.gemfire.internal.cache.functions.TestFunction;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 public class PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest extends
     PRClientServerTestBase {
@@ -329,7 +334,7 @@ public class PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest extends
    */
   public void testServerFailoverWithTwoServerAliveHA()
       throws InterruptedException {
-    addExpectedException("FunctionInvocationTargetException");
+    IgnoredException.addIgnoredException("FunctionInvocationTargetException");
     ArrayList commonAttributes = createCommonServerAttributes(
         "TestPartitionedRegion", null, 1, 13, null);
     createClientServerScenarion(commonAttributes, 20, 20, 20);
@@ -362,9 +367,9 @@ public class PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest extends
     client.invoke(PRClientServerRegionFunctionExecutionDUnitTest.class,
         "verifyDeadAndLiveServers", new Object[] { new Integer(1),
             new Integer(2) });
-    DistributedTestCase.join(async[0], 6 * 60 * 1000, getLogWriter());
+    ThreadUtils.join(async[0], 6 * 60 * 1000);
     if (async[0].getException() != null) {
-      fail("UnExpected Exception Occured : ", async[0].getException());
+      Assert.fail("UnExpected Exception Occured : ", async[0].getException());
     }
     List l = (List)async[0].getReturnValue();
 
@@ -377,7 +382,7 @@ public class PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest extends
    */
   public void testServerCacheClosedFailoverWithTwoServerAliveHA()
       throws InterruptedException {
-    addExpectedException("FunctionInvocationTargetException");
+    IgnoredException.addIgnoredException("FunctionInvocationTargetException");
     ArrayList commonAttributes = createCommonServerAttributes(
         "TestPartitionedRegion", null, 1, 13, null);
     createClientServerScenarion(commonAttributes, 20, 20, 20);
@@ -410,9 +415,9 @@ public class PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest extends
         PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest.class,
         "verifyDeadAndLiveServers", new Object[] { new Integer(1),
             new Integer(2) });
-    DistributedTestCase.join(async[0], 5 * 60 * 1000, getLogWriter());
+    ThreadUtils.join(async[0], 5 * 60 * 1000);
     if (async[0].getException() != null) {
-      fail("UnExpected Exception Occured : ", async[0].getException());
+      Assert.fail("UnExpected Exception Occured : ", async[0].getException());
     }
     List l = (List)async[0].getReturnValue();
     assertEquals(2, l.size());
@@ -515,7 +520,7 @@ public class PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest extends
 
       public boolean done() {
         int sz = pool.getConnectedServerCount();
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "Checking for the Live Servers : Expected  : "
                 + expectedLiveServers + " Available :" + sz);
         if (sz == expectedLiveServers.intValue()) {
@@ -530,7 +535,7 @@ public class PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest extends
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 3 * 60 * 1000, 1000, true);
+    Wait.waitForCriterion(wc, 3 * 60 * 1000, 1000, true);
   }
 
   public static void executeFunction() throws ServerException,
@@ -569,7 +574,7 @@ public class PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest extends
       }
     }
     catch (Exception e) {
-      getLogWriter().info("Got an exception : " + e.getMessage());
+      LogWriterUtils.getLogWriter().info("Got an exception : " + e.getMessage());
       assertTrue(e instanceof EOFException || e instanceof SocketException
           || e instanceof SocketTimeoutException
           || e instanceof ServerException || e instanceof IOException
@@ -590,7 +595,7 @@ public class PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest extends
     ResultCollector rc1 = dataSet.withFilter(testKeysSet)
         .withArgs(Boolean.TRUE).execute(function.getId());
     List l = ((List)rc1.getResult());
-    getLogWriter().info("Result size : " + l.size());
+    LogWriterUtils.getLogWriter().info("Result size : " + l.size());
     return l;
   }
 
@@ -627,7 +632,7 @@ public class PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest extends
         .getRegion(PartitionedRegionName);
     HashMap localBucket2RegionMap = (HashMap)region.getDataStore()
         .getSizeLocally();
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Size of the " + PartitionedRegionName + " in this VM :- "
             + localBucket2RegionMap.size());
     Set entrySet = localBucket2RegionMap.entrySet();
@@ -656,8 +661,8 @@ public class PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest extends
       ResultCollector rc1 = executeOnAll(dataSet, Boolean.TRUE, function,
           isByName);
       List resultList = (List)((List)rc1.getResult());
-      getLogWriter().info("Result size : " + resultList.size());
-      getLogWriter().info("Result are SSSS : " + resultList);
+      LogWriterUtils.getLogWriter().info("Result size : " + resultList.size());
+      LogWriterUtils.getLogWriter().info("Result are SSSS : " + resultList);
       assertEquals(3, resultList.size());
 
       Iterator resultIterator = resultList.iterator();
@@ -692,7 +697,7 @@ public class PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest extends
 
     }
     catch (Exception e) {
-      fail("Test failed after the put operation", e);
+      Assert.fail("Test failed after the put operation", e);
 
     }
   }
@@ -716,13 +721,13 @@ public class PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest extends
       }
       Map resultMap = region.getAll(testKeysList);
       assertTrue(resultMap.equals(origVals));
-      pause(2000);
+      Wait.pause(2000);
       Map secondResultMap = region.getAll(testKeysList);
       assertTrue(secondResultMap.equals(origVals));
 
     }
     catch (Exception e) {
-      fail("Test failed after the put operation", e);
+      Assert.fail("Test failed after the put operation", e);
 
     }
   }
@@ -746,13 +751,13 @@ public class PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest extends
       }
       Map resultMap = region.getAll(testKeysList);
       assertTrue(resultMap.equals(origVals));
-      pause(2000);
+      Wait.pause(2000);
       Map secondResultMap = region.getAll(testKeysList);
       assertTrue(secondResultMap.equals(origVals));
 
     }
     catch (Exception e) {
-      fail("Test failed after the put operation", e);
+      Assert.fail("Test failed after the put operation", e);
 
     }
   }
@@ -794,7 +799,7 @@ public class PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest extends
             .iterator().next());
       }
       catch (Exception expected) {
-        getLogWriter().info("Exception : " + expected.getMessage());
+        LogWriterUtils.getLogWriter().info("Exception : " + expected.getMessage());
         expected.printStackTrace();
         fail("Test failed after the put operation");
       }
@@ -824,7 +829,7 @@ public class PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest extends
       ResultCollector rc1 = execute(dataSet, testKeysSet, Boolean.TRUE,
           function, isByName);
       l = ((List)rc1.getResult());
-      getLogWriter().info("Result size : " + l.size());
+      LogWriterUtils.getLogWriter().info("Result size : " + l.size());
       assertEquals(3, l.size());
       for (Iterator i = l.iterator(); i.hasNext();) {
         assertEquals(Boolean.TRUE, i.next());
@@ -847,7 +852,7 @@ public class PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest extends
 
     }
     catch (Exception e) {
-      fail("Test failed after the put operation", e);
+      Assert.fail("Test failed after the put operation", e);
 
     }
   }
@@ -877,7 +882,7 @@ public class PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest extends
       ResultCollector rc1 = execute(dataSet, testKeysSet, Boolean.TRUE,
           function, isByName);
       l = ((List)rc1.getResult());
-      getLogWriter().info("Result size : " + l.size());
+      LogWriterUtils.getLogWriter().info("Result size : " + l.size());
       assertEquals(3, l.size());
       for (Iterator i = l.iterator(); i.hasNext();) {
         assertEquals(Boolean.TRUE, i.next());
@@ -885,7 +890,7 @@ public class PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest extends
 
     }
     catch (Exception e) {
-      fail("Test failed after the function execution", e);
+      Assert.fail("Test failed after the function execution", e);
 
     }
   }
@@ -916,8 +921,8 @@ public class PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest extends
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      getLogWriter().info("Exception : ", ex);
-      fail("Test failed after the put operation", ex);
+      LogWriterUtils.getLogWriter().info("Exception : ", ex);
+      Assert.fail("Test failed after the put operation", ex);
     }
   }
 
@@ -959,14 +964,14 @@ public class PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest extends
             }
           });
       l = ((List)rc1.getResult());
-      getLogWriter().info("Result size : " + l.size());
+      LogWriterUtils.getLogWriter().info("Result size : " + l.size());
       assertEquals(3, l.size());
       for (Iterator i = l.iterator(); i.hasNext();) {
         assertEquals(Boolean.TRUE, i.next());
       }
     }
     catch (Exception e) {
-      getLogWriter().info("Exception : " + e.getMessage());
+      LogWriterUtils.getLogWriter().info("Exception : " + e.getMessage());
       e.printStackTrace();
       fail("Test failed after the put operation");
 
@@ -1018,7 +1023,7 @@ public class PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest extends
     }
     catch (Throwable e) {
       e.printStackTrace();
-      fail("This is not expected Exception", e);
+      Assert.fail("This is not expected Exception", e);
     }
 
   }
@@ -1053,13 +1058,13 @@ public class PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest extends
     }
     catch (FunctionException expected) {
       expected.printStackTrace();
-      getLogWriter().info("Exception : " + expected.getMessage());
+      LogWriterUtils.getLogWriter().info("Exception : " + expected.getMessage());
       assertTrue(expected.getMessage().startsWith(
           (LocalizedStrings.ExecuteFunction_CANNOT_0_RESULTS_HASRESULT_FALSE
               .toLocalizedString("return any"))));
     }
     catch (Exception notexpected) {
-      fail("Test failed during execute or sleeping", notexpected);
+      Assert.fail("Test failed during execute or sleeping", notexpected);
     }
     finally {
       cache.getLogger().info(
@@ -1114,8 +1119,8 @@ public class PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest extends
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      getLogWriter().info("Exception : ", ex);
-      fail("Test failed after the put operation", ex);
+      LogWriterUtils.getLogWriter().info("Exception : ", ex);
+      Assert.fail("Test failed after the put operation", ex);
     }
   }
 
@@ -1141,7 +1146,7 @@ public class PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest extends
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      fail("This is not expected Exception", ex);
+      Assert.fail("This is not expected Exception", ex);
     }
   }
 
@@ -1178,7 +1183,7 @@ public class PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest extends
           });
     }
     catch (Exception expected) {
-      getLogWriter().fine("Exception occured : " + expected.getMessage());
+      LogWriterUtils.getLogWriter().fine("Exception occured : " + expected.getMessage());
       assertTrue(expected.getMessage().contains(
           "No target node found for KEY = " + testKey)
           || expected.getMessage()
@@ -1240,8 +1245,8 @@ public class PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest extends
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      getLogWriter().info("Exception : ", ex);
-      fail("Test failed after the put operation", ex);
+      LogWriterUtils.getLogWriter().info("Exception : ", ex);
+      Assert.fail("Test failed after the put operation", ex);
     }
   }
 
@@ -1265,9 +1270,4 @@ public class PRClientServerRegionFunctionExecutionNoSingleHopDUnitTest extends
       return dataSet.withArgs(args).execute(function);
     }
   }
-
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-  }
-
 }

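The failover tests in this file also show where the blocking helpers landed: DistributedTestCase.join(...) becomes ThreadUtils.join(...) without the log-writer argument, waitForCriterion and pause move to Wait, and WaitCriterion is now a top-level dunit interface. Below is a self-contained sketch of that pattern; the holder class and the ServerPool stand-in are hypothetical, while the Wait, ThreadUtils and Assert calls mirror the hunks above.

import com.gemstone.gemfire.test.dunit.Assert;
import com.gemstone.gemfire.test.dunit.AsyncInvocation;
import com.gemstone.gemfire.test.dunit.LogWriterUtils;
import com.gemstone.gemfire.test.dunit.ThreadUtils;
import com.gemstone.gemfire.test.dunit.Wait;
import com.gemstone.gemfire.test.dunit.WaitCriterion;

// Hypothetical holder class illustrating the refactored wait/join style.
public class WaitAndJoinExample {

  // Hypothetical minimal stand-in for the client pool used in the tests above.
  public interface ServerPool {
    int getConnectedServerCount();
  }

  // was: DistributedTestCase.waitForCriterion(wc, 3 * 60 * 1000, 1000, true)
  public static void awaitLiveServers(final ServerPool pool, final int expected) {
    WaitCriterion wc = new WaitCriterion() {
      String excuse;

      public boolean done() {
        int sz = pool.getConnectedServerCount();
        LogWriterUtils.getLogWriter().info(
            "Checking for the Live Servers : Expected : " + expected + " Available : " + sz);
        if (sz == expected) {
          return true;
        }
        excuse = "Expected " + expected + " connected servers but found " + sz;
        return false;
      }

      public String description() {
        return excuse;
      }
    };
    Wait.waitForCriterion(wc, 3 * 60 * 1000, 1000, true);
  }

  // was: DistributedTestCase.join(async, 6 * 60 * 1000, getLogWriter())
  public static void joinAndCheck(AsyncInvocation async) {
    ThreadUtils.join(async, 6 * 60 * 1000);
    if (async.getException() != null) {
      Assert.fail("Unexpected exception in async invocation", async.getException());
    }
  }
}
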
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest.java
index cf97932..8e2d2d4 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest.java
@@ -47,8 +47,13 @@ import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.cache.PartitionedRegion;
 import com.gemstone.gemfire.internal.cache.functions.TestFunction;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 public class PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest extends
     PRClientServerTestBase {
@@ -286,11 +291,11 @@ public class PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest e
    */
   public void testServerFailoverWithTwoServerAliveHA()
       throws InterruptedException {
-    addExpectedException("FunctionInvocationTargetException");
-    addExpectedException("Connection reset");
-    addExpectedException("SocketTimeoutException");
-    addExpectedException("ServerConnectivityException");
-    addExpectedException("Socket Closed");
+    IgnoredException.addIgnoredException("FunctionInvocationTargetException");
+    IgnoredException.addIgnoredException("Connection reset");
+    IgnoredException.addIgnoredException("SocketTimeoutException");
+    IgnoredException.addIgnoredException("ServerConnectivityException");
+    IgnoredException.addIgnoredException("Socket Closed");
     ArrayList commonAttributes = createCommonServerAttributes(
         "TestPartitionedRegion", null, 1, 13, null);
     createClientServerScenarion(commonAttributes, 20, 20, 20);
@@ -323,9 +328,9 @@ public class PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest e
     client.invoke(PRClientServerRegionFunctionExecutionDUnitTest.class,
         "verifyDeadAndLiveServers", new Object[] { new Integer(1),
             new Integer(2) });
-    DistributedTestCase.join(async[0], 6 * 60 * 1000, getLogWriter());
+    ThreadUtils.join(async[0], 6 * 60 * 1000);
     if (async[0].getException() != null) {
-      fail("UnExpected Exception Occured : ", async[0].getException());
+      Assert.fail("UnExpected Exception Occured : ", async[0].getException());
     }
     List l = (List)async[0].getReturnValue();
 
@@ -338,11 +343,11 @@ public class PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest e
    */
   public void testServerCacheClosedFailoverWithTwoServerAliveHA()
       throws InterruptedException {
-    addExpectedException("FunctionInvocationTargetException");
-    addExpectedException("Connection reset");
-    addExpectedException("SocketTimeoutException");
-    addExpectedException("ServerConnectivityException");
-    addExpectedException("Socket Closed");
+    IgnoredException.addIgnoredException("FunctionInvocationTargetException");
+    IgnoredException.addIgnoredException("Connection reset");
+    IgnoredException.addIgnoredException("SocketTimeoutException");
+    IgnoredException.addIgnoredException("ServerConnectivityException");
+    IgnoredException.addIgnoredException("Socket Closed");
     ArrayList commonAttributes = createCommonServerAttributes(
         "TestPartitionedRegion", null, 1, 13, null);
     createClientServerScenarion(commonAttributes, 20, 20, 20);
@@ -375,9 +380,9 @@ public class PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest e
         PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest.class,
         "verifyDeadAndLiveServers", new Object[] { new Integer(1),
             new Integer(2) });
-    DistributedTestCase.join(async[0], 5 * 60 * 1000, getLogWriter());
+    ThreadUtils.join(async[0], 5 * 60 * 1000);
     if (async[0].getException() != null) {
-      fail("UnExpected Exception Occured : ", async[0].getException());
+      Assert.fail("UnExpected Exception Occured : ", async[0].getException());
     }
     List l = (List)async[0].getReturnValue();
     assertEquals(2, l.size());
@@ -480,7 +485,7 @@ public class PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest e
 
       public boolean done() {
         int sz = pool.getConnectedServerCount();
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "Checking for the Live Servers : Expected  : "
                 + expectedLiveServers + " Available :" + sz);
         if (sz == expectedLiveServers.intValue()) {
@@ -495,7 +500,7 @@ public class PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest e
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 3 * 60 * 1000, 1000, true);
+    Wait.waitForCriterion(wc, 3 * 60 * 1000, 1000, true);
   }
 
   public static void executeFunction() throws ServerException,
@@ -534,7 +539,7 @@ public class PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest e
       }
     }
     catch (Exception e) {
-      getLogWriter().info("Got an exception : " + e.getMessage());
+      LogWriterUtils.getLogWriter().info("Got an exception : " + e.getMessage());
       assertTrue(e instanceof EOFException || e instanceof SocketException
           || e instanceof SocketTimeoutException
           || e instanceof ServerException || e instanceof IOException
@@ -555,7 +560,7 @@ public class PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest e
     ResultCollector rc1 = dataSet.withFilter(testKeysSet)
         .withArgs(Boolean.TRUE).execute(function.getId());
     List l = ((List)rc1.getResult());
-    getLogWriter().info("Result size : " + l.size());
+    LogWriterUtils.getLogWriter().info("Result size : " + l.size());
     return l;
   }
 
@@ -586,7 +591,7 @@ public class PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest e
         .getRegion(PartitionedRegionName);
     HashMap localBucket2RegionMap = (HashMap)region.getDataStore()
         .getSizeLocally();
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Size of the " + PartitionedRegionName + " in this VM :- "
             + localBucket2RegionMap.size());
     Set entrySet = localBucket2RegionMap.entrySet();
@@ -615,8 +620,8 @@ public class PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest e
       ResultCollector rc1 = executeOnAll(dataSet, Boolean.TRUE, function,
           isByName);
       List resultList = (List)((List)rc1.getResult());
-      getLogWriter().info("Result size : " + resultList.size());
-      getLogWriter().info("Result are SSSS : " + resultList);
+      LogWriterUtils.getLogWriter().info("Result size : " + resultList.size());
+      LogWriterUtils.getLogWriter().info("Result are SSSS : " + resultList);
       assertEquals(3, resultList.size());
 
       Iterator resultIterator = resultList.iterator();
@@ -651,7 +656,7 @@ public class PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest e
 
     }
     catch (Exception e) {
-      fail("Test failed after the put operation", e);
+      Assert.fail("Test failed after the put operation", e);
 
     }
   }
@@ -675,13 +680,13 @@ public class PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest e
       }
       Map resultMap = region.getAll(testKeysList);
       assertTrue(resultMap.equals(origVals));
-      pause(2000);
+      Wait.pause(2000);
       Map secondResultMap = region.getAll(testKeysList);
       assertTrue(secondResultMap.equals(origVals));
 
     }
     catch (Exception e) {
-      fail("Test failed after the put operation", e);
+      Assert.fail("Test failed after the put operation", e);
 
     }
   }
@@ -705,13 +710,13 @@ public class PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest e
       }
       Map resultMap = region.getAll(testKeysList);
       assertTrue(resultMap.equals(origVals));
-      pause(2000);
+      Wait.pause(2000);
       Map secondResultMap = region.getAll(testKeysList);
       assertTrue(secondResultMap.equals(origVals));
 
     }
     catch (Exception e) {
-      fail("Test failed after the put operation", e);
+      Assert.fail("Test failed after the put operation", e);
 
     }
   }
@@ -753,7 +758,7 @@ public class PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest e
             .iterator().next());
       }
       catch (Exception expected) {
-        getLogWriter().info("Exception : " + expected.getMessage());
+        LogWriterUtils.getLogWriter().info("Exception : " + expected.getMessage());
         expected.printStackTrace();
         fail("Test failed after the put operation");
       }
@@ -783,7 +788,7 @@ public class PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest e
       ResultCollector rc1 = execute(dataSet, testKeysSet, Boolean.TRUE,
           function, isByName);
       l = ((List)rc1.getResult());
-      getLogWriter().info("Result size : " + l.size());
+      LogWriterUtils.getLogWriter().info("Result size : " + l.size());
       assertEquals(3, l.size());
       for (Iterator i = l.iterator(); i.hasNext();) {
         assertEquals(Boolean.TRUE, i.next());
@@ -806,7 +811,7 @@ public class PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest e
 
     }
     catch (Exception e) {
-      fail("Test failed after the put operation", e);
+      Assert.fail("Test failed after the put operation", e);
 
     }
   }
@@ -835,7 +840,7 @@ public class PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest e
       ResultCollector rc1 = execute(dataSet, testKeysSet, Boolean.TRUE,
           function, isByName);
       l = ((List)rc1.getResult());
-      getLogWriter().info("Result size : " + l.size());
+      LogWriterUtils.getLogWriter().info("Result size : " + l.size());
       assertEquals(3, l.size());
       for (Iterator i = l.iterator(); i.hasNext();) {
         assertEquals(Boolean.TRUE, i.next());
@@ -843,7 +848,7 @@ public class PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest e
 
     }
     catch (Exception e) {
-      fail("Test failed after the function execution", e);
+      Assert.fail("Test failed after the function execution", e);
 
     }
   }
@@ -874,8 +879,8 @@ public class PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest e
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      getLogWriter().info("Exception : ", ex);
-      fail("Test failed after the put operation", ex);
+      LogWriterUtils.getLogWriter().info("Exception : ", ex);
+      Assert.fail("Test failed after the put operation", ex);
     }
   }
 
@@ -917,14 +922,14 @@ public class PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest e
             }
           });
       l = ((List)rc1.getResult());
-      getLogWriter().info("Result size : " + l.size());
+      LogWriterUtils.getLogWriter().info("Result size : " + l.size());
       assertEquals(3, l.size());
       for (Iterator i = l.iterator(); i.hasNext();) {
         assertEquals(Boolean.TRUE, i.next());
       }
     }
     catch (Exception e) {
-      getLogWriter().info("Exception : " + e.getMessage());
+      LogWriterUtils.getLogWriter().info("Exception : " + e.getMessage());
       e.printStackTrace();
       fail("Test failed after the put operation");
 
@@ -976,7 +981,7 @@ public class PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest e
     }
     catch (Throwable e) {
       e.printStackTrace();
-      fail("This is not expected Exception", e);
+      Assert.fail("This is not expected Exception", e);
     }
 
   }
@@ -1011,13 +1016,13 @@ public class PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest e
     }
     catch (FunctionException expected) {
       expected.printStackTrace();
-      getLogWriter().info("Exception : " + expected.getMessage());
+      LogWriterUtils.getLogWriter().info("Exception : " + expected.getMessage());
       assertTrue(expected.getMessage().startsWith(
           (LocalizedStrings.ExecuteFunction_CANNOT_0_RESULTS_HASRESULT_FALSE
               .toLocalizedString("return any"))));
     }
     catch (Exception notexpected) {
-      fail("Test failed during execute or sleeping", notexpected);
+      Assert.fail("Test failed during execute or sleeping", notexpected);
     }
     finally {
       cache.getLogger().info(
@@ -1072,8 +1077,8 @@ public class PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest e
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      getLogWriter().info("Exception : ", ex);
-      fail("Test failed after the put operation", ex);
+      LogWriterUtils.getLogWriter().info("Exception : ", ex);
+      Assert.fail("Test failed after the put operation", ex);
     }
   }
 
@@ -1099,7 +1104,7 @@ public class PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest e
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      fail("This is not expected Exception", ex);
+      Assert.fail("This is not expected Exception", ex);
     }
   }
 
@@ -1136,7 +1141,7 @@ public class PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest e
           });
     }
     catch (Exception expected) {
-      getLogWriter().fine("Exception occured : " + expected.getMessage());
+      LogWriterUtils.getLogWriter().fine("Exception occured : " + expected.getMessage());
       assertTrue(expected.getMessage().contains(
           "No target node found for KEY = " + testKey)
           || expected.getMessage()
@@ -1198,8 +1203,8 @@ public class PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest e
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      getLogWriter().info("Exception : ", ex);
-      fail("Test failed after the put operation", ex);
+      LogWriterUtils.getLogWriter().info("Exception : ", ex);
+      Assert.fail("Test failed after the put operation", ex);
     }
   }
 
@@ -1223,9 +1228,4 @@ public class PRClientServerRegionFunctionExecutionSelectorNoSingleHopDUnitTest e
       return dataSet.withArgs(args).execute(function);
     }
   }
-
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerRegionFunctionExecutionSingleHopDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerRegionFunctionExecutionSingleHopDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerRegionFunctionExecutionSingleHopDUnitTest.java
index d6f2fd8..1afd8f5 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerRegionFunctionExecutionSingleHopDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerRegionFunctionExecutionSingleHopDUnitTest.java
@@ -47,8 +47,13 @@ import com.gemstone.gemfire.distributed.DistributedSystemDisconnectedException;
 import com.gemstone.gemfire.internal.cache.PartitionedRegion;
 import com.gemstone.gemfire.internal.cache.functions.TestFunction;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
   public class PRClientServerRegionFunctionExecutionSingleHopDUnitTest extends PRClientServerTestBase {
     private static final String TEST_FUNCTION7 = TestFunction.TEST_FUNCTION7;
 
@@ -64,8 +69,8 @@ import com.gemstone.gemfire.test.dunit.DistributedTestCase;
     @Override
     public void setUp() throws Exception {
       //Workaround for bug #52004
-      addExpectedException("InternalFunctionInvocationTargetException");
-      addExpectedException("Connection refused");
+      IgnoredException.addIgnoredException("InternalFunctionInvocationTargetException");
+      IgnoredException.addIgnoredException("Connection refused");
       super.setUp();
     }
 
@@ -132,7 +137,7 @@ import com.gemstone.gemfire.test.dunit.DistributedTestCase;
       registerFunctionAtServer(function);
       isByName = Boolean.TRUE;
       // add expected exception for server going down after wait
-      final ExpectedException expectedEx = addExpectedException(
+      final IgnoredException expectedEx = IgnoredException.addIgnoredException(
           DistributedSystemDisconnectedException.class.getName(), server1);
       try {
         client.invoke(
@@ -252,8 +257,8 @@ import com.gemstone.gemfire.test.dunit.DistributedTestCase;
      * After 5th attempt function will send Boolean as last result.
      */
     public void testserverMultiKeyExecution_FunctionInvocationTargetException() {
-      addExpectedException("FunctionException: IOException while sending");
-      addExpectedException("java.net.SocketException: Software caused connection abort");
+      IgnoredException.addIgnoredException("FunctionException: IOException while sending");
+      IgnoredException.addIgnoredException("java.net.SocketException: Software caused connection abort");
       createScenario();
       client.invoke(PRClientServerRegionFunctionExecutionSingleHopDUnitTest.class,
           "serverMultiKeyExecution_FunctionInvocationTargetException");
@@ -264,7 +269,7 @@ import com.gemstone.gemfire.test.dunit.DistributedTestCase;
      * object and using the name of the function
      */
     public void testserverMultiKeyExecutionNoResult_byName(){
-      addExpectedException("Cannot send result");
+      IgnoredException.addIgnoredException("Cannot send result");
       createScenario();
       Function function = new TestFunction(false,TEST_FUNCTION7);
       registerFunctionAtServer(function);
@@ -348,9 +353,9 @@ import com.gemstone.gemfire.test.dunit.DistributedTestCase;
       client.invoke(PRClientServerRegionFunctionExecutionDUnitTest.class,
           "verifyDeadAndLiveServers", new Object[] { new Integer(1),
               new Integer(2) });
-      DistributedTestCase.join(async[0], 6 * 60 * 1000, getLogWriter());
+      ThreadUtils.join(async[0], 6 * 60 * 1000);
       if (async[0].getException() != null) {
-        fail("UnExpected Exception Occured : ", async[0].getException());
+        Assert.fail("UnExpected Exception Occured : ", async[0].getException());
       }
       List l = (List)async[0].getReturnValue();
       
@@ -377,9 +382,9 @@ import com.gemstone.gemfire.test.dunit.DistributedTestCase;
       server3.invoke(PRClientServerRegionFunctionExecutionSingleHopDUnitTest.class, "startServerHA");
       server1.invoke(PRClientServerRegionFunctionExecutionSingleHopDUnitTest.class, "closeCacheHA");
       client.invoke(PRClientServerRegionFunctionExecutionSingleHopDUnitTest.class, "verifyDeadAndLiveServers",new Object[]{new Integer(1),new Integer(2)});
-      DistributedTestCase.join(async[0],  5 * 60 * 1000, getLogWriter());
+      ThreadUtils.join(async[0],  5 * 60 * 1000);
       if(async[0].getException() != null){
-        fail("UnExpected Exception Occured : ", async[0].getException());
+        Assert.fail("UnExpected Exception Occured : ", async[0].getException());
       }
       List l = (List)async[0].getReturnValue();
       assertEquals(2, l.size());
@@ -481,7 +486,7 @@ import com.gemstone.gemfire.test.dunit.DistributedTestCase;
         String excuse;
         public boolean done() {
           int sz = pool.getConnectedServerCount();
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "Checking for the Live Servers : Expected  : " + expectedLiveServers
                   + " Available :" + sz);
           if (sz == expectedLiveServers.intValue()) {
@@ -494,7 +499,7 @@ import com.gemstone.gemfire.test.dunit.DistributedTestCase;
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 3 * 60 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 3 * 60 * 1000, 1000, true);
     }
     
     public static void executeFunction() throws ServerException,
@@ -531,7 +536,7 @@ import com.gemstone.gemfire.test.dunit.DistributedTestCase;
         }
       }
       catch (Exception e) {
-        getLogWriter().info("Got an exception : " + e.getMessage());
+        LogWriterUtils.getLogWriter().info("Got an exception : " + e.getMessage());
         assertTrue(e instanceof EOFException || e instanceof SocketException
             || e instanceof SocketTimeoutException
             || e instanceof ServerException || e instanceof IOException
@@ -552,7 +557,7 @@ import com.gemstone.gemfire.test.dunit.DistributedTestCase;
       ResultCollector rc1 = dataSet.withFilter(testKeysSet).withArgs(Boolean.TRUE).execute(
           function.getId());
       List l = ((List)rc1.getResult());
-      getLogWriter().info("Result size : " + l.size());
+      LogWriterUtils.getLogWriter().info("Result size : " + l.size());
       return l;
     }
     
@@ -587,7 +592,7 @@ import com.gemstone.gemfire.test.dunit.DistributedTestCase;
       PartitionedRegion region = (PartitionedRegion)cache.getRegion(PartitionedRegionName);
       HashMap localBucket2RegionMap = (HashMap)region
       .getDataStore().getSizeLocally();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
       "Size of the " + PartitionedRegionName + " in this VM :- "
           + localBucket2RegionMap.size());
       Set entrySet = localBucket2RegionMap.entrySet();
@@ -615,8 +620,8 @@ import com.gemstone.gemfire.test.dunit.DistributedTestCase;
         }
         ResultCollector rc1 = executeOnAll(dataSet, Boolean.TRUE,  function, isByName);
         List resultList = ((List)rc1.getResult());
-        getLogWriter().info("Result size : " + resultList.size());
-        getLogWriter().info("Result are SSSS : " + resultList);
+        LogWriterUtils.getLogWriter().info("Result size : " + resultList.size());
+        LogWriterUtils.getLogWriter().info("Result are SSSS : " + resultList);
         assertEquals(3, resultList.size());
 
 //        while (resultIterator.hasNext()) {
@@ -644,7 +649,7 @@ import com.gemstone.gemfire.test.dunit.DistributedTestCase;
         assertEquals(origVals, foundVals);
         
       }catch(Exception e){
-        fail("Test failed after the put operation", e);
+        Assert.fail("Test failed after the put operation", e);
         
       }
     }
@@ -669,12 +674,12 @@ import com.gemstone.gemfire.test.dunit.DistributedTestCase;
         }
         Map resultMap = region.getAll(testKeysList);
         assertTrue(resultMap.equals(origVals));
-        pause(2000);
+        Wait.pause(2000);
         Map secondResultMap = region.getAll(testKeysList);
         assertTrue(secondResultMap.equals(origVals));
         
       }catch(Exception e){
-        fail("Test failed after the put operation", e);
+        Assert.fail("Test failed after the put operation", e);
         
       }
     }
@@ -698,12 +703,12 @@ import com.gemstone.gemfire.test.dunit.DistributedTestCase;
         }
         Map resultMap = region.getAll(testKeysList);
         assertTrue(resultMap.equals(origVals));
-        pause(2000);
+        Wait.pause(2000);
         Map secondResultMap = region.getAll(testKeysList);
         assertTrue(secondResultMap.equals(origVals));
         
       }catch(Exception e){
-        fail("Test failed after the put operation", e);
+        Assert.fail("Test failed after the put operation", e);
         
       }
     }
@@ -746,7 +751,7 @@ import com.gemstone.gemfire.test.dunit.DistributedTestCase;
               .iterator().next());
         }
         catch (Exception expected) {
-          getLogWriter().info("Exception : " + expected.getMessage());
+          LogWriterUtils.getLogWriter().info("Exception : " + expected.getMessage());
           expected.printStackTrace();
           fail("Test failed after the put operation");
         }
@@ -775,7 +780,7 @@ import com.gemstone.gemfire.test.dunit.DistributedTestCase;
         List l = null;
         ResultCollector rc1 = execute(dataSet, testKeysSet, Boolean.TRUE,  function, isByName);
         l = ((List)rc1.getResult());
-        getLogWriter().info("Result size : " + l.size());
+        LogWriterUtils.getLogWriter().info("Result size : " + l.size());
         assertEquals(3, l.size());
         for (Iterator i = l.iterator(); i.hasNext();) {
           assertEquals(Boolean.TRUE, i.next());
@@ -797,7 +802,7 @@ import com.gemstone.gemfire.test.dunit.DistributedTestCase;
         assertEquals(origVals, foundVals);
         
       }catch(Exception e){
-        fail("Test failed after the put operation", e);
+        Assert.fail("Test failed after the put operation", e);
         
       }
     }
@@ -825,14 +830,14 @@ import com.gemstone.gemfire.test.dunit.DistributedTestCase;
         ResultCollector rc1 = execute(dataSet, testKeysSet, Boolean.TRUE,
             function, isByName);
         l = ((List)rc1.getResult());
-        getLogWriter().info("Result size : " + l.size());
+        LogWriterUtils.getLogWriter().info("Result size : " + l.size());
         assertEquals(3, l.size());
         for (Iterator i = l.iterator(); i.hasNext();) {
           assertEquals(Boolean.TRUE, i.next());
         }
 
       }catch(Exception e){
-        fail("Test failed after the function execution", e);
+        Assert.fail("Test failed after the function execution", e);
         
       }
     }
@@ -861,8 +866,8 @@ import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 
       }catch (Exception ex) {
         ex.printStackTrace();
-        getLogWriter().info("Exception : " , ex);
-        fail("Test failed after the put operation",ex);
+        LogWriterUtils.getLogWriter().info("Exception : " , ex);
+        Assert.fail("Test failed after the put operation",ex);
       }
     }
     
@@ -905,13 +910,13 @@ import com.gemstone.gemfire.test.dunit.DistributedTestCase;
           }
         });
         l = ((List)rc1.getResult());
-        getLogWriter().info("Result size : " + l.size());
+        LogWriterUtils.getLogWriter().info("Result size : " + l.size());
         assertEquals(3, l.size());
         for (Iterator i = l.iterator(); i.hasNext();) {
           assertEquals(Boolean.TRUE, i.next());
         }
       }catch(Exception e){
-        getLogWriter().info("Exception : " + e.getMessage());
+        LogWriterUtils.getLogWriter().info("Exception : " + e.getMessage());
         e.printStackTrace();
         fail("Test failed after the put operation");
         
@@ -965,7 +970,7 @@ import com.gemstone.gemfire.test.dunit.DistributedTestCase;
       }
       catch (Throwable e) {
         e.printStackTrace();
-        fail("This is not expected Exception", e);
+        Assert.fail("This is not expected Exception", e);
       }
 
     }
@@ -998,12 +1003,12 @@ import com.gemstone.gemfire.test.dunit.DistributedTestCase;
         fail("Test failed after the put operation");
       } catch(FunctionException expected) {
         expected.printStackTrace();
-        getLogWriter().info("Exception : " + expected.getMessage());
+        LogWriterUtils.getLogWriter().info("Exception : " + expected.getMessage());
         assertTrue(expected.getMessage().startsWith((LocalizedStrings.ExecuteFunction_CANNOT_0_RESULTS_HASRESULT_FALSE
             .toLocalizedString("return any"))));
       }
       catch (Exception notexpected) {
-        fail("Test failed during execute or sleeping", notexpected);
+        Assert.fail("Test failed during execute or sleeping", notexpected);
       } finally {
         cache.getLogger().info("<ExpectedException action=remove>" +
             "FunctionException" +
@@ -1056,8 +1061,8 @@ import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 
       }catch (Exception ex) {
         ex.printStackTrace();
-        getLogWriter().info("Exception : " , ex);
-        fail("Test failed after the put operation",ex);
+        LogWriterUtils.getLogWriter().info("Exception : " , ex);
+        Assert.fail("Test failed after the put operation",ex);
       }
     }
     
@@ -1083,7 +1088,7 @@ import com.gemstone.gemfire.test.dunit.DistributedTestCase;
       }
       catch (Exception ex) {
         ex.printStackTrace();
-        fail("This is not expected Exception", ex);
+        Assert.fail("This is not expected Exception", ex);
       }
     }
     
@@ -1123,7 +1128,7 @@ import com.gemstone.gemfire.test.dunit.DistributedTestCase;
         });
       }
       catch (Exception expected) {
-        getLogWriter().fine("Exception occured : " + expected.getMessage());
+        LogWriterUtils.getLogWriter().fine("Exception occured : " + expected.getMessage());
         assertTrue(expected.getMessage().contains(
             "No target node found for KEY = " + testKey)
             || expected.getMessage()
@@ -1185,8 +1190,8 @@ import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 
       }catch (Exception ex) {
         ex.printStackTrace();
-        getLogWriter().info("Exception : " , ex);
-        fail("Test failed after the put operation",ex);
+        LogWriterUtils.getLogWriter().info("Exception : " , ex);
+        Assert.fail("Test failed after the put operation",ex);
       }
     }
     
@@ -1210,12 +1215,5 @@ import com.gemstone.gemfire.test.dunit.DistributedTestCase;
         return dataSet.withArgs(args).execute(function);
       }
     }
-    
-    @Override
-    public void tearDown2() throws Exception {
-      super.tearDown2();
-    }
-
-
   }
 

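One detail worth noting from the single-hop test above: addIgnoredException also has an overload that scopes the suppression to a single VM and returns a handle to the caller. The fragment below is hypothetical; only the VM-scoped addIgnoredException call is taken from the diff above, and the remove() cleanup via the returned handle is an assumption, since the removal does not appear in the hunks shown here.

import com.gemstone.gemfire.distributed.DistributedSystemDisconnectedException;
import com.gemstone.gemfire.test.dunit.Host;
import com.gemstone.gemfire.test.dunit.IgnoredException;
import com.gemstone.gemfire.test.dunit.VM;

// Hypothetical fragment illustrating the VM-scoped ignored-exception handle.
public class ScopedIgnoredExceptionExample {

  public void runWithScopedSuppression() {
    VM server1 = Host.getHost(0).getVM(0);

    // Suppress the expected disconnect noise only in server1's logs.
    final IgnoredException expectedEx = IgnoredException.addIgnoredException(
        DistributedSystemDisconnectedException.class.getName(), server1);
    try {
      // ... bounce the server and run the client operation that tolerates the failover ...
    } finally {
      // Assumed cleanup via the returned handle; the remove call itself
      // does not appear in the hunks above.
      expectedEx.remove();
    }
  }
}
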
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerTestBase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerTestBase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerTestBase.java
index 7026ead..4bee088 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerTestBase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerTestBase.java
@@ -51,11 +51,14 @@ import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.functions.TestFunction;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheServerTestUtil;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 public class PRClientServerTestBase extends CacheTestCase {
 
@@ -151,7 +154,7 @@ public class PRClientServerTestBase extends CacheTestCase {
       server1.start();
     }
     catch (IOException e) {
-      fail("Failed to start the Server", e);
+      Assert.fail("Failed to start the Server", e);
     }
     assertTrue(server1.isRunning());
 
@@ -183,7 +186,7 @@ public class PRClientServerTestBase extends CacheTestCase {
       server1.start();
     }
     catch (IOException e) {
-      fail("Failed to start the Server", e);
+      Assert.fail("Failed to start the Server", e);
     }
     assertTrue(server1.isRunning());
 
@@ -216,7 +219,7 @@ public class PRClientServerTestBase extends CacheTestCase {
       server1.start();
     }
     catch (IOException e) {
-      fail("Failed to start the Server", e);
+      Assert.fail("Failed to start the Server", e);
     }
     assertTrue(server1.isRunning());
 
@@ -231,7 +234,7 @@ public class PRClientServerTestBase extends CacheTestCase {
       server1.start();
     }
     catch (IOException e) {
-      fail("Failed to start the Server", e);
+      Assert.fail("Failed to start the Server", e);
     }
     assertTrue(server1.isRunning());
 
@@ -254,7 +257,7 @@ public class PRClientServerTestBase extends CacheTestCase {
       server1.start();
     }       
     catch (IOException e) {
-      fail("Failed to start the Server", e);
+      Assert.fail("Failed to start the Server", e);
     }
     assertTrue(server1.isRunning());
 
@@ -409,7 +412,7 @@ public class PRClientServerTestBase extends CacheTestCase {
   public static void createCacheClientWithoutRegion(String host, Integer port1,
       Integer port2, Integer port3) throws Exception {
     CacheServerTestUtil.disableShufflingOfEndpoints();
-    getLogWriter().info("PRClientServerTestBase#createCacheClientWithoutRegion : creating pool");
+    LogWriterUtils.getLogWriter().info("PRClientServerTestBase#createCacheClientWithoutRegion : creating pool");
     serverPort1 = port1;
     serverPort2 = port2;
     serverPort3 = port3;
@@ -432,7 +435,7 @@ public class PRClientServerTestBase extends CacheTestCase {
   public static void createCacheClientWithDistributedRegion(String host, Integer port1,
       Integer port2, Integer port3) throws Exception {
     CacheServerTestUtil.disableShufflingOfEndpoints();
-    getLogWriter().info("PRClientServerTestBase#createCacheClientWithoutRegion : creating pool");
+    LogWriterUtils.getLogWriter().info("PRClientServerTestBase#createCacheClientWithoutRegion : creating pool");
     serverPort1 = port1;
     serverPort2 = port2;
     serverPort3 = port3;
@@ -474,7 +477,7 @@ public class PRClientServerTestBase extends CacheTestCase {
     serverPort2 = port2;
     serverPort3 = port3;
     client.invoke(PRClientServerTestBase.class, "createCacheClient",
-        new Object[] { getServerHostName(server1.getHost()), port1, port2,
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), port1, port2,
             port3 });
   }
 
@@ -489,7 +492,7 @@ public class PRClientServerTestBase extends CacheTestCase {
         new Object[] {commonAttributes ,new Integer(localMaxMemoryServer2) });
     serverPort1 = port1;
     client.invoke(PRClientServerTestBase.class, "createCacheClient_SingleConnection",
-        new Object[] { getServerHostName(server1.getHost()), port1});
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), port1});
   }
   
   
@@ -510,7 +513,7 @@ public class PRClientServerTestBase extends CacheTestCase {
     serverPort2 = port2;
     serverPort3 = port3;
     client.invoke(PRClientServerTestBase.class, "createCacheClientWith2Regions",
-        new Object[] { getServerHostName(server1.getHost()), port1, port2,
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), port1, port2,
             port3 });
   }
 
@@ -559,7 +562,7 @@ public class PRClientServerTestBase extends CacheTestCase {
     serverPort2 = port2;
     serverPort3 = port3;
     client.invoke(PRClientServerTestBase.class, "createNoSingleHopCacheClient",
-        new Object[] { getServerHostName(server1.getHost()), port1, port2,
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), port1, port2,
             port3 });
   }
   
@@ -579,13 +582,13 @@ public class PRClientServerTestBase extends CacheTestCase {
     serverPort2 = port2;
     serverPort3 = port3;
     client.invoke(PRClientServerTestBase.class, "createNoSingleHopCacheClient",
-        new Object[] { getServerHostName(server1.getHost()), port1, port2,
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), port1, port2,
             port3 });
   }
 
   
   protected void createClientServerScenarionWithoutRegion () {
-    getLogWriter().info("PRClientServerTestBase#createClientServerScenarionWithoutRegion : creating client server");
+    LogWriterUtils.getLogWriter().info("PRClientServerTestBase#createClientServerScenarionWithoutRegion : creating client server");
     createCacheInClientServer();
     Integer port1 = (Integer)server1.invoke(PRClientServerTestBase.class,
         "createCacheServer");
@@ -598,12 +601,12 @@ public class PRClientServerTestBase extends CacheTestCase {
     serverPort3 = port3;
     
     client.invoke(PRClientServerTestBase.class, "createCacheClientWithoutRegion",
-        new Object[] { getServerHostName(server1.getHost()), port1, port2,
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), port1, port2,
             port3 });    
   }
   
   protected void createClientServerScenarionWithDistributedtRegion () {
-    getLogWriter().info("PRClientServerTestBase#createClientServerScenarionWithoutRegion : creating client server");
+    LogWriterUtils.getLogWriter().info("PRClientServerTestBase#createClientServerScenarionWithoutRegion : creating client server");
     createCacheInClientServer();
     Integer port1 = (Integer)server1.invoke(PRClientServerTestBase.class,
         "createCacheServerWithDR");
@@ -617,7 +620,7 @@ public class PRClientServerTestBase extends CacheTestCase {
     
     
     client.invoke(PRClientServerTestBase.class, "createCacheClientWithDistributedRegion",
-        new Object[] { getServerHostName(server1.getHost()), port1, port2,
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), port1, port2,
             port3 });    
   }
 
@@ -675,7 +678,7 @@ public class PRClientServerTestBase extends CacheTestCase {
       assertNotNull(cache);
     }
     catch (Exception e) {
-      fail("Failed while creating the cache", e);
+      Assert.fail("Failed while creating the cache", e);
     }
   }
 
@@ -691,13 +694,13 @@ public class PRClientServerTestBase extends CacheTestCase {
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 2000, 500, false);
+    Wait.waitForCriterion(wc, 2000, 500, false);
     Collection bridgeServers = cache.getCacheServers();
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Start Server Bridge Servers list : " + bridgeServers.size());
     Iterator bridgeIterator = bridgeServers.iterator();
     CacheServer bridgeServer = (CacheServer)bridgeIterator.next();
-    getLogWriter().info("start Server Bridge Server" + bridgeServer);
+    LogWriterUtils.getLogWriter().info("start Server Bridge Server" + bridgeServer);
     try {
       bridgeServer.start();
     }
@@ -718,7 +721,7 @@ public class PRClientServerTestBase extends CacheTestCase {
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 1000, 200, false);
+    Wait.waitForCriterion(wc, 1000, 200, false);
     try {
       Iterator iter = cache.getCacheServers().iterator();
       if (iter.hasNext()) {
@@ -731,8 +734,8 @@ public class PRClientServerTestBase extends CacheTestCase {
     }
   }  
   
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  @Override
+  protected final void postTearDownCacheTestCase() throws Exception {
     closeCache();
     client.invoke(PRClientServerTestBase.class, "closeCache");
     server1.invoke(PRClientServerTestBase.class, "closeCache");
@@ -759,7 +762,7 @@ public class PRClientServerTestBase extends CacheTestCase {
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 1000, 200, false);
+    Wait.waitForCriterion(wc, 1000, 200, false);
     if (cache != null && !cache.isClosed()) {
       cache.close();
     }
@@ -798,7 +801,7 @@ public class PRClientServerTestBase extends CacheTestCase {
      
       
     }catch(Exception e){
-      fail("Test failed ", e);
+      Assert.fail("Test failed ", e);
       
     }
   }
@@ -839,7 +842,7 @@ public class PRClientServerTestBase extends CacheTestCase {
       }
       assertTrue(expectedBucketSet.isEmpty());
     }catch(Exception e){
-      fail("Test failed ", e);
+      Assert.fail("Test failed ", e);
       
     }
   

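The hunks above all follow the GEODE-773 pattern: helpers that used to be inherited from DistributedTestCase are now reached through dedicated utility classes (Wait, Assert, LogWriterUtils, NetworkUtils), and the tearDown2() override gives way to the postTearDownCacheTestCase() hook. The following is only a rough sketch of the resulting call sites, not code from the commit; the test class, test body, and cleanup are invented for illustration.

    import com.gemstone.gemfire.cache30.CacheTestCase;
    import com.gemstone.gemfire.test.dunit.Assert;
    import com.gemstone.gemfire.test.dunit.LogWriterUtils;
    import com.gemstone.gemfire.test.dunit.Wait;
    import com.gemstone.gemfire.test.dunit.WaitCriterion;

    // Hypothetical test class; it only demonstrates the post-GEODE-773 call sites.
    public class ExampleDUnitTest extends CacheTestCase {

      public ExampleDUnitTest(String name) {
        super(name);
      }

      public void testCacheServerIsRegistered() {
        // getLogWriter() is now reached through LogWriterUtils.
        LogWriterUtils.getLogWriter().info("starting example test");
        try {
          getCache().addCacheServer().start();
        } catch (Exception e) {
          // The two-argument fail(message, cause) now lives on the dunit Assert class.
          Assert.fail("could not start a cache server", e);
        }

        // waitForCriterion(criterion, timeoutMs, pollIntervalMs, throwOnTimeout) is now a static on Wait.
        WaitCriterion wc = new WaitCriterion() {
          public boolean done() {
            return !getCache().getCacheServers().isEmpty();
          }
          public String description() {
            return "waiting for the cache server to be registered";
          }
        };
        Wait.waitForCriterion(wc, 2000, 500, false);
      }

      @Override
      protected final void postTearDownCacheTestCase() throws Exception {
        // Replaces the old tearDown2() override; no super.tearDown2() call is needed.
        disconnectFromDS();
      }
    }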

[58/62] [abbrv] incubator-geode git commit: GEODE-268: Adding explanation to OperationContext.isClientUpdate()

Posted by je...@apache.org.
GEODE-268: Adding explanation to OperationContext.isClientUpdate()

This closes #43


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/99e4aaf4
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/99e4aaf4
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/99e4aaf4

Branch: refs/heads/feature/GEODE-17
Commit: 99e4aaf450ef8f9e25f5755d03dc73d73085200c
Parents: 28f5391
Author: Dave Barnes <db...@pivotal.io>
Authored: Mon Nov 23 12:23:08 2015 -0800
Committer: Dan Smith <up...@apache.org>
Committed: Tue Feb 9 22:52:15 2016 -0800

----------------------------------------------------------------------
 .../gemstone/gemfire/cache/operations/OperationContext.java    | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/99e4aaf4/gemfire-core/src/main/java/com/gemstone/gemfire/cache/operations/OperationContext.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/operations/OperationContext.java b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/operations/OperationContext.java
index ab9b442..5cae85d 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/operations/OperationContext.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/operations/OperationContext.java
@@ -481,7 +481,11 @@ public abstract class OperationContext {
   public abstract boolean isPostOperation();
 
   /**
-   * True if the context is created before sending the updates to a client.
+   * When called post-operation, returns true if the operation was one that performed an update.
+   * An update occurs when one of the following methods on <code>getOperationCode()</code> returns true:
+   * <code>isPut()</code>, <code>isPutAll()</code>, <code>isDestroy()</code>, <code>isRemoveAll()</code>,
+   * <code>isInvalidate()</code>, <code>isRegionCreate()</code>, <code>isRegionClear()</code>, <code>isRegionDestroy()</code>.
+   * Otherwise, returns false.
    * 
    * @since 6.6
    */

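The new comment spells out which operation codes count as updates. As a reading aid only, the rule can be restated as a small predicate; the helper class below is invented for illustration and is not part of the Geode API.

    import com.gemstone.gemfire.cache.operations.OperationContext;
    import com.gemstone.gemfire.cache.operations.OperationContext.OperationCode;

    // Hypothetical helper that mirrors the javadoc above.
    public final class UpdateOperations {

      static boolean isUpdateOperation(OperationContext context) {
        OperationCode op = context.getOperationCode();
        // Exactly the codes enumerated in the new comment.
        return op.isPut() || op.isPutAll() || op.isDestroy() || op.isRemoveAll()
            || op.isInvalidate() || op.isRegionCreate() || op.isRegionClear()
            || op.isRegionDestroy();
      }
    }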

[41/62] [abbrv] [partial] incubator-geode git commit: GEODE-773: Extract static methods from DistributedTestCase

Posted by je...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckRegionCCEDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckRegionCCEDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckRegionCCEDUnitTest.java
index 0ba2e27..a29a569 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckRegionCCEDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckRegionCCEDUnitTest.java
@@ -66,7 +66,9 @@ import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
-import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * @author Bruce Schuchardt
@@ -252,7 +254,7 @@ public class DistributedAckRegionCCEDUnitTest extends DistributedAckRegionDUnitT
               "fake_id", "fake_id_ustring", DistributionManager.NORMAL_DM_TYPE, null, null);
           tag.setMemberID(mbr);
         } catch (UnknownHostException e) {
-          fail("could not create member id", e);
+          com.gemstone.gemfire.test.dunit.Assert.fail("could not create member id", e);
         }
         
         // generate an event to distribute that contains the fake version tag
@@ -269,7 +271,7 @@ public class DistributedAckRegionCCEDUnitTest extends DistributedAckRegionDUnitT
     try {
       partialCreate.getResult();
     } catch (Throwable e) {
-      fail("async invocation in vm2 failed", e);
+      com.gemstone.gemfire.test.dunit.Assert.fail("async invocation in vm2 failed", e);
     }
   }  
 
@@ -322,7 +324,7 @@ public class DistributedAckRegionCCEDUnitTest extends DistributedAckRegionDUnitT
           CCRegion.put("cckey0", "ccvalue");
           CCRegion.put("cckey0", "ccvalue");  // version number will end up at 4
         } catch (CacheException ex) {
-          fail("While creating region", ex);
+          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", ex);
         }
       }
     };
@@ -342,7 +344,7 @@ public class DistributedAckRegionCCEDUnitTest extends DistributedAckRegionDUnitT
         try {
           entry.makeTombstone(CCRegion, tag);
         } catch (RegionClearedException e) {
-          fail("region was mysteriously cleared during unit testing", e);
+          com.gemstone.gemfire.test.dunit.Assert.fail("region was mysteriously cleared during unit testing", e);
         }
       }
     });
@@ -401,12 +403,12 @@ public class DistributedAckRegionCCEDUnitTest extends DistributedAckRegionDUnitT
           return "waiting for GC to occur";
         }
       };
-      waitForCriterion(waitForGC, 20000, 1000, true);
-      pause(5000);
+      Wait.waitForCriterion(waitForGC, 20000, 1000, true);
+      Wait.pause(5000);
       long gcCount = CCRegion.getCachePerfStats().getTombstoneGCCount();
       assertTrue("expected a few GCs, but not " + (gcCount - initialCount), gcCount < (initialCount + 20));
     } catch (CacheException ex) {
-      fail("While creating region", ex);
+      com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", ex);
     } finally {
       TombstoneService.EXPIRED_TOMBSTONE_LIMIT = saveExpiredTombstoneLimit;
       TombstoneService.FORCE_GC_MEMORY_EVENTS = false;
@@ -460,7 +462,7 @@ public class DistributedAckRegionCCEDUnitTest extends DistributedAckRegionDUnitT
           }
           assertEquals("expected no conflated events", 0, CCRegion.getCachePerfStats().getConflatedEventsCount());
         } catch (CacheException ex) {
-          fail("While creating region", ex);
+          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", ex);
         }
       }
     };

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckRegionCCEOffHeapDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckRegionCCEOffHeapDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckRegionCCEOffHeapDUnitTest.java
index 4b7e49d..1c1cbaf 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckRegionCCEOffHeapDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckRegionCCEOffHeapDUnitTest.java
@@ -22,6 +22,7 @@ import com.gemstone.gemfire.cache.AttributesFactory;
 import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.cache.OffHeapTestUtil;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 
 /**
@@ -38,7 +39,7 @@ public class DistributedAckRegionCCEOffHeapDUnitTest extends DistributedAckRegio
   }
   
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
     SerializableRunnable checkOrphans = new SerializableRunnable() {
 
       @Override
@@ -48,12 +49,8 @@ public class DistributedAckRegionCCEOffHeapDUnitTest extends DistributedAckRegio
         }
       }
     };
-    invokeInEveryVM(checkOrphans);
-    try {
-      checkOrphans.run();
-    } finally {
-      super.tearDown2();
-    }
+    Invoke.invokeInEveryVM(checkOrphans);
+    checkOrphans.run();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckRegionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckRegionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckRegionDUnitTest.java
index 27da3d6..f957e49 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckRegionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckRegionDUnitTest.java
@@ -25,7 +25,9 @@ import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -60,7 +62,7 @@ public class DistributedAckRegionDUnitTest extends MultiVMRegionTestCase {
   public Properties getDistributedSystemProperties() {
     Properties p = new Properties();
     p.put(DistributionConfig.STATISTIC_SAMPLING_ENABLED_NAME, "true");
-    p.put(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+    p.put(DistributionConfig.LOG_LEVEL_NAME, LogWriterUtils.getDUnitLogLevel());
     return p;
   }
 
@@ -90,7 +92,7 @@ public class DistributedAckRegionDUnitTest extends MultiVMRegionTestCase {
             createRegion(name, "INCOMPATIBLE_ROOT", getRegionAttributes());
 
           } catch (CacheException ex) {
-            fail("While creating ACK region", ex);
+            Assert.fail("While creating ACK region", ex);
           }
         }
       });
@@ -109,7 +111,7 @@ public class DistributedAckRegionDUnitTest extends MultiVMRegionTestCase {
             }
 
           } catch (CacheException ex) {
-            fail("While creating GLOBAL Region", ex);
+            Assert.fail("While creating GLOBAL Region", ex);
           }
         }
       });
@@ -127,7 +129,7 @@ public class DistributedAckRegionDUnitTest extends MultiVMRegionTestCase {
             }
 
           } catch (CacheException ex) {
-            fail("While creating NOACK Region", ex);
+            Assert.fail("While creating NOACK Region", ex);
           }
         }
       });

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckRegionOffHeapDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckRegionOffHeapDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckRegionOffHeapDUnitTest.java
index 5d7f8f6..cbf8367 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckRegionOffHeapDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckRegionOffHeapDUnitTest.java
@@ -22,6 +22,7 @@ import com.gemstone.gemfire.cache.AttributesFactory;
 import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.cache.OffHeapTestUtil;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 
 /**
@@ -38,7 +39,7 @@ public class DistributedAckRegionOffHeapDUnitTest extends DistributedAckRegionDU
   }
   
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
     SerializableRunnable checkOrphans = new SerializableRunnable() {
 
       @Override
@@ -48,12 +49,8 @@ public class DistributedAckRegionOffHeapDUnitTest extends DistributedAckRegionDU
         }
       }
     };
-    invokeInEveryVM(checkOrphans);
-    try {
-      checkOrphans.run();
-    } finally {
-      super.tearDown2();
-    }
+    Invoke.invokeInEveryVM(checkOrphans);
+    checkOrphans.run();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedMulticastRegionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedMulticastRegionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedMulticastRegionDUnitTest.java
index 3728cf6..77e8253 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedMulticastRegionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedMulticastRegionDUnitTest.java
@@ -37,6 +37,7 @@ import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.OffHeapTestUtil;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
@@ -60,9 +61,8 @@ public class DistributedMulticastRegionDUnitTest extends CacheTestCase {
   }
   
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
     clean();
-    super.tearDown2();
   }
   
   private void clean(){
@@ -72,7 +72,7 @@ public class DistributedMulticastRegionDUnitTest extends CacheTestCase {
               disconnectFromDS();
             }
         };
-    invokeInEveryVM(cleanVM);    
+    Invoke.invokeInEveryVM(cleanVM);    
   }
   
   public void testMulticastEnabled() {
@@ -172,7 +172,7 @@ public class DistributedMulticastRegionDUnitTest extends CacheTestCase {
     locator1Vm.invoke(new SerializableCallable() {
       @Override
       public Object call() {
-        final File locatorLogFile = new File(testName + "-locator-" + locatorPort + ".log");
+        final File locatorLogFile = new File(getTestMethodName() + "-locator-" + locatorPort + ".log");
         final Properties locatorProps = new Properties();
         locatorProps.setProperty(DistributionConfig.NAME_NAME, "LocatorWithMcast");
         locatorProps.setProperty(DistributionConfig.MCAST_PORT_NAME, mcastport);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedNoAckRegionCCEDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedNoAckRegionCCEDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedNoAckRegionCCEDUnitTest.java
index c65101e..a1cc2cd 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedNoAckRegionCCEDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedNoAckRegionCCEDUnitTest.java
@@ -33,10 +33,13 @@ import com.gemstone.gemfire.cache.util.CacheListenerAdapter;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
 import com.gemstone.gemfire.internal.cache.LocalRegion;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
-import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 public class DistributedNoAckRegionCCEDUnitTest extends
     DistributedNoAckRegionDUnitTest {
@@ -126,7 +129,7 @@ public class DistributedNoAckRegionCCEDUnitTest extends
     AsyncInvocation vm1Ops = vm1.invokeAsync(DistributedNoAckRegionCCEDUnitTest.class, "doManyOps");
     AsyncInvocation vm2Ops = vm2.invokeAsync(DistributedNoAckRegionCCEDUnitTest.class, "doManyOps");
     // pause to let a bunch of operations build up
-    pause(5000);
+    Wait.pause(5000);
     AsyncInvocation a0 = vm3.invokeAsync(DistributedNoAckRegionCCEDUnitTest.class, "clearRegion");
     vm0.invoke(DistributedNoAckRegionCCEDUnitTest.class, "unblockListener");
     vm1.invoke(DistributedNoAckRegionCCEDUnitTest.class, "unblockListener");
@@ -139,7 +142,7 @@ public class DistributedNoAckRegionCCEDUnitTest extends
 //    if (a0failed && a1failed) {
 //      fail("neither member saw event conflation - check stats for " + name);
 //    }
-    pause(2000);//this test has with noack, thus we should wait before validating entries
+    Wait.pause(2000);//this test has with noack, thus we should wait before validating entries
     // check consistency of the regions
     Map r0Contents = (Map)vm0.invoke(this.getClass(), "getCCRegionContents");
     Map r1Contents = (Map)vm1.invoke(this.getClass(), "getCCRegionContents");
@@ -171,25 +174,25 @@ public class DistributedNoAckRegionCCEDUnitTest extends
         if (event.isOriginRemote()) {
           synchronized(this) {
             while (ListenerBlocking) {
-              getLogWriter().info("blocking cache operations for " + event.getDistributedMember());
+              LogWriterUtils.getLogWriter().info("blocking cache operations for " + event.getDistributedMember());
               blocked = true;
               try {
                 wait();
               } catch (InterruptedException e) {
                 Thread.currentThread().interrupt();
-                getLogWriter().info("blocking cache listener interrupted");
+                LogWriterUtils.getLogWriter().info("blocking cache listener interrupted");
                 return;
               }
             }
           }
           if (blocked) {
-            getLogWriter().info("allowing cache operations for " + event.getDistributedMember());
+            LogWriterUtils.getLogWriter().info("allowing cache operations for " + event.getDistributedMember());
           }
         }
       }
       @Override
       public void close() {
-        getLogWriter().info("closing blocking listener");
+        LogWriterUtils.getLogWriter().info("closing blocking listener");
         ListenerBlocking = false;
         synchronized(this) {
           notifyAll();
@@ -314,7 +317,7 @@ public class DistributedNoAckRegionCCEDUnitTest extends
             }
             CCRegion = (LocalRegion)f.create(name);
           } catch (CacheException ex) {
-            fail("While creating region", ex);
+            Assert.fail("While creating region", ex);
           }
         }
       };

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedNoAckRegionCCEOffHeapDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedNoAckRegionCCEOffHeapDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedNoAckRegionCCEOffHeapDUnitTest.java
index 4d3bb80..c068876 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedNoAckRegionCCEOffHeapDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedNoAckRegionCCEOffHeapDUnitTest.java
@@ -22,6 +22,7 @@ import com.gemstone.gemfire.cache.AttributesFactory;
 import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.cache.OffHeapTestUtil;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 
 /**
@@ -38,7 +39,7 @@ public class DistributedNoAckRegionCCEOffHeapDUnitTest extends DistributedNoAckR
   }
   
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
     SerializableRunnable checkOrphans = new SerializableRunnable() {
 
       @Override
@@ -48,12 +49,8 @@ public class DistributedNoAckRegionCCEOffHeapDUnitTest extends DistributedNoAckR
         }
       }
     };
-    invokeInEveryVM(checkOrphans);
-    try {
-      checkOrphans.run();
-    } finally {
-      super.tearDown2();
-    }
+    Invoke.invokeInEveryVM(checkOrphans);
+    checkOrphans.run();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedNoAckRegionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedNoAckRegionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedNoAckRegionDUnitTest.java
index aa39758..1d680f0 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedNoAckRegionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedNoAckRegionDUnitTest.java
@@ -28,11 +28,15 @@ import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedMember;
 import com.gemstone.gemfire.internal.cache.DistributedRegion;
 import com.gemstone.gemfire.internal.cache.StateFlushOperation;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * This class tests the functionality of a cache {@link Region region}
@@ -104,7 +108,7 @@ public class DistributedNoAckRegionDUnitTest
             assertTrue(getRootRegion("INCOMPATIBLE_ROOT").getAttributes().getScope().isDistributedNoAck());
             assertTrue(region.getAttributes().getScope().isDistributedNoAck());
           } catch (CacheException ex) {
-            fail("While creating NO ACK region", ex);
+            Assert.fail("While creating NO ACK region", ex);
           }
         }
       });
@@ -126,7 +130,7 @@ public class DistributedNoAckRegionDUnitTest
 //            assertNull(getRootRegion());
 
           } catch (CacheException ex) {
-            fail("While creating GLOBAL Region", ex);
+            Assert.fail("While creating GLOBAL Region", ex);
           }
         }
       });
@@ -147,7 +151,7 @@ public class DistributedNoAckRegionDUnitTest
 //            assertNull(getRootRegion());
 
           } catch (CacheException ex) {
-            fail("While creating ACK Region", ex);
+            Assert.fail("While creating ACK Region", ex);
           }
         }
       });
@@ -180,12 +184,12 @@ public class DistributedNoAckRegionDUnitTest
     SerializableRunnable create = new
       CacheSerializableRunnable("Create Mirrored Region") {
         public void run2() throws CacheException {
-          getLogWriter().info("testBug30705: Start creating Mirrored Region"); 
+          LogWriterUtils.getLogWriter().info("testBug30705: Start creating Mirrored Region"); 
           AttributesFactory factory =
             new AttributesFactory(getRegionAttributes());
           factory.setDataPolicy(DataPolicy.REPLICATE);
           createRegion(name, factory.create());
-          getLogWriter().info("testBug30705: Finished creating Mirrored Region"); 
+          LogWriterUtils.getLogWriter().info("testBug30705: Finished creating Mirrored Region"); 
         }
       };
       
@@ -197,14 +201,14 @@ public class DistributedNoAckRegionDUnitTest
           Object key = new Integer(0x42);
           Object value = new byte[0];
           assertNotNull(value);
-          getLogWriter().info("testBug30705: Started Distributed NoAck Puts"); 
+          LogWriterUtils.getLogWriter().info("testBug30705: Started Distributed NoAck Puts"); 
           for (int i = 0; i < NUM_PUTS; i++) {
             if (stopPutting) {
-              getLogWriter().info("testBug30705: Interrupted Distributed Ack Puts after " + i + " PUTS"); 
+              LogWriterUtils.getLogWriter().info("testBug30705: Interrupted Distributed Ack Puts after " + i + " PUTS"); 
               break;
             }
             if ((i % 1000) == 0) {
-              getLogWriter().info("testBug30705: modification #" + i); 
+              LogWriterUtils.getLogWriter().info("testBug30705: modification #" + i); 
             }
             rgn.put(key, value);
           }          
@@ -216,18 +220,18 @@ public class DistributedNoAckRegionDUnitTest
 
     vm0.invoke(new CacheSerializableRunnable("Put data") {
         public void run2() throws CacheException {
-          getLogWriter().info("testBug30705: starting initial data load"); 
+          LogWriterUtils.getLogWriter().info("testBug30705: starting initial data load"); 
           Region region =
             getRootRegion().getSubregion(name);
           final byte[] value = new byte[valueSize];
           Arrays.fill(value, (byte)0x42);
           for (int i = 0; i < numEntries; i++) {
             if ((i % 1000) == 0) {
-              getLogWriter().info("testBug30705: initial put #" + i); 
+              LogWriterUtils.getLogWriter().info("testBug30705: initial put #" + i); 
             }
             region.put(new Integer(i), value);
           }
-          getLogWriter().info("testBug30705: finished initial data load"); 
+          LogWriterUtils.getLogWriter().info("testBug30705: finished initial data load"); 
         }
       });
 
@@ -236,19 +240,19 @@ public class DistributedNoAckRegionDUnitTest
     
     // do initial image
     try {
-      getLogWriter().info("testBug30705: before the critical create");
+      LogWriterUtils.getLogWriter().info("testBug30705: before the critical create");
       vm2.invoke(create);
-      getLogWriter().info("testBug30705: after the critical create");
+      LogWriterUtils.getLogWriter().info("testBug30705: after the critical create");
    } finally {
       // test passes if this does not hang
-      getLogWriter().info("testBug30705: INTERRUPTING Distributed NoAck Puts after GetInitialImage");
+      LogWriterUtils.getLogWriter().info("testBug30705: INTERRUPTING Distributed NoAck Puts after GetInitialImage");
       vm0.invoke(new SerializableRunnable("Interrupt Puts") {
         public void run() {
-          getLogWriter().info("testBug30705: interrupting putter"); 
+          LogWriterUtils.getLogWriter().info("testBug30705: interrupting putter"); 
           stopPutting = true;
         }
       });
-      DistributedTestCase.join(async, 30 * 1000, getLogWriter());
+      ThreadUtils.join(async, 30 * 1000);
       // wait for overflow queue to quiesce before continuing
       vm2.invoke(new SerializableRunnable("Wait for Overflow Queue") {
         public void run() {
@@ -260,7 +264,7 @@ public class DistributedNoAckRegionDUnitTest
               return "overflow queue remains nonempty";
             }
           };
-          DistributedTestCase.waitForCriterion(ev, 30 * 1000, 200, true);
+          Wait.waitForCriterion(ev, 30 * 1000, 200, true);
 //          pause(100);
 //           try {
 //             getRootRegion().getSubregion(name).destroyRegion();
@@ -270,20 +274,20 @@ public class DistributedNoAckRegionDUnitTest
         }
        });
     } // finally
-   getLogWriter().info("testBug30705: at end of test");
+   LogWriterUtils.getLogWriter().info("testBug30705: at end of test");
    if (async.exceptionOccurred()) {
-     fail("Got exception", async.getException());
+     Assert.fail("Got exception", async.getException());
    }
   }
 
   @Override
   protected void pauseIfNecessary(int ms) {
-    pause(ms);
+    Wait.pause(ms);
   }
   
   @Override
   protected void pauseIfNecessary() {
-    pause();
+    Wait.pause();
   }
 
   @Override

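The DistributedNoAckRegionDUnitTest changes above also swap DistributedTestCase.join(async, ms, getLogWriter()) for ThreadUtils.join(async, ms), which drops the log-writer argument. Below is a hedged sketch of the async-invocation idiom after the change; the wrapper class is invented, while the target class and the "doManyOps" method name come from the hunks above.

    import com.gemstone.gemfire.cache30.DistributedNoAckRegionCCEDUnitTest;
    import com.gemstone.gemfire.test.dunit.Assert;
    import com.gemstone.gemfire.test.dunit.AsyncInvocation;
    import com.gemstone.gemfire.test.dunit.Host;
    import com.gemstone.gemfire.test.dunit.ThreadUtils;
    import com.gemstone.gemfire.test.dunit.VM;

    // Hypothetical wrapper; only the call sites matter.
    public class AsyncJoinSketch {

      public static void runAndJoin() {
        VM vm0 = Host.getHost(0).getVM(0);
        // Start work in another dunit VM by naming a public static method on a test class.
        AsyncInvocation async = vm0.invokeAsync(DistributedNoAckRegionCCEDUnitTest.class, "doManyOps");

        // Before GEODE-773: DistributedTestCase.join(async, 30 * 1000, getLogWriter());
        ThreadUtils.join(async, 30 * 1000);
        if (async.exceptionOccurred()) {
          // fail(message, cause) comes from the dunit Assert class.
          Assert.fail("async invocation failed", async.getException());
        }
      }
    }
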
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedNoAckRegionOffHeapDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedNoAckRegionOffHeapDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedNoAckRegionOffHeapDUnitTest.java
index 2aa1f1e..e668d3f 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedNoAckRegionOffHeapDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedNoAckRegionOffHeapDUnitTest.java
@@ -22,6 +22,7 @@ import com.gemstone.gemfire.cache.AttributesFactory;
 import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.cache.OffHeapTestUtil;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 
 /**
@@ -38,7 +39,7 @@ public class DistributedNoAckRegionOffHeapDUnitTest extends DistributedNoAckRegi
   }
   
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
     SerializableRunnable checkOrphans = new SerializableRunnable() {
 
       @Override
@@ -48,12 +49,8 @@ public class DistributedNoAckRegionOffHeapDUnitTest extends DistributedNoAckRegi
         }
       }
     };
-    invokeInEveryVM(checkOrphans);
-    try {
-      checkOrphans.run();
-    } finally {
-      super.tearDown2();
-    }
+    Invoke.invokeInEveryVM(checkOrphans);
+    checkOrphans.run();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DynamicRegionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DynamicRegionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DynamicRegionDUnitTest.java
index f8baf5a..19d2bb7 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DynamicRegionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DynamicRegionDUnitTest.java
@@ -32,6 +32,7 @@ import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.internal.OSProcess;
 import com.gemstone.gemfire.internal.cache.xmlcache.CacheCreation;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -71,8 +72,9 @@ public class DynamicRegionDUnitTest extends CacheTestCase {
    * in a test.
    * </p>
    */
-  public void tearDown2() throws Exception {
-    getLogWriter().info("Running tearDown in " + this);
+  @Override
+  protected final void preTearDownCacheTestCase() throws Exception {
+    LogWriterUtils.getLogWriter().info("Running tearDown in " + this);
     try {
       //Asif destroy dynamic regions at the end of the test
       CacheSerializableRunnable destroyDynRegn = new CacheSerializableRunnable("Destroy Dynamic regions") {
@@ -89,15 +91,13 @@ public class DynamicRegionDUnitTest extends CacheTestCase {
       if(dr != null) {
           dr.localDestroyRegion();      
       }
-
-      super.tearDown2();
     } 
     catch (VirtualMachineError e) {
       SystemFailure.initiateFailure(e);
       throw e;
     }
     catch (Throwable t) {
-      getLogWriter().severe("tearDown in " + this + " failed due to " + t);
+      LogWriterUtils.getLogWriter().severe("tearDown in " + this + " failed due to " + t);
     }
     finally {
       try {
@@ -108,11 +108,11 @@ public class DynamicRegionDUnitTest extends CacheTestCase {
         throw e;
       }
       catch (Throwable t) {
-        getLogWriter().severe("tearDown in " + this + " failed to disconnect all DS due to " + t);  
+        LogWriterUtils.getLogWriter().severe("tearDown in " + this + " failed to disconnect all DS due to " + t);  
       }
     }
     if (! DynamicRegionFactory.get().isClosed()) {
-      getLogWriter().severe("DynamicRegionFactory not closed!", new Exception());
+      LogWriterUtils.getLogWriter().severe("DynamicRegionFactory not closed!", new Exception());
     }
   }
   
@@ -230,7 +230,7 @@ public class DynamicRegionDUnitTest extends CacheTestCase {
       DynamicRegionFactory.get().createDynamicRegion(drFullPath, "subregion" + i);
     }
     
-    getLogWriter().info("testPeerRegion - check #1 make sure other region has new dynamic subregion");
+    LogWriterUtils.getLogWriter().info("testPeerRegion - check #1 make sure other region has new dynamic subregion");
     checkForRegionOtherVm(drFullPath, true);
 
     // spot check the subregions
@@ -239,13 +239,13 @@ public class DynamicRegionDUnitTest extends CacheTestCase {
     // now see if OTHER can recreate which should fetch meta-info from controller
     recreateOtherVm();
 
-    getLogWriter().info("testPeerRegion - check #2 make sure other region has dynamic region after restarting through getInitialImage");
+    LogWriterUtils.getLogWriter().info("testPeerRegion - check #2 make sure other region has dynamic region after restarting through getInitialImage");
     checkForRegionOtherVm(drFullPath, true);
 
     // now close the controller and see if OTHER can still fetch meta-info from disk
     closeCache();
     recreateOtherVm();
-    getLogWriter().info("testPeerRegion - check #3 make sure dynamic region can be recovered from disk");
+    LogWriterUtils.getLogWriter().info("testPeerRegion - check #3 make sure dynamic region can be recovered from disk");
     checkForRegionOtherVm(drFullPath, true);
     for (int i=0; i<10; i++) {
       checkForSubregionOtherVm(drFullPath + "/subregion" + i, true);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/GlobalLockingDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/GlobalLockingDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/GlobalLockingDUnitTest.java
index c6b5a05..0830b7e 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/GlobalLockingDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/GlobalLockingDUnitTest.java
@@ -29,6 +29,7 @@ import com.gemstone.gemfire.cache.RegionExistsException;
 import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.cache.TimeoutException;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -71,13 +72,13 @@ public class GlobalLockingDUnitTest extends CacheTestCase {
    * Tests for 32356 R2 tryLock w/ 0 timeout broken in Distributed Lock Service
    */
   public void testBug32356() throws Exception {
-    getLogWriter().fine("[testBug32356]");
+    LogWriterUtils.getLogWriter().fine("[testBug32356]");
     Host host = Host.getHost(0);
     final String name = this.getUniqueName();
     final Object key = "32356";
 
     // lock/unlock '32356' in all vms... (make all vms aware of token)
-    getLogWriter().fine("[testBug32356] lock/unlock '32356' in all vms");
+    LogWriterUtils.getLogWriter().fine("[testBug32356] lock/unlock '32356' in all vms");
     for (int i = 0; i < 4; i++) {
       final int vm = i;
       host.getVM(vm).invoke(new CacheSerializableRunnable("testBug32356_step1") {
@@ -92,7 +93,7 @@ public class GlobalLockingDUnitTest extends CacheTestCase {
     }
 
     // attempt try-lock of zero wait time in all vms
-    getLogWriter().fine("[testBug32356] attempt try-lock of zero wait time in all vms");
+    LogWriterUtils.getLogWriter().fine("[testBug32356] attempt try-lock of zero wait time in all vms");
     for (int i = 0; i < 4; i++) {
       final int vm = i;
       host.getVM(vm).invoke(new CacheSerializableRunnable("testBug32356_step2") {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/GlobalRegionCCEDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/GlobalRegionCCEDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/GlobalRegionCCEDUnitTest.java
index 22b303f..c328995 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/GlobalRegionCCEDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/GlobalRegionCCEDUnitTest.java
@@ -36,6 +36,7 @@ import com.gemstone.gemfire.internal.cache.RegionClearedException;
 import com.gemstone.gemfire.internal.cache.RegionEntry;
 import com.gemstone.gemfire.internal.cache.TombstoneService;
 import com.gemstone.gemfire.internal.cache.versions.VersionTag;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
@@ -167,7 +168,7 @@ public class GlobalRegionCCEDUnitTest extends GlobalRegionDUnitTest {
           CCRegion.put("cckey0", "ccvalue");
           CCRegion.put("cckey0", "ccvalue"); // version number will end up at 4
         } catch (CacheException ex) {
-          fail("While creating region", ex);
+          Assert.fail("While creating region", ex);
         }
       }
     };
@@ -189,7 +190,7 @@ public class GlobalRegionCCEDUnitTest extends GlobalRegionDUnitTest {
         try {
           entry.makeTombstone(CCRegion, tag);
         } catch (RegionClearedException e) {
-          fail("region was mysteriously cleared during unit testing", e);
+          Assert.fail("region was mysteriously cleared during unit testing", e);
         }
       }
     });

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/GlobalRegionCCEOffHeapDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/GlobalRegionCCEOffHeapDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/GlobalRegionCCEOffHeapDUnitTest.java
index 8579548..653cab2 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/GlobalRegionCCEOffHeapDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/GlobalRegionCCEOffHeapDUnitTest.java
@@ -22,6 +22,7 @@ import com.gemstone.gemfire.cache.AttributesFactory;
 import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.cache.OffHeapTestUtil;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 
 /**
@@ -38,7 +39,7 @@ public class GlobalRegionCCEOffHeapDUnitTest extends GlobalRegionCCEDUnitTest {
   }
   
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
     SerializableRunnable checkOrphans = new SerializableRunnable() {
 
       @Override
@@ -48,12 +49,8 @@ public class GlobalRegionCCEOffHeapDUnitTest extends GlobalRegionCCEDUnitTest {
         }
       }
     };
-    invokeInEveryVM(checkOrphans);
-    try {
-      checkOrphans.run();
-    } finally {
-      super.tearDown2();
-    }
+    Invoke.invokeInEveryVM(checkOrphans);
+    checkOrphans.run();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/GlobalRegionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/GlobalRegionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/GlobalRegionDUnitTest.java
index f08b66f..008f951 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/GlobalRegionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/GlobalRegionDUnitTest.java
@@ -31,10 +31,12 @@ import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.cache.TimeoutException;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -86,7 +88,7 @@ public class GlobalRegionDUnitTest extends MultiVMRegionTestCase {
           try {
             createRegion(name, "INCOMPATIBLE_ROOT", getRegionAttributes());
           } catch (CacheException ex) {
-            fail("While creating GLOBAL region", ex);
+            Assert.fail("While creating GLOBAL region", ex);
           }
           assertTrue(getRootRegion("INCOMPATIBLE_ROOT").getAttributes().getScope().isGlobal());
         }
@@ -108,7 +110,7 @@ public class GlobalRegionDUnitTest extends MultiVMRegionTestCase {
             }
 
           } catch (CacheException ex) {
-            fail("While creating GLOBAL Region", ex);
+            Assert.fail("While creating GLOBAL Region", ex);
           }
         }
       });
@@ -134,7 +136,7 @@ public class GlobalRegionDUnitTest extends MultiVMRegionTestCase {
             }
 
           } catch (CacheException ex) {
-            fail("While creating GLOBAL Region", ex);
+            Assert.fail("While creating GLOBAL Region", ex);
           }
         }
       });
@@ -245,7 +247,7 @@ public class GlobalRegionDUnitTest extends MultiVMRegionTestCase {
                       SystemFailure.setFailure((VirtualMachineError)e); // don't throw
                     }
                     String s = "Uncaught exception in thread " + t;
-                    fail(s, e);
+                    Assert.fail(s, e);
                   }
                 };
 
@@ -254,7 +256,7 @@ public class GlobalRegionDUnitTest extends MultiVMRegionTestCase {
               Thread thread = new Thread(group, new Runnable() {
                   public void run() {
                     try {
-                      getLogWriter().info("testSynchronousIncrements." + this);
+                      LogWriterUtils.getLogWriter().info("testSynchronousIncrements." + this);
                       final Random rand = new Random(System.identityHashCode(this));
                       try {
                         Region region = getRootRegion().getSubregion(name);
@@ -278,15 +280,15 @@ public class GlobalRegionDUnitTest extends MultiVMRegionTestCase {
                           region.put(key, value);
                           assertEquals(value, region.get(key));
                           
-                          getLogWriter().info("testSynchronousIncrements." + 
+                          LogWriterUtils.getLogWriter().info("testSynchronousIncrements." + 
                               this + ": " + key + " -> " + value);
                           lock.unlock();
                         }
   
                       } catch (InterruptedException ex) {
-                        fail("While incrementing", ex);
+                        Assert.fail("While incrementing", ex);
                       } catch (Exception ex) {
-                        fail("While incrementing", ex);
+                        Assert.fail("While incrementing", ex);
                       }
                     }
                     catch (VirtualMachineError e) {
@@ -294,7 +296,7 @@ public class GlobalRegionDUnitTest extends MultiVMRegionTestCase {
                       throw e;
                     }
                     catch (Throwable t) {
-                      getLogWriter().info("testSynchronousIncrements." + 
+                      LogWriterUtils.getLogWriter().info("testSynchronousIncrements." + 
                           this + " caught Throwable", t);
                     }
                   }
@@ -304,7 +306,7 @@ public class GlobalRegionDUnitTest extends MultiVMRegionTestCase {
             }
             
             for (int i = 0; i < threads.length; i++) {
-              DistributedTestCase.join(threads[i], 30 * 1000, getLogWriter());
+              ThreadUtils.join(threads[i], 30 * 1000);
             }
           }
         };
@@ -315,9 +317,9 @@ public class GlobalRegionDUnitTest extends MultiVMRegionTestCase {
     }
 
     for (int i = 0; i < vmCount; i++) {
-      DistributedTestCase.join(invokes[i], 5 * 60 * 1000, getLogWriter());
+      ThreadUtils.join(invokes[i], 5 * 60 * 1000);
       if (invokes[i].exceptionOccurred()) {
-        fail("invocation failed", invokes[i].getException());
+        Assert.fail("invocation failed", invokes[i].getException());
       }
     }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/GlobalRegionOffHeapDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/GlobalRegionOffHeapDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/GlobalRegionOffHeapDUnitTest.java
index 4af6072..c652c55 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/GlobalRegionOffHeapDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/GlobalRegionOffHeapDUnitTest.java
@@ -22,6 +22,7 @@ import com.gemstone.gemfire.cache.AttributesFactory;
 import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.cache.OffHeapTestUtil;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 
 /**
@@ -38,7 +39,7 @@ public class GlobalRegionOffHeapDUnitTest extends GlobalRegionDUnitTest {
   }
   
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
     SerializableRunnable checkOrphans = new SerializableRunnable() {
 
       @Override
@@ -48,15 +49,9 @@ public class GlobalRegionOffHeapDUnitTest extends GlobalRegionDUnitTest {
         }
       }
     };
-    invokeInEveryVM(checkOrphans);
-    try {
-      checkOrphans.run();
-    } finally {
-      super.tearDown2();
-    }
+    Invoke.invokeInEveryVM(checkOrphans);
+    checkOrphans.run();
   }
-
-  
   
   @Override
   public void DISABLED_testNBRegionInvalidationDuringGetInitialImage() throws Throwable {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/LRUEvictionControllerDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/LRUEvictionControllerDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/LRUEvictionControllerDUnitTest.java
index df02539..b3a85bc 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/LRUEvictionControllerDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/LRUEvictionControllerDUnitTest.java
@@ -46,6 +46,7 @@ import com.gemstone.gemfire.internal.cache.control.InternalResourceManager.Resou
 import com.gemstone.gemfire.internal.cache.lru.HeapEvictor;
 import com.gemstone.gemfire.internal.cache.lru.LRUStatistics;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -303,7 +304,7 @@ public class LRUEvictionControllerDUnitTest extends CacheTestCase {
       CacheListenerAdapter() {
         public void afterCreate(EntryEvent event) {
           try {
-            getLogWriter().info("AFTER CREATE");
+            LogWriterUtils.getLogWriter().info("AFTER CREATE");
             region.put(key, value2);
 
           } 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/LocalRegionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/LocalRegionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/LocalRegionDUnitTest.java
index 0ea842d..2ed532b 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/LocalRegionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/LocalRegionDUnitTest.java
@@ -26,6 +26,7 @@ import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.cache.TimeoutException;
+import com.gemstone.gemfire.test.dunit.Assert;
 
 /**
  * Tests the functionality of a {@link Scope#LOCAL locally scoped}
@@ -124,7 +125,7 @@ public class LocalRegionDUnitTest extends CacheListenerTestCase {
             helper.netSearch(true);
 
           } catch (TimeoutException ex) {
-            fail("Why did I timeout?", ex);
+            Assert.fail("Why did I timeout?", ex);
           }
 
           return null;


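Every off-heap variant in this chunk gets the same treatment: tearDown2() becomes preTearDownCacheTestCase(), and the inherited invokeInEveryVM() call goes through the new Invoke class, with the try/finally around super.tearDown2() removed because the base class now drives the hook. A condensed sketch of that recurring shape follows; the class name is invented, and the orphan check is assumed to be OffHeapTestUtil.checkOrphans(), since the body of the runnable is elided by the hunk boundaries.

    import com.gemstone.gemfire.cache30.CacheTestCase;
    import com.gemstone.gemfire.internal.cache.OffHeapTestUtil;
    import com.gemstone.gemfire.test.dunit.Invoke;
    import com.gemstone.gemfire.test.dunit.SerializableRunnable;

    // Hypothetical off-heap test, condensed from the hunks above.
    public class ExampleOffHeapDUnitTest extends CacheTestCase {

      public ExampleOffHeapDUnitTest(String name) {
        super(name);
      }

      @Override
      protected final void preTearDownCacheTestCase() throws Exception {
        SerializableRunnable checkOrphans = new SerializableRunnable() {
          @Override
          public void run() {
            // Assumed orphan check; the original runnable body is not shown in the hunks.
            OffHeapTestUtil.checkOrphans();
          }
        };
        // invokeInEveryVM() is now a static on Invoke; it runs the check in every dunit VM,
        // and the same check then runs once more in the controller VM.
        Invoke.invokeInEveryVM(checkOrphans);
        checkOrphans.run();
      }
    }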

[52/62] [abbrv] incubator-geode git commit: GEODE-909: fix port issue in MemoryThresholdsOffHeapDUnitTest

Posted by je...@apache.org.
GEODE-909: fix port issue in MemoryThresholdsOffHeapDUnitTest

* Set the cache server port to zero so the system picks a random port instead of using AvailablePortHelper.


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/8db793d9
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/8db793d9
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/8db793d9

Branch: refs/heads/feature/GEODE-17
Commit: 8db793d9dc55eb73022980deda2626e04c83868c
Parents: d232e25
Author: Sai Boorlagadda <sb...@pivotal.io>
Authored: Mon Feb 8 14:29:51 2016 -0800
Committer: Darrel Schneider <ds...@pivotal.io>
Committed: Tue Feb 9 10:55:33 2016 -0800

----------------------------------------------------------------------
 .../cache/management/MemoryThresholdsOffHeapDUnitTest.java   | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/8db793d9/gemfire-core/src/test/java/com/gemstone/gemfire/cache/management/MemoryThresholdsOffHeapDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/management/MemoryThresholdsOffHeapDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/management/MemoryThresholdsOffHeapDUnitTest.java
index 4a205f3..5c65b1a 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/management/MemoryThresholdsOffHeapDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/management/MemoryThresholdsOffHeapDUnitTest.java
@@ -245,14 +245,12 @@ public class MemoryThresholdsOffHeapDUnitTest extends ClientServerTestCase {
     final VM server1 = host.getVM(0);
     final VM server2 = host.getVM(1);
     
-    final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(2);
-    final int port1 = ports[0];
-    final int port2 = ports[1];
     final String regionName = "offHeapDisabledThresholds";
 
-    startCacheServer(server1, port1, 0f, 0f,
+    // set port to 0 so the system picks up a random port.
+    startCacheServer(server1, 0, 0f, 0f,
         regionName, false/*createPR*/, false/*notifyBySubscription*/, 0);
-    startCacheServer(server2, port2, 0f, 0f,
+    startCacheServer(server2, 0, 0f, 0f,
         regionName, false/*createPR*/, false/*notifyBySubscription*/, 0);
 
     registerTestMemoryThresholdListener(server1);

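The fix leans on the cache server's handling of port zero: when the configured port is 0, a free port is chosen at start-up and can be read back afterwards. Below is a minimal standalone sketch of that behavior, outside the dunit harness, with invented names and illustrative properties.

    import java.util.Properties;

    import com.gemstone.gemfire.cache.Cache;
    import com.gemstone.gemfire.cache.CacheFactory;
    import com.gemstone.gemfire.cache.server.CacheServer;

    // Hypothetical demo class, not part of the test.
    public class RandomPortCacheServer {

      public static void main(String[] args) throws java.io.IOException {
        Properties props = new Properties();
        props.setProperty("mcast-port", "0");   // illustrative: run as a standalone member
        Cache cache = new CacheFactory(props).create();

        CacheServer server = cache.addCacheServer();
        server.setPort(0);                      // 0 asks the system for a free port
        server.start();
        // After start(), getPort() reports the port the server actually bound to.
        System.out.println("cache server listening on port " + server.getPort());

        cache.close();
      }
    }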

[50/62] [abbrv] [partial] incubator-geode git commit: GEODE-773: Extract static methods from DistributedTestCase

Posted by je...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/internal/LocatorLoadBalancingDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/internal/LocatorLoadBalancingDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/internal/LocatorLoadBalancingDUnitTest.java
index 1c8e9eb..3c6a980 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/internal/LocatorLoadBalancingDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/internal/LocatorLoadBalancingDUnitTest.java
@@ -47,11 +47,14 @@ import com.gemstone.gemfire.internal.cache.CacheServerImpl;
 import com.gemstone.gemfire.internal.cache.PoolFactoryImpl;
 import com.gemstone.gemfire.internal.logging.InternalLogWriter;
 import com.gemstone.gemfire.internal.logging.LocalLogWriter;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnableIF;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * @author dsmith
@@ -98,7 +101,7 @@ public class LocatorLoadBalancingDUnitTest extends LocatorTestBase {
     int serverPort = startBridgeServerInVM(vm1, new String[] {"a", "b"},  locators);
     
     ServerLoad expectedLoad = new ServerLoad(0f, 1 / 800.0f, 0f, 1f);
-    ServerLocation expectedLocation = new ServerLocation(getServerHostName(vm0
+    ServerLocation expectedLocation = new ServerLocation(NetworkUtils.getServerHostName(vm0
         .getHost()), serverPort);
     Map expected = new HashMap();
     expected.put(expectedLocation, expectedLoad);
@@ -107,7 +110,7 @@ public class LocatorLoadBalancingDUnitTest extends LocatorTestBase {
     
     int serverPort2 = startBridgeServerInVM(vm2, new String[] {"a", "b"},  locators);
     
-    ServerLocation expectedLocation2 = new ServerLocation(getServerHostName(vm0
+    ServerLocation expectedLocation2 = new ServerLocation(NetworkUtils.getServerHostName(vm0
         .getHost()), serverPort2);
     
     expected.put(expectedLocation2, expectedLoad);
@@ -130,18 +133,18 @@ public class LocatorLoadBalancingDUnitTest extends LocatorTestBase {
     int serverPort = startBridgeServerInVM(vm1, new String[] {"a", "b"},  locators);
     
     ServerLoad expectedLoad = new ServerLoad(2/800f, 1 / 800.0f, 0f, 1f);
-    ServerLocation expectedLocation = new ServerLocation(getServerHostName(host), serverPort);
+    ServerLocation expectedLocation = new ServerLocation(NetworkUtils.getServerHostName(host), serverPort);
     Map expected = new HashMap();
     expected.put(expectedLocation, expectedLoad);
     
     ClientConnectionResponse response;
     response = (ClientConnectionResponse) TcpClient.requestToServer(InetAddress
-        .getByName(getServerHostName(host)), locatorPort,
+        .getByName(NetworkUtils.getServerHostName(host)), locatorPort,
         new ClientConnectionRequest(Collections.EMPTY_SET, null), 10000);
     Assert.assertEquals(expectedLocation, response.getServer());
     
     response = (ClientConnectionResponse) TcpClient.requestToServer(InetAddress
-        .getByName(getServerHostName(host)), locatorPort,
+        .getByName(NetworkUtils.getServerHostName(host)), locatorPort,
         new ClientConnectionRequest(Collections.EMPTY_SET, null), 10000, true);
     Assert.assertEquals(expectedLocation, response.getServer());
     
@@ -150,13 +153,13 @@ public class LocatorLoadBalancingDUnitTest extends LocatorTestBase {
     
     QueueConnectionResponse response2;
     response2 = (QueueConnectionResponse) TcpClient.requestToServer(InetAddress
-        .getByName(getServerHostName(host)), locatorPort,
+        .getByName(NetworkUtils.getServerHostName(host)), locatorPort,
         new QueueConnectionRequest(null, 2,
             Collections.EMPTY_SET, null, false), 10000, true);
     Assert.assertEquals(Collections.singletonList(expectedLocation), response2.getServers());
     
     response2 = (QueueConnectionResponse) TcpClient
-        .requestToServer(InetAddress.getByName(getServerHostName(host)),
+        .requestToServer(InetAddress.getByName(NetworkUtils.getServerHostName(host)),
             locatorPort, new QueueConnectionRequest(null, 5, Collections.EMPTY_SET, null,
                 false), 10000, true);
     
@@ -186,13 +189,13 @@ public class LocatorLoadBalancingDUnitTest extends LocatorTestBase {
     
     //We expect 0 load
     Map expected = new HashMap();
-    ServerLocation expectedLocation = new ServerLocation(getServerHostName(host), serverPort);
+    ServerLocation expectedLocation = new ServerLocation(NetworkUtils.getServerHostName(host), serverPort);
     ServerLoad expectedLoad = new ServerLoad(0f, 1 / 800.0f, 0f, 1f);
     expected.put(expectedLocation, expectedLoad);
     checkLocatorLoad(vm0, expected);
     
     PoolFactoryImpl pf = new PoolFactoryImpl(null);
-    pf.addServer(getServerHostName(host), serverPort);
+    pf.addServer(NetworkUtils.getServerHostName(host), serverPort);
     pf.setMinConnections(8);
     pf.setMaxConnections(8);
     pf.setSubscriptionEnabled(true);
@@ -233,7 +236,7 @@ public class LocatorLoadBalancingDUnitTest extends LocatorTestBase {
     startBridgeServerInVM(vm2, new String[] {"a", "b"},  locators);
     
     PoolFactoryImpl pf = new PoolFactoryImpl(null);
-    pf.addLocator(getServerHostName(host), locatorPort);
+    pf.addLocator(NetworkUtils.getServerHostName(host), locatorPort);
     pf.setMinConnections(80);
     pf.setMaxConnections(80);
     pf.setSubscriptionEnabled(false);
@@ -267,7 +270,7 @@ public class LocatorLoadBalancingDUnitTest extends LocatorTestBase {
             return excuse;
           }
         };
-        DistributedTestCase.waitForCriterion(wc, 5 * 60 * 1000, 1000, true);
+        Wait.waitForCriterion(wc, 5 * 60 * 1000, 1000, true);
       }
     };
     
@@ -290,7 +293,7 @@ public class LocatorLoadBalancingDUnitTest extends LocatorTestBase {
             return "connection count never reached " + count;
           }
         };
-        DistributedTestCase.waitForCriterion(ev, MAX_WAIT, 200, true);
+        Wait.waitForCriterion(ev, MAX_WAIT, 200, true);
       }
     };
     if(vm == null) {
@@ -321,7 +324,7 @@ public class LocatorLoadBalancingDUnitTest extends LocatorTestBase {
     startBridgeServerInVM(vm3, new String[] {"b"},  locators);
     
     PoolFactoryImpl pf = new PoolFactoryImpl(null);
-    pf.addLocator(getServerHostName(host), locatorPort);
+    pf.addLocator(NetworkUtils.getServerHostName(host), locatorPort);
     pf.setMinConnections(12);
     pf.setSubscriptionEnabled(false);
     pf.setServerGroup("a");
@@ -333,7 +336,7 @@ public class LocatorLoadBalancingDUnitTest extends LocatorTestBase {
     checkConnectionCount(vm2, 6);
     checkConnectionCount(vm3, 0);
     
-    getLogWriter().info("pool1 prefilled");
+    LogWriterUtils.getLogWriter().info("pool1 prefilled");
     
     PoolFactoryImpl pf2 = (PoolFactoryImpl) PoolManager.createFactory();
     pf2.init(pf.getPoolAttributes());
@@ -347,9 +350,9 @@ public class LocatorLoadBalancingDUnitTest extends LocatorTestBase {
     checkConnectionCount(vm2, 9);
     checkConnectionCount(vm3, 9);
     
-    getLogWriter().info("pool2 prefilled");
+    LogWriterUtils.getLogWriter().info("pool2 prefilled");
     
-    ServerLocation location1 = new ServerLocation(getServerHostName(host), serverPort1);
+    ServerLocation location1 = new ServerLocation(NetworkUtils.getServerHostName(host), serverPort1);
     PoolImpl pool1 = (PoolImpl) PoolManager.getAll().get(POOL_NAME);
     Assert.assertEquals("a", pool1.getServerGroup());
     
@@ -358,7 +361,7 @@ public class LocatorLoadBalancingDUnitTest extends LocatorTestBase {
       pool1.acquireConnection();
     }
     
-    getLogWriter().info("aquired 15 connections in pool1");
+    LogWriterUtils.getLogWriter().info("aquired 15 connections in pool1");
     
     //now the load should be equal
     checkConnectionCount(vm1, 9);
@@ -370,7 +373,7 @@ public class LocatorLoadBalancingDUnitTest extends LocatorTestBase {
       pool2.acquireConnection();
     }
     
-    getLogWriter().info("aquired 12 connections in pool2");
+    LogWriterUtils.getLogWriter().info("aquired 12 connections in pool2");
     
     //interleave creating connections in both pools
     for(int i = 0; i < 6; i++) {
@@ -378,7 +381,7 @@ public class LocatorLoadBalancingDUnitTest extends LocatorTestBase {
       pool2.acquireConnection();
     }
     
-    getLogWriter().info("interleaved 6 connections from pool1 with 6 connections from pool2");
+    LogWriterUtils.getLogWriter().info("interleaved 6 connections from pool1 with 6 connections from pool2");
     
     //The load should still be balanced
     checkConnectionCount(vm1, 13);
@@ -404,8 +407,8 @@ public class LocatorLoadBalancingDUnitTest extends LocatorTestBase {
     int serverPort2 = startBridgeServerInVM(vm2, null, locators, new String[] {REGION_NAME}, new MyLoadProbe(load2 ));
     
     HashMap expected = new HashMap();
-    ServerLocation l1 = new ServerLocation(getServerHostName(host), serverPort1);
-    ServerLocation l2 = new ServerLocation(getServerHostName(host), serverPort2);
+    ServerLocation l1 = new ServerLocation(NetworkUtils.getServerHostName(host), serverPort1);
+    ServerLocation l2 = new ServerLocation(NetworkUtils.getServerHostName(host), serverPort2);
     expected.put(l1, load1);
     expected.put(l2, load2);
     checkLocatorLoad(vm0, expected);
@@ -425,7 +428,7 @@ public class LocatorLoadBalancingDUnitTest extends LocatorTestBase {
     checkLocatorLoad(vm0, expected);
     
     PoolFactoryImpl pf = new PoolFactoryImpl(null);
-    pf.addLocator(getServerHostName(host), locatorPort);
+    pf.addLocator(NetworkUtils.getServerHostName(host), locatorPort);
     pf.setMinConnections(20);
     pf.setSubscriptionEnabled(true);
     pf.setIdleTimeout(-1);
@@ -455,7 +458,7 @@ public class LocatorLoadBalancingDUnitTest extends LocatorTestBase {
             return "load map never became equal to " + expected;
           }
         };
-        DistributedTestCase.waitForCriterion(ev, MAX_WAIT, 200, true);
+        Wait.waitForCriterion(ev, MAX_WAIT, 200, true);
       }
     });
   }
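
For reference, the Wait.waitForCriterion calls that replace DistributedTestCase.waitForCriterion throughout this file follow the shape below. The sketch assumes the WaitCriterion methods are done() and description(), which the anonymous classes in these hunks suggest, and that the trailing boolean makes a timeout fail the test; getConnectionCount() and expectedCount are hypothetical stand-ins for whatever condition a test polls.

  import com.gemstone.gemfire.test.dunit.Wait;
  import com.gemstone.gemfire.test.dunit.WaitCriterion;

  // inside a dunit test method
  final int expectedCount = 8;                        // hypothetical target value
  WaitCriterion wc = new WaitCriterion() {
    public boolean done() {
      return getConnectionCount() >= expectedCount;   // hypothetical helper, polled until true
    }
    public String description() {
      return "connection count never reached " + expectedCount;
    }
  };
  // as used above: criterion, timeout in ms, polling interval in ms, fail-on-timeout flag
  Wait.waitForCriterion(wc, 60 * 1000, 200, true);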

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/internal/LocatorTestBase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/internal/LocatorTestBase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/internal/LocatorTestBase.java
index 72f1a5c..f35e2fa 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/internal/LocatorTestBase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/internal/LocatorTestBase.java
@@ -42,8 +42,12 @@ import com.gemstone.gemfire.distributed.Locator;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.internal.cache.PoolFactoryImpl;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
@@ -67,7 +71,8 @@ public abstract class LocatorTestBase  extends DistributedTestCase {
     super(name);
   }
   
-  public void tearDown2() throws Exception {
+  @Override
+  protected final void preTearDown() throws Exception {
     
     SerializableRunnable tearDown = new SerializableRunnable("tearDown") {
       public void run() {
@@ -94,9 +99,13 @@ public abstract class LocatorTestBase  extends DistributedTestCase {
     };
     //We seem to like leaving the DS open if we can for
     //speed, but lets at least destroy our cache and locator.
-    invokeInEveryVM(tearDown);
+    Invoke.invokeInEveryVM(tearDown);
     tearDown.run();
-    super.tearDown2();
+    
+    postTearDownLocatorTestBase();
+  }
+  
+  protected void postTearDownLocatorTestBase() throws Exception {
   }
   
   protected void startLocatorInVM(final VM vm, final int locatorPort, final String otherLocators) {
@@ -108,21 +117,21 @@ public abstract class LocatorTestBase  extends DistributedTestCase {
         Properties props = new Properties();
         props.setProperty(DistributionConfig.MCAST_PORT_NAME, String.valueOf(0));
         props.setProperty(DistributionConfig.LOCATORS_NAME, otherLocators);
-        props.setProperty(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+        props.setProperty(DistributionConfig.LOG_LEVEL_NAME, LogWriterUtils.getDUnitLogLevel());
         props.setProperty(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "false");
         try {
           File logFile = new File(testName + "-locator" + locatorPort
               + ".log");
           InetAddress bindAddr = null;
           try {
-            bindAddr = InetAddress.getByName(getServerHostName(vm.getHost()));
+            bindAddr = InetAddress.getByName(NetworkUtils.getServerHostName(vm.getHost()));
           } catch (UnknownHostException uhe) {
-            fail("While resolving bind address ", uhe);
+            Assert.fail("While resolving bind address ", uhe);
           }
           Locator locator = Locator.startLocatorAndDS(locatorPort, logFile, bindAddr, props);
           remoteObjects.put(LOCATOR_KEY, locator);
         } catch (IOException ex) {
-          fail("While starting locator on port " + locatorPort, ex);
+          Assert.fail("While starting locator on port " + locatorPort, ex);
         }
       }
     });
@@ -302,7 +311,7 @@ public abstract class LocatorTestBase  extends DistributedTestCase {
   public String getLocatorString(Host host, int[] locatorPorts) {
     StringBuffer str = new StringBuffer();
     for(int i = 0; i < locatorPorts.length; i++) {
-      str.append(getServerHostName(host))
+      str.append(NetworkUtils.getServerHostName(host))
           .append("[")
           .append(locatorPorts[i])
           .append("]");
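
The tearDown2() override in this base class becomes a preTearDown() override. A condensed sketch of the new shape, assuming preTearDown() is the hook DistributedTestCase now calls before its own tear-down, as these hunks imply; ExampleTestBase and its post-tear-down hook are hypothetical names mirroring LocatorTestBase:

  import com.gemstone.gemfire.test.dunit.DistributedTestCase;
  import com.gemstone.gemfire.test.dunit.Invoke;
  import com.gemstone.gemfire.test.dunit.SerializableRunnable;

  public abstract class ExampleTestBase extends DistributedTestCase {

    public ExampleTestBase(String name) {
      super(name);
    }

    @Override
    protected final void preTearDown() throws Exception {
      SerializableRunnable tearDown = new SerializableRunnable("tearDown") {
        public void run() {
          // close caches, stop locators, and so on
        }
      };
      Invoke.invokeInEveryVM(tearDown);  // remote dunit VMs first
      tearDown.run();                    // then the controller VM
      postTearDownExampleTestBase();     // subclass hook, like postTearDownLocatorTestBase() above
    }

    protected void postTearDownExampleTestBase() throws Exception {
    }
  }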

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/internal/SSLNoClientAuthDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/internal/SSLNoClientAuthDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/internal/SSLNoClientAuthDUnitTest.java
index 1c79129..bb1cc09 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/internal/SSLNoClientAuthDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/internal/SSLNoClientAuthDUnitTest.java
@@ -266,14 +266,12 @@ public class SSLNoClientAuthDUnitTest extends DistributedTestCase {
     }
   }
   
-  public void tearDown2() throws Exception
-  {
+  @Override
+  protected final void preTearDown() throws Exception {
     final Host host = Host.getHost(0);
     VM serverVM = host.getVM(1);
     VM clientVM = host.getVM(2);
     clientVM.invoke(SSLNoClientAuthDUnitTest.class, "closeClientCacheTask");
     serverVM.invoke(SSLNoClientAuthDUnitTest.class, "closeCacheTask");
-    super.tearDown2();
   }
-
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/internal/pooling/ConnectionManagerJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/internal/pooling/ConnectionManagerJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/internal/pooling/ConnectionManagerJUnitTest.java
index a398975..d0b2991 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/internal/pooling/ConnectionManagerJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/internal/pooling/ConnectionManagerJUnitTest.java
@@ -59,8 +59,9 @@ import com.gemstone.gemfire.internal.cache.PoolStats;
 import com.gemstone.gemfire.internal.cache.tier.sockets.ServerQueueStatus;
 import com.gemstone.gemfire.internal.logging.InternalLogWriter;
 import com.gemstone.gemfire.internal.logging.LocalLogWriter;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
@@ -159,7 +160,7 @@ public class ConnectionManagerJUnitTest {
         return "waiting for manager " + descrip; 
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 200, 200, true);
+    Wait.waitForCriterion(ev, 200, 200, true);
   }
   
   @Test
@@ -426,7 +427,7 @@ public class ConnectionManagerJUnitTest {
     }
     
     for(int i = 0; i < updaterCount; i++) {
-      DistributedTestCase.join(updaters[i], 30 * 1000, null);
+      ThreadUtils.join(updaters[i], 30 * 1000);
     }
 
     if(exception.get() !=null) {
@@ -490,7 +491,7 @@ public class ConnectionManagerJUnitTest {
     }
     
     for(int i = 0; i < updaterCount; i++) {
-      DistributedTestCase.join(updaters[i], 30 * 1000, null);
+      ThreadUtils.join(updaters[i], 30 * 1000);
     }
 
     if(exception.get() !=null) {
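
ThreadUtils.join(Thread, long) replaces the old three-argument DistributedTestCase.join that also took a log writer. A small sketch of the replacement; the worker body is a hypothetical stand-in for the updater threads this test starts:

  import com.gemstone.gemfire.test.dunit.ThreadUtils;

  // inside a test method
  Thread[] updaters = new Thread[5];
  for (int i = 0; i < updaters.length; i++) {
    updaters[i] = new Thread(new Runnable() {
      public void run() {
        // hypothetical work against the connection manager under test
      }
    });
    updaters[i].start();
  }
  for (int i = 0; i < updaters.length; i++) {
    ThreadUtils.join(updaters[i], 30 * 1000);  // wait up to 30 s for each updater to finish
  }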

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/management/MemoryThresholdsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/management/MemoryThresholdsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/management/MemoryThresholdsDUnitTest.java
index b9e213a..4e4b10f 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/management/MemoryThresholdsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/management/MemoryThresholdsDUnitTest.java
@@ -79,12 +79,20 @@ import com.gemstone.gemfire.internal.cache.control.ResourceAdvisor;
 import com.gemstone.gemfire.internal.cache.control.ResourceListener;
 import com.gemstone.gemfire.internal.cache.control.TestMemoryThresholdListener;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Tests the Heap Memory thresholds of {@link ResourceManager}
@@ -129,15 +137,14 @@ public class MemoryThresholdsDUnitTest extends ClientServerTestCase {
   @Override
   public void setUp() throws Exception {
     super.setUp();
-    invokeInEveryVM(this.setHeapMemoryMonitorTestMode);
-    addExpectedException(expectedEx);
-    addExpectedException(expectedBelow);
+    Invoke.invokeInEveryVM(this.setHeapMemoryMonitorTestMode);
+    IgnoredException.addIgnoredException(expectedEx);
+    IgnoredException.addIgnoredException(expectedBelow);
   }
 
   @Override
-  public void tearDown2() throws Exception {
-    invokeInEveryVM(resetResourceManager);
-    super.tearDown2();
+  protected void preTearDownClientServerTestCase() throws Exception {
+    Invoke.invokeInEveryVM(resetResourceManager);
   }
 
   public void testPRClientPutRejection() throws Exception {
@@ -263,7 +270,7 @@ public class MemoryThresholdsDUnitTest extends ClientServerTestCase {
             return dr.getMemoryThresholdReachedMembers().size() == 0;
           }
         };
-        waitForCriterion(wc, 30000, 10, true);
+        Wait.waitForCriterion(wc, 30000, 10, true);
         return null;
       }
     });
@@ -474,7 +481,7 @@ public class MemoryThresholdsDUnitTest extends ClientServerTestCase {
     verifyListenerValue(server1, MemoryState.EVICTION, 2, true);
     verifyListenerValue(server1, MemoryState.NORMAL, 1, true);
     
-    this.getLogWriter().info("before NORMAL->CRITICAL->NORMAL");
+    LogWriterUtils.getLogWriter().info("before NORMAL->CRITICAL->NORMAL");
     //NORMAL -> EVICTION -> NORMAL
     server2.invoke(new SerializableCallable("NORMAL->CRITICAL->NORMAL") {
       public Object call() throws Exception {
@@ -484,7 +491,7 @@ public class MemoryThresholdsDUnitTest extends ClientServerTestCase {
         return null;
       }
     });
-    this.getLogWriter().info("after NORMAL->CRITICAL->NORMAL");
+    LogWriterUtils.getLogWriter().info("after NORMAL->CRITICAL->NORMAL");
 
     verifyListenerValue(server2, MemoryState.CRITICAL, 2, true);
     verifyListenerValue(server2, MemoryState.EVICTION, 3, true);
@@ -646,7 +653,7 @@ public class MemoryThresholdsDUnitTest extends ClientServerTestCase {
             return keyFoundOnSickMember && caughtException;
           }
         };
-        waitForCriterion(wc, 30000, 10, true);
+        Wait.waitForCriterion(wc, 30000, 10, true);
         return null;
       }
     });
@@ -661,7 +668,7 @@ public class MemoryThresholdsDUnitTest extends ClientServerTestCase {
       server1.invoke(new SerializableCallable("local destroy sick member") {
         public Object call() throws Exception {
           Region r = getRootRegion().getSubregion(regionName);
-          getLogWriter().info("PRLocalDestroy");
+          LogWriterUtils.getLogWriter().info("PRLocalDestroy");
           r.localDestroyRegion();
           return null;
         }
@@ -699,7 +706,7 @@ public class MemoryThresholdsDUnitTest extends ClientServerTestCase {
             return done;
           }
         };
-        waitForCriterion(wc, 30000, 10, true);
+        Wait.waitForCriterion(wc, 30000, 10, true);
         return null;
       }
     });
@@ -814,7 +821,7 @@ public class MemoryThresholdsDUnitTest extends ClientServerTestCase {
   }
 
   public void testDRFunctionExecutionRejection() throws Exception {
-    addExpectedException("LowMemoryException");
+    IgnoredException.addIgnoredException("LowMemoryException");
     final Host host = Host.getHost(0);
     final VM server1 = host.getVM(0);
     final VM server2 = host.getVM(1);
@@ -832,7 +839,7 @@ public class MemoryThresholdsDUnitTest extends ClientServerTestCase {
     
     final RejectFunction function = new RejectFunction();
     final RejectFunction function2 = new RejectFunction("noRejFunc", false);
-    invokeInEveryVM(new SerializableCallable("register function") {
+    Invoke.invokeInEveryVM(new SerializableCallable("register function") {
       public Object call() throws Exception {
         FunctionService.registerFunction(function);
         FunctionService.registerFunction(function2);
@@ -915,7 +922,7 @@ public class MemoryThresholdsDUnitTest extends ClientServerTestCase {
           fail("expected low memory exception was not thrown");
         } catch (FunctionException e) {
           if (!(e.getCause().getCause() instanceof LowMemoryException)) {
-            fail("unexpected exception ", e);
+            Assert.fail("unexpected exception ", e);
           }
           //expected
         }
@@ -928,7 +935,7 @@ public class MemoryThresholdsDUnitTest extends ClientServerTestCase {
 
   // this test is DISABLED due to intermittent failures.  See bug #52222
   public void disabledtestPRFunctionExecutionRejection() throws Exception {
-    addExpectedException("LowMemoryException");
+    IgnoredException.addIgnoredException("LowMemoryException");
     final Host host = Host.getHost(0);
     final VM accessor = host.getVM(0);
     final VM server1 = host.getVM(1);
@@ -961,7 +968,7 @@ public class MemoryThresholdsDUnitTest extends ClientServerTestCase {
     
     final RejectFunction function = new RejectFunction();
     final RejectFunction function2 = new RejectFunction("noRejFunc", false);
-    invokeInEveryVM(new SerializableCallable("register function") {
+    Invoke.invokeInEveryVM(new SerializableCallable("register function") {
       public Object call() throws Exception {
         FunctionService.registerFunction(function);
         FunctionService.registerFunction(function2);
@@ -1046,7 +1053,7 @@ public class MemoryThresholdsDUnitTest extends ClientServerTestCase {
           fail("expected low memory exception was not thrown");
         } catch (FunctionException e) {
           if (!(e.getCause().getCause() instanceof LowMemoryException)) {
-            fail("unexpected exception", e);
+            Assert.fail("unexpected exception", e);
           }
           //expected
         }
@@ -1141,7 +1148,7 @@ public class MemoryThresholdsDUnitTest extends ClientServerTestCase {
     
     final RejectFunction function = new RejectFunction();
     final RejectFunction function2 = new RejectFunction("noRejFunc", false);
-    invokeInEveryVM(new SerializableCallable("register function") {
+    Invoke.invokeInEveryVM(new SerializableCallable("register function") {
       public Object call() throws Exception {
         FunctionService.registerFunction(function);
         FunctionService.registerFunction(function2);
@@ -1187,7 +1194,7 @@ public class MemoryThresholdsDUnitTest extends ClientServerTestCase {
           fail("expected LowMemoryExcception was not thrown");
         } catch (ServerOperationException e) {
           if (!(e.getCause().getMessage().matches(".*low.*memory.*"))) {
-            fail("unexpected exception", e);
+            Assert.fail("unexpected exception", e);
           }
           //expected
         }
@@ -1280,7 +1287,7 @@ public class MemoryThresholdsDUnitTest extends ClientServerTestCase {
         getCache();
 
         PoolFactory pf = PoolManager.createFactory();
-        pf.addServer(getServerHostName(server.getHost()), serverPort);
+        pf.addServer(NetworkUtils.getServerHostName(server.getHost()), serverPort);
         pf.create("pool1");
         
         AttributesFactory af = new AttributesFactory();
@@ -1305,14 +1312,14 @@ public class MemoryThresholdsDUnitTest extends ClientServerTestCase {
           }
         } catch (ServerOperationException ex) {
           if (!catchServerException) {
-            fail("Unexpected exception: ", ex);
+            Assert.fail("Unexpected exception: ", ex);
           }
           if (!(ex.getCause() instanceof LowMemoryException)) {
-            fail("Unexpected exception: ", ex);
+            Assert.fail("Unexpected exception: ", ex);
           }
         } catch (LowMemoryException low) {
           if (!catchLowMemoryException) {
-            fail("Unexpected exception: ", low);
+            Assert.fail("Unexpected exception: ", low);
           }
         }
         return null;
@@ -1342,18 +1349,18 @@ public class MemoryThresholdsDUnitTest extends ClientServerTestCase {
           }
         } catch (ServerOperationException ex) {
           if (!catchServerException) {
-            fail("Unexpected exception: ", ex);
+            Assert.fail("Unexpected exception: ", ex);
           }
           if (!(ex.getCause() instanceof LowMemoryException)) {
-            fail("Unexpected exception: ", ex);
+            Assert.fail("Unexpected exception: ", ex);
           }
           for(Integer me: temp.keySet()) {
             assertFalse("Key " + me + " should not exist", r.containsKey(me));
           }
         } catch (LowMemoryException low) {
-          getLogWriter().info("Caught LowMemoryException", low);
+          LogWriterUtils.getLogWriter().info("Caught LowMemoryException", low);
           if (!catchLowMemoryException) {
-            fail("Unexpected exception: ", low);
+            Assert.fail("Unexpected exception: ", low);
           }
           for(Integer me: temp.keySet()) {
             assertFalse("Key " + me + " should not exist", r.containsKey(me));
@@ -1535,7 +1542,7 @@ public class MemoryThresholdsDUnitTest extends ClientServerTestCase {
             throw new IllegalStateException("Unknown memory state");
         }
         if (useWaitCriterion) {
-          waitForCriterion(wc, 30000, 10, true);
+          Wait.waitForCriterion(wc, 30000, 10, true);
         }
         return null;
       }
@@ -1555,7 +1562,7 @@ public class MemoryThresholdsDUnitTest extends ClientServerTestCase {
             return numberOfProfiles == ra.adviseGeneric().size();
           }
         };
-        waitForCriterion(wc, 30000, 10, true);
+        Wait.waitForCriterion(wc, 30000, 10, true);
         return null;
       }
     });
@@ -1570,7 +1577,7 @@ public class MemoryThresholdsDUnitTest extends ClientServerTestCase {
 
   protected Properties getServerProperties() {
     Properties p = new Properties();
-    p.setProperty(DistributionConfig.LOCATORS_NAME, "localhost["+getDUnitLocatorPort()+"]");
+    p.setProperty(DistributionConfig.LOCATORS_NAME, "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
     return p;
   }
 
@@ -1657,7 +1664,7 @@ public class MemoryThresholdsDUnitTest extends ClientServerTestCase {
       }
     };
     final String tenuredPoolName = HeapMemoryMonitor.getTenuredMemoryPoolMXBean().getName();
-    getLogWriter().info("TenuredPoolName:"+tenuredPoolName);
+    LogWriterUtils.getLogWriter().info("TenuredPoolName:"+tenuredPoolName);
     final List list = internalSystem.getStatsList();
     assertFalse(list.isEmpty());
     
@@ -1667,10 +1674,10 @@ public class MemoryThresholdsDUnitTest extends ClientServerTestCase {
         int i=0;
         synchronized (list) {
           for (Object o : list) {
-            getLogWriter().info("List:"+(++i)+":"+o);
+            LogWriterUtils.getLogWriter().info("List:"+(++i)+":"+o);
             if (o instanceof StatisticsImpl) {
               StatisticsImpl si = (StatisticsImpl)o;
-              getLogWriter().info("stat:"+si.getTextId());
+              LogWriterUtils.getLogWriter().info("stat:"+si.getTextId());
               if (si.getTextId().contains(tenuredPoolName)) {
                 sampler.addLocalStatListener(l, si, "currentUsedMemory");
                 return true;
@@ -1684,7 +1691,7 @@ public class MemoryThresholdsDUnitTest extends ClientServerTestCase {
         return "Waiting for " + tenuredPoolName + " statistics to be added to create listener for";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 5000, 10, true);
+    Wait.waitForCriterion(wc, 5000, 10, true);
     
     assertTrue("expected at least one stat listener, found " +
         sampler.getLocalListeners().size(),
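
Throughout this test, addExpectedException(...) becomes IgnoredException.addIgnoredException(...). A minimal sketch of the new call; the intent, keeping the dunit log checker from failing the run on error text the test produces on purpose, is inferred from how these hunks use it:

  import com.gemstone.gemfire.test.dunit.IgnoredException;

  // in setUp(), or at the top of a test that deliberately drives members into a low-memory state
  IgnoredException.addIgnoredException("LowMemoryException");
  // any other expected error text can be registered the same way, e.g. an exception class name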

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/management/MemoryThresholdsOffHeapDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/management/MemoryThresholdsOffHeapDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/management/MemoryThresholdsOffHeapDUnitTest.java
index 43b53a3..4a205f3 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/management/MemoryThresholdsOffHeapDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/management/MemoryThresholdsOffHeapDUnitTest.java
@@ -68,12 +68,20 @@ import com.gemstone.gemfire.internal.cache.control.ResourceListener;
 import com.gemstone.gemfire.internal.cache.control.TestMemoryThresholdListener;
 import com.gemstone.gemfire.internal.cache.partitioned.RegionAdvisor;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Tests the Off-Heap Memory thresholds of {@link ResourceManager}
@@ -101,16 +109,15 @@ public class MemoryThresholdsOffHeapDUnitTest extends ClientServerTestCase {
 
   @Override
   public void setUp() throws Exception {
-    addExpectedException(expectedEx);
-    addExpectedException(expectedBelow);
+    IgnoredException.addIgnoredException(expectedEx);
+    IgnoredException.addIgnoredException(expectedBelow);
   }
 
 
 
   @Override
-  public void tearDown2() throws Exception {
-    invokeInEveryVM(this.resetResourceManager);
-    super.tearDown2();
+  protected void preTearDownClientServerTestCase() throws Exception {
+    Invoke.invokeInEveryVM(this.resetResourceManager);
   }
 
   private SerializableCallable resetResourceManager = new SerializableCallable() {
@@ -495,7 +502,7 @@ public class MemoryThresholdsOffHeapDUnitTest extends ClientServerTestCase {
             return dr.getMemoryThresholdReachedMembers().size() == 0;
           }
         };
-        waitForCriterion(wc, 10000, 10, true);
+        Wait.waitForCriterion(wc, 10000, 10, true);
         return null;
       }
     });
@@ -596,7 +603,7 @@ public class MemoryThresholdsOffHeapDUnitTest extends ClientServerTestCase {
             return r.memoryThresholdReached.get();
           }
         };
-        waitForCriterion(wc, 30*1000, 10, true);
+        Wait.waitForCriterion(wc, 30*1000, 10, true);
         {
           Integer k = new Integer(2); 
           assertEquals(k.toString(), r.get(k, new Integer(expectedInvocations++)));
@@ -611,7 +618,7 @@ public class MemoryThresholdsOffHeapDUnitTest extends ClientServerTestCase {
             return !r.memoryThresholdReached.get();
           }
         };
-        waitForCriterion(wc, 30*1000, 10, true);
+        Wait.waitForCriterion(wc, 30*1000, 10, true);
         {
           Integer k = new Integer(3);
           assertEquals(k.toString(), r.get(k, new Integer(expectedInvocations++)));
@@ -660,7 +667,7 @@ public class MemoryThresholdsOffHeapDUnitTest extends ClientServerTestCase {
             return r.memoryThresholdReached.get();
           }
         };
-        waitForCriterion(wc, 30*1000, 10, true);
+        Wait.waitForCriterion(wc, 30*1000, 10, true);
         {
           Integer k = new Integer(5);
           assertEquals(k.toString(), r.get(k, new Integer(expectedInvocations++)));
@@ -675,7 +682,7 @@ public class MemoryThresholdsOffHeapDUnitTest extends ClientServerTestCase {
             return !r.memoryThresholdReached.get();
           }
         };
-        waitForCriterion(wc, 30*1000, 10, true);
+        Wait.waitForCriterion(wc, 30*1000, 10, true);
         {
           Integer k = new Integer(6);
           assertEquals(k.toString(), r.get(k, new Integer(expectedInvocations++)));
@@ -832,7 +839,7 @@ public class MemoryThresholdsOffHeapDUnitTest extends ClientServerTestCase {
             return keyFoundOnSickMember && caughtException;
           }
         };
-        waitForCriterion(wc, 10000, 10, true);
+        Wait.waitForCriterion(wc, 10000, 10, true);
         return null;
       }
     });
@@ -862,7 +869,7 @@ public class MemoryThresholdsOffHeapDUnitTest extends ClientServerTestCase {
         vm.invoke(new SerializableCallable("local destroy sick member") {
           public Object call() throws Exception {
             Region r = getRootRegion().getSubregion(regionName);
-            getLogWriter().info("PRLocalDestroy");
+            LogWriterUtils.getLogWriter().info("PRLocalDestroy");
             r.localDestroyRegion();
             return null;
           }
@@ -906,7 +913,7 @@ public class MemoryThresholdsOffHeapDUnitTest extends ClientServerTestCase {
             return done;
           }
         };
-        waitForCriterion(wc, 10000, 10, true);
+        Wait.waitForCriterion(wc, 10000, 10, true);
         return null;
       }
     });
@@ -1001,7 +1008,7 @@ public class MemoryThresholdsOffHeapDUnitTest extends ClientServerTestCase {
             return false;
           }
         };
-        waitForCriterion(wc, 30*1000, 10, true);
+        Wait.waitForCriterion(wc, 30*1000, 10, true);
         
         final Integer k = new Integer(2); // reload with same key again and again
         final Integer expectedInvocations3 = new Integer(expectedInvocations.getAndIncrement());
@@ -1046,7 +1053,7 @@ public class MemoryThresholdsOffHeapDUnitTest extends ClientServerTestCase {
             return !r.memoryThresholdReached.get();
           }
         };
-        waitForCriterion(wc, 30*1000, 10, true);
+        Wait.waitForCriterion(wc, 30*1000, 10, true);
         
         Integer k = new Integer(3); // same key as previously used, this time is should stick
         Integer expectedInvocations8 = new Integer(expectedInvocations.incrementAndGet());
@@ -1196,7 +1203,7 @@ public class MemoryThresholdsOffHeapDUnitTest extends ClientServerTestCase {
             return r.memoryThresholdReached.get();
           }
         };
-        waitForCriterion(wc, 30*1000, 10, true);
+        Wait.waitForCriterion(wc, 30*1000, 10, true);
         { 
           Integer k = new Integer(2);
           assertEquals(k.toString(), r.get(k));
@@ -1217,7 +1224,7 @@ public class MemoryThresholdsOffHeapDUnitTest extends ClientServerTestCase {
             return !r.memoryThresholdReached.get();
           }
         };
-        waitForCriterion(wc, 30*1000, 10, true);
+        Wait.waitForCriterion(wc, 30*1000, 10, true);
         
         {
           Integer k = new Integer(3);
@@ -1318,14 +1325,14 @@ public class MemoryThresholdsOffHeapDUnitTest extends ClientServerTestCase {
           }
         } catch (ServerOperationException ex) {
           if (!catchServerException) {
-            fail("Unexpected exception: ", ex);
+            Assert.fail("Unexpected exception: ", ex);
           }
           if (!(ex.getCause() instanceof LowMemoryException)) {
-            fail("Unexpected exception: ", ex);
+            Assert.fail("Unexpected exception: ", ex);
           }
         } catch (LowMemoryException low) {
           if (!catchLowMemoryException) {
-            fail("Unexpected exception: ", low);
+            Assert.fail("Unexpected exception: ", low);
           }
         }
         return null;
@@ -1355,18 +1362,18 @@ public class MemoryThresholdsOffHeapDUnitTest extends ClientServerTestCase {
           }
         } catch (ServerOperationException ex) {
           if (!catchServerException) {
-            fail("Unexpected exception: ", ex);
+            Assert.fail("Unexpected exception: ", ex);
           }
           if (!(ex.getCause() instanceof LowMemoryException)) {
-            fail("Unexpected exception: ", ex);
+            Assert.fail("Unexpected exception: ", ex);
           }
           for(Integer me: temp.keySet()) {
             assertFalse("Key " + me + " should not exist", r.containsKey(me));
           }
         } catch (LowMemoryException low) {
-          getLogWriter().info("Caught LowMemoryException", low);
+          LogWriterUtils.getLogWriter().info("Caught LowMemoryException", low);
           if (!catchLowMemoryException) {
-            fail("Unexpected exception: ", low);
+            Assert.fail("Unexpected exception: ", low);
           }
           for(Integer me: temp.keySet()) {
             assertFalse("Key " + me + " should not exist", r.containsKey(me));
@@ -1431,7 +1438,7 @@ public class MemoryThresholdsOffHeapDUnitTest extends ClientServerTestCase {
                 return null;
               }
             };
-            waitForCriterion(waitForCritical, 30*1000, 9, false);
+            Wait.waitForCriterion(waitForCritical, 30*1000, 9, false);
             th.validateUpdateStateAndSendEventBeforeProcess(bytesUsedAfterSmallKey + 943720 + 8, MemoryState.EVICTION_DISABLED_CRITICAL);
           } finally {
             ohm.testHook = null;
@@ -1472,7 +1479,7 @@ public class MemoryThresholdsOffHeapDUnitTest extends ClientServerTestCase {
             }
           };
         }
-        waitForCriterion(wc, 30000, 9, true);
+        Wait.waitForCriterion(wc, 30000, 9, true);
         getCache().getLoggerI18n().fine(removeExpectedExString);
         return bytesUsedAfterSmallKey;
       }
@@ -1510,7 +1517,7 @@ public class MemoryThresholdsOffHeapDUnitTest extends ClientServerTestCase {
             return ohm.getState().isNormal();
           }
         };
-        waitForCriterion(wc, 30000, 9, true);
+        Wait.waitForCriterion(wc, 30000, 9, true);
         getCache().getLogger().fine(MemoryThresholdsOffHeapDUnitTest.this.removeExpectedBelow);
         return;
       }
@@ -1648,7 +1655,7 @@ public class MemoryThresholdsOffHeapDUnitTest extends ClientServerTestCase {
         getCache();
 
         PoolFactory pf = PoolManager.createFactory();
-        pf.addServer(getServerHostName(server.getHost()), serverPort);
+        pf.addServer(NetworkUtils.getServerHostName(server.getHost()), serverPort);
         pf.create("pool1");
         
         AttributesFactory af = new AttributesFactory();
@@ -1781,7 +1788,7 @@ public class MemoryThresholdsOffHeapDUnitTest extends ClientServerTestCase {
           throw new IllegalStateException("Unknown memory state");
         }
         if (useWaitCriterion) {
-          waitForCriterion(wc, 5000, 10, true);
+          Wait.waitForCriterion(wc, 5000, 10, true);
         }
         return null;
       }
@@ -1801,7 +1808,7 @@ public class MemoryThresholdsOffHeapDUnitTest extends ClientServerTestCase {
             return numberOfProfiles == ra.adviseGeneric().size();
           }
         };
-        waitForCriterion(wc, 10000, 10, true);
+        Wait.waitForCriterion(wc, 10000, 10, true);
         return null;
       }
     });
@@ -1809,7 +1816,7 @@ public class MemoryThresholdsOffHeapDUnitTest extends ClientServerTestCase {
   
   private Properties getOffHeapProperties() {
     Properties p = new Properties();
-    p.setProperty(DistributionConfig.LOCATORS_NAME, "localhost["+getDUnitLocatorPort()+"]");
+    p.setProperty(DistributionConfig.LOCATORS_NAME, "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
     p.setProperty(DistributionConfig.OFF_HEAP_MEMORY_SIZE_NAME, "1m");
     return p;
   }
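
The client-side hunks above build their pools with NetworkUtils.getServerHostName(host) instead of the inherited helper. A condensed sketch of that wiring; serverPort is a hypothetical value that a real test gets back from its server-start task:

  import com.gemstone.gemfire.cache.client.PoolFactory;
  import com.gemstone.gemfire.cache.client.PoolManager;
  import com.gemstone.gemfire.test.dunit.Host;
  import com.gemstone.gemfire.test.dunit.NetworkUtils;

  // inside a client VM task
  int serverPort = 40404;  // hypothetical; normally returned by the startCacheServer invocation
  Host host = Host.getHost(0);
  PoolFactory pf = PoolManager.createFactory();
  pf.addServer(NetworkUtils.getServerHostName(host), serverPort);
  pf.create("pool1");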

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/management/ResourceManagerDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/management/ResourceManagerDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/management/ResourceManagerDUnitTest.java
index 19f0612..0595948 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/management/ResourceManagerDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/management/ResourceManagerDUnitTest.java
@@ -61,6 +61,7 @@ import com.gemstone.gemfire.internal.cache.partitioned.RemoveBucketMessage;
 import com.gemstone.gemfire.internal.cache.partitioned.RemoveBucketMessage.RemoveBucketResponse;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
 import com.gemstone.gemfire.internal.logging.LogService;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
@@ -1022,9 +1023,9 @@ public class ResourceManagerDUnitTest extends CacheTestCase {
             assertEquals(1, sr.size());
             assertEquals(value, sr.iterator().next());
           } catch (QueryException ex) {
-            fail("didn't expect a QueryException", ex);
+            Assert.fail("didn't expect a QueryException", ex);
           } catch (QueryInvalidException ex2) {
-            fail("didn't expect QueryInvalidException", ex2);
+            Assert.fail("didn't expect QueryInvalidException", ex2);
           }
         }
       });
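
Here the fail(String, Throwable) calls move to the dunit Assert class, presumably so the causing exception is reported together with the failure message. A minimal sketch; FailWithCauseSketch and runQuery() are hypothetical stand-ins for the test and the operation under test:

  import com.gemstone.gemfire.cache.query.QueryException;
  import com.gemstone.gemfire.test.dunit.Assert;

  public class FailWithCauseSketch {

    void runQuery() throws QueryException {
      // hypothetical operation under test
    }

    void exercise() {
      try {
        runQuery();
      } catch (QueryException ex) {
        Assert.fail("didn't expect a QueryException", ex);  // two-argument overload used above
      }
    }
  }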

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/mapInterface/PutAllGlobalLockJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/mapInterface/PutAllGlobalLockJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/mapInterface/PutAllGlobalLockJUnitTest.java
index 48f1c56..2f9be54 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/mapInterface/PutAllGlobalLockJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/mapInterface/PutAllGlobalLockJUnitTest.java
@@ -36,7 +36,7 @@ import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.cache.util.CacheListenerAdapter;
 import com.gemstone.gemfire.distributed.DistributedSystem;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 @Category(IntegrationTest.class)
@@ -79,7 +79,7 @@ public class PutAllGlobalLockJUnitTest {
         }
         try {
             testRegion.putAll(trialMap);
-            DistributedTestCase.join(this.thread, 30 * 1000, null);
+            ThreadUtils.join(this.thread, 30 * 1000);
             assertTrue(this.testOK);
         } catch (Exception e) {
             fail("Test has failed due to "+e);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/partition/PartitionRegionHelperDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/partition/PartitionRegionHelperDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/partition/PartitionRegionHelperDUnitTest.java
index f70e7a2..d09c6e0 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/partition/PartitionRegionHelperDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/partition/PartitionRegionHelperDUnitTest.java
@@ -43,10 +43,11 @@ import com.gemstone.gemfire.internal.cache.BucketRegion;
 import com.gemstone.gemfire.internal.cache.ForceReattemptException;
 import com.gemstone.gemfire.internal.cache.PartitionedRegion;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionHelper;
-import com.gemstone.gemfire.internal.cache.partitioned.fixed.FixedPartitioningTestBase;
 import com.gemstone.gemfire.internal.cache.partitioned.fixed.FixedPartitioningTestBase.Months_Accessor;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
@@ -249,7 +250,7 @@ public class PartitionRegionHelperDUnitTest extends CacheTestCase {
             date = sdf.parse(dateString);
           }
           catch (ParseException e) {
-            FixedPartitioningTestBase.fail("Exception Occured while parseing date", e);
+            Assert.fail("Exception occurred while parsing date", e);
           }
           String value = month.toString() + 10;
           region.put(date, value);
@@ -272,7 +273,7 @@ public class PartitionRegionHelperDUnitTest extends CacheTestCase {
             date = sdf.parse(dateString);
           }
           catch (ParseException e) {
-            FixedPartitioningTestBase.fail("Exception Occured while parseing date", e);
+            Assert.fail("Exception occurred while parsing date", e);
           }
           DistributedMember key1Pri = PartitionRegionHelper.getPrimaryMemberForKey(region, date);
           assertNotNull(key1Pri);
@@ -468,7 +469,7 @@ public class PartitionRegionHelperDUnitTest extends CacheTestCase {
           assertTrue(buk0.getBucketAdvisor().isPrimary());
         }
         catch (ForceReattemptException e) {
-          getLogWriter().severe(e);
+          LogWriterUtils.getLogWriter().severe(e);
           fail();
         }
       }
@@ -485,14 +486,14 @@ public class PartitionRegionHelperDUnitTest extends CacheTestCase {
           assertNotNull(k1e);
         }
         catch (ForceReattemptException e) {
-          getLogWriter().severe(e);
+          LogWriterUtils.getLogWriter().severe(e);
           fail();
         }
       }
     };
     for (DistributedMember bom: buk0AllMems) {
       VM v = d2v.get(bom);
-      getLogWriter().info("Visiting bucket owner member " + bom + " for key " + buk0Key1);
+      LogWriterUtils.getLogWriter().info("Visiting bucket owner member " + bom + " for key " + buk0Key1);
       v.invoke(assertHasBucket);
     }
 
@@ -507,14 +508,14 @@ public class PartitionRegionHelperDUnitTest extends CacheTestCase {
           assertFalse(buk0.getBucketAdvisor().isPrimary());
         }
         catch (ForceReattemptException e) {
-          getLogWriter().severe(e);
+          LogWriterUtils.getLogWriter().severe(e);
           fail();
         }
       }
     };
     for (DistributedMember redm: buk0Redundants) {
       VM v = d2v.get(redm);
-      getLogWriter().info("Visiting redundant member " + redm + " for key " + buk0Key1);
+      LogWriterUtils.getLogWriter().info("Visiting redundant member " + redm + " for key " + buk0Key1);
       v.invoke(assertRed);
     }
   }
@@ -588,7 +589,7 @@ public class PartitionRegionHelperDUnitTest extends CacheTestCase {
         } catch(IllegalStateException expected) {
           System.err.println(expected);
         } catch (UnknownHostException e) {
-          fail("Unknown host", e);
+          Assert.fail("Unknown host", e);
         }
         assertHasMembers(PartitionRegionHelper.getAllMembersForKey(region, 1), member0, member1);
 
@@ -668,7 +669,7 @@ public class PartitionRegionHelperDUnitTest extends CacheTestCase {
         } catch(IllegalStateException expected) {
           System.err.println(expected);
         } catch (UnknownHostException e) {
-          fail("Unknown host", e);
+          Assert.fail("Unknown host", e);
         }
         assertHasMembers(PartitionRegionHelper.getAllMembersForKey(region, 1), member0, member1);
         
@@ -679,7 +680,7 @@ public class PartitionRegionHelperDUnitTest extends CacheTestCase {
         } catch(IllegalStateException expected) {
           System.err.println(expected);
         } catch (UnknownHostException e) {
-          fail("Unknown host", e);
+          Assert.fail("Unknown host", e);
         }
         assertHasMembers(PartitionRegionHelper.getAllMembersForKey(region, 1), member0, member1);
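
getLogWriter() now comes from LogWriterUtils rather than the test base class; the call sites otherwise read the same. A short sketch with hypothetical values standing in for the member and key used above:

  import com.gemstone.gemfire.test.dunit.LogWriterUtils;

  String memberName = "server1";  // hypothetical values for illustration
  int key = 42;
  LogWriterUtils.getLogWriter().info("Visiting bucket owner member " + memberName + " for key " + key);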
         

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqQueryTestListener.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqQueryTestListener.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqQueryTestListener.java
index 8c43e47..e402671 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqQueryTestListener.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqQueryTestListener.java
@@ -30,8 +30,8 @@ import com.gemstone.gemfire.cache.client.Pool;
 import com.gemstone.gemfire.cache.query.CqEvent;
 import com.gemstone.gemfire.cache.query.CqStatusListener;
 import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * @author rmadduri
@@ -238,7 +238,7 @@ public class CqQueryTestListener implements CqStatusListener {
         return "never got create event for CQ " + CqQueryTestListener.this.cqName + " key " + key;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, MAX_TIME, 200, true);
+    Wait.waitForCriterion(ev, MAX_TIME, 200, true);
     return true;
   }
 
@@ -253,7 +253,7 @@ public class CqQueryTestListener implements CqStatusListener {
           " expected: " + total + " receieved: " + CqQueryTestListener.this.totalEventCount;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, MAX_TIME, 200, true);
+    Wait.waitForCriterion(ev, MAX_TIME, 200, true);
     return true;
   }
   
@@ -266,7 +266,7 @@ public class CqQueryTestListener implements CqStatusListener {
         return "never got destroy event for key " + key + " in CQ " + CqQueryTestListener.this.cqName;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, MAX_TIME, 200, true);
+    Wait.waitForCriterion(ev, MAX_TIME, 200, true);
     return true;
   }
   
@@ -279,7 +279,7 @@ public class CqQueryTestListener implements CqStatusListener {
         return "never got invalidate event for CQ " + CqQueryTestListener.this.cqName;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, MAX_TIME, 200, true);
+    Wait.waitForCriterion(ev, MAX_TIME, 200, true);
     return true;
   }
   
@@ -292,7 +292,7 @@ public class CqQueryTestListener implements CqStatusListener {
         return "never got update event for CQ " + CqQueryTestListener.this.cqName;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, MAX_TIME, 200, true);
+    Wait.waitForCriterion(ev, MAX_TIME, 200, true);
     return true;
   }
 
@@ -305,7 +305,7 @@ public class CqQueryTestListener implements CqStatusListener {
         return "never got close event for CQ " + CqQueryTestListener.this.cqName;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, MAX_TIME, 200, true);
+    Wait.waitForCriterion(ev, MAX_TIME, 200, true);
     return true;
   }
   
@@ -318,7 +318,7 @@ public class CqQueryTestListener implements CqStatusListener {
         return "never got region clear event for CQ " + CqQueryTestListener.this.cqName;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, MAX_TIME, 200, true);
+    Wait.waitForCriterion(ev, MAX_TIME, 200, true);
     return true;
   }
 
@@ -331,7 +331,7 @@ public class CqQueryTestListener implements CqStatusListener {
         return "never got region invalidate event for CQ " + CqQueryTestListener.this.cqName;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, MAX_TIME, 200, true);
+    Wait.waitForCriterion(ev, MAX_TIME, 200, true);
     return true;
   }
   
@@ -354,7 +354,7 @@ public class CqQueryTestListener implements CqStatusListener {
         return "never got create error for CQ " + CqQueryTestListener.this.cqName + " messaged " + expectedMessage;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, MAX_TIME, 200, true);
+    Wait.waitForCriterion(ev, MAX_TIME, 200, true);
     return true;
   }
   
@@ -368,7 +368,7 @@ public class CqQueryTestListener implements CqStatusListener {
           " expected: " + total + " received: " + CqQueryTestListener.this.cqsDisconnectedCount;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, MAX_TIME, 200, true);
+    Wait.waitForCriterion(ev, MAX_TIME, 200, true);
     return true;
   }
   
@@ -382,7 +382,7 @@ public class CqQueryTestListener implements CqStatusListener {
           " expected: " + total + " receieved: " + CqQueryTestListener.this.cqsConnectedCount;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, MAX_TIME, 200, true);
+    Wait.waitForCriterion(ev, MAX_TIME, 200, true);
     return true;
   }
   

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/CompactRangeIndexDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/CompactRangeIndexDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/CompactRangeIndexDUnitTest.java
index f579e58..81b545f 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/CompactRangeIndexDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/CompactRangeIndexDUnitTest.java
@@ -25,11 +25,15 @@ import com.gemstone.gemfire.cache.query.internal.index.IndexManager;
 import com.gemstone.gemfire.cache.query.internal.index.IndexManager.TestHook;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable.CacheSerializableRunnableException;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 public class CompactRangeIndexDUnitTest extends DistributedTestCase{
 
@@ -43,7 +47,7 @@ public class CompactRangeIndexDUnitTest extends DistributedTestCase{
   public void setUp() throws Exception {
     super.setUp();
     getSystem();
-    invokeInEveryVM(new SerializableRunnable("getSystem") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
       public void run() {
         getSystem();
       }
@@ -51,7 +55,7 @@ public class CompactRangeIndexDUnitTest extends DistributedTestCase{
     Host host = Host.getHost(0);
     vm0 = host.getVM(0);
     utils = new QueryTestUtils();
-    utils.createServer(vm0, getAllDistributedSystemProperties(new Properties()));
+    utils.createServer(vm0, DistributedTestUtils.getAllDistributedSystemProperties(new Properties()));
     utils.createReplicateRegion("exampleRegion", vm0);
     utils.createIndex(vm0,"type", "\"type\"", "/exampleRegion");
   }
@@ -145,7 +149,7 @@ public class CompactRangeIndexDUnitTest extends DistributedTestCase{
     });
     as0.join();
     if(as0.exceptionOccurred()){
-        fail("Query execution failed.", as0.getException());
+        Assert.fail("Query execution failed.", as0.getException());
     }
    
   }
@@ -164,7 +168,8 @@ public class CompactRangeIndexDUnitTest extends DistributedTestCase{
    
   }
   
-  public void tearDown2() throws Exception{
+  @Override
+  protected final void preTearDown() throws Exception {
     Thread.sleep(5000);
     removeHook();
     utils.closeServer(vm0);
@@ -191,7 +196,7 @@ public class CompactRangeIndexDUnitTest extends DistributedTestCase{
     @Override
     public void hook(int spot) throws RuntimeException {
      if(spot == 11){
-         pause(10);
+         Wait.pause(10);
      }
    }
   }

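The setUp() changes in this file follow the same one-for-one mapping: invokeInEveryVM() now lives on Invoke, getAllDistributedSystemProperties() on DistributedTestUtils, and the short delay in the index test hook comes from Wait.pause(). A sketch of the resulting setUp(), assuming the QueryTestUtils helper and vm0 field used above:

    import java.util.Properties;

    import com.gemstone.gemfire.cache.query.QueryTestUtils;
    import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
    import com.gemstone.gemfire.test.dunit.Host;
    import com.gemstone.gemfire.test.dunit.Invoke;
    import com.gemstone.gemfire.test.dunit.SerializableRunnable;
    import com.gemstone.gemfire.test.dunit.VM;

    public void setUp() throws Exception {
      super.setUp();
      getSystem();
      // connect every dunit VM to the distributed system
      Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
        public void run() {
          getSystem();
        }
      });
      VM vm0 = Host.getHost(0).getVM(0);
      QueryTestUtils utils = new QueryTestUtils();
      // the dunit-wide default properties now come from DistributedTestUtils
      utils.createServer(vm0, DistributedTestUtils.getAllDistributedSystemProperties(new Properties()));
    }
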
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/CqTimeTestListener.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/CqTimeTestListener.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/CqTimeTestListener.java
index 5f3d45d..3d3a2e4 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/CqTimeTestListener.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/CqTimeTestListener.java
@@ -25,8 +25,8 @@ import com.gemstone.gemfire.cache.Operation;
 import com.gemstone.gemfire.cache.query.CqEvent;
 import com.gemstone.gemfire.cache.query.CqListener;
 import com.gemstone.gemfire.cache.query.data.Portfolio;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * @author anil.
@@ -198,7 +198,7 @@ public class CqTimeTestListener implements CqListener {
         return "never got create event for CQ " + CqTimeTestListener.this.cqName;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, MAX_TIME, 200, true);
+    Wait.waitForCriterion(ev, MAX_TIME, 200, true);
     return true;
   }
   
@@ -211,7 +211,7 @@ public class CqTimeTestListener implements CqListener {
         return "never got destroy event for CQ " + CqTimeTestListener.this.cqName;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, MAX_TIME, 200, true);
+    Wait.waitForCriterion(ev, MAX_TIME, 200, true);
     return true;
   }
   
@@ -224,7 +224,7 @@ public class CqTimeTestListener implements CqListener {
         return "never got invalidate event for CQ " + CqTimeTestListener.this.cqName;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, MAX_TIME, 200, true);
+    Wait.waitForCriterion(ev, MAX_TIME, 200, true);
     return true;
   }
   
@@ -237,7 +237,7 @@ public class CqTimeTestListener implements CqListener {
         return "never got update event for CQ " + CqTimeTestListener.this.cqName;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, MAX_TIME, 200, true);
+    Wait.waitForCriterion(ev, MAX_TIME, 200, true);
     return true;
   }
 
@@ -250,7 +250,7 @@ public class CqTimeTestListener implements CqListener {
         return "never got close event for CQ " + CqTimeTestListener.this.cqName;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, MAX_TIME, 200, true);
+    Wait.waitForCriterion(ev, MAX_TIME, 200, true);
     return true;
   }
   

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/HashIndexDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/HashIndexDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/HashIndexDUnitTest.java
index afd2119..38721be 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/HashIndexDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/HashIndexDUnitTest.java
@@ -22,7 +22,9 @@ import com.gemstone.gemfire.cache.CacheException;
 import com.gemstone.gemfire.cache.query.QueryTestUtils;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -38,7 +40,7 @@ public class HashIndexDUnitTest extends DistributedTestCase{
   public void setUp() throws Exception {
     super.setUp();
     getSystem();
-    invokeInEveryVM(new SerializableRunnable("getSystem") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
       public void run() {
         getSystem();
       }
@@ -46,7 +48,7 @@ public class HashIndexDUnitTest extends DistributedTestCase{
     Host host = Host.getHost(0);
     vm0 = host.getVM(0);
     utils = new QueryTestUtils();
-    utils.createServer(vm0, getAllDistributedSystemProperties(new Properties()));
+    utils.createServer(vm0, DistributedTestUtils.getAllDistributedSystemProperties(new Properties()));
     utils.createReplicateRegion("exampleRegion", vm0);
     utils.createHashIndex(vm0,"ID", "r.ID", "/exampleRegion r");
   }
@@ -122,9 +124,9 @@ public class HashIndexDUnitTest extends DistributedTestCase{
     });
   }
   
-  public void tearDown2() throws Exception{
+  @Override
+  protected final void preTearDown() throws Exception {
     Thread.sleep(5000);
     utils.closeServer(vm0);
   }
-
 }

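The teardown hunks show the other half of the refactoring: the old public tearDown2() override is replaced by the protected preTearDown() template method, and DistributedTestCase drives the rest of the teardown itself. A sketch of the new shape, assuming the utils and vm0 fields of the tests above:

    @Override
    protected final void preTearDown() throws Exception {
      // test-specific cleanup now runs before the framework's own teardown
      Thread.sleep(5000);
      utils.closeServer(vm0);
    }
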
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/HelperTestCase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/HelperTestCase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/HelperTestCase.java
index 08a4882..f4132da 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/HelperTestCase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/HelperTestCase.java
@@ -46,6 +46,7 @@ import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
@@ -239,7 +240,7 @@ public class HelperTestCase extends CacheTestCase {
         
         final ClientCacheFactory ccf = new ClientCacheFactory(properties);
         for (int i = 0; i < servers.length; i++) {
-          ccf.addPoolServer(getServerHostName(servers[i].getHost()), ports[i]);
+          ccf.addPoolServer(NetworkUtils.getServerHostName(servers[i].getHost()), ports[i]);
         }
         ccf.setPoolSubscriptionEnabled(true);
         ccf.setPoolSubscriptionRedundancy(redundancyLevel);

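getServerHostName() moves to NetworkUtils in the same way, and the client pool is still wired against the dunit host name. A sketch of a pool-building helper, assuming hypothetical servers (VM[]), ports (int[]), properties, and redundancy parameters like those in the hunk above:

    import java.util.Properties;

    import com.gemstone.gemfire.cache.client.ClientCache;
    import com.gemstone.gemfire.cache.client.ClientCacheFactory;
    import com.gemstone.gemfire.test.dunit.NetworkUtils;
    import com.gemstone.gemfire.test.dunit.VM;

    private ClientCache createClientCache(Properties properties, VM[] servers, int[] ports, int redundancy) {
      ClientCacheFactory ccf = new ClientCacheFactory(properties);
      for (int i = 0; i < servers.length; i++) {
        // resolve the server host through the extracted NetworkUtils helper
        ccf.addPoolServer(NetworkUtils.getServerHostName(servers[i].getHost()), ports[i]);
      }
      ccf.setPoolSubscriptionEnabled(true);
      ccf.setPoolSubscriptionRedundancy(redundancy);
      return ccf.create();
    }
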

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PutAllCallBkSingleVMDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PutAllCallBkSingleVMDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PutAllCallBkSingleVMDUnitTest.java
index 572ad53..8c85ebd 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PutAllCallBkSingleVMDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PutAllCallBkSingleVMDUnitTest.java
@@ -39,8 +39,10 @@ import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.cache.util.CacheListenerAdapter;
 import com.gemstone.gemfire.cache.util.CacheWriterAdapter;
 import com.gemstone.gemfire.distributed.DistributedSystem;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 public class PutAllCallBkSingleVMDUnitTest extends DistributedTestCase{
@@ -72,16 +74,16 @@ public class PutAllCallBkSingleVMDUnitTest extends DistributedTestCase{
       VM vm1 = host.getVM(1);
       vm0.invoke(PutAllCallBkSingleVMDUnitTest.class, "createCache");
       vm1.invoke(PutAllCallBkSingleVMDUnitTest.class, "createCache");
-      getLogWriter().fine("Cache created in successfully");
+      LogWriterUtils.getLogWriter().fine("Cache created in successfully");
     }
     
-    public void tearDown2(){
-        Host host = Host.getHost(0);
-        VM vm0 = host.getVM(0);
-        VM vm1 = host.getVM(1);
-        vm0.invoke(PutAllCallBkSingleVMDUnitTest.class, "closeCache");
-        vm1.invoke(PutAllCallBkSingleVMDUnitTest.class, "closeCache");
-        
+    @Override
+    protected final void preTearDown() throws Exception {
+      Host host = Host.getHost(0);
+      VM vm0 = host.getVM(0);
+      VM vm1 = host.getVM(1);
+      vm0.invoke(PutAllCallBkSingleVMDUnitTest.class, "closeCache");
+      vm1.invoke(PutAllCallBkSingleVMDUnitTest.class, "closeCache");
     }
     
     public static synchronized void createCache(){
@@ -186,7 +188,7 @@ public class PutAllCallBkSingleVMDUnitTest extends DistributedTestCase{
                 obj = region.put(ob, str);
             }
         }catch(Exception ex){
-            fail("Failed while region.put", ex);
+            Assert.fail("Failed while region.put", ex);
         }
         return obj;
     }//end of putMethod
@@ -265,12 +267,12 @@ public class PutAllCallBkSingleVMDUnitTest extends DistributedTestCase{
     static class AfterCreateCallback extends CacheListenerAdapter {
         public void afterCreate(EntryEvent event){            
             putAllcounter++;
-            getLogWriter().fine("In afterCreate"+putAllcounter);
+            LogWriterUtils.getLogWriter().fine("In afterCreate"+putAllcounter);
             if (event.getOperation().isPutAll()) {
               assertEquals("putAllCreateCallback", event.getCallbackArgument());
             }
             if(putAllcounter == 25){
-                getLogWriter().fine("performingtrue");
+                LogWriterUtils.getLogWriter().fine("performingtrue");
                 afterCreate = true;
             }            
         }
@@ -279,12 +281,12 @@ public class PutAllCallBkSingleVMDUnitTest extends DistributedTestCase{
     static class AfterUpdateCallback extends CacheListenerAdapter {
         public void afterUpdate(EntryEvent event){            
             afterUpdateputAllcounter++;
-            getLogWriter().fine("In afterUpdate"+afterUpdateputAllcounter);
+            LogWriterUtils.getLogWriter().fine("In afterUpdate"+afterUpdateputAllcounter);
             if (event.getOperation().isPutAll()) {
               assertEquals("putAllAfterUpdateCallback", event.getCallbackArgument());
             }
             if(afterUpdateputAllcounter == 5){
-                getLogWriter().fine("performingtrue afterUpdate");
+                LogWriterUtils.getLogWriter().fine("performingtrue afterUpdate");
                 afterUpdate = true;
             }            
         }
@@ -292,12 +294,12 @@ public class PutAllCallBkSingleVMDUnitTest extends DistributedTestCase{
     static class BeforeCreateCallback extends CacheWriterAdapter {
           public void beforeCreate(EntryEvent event){            
             beforeCreateputAllcounter++;
-            getLogWriter().fine("In beforeCreate"+beforeCreateputAllcounter);
+            LogWriterUtils.getLogWriter().fine("In beforeCreate"+beforeCreateputAllcounter);
             if (event.getOperation().isPutAll()) {
               assertEquals("putAllCreateCallback", event.getCallbackArgument());
             }
             if(beforeCreateputAllcounter == 25){
-                getLogWriter().fine("performingtrue beforeCreateputAll");
+                LogWriterUtils.getLogWriter().fine("performingtrue beforeCreateputAll");
                 beforeCreate = true;
             }            
         }
@@ -305,12 +307,12 @@ public class PutAllCallBkSingleVMDUnitTest extends DistributedTestCase{
       static class BeforeUpdateCallback extends CacheWriterAdapter {
         public void beforeUpdate(EntryEvent event){            
             beforeUpdateputAllcounter++;
-            getLogWriter().fine("In beforeUpdate"+beforeUpdateputAllcounter);
+            LogWriterUtils.getLogWriter().fine("In beforeUpdate"+beforeUpdateputAllcounter);
             if (event.getOperation().isPutAll()) {
               assertEquals("putAllAfterUpdateCallback", event.getCallbackArgument());
             }
             if(beforeUpdateputAllcounter == 5){
-                getLogWriter().fine("performingtrue beforeUpdate");
+                LogWriterUtils.getLogWriter().fine("performingtrue beforeUpdate");
                 beforeUpdate = true;
             }            
         }

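Two smaller renames run through this test: the fail(String, Throwable) overload that preserves the cause now lives on the dunit Assert class (plain JUnit fail() takes no Throwable), and logging goes through LogWriterUtils.getLogWriter(). A sketch of a put helper in the new style, assuming a static region field as in the test above:

    import com.gemstone.gemfire.cache.Region;
    import com.gemstone.gemfire.test.dunit.Assert;
    import com.gemstone.gemfire.test.dunit.LogWriterUtils;

    private static Region region;   // initialized in createCache(), as in the test above

    public static Object putMethod(Object key, String value) {
      Object previous = null;
      try {
        previous = region.put(key, value);
        LogWriterUtils.getLogWriter().fine("put " + key + " -> " + value);
      } catch (Exception ex) {
        // fail with the cause attached so the remote stack trace is not lost
        Assert.fail("Failed while region.put", ex);
      }
      return previous;
    }
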
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PutAllMultiVmDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PutAllMultiVmDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PutAllMultiVmDUnitTest.java
index 2113118..5a37ebf 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PutAllMultiVmDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PutAllMultiVmDUnitTest.java
@@ -40,6 +40,7 @@ import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -72,14 +73,15 @@ public class PutAllMultiVmDUnitTest extends DistributedTestCase{
       vm1.invoke(PutAllMultiVmDUnitTest.class, "createCache");
     }
     
-    public void tearDown2(){
-        Host host = Host.getHost(0);
-        VM vm0 = host.getVM(0);
-        VM vm1 = host.getVM(1);
-        vm0.invoke(PutAllMultiVmDUnitTest.class, "closeCache");
-        vm1.invoke(PutAllMultiVmDUnitTest.class, "closeCache");
-        cache = null;
-        invokeInEveryVM(new SerializableRunnable() { public void run() { cache = null; } });
+    @Override
+    protected final void preTearDown() throws Exception {
+      Host host = Host.getHost(0);
+      VM vm0 = host.getVM(0);
+      VM vm1 = host.getVM(1);
+      vm0.invoke(PutAllMultiVmDUnitTest.class, "closeCache");
+      vm1.invoke(PutAllMultiVmDUnitTest.class, "closeCache");
+      cache = null;
+      Invoke.invokeInEveryVM(new SerializableRunnable() { public void run() { cache = null; } });
     }
     
     public static void createCache(){

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/QueueMsgDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/QueueMsgDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/QueueMsgDUnitTest.java
index 5a00ec0..82d5205 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/QueueMsgDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/QueueMsgDUnitTest.java
@@ -35,10 +35,11 @@ import com.gemstone.gemfire.cache.SubscriptionAttributes;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.cache.CachePerfStats;
 import com.gemstone.gemfire.internal.cache.DistributedRegion;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Test to make sure message queuing works.
@@ -128,7 +129,7 @@ public class QueueMsgDUnitTest extends ReliabilityTestCase {
         return "waiting for reliableQueuedOps to become 0";
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 5 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 5 * 1000, 200, true);
     
     // now check that the queued op was delivered
     vm.invoke(new CacheSerializableRunnable("check") {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RRSynchronizationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RRSynchronizationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RRSynchronizationDUnitTest.java
index e707ea8..916634b 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RRSynchronizationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RRSynchronizationDUnitTest.java
@@ -44,9 +44,14 @@ import com.gemstone.gemfire.internal.cache.VMCachedDeserializable;
 import com.gemstone.gemfire.internal.cache.versions.VMVersionTag;
 import com.gemstone.gemfire.internal.cache.versions.VersionSource;
 import com.gemstone.gemfire.internal.cache.versions.VersionTag;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * concurrency-control tests for client/server
@@ -85,7 +90,7 @@ public class RRSynchronizationDUnitTest extends CacheTestCase {
    * distributed in the 7.0 release.
    */
   public void doRegionsSyncOnPeerLoss(TestType typeOfTest) {
-    addExpectedException("killing member's ds");
+    IgnoredException.addIgnoredException("killing member's ds");
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
@@ -111,7 +116,7 @@ public class RRSynchronizationDUnitTest extends CacheTestCase {
       // Now we crash the member who "modified" vm1's cache.
       // The other replicates should perform a delta-GII for the lost member and
       // get back in sync
-      crashDistributedSystem(vm0);
+      DistributedTestUtils.crashDistributedSystem(vm0);
   
       verifySynchronized(vm2, crashedID);
     } finally {
@@ -158,7 +163,7 @@ public class RRSynchronizationDUnitTest extends CacheTestCase {
         tag.setEntryVersion(1);
         tag.setIsRemoteForTesting();
         EntryEventImpl event = EntryEventImpl.create(dr, Operation.CREATE, "Object3", true, forMember, true, false);
-        getLogWriter().info("applying this event to the cache: " + event);
+        LogWriterUtils.getLogWriter().info("applying this event to the cache: " + event);
         event.setNewValue(new VMCachedDeserializable("value3", 12));
         event.setVersionTag(tag);
         dr.getRegionMap().basicPut(event, System.currentTimeMillis(), true, false, null, false, false);
@@ -173,12 +178,12 @@ public class RRSynchronizationDUnitTest extends CacheTestCase {
         event = EntryEventImpl.create(dr, Operation.CREATE, "Object5", true, forMember, true, false);
         event.setNewValue(Token.TOMBSTONE);
         event.setVersionTag(tag);
-        getLogWriter().info("applying this event to the cache: " + event);
+        LogWriterUtils.getLogWriter().info("applying this event to the cache: " + event);
         dr.getRegionMap().basicPut(event, System.currentTimeMillis(), true, false, null, false, false);
         event.release();
 
         dr.dumpBackingMap();
-        getLogWriter().info("version vector is now " + dr.getVersionVector().fullToString());
+        LogWriterUtils.getLogWriter().info("version vector is now " + dr.getVersionVector().fullToString());
         assertTrue("should hold entry Object3 now", dr.containsKey("Object3"));
         return true;
       }
@@ -189,17 +194,17 @@ public class RRSynchronizationDUnitTest extends CacheTestCase {
     vm.invoke(new SerializableCallable("check that synchronization happened") {
       public Object call() throws Exception {
         final DistributedRegion dr = (DistributedRegion)TestRegion;
-        waitForCriterion(new WaitCriterion() {
+        Wait.waitForCriterion(new WaitCriterion() {
           String waitingFor = "crashed member is still in membership view: " + crashedMember;
           boolean dumped = false;
           public boolean done() {
             if (TestRegion.getCache().getDistributionManager().isCurrentMember(crashedMember)) {
-              getLogWriter().info(waitingFor);
+              LogWriterUtils.getLogWriter().info(waitingFor);
               return false;
             }
             if (!TestRegion.containsKey("Object3")) {
               waitingFor = "entry for Object3 not found";
-              getLogWriter().info(waitingFor);
+              LogWriterUtils.getLogWriter().info(waitingFor);
               return false;
             }
             RegionEntry re = dr.getRegionMap().getEntry("Object5");
@@ -209,7 +214,7 @@ public class RRSynchronizationDUnitTest extends CacheTestCase {
                 dr.dumpBackingMap();
               }
               waitingFor = "entry for Object5 not found";
-              getLogWriter().info(waitingFor);
+              LogWriterUtils.getLogWriter().info(waitingFor);
               return false;
             }
             if (!re.isTombstone()) {
@@ -218,7 +223,7 @@ public class RRSynchronizationDUnitTest extends CacheTestCase {
                 dr.dumpBackingMap();
               }
               waitingFor = "Object5 is not a tombstone but should be: " + re;
-              getLogWriter().info(waitingFor);
+              LogWriterUtils.getLogWriter().info(waitingFor);
               return false;
             }
             return true;

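Registering expected log noise keeps its shape, only the holder changes: addExpectedException(...) becomes IgnoredException.addIgnoredException(...), and both plain substrings and the '||'-separated form work as before. A minimal sketch taken from the patterns above:

    import com.gemstone.gemfire.test.dunit.IgnoredException;

    private void ignoreForcedDisconnectNoise() {
      // suppress suspect-string checks for log output this test provokes on purpose
      IgnoredException.addIgnoredException("killing member's ds");
      IgnoredException.addIgnoredException(
          "com.gemstone.gemfire.ForcedDisconnectException||Possible loss of quorum");
    }
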
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ReconnectDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ReconnectDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ReconnectDUnitTest.java
index 0700074..0d80574 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ReconnectDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ReconnectDUnitTest.java
@@ -54,12 +54,20 @@ import com.gemstone.gemfire.distributed.internal.membership.gms.mgr.GMSMembershi
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.internal.cache.xmlcache.CacheXmlGenerator;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 @SuppressWarnings("serial")
 public class ReconnectDUnitTest extends CacheTestCase
@@ -92,10 +100,10 @@ public class ReconnectDUnitTest extends CacheTestCase
           locatorPort = locPort;
           Properties props = getDistributedSystemProperties();
           locator = Locator.startLocatorAndDS(locatorPort, new File(""), props);
-          addExpectedException("com.gemstone.gemfire.ForcedDisconnectException||Possible loss of quorum");
+          IgnoredException.addIgnoredException("com.gemstone.gemfire.ForcedDisconnectException||Possible loss of quorum");
 //          MembershipManagerHelper.getMembershipManager(InternalDistributedSystem.getConnectedInstance()).setDebugJGroups(true);
         } catch (IOException e) {
-          fail("unable to start locator", e);
+          Assert.fail("unable to start locator", e);
         }
       }
     });
@@ -106,7 +114,7 @@ public class ReconnectDUnitTest extends CacheTestCase
     //Cache cache = getCache();
     closeCache();
     getSystem().disconnect();
-    getLogWriter().fine("Cache Closed ");
+    LogWriterUtils.getLogWriter().fine("Cache Closed ");
   }
 
   @Override
@@ -119,25 +127,24 @@ public class ReconnectDUnitTest extends CacheTestCase
       dsProperties.put(DistributionConfig.LOCATORS_NAME, "localHost["+this.locatorPort+"]");
       dsProperties.put(DistributionConfig.MCAST_PORT_NAME, "0");
       dsProperties.put(DistributionConfig.MEMBER_TIMEOUT_NAME, "1000");
-      dsProperties.put(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+      dsProperties.put(DistributionConfig.LOG_LEVEL_NAME, LogWriterUtils.getDUnitLogLevel());
     }
     return dsProperties;
   }
   
-  public void tearDown2() throws Exception
-  {
+  @Override
+  protected final void postTearDownCacheTestCase() throws Exception {
     try {
-      super.tearDown2();
       Host.getHost(0).getVM(locatorVMNumber).invoke(new SerializableRunnable("stop locator") {
         public void run() {
           if (locator != null) {
-            getLogWriter().info("stopping locator " + locator);
+            LogWriterUtils.getLogWriter().info("stopping locator " + locator);
             locator.stop();
           }
         }
       });
     } finally {
-      invokeInEveryVM(new SerializableRunnable() {
+      Invoke.invokeInEveryVM(new SerializableRunnable() {
         public void run() {
           ReconnectDUnitTest.savedSystem = null;
         }
@@ -167,7 +174,7 @@ public class ReconnectDUnitTest extends CacheTestCase
 
   // quorum check fails, then succeeds
   public void testReconnectWithQuorum() throws Exception {
-    addExpectedException("killing member's ds");
+    IgnoredException.addIgnoredException("killing member's ds");
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
@@ -197,7 +204,7 @@ public class ReconnectDUnitTest extends CacheTestCase
         props.put("max-num-reconnect-tries", "2");
         props.put("log-file", "autoReconnectVM"+VM.getCurrentVMNum()+"_"+getPID()+".log");
         Cache cache = new CacheFactory(props).create();
-        addExpectedException("com.gemstone.gemfire.ForcedDisconnectException||Possible loss of quorum");
+        IgnoredException.addIgnoredException("com.gemstone.gemfire.ForcedDisconnectException||Possible loss of quorum");
         Region myRegion = cache.getRegion("root/myRegion");
         ReconnectDUnitTest.savedSystem = cache.getDistributedSystem();
         myRegion.put("MyKey1", "MyValue1");
@@ -219,7 +226,7 @@ public class ReconnectDUnitTest extends CacheTestCase
      */
     System.out.println("disconnecting vm0");
     forceDisconnect(vm0);
-    pause(10000);
+    Wait.pause(10000);
     System.out.println("disconnecting vm1");
     forceDisconnect(vm1);
 
@@ -231,7 +238,7 @@ public class ReconnectDUnitTest extends CacheTestCase
       waitForReconnect(vm1);
       System.out.println("done reconnecting vm0 and vm1");
     } catch (Exception e) {
-      dumpAllStacks();
+      ThreadUtils.dumpAllStacks();
       throw e;
     }
   }
@@ -249,7 +256,7 @@ public class ReconnectDUnitTest extends CacheTestCase
   
   public void doTestReconnectOnForcedDisconnect(final boolean createInAppToo) throws Exception {
 
-    addExpectedException("killing member's ds");
+    IgnoredException.addIgnoredException("killing member's ds");
 //    getSystem().disconnect();
 //    getLogWriter().fine("Cache Closed ");
 
@@ -260,7 +267,7 @@ public class ReconnectDUnitTest extends CacheTestCase
     final int locPort = locatorPort;
     final int secondLocPort = AvailablePortHelper.getRandomAvailableTCPPort();
 
-    deleteLocatorStateFile(locPort, secondLocPort);
+    DistributedTestUtils.deleteLocatorStateFile(locPort, secondLocPort);
 
 
     final String xmlFileLoc = (new File(".")).getAbsolutePath();
@@ -311,11 +318,11 @@ public class ReconnectDUnitTest extends CacheTestCase
           Thread recreateCacheThread = new Thread("ReconnectDUnitTest.createInAppThread") {
             public void run() {
               while (!cache.isClosed()) {
-                pause(100);
+                Wait.pause(100);
               }
               try {
                 new CacheFactory(props).create();
-                getLogWriter().error("testReconnectCollidesWithApplication failed - application thread was able to create a cache");
+                LogWriterUtils.getLogWriter().error("testReconnectCollidesWithApplication failed - application thread was able to create a cache");
               } catch (IllegalStateException cacheExists) {
                 // expected
               }
@@ -335,7 +342,7 @@ public class ReconnectDUnitTest extends CacheTestCase
       public Object call() {
         final DistributedSystem ds = ReconnectDUnitTest.savedSystem;
         ReconnectDUnitTest.savedSystem = null;
-        waitForCriterion(new WaitCriterion() {
+        Wait.waitForCriterion(new WaitCriterion() {
           public boolean done() {
             return ds.isReconnecting();
           }
@@ -343,8 +350,8 @@ public class ReconnectDUnitTest extends CacheTestCase
             return "waiting for ds to begin reconnecting";
           }
         }, 30000, 1000, true);
-        getLogWriter().info("entering reconnect wait for " + ds);
-        getLogWriter().info("ds.isReconnecting() = " + ds.isReconnecting());
+        LogWriterUtils.getLogWriter().info("entering reconnect wait for " + ds);
+        LogWriterUtils.getLogWriter().info("ds.isReconnecting() = " + ds.isReconnecting());
         boolean failure = true;
         try {
           ds.waitUntilReconnected(60, TimeUnit.SECONDS);
@@ -357,7 +364,7 @@ public class ReconnectDUnitTest extends CacheTestCase
           failure = false;
           return ds.getReconnectedSystem().getDistributedMember();
         } catch (InterruptedException e) {
-          getLogWriter().warning("interrupted while waiting for reconnect");
+          LogWriterUtils.getLogWriter().warning("interrupted while waiting for reconnect");
           return null;
         } finally {
           if (failure) {
@@ -373,7 +380,7 @@ public class ReconnectDUnitTest extends CacheTestCase
       public Object call() {
         final DistributedSystem ds = ReconnectDUnitTest.savedSystem;
         ReconnectDUnitTest.savedSystem = null;
-        waitForCriterion(new WaitCriterion() {
+        Wait.waitForCriterion(new WaitCriterion() {
           public boolean done() {
             return ds.isReconnecting() || ds.getReconnectedSystem() != null;
           }
@@ -386,7 +393,7 @@ public class ReconnectDUnitTest extends CacheTestCase
         assertFalse(ds.isReconnecting());
         DistributedSystem newDs = InternalDistributedSystem.getAnyInstance();
         if (newDs != null) {
-          getLogWriter().warning("expected distributed system to be disconnected: " + newDs);
+          LogWriterUtils.getLogWriter().warning("expected distributed system to be disconnected: " + newDs);
           return false;
         }
         return true;
@@ -399,8 +406,8 @@ public class ReconnectDUnitTest extends CacheTestCase
     forceDisconnect(vm1);
     newdm = waitForReconnect(vm1);
     assertNotSame("expected a reconnect to occur in member", dm, newdm);
-    deleteLocatorStateFile(locPort);
-    deleteLocatorStateFile(secondLocPort);
+    DistributedTestUtils.deleteLocatorStateFile(locPort);
+    DistributedTestUtils.deleteLocatorStateFile(secondLocPort);
   }
   
   private DistributedMember getDMID(VM vm) {
@@ -418,7 +425,7 @@ public class ReconnectDUnitTest extends CacheTestCase
     	System.out.println("waitForReconnect invoked");
         final DistributedSystem ds = ReconnectDUnitTest.savedSystem;
         ReconnectDUnitTest.savedSystem = null;
-        waitForCriterion(new WaitCriterion() {
+        Wait.waitForCriterion(new WaitCriterion() {
           public boolean done() {
             return ds.isReconnecting();
           }
@@ -427,7 +434,7 @@ public class ReconnectDUnitTest extends CacheTestCase
           }
         }, 30000, 1000, true);
         long waitTime = 120;
-        getLogWriter().info("VM"+VM.getCurrentVMNum() + " waiting up to "+waitTime+" seconds for reconnect to complete");
+        LogWriterUtils.getLogWriter().info("VM"+VM.getCurrentVMNum() + " waiting up to "+waitTime+" seconds for reconnect to complete");
         try {
           ds.waitUntilReconnected(waitTime, TimeUnit.SECONDS);
         } catch (InterruptedException e) {
@@ -455,7 +462,7 @@ public class ReconnectDUnitTest extends CacheTestCase
     final int locPort = locatorPort;
     final int secondLocPort = AvailablePortHelper.getRandomAvailableTCPPort();
 
-    deleteLocatorStateFile(locPort, secondLocPort);
+    DistributedTestUtils.deleteLocatorStateFile(locPort, secondLocPort);
 
     final String xmlFileLoc = (new File(".")).getAbsolutePath();
 
@@ -476,7 +483,7 @@ public class ReconnectDUnitTest extends CacheTestCase
         try {
           Locator.startLocatorAndDS(secondLocPort, null, props);
         } catch (IOException e) {
-          fail("exception starting locator", e);
+          Assert.fail("exception starting locator", e);
         }
       }
     });
@@ -525,13 +532,13 @@ public class ReconnectDUnitTest extends CacheTestCase
               return "waiting for locator to restart";
             }
           };
-          waitForCriterion(wc, 30000, 1000, false);
+          Wait.waitForCriterion(wc, 30000, 1000, false);
           if (Locator.getLocator() == null) {
-            getLogWriter().error("expected to find a running locator but getLocator() returns null");
+            LogWriterUtils.getLogWriter().error("expected to find a running locator but getLocator() returns null");
             return false;
           }
           if (((InternalLocator)Locator.getLocator()).isStopped()) {
-            getLogWriter().error("found a stopped locator");
+            LogWriterUtils.getLogWriter().error("found a stopped locator");
             return false;
           }
           return true;
@@ -563,8 +570,8 @@ public class ReconnectDUnitTest extends CacheTestCase
           gfshThread = null;
         }
       });
-      deleteLocatorStateFile(locPort);
-      deleteLocatorStateFile(secondLocPort);
+      DistributedTestUtils.deleteLocatorStateFile(locPort);
+      DistributedTestUtils.deleteLocatorStateFile(secondLocPort);
     }
   }
   
@@ -620,7 +627,7 @@ public class ReconnectDUnitTest extends CacheTestCase
     locatorPort = locPort;
     Properties config = getDistributedSystemProperties();
     config.put(DistributionConfig.ROLES_NAME, "");
-    config.put(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+    config.put(DistributionConfig.LOG_LEVEL_NAME, LogWriterUtils.getDUnitLogLevel());
 //    config.put("log-file", "roleLossController.log");
     //creating the DS
     getSystem(config);
@@ -644,12 +651,12 @@ public class ReconnectDUnitTest extends CacheTestCase
       pw.close();
     }
     catch (IOException ex) {
-      fail("IOException during cache.xml generation to " + file, ex);
+      Assert.fail("IOException during cache.xml generation to " + file, ex);
     }
     closeCache();
     getSystem().disconnect();
 
-    getLogWriter().info("disconnected from the system...");
+    LogWriterUtils.getLogWriter().info("disconnected from the system...");
     Host host = Host.getHost(0);
 
     VM vm0 = host.getVM(0);
@@ -660,14 +667,14 @@ public class ReconnectDUnitTest extends CacheTestCase
         "ROLERECONNECTTESTS") {
       public void run2() throws CacheException, RuntimeException
       {
-        getLogWriter().info("####### STARTING THE REAL TEST ##########");
+        LogWriterUtils.getLogWriter().info("####### STARTING THE REAL TEST ##########");
         locatorPort = locPort;
         Properties props = getDistributedSystemProperties();
         props.put("cache-xml-file", xmlFileLoc+File.separator+"RoleReconnect-cache.xml");
         props.put("max-wait-time-reconnect", "200");
         final int timeReconnect = 3;
         props.put("max-num-reconnect-tries", "3");
-        props.put(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+        props.put(DistributionConfig.LOG_LEVEL_NAME, LogWriterUtils.getDUnitLogLevel());
         props.put("log-file", "roleLossVM0.log");
 
         getSystem(props);
@@ -681,13 +688,13 @@ public class ReconnectDUnitTest extends CacheTestCase
           throw new RuntimeException("The test should throw a CancelException ");
         }
         catch (CancelException ignor){ // can be caused by role loss during intialization.
-          getLogWriter().info("Got Expected CancelException ");
+          LogWriterUtils.getLogWriter().info("Got Expected CancelException ");
         }
         finally {
           system.getLogWriter().info("<ExpectedException action=remove>" 
               + "CacheClosedException" + "</ExpectedException");
         }
-        getLogWriter().fine("roleLoss Sleeping SO call dumprun.sh");
+        LogWriterUtils.getLogWriter().fine("roleLoss Sleeping SO call dumprun.sh");
         WaitCriterion ev = new WaitCriterion() {
           public boolean done() {
             return reconnectTries >= timeReconnect;
@@ -696,8 +703,8 @@ public class ReconnectDUnitTest extends CacheTestCase
             return "Waiting for reconnect count " + timeReconnect + " currently " + reconnectTries;
           }
         };
-        DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
-        getLogWriter().fine("roleLoss done Sleeping");
+        Wait.waitForCriterion(ev, 60 * 1000, 200, true);
+        LogWriterUtils.getLogWriter().fine("roleLoss done Sleeping");
         assertEquals(timeReconnect,
             reconnectTries);
       }
@@ -753,7 +760,7 @@ public class ReconnectDUnitTest extends CacheTestCase
     locatorPort = locPort;
     Properties config = getDistributedSystemProperties();
     config.put(DistributionConfig.ROLES_NAME, "");
-    config.put(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+    config.put(DistributionConfig.LOG_LEVEL_NAME, LogWriterUtils.getDUnitLogLevel());
     //creating the DS
     getSystem(config);
 
@@ -777,7 +784,7 @@ public class ReconnectDUnitTest extends CacheTestCase
       pw.close();
     }
     catch (IOException ex) {
-      fail("IOException during cache.xml generation to " + file, ex);
+      Assert.fail("IOException during cache.xml generation to " + file, ex);
     }
     closeCache();
     //disconnectFromDS();
@@ -807,7 +814,7 @@ public class ReconnectDUnitTest extends CacheTestCase
             + " trying to reconnect");
     final AsyncInvocation roleLossAsync = vm0.invokeAsync(roleLoss);
     
-    getLogWriter().info("waiting for role loss vm to start reconnect attempts");
+    LogWriterUtils.getLogWriter().info("waiting for role loss vm to start reconnect attempts");
 
     WaitCriterion ev = new WaitCriterion() {
       public boolean done() {
@@ -824,7 +831,7 @@ public class ReconnectDUnitTest extends CacheTestCase
         return "waiting for event";
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 120 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 120 * 1000, 200, true);
 
     VM vm2 = host.getVM(2);
     if (roleLossAsync.isAlive()) {
@@ -848,17 +855,17 @@ public class ReconnectDUnitTest extends CacheTestCase
           getLogWriter().severe("Exception : "+ee);
         }
       }*/
-      getLogWriter().info("waiting for vm0 to finish reconnecting");
-      DistributedTestCase.join(roleLossAsync, 120 * 1000, getLogWriter());
+      LogWriterUtils.getLogWriter().info("waiting for vm0 to finish reconnecting");
+      ThreadUtils.join(roleLossAsync, 120 * 1000);
     }
 
     if (roleLossAsync.getException() != null){
-      fail("Exception in Vm0", roleLossAsync.getException());
+      Assert.fail("Exception in Vm0", roleLossAsync.getException());
     }
 
-    DistributedTestCase.join(avkVm1, 30 * 1000, getLogWriter());
+    ThreadUtils.join(avkVm1, 30 * 1000);
     if (avkVm1.getException() != null){
-      fail("Exception in Vm1", avkVm1.getException());
+      Assert.fail("Exception in Vm1", avkVm1.getException());
     }
 
   }
@@ -874,7 +881,7 @@ public class ReconnectDUnitTest extends CacheTestCase
         try {
           //  closeCache();
           //  getSystem().disconnect();
-          getLogWriter().info(startupMessage);
+          LogWriterUtils.getLogWriter().info(startupMessage);
           WaitCriterion ev = new WaitCriterion() {
             public boolean done() {
               return ((Boolean)otherVM.invoke(ReconnectDUnitTest.class, "isInitialRolePlayerStarted")).booleanValue();
@@ -883,15 +890,15 @@ public class ReconnectDUnitTest extends CacheTestCase
               return null;
             }
           };
-          DistributedTestCase.waitForCriterion(ev, 10 * 1000, 200, true);
+          Wait.waitForCriterion(ev, 10 * 1000, 200, true);
 
-          getLogWriter().info("Starting the test and creating the cache and regions etc ...");
+          LogWriterUtils.getLogWriter().info("Starting the test and creating the cache and regions etc ...");
           locatorPort = locPort;
           Properties props = getDistributedSystemProperties();
           props.put("cache-xml-file", "RoleRegained.xml");
           props.put("max-wait-time-reconnect", "3000");
           props.put("max-num-reconnect-tries", "8");
-          props.put(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+          props.put(DistributionConfig.LOG_LEVEL_NAME, LogWriterUtils.getDUnitLogLevel());
 
           getSystem(props);
           system.getLogWriter().info("<ExpectedException action=add>" 
@@ -901,7 +908,7 @@ public class ReconnectDUnitTest extends CacheTestCase
             getCache();
           } catch (CancelException e) {
             // can happen if RoleA goes away during initialization
-            getLogWriter().info("cache threw CancelException while creating the cache");      
+            LogWriterUtils.getLogWriter().info("cache threw CancelException while creating the cache");      
           }
           
           initialized = true;
@@ -910,14 +917,14 @@ public class ReconnectDUnitTest extends CacheTestCase
 
           ev = new WaitCriterion() {
             public boolean done() {
-              getLogWriter().info("ReconnectTries=" + reconnectTries);
+              LogWriterUtils.getLogWriter().info("ReconnectTries=" + reconnectTries);
               return reconnectTries != 0;
             }
             public String description() {
               return null;
             }
           };
-          DistributedTestCase.waitForCriterion(ev, 30 * 1000, 200, true);
+          Wait.waitForCriterion(ev, 30 * 1000, 200, true);
 
           //        long startTime = System.currentTimeMillis();
 
@@ -967,8 +974,8 @@ public class ReconnectDUnitTest extends CacheTestCase
                   excuse = "value is wrong";
                   return false;
                 }
-                getLogWriter().info("All assertions passed");
-                getLogWriter().info("MyKey : "+key+" and myvalue : "+value);
+                LogWriterUtils.getLogWriter().info("All assertions passed");
+                LogWriterUtils.getLogWriter().info("MyKey : "+key+" and myvalue : "+value);
                 return true;
               }
               catch (CancelException ecc){ 
@@ -979,7 +986,7 @@ public class ReconnectDUnitTest extends CacheTestCase
 
               }
               finally {
-                getLogWriter().info("waiting for reconnect.  Current status is '"+excuse+"'");
+                LogWriterUtils.getLogWriter().info("waiting for reconnect.  Current status is '"+excuse+"'");
               }
               return false;
             }
@@ -988,7 +995,7 @@ public class ReconnectDUnitTest extends CacheTestCase
             }
           };
 
-          DistributedTestCase.waitForCriterion(ev,  60 * 1000, 200, true); // was 5 * 60 * 1000
+          Wait.waitForCriterion(ev,  60 * 1000, 200, true); // was 5 * 60 * 1000
 
           Cache cache = CacheFactory.getAnyInstance();
           if (cache != null) {
@@ -1000,11 +1007,11 @@ public class ReconnectDUnitTest extends CacheTestCase
           throw e;
         }
         catch (Error th) {
-          getLogWriter().severe("DEBUG", th);
+          LogWriterUtils.getLogWriter().severe("DEBUG", th);
           throw th;
         } finally {
           if (t != null) {
-            DistributedTestCase.join(t, 2 * 60 * 1000, getLogWriter());
+            ThreadUtils.join(t, 2 * 60 * 1000);
           }
           // greplogs won't care if you remove an exception that was never added,
           // and this ensures that it gets removed.
@@ -1024,12 +1031,12 @@ public class ReconnectDUnitTest extends CacheTestCase
         "second RoleA player") {
       public void run2() throws CacheException
       {
-        getLogWriter().info(startupMessage);
+        LogWriterUtils.getLogWriter().info(startupMessage);
         //closeCache();
         // getSystem().disconnect();
         locatorPort = locPort;
         Properties props = getDistributedSystemProperties();
-        props.put(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+        props.put(DistributionConfig.LOG_LEVEL_NAME, LogWriterUtils.getDUnitLogLevel());
         props.put(DistributionConfig.ROLES_NAME, "RoleA");
 
         getSystem(props);
@@ -1040,7 +1047,7 @@ public class ReconnectDUnitTest extends CacheTestCase
 
         RegionAttributes attr = fac.create();
         Region region = createRootRegion(regionName, attr);
-        getLogWriter().info("STARTED THE REQUIREDROLES CACHE");
+        LogWriterUtils.getLogWriter().info("STARTED THE REQUIREDROLES CACHE");
         try{
           Thread.sleep(120);
         }
@@ -1057,7 +1064,7 @@ public class ReconnectDUnitTest extends CacheTestCase
         catch(InterruptedException ee){
           fail("interrupted");
         }
-        getLogWriter().info("RolePlayer is done...");
+        LogWriterUtils.getLogWriter().info("RolePlayer is done...");
 
 
       }
@@ -1076,10 +1083,10 @@ public class ReconnectDUnitTest extends CacheTestCase
       {
         //  closeCache();
         // getSystem().disconnect();
-        getLogWriter().info(startupMessage);
+        LogWriterUtils.getLogWriter().info(startupMessage);
         locatorPort = locPort;
         Properties props = getDistributedSystemProperties();
-        props.put(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+        props.put(DistributionConfig.LOG_LEVEL_NAME, LogWriterUtils.getDUnitLogLevel());
         props.put(DistributionConfig.ROLES_NAME, "RoleA");
 
         getSystem(props);
@@ -1090,7 +1097,7 @@ public class ReconnectDUnitTest extends CacheTestCase
 
         RegionAttributes attr = fac.create();
         createRootRegion(regionName, attr);
-        getLogWriter().info("STARTED THE REQUIREDROLES CACHE");
+        LogWriterUtils.getLogWriter().info("STARTED THE REQUIREDROLES CACHE");
         initialRolePlayerStarted = true;
 
         while(!((Boolean)otherVM.invoke(ReconnectDUnitTest.class, "isInitialized")).booleanValue()){
@@ -1100,7 +1107,7 @@ public class ReconnectDUnitTest extends CacheTestCase
             fail("interrupted");
           }
         }
-        getLogWriter().info("RoleAPlayerInitializer is done...");
+        LogWriterUtils.getLogWriter().info("RoleAPlayerInitializer is done...");
         closeCache();
 
       }
@@ -1110,10 +1117,10 @@ public class ReconnectDUnitTest extends CacheTestCase
   
   void addReconnectListener() {
     reconnectTries = 0; // reset the count for this listener
-    getLogWriter().info("adding reconnect listener");
+    LogWriterUtils.getLogWriter().info("adding reconnect listener");
     ReconnectListener reconlis = new ReconnectListener() {
       public void reconnecting(InternalDistributedSystem oldSys) {
-        getLogWriter().info("reconnect listener invoked");
+        LogWriterUtils.getLogWriter().info("reconnect listener invoked");
         reconnectTries++;
       }
       public void onReconnect(InternalDistributedSystem system1, InternalDistributedSystem system2) {}
@@ -1145,7 +1152,7 @@ public class ReconnectDUnitTest extends CacheTestCase
               return "waiting for locator to start reconnecting: " + oldLocator;
             }
           };
-          waitForCriterion(wc, 10000, 50, true);
+          Wait.waitForCriterion(wc, 10000, 50, true);
         }
         return true;
       }

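Joining asynchronous invocations drops the LogWriter argument in the move from DistributedTestCase.join(thread, ms, logWriter) to ThreadUtils.join(thread, ms), and stack dumps for a stuck test come from ThreadUtils.dumpAllStacks(). A sketch of waiting on an AsyncInvocation, assuming a vm parameter as in the test above:

    import com.gemstone.gemfire.test.dunit.Assert;
    import com.gemstone.gemfire.test.dunit.AsyncInvocation;
    import com.gemstone.gemfire.test.dunit.SerializableRunnable;
    import com.gemstone.gemfire.test.dunit.ThreadUtils;
    import com.gemstone.gemfire.test.dunit.VM;

    private void runInBackgroundAndJoin(VM vm) throws Exception {
      AsyncInvocation async = vm.invokeAsync(new SerializableRunnable("background work") {
        public void run() {
          // body runs in the remote dunit VM
        }
      });
      ThreadUtils.join(async, 120 * 1000);   // was DistributedTestCase.join(async, ms, getLogWriter())
      if (async.getException() != null) {
        Assert.fail("async work failed in remote VM", async.getException());
      }
    }
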
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ReconnectedCacheServerDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ReconnectedCacheServerDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ReconnectedCacheServerDUnitTest.java
index e408a6d..2b97a9a 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ReconnectedCacheServerDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ReconnectedCacheServerDUnitTest.java
@@ -41,7 +41,7 @@ public class ReconnectedCacheServerDUnitTest extends CacheTestCase {
   }
   
   @Override
-  public void tearDown2() {
+  protected final void preTearDownCacheTestCase() throws Exception {
     if (addedCacheServer && cache != null && !cache.isClosed()) {
       // since I polluted the cache I should shut it down in order
       // to avoid affecting other tests

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RegionExpirationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RegionExpirationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RegionExpirationDUnitTest.java
index af10e85..4187aaf 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RegionExpirationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RegionExpirationDUnitTest.java
@@ -23,9 +23,11 @@ import com.gemstone.gemfire.cache.ExpirationAttributes;
 import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.cache.Scope;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Test Region expiration - both time-to-live and idle timeout.
@@ -124,7 +126,7 @@ public class RegionExpirationDUnitTest extends CacheTestCase {
         return "region never destroyed";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 30 * 1000, 1000, true);
+    Wait.waitForCriterion(wc, 30 * 1000, 1000, true);
   }
 
   public void testWhenBothTtlAndIdleAreSet() 
@@ -162,9 +164,9 @@ public class RegionExpirationDUnitTest extends CacheTestCase {
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
-    getLogWriter().info("vm0 is " + vm0.getPid() + ", vm1 is " + vm1);
+    LogWriterUtils.getLogWriter().info("vm0 is " + vm0.getPid() + ", vm1 is " + vm1);
 
-    getLogWriter().info("2: " + regionName + " action is " + action);
+    LogWriterUtils.getLogWriter().info("2: " + regionName + " action is " + action);
 
     final long tilt = System.currentTimeMillis() + timeoutSecs * 1000;
 
@@ -216,7 +218,7 @@ public class RegionExpirationDUnitTest extends CacheTestCase {
     vm0.invoke(new CacheSerializableRunnable("Get") {
         public void run2() throws CacheException {
           Region region = getRootRegion().getSubregion(regionName);
-          getLogWriter().info("3: " + regionName + ", " + region + ", action is " + action);
+          LogWriterUtils.getLogWriter().info("3: " + regionName + ", " + region + ", action is " + action);
           if (action.isInvalidate() || action.isLocalInvalidate()) {
             assertTrue(!region.containsValueForKey(key));
           } else {
@@ -253,7 +255,7 @@ public class RegionExpirationDUnitTest extends CacheTestCase {
     factory.setScope(Scope.DISTRIBUTED_ACK);
     factory.setEarlyAck(false);
     RegionAttributes attrs = factory.create();
-    getLogWriter().info("4: " + regionName + " ttl action is " + ttl);
+    LogWriterUtils.getLogWriter().info("4: " + regionName + " ttl action is " + ttl);
     getOrCreateRootRegion().createSubregion(regionName, attrs);
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RegionMembershipListenerDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RegionMembershipListenerDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RegionMembershipListenerDUnitTest.java
index 8ecb00a..d539b82 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RegionMembershipListenerDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RegionMembershipListenerDUnitTest.java
@@ -37,9 +37,11 @@ import com.gemstone.gemfire.distributed.internal.membership.gms.MembershipManage
 import com.gemstone.gemfire.internal.cache.CacheDistributionAdvisor.CacheProfile;
 import com.gemstone.gemfire.internal.cache.DistributedRegion;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Test {@link RegionMembershipListener}
@@ -66,8 +68,7 @@ public class RegionMembershipListenerDUnitTest extends CacheTestCase {
   }
   
   @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  protected final void postTearDownCacheTestCase() throws Exception {
     DistributedRegion.TEST_HOOK_ADD_PROFILE = false;
   }
 
@@ -366,9 +367,9 @@ public class RegionMembershipListenerDUnitTest extends CacheTestCase {
           return MyRML.this.toString() + " waiting for Op " + op + " when lastOp was " + getOpName(MyRML.this.lastOp);
         }
       };
-      getLogWriter().info(this.toString() + " waiting for Op " + getOpName(op)
+      LogWriterUtils.getLogWriter().info(this.toString() + " waiting for Op " + getOpName(op)
           + " when lastOp was " + getOpName(this.lastOp));
-      DistributedTestCase.waitForCriterion(ev, this.timeOut, 200, true);
+      Wait.waitForCriterion(ev, this.timeOut, 200, true);
       assertEquals(op, this.lastOp);
       return true;
     }
@@ -387,7 +388,7 @@ public class RegionMembershipListenerDUnitTest extends CacheTestCase {
       this.lastOp = Op.Initial;
       this.lastEvent = null;
       this.initialMembers = initialMembers;
-      getLogWriter().info(this.toString() + " received initialMembers notification for region " + r
+      LogWriterUtils.getLogWriter().info(this.toString() + " received initialMembers notification for region " + r
           + " with members " + Arrays.deepToString(initialMembers));
     }
     public void afterRemoteRegionCreate(RegionEvent event) {
@@ -397,23 +398,23 @@ public class RegionMembershipListenerDUnitTest extends CacheTestCase {
       if (cacheProfile != null) {
         this.memberInitialized = cacheProfile.regionInitialized;
         if (!this.memberInitialized) {
-          getLogWriter().warning("afterRemoteRegionCreate invoked when member is not done initializing!", new Exception("stack trace"));
+          LogWriterUtils.getLogWriter().warning("afterRemoteRegionCreate invoked when member is not done initializing!", new Exception("stack trace"));
         }
-        getLogWriter().info(this.toString() + " received afterRemoteRegionCreate notification for event " + event);
+        LogWriterUtils.getLogWriter().info(this.toString() + " received afterRemoteRegionCreate notification for event " + event);
       } else {
-        getLogWriter().warning("afterRemoteRegionCreate was expecting a profile in the event callback but there was none. " +
+        LogWriterUtils.getLogWriter().warning("afterRemoteRegionCreate was expecting a profile in the event callback but there was none. " +
         		" This indicates a problem with the test hook DistributedRegion.TEST_HOOK_ADD_PROFILE");
       }
     }
     public void afterRemoteRegionDeparture(RegionEvent event) {
       this.lastOp = Op.Departure;
       this.lastEvent = event;
-      getLogWriter().info(this.toString() + " received afterRemoteRegionDeparture notification for event " + event);
+      LogWriterUtils.getLogWriter().info(this.toString() + " received afterRemoteRegionDeparture notification for event " + event);
     }
     public void afterRemoteRegionCrash(RegionEvent event) {
       this.lastOp = Op.Crash;
       this.lastEvent = event;
-      getLogWriter().info(this.toString() + " received afterRemoteRegionCrash notification for event " + event);
+      LogWriterUtils.getLogWriter().info(this.toString() + " received afterRemoteRegionCrash notification for event " + event);
     }
   }
 }

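The RegionMembershipListenerDUnitTest changes show the polling helpers moving the same way: the anonymous WaitCriterion implementation stays as it was, only the static entry point moves from DistributedTestCase.waitForCriterion to Wait.waitForCriterion. A minimal sketch of the new form, assuming a hypothetical AtomicBoolean flag that some listener eventually sets; as in the hunks above, the trailing boolean argument appears to request a test failure when the timeout elapses:

import java.util.concurrent.atomic.AtomicBoolean;

import com.gemstone.gemfire.test.dunit.Wait;
import com.gemstone.gemfire.test.dunit.WaitCriterion;

public class WaitForCriterionSketch {
  // Hypothetical flag flipped by a listener or background thread.
  static final AtomicBoolean operationSeen = new AtomicBoolean(false);

  static void awaitOperation() {
    WaitCriterion ev = new WaitCriterion() {
      public boolean done() {
        return operationSeen.get();          // condition being polled
      }
      public String description() {
        return "waiting for the remote operation to be observed";
      }
    };
    // Poll every 200 ms for up to 30 seconds.
    Wait.waitForCriterion(ev, 30 * 1000, 200, true);
  }
}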
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RegionReliabilityTestCase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RegionReliabilityTestCase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RegionReliabilityTestCase.java
index 26108cd..2ae8293 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RegionReliabilityTestCase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RegionReliabilityTestCase.java
@@ -65,9 +65,11 @@ import com.gemstone.gemfire.internal.cache.TXManagerImpl;
 import com.gemstone.gemfire.internal.cache.TXState;
 import com.gemstone.gemfire.internal.cache.TXStateInterface;
 import com.gemstone.gemfire.internal.cache.TXStateProxyImpl;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Tests region reliability defined by MembershipAttributes.
@@ -82,9 +84,8 @@ public abstract class RegionReliabilityTestCase extends ReliabilityTestCase {
   }
 
   @Override
-  public void tearDown2() throws java.lang.Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
     DistributedCacheOperation.setBeforePutOutgoing(null);
-    super.tearDown2();
   }
 
   // -------------------------------------------------------------------------
@@ -825,7 +826,7 @@ public abstract class RegionReliabilityTestCase extends ReliabilityTestCase {
         return "expected zero entries but have " + ((LocalRegion) region).basicEntries(false).size();
       }
     };
-    DistributedTestCase.waitForCriterion(wc1, 30*1000, 10, true);
+    Wait.waitForCriterion(wc1, 30*1000, 10, true);
 
     // create region again
     Host.getHost(0).getVM(1).invoke(new CacheSerializableRunnable("Create Region") {
@@ -1007,7 +1008,7 @@ public abstract class RegionReliabilityTestCase extends ReliabilityTestCase {
         return "expected region " + region + " to be destroyed";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 30*1000, 10, true);
+    Wait.waitForCriterion(wc, 30*1000, 10, true);
   }
   
   public static void waitForEntryDestroy(final Region region, final Object key) {
@@ -1019,7 +1020,7 @@ public abstract class RegionReliabilityTestCase extends ReliabilityTestCase {
         return "expected entry " + key + " to not exist but it has the value " + region.get(key);
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 30*1000, 10, true);
+    Wait.waitForCriterion(wc, 30*1000, 10, true);
   }
   
   /**
@@ -1412,7 +1413,7 @@ public abstract class RegionReliabilityTestCase extends ReliabilityTestCase {
       }
     });
     
-    DistributedTestCase.join(thread, 30 * 1000, getLogWriter());
+    ThreadUtils.join(thread, 30 * 1000);
     assertTrue(region.isDestroyed());
     try {
       region.put("fee", "fi");

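RegionReliabilityTestCase picks up the thread helper from the same extraction: DistributedTestCase.join(thread, timeoutMs, logWriter) becomes ThreadUtils.join(thread, timeoutMs), dropping the LogWriter argument. A minimal sketch of joining a worker under the new signature; the Runnable body is hypothetical (the test above destroys a region on that thread):

import com.gemstone.gemfire.test.dunit.ThreadUtils;

public class ThreadJoinSketch {
  static void runAndJoin() {
    Thread worker = new Thread(new Runnable() {
      public void run() {
        // hypothetical work; the real test performs a region destroy here
      }
    });
    worker.start();
    // Wait up to 30 seconds for the worker to finish.
    ThreadUtils.join(worker, 30 * 1000);
  }
}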
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RegionTestCase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RegionTestCase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RegionTestCase.java
index 9c3add6..ed89a13 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RegionTestCase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RegionTestCase.java
@@ -58,10 +58,11 @@ import com.gemstone.gemfire.internal.cache.EntryExpiryTask;
 import com.gemstone.gemfire.internal.cache.EntrySnapshot;
 import com.gemstone.gemfire.internal.cache.ExpiryTask;
 import com.gemstone.gemfire.internal.cache.LocalRegion;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.VM;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 //import com.gemstone.gemfire.internal.util.DebuggerSupport;
 
@@ -109,23 +110,14 @@ public abstract class RegionTestCase extends CacheTestCase {
     super(name);
   }
   
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  @Override
+  protected final void postTearDownCacheTestCase() throws Exception {
     cleanup();
-    invokeInEveryVM(getClass(), "cleanup");
-    /*for (int h = 0; h < Host.getHostCount(); h++) {
-      Host host = Host.getHost(h);
-      for (int v = 0; v < host.getVMCount(); v++) {
-        host.getVM(v).invoke(new SerializableRunnable("Clean up") {
-            public void run() {
-              cleanup();
-            }
-          });
-// already called in every VM in super.tearDown
-// host.getVM(v).invoke(this.getClass(), "remoteTearDown");
-      }
-    }*/
-    super.tearDown2();
+    Invoke.invokeInEveryVM(getClass(), "cleanup");
+    postTearDownRegionTestCase();
+  }
+  
+  protected void postTearDownRegionTestCase() throws Exception {
   }
   
   ////////  Helper methods
@@ -268,7 +260,7 @@ public abstract class RegionTestCase extends CacheTestCase {
       assertEquals(value, values.iterator().next());
     }
     catch (UnsupportedOperationException uoe) {
-      getLogWriter().info("Region.values() reported UnsupportedOperation");
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Region.values() reported UnsupportedOperation");
     }
   }
   
@@ -472,7 +464,7 @@ public abstract class RegionTestCase extends CacheTestCase {
     region.destroy(key);
     
     Region.Entry entry2 = region.getEntry(key);
-    getLogWriter().info("Found entry for destroyed key: " + entry2);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Found entry for destroyed key: " + entry2);
     assertNull(entry2);
     if (entry.isLocal()) {
       assertTrue(entry.isDestroyed());
@@ -1979,11 +1971,11 @@ public abstract class RegionTestCase extends CacheTestCase {
         break;
       }
       if (!wasInvalidated) {
-        pause(pauseMs);
+        Wait.pause(pauseMs);
         continue;
       }
       if (now >= tilt - SLOP) {
-        getLogWriter().warning("Entry invalidated sloppily "
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().warning("Entry invalidated sloppily "
             + "now=" + now + " tilt=" + tilt + " delta = " + (tilt - now));
         break;
       }
@@ -2003,7 +1995,7 @@ public abstract class RegionTestCase extends CacheTestCase {
         if (fetchEntryValue(entry) == null) break;
         fail("Entry failed to invalidate");
       }
-      pause(pauseMs);
+      Wait.pause(pauseMs);
     }
   }
 
@@ -2050,11 +2042,11 @@ public abstract class RegionTestCase extends CacheTestCase {
       if (now >= tilt)
         break;
       if (!isEntryDestroyed(entry)) {
-        pause(pauseMs);
+        Wait.pause(pauseMs);
         continue;
       }
       if (now >= tilt - SLOP) {
-        getLogWriter().warning("Entry destroyed sloppily "
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().warning("Entry destroyed sloppily "
             + "now=" + now + " tilt=" + tilt + " delta = " + (tilt - now));
         break;
       }
@@ -2072,7 +2064,7 @@ public abstract class RegionTestCase extends CacheTestCase {
         break;
       Assert.assertTrue(System.currentTimeMillis() <= tilt,
           "Entry failed to destroy");
-      pause(pauseMs);
+      Wait.pause(pauseMs);
     }
   }
   
@@ -2094,11 +2086,11 @@ public abstract class RegionTestCase extends CacheTestCase {
       if (now >= tilt)
         break;
       if (!region.isDestroyed()) {
-        pause(10);
+        Wait.pause(10);
         continue;
       }
       if (now >= tilt - SLOP) {
-        getLogWriter().warning("Region destroyed sloppily "
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().warning("Region destroyed sloppily "
             + "now=" + now + " tilt=" + tilt + " delta = " + (tilt - now));
         break;
       }
@@ -2116,7 +2108,7 @@ public abstract class RegionTestCase extends CacheTestCase {
         break;
       Assert.assertTrue(System.currentTimeMillis() <= tilt,
           "Region failed to destroy");
-      pause(10);
+      Wait.pause(10);
     }
   }  
 
@@ -2201,7 +2193,7 @@ public abstract class RegionTestCase extends CacheTestCase {
     
     // Random values should not expire
     region.put(key1, value);
-    pause(timeout * 2);
+    Wait.pause(timeout * 2);
     assert(region.get(key1).equals(value));
     
     // key2 *should* expire
@@ -2260,7 +2252,7 @@ public abstract class RegionTestCase extends CacheTestCase {
     region.create(key1, value);
     
     // This value should NOT expire.
-    pause(timeout * 2);
+    Wait.pause(timeout * 2);
     assertTrue(region.get(key1).equals(value));
     
     // This value SHOULD expire
@@ -2363,7 +2355,7 @@ public abstract class RegionTestCase extends CacheTestCase {
         ExpiryTask.permitExpiration();
       }
       waitForInvalidate(entry, tilt1, timeout1/2);
-      DistributedTestCase.waitForCriterion(waitForEventCountToBeOne, 10 * 1000, 100, true);
+      Wait.waitForCriterion(waitForEventCountToBeOne, 10 * 1000, 100, true);
       eventCount = 0;
 
       // Do it again with a put (I guess)
@@ -2379,7 +2371,7 @@ public abstract class RegionTestCase extends CacheTestCase {
         ExpiryTask.permitExpiration();
       }
       waitForInvalidate(entry, tilt1, timeout1/2);
-      DistributedTestCase.waitForCriterion(waitForEventCountToBeOne, 10 * 1000, 100, true);
+      Wait.waitForCriterion(waitForEventCountToBeOne, 10 * 1000, 100, true);
       eventCount = 0;
 
       // Change custom expiry for this region now...
@@ -2405,7 +2397,7 @@ public abstract class RegionTestCase extends CacheTestCase {
         ExpiryTask.permitExpiration();
       }
       waitForInvalidate(entry, tilt2, timeout2/2);
-      DistributedTestCase.waitForCriterion(waitForEventCountToBeOne, 10 * 1000, 100, true);
+      Wait.waitForCriterion(waitForEventCountToBeOne, 10 * 1000, 100, true);
       eventCount = 0;
       // key1 should not be invalidated since we mutated to custom expiry to only expire key2
       entry = region.getEntry(key1);
@@ -2480,7 +2472,7 @@ public abstract class RegionTestCase extends CacheTestCase {
     assertEquals(0, eventCount);
     
     // now set it to a really short time and make sure it expires immediately
-    waitForExpiryClockToChange(region);
+    Wait.waitForExpiryClockToChange(region);
     final Region.Entry entry = region.getEntry(key1);
     mutt = region.getAttributesMutator();
     ExpirationAttributes expire4 = new ExpirationAttributes(1, ExpirationAction.INVALIDATE);
@@ -2493,7 +2485,7 @@ public abstract class RegionTestCase extends CacheTestCase {
         return "entry never became invalid";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 10 * 1000, 10, true);
+    Wait.waitForCriterion(wc, 10 * 1000, 10, true);
 
     WaitCriterion waitForEventCountToBeOne = new WaitCriterion() {
       public boolean done() {
@@ -2503,7 +2495,7 @@ public abstract class RegionTestCase extends CacheTestCase {
         return "eventCount never became 1";
       }
     };
-    DistributedTestCase.waitForCriterion(waitForEventCountToBeOne, 10 * 1000, 10, true);
+    Wait.waitForCriterion(waitForEventCountToBeOne, 10 * 1000, 10, true);
     eventCount = 0;
   }
 
@@ -2933,7 +2925,7 @@ public abstract class RegionTestCase extends CacheTestCase {
     region.create(key1, value);
     
     // This value should NOT expire.
-    pause(timeout * 2);
+    Wait.pause(timeout * 2);
     assertTrue(region.get(key1).equals(value));
     
     // This value SHOULD expire
@@ -3013,7 +3005,7 @@ public abstract class RegionTestCase extends CacheTestCase {
     region.create(key2, value);
     
     // This value should NOT expire.
-    pause(timeout * 2);
+    Wait.pause(timeout * 2);
     assertTrue(region.get(key2).equals(value));
     
     // This value SHOULD expire
@@ -3114,7 +3106,7 @@ public abstract class RegionTestCase extends CacheTestCase {
     assertEquals(0, eventCount);
     
     // now set it to a really short time and make sure it expires immediately
-    waitForExpiryClockToChange(region);
+    Wait.waitForExpiryClockToChange(region);
     final Region.Entry entry = region.getEntry(key1);
     mutt = region.getAttributesMutator();
     ExpirationAttributes expire4 = new ExpirationAttributes(1, ExpirationAction.INVALIDATE);
@@ -3127,7 +3119,7 @@ public abstract class RegionTestCase extends CacheTestCase {
         return "entry never became invalid";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 10 * 1000, 10, true);
+    Wait.waitForCriterion(wc, 10 * 1000, 10, true);
 
     WaitCriterion waitForEventCountToBeOne = new WaitCriterion() {
       public boolean done() {
@@ -3137,7 +3129,7 @@ public abstract class RegionTestCase extends CacheTestCase {
         return "eventCount never became 1";
       }
     };
-    DistributedTestCase.waitForCriterion(waitForEventCountToBeOne, 10 * 1000, 10, true);
+    Wait.waitForCriterion(waitForEventCountToBeOne, 10 * 1000, 10, true);
     eventCount = 0;
   }
 
@@ -3201,7 +3193,7 @@ public abstract class RegionTestCase extends CacheTestCase {
     assertEquals(0, eventCount);
     
     // now set it to a really short time and make sure it expires immediately
-    waitForExpiryClockToChange(region);
+    Wait.waitForExpiryClockToChange(region);
     final Region.Entry entry = region.getEntry(key1);
     mutt = region.getAttributesMutator();
     ExpirationAttributes expire4 = new ExpirationAttributes(1, ExpirationAction.INVALIDATE);
@@ -3214,7 +3206,7 @@ public abstract class RegionTestCase extends CacheTestCase {
         return "entry never became invalid";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 10 * 1000, 10, true);
+    Wait.waitForCriterion(wc, 10 * 1000, 10, true);
 
     WaitCriterion waitForEventCountToBeOne = new WaitCriterion() {
       public boolean done() {
@@ -3224,7 +3216,7 @@ public abstract class RegionTestCase extends CacheTestCase {
         return "eventCount never became 1";
       }
     };
-    DistributedTestCase.waitForCriterion(waitForEventCountToBeOne, 10 * 1000, 10, true);
+    Wait.waitForCriterion(waitForEventCountToBeOne, 10 * 1000, 10, true);
     eventCount = 0;
   }
 
@@ -3413,7 +3405,7 @@ public abstract class RegionTestCase extends CacheTestCase {
     
     // Random values should not expire
     region.put(key1, value);
-    pause(timeout * 2);
+    Wait.pause(timeout * 2);
     assert(region.get(key1).equals(value));
     
     // key2 *should* expire
@@ -3487,14 +3479,14 @@ public abstract class RegionTestCase extends CacheTestCase {
       assertNotNull(entry.getValue());
       EntryExpiryTask eet = region.getEntryExpiryTask(key1);
       final long createExpiryTime = eet.getExpirationTime();
-      waitForExpiryClockToChange(region);
+      Wait.waitForExpiryClockToChange(region);
       region.get(key1);
       assertSame(eet, region.getEntryExpiryTask(key1));
       final long getExpiryTime = eet.getExpirationTime();
       if (getExpiryTime - createExpiryTime <= 0L) {
         fail("get did not reset the expiration time. createExpiryTime=" + createExpiryTime + " getExpiryTime=" + getExpiryTime);
       }
-      waitForExpiryClockToChange(region);
+      Wait.waitForExpiryClockToChange(region);
       region.put(key1, value);
       assertSame(eet, region.getEntryExpiryTask(key1));
       final long putExpiryTime = eet.getExpirationTime();
@@ -3597,7 +3589,7 @@ public abstract class RegionTestCase extends CacheTestCase {
     EntryExpiryTask eet = region.getEntryExpiryTask(key);
     long createExpiryTime = eet.getExpirationTime();
 
-    waitForExpiryClockToChange(region);
+    Wait.waitForExpiryClockToChange(region);
     region.get(key); // touch
     assertSame(eet, region.getEntryExpiryTask(key));
     long getExpiryTime = eet.getExpirationTime();
@@ -3605,7 +3597,7 @@ public abstract class RegionTestCase extends CacheTestCase {
       fail("get did not reset the expiration time. createExpiryTime=" + createExpiryTime + " getExpiryTime=" + getExpiryTime);
     }
     
-    waitForExpiryClockToChange(region);
+    Wait.waitForExpiryClockToChange(region);
     region.put(key, value); // touch
     assertSame(eet, region.getEntryExpiryTask(key));
     long putExpiryTime = eet.getExpirationTime();
@@ -3617,7 +3609,7 @@ public abstract class RegionTestCase extends CacheTestCase {
 
     // Now verify operations that do not modify the expiry time
     
-    waitForExpiryClockToChange(region);
+    Wait.waitForExpiryClockToChange(region);
     region.invalidate(key); // touch
     assertSame(eet, region.getEntryExpiryTask(key));
     long invalidateExpiryTime = eet.getExpirationTime();
@@ -3679,7 +3671,7 @@ public abstract class RegionTestCase extends CacheTestCase {
     region.put(key, value);
     tilt = System.currentTimeMillis() + timeout;
     entry = region.getEntry(key);
-    pause(timeout * 2);
+    Wait.pause(timeout * 2);
     assertEquals(value, entry.getValue());
     region.getAttributesMutator().setEntryIdleTimeout(expire);
     waitForInvalidate(entry, tilt);
@@ -3715,7 +3707,7 @@ public abstract class RegionTestCase extends CacheTestCase {
     if ((firstIdleExpiryTime - firstTTLExpiryTime) >= 0) {
       fail("idle should be less than ttl: idle=" + firstIdleExpiryTime + " ttl=" + firstTTLExpiryTime);
     }
-    waitForExpiryClockToChange(region);
+    Wait.waitForExpiryClockToChange(region);
     region.get(key);
     eet = region.getEntryExpiryTask(key);
     final long secondIdleExpiryTime = eet.getIdleExpirationTime();
@@ -3865,23 +3857,23 @@ public abstract class RegionTestCase extends CacheTestCase {
           ExpiryTask expiryTask = lr.getRegionIdleExpiryTask();
           region.put(key, value);
           long createExpiry = expiryTask.getExpirationTime();
-          long changeTime = waitForExpiryClockToChange(lr, createExpiry-EXPIRATION_MS);
+          long changeTime = Wait.waitForExpiryClockToChange(lr, createExpiry-EXPIRATION_MS);
           region.put(key, "VALUE2");
           long putExpiry = expiryTask.getExpirationTime();
           assertTrue("CLOCK went back in time! Expected putBaseExpiry=" + (putExpiry-EXPIRATION_MS) + " to be >= than changeTime=" + changeTime, (putExpiry-EXPIRATION_MS - changeTime) >= 0);
           assertTrue("expected putExpiry=" + putExpiry + " to be > than createExpiry=" + createExpiry, (putExpiry - createExpiry) > 0);
-          changeTime = waitForExpiryClockToChange(lr, putExpiry-EXPIRATION_MS);
+          changeTime = Wait.waitForExpiryClockToChange(lr, putExpiry-EXPIRATION_MS);
           region.get(key);
           long getExpiry = expiryTask.getExpirationTime();
           assertTrue("CLOCK went back in time! Expected getBaseExpiry=" + (getExpiry-EXPIRATION_MS) + " to be >= than changeTime=" + changeTime, (getExpiry-EXPIRATION_MS - changeTime) >= 0);
           assertTrue("expected getExpiry=" + getExpiry + " to be > than putExpiry=" + putExpiry, (getExpiry - putExpiry) > 0);
         
-          changeTime = waitForExpiryClockToChange(lr, getExpiry-EXPIRATION_MS);
+          changeTime = Wait.waitForExpiryClockToChange(lr, getExpiry-EXPIRATION_MS);
           sub.put(key, value);
           long subPutExpiry = expiryTask.getExpirationTime();
           assertTrue("CLOCK went back in time! Expected subPutBaseExpiry=" + (subPutExpiry-EXPIRATION_MS) + " to be >= than changeTime=" + changeTime, (subPutExpiry-EXPIRATION_MS - changeTime) >= 0);
           assertTrue("expected subPutExpiry=" + subPutExpiry + " to be > than getExpiry=" + getExpiry, (subPutExpiry - getExpiry) > 0);
-          changeTime = waitForExpiryClockToChange(lr, subPutExpiry-EXPIRATION_MS);
+          changeTime = Wait.waitForExpiryClockToChange(lr, subPutExpiry-EXPIRATION_MS);
           sub.get(key);
           long subGetExpiry = expiryTask.getExpirationTime();
           assertTrue("CLOCK went back in time! Expected subGetBaseExpiry=" + (subGetExpiry-EXPIRATION_MS) + " to be >= than changeTime=" + changeTime, (subGetExpiry-EXPIRATION_MS - changeTime) >= 0);
@@ -3950,7 +3942,7 @@ public abstract class RegionTestCase extends CacheTestCase {
     // create region in other VMs if distributed
     boolean isDistributed = getRegionAttributes().getScope().isDistributed();
     if (isDistributed) {
-      invokeInEveryVM(new CacheSerializableRunnable("create presnapshot region") {
+      Invoke.invokeInEveryVM(new CacheSerializableRunnable("create presnapshot region") {
         public void run2() throws CacheException {
           preSnapshotRegion = createRegion(name);
         }
@@ -3993,7 +3985,7 @@ public abstract class RegionTestCase extends CacheTestCase {
       
       // test postSnapshot behavior in other VMs if distributed
       if (isDistributed) {
-        invokeInEveryVM(new CacheSerializableRunnable("postSnapshot") {
+        Invoke.invokeInEveryVM(new CacheSerializableRunnable("postSnapshot") {
           public void run2() throws CacheException {
             RegionTestCase.this.remoteTestPostSnapshot(name, false, false);
           }
@@ -4014,7 +4006,7 @@ public abstract class RegionTestCase extends CacheTestCase {
     // create region in other VMs if distributed
     boolean isDistributed = getRegionAttributes().getScope().isDistributed();
     if (isDistributed) {
-      invokeInEveryVM(new CacheSerializableRunnable("create presnapshot region") {
+      Invoke.invokeInEveryVM(new CacheSerializableRunnable("create presnapshot region") {
         public void run2() throws CacheException {
           preSnapshotRegion = createRootRegion(name, getRegionAttributes());
         }
@@ -4063,7 +4055,7 @@ public abstract class RegionTestCase extends CacheTestCase {
       // test postSnapshot behavior in other VMs if distributed
       if (isDistributed) {
         log.info("before distributed remoteTestPostSnapshot");
-        invokeInEveryVM(new CacheSerializableRunnable("postSnapshot") {
+        Invoke.invokeInEveryVM(new CacheSerializableRunnable("postSnapshot") {
           public void run2() throws CacheException {
             RegionTestCase.this.remoteTestPostSnapshot(name, false, true);
           }

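The RegionTestCase hunk replaces the hand-rolled tearDown2() override with the template-method hooks introduced by this commit (postTearDownCacheTestCase plus a new overridable postTearDownRegionTestCase) and routes cross-VM cleanup through Invoke.invokeInEveryVM. A minimal sketch of how a subclass might plug into the new hook; the class name and the cleanupStatics helper are hypothetical:

import com.gemstone.gemfire.cache30.RegionTestCase;
import com.gemstone.gemfire.test.dunit.Invoke;
import com.gemstone.gemfire.test.dunit.SerializableRunnable;

public abstract class MyRegionDUnitTestSketch extends RegionTestCase {

  public MyRegionDUnitTestSketch(String name) {
    super(name);
  }

  @Override
  protected void postTearDownRegionTestCase() throws Exception {
    // Reset hypothetical static fixtures in every dunit VM after the
    // base class has finished its own cache tear-down.
    Invoke.invokeInEveryVM(new SerializableRunnable("reset statics") {
      public void run() {
        cleanupStatics();
      }
    });
  }

  static void cleanupStatics() {
    // hypothetical: clear static test state here
  }
}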
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RemoveAllMultiVmDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RemoveAllMultiVmDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RemoveAllMultiVmDUnitTest.java
index 1d878aa..f45c403 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RemoveAllMultiVmDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RemoveAllMultiVmDUnitTest.java
@@ -38,6 +38,7 @@ import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -70,14 +71,15 @@ public class RemoveAllMultiVmDUnitTest extends DistributedTestCase {
       vm1.invoke(RemoveAllMultiVmDUnitTest.class, "createCache");
     }
     
-    public void tearDown2(){
-        Host host = Host.getHost(0);
-        VM vm0 = host.getVM(0);
-        VM vm1 = host.getVM(1);
-        vm0.invoke(RemoveAllMultiVmDUnitTest.class, "closeCache");
-        vm1.invoke(RemoveAllMultiVmDUnitTest.class, "closeCache");
-        cache = null;
-        invokeInEveryVM(new SerializableRunnable() { public void run() { cache = null; } });
+    @Override
+    protected final void preTearDown() throws Exception {
+      Host host = Host.getHost(0);
+      VM vm0 = host.getVM(0);
+      VM vm1 = host.getVM(1);
+      vm0.invoke(RemoveAllMultiVmDUnitTest.class, "closeCache");
+      vm1.invoke(RemoveAllMultiVmDUnitTest.class, "closeCache");
+      cache = null;
+      Invoke.invokeInEveryVM(new SerializableRunnable() { public void run() { cache = null; } });
     }
     
     public static void createCache(){

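RemoveAllMultiVmDUnitTest shows the other half of the lifecycle change: instead of overriding tearDown2() and remembering to call super, the test overrides preTearDown(), which the framework invokes before its own cleanup. A minimal sketch of that pattern, using the same vm.invoke(Class, "methodName") style seen above; the sketch class and its closeCache helper are hypothetical:

import com.gemstone.gemfire.test.dunit.DistributedTestCase;
import com.gemstone.gemfire.test.dunit.Host;
import com.gemstone.gemfire.test.dunit.VM;

public class PreTearDownSketch extends DistributedTestCase {

  public PreTearDownSketch(String name) {
    super(name);
  }

  @Override
  protected final void preTearDown() throws Exception {
    // Close the cache in the VM this test used before the framework
    // runs its own distributed tear-down.
    VM vm0 = Host.getHost(0).getVM(0);
    vm0.invoke(PreTearDownSketch.class, "closeCache");
  }

  public static void closeCache() {
    // hypothetical: close the cache this test created in the VM
  }
}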
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RequiredRolesDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RequiredRolesDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RequiredRolesDUnitTest.java
index a4120b0..1e69266 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RequiredRolesDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RequiredRolesDUnitTest.java
@@ -34,9 +34,12 @@ import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.distributed.Role;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.distributed.internal.membership.InternalRole;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Tests the functionality of the {@link RequiredRoles} class.
@@ -192,7 +195,7 @@ public class RequiredRolesDUnitTest extends ReliabilityTestCase {
         return "waiting for test start";
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 60 * 1000, 200, true);
     assertTrue(this.startTestWaitForRequiredRoles);
     assertFalse(this.finishTestWaitForRequiredRoles);
     
@@ -220,7 +223,7 @@ public class RequiredRolesDUnitTest extends ReliabilityTestCase {
         
     // create region in vm3... gain for 2 roles
     Host.getHost(0).getVM(vm3).invoke(create);
-    DistributedTestCase.join(threadA, 30 * 1000, getLogWriter());
+    ThreadUtils.join(threadA, 30 * 1000);
     assertTrue(this.finishTestWaitForRequiredRoles);
     assertTrue(this.rolesTestWaitForRequiredRoles.isEmpty());
     
@@ -240,7 +243,7 @@ public class RequiredRolesDUnitTest extends ReliabilityTestCase {
     this.finishTestWaitForRequiredRoles = false;
     threadA = new Thread(group, runWaitForRequiredRoles);
     threadA.start();
-    DistributedTestCase.join(threadA, 30 * 1000, getLogWriter());
+    ThreadUtils.join(threadA, 30 * 1000);
     assertTrue(this.startTestWaitForRequiredRoles);
     assertTrue(this.finishTestWaitForRequiredRoles);
     assertTrue(this.rolesTestWaitForRequiredRoles.isEmpty());
@@ -253,7 +256,7 @@ public class RequiredRolesDUnitTest extends ReliabilityTestCase {
     this.finishTestWaitForRequiredRoles = false;
     threadA = new Thread(group, runWaitForRequiredRoles);
     threadA.start();
-    DistributedTestCase.join(threadA, 30 * 1000, getLogWriter());
+    ThreadUtils.join(threadA, 30 * 1000);
     assertTrue(this.startTestWaitForRequiredRoles);
     assertTrue(this.finishTestWaitForRequiredRoles);
     assertTrue(this.rolesTestWaitForRequiredRoles.isEmpty());
@@ -276,14 +279,14 @@ public class RequiredRolesDUnitTest extends ReliabilityTestCase {
         return "waiting for test start";
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 60 * 1000, 200, true);
     assertTrue(this.startTestWaitForRequiredRoles);
     assertFalse(this.finishTestWaitForRequiredRoles);
     assertMissingRoles(name, vmRoles[vm2]);
     
     // end the wait and make sure no roles are missing
     Host.getHost(0).getVM(vm2).invoke(create);
-    DistributedTestCase.join(threadA, 30 * 1000, getLogWriter());
+    ThreadUtils.join(threadA, 30 * 1000);
     assertTrue(this.startTestWaitForRequiredRoles);
     assertTrue(this.finishTestWaitForRequiredRoles);
     assertTrue(this.rolesTestWaitForRequiredRoles.isEmpty());
@@ -429,7 +432,7 @@ public class RequiredRolesDUnitTest extends ReliabilityTestCase {
             SystemFailure.setFailure((VirtualMachineError)e); // don't throw
           }
           String s = "Uncaught exception in thread " + t;
-          getLogWriter().error(s, e);
+          LogWriterUtils.getLogWriter().error(s, e);
           fail(s);
         }
       };

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RolePerformanceDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RolePerformanceDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RolePerformanceDUnitTest.java
index 81cd191..e7c0ba7 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RolePerformanceDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/RolePerformanceDUnitTest.java
@@ -26,6 +26,7 @@ import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 
 /**
@@ -63,7 +64,7 @@ public class RolePerformanceDUnitTest extends CacheTestCase {
           throw e;
         }
         else {
-          getLogWriter().info("testRolePerformance attempt #" + i + 
+          LogWriterUtils.getLogWriter().info("testRolePerformance attempt #" + i + 
             " failed -- reattempting up to 10x", e);
         }
       }
@@ -101,7 +102,7 @@ public class RolePerformanceDUnitTest extends CacheTestCase {
     String data = name + " results: millisNoRoles=" + millisNoRoles +
       ", millisWithRoles=" + millisWithRoles + ", deviation=" + deviation + 
       ", ceiling=" + ceiling;
-    getLogWriter().info(data);
+    LogWriterUtils.getLogWriter().info(data);
     
     assertTrue("millisWithRoles is greater than allowable deviation: " + data,
                millisWithRoles <= ceiling);


[02/62] [abbrv] [partial] incubator-geode git commit: GEODE-773: Extract static methods from DistributedTestCase

Posted by je...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentParallelGatewaySenderDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentParallelGatewaySenderDUnitTest.java b/gemfire-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentParallelGatewaySenderDUnitTest.java
index c14af54..dbdff58 100644
--- a/gemfire-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentParallelGatewaySenderDUnitTest.java
+++ b/gemfire-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentParallelGatewaySenderDUnitTest.java
@@ -25,6 +25,9 @@ import com.gemstone.gemfire.internal.cache.wan.BatchException70;
 import com.gemstone.gemfire.internal.cache.wan.WANTestBase;
 import com.gemstone.gemfire.internal.cache.wan.parallel.ConcurrentParallelGatewaySenderEventProcessor;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 import java.net.SocketException;
 import java.util.Set;
@@ -75,13 +78,13 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
         true, 100, 10, false, false, null, true, 5, OrderPolicy.PARTITION });
 
     vm4.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm5.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm6.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm7.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
 
     vm4.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
     vm5.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
@@ -89,9 +92,9 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
     vm7.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
 
     vm2.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", null, 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", null, 1, 100, isOffHeap() });
     vm3.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", null, 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", null, 1, 100, isOffHeap() });
 
     //before doing any puts, let the senders be running in order to ensure that
     //not a single event will be lost
@@ -112,7 +115,7 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
       vm7.invoke(ConcurrentParallelGatewaySenderDUnitTest.class, "setTestHook",
           new Object[] {"ln", Boolean.TRUE });
 
-      vm4.invoke(WANTestBase.class, "doPuts", new Object[] { testName + "_PR",
+      vm4.invoke(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR",
           1000 });
 
       // verify all buckets drained on all sender nodes.
@@ -126,7 +129,7 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
           "validateParallelSenderQueueAllBucketsDrained", new Object[] { "ln" });
 
       vm2.invoke(WANTestBase.class, "validateRegionSize", new Object[] {
-          testName + "_PR", 1000 });
+          getTestMethodName() + "_PR", 1000 });
 
       int dispatched1 = (Integer)vm4.invoke(WANTestBase.class,
           "verifyAndGetEventsDispatchedByConcurrentDispatchers",
@@ -179,13 +182,13 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
         true, 100, 10, false, false, null, true, 5, OrderPolicy.PARTITION });
 
     vm4.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm5.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm6.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm7.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
 
     vm4.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
     vm5.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
@@ -193,9 +196,9 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
     vm7.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
 
     vm2.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", null, 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", null, 1, 100, isOffHeap() });
     vm3.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", null, 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", null, 1, 100, isOffHeap() });
 
     //before doing any puts, let the senders be running in order to ensure that
     //not a single event will be lost
@@ -204,7 +207,7 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
     vm6.invoke(WANTestBase.class, "waitForSenderRunningState", new Object[] { "ln" });
     vm7.invoke(WANTestBase.class, "waitForSenderRunningState", new Object[] { "ln" });
     
-    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { testName + "_PR",
+    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR",
         1000 });
     
     //verify all buckets drained on all sender nodes.
@@ -214,7 +217,7 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
     vm7.invoke(WANTestBase.class, "validateParallelSenderQueueAllBucketsDrained", new Object[] {"ln"});
     
     vm2.invoke(WANTestBase.class, "validateRegionSize", new Object[] {
-        testName + "_PR", 1000 });
+        getTestMethodName() + "_PR", 1000 });
   }
   
   
@@ -246,13 +249,13 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
         true, 100, 10, false, false, null, true, 7, OrderPolicy.PARTITION });
 
     vm4.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm5.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm6.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm7.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
 
     vm4.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
     vm5.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
@@ -260,9 +263,9 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
     vm7.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
 
     vm2.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", null, 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", null, 1, 100, isOffHeap() });
     vm3.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", null, 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", null, 1, 100, isOffHeap() });
 
     //before doing any puts, let the senders be running in order to ensure that
     //not a single event will be lost
@@ -271,7 +274,7 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
     vm6.invoke(WANTestBase.class, "waitForSenderRunningState", new Object[] { "ln" });
     vm7.invoke(WANTestBase.class, "waitForSenderRunningState", new Object[] { "ln" });
     
-    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { testName + "_PR",
+    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR",
         1000 });
     
     //verify all buckets drained on all sender nodes.
@@ -281,7 +284,7 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
     vm7.invoke(WANTestBase.class, "validateParallelSenderQueueAllBucketsDrained", new Object[] {"ln"});
     
     vm2.invoke(WANTestBase.class, "validateRegionSize", new Object[] {
-        testName + "_PR", 1000 });
+        getTestMethodName() + "_PR", 1000 });
   }
   
   
@@ -312,13 +315,13 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
         true, 100, 300, false, false, null, true, 6, OrderPolicy.PARTITION });
 
     vm4.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm5.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm6.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm7.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
 
     vm4.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
     vm5.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
@@ -336,7 +339,7 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
     vm6.invoke(WANTestBase.class, "waitForSenderRunningState", new Object[] { "ln" });
     vm7.invoke(WANTestBase.class, "waitForSenderRunningState", new Object[] { "ln" });
 
-    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { testName + "_PR",
+    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR",
       1000 });
 
     
@@ -344,9 +347,9 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
     vm3.invoke(WANTestBase.class, "createReceiver", new Object[] { nyPort });
     
     vm2.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-      testName + "_PR", null, 1, 100, isOffHeap() });
+      getTestMethodName() + "_PR", null, 1, 100, isOffHeap() });
     vm3.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-      testName + "_PR", null, 1, 100, isOffHeap() });
+      getTestMethodName() + "_PR", null, 1, 100, isOffHeap() });
     
     //verify all buckets drained on all sender nodes.
     vm4.invoke(WANTestBase.class, "validateParallelSenderQueueAllBucketsDrained", new Object[] {"ln"});
@@ -359,7 +362,7 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
     // started before creating partition region it is quite possible that the
     // region may loose some of the events. This needs to be handled by the code
     vm2.invoke(WANTestBase.class, "validateRegionSize", new Object[] {
-        testName + "_PR", 1000 });
+        getTestMethodName() + "_PR", 1000 });
   }
   
   /**
@@ -471,13 +474,13 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
         true, 100, 10, false, false, null, true, 6, OrderPolicy.PARTITION });
 
     vm4.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm5.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm6.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm7.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
 
     vm4.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
     vm5.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
@@ -485,9 +488,9 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
     vm7.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
 
     vm2.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", null, 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", null, 1, 100, isOffHeap() });
     vm3.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", null, 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", null, 1, 100, isOffHeap() });
 
     //before doing any puts, let the senders be running in order to ensure that
     //not a single event will be lost
@@ -496,10 +499,10 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
     vm6.invoke(WANTestBase.class, "waitForSenderRunningState", new Object[] { "ln" });
     vm7.invoke(WANTestBase.class, "waitForSenderRunningState", new Object[] { "ln" });
     
-    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { testName + "_PR",
+    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR",
       1000 });
     vm2.invoke(WANTestBase.class, "validateRegionSize", new Object[] {
-              testName + "_PR", 1000 });
+              getTestMethodName() + "_PR", 1000 });
     //-------------------Close and rebuild local site ---------------------------------
 
     vm4.invoke(WANTestBase.class, "killSender", new Object[] {});
@@ -508,8 +511,8 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
     vm7.invoke(WANTestBase.class, "killSender", new Object[] {});
     
     Integer regionSize = 
-      (Integer) vm2.invoke(WANTestBase.class, "getRegionSize", new Object[] {testName + "_PR" });
-    getLogWriter().info("Region size on remote is: " + regionSize);
+      (Integer) vm2.invoke(WANTestBase.class, "getRegionSize", new Object[] {getTestMethodName() + "_PR" });
+    LogWriterUtils.getLogWriter().info("Region size on remote is: " + regionSize);
     
     vm4.invoke(WANTestBase.class, "createCache", new Object[] { lnPort });
     vm5.invoke(WANTestBase.class, "createCache", new Object[] { lnPort });
@@ -531,13 +534,13 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
     vm7.invoke(WANTestBase.class, "setRemoveFromQueueOnException", new Object[] { "ln", true });
     
     vm4.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm5.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm6.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm7.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
 
     vm4.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
     vm5.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
@@ -550,11 +553,11 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
     vm7.invoke(WANTestBase.class, "waitForSenderRunningState", new Object[] { "ln" });
     //------------------------------------------------------------------------------------
     
-    addExpectedException(EntryExistsException.class.getName());
-    addExpectedException(BatchException70.class.getName());
-    addExpectedException(ServerOperationException.class.getName());
+    IgnoredException.addIgnoredException(EntryExistsException.class.getName());
+    IgnoredException.addIgnoredException(BatchException70.class.getName());
+    IgnoredException.addIgnoredException(ServerOperationException.class.getName());
     
-    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { testName + "_PR", 10000 });
+    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR", 10000 });
     
     //verify all buckets drained on all sender nodes.
     vm4.invoke(WANTestBase.class, "validateParallelSenderQueueAllBucketsDrained", new Object[] {"ln"});
@@ -563,9 +566,9 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
     vm7.invoke(WANTestBase.class, "validateParallelSenderQueueAllBucketsDrained", new Object[] {"ln"});
     
     vm2.invoke(WANTestBase.class, "validateRegionSize", new Object[] {
-        testName + "_PR", 10000 });
+        getTestMethodName() + "_PR", 10000 });
     vm3.invoke(WANTestBase.class, "validateRegionSize", new Object[] {
-      testName + "_PR", 10000 });
+      getTestMethodName() + "_PR", 10000 });
   }
   
   /**
@@ -597,13 +600,13 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
         true, 100, 10, false, false, null, true, 7, OrderPolicy.KEY });
 
     vm4.invoke(WANTestBase.class, "createColocatedPartitionedRegions",
-        new Object[] { testName, "ln", 1, 100, isOffHeap() });
+        new Object[] { getTestMethodName(), "ln", 1, 100, isOffHeap() });
     vm5.invoke(WANTestBase.class, "createColocatedPartitionedRegions",
-        new Object[] { testName, "ln", 1, 100, isOffHeap() });
+        new Object[] { getTestMethodName(), "ln", 1, 100, isOffHeap() });
     vm6.invoke(WANTestBase.class, "createColocatedPartitionedRegions",
-        new Object[] { testName, "ln", 1, 100, isOffHeap() });
+        new Object[] { getTestMethodName(), "ln", 1, 100, isOffHeap() });
     vm7.invoke(WANTestBase.class, "createColocatedPartitionedRegions",
-        new Object[] { testName, "ln", 1, 100, isOffHeap() });
+        new Object[] { getTestMethodName(), "ln", 1, 100, isOffHeap() });
 
     vm4.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
     vm5.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
@@ -611,11 +614,11 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
     vm7.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
 
     vm2.invoke(WANTestBase.class, "createColocatedPartitionedRegions",
-        new Object[] { testName, null, 1, 100, isOffHeap() });
+        new Object[] { getTestMethodName(), null, 1, 100, isOffHeap() });
     vm3.invoke(WANTestBase.class, "createColocatedPartitionedRegions",
-        new Object[] { testName, null, 1, 100, isOffHeap() });
+        new Object[] { getTestMethodName(), null, 1, 100, isOffHeap() });
 
-    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { testName, 1000 });
+    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { getTestMethodName(), 1000 });
     
     //verify all buckets drained on all sender nodes.
     vm4.invoke(WANTestBase.class, "validateParallelSenderQueueAllBucketsDrained", new Object[] {"ln"});
@@ -624,7 +627,7 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
     vm7.invoke(WANTestBase.class, "validateParallelSenderQueueAllBucketsDrained", new Object[] {"ln"});
 
     vm2.invoke(WANTestBase.class, "validateRegionSize", new Object[] {
-        testName, 1000 });
+        getTestMethodName(), 1000 });
   }
   
   
@@ -657,13 +660,13 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
         true, 100, 10, false, false, null, true, 7, OrderPolicy.PARTITION });
 
     vm4.invoke(WANTestBase.class, "createColocatedPartitionedRegions",
-        new Object[] { testName, "ln", 1, 100, isOffHeap() });
+        new Object[] { getTestMethodName(), "ln", 1, 100, isOffHeap() });
     vm5.invoke(WANTestBase.class, "createColocatedPartitionedRegions",
-        new Object[] { testName, "ln", 1, 100, isOffHeap() });
+        new Object[] { getTestMethodName(), "ln", 1, 100, isOffHeap() });
     vm6.invoke(WANTestBase.class, "createColocatedPartitionedRegions",
-        new Object[] { testName, "ln", 1, 100, isOffHeap() });
+        new Object[] { getTestMethodName(), "ln", 1, 100, isOffHeap() });
     vm7.invoke(WANTestBase.class, "createColocatedPartitionedRegions",
-        new Object[] { testName, "ln", 1, 100, isOffHeap() });
+        new Object[] { getTestMethodName(), "ln", 1, 100, isOffHeap() });
 
     vm4.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
     vm5.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
@@ -671,11 +674,11 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
     vm7.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
 
     vm2.invoke(WANTestBase.class, "createColocatedPartitionedRegions",
-        new Object[] { testName, null, 1, 100, isOffHeap() });
+        new Object[] { getTestMethodName(), null, 1, 100, isOffHeap() });
     vm3.invoke(WANTestBase.class, "createColocatedPartitionedRegions",
-        new Object[] { testName, null, 1, 100, isOffHeap() });
+        new Object[] { getTestMethodName(), null, 1, 100, isOffHeap() });
 
-    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { testName, 1000 });
+    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { getTestMethodName(), 1000 });
     
     //verify all buckets drained on all sender nodes.
     vm4.invoke(WANTestBase.class, "validateParallelSenderQueueAllBucketsDrained", new Object[] {"ln"});
@@ -684,11 +687,11 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
     vm7.invoke(WANTestBase.class, "validateParallelSenderQueueAllBucketsDrained", new Object[] {"ln"});
 
     vm2.invoke(WANTestBase.class, "validateRegionSize", new Object[] {
-        testName, 1000 });
+        getTestMethodName(), 1000 });
   }
   
   public void testPartitionedParallelPropagationHA() throws Exception {
-    addExpectedException(SocketException.class.getName()); // for Connection reset
+    IgnoredException.addIgnoredException(SocketException.class.getName()); // for Connection reset
     Integer lnPort = (Integer)vm0.invoke(WANTestBase.class,
         "createFirstLocatorWithDSId", new Object[] { 1 });
     Integer nyPort = (Integer)vm1.invoke(WANTestBase.class,
@@ -717,13 +720,13 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
     vm7.invoke(WANTestBase.class, "setRemoveFromQueueOnException", new Object[] { "ln", true });
     
     vm4.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 2, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 2, 100, isOffHeap() });
     vm5.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 2, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 2, 100, isOffHeap() });
     vm6.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 2, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 2, 100, isOffHeap() });
     vm7.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 2, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 2, 100, isOffHeap() });
 
     vm4.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
     vm5.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
@@ -731,17 +734,17 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
     vm7.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
     
     vm2.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", null, 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", null, 1, 100, isOffHeap() });
     vm3.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", null, 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", null, 1, 100, isOffHeap() });
 
     AsyncInvocation inv1 = vm7.invokeAsync(WANTestBase.class, "doPuts",
-        new Object[] { testName + "_PR", 5000 });
-    pause(500);
+        new Object[] { getTestMethodName() + "_PR", 5000 });
+    Wait.pause(500);
     AsyncInvocation inv2 = vm4.invokeAsync(WANTestBase.class, "killSender");
     AsyncInvocation inv3 = vm6.invokeAsync(WANTestBase.class, "doPuts",
-        new Object[] { testName + "_PR", 10000 });
-    pause(1500);
+        new Object[] { getTestMethodName() + "_PR", 10000 });
+    Wait.pause(1500);
     AsyncInvocation inv4 = vm5.invokeAsync(WANTestBase.class, "killSender");
     inv1.join();
     inv2.join();
@@ -749,18 +752,18 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
     inv4.join();
     
     vm6.invoke(WANTestBase.class, "validateRegionSize", new Object[] {
-      testName + "_PR", 10000 });
+      getTestMethodName() + "_PR", 10000 });
     vm7.invoke(WANTestBase.class, "validateRegionSize", new Object[] {
-      testName + "_PR", 10000 });
+      getTestMethodName() + "_PR", 10000 });
     
     //verify all buckets drained on the sender nodes that up and running.
     vm6.invoke(WANTestBase.class, "validateParallelSenderQueueAllBucketsDrained", new Object[] {"ln"});
     vm7.invoke(WANTestBase.class, "validateParallelSenderQueueAllBucketsDrained", new Object[] {"ln"});
 
     vm2.invoke(WANTestBase.class, "validateRegionSize", new Object[] {
-        testName + "_PR", 10000 });
+        getTestMethodName() + "_PR", 10000 });
     vm3.invoke(WANTestBase.class, "validateRegionSize", new Object[] {
-        testName + "_PR", 10000 });
+        getTestMethodName() + "_PR", 10000 });
   }
   
   public void testWANPDX_PR_MultipleVM_ConcurrentParallelSender() {
@@ -780,21 +783,21 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
       true, 100, 10, false, false, null, true, 5, OrderPolicy.KEY });
     
     vm2.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", null, 0, 2, isOffHeap()});
+        getTestMethodName() + "_PR", null, 0, 2, isOffHeap()});
 
     vm3.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-      testName + "_PR", "ln", 0, 2, isOffHeap()});
+      getTestMethodName() + "_PR", "ln", 0, 2, isOffHeap()});
     vm4.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-      testName + "_PR", "ln", 0, 2, isOffHeap()});
+      getTestMethodName() + "_PR", "ln", 0, 2, isOffHeap()});
     
     vm3.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
     vm4.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
     
-    vm3.invoke(WANTestBase.class, "doPutsPDXSerializable", new Object[] { testName + "_PR",
+    vm3.invoke(WANTestBase.class, "doPutsPDXSerializable", new Object[] { getTestMethodName() + "_PR",
         10 });
 
     vm2.invoke(WANTestBase.class, "validateRegionSize_PDX", new Object[] {
-        testName + "_PR", 10 });
+        getTestMethodName() + "_PR", 10 });
   }
   
   public void testWANPDX_PR_MultipleVM_ConcurrentParallelSender_StartedLater() {
@@ -814,14 +817,14 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
       true, 100, 10, false, false, null, true, 5, OrderPolicy.KEY });
     
     vm2.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", null, 0, 2, isOffHeap()});
+        getTestMethodName() + "_PR", null, 0, 2, isOffHeap()});
 
     vm3.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-      testName + "_PR", "ln", 0, 2, isOffHeap()});
+      getTestMethodName() + "_PR", "ln", 0, 2, isOffHeap()});
     vm4.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-      testName + "_PR", "ln", 0, 2, isOffHeap()});
+      getTestMethodName() + "_PR", "ln", 0, 2, isOffHeap()});
     
-    vm3.invoke(WANTestBase.class, "doPutsPDXSerializable", new Object[] { testName + "_PR",
+    vm3.invoke(WANTestBase.class, "doPutsPDXSerializable", new Object[] { getTestMethodName() + "_PR",
         10 });
 
     AsyncInvocation inv1 = vm3.invokeAsync(WANTestBase.class, "startSender", new Object[] { "ln" });
@@ -835,11 +838,11 @@ public class ConcurrentParallelGatewaySenderDUnitTest extends WANTestBase {
       fail("Caught interrupted exception");
     }
     
-    vm4.invoke(WANTestBase.class, "doPutsPDXSerializable", new Object[] { testName + "_PR",
+    vm4.invoke(WANTestBase.class, "doPutsPDXSerializable", new Object[] { getTestMethodName() + "_PR",
       40 });
     
     vm2.invoke(WANTestBase.class, "validateRegionSize_PDX", new Object[] {
-        testName + "_PR", 40 });
+        getTestMethodName() + "_PR", 40 });
   }
 
   public static void setTestHook(String senderId, boolean hook) {

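The imports added at the top of the file above show the shape of this change: helpers the tests previously called as inherited methods (addExpectedException, pause, getLogWriter, and the testName field) now go through static utility classes (IgnoredException.addIgnoredException, Wait.pause, LogWriterUtils.getLogWriter) plus the getTestMethodName() accessor. Below is a minimal sketch of a test written against the migrated API, restricted to the classes and calls visible in the hunks above; the JUnit 3 style name constructor is assumed from the test base class, and the class name, region name, and sender id are placeholders rather than code from this commit.

    import com.gemstone.gemfire.internal.cache.wan.WANTestBase;
    import com.gemstone.gemfire.test.dunit.IgnoredException;
    import com.gemstone.gemfire.test.dunit.LogWriterUtils;
    import com.gemstone.gemfire.test.dunit.Wait;

    public class ExampleMigratedDUnitTest extends WANTestBase {

      public ExampleMigratedDUnitTest(String name) {
        super(name);
      }

      public void testReplication() {
        // exceptions to tolerate are registered through the static utility,
        // not through the removed inherited helper
        IgnoredException.addIgnoredException("Connection reset");

        // region names are derived from the running test method instead of the testName field
        String regionName = getTestMethodName() + "_PR";

        vm4.invoke(WANTestBase.class, "createPartitionedRegion",
            new Object[] { regionName, "ln", 1, 100, isOffHeap() });
        vm4.invoke(WANTestBase.class, "doPuts", new Object[] { regionName, 1000 });

        // fixed delays and logging also route through the extracted static classes
        Wait.pause(2000);
        LogWriterUtils.getLogWriter().info("Done 1000 puts on " + regionName);

        vm2.invoke(WANTestBase.class, "validateRegionSize", new Object[] { regionName, 1000 });
      }
    }

Because the helpers are static, the call sites read the same regardless of which base class the enclosing test extends.
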
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentParallelGatewaySenderOperation_1_DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentParallelGatewaySenderOperation_1_DUnitTest.java b/gemfire-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentParallelGatewaySenderOperation_1_DUnitTest.java
index e0775b7..1ed4d1b 100644
--- a/gemfire-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentParallelGatewaySenderOperation_1_DUnitTest.java
+++ b/gemfire-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentParallelGatewaySenderOperation_1_DUnitTest.java
@@ -19,6 +19,9 @@ package com.gemstone.gemfire.internal.cache.wan.concurrent;
 import com.gemstone.gemfire.cache.wan.GatewaySender.OrderPolicy;
 import com.gemstone.gemfire.internal.cache.wan.WANTestBase;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 /**
  * @author skumar
@@ -33,9 +36,9 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
   
   public void setUp() throws Exception {
     super.setUp();
-    addExpectedException("Broken pipe");
-    addExpectedException("Connection reset");
-    addExpectedException("Unexpected IOException");
+    IgnoredException.addIgnoredException("Broken pipe");
+    IgnoredException.addIgnoredException("Connection reset");
+    IgnoredException.addIgnoredException("Unexpected IOException");
   }
   
   public void testParallelGatewaySenderWithoutStarting() {
@@ -62,28 +65,28 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
         true, 100, 10, false, false, null, true, 6, OrderPolicy.KEY });
 
     vm4.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm5.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm6.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm7.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
 
     vm2.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", null, 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", null, 1, 100, isOffHeap() });
     vm3.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", null, 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", null, 1, 100, isOffHeap() });
 
-    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { testName + "_PR", 1000 });
+    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR", 1000 });
     
     vm4.invoke(WANTestBase.class, "verifySenderStoppedState", new Object[] { "ln" });
     vm5.invoke(WANTestBase.class, "verifySenderStoppedState", new Object[] { "ln" });
     vm6.invoke(WANTestBase.class, "verifySenderStoppedState", new Object[] { "ln" });
     vm7.invoke(WANTestBase.class, "verifySenderStoppedState", new Object[] { "ln" });
     
-    vm2.invoke(WANTestBase.class, "validateRegionSize", new Object[] {testName + "_PR", 0 });
-    vm3.invoke(WANTestBase.class, "validateRegionSize", new Object[] {testName + "_PR", 0 });
+    vm2.invoke(WANTestBase.class, "validateRegionSize", new Object[] {getTestMethodName() + "_PR", 0 });
+    vm3.invoke(WANTestBase.class, "validateRegionSize", new Object[] {getTestMethodName() + "_PR", 0 });
   }
   
   /**
@@ -113,18 +116,18 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
         true, 100, 10, false, false, null, true, 7, OrderPolicy.KEY });
 
     vm4.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm5.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm6.invoke(WANTestBase.class, "createPartitionedRegionAsAccessor", new Object[] {
-        testName + "_PR", "ln", 1, 100 });
+        getTestMethodName() + "_PR", "ln", 1, 100 });
     vm7.invoke(WANTestBase.class, "createPartitionedRegionAsAccessor", new Object[] {
-        testName + "_PR", "ln", 1, 100 });
+        getTestMethodName() + "_PR", "ln", 1, 100 });
 
     vm2.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", null, 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", null, 1, 100, isOffHeap() });
     vm3.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", null, 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", null, 1, 100, isOffHeap() });
     
     //start the senders
     vm4.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
@@ -132,18 +135,18 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
     vm6.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
     vm7.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
     
-    pause(2000);
+    Wait.pause(2000);
     
     vm6.invoke(WANTestBase.class, "waitForSenderRunningState", new Object[] { "ln" });
     vm7.invoke(WANTestBase.class, "waitForSenderRunningState", new Object[] { "ln" });
 
-    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { testName + "_PR", 1000 });
+    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR", 1000 });
 
     vm4.invoke(WANTestBase.class, "validateParallelSenderQueueAllBucketsDrained", new Object[] {"ln"});
     vm5.invoke(WANTestBase.class, "validateParallelSenderQueueAllBucketsDrained", new Object[] {"ln"});
     
-    vm2.invoke(WANTestBase.class, "validateRegionSize", new Object[] {testName + "_PR", 1000 });
-    vm3.invoke(WANTestBase.class, "validateRegionSize", new Object[] {testName + "_PR", 1000 });
+    vm2.invoke(WANTestBase.class, "validateRegionSize", new Object[] {getTestMethodName() + "_PR", 1000 });
+    vm3.invoke(WANTestBase.class, "validateRegionSize", new Object[] {getTestMethodName() + "_PR", 1000 });
   }
 
   
@@ -175,13 +178,13 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
         true, 100, 10, false, false, null, true, 5, OrderPolicy.KEY });
 
     vm4.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm5.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm6.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm7.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
 
     vm4.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
     vm5.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
@@ -189,9 +192,9 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
     vm7.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
 
     vm2.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", null, 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", null, 1, 100, isOffHeap() });
     vm3.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", null, 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", null, 1, 100, isOffHeap() });
 
     //make sure all the senders are running before doing any puts
     vm4.invoke(WANTestBase.class, "waitForSenderRunningState", new Object[] { "ln" });
@@ -200,19 +203,19 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
     vm7.invoke(WANTestBase.class, "waitForSenderRunningState", new Object[] { "ln" });
     
     //FIRST RUN: now, the senders are started. So, start the puts
-    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { testName + "_PR", 100 });
+    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR", 100 });
     
     //now, pause all of the senders
     vm4.invoke(WANTestBase.class, "pauseSender", new Object[] { "ln" });
     vm5.invoke(WANTestBase.class, "pauseSender", new Object[] { "ln" });
     vm6.invoke(WANTestBase.class, "pauseSender", new Object[] { "ln" });
     vm7.invoke(WANTestBase.class, "pauseSender", new Object[] { "ln" });
-    pause(2000);
+    Wait.pause(2000);
     //SECOND RUN: keep one thread doing puts to the region
-    vm4.invokeAsync(WANTestBase.class, "doPuts", new Object[] { testName + "_PR", 1000 });
+    vm4.invokeAsync(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR", 1000 });
     
     //verify region size remains on remote vm and is restricted below a specified limit (i.e. number of puts in the first run)
-    vm2.invoke(WANTestBase.class, "validateRegionSizeRemainsSame", new Object[] {testName + "_PR", 100 });
+    vm2.invoke(WANTestBase.class, "validateRegionSizeRemainsSame", new Object[] {getTestMethodName() + "_PR", 100 });
   }
 
   /**
@@ -243,13 +246,13 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
         true, 100, 10, false, false, null, true, 8, OrderPolicy.KEY });
 
     vm4.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm5.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm6.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm7.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
 
     vm4.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
     vm5.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
@@ -257,9 +260,9 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
     vm7.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
 
     vm2.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", null, 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", null, 1, 100, isOffHeap() });
     vm3.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", null, 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", null, 1, 100, isOffHeap() });
 
     //make sure all the senders are running before doing any puts
     vm4.invoke(WANTestBase.class, "waitForSenderRunningState", new Object[] { "ln" });
@@ -268,7 +271,7 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
     vm7.invoke(WANTestBase.class, "waitForSenderRunningState", new Object[] { "ln" });
     
     //now, the senders are started. So, start the puts
-    vm4.invokeAsync(WANTestBase.class, "doPuts", new Object[] { testName + "_PR", 1000 });
+    vm4.invokeAsync(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR", 1000 });
     
     //now, pause all of the senders
     vm4.invoke(WANTestBase.class, "pauseSender", new Object[] { "ln" });
@@ -277,7 +280,7 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
     vm7.invoke(WANTestBase.class, "pauseSender", new Object[] { "ln" });
     
     //sleep for a second or two
-    pause(2000);
+    Wait.pause(2000);
     
     //resume the senders
     vm4.invoke(WANTestBase.class, "resumeSender", new Object[] { "ln" });
@@ -285,7 +288,7 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
     vm6.invoke(WANTestBase.class, "resumeSender", new Object[] { "ln" });
     vm7.invoke(WANTestBase.class, "resumeSender", new Object[] { "ln" });
     
-    pause(2000);
+    Wait.pause(2000);
     
     vm4.invoke(WANTestBase.class, "validateParallelSenderQueueAllBucketsDrained", new Object[] {"ln"});
     vm5.invoke(WANTestBase.class, "validateParallelSenderQueueAllBucketsDrained", new Object[] {"ln"});
@@ -293,7 +296,7 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
     vm7.invoke(WANTestBase.class, "validateParallelSenderQueueAllBucketsDrained", new Object[] {"ln"});
     
     //find the region size on remote vm
-    vm2.invoke(WANTestBase.class, "validateRegionSize", new Object[] {testName + "_PR", 1000 });
+    vm2.invoke(WANTestBase.class, "validateRegionSize", new Object[] {getTestMethodName() + "_PR", 1000 });
  
   }
   
@@ -322,14 +325,14 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
         true, 100, 10, false, false, null, true, 4, OrderPolicy.KEY });
 
     vm4.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm5.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
 
     vm2.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-      testName + "_PR", null, 1, 100, isOffHeap() });
+      getTestMethodName() + "_PR", null, 1, 100, isOffHeap() });
     vm3.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-      testName + "_PR", null, 1, 100, isOffHeap() });
+      getTestMethodName() + "_PR", null, 1, 100, isOffHeap() });
   
     vm4.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
     vm5.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
@@ -339,7 +342,7 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
     vm5.invoke(WANTestBase.class, "waitForSenderRunningState", new Object[] { "ln" });
 
     //start the puts
-    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { testName + "_PR", 100 });
+    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR", 100 });
 
     //let the queue drain completely
     vm4.invoke(WANTestBase.class, "validateQueueContents", new Object[] { "ln", 0 });
@@ -353,11 +356,11 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
     vm5.invoke(WANTestBase.class, "resumeSender", new Object[] { "ln" });
     
     //do more puts
-    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { testName + "_PR", 1000 });
+    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR", 1000 });
     
     //validate region size on remote vm to contain only the events put in local site 
     //before the senders are stopped.
-    vm2.invoke(WANTestBase.class, "validateRegionSize", new Object[] {testName + "_PR", 100 });
+    vm2.invoke(WANTestBase.class, "validateRegionSize", new Object[] {getTestMethodName() + "_PR", 100 });
   }
 
   /**
@@ -388,13 +391,13 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
         true, 100, 10, false, false, null, true, 3, OrderPolicy.KEY });
 
     vm4.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm5.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm6.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm7.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
 
     vm4.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
     vm5.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
@@ -402,9 +405,9 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
     vm7.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
 
     vm2.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", null, 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", null, 1, 100, isOffHeap() });
     vm3.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", null, 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", null, 1, 100, isOffHeap() });
 
     //make sure all the senders are running before doing any puts
     vm4.invoke(WANTestBase.class, "waitForSenderRunningState", new Object[] { "ln" });
@@ -413,7 +416,7 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
     vm7.invoke(WANTestBase.class, "waitForSenderRunningState", new Object[] { "ln" });
     
     //FIRST RUN: now, the senders are started. So, do some of the puts
-    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { testName + "_PR", 100 });
+    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR", 100 });
     
     //now, stop all of the senders
     vm4.invoke(WANTestBase.class, "stopSender", new Object[] { "ln" });
@@ -422,10 +425,10 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
     vm7.invoke(WANTestBase.class, "stopSender", new Object[] { "ln" });
     
     //SECOND RUN: keep one thread doing puts
-    vm4.invokeAsync(WANTestBase.class, "doPuts", new Object[] { testName + "_PR", 1000 });
+    vm4.invokeAsync(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR", 1000 });
     
     //verify region size remains on remote vm and is restricted below a specified limit (number of puts in the first run)
-    vm2.invoke(WANTestBase.class, "validateRegionSizeRemainsSame", new Object[] {testName + "_PR", 100 });
+    vm2.invoke(WANTestBase.class, "validateRegionSizeRemainsSame", new Object[] {getTestMethodName() + "_PR", 100 });
   }
 
   /**
@@ -455,13 +458,13 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
         true, 100, 10, false, false, null, true, 4, OrderPolicy.KEY });
 
     vm4.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm5.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm6.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm7.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
 
     vm4.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
     vm5.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
@@ -469,9 +472,9 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
     vm7.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
 
     vm2.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", null, 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", null, 1, 100, isOffHeap() });
     vm3.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", null, 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", null, 1, 100, isOffHeap() });
 
     //make sure all the senders are running before doing any puts
     vm4.invoke(WANTestBase.class, "waitForSenderRunningState", new Object[] { "ln" });
@@ -480,7 +483,7 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
     vm7.invoke(WANTestBase.class, "waitForSenderRunningState", new Object[] { "ln" });
     
     //FIRST RUN: now, the senders are started. So, do some of the puts
-    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { testName + "_PR", 200 });
+    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR", 200 });
     
     //now, stop all of the senders
     vm4.invoke(WANTestBase.class, "stopSender", new Object[] { "ln" });
@@ -488,13 +491,13 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
     vm6.invoke(WANTestBase.class, "stopSender", new Object[] { "ln" });
     vm7.invoke(WANTestBase.class, "stopSender", new Object[] { "ln" });
     
-    pause(2000);
+    Wait.pause(2000);
 
     //SECOND RUN: do some of the puts after the senders are stopped
-    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { testName + "_PR", 1000 });
+    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR", 1000 });
     
     //Region size on remote site should remain same and below the number of puts done in the FIRST RUN
-    vm2.invoke(WANTestBase.class, "validateRegionSizeRemainsSame", new Object[] {testName + "_PR", 200 });
+    vm2.invoke(WANTestBase.class, "validateRegionSizeRemainsSame", new Object[] {getTestMethodName() + "_PR", 200 });
     
     //start the senders again
     AsyncInvocation vm4start = vm4.invokeAsync(WANTestBase.class, "startSender", new Object[] { "ln" });
@@ -508,10 +511,10 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
     vm7start.getResult(START_TIMEOUT);
 
     //Region size on remote site should remain same and below the number of puts done in the FIRST RUN
-    vm2.invoke(WANTestBase.class, "validateRegionSizeRemainsSame", new Object[] {testName + "_PR", 200 });
+    vm2.invoke(WANTestBase.class, "validateRegionSizeRemainsSame", new Object[] {getTestMethodName() + "_PR", 200 });
 
     //SECOND RUN: do some more puts
-    AsyncInvocation async = vm4.invokeAsync(WANTestBase.class, "doPuts", new Object[] { testName + "_PR", 1000 });
+    AsyncInvocation async = vm4.invokeAsync(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR", 1000 });
     async.join();
     
     //verify all the buckets on all the sender nodes are drained
@@ -521,7 +524,7 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
     vm7.invoke(WANTestBase.class, "validateParallelSenderQueueAllBucketsDrained", new Object[] {"ln"});
     
     //verify the events propagate to remote site
-    vm2.invoke(WANTestBase.class, "validateRegionSize", new Object[] {testName + "_PR", 1000 });
+    vm2.invoke(WANTestBase.class, "validateRegionSize", new Object[] {getTestMethodName() + "_PR", 1000 });
     
     vm4.invoke(WANTestBase.class, "validateQueueSizeStat", new Object[] { "ln", 0 });
     vm5.invoke(WANTestBase.class, "validateQueueSizeStat", new Object[] { "ln", 0 });
@@ -559,13 +562,13 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
         true, 100, 10, false, false, null, true, 7, OrderPolicy.KEY });
 
     vm4.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm5.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm6.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm7.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
 
     vm4.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
     vm5.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
@@ -573,9 +576,9 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
     vm7.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
 
     vm2.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", null, 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", null, 1, 100, isOffHeap() });
     vm3.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", null, 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", null, 1, 100, isOffHeap() });
 
     //make sure all the senders are running before doing any puts
     vm4.invoke(WANTestBase.class, "waitForSenderRunningState", new Object[] { "ln" });
@@ -583,12 +586,12 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
     vm6.invoke(WANTestBase.class, "waitForSenderRunningState", new Object[] { "ln" });
     vm7.invoke(WANTestBase.class, "waitForSenderRunningState", new Object[] { "ln" });
     
-    getLogWriter().info("All the senders are now started");
+    LogWriterUtils.getLogWriter().info("All the senders are now started");
     
     //FIRST RUN: now, the senders are started. So, do some of the puts
-    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { testName + "_PR", 200 });
+    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR", 200 });
     
-    getLogWriter().info("Done few puts");
+    LogWriterUtils.getLogWriter().info("Done few puts");
     
     //now, stop all of the senders
     vm4.invoke(WANTestBase.class, "stopSender", new Object[] { "ln" });
@@ -596,32 +599,32 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
     vm6.invoke(WANTestBase.class, "stopSender", new Object[] { "ln" });
     vm7.invoke(WANTestBase.class, "stopSender", new Object[] { "ln" });
     
-    getLogWriter().info("All the senders are stopped");
-    pause(2000);
+    LogWriterUtils.getLogWriter().info("All the senders are stopped");
+    Wait.pause(2000);
     
     //SECOND RUN: do some of the puts after the senders are stopped
-    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { testName + "_PR", 1000 });
-    getLogWriter().info("Done some more puts in second run");
+    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR", 1000 });
+    LogWriterUtils.getLogWriter().info("Done some more puts in second run");
     
     //Region size on remote site should remain same and below the number of puts done in the FIRST RUN
-    vm2.invoke(WANTestBase.class, "validateRegionSizeRemainsSame", new Object[] {testName + "_PR", 200 });
+    vm2.invoke(WANTestBase.class, "validateRegionSizeRemainsSame", new Object[] {getTestMethodName() + "_PR", 200 });
     
     //SECOND RUN: start async puts on region
-    AsyncInvocation async = vm4.invokeAsync(WANTestBase.class, "doPuts", new Object[] { testName + "_PR", 5000 });
-    getLogWriter().info("Started high number of puts by async thread");
+    AsyncInvocation async = vm4.invokeAsync(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR", 5000 });
+    LogWriterUtils.getLogWriter().info("Started high number of puts by async thread");
 
-    getLogWriter().info("Starting the senders at the same time");
+    LogWriterUtils.getLogWriter().info("Starting the senders at the same time");
     //when puts are happening by another thread, start the senders
     vm4.invokeAsync(WANTestBase.class, "startSender", new Object[] { "ln" });
     vm5.invokeAsync(WANTestBase.class, "startSender", new Object[] { "ln" });
     vm6.invokeAsync(WANTestBase.class, "startSender", new Object[] { "ln" });
     vm7.invokeAsync(WANTestBase.class, "startSender", new Object[] { "ln" });
 
-    getLogWriter().info("All the senders are started");
+    LogWriterUtils.getLogWriter().info("All the senders are started");
     
     async.join();
         
-    pause(2000);
+    Wait.pause(2000);
     
     //verify all the buckets on all the sender nodes are drained
     vm4.invoke(WANTestBase.class, "validateParallelSenderQueueAllBucketsDrained", new Object[] {"ln"});
@@ -661,13 +664,13 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
         true, 100, 10, false, false, null, true, 4, OrderPolicy.KEY });
 
     vm4.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm5.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm6.invoke(WANTestBase.class, "createPartitionedRegionAsAccessor", new Object[] {
-        testName + "_PR", "ln", 1, 100});
+        getTestMethodName() + "_PR", "ln", 1, 100});
     vm7.invoke(WANTestBase.class, "createPartitionedRegionAsAccessor", new Object[] {
-        testName + "_PR", "ln", 1, 100});
+        getTestMethodName() + "_PR", "ln", 1, 100});
 
     vm4.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
     vm5.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
@@ -675,9 +678,9 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
     vm7.invoke(WANTestBase.class, "startSender", new Object[] { "ln" });
 
     vm2.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", null, 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", null, 1, 100, isOffHeap() });
     vm3.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", null, 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", null, 1, 100, isOffHeap() });
 
     //make sure all the senders are not running on accessor nodes and running on non-accessor nodes
     vm4.invoke(WANTestBase.class, "waitForSenderRunningState", new Object[] { "ln" });
@@ -687,7 +690,7 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
     vm7.invoke(WANTestBase.class, "waitForSenderRunningState", new Object[] { "ln" });
     
     //FIRST RUN: now, the senders are started. So, do some of the puts
-    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { testName + "_PR", 200 });
+    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR", 200 });
     
     //now, stop all of the senders
     vm4.invoke(WANTestBase.class, "stopSender", new Object[] { "ln" });
@@ -695,13 +698,13 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
     vm6.invoke(WANTestBase.class, "stopSender", new Object[] { "ln" });
     vm7.invoke(WANTestBase.class, "stopSender", new Object[] { "ln" });
     
-    pause(2000);
+    Wait.pause(2000);
     
     //SECOND RUN: do some of the puts after the senders are stopped
-    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { testName + "_PR", 1000 });
+    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR", 1000 });
     
     //Region size on remote site should remain same and below the number of puts done in the FIRST RUN
-    vm2.invoke(WANTestBase.class, "validateRegionSizeRemainsSame", new Object[] {testName + "_PR", 200 });
+    vm2.invoke(WANTestBase.class, "validateRegionSizeRemainsSame", new Object[] {getTestMethodName() + "_PR", 200 });
     
     //start the senders again
     AsyncInvocation vm4start = vm4.invokeAsync(WANTestBase.class, "startSender", new Object[] { "ln" });
@@ -715,19 +718,19 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
     vm7start.getResult(START_TIMEOUT);
 
     //Region size on remote site should remain same and below the number of puts done in the FIRST RUN
-    vm2.invoke(WANTestBase.class, "validateRegionSizeRemainsSame", new Object[] {testName + "_PR", 200 });
+    vm2.invoke(WANTestBase.class, "validateRegionSizeRemainsSame", new Object[] {getTestMethodName() + "_PR", 200 });
 
     //SECOND RUN: do some more puts
-    AsyncInvocation async = vm4.invokeAsync(WANTestBase.class, "doPuts", new Object[] { testName + "_PR", 1000 });
+    AsyncInvocation async = vm4.invokeAsync(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR", 1000 });
     async.join();
-    pause(5000);
+    Wait.pause(5000);
     
     //verify all buckets drained only on non-accessor nodes.
     vm4.invoke(WANTestBase.class, "validateParallelSenderQueueAllBucketsDrained", new Object[] {"ln"});
     vm5.invoke(WANTestBase.class, "validateParallelSenderQueueAllBucketsDrained", new Object[] {"ln"});
     
     //verify the events propagate to remote site
-    vm2.invoke(WANTestBase.class, "validateRegionSize", new Object[] {testName + "_PR", 1000 });
+    vm2.invoke(WANTestBase.class, "validateRegionSize", new Object[] {getTestMethodName() + "_PR", 1000 });
   }
 
   
@@ -749,7 +752,7 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
     vm6.invoke(WANTestBase.class, "createCache", new Object[] { lnPort });
     vm7.invoke(WANTestBase.class, "createCache", new Object[] { lnPort });
 
-    getLogWriter().info("Created cache on local site");
+    LogWriterUtils.getLogWriter().info("Created cache on local site");
     
     vm4.invoke(WANTestBase.class, "createConcurrentSender", new Object[] { "ln", 2,
         true, 100, 10, false, false, null, true, 5, OrderPolicy.KEY });
@@ -760,27 +763,27 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
     vm7.invoke(WANTestBase.class, "createConcurrentSender", new Object[] { "ln", 2,
         true, 100, 10, false, false, null, true, 5, OrderPolicy.KEY });
     
-    getLogWriter().info("Created senders on local site");
+    LogWriterUtils.getLogWriter().info("Created senders on local site");
     
     vm4.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm5.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm6.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
     vm7.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", "ln", 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", "ln", 1, 100, isOffHeap() });
 
-    getLogWriter().info("Created PRs on local site");
+    LogWriterUtils.getLogWriter().info("Created PRs on local site");
     
     vm2.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", null, 1, 100, isOffHeap() });
+        getTestMethodName() + "_PR", null, 1, 100, isOffHeap() });
     vm3.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
-        testName + "_PR", null, 1, 100, isOffHeap() });
-    getLogWriter().info("Created PRs on remote site");
+        getTestMethodName() + "_PR", null, 1, 100, isOffHeap() });
+    LogWriterUtils.getLogWriter().info("Created PRs on remote site");
     
-    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { testName + "_PR", 1000 });
-    getLogWriter().info("Done 1000 puts on local site");
+    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR", 1000 });
+    LogWriterUtils.getLogWriter().info("Done 1000 puts on local site");
     
     //Since puts are already done on userPR, it will have the buckets created. 
     //During sender start, it will wait until those buckets are created for shadowPR as well.
@@ -796,16 +799,16 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
     vm6.invoke(WANTestBase.class, "waitForSenderRunningState", new Object[] { "ln" });
     vm7.invoke(WANTestBase.class, "waitForSenderRunningState", new Object[] { "ln" });
     
-    getLogWriter().info("Started senders on local site");
+    LogWriterUtils.getLogWriter().info("Started senders on local site");
     
-    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { testName + "_PR", 5000 });
-    getLogWriter().info("Done 5000 puts on local site");
+    vm4.invoke(WANTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR", 5000 });
+    LogWriterUtils.getLogWriter().info("Done 5000 puts on local site");
     
     vm4.invoke(WANTestBase.class, "pauseSender", new Object[] { "ln" });
     vm5.invoke(WANTestBase.class, "pauseSender", new Object[] { "ln" });
     vm6.invoke(WANTestBase.class, "pauseSender", new Object[] { "ln" });
     vm7.invoke(WANTestBase.class, "pauseSender", new Object[] { "ln" });
-    getLogWriter().info("Paused senders on local site");
+    LogWriterUtils.getLogWriter().info("Paused senders on local site");
     
     vm4.invoke(WANTestBase.class, "verifySenderPausedState", new Object[] { "ln" });
     vm5.invoke(WANTestBase.class, "verifySenderPausedState", new Object[] { "ln" });
@@ -813,14 +816,14 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
     vm7.invoke(WANTestBase.class, "verifySenderPausedState", new Object[] { "ln" });
     
     AsyncInvocation inv1 = vm4.invokeAsync(WANTestBase.class, "doPuts",
-        new Object[] { testName + "_PR", 1000 });
-    getLogWriter().info("Started 1000 async puts on local site");
+        new Object[] { getTestMethodName() + "_PR", 1000 });
+    LogWriterUtils.getLogWriter().info("Started 1000 async puts on local site");
 
     vm4.invoke(WANTestBase.class, "resumeSender", new Object[] { "ln" });
     vm5.invoke(WANTestBase.class, "resumeSender", new Object[] { "ln" });
     vm6.invoke(WANTestBase.class, "resumeSender", new Object[] { "ln" });
     vm7.invoke(WANTestBase.class, "resumeSender", new Object[] { "ln" });
-    getLogWriter().info("Resumed senders on local site");
+    LogWriterUtils.getLogWriter().info("Resumed senders on local site");
 
     vm4.invoke(WANTestBase.class, "verifySenderResumedState", new Object[] { "ln" });
     vm5.invoke(WANTestBase.class, "verifySenderResumedState", new Object[] { "ln" });
@@ -841,8 +844,8 @@ public class ConcurrentParallelGatewaySenderOperation_1_DUnitTest extends WANTes
     vm7.invoke(WANTestBase.class, "validateParallelSenderQueueAllBucketsDrained", new Object[] {"ln"});
 
     vm2.invoke(WANTestBase.class, "validateRegionSize", new Object[] {
-      testName + "_PR", 5000 });
+      getTestMethodName() + "_PR", 5000 });
     vm3.invoke(WANTestBase.class, "validateRegionSize", new Object[] {
-      testName + "_PR", 5000 });
+      getTestMethodName() + "_PR", 5000 });
   }
 }

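The stop/start and HA tests in the file above also share one concurrency pattern: puts run in a background AsyncInvocation while senders are restarted, the invocations are joined, and only then are the parallel queues and the remote region size validated. The following is a condensed sketch of that flow as a single test method in a WANTestBase subclass, again limited to calls shown in the hunks above; START_TIMEOUT is the timeout constant the originals pass to getResult, and the region name and counts are placeholders.

    public void testRestartSendersWhilePutsAreInFlight() throws Exception {
      String regionName = getTestMethodName() + "_PR";

      // keep puts running in the background while the senders are restarted
      AsyncInvocation puts = vm4.invokeAsync(WANTestBase.class, "doPuts",
          new Object[] { regionName, 1000 });

      // restart the senders concurrently and wait for each start to complete
      AsyncInvocation vm4start = vm4.invokeAsync(WANTestBase.class, "startSender", new Object[] { "ln" });
      AsyncInvocation vm5start = vm5.invokeAsync(WANTestBase.class, "startSender", new Object[] { "ln" });
      vm4start.getResult(START_TIMEOUT);
      vm5start.getResult(START_TIMEOUT);

      puts.join();
      Wait.pause(2000);

      // the sender queues must drain before the remote region size is meaningful
      vm4.invoke(WANTestBase.class, "validateParallelSenderQueueAllBucketsDrained", new Object[] { "ln" });
      vm5.invoke(WANTestBase.class, "validateParallelSenderQueueAllBucketsDrained", new Object[] { "ln" });
      vm2.invoke(WANTestBase.class, "validateRegionSize", new Object[] { regionName, 1000 });
    }

Joining the background invocation before the drain and size checks keeps the assertions from racing the in-flight puts.
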
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentParallelGatewaySenderOperation_2_DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentParallelGatewaySenderOperation_2_DUnitTest.java b/gemfire-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentParallelGatewaySenderOperation_2_DUnitTest.java
index 5533dc3..4e74822 100644
--- a/gemfire-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentParallelGatewaySenderOperation_2_DUnitTest.java
+++ b/gemfire-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentParallelGatewaySenderOperation_2_DUnitTest.java
@@ -21,9 +21,13 @@ import com.gemstone.gemfire.cache.RegionDestroyedException;
 import com.gemstone.gemfire.cache.wan.GatewaySender.OrderPolicy;
 import com.gemstone.gemfire.internal.cache.wan.AbstractGatewaySender;
 import com.gemstone.gemfire.internal.cache.wan.WANTestBase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * @author skumar
@@ -39,10 +43,10 @@ public class ConcurrentParallelGatewaySenderOperation_2_DUnitTest extends WANTes
   
   public void setUp() throws Exception {
     super.setUp();
-    addExpectedException("RegionDestroyedException");
-    addExpectedException("Broken pipe");
-    addExpectedException("Connection reset");
-    addExpectedException("Unexpected IOException");
+    IgnoredException.addIgnoredException("RegionDestroyedException");
+    IgnoredException.addIgnoredException("Broken pipe");
+    IgnoredException.addIgnoredException("Connection reset");
+    IgnoredException.addIgnoredException("Unexpected IOException");
   }
   
   // to test that when userPR is locally destroyed, shadow Pr is also locally
@@ -57,7 +61,7 @@ public class ConcurrentParallelGatewaySenderOperation_2_DUnitTest extends WANTes
 
       createReceiverAndDoPutsInPausedSender(nyPort);
 
-      vm4.invoke(() -> localDestroyRegion(testName + "_PR"));
+      vm4.invoke(() -> localDestroyRegion(getTestMethodName() + "_PR"));
 
       recreatePRDoPutsAndValidateRegionSizes(0, true);
     } finally {
@@ -79,7 +83,7 @@ public class ConcurrentParallelGatewaySenderOperation_2_DUnitTest extends WANTes
 
       vm4.invoke(() -> validateParallelSenderQueueAllBucketsDrained("ln"));
 
-      vm4.invoke(() -> localDestroyRegion(testName + "_PR"));
+      vm4.invoke(() -> localDestroyRegion(getTestMethodName() + "_PR"));
 
       recreatePRDoPutsAndValidateRegionSizes(10, false);
     } finally {
@@ -96,11 +100,11 @@ public class ConcurrentParallelGatewaySenderOperation_2_DUnitTest extends WANTes
 
     createReceiverAndDoPutsInPausedSender(nyPort);
 
-    vm4.invoke(() -> closeRegion(testName + "_PR"));
+    vm4.invoke(() -> closeRegion(getTestMethodName() + "_PR"));
 
     vm4.invoke(() -> resumeSender("ln"));
 
-    pause(500); //paused if there is any element which is received on remote site
+    Wait.pause(500); //paused if there is any element which is received on remote site
 
     recreatePRDoPutsAndValidateRegionSizes(0, false);
 
@@ -115,14 +119,14 @@ public class ConcurrentParallelGatewaySenderOperation_2_DUnitTest extends WANTes
     try {
       createAndStartSender(vm4, lnPort, 6, false, true);
 
-      vm4.invoke(() -> addCacheListenerAndDestroyRegion(testName + "_PR"));
+      vm4.invoke(() -> addCacheListenerAndDestroyRegion(getTestMethodName() + "_PR"));
 
       createReceiverAndDoPutsInPausedSender(nyPort);
 
       vm4.invoke(() -> resumeSender("ln"));
 
       AsyncInvocation putAsync = vm4.invokeAsync(WANTestBase.class,
-          "doPutsFrom", new Object[] { testName + "_PR", 10, 101 });
+          "doPutsFrom", new Object[] { getTestMethodName() + "_PR", 10, 101 });
       try {
         putAsync.join();
       } catch (InterruptedException e) {
@@ -132,21 +136,21 @@ public class ConcurrentParallelGatewaySenderOperation_2_DUnitTest extends WANTes
 
       if (putAsync.getException() != null
           && !(putAsync.getException() instanceof RegionDestroyedException)) {
-        fail("Expected RegionDestroyedException but got",
+        Assert.fail("Expected RegionDestroyedException but got",
             putAsync.getException());
       }
 
       // before destroy, there is wait for queue to drain, so data will be
       // dispatched
-      vm2.invoke(() -> validateRegionSizeWithinRange(testName + "_PR", 10, 101)); // possible size is more than 10
+      vm2.invoke(() -> validateRegionSizeWithinRange(getTestMethodName() + "_PR", 10, 101)); // possible size is more than 10
 
-      vm4.invoke(() -> createPartitionedRegion(testName + "_PR", "ln", 1, 10, isOffHeap()));
+      vm4.invoke(() -> createPartitionedRegion(getTestMethodName() + "_PR", "ln", 1, 10, isOffHeap()));
 
-      vm4.invoke(() -> doPutsFrom(testName + "_PR", 10, 20));
+      vm4.invoke(() -> doPutsFrom(getTestMethodName() + "_PR", 10, 20));
 
-      vm4.invoke(() -> validateRegionSize(testName + "_PR", 10));
+      vm4.invoke(() -> validateRegionSize(getTestMethodName() + "_PR", 10));
 
-      vm2.invoke(() -> validateRegionSizeWithinRange(testName + "_PR", 20, 101)); // possible size is more than 20
+      vm2.invoke(() -> validateRegionSizeWithinRange(getTestMethodName() + "_PR", 20, 101)); // possible size is more than 20
     } finally {
       vm4.invoke(() -> clear_INFINITE_MAXIMUM_SHUTDOWN_WAIT_TIME());
     }
@@ -154,9 +158,9 @@ public class ConcurrentParallelGatewaySenderOperation_2_DUnitTest extends WANTes
   
   public void testParallelGatewaySender_SingleNode_UserPR_Destroy_NodeDown()
       throws Exception {
-    addExpectedException("Broken pipe");
-    addExpectedException("Connection reset");
-    addExpectedException("Unexpected IOException");
+    IgnoredException.addIgnoredException("Broken pipe");
+    IgnoredException.addIgnoredException("Connection reset");
+    IgnoredException.addIgnoredException("Unexpected IOException");
     Integer[] locatorPorts = createLNAndNYLocators();
     Integer lnPort = locatorPorts[0];
     Integer nyPort = locatorPorts[1];
@@ -172,9 +176,9 @@ public class ConcurrentParallelGatewaySenderOperation_2_DUnitTest extends WANTes
       vm5.invoke(WANTestBase.class, "resumeSender", new Object[] { "ln" });
       vm6.invoke(WANTestBase.class, "resumeSender", new Object[] { "ln" });
 
-      pause(200);
+      Wait.pause(200);
       AsyncInvocation localDestroyAsync = vm4.invokeAsync(WANTestBase.class,
-          "destroyRegion", new Object[] { testName + "_PR" });
+          "destroyRegion", new Object[] { getTestMethodName() + "_PR" });
 
       AsyncInvocation closeAsync = vm4.invokeAsync(WANTestBase.class,
           "closeCache");
@@ -186,7 +190,7 @@ public class ConcurrentParallelGatewaySenderOperation_2_DUnitTest extends WANTes
         fail("Interrupted the async invocation.");
       }
 
-      vm2.invoke(() -> validateRegionSize(testName + "_PR", 10));
+      vm2.invoke(() -> validateRegionSize(getTestMethodName() + "_PR", 10));
     } finally {
       vm4.invoke(() -> clear_INFINITE_MAXIMUM_SHUTDOWN_WAIT_TIME());
       vm5.invoke(() -> clear_INFINITE_MAXIMUM_SHUTDOWN_WAIT_TIME());
@@ -206,10 +210,10 @@ public class ConcurrentParallelGatewaySenderOperation_2_DUnitTest extends WANTes
       createReceiverAndDoPutsInPausedSender(nyPort);
 
       AsyncInvocation putAsync = vm4.invokeAsync(WANTestBase.class,
-          "doPutsFrom", new Object[] { testName + "_PR", 10, 2000 });
+          "doPutsFrom", new Object[] { getTestMethodName() + "_PR", 10, 2000 });
       AsyncInvocation localDestroyAsync = vm4.invokeAsync(
           ConcurrentParallelGatewaySenderOperation_2_DUnitTest.class,
-          "closeRegion", new Object[] { testName + "_PR" });
+          "closeRegion", new Object[] { getTestMethodName() + "_PR" });
       try {
         putAsync.join();
         localDestroyAsync.join();
@@ -238,7 +242,7 @@ public class ConcurrentParallelGatewaySenderOperation_2_DUnitTest extends WANTes
     try {
       vm7.invoke(() -> createCache_INFINITE_MAXIMUM_SHUTDOWN_WAIT_TIME(lnPort));
 
-      getLogWriter().info("Created cache on local site");
+      LogWriterUtils.getLogWriter().info("Created cache on local site");
 
       vm7.invoke(() -> createConcurrentSender("ln1", 2, true, 100, 10, false, false, null, true, 5, OrderPolicy.KEY));
       vm7.invoke(() -> createConcurrentSender("ln2", 3, true, 100, 10, false, false, null, true, 5, OrderPolicy.KEY));
@@ -248,10 +252,10 @@ public class ConcurrentParallelGatewaySenderOperation_2_DUnitTest extends WANTes
       vm7.invoke(() -> startSender("ln2"));
       vm7.invoke(() -> startSender("ln3"));
 
-      String regionName = testName + "_PR";
+      String regionName = getTestMethodName() + "_PR";
       vm7.invoke(() -> createPartitionedRegion(regionName, "ln1,ln2,ln3", 1, 10, isOffHeap()));
 
-      getLogWriter().info("Created PRs on local site");
+      LogWriterUtils.getLogWriter().info("Created PRs on local site");
 
       vm4.invoke(() -> createPartitionedRegion(regionName, null, 1, 10, isOffHeap()));
       vm5.invoke(() -> createPartitionedRegion(regionName, null, 1, 10, isOffHeap()));
@@ -288,12 +292,12 @@ public class ConcurrentParallelGatewaySenderOperation_2_DUnitTest extends WANTes
       createAndStartSender(vm4, lnPort, 5, true, false);
       createAndStartSender(vm5, lnPort, 5, true, false);
 
-      String regionName = testName + "_PR";
+      String regionName = getTestMethodName() + "_PR";
       vm2.invoke(() -> createPartitionedRegion(regionName, null, 1, 10, isOffHeap()));
 
       AsyncInvocation inv1 = vm4.invokeAsync(WANTestBase.class, "doPuts",
           new Object[] { regionName, 10 });
-      pause(1000);
+      Wait.pause(1000);
       vm5.invoke(() -> localDestroyRegion(regionName));
 
       try {
@@ -330,7 +334,7 @@ public class ConcurrentParallelGatewaySenderOperation_2_DUnitTest extends WANTes
       createAndStartTwoSenders(vm4, lnPort, 4);
       createAndStartTwoSenders(vm5, lnPort, 4);
 
-      String regionName = testName + "_PR";
+      String regionName = getTestMethodName() + "_PR";
       vm6.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
           regionName, null, 1, 100, isOffHeap() });
       vm7.invoke(WANTestBase.class, "createPartitionedRegion", new Object[] {
@@ -339,7 +343,7 @@ public class ConcurrentParallelGatewaySenderOperation_2_DUnitTest extends WANTes
       AsyncInvocation inv1 = vm4.invokeAsync(WANTestBase.class, "doPuts",
           new Object[] { regionName, 10 });
 
-      pause(1000);
+      Wait.pause(1000);
       vm5.invoke(WANTestBase.class, "localDestroyRegion",
           new Object[] { regionName });
 
@@ -376,13 +380,13 @@ public class ConcurrentParallelGatewaySenderOperation_2_DUnitTest extends WANTes
       createAndStartSenderWithCustomerOrderShipmentRegion(vm4, lnPort, 5, true);
       createAndStartSenderWithCustomerOrderShipmentRegion(vm5, lnPort, 5, true);
 
-      getLogWriter().info("Created PRs on local site");
+      LogWriterUtils.getLogWriter().info("Created PRs on local site");
 
       vm2.invoke(() -> createCustomerOrderShipmentPartitionedRegion(null, null, 1, 100, isOffHeap()));
 
       AsyncInvocation inv1 = vm4.invokeAsync(WANTestBase.class,
           "putcolocatedPartitionedRegion", new Object[] { 10 });
-      pause(1000);
+      Wait.pause(1000);
 
       try {
         vm5.invoke(() -> localDestroyRegion(customerRegionName));
@@ -393,7 +397,7 @@ public class ConcurrentParallelGatewaySenderOperation_2_DUnitTest extends WANTes
       try {
         inv1.join();
       } catch (Exception e) {
-        fail("Unexpected exception", e);
+        Assert.fail("Unexpected exception", e);
       }
 
       validateRegionSizes(customerRegionName, 10, vm4, vm5, vm2);
@@ -414,7 +418,7 @@ public class ConcurrentParallelGatewaySenderOperation_2_DUnitTest extends WANTes
       createAndStartSenderWithCustomerOrderShipmentRegion(vm4, lnPort, 6, true);
       createAndStartSenderWithCustomerOrderShipmentRegion(vm5, lnPort, 6, true);
 
-      getLogWriter().info("Created PRs on local site");
+      LogWriterUtils.getLogWriter().info("Created PRs on local site");
 
       vm2.invoke(WANTestBase.class,
           "createCustomerOrderShipmentPartitionedRegion", new Object[] { null,
@@ -422,7 +426,7 @@ public class ConcurrentParallelGatewaySenderOperation_2_DUnitTest extends WANTes
 
       AsyncInvocation inv1 = vm4.invokeAsync(WANTestBase.class,
           "putcolocatedPartitionedRegion", new Object[] { 2000 });
-      pause(1000);
+      Wait.pause(1000);
 
       try {
         vm5.invoke(WANTestBase.class, "destroyRegion",
@@ -465,7 +469,7 @@ public class ConcurrentParallelGatewaySenderOperation_2_DUnitTest extends WANTes
             + max + " but actual entries: " + r.keySet().size();
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 120000, 500, true);
+    Wait.waitForCriterion(wc, 120000, 500, true);
   }
 
   protected static void createCache_INFINITE_MAXIMUM_SHUTDOWN_WAIT_TIME(
@@ -481,13 +485,13 @@ public class ConcurrentParallelGatewaySenderOperation_2_DUnitTest extends WANTes
     if (pause) {
       vm.invoke(() -> pauseSender("ln"));
     }
-    vm.invoke(() -> createPartitionedRegion(testName + "_PR", "ln", 1, 10, isOffHeap()));
-    getLogWriter().info("Created PRs on local site");
+    vm.invoke(() -> createPartitionedRegion(getTestMethodName() + "_PR", "ln", 1, 10, isOffHeap()));
+    LogWriterUtils.getLogWriter().info("Created PRs on local site");
   }
 
   protected void createReceiverAndDoPutsInPausedSender(int port) {
     // Note: This is a test-specific method used by several tests to do puts from vm4 to vm2.
-    String regionName = testName + "_PR";
+    String regionName = getTestMethodName() + "_PR";
     vm2.invoke(() -> createReceiver(port));
     vm2.invoke(() -> createPartitionedRegion(regionName, null, 1, 10, isOffHeap()));
     vm4.invoke(() -> doPuts(regionName, 10));
@@ -500,7 +504,7 @@ public class ConcurrentParallelGatewaySenderOperation_2_DUnitTest extends WANTes
     // Note: This is a test-specific method used by several test to recreate a partitioned region,
     // do puts and validate region sizes in vm2 and vm4.
     // since shadowPR is locally destroyed, so no data to dispatch
-    String regionName = testName + "_PR";
+    String regionName = getTestMethodName() + "_PR";
     vm2.invoke(() -> validateRegionSize(regionName, expectedRegionSize));
     if (resumeSender) {
       vm4.invoke(() -> resumeSender("ln"));
@@ -513,7 +517,7 @@ public class ConcurrentParallelGatewaySenderOperation_2_DUnitTest extends WANTes
   protected void createAndStartTwoSenders(VM vm, int port, int concurrencyLevel) {
     // Note: This is a test-specific method used to create and start 2 senders.
     vm.invoke(() -> createCache_INFINITE_MAXIMUM_SHUTDOWN_WAIT_TIME(port));
-    vm.invoke(() -> createPartitionedRegion(testName + "_PR", "ln1,ln2", 1, 100, isOffHeap()));
+    vm.invoke(() -> createPartitionedRegion(getTestMethodName() + "_PR", "ln1,ln2", 1, 100, isOffHeap()));
     createSenders(vm, concurrencyLevel);
     vm.invoke(() -> startSender("ln1"));
     vm.invoke(() -> startSender("ln2"));


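The hunks above all apply the same mechanical substitution: the expected-exception, pause, and logging helpers formerly inherited from DistributedTestCase are now invoked through the extracted dunit utility classes. Below is a minimal sketch of the resulting pattern, assuming only the com.gemstone.gemfire.test.dunit helpers imported in these diffs; the ignored-exception string and the log message are placeholders, not taken from any particular test.

import com.gemstone.gemfire.test.dunit.IgnoredException;
import com.gemstone.gemfire.test.dunit.LogWriterUtils;
import com.gemstone.gemfire.test.dunit.Wait;

public class DUnitHelperMigrationSketch {
  public void example() {
    // Old style (inherited static):  addExpectedException("Broken pipe");
    // New style (extracted utility): register, use, then remove when done.
    IgnoredException ignored = IgnoredException.addIgnoredException("Broken pipe");
    try {
      Wait.pause(200);                                        // replaces the inherited pause(200)
      LogWriterUtils.getLogWriter().info("doing test work");  // replaces the inherited getLogWriter()
    } finally {
      ignored.remove();  // the converted tests remove registered exceptions in finally blocks
    }
  }
}

Registering the exception before the risky call and removing it in a finally block mirrors how the converted tests clean up, so the suppression does not leak into later test methods.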
[03/62] [abbrv] [partial] incubator-geode git commit: GEODE-773: Extract static methods from DistributedTestCase

Posted by je...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/WANTestBase.java
----------------------------------------------------------------------
diff --git a/gemfire-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/WANTestBase.java b/gemfire-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/WANTestBase.java
index 1afba75..17afcbb 100644
--- a/gemfire-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/WANTestBase.java
+++ b/gemfire-wan/src/test/java/com/gemstone/gemfire/internal/cache/wan/WANTestBase.java
@@ -81,7 +81,6 @@ import com.gemstone.gemfire.cache.wan.GatewaySender.OrderPolicy;
 import com.gemstone.gemfire.cache.wan.GatewaySenderFactory;
 import com.gemstone.gemfire.cache.wan.GatewayTransportFilter;
 import com.gemstone.gemfire.cache30.CacheTestCase;
-import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.distributed.Locator;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
@@ -115,13 +114,16 @@ import com.gemstone.gemfire.internal.cache.wan.parallel.ParallelGatewaySenderQue
 import com.gemstone.gemfire.pdx.SimpleClass;
 import com.gemstone.gemfire.pdx.SimpleClass1;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.util.test.TestUtil;
 import com.jayway.awaitility.Awaitility;
 
-import junit.framework.Assert;
-
 public class WANTestBase extends DistributedTestCase{
 
   protected static Cache cache;
@@ -182,11 +184,11 @@ public class WANTestBase extends DistributedTestCase{
     //this is done to vary the number of dispatchers for sender 
     //during every test method run
     shuffleNumDispatcherThreads();
-    invokeInEveryVM(WANTestBase.class,"setNumDispatcherThreadsForTheRun",
+    Invoke.invokeInEveryVM(WANTestBase.class,"setNumDispatcherThreadsForTheRun",
     	new Object[]{dispatcherThreads.get(0)});
-    addExpectedException("Connection refused");
-    addExpectedException("Software caused connection abort");
-    addExpectedException("Connection reset");
+    IgnoredException.addIgnoredException("Connection refused");
+    IgnoredException.addIgnoredException("Software caused connection abort");
+    IgnoredException.addIgnoredException("Connection reset");
   }
   
   public static void shuffleNumDispatcherThreads() {
@@ -204,7 +206,7 @@ public class WANTestBase extends DistributedTestCase{
   }
 
   public static void createLocator(int dsId, int port, Set<String> localLocatorsList, Set<String> remoteLocatorsList){
-    WANTestBase test = new WANTestBase(testName);
+    WANTestBase test = new WANTestBase(getTestMethodName());
     Properties props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME,"0");
     props.setProperty(DistributionConfig.DISTRIBUTED_SYSTEM_ID_NAME, ""+dsId);
@@ -227,7 +229,7 @@ public class WANTestBase extends DistributedTestCase{
   
   public static Integer createFirstLocatorWithDSId(int dsId) {
     stopOldLocator();
-    WANTestBase test = new WANTestBase(testName);
+    WANTestBase test = new WANTestBase(getTestMethodName());
     int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
     Properties props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME,"0");
@@ -240,7 +242,7 @@ public class WANTestBase extends DistributedTestCase{
   
   public static Integer createFirstPeerLocator(int dsId) {
     stopOldLocator();
-    WANTestBase test = new WANTestBase(testName);
+    WANTestBase test = new WANTestBase(getTestMethodName());
     int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
     Properties props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME,"0");
@@ -253,7 +255,7 @@ public class WANTestBase extends DistributedTestCase{
   
   public static Integer createSecondLocator(int dsId, int locatorPort) {
     stopOldLocator();
-    WANTestBase test = new WANTestBase(testName);
+    WANTestBase test = new WANTestBase(getTestMethodName());
     int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
     Properties props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME,"0");
@@ -266,7 +268,7 @@ public class WANTestBase extends DistributedTestCase{
 
   public static Integer createSecondPeerLocator(int dsId, int locatorPort) {
     stopOldLocator();
-    WANTestBase test = new WANTestBase(testName);
+    WANTestBase test = new WANTestBase(getTestMethodName());
     int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
     Properties props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME,"0");
@@ -279,7 +281,7 @@ public class WANTestBase extends DistributedTestCase{
   
   public static Integer createFirstRemoteLocator(int dsId, int remoteLocPort) {
     stopOldLocator();
-    WANTestBase test = new WANTestBase(testName);
+    WANTestBase test = new WANTestBase(getTestMethodName());
     int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
     Properties props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME,"0");
@@ -292,7 +294,7 @@ public class WANTestBase extends DistributedTestCase{
   }
   
   public static void bringBackLocatorOnOldPort(int dsId, int remoteLocPort, int oldPort) {
-    WANTestBase test = new WANTestBase(testName);
+    WANTestBase test = new WANTestBase(getTestMethodName());
     Properties props = new Properties();
     props.put(DistributionConfig.LOG_LEVEL_NAME, "fine");
     props.setProperty(DistributionConfig.MCAST_PORT_NAME,"0");
@@ -307,7 +309,7 @@ public class WANTestBase extends DistributedTestCase{
   
   public static Integer createFirstRemotePeerLocator(int dsId, int remoteLocPort) {
     stopOldLocator();
-    WANTestBase test = new WANTestBase(testName);
+    WANTestBase test = new WANTestBase(getTestMethodName());
     int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
     Properties props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME,"0");
@@ -322,7 +324,7 @@ public class WANTestBase extends DistributedTestCase{
   public static Integer createSecondRemoteLocator(int dsId, int localPort,
       int remoteLocPort) {
     stopOldLocator();
-    WANTestBase test = new WANTestBase(testName);
+    WANTestBase test = new WANTestBase(getTestMethodName());
     int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
     Properties props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME,"0");
@@ -337,7 +339,7 @@ public class WANTestBase extends DistributedTestCase{
   public static Integer createSecondRemotePeerLocator(int dsId, int localPort,
       int remoteLocPort) {
     stopOldLocator();
-    WANTestBase test = new WANTestBase(testName);
+    WANTestBase test = new WANTestBase(getTestMethodName());
     int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
     Properties props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME,"0");
@@ -350,11 +352,11 @@ public class WANTestBase extends DistributedTestCase{
   }
   
   public static void createReplicatedRegion(String regionName, String senderIds, Boolean offHeap){
-    ExpectedException exp = addExpectedException(ForceReattemptException.class
+    IgnoredException exp = IgnoredException.addIgnoredException(ForceReattemptException.class
         .getName());
-    ExpectedException exp1 = addExpectedException(InterruptedException.class
+    IgnoredException exp1 = IgnoredException.addIgnoredException(InterruptedException.class
         .getName());
-    ExpectedException exp2 = addExpectedException(GatewaySenderException.class
+    IgnoredException exp2 = IgnoredException.addIgnoredException(GatewaySenderException.class
         .getName());
     try {
       AttributesFactory fact = new AttributesFactory();
@@ -455,7 +457,7 @@ public class WANTestBase extends DistributedTestCase{
   
   public static void createReplicatedRegionWithAsyncEventQueue(
       String regionName, String asyncQueueIds, Boolean offHeap) {
-    ExpectedException exp1 = addExpectedException(ForceReattemptException.class
+    IgnoredException exp1 = IgnoredException.addIgnoredException(ForceReattemptException.class
         .getName());
     try {
       AttributesFactory fact = new AttributesFactory();
@@ -498,7 +500,7 @@ public class WANTestBase extends DistributedTestCase{
   
   public static void createReplicatedRegionWithSenderAndAsyncEventQueue(
       String regionName, String senderIds, String asyncChannelId, Boolean offHeap) {
-    ExpectedException exp = addExpectedException(ForceReattemptException.class
+    IgnoredException exp = IgnoredException.addIgnoredException(ForceReattemptException.class
         .getName());
     try {
 
@@ -654,7 +656,7 @@ public class WANTestBase extends DistributedTestCase{
       Integer batchSize, boolean isConflation, boolean isPersistent,
       String diskStoreName, boolean isDiskSynchronous, int nDispatchers) {
 
-    ExpectedException exp = addExpectedException(ForceReattemptException.class
+    IgnoredException exp = IgnoredException.addIgnoredException(ForceReattemptException.class
         .getName());
 
     try {
@@ -730,7 +732,7 @@ public class WANTestBase extends DistributedTestCase{
     } else {
       persistentDirectory = new File(diskStoreName); 
     }
-    getLogWriter().info("The ds is : " + persistentDirectory.getName());
+    LogWriterUtils.getLogWriter().info("The ds is : " + persistentDirectory.getName());
     persistentDirectory.mkdir();
     DiskStoreFactory dsf = cache.createDiskStoreFactory();
     File [] dirs1 = new File[] {persistentDirectory};
@@ -848,7 +850,7 @@ public class WANTestBase extends DistributedTestCase{
       final Set<RegionQueue> queues = ((AbstractGatewaySender) sender)
           .getQueues();
 
-      waitForCriterion(new WaitCriterion() {
+      Wait.waitForCriterion(new WaitCriterion() {
 
         public String description() {
           return "Waiting for EventQueue size to be " + numQueueEntries;
@@ -870,9 +872,9 @@ public class WANTestBase extends DistributedTestCase{
   }
   
   public static void createPartitionedRegion(String regionName, String senderIds, Integer redundantCopies, Integer totalNumBuckets, Boolean offHeap){
-    ExpectedException exp = addExpectedException(ForceReattemptException.class
+    IgnoredException exp = IgnoredException.addIgnoredException(ForceReattemptException.class
         .getName());
-    ExpectedException exp1 = addExpectedException(PartitionOfflineException.class
+    IgnoredException exp1 = IgnoredException.addIgnoredException(PartitionOfflineException.class
         .getName());
     try {
       AttributesFactory fact = new AttributesFactory();
@@ -903,9 +905,9 @@ public class WANTestBase extends DistributedTestCase{
   // TODO:OFFHEAP: add offheap flavor
   public static void createPartitionedRegionWithPersistence(String regionName,
       String senderIds, Integer redundantCopies, Integer totalNumBuckets) {
-    ExpectedException exp = addExpectedException(ForceReattemptException.class
+    IgnoredException exp = IgnoredException.addIgnoredException(ForceReattemptException.class
         .getName());
-    ExpectedException exp1 = addExpectedException(PartitionOfflineException.class
+    IgnoredException exp1 = IgnoredException.addIgnoredException(PartitionOfflineException.class
         .getName());
     try {
       AttributesFactory fact = new AttributesFactory();
@@ -934,9 +936,9 @@ public class WANTestBase extends DistributedTestCase{
   }
   public static void createColocatedPartitionedRegion(String regionName,
 	      String senderIds, Integer redundantCopies, Integer totalNumBuckets, String colocatedWith) {
-	ExpectedException exp = addExpectedException(ForceReattemptException.class
+	IgnoredException exp = IgnoredException.addIgnoredException(ForceReattemptException.class
 		.getName());
-	ExpectedException exp1 = addExpectedException(PartitionOfflineException.class
+	IgnoredException exp1 = IgnoredException.addIgnoredException(PartitionOfflineException.class
 		.getName());
 	try {
 	  AttributesFactory fact = new AttributesFactory();
@@ -982,9 +984,9 @@ public class WANTestBase extends DistributedTestCase{
   
   public static void createPartitionedRegionWithAsyncEventQueue(
       String regionName, String asyncEventQueueId, Boolean offHeap) {
-    ExpectedException exp = addExpectedException(ForceReattemptException.class
+    IgnoredException exp = IgnoredException.addIgnoredException(ForceReattemptException.class
         .getName());
-    ExpectedException exp1 = addExpectedException(PartitionOfflineException.class
+    IgnoredException exp1 = IgnoredException.addIgnoredException(PartitionOfflineException.class
         .getName());
     try {
       AttributesFactory fact = new AttributesFactory();
@@ -1006,9 +1008,9 @@ public class WANTestBase extends DistributedTestCase{
   public static void createColocatedPartitionedRegionWithAsyncEventQueue(
     String regionName, String asyncEventQueueId, Integer totalNumBuckets, String colocatedWith) {
 	
-	ExpectedException exp = addExpectedException(ForceReattemptException.class
+	IgnoredException exp = IgnoredException.addIgnoredException(ForceReattemptException.class
 	  .getName());
-	ExpectedException exp1 = addExpectedException(PartitionOfflineException.class
+	IgnoredException exp1 = IgnoredException.addIgnoredException(PartitionOfflineException.class
 	  .getName());
 	try {
 	  AttributesFactory fact = new AttributesFactory();
@@ -1051,7 +1053,7 @@ public class WANTestBase extends DistributedTestCase{
    */
   public static void createPRWithRedundantCopyWithAsyncEventQueue(
       String regionName, String asyncEventQueueId, Boolean offHeap) {
-    ExpectedException exp = addExpectedException(ForceReattemptException.class
+    IgnoredException exp = IgnoredException.addIgnoredException(ForceReattemptException.class
         .getName());
 
     try {
@@ -1141,9 +1143,9 @@ public class WANTestBase extends DistributedTestCase{
       Integer totalNumBuckets,
       Boolean offHeap){
     
-    ExpectedException exp = addExpectedException(ForceReattemptException.class
+    IgnoredException exp = IgnoredException.addIgnoredException(ForceReattemptException.class
         .getName());
-    ExpectedException exp1 = addExpectedException(PartitionOfflineException.class
+    IgnoredException exp1 = IgnoredException.addIgnoredException(PartitionOfflineException.class
         .getName());
     try {
 
@@ -1175,7 +1177,7 @@ public class WANTestBase extends DistributedTestCase{
   public static void createCustomerOrderShipmentPartitionedRegion(
       String regionName, String senderIds, Integer redundantCopies,
       Integer totalNumBuckets, Boolean offHeap) {
-    ExpectedException exp = addExpectedException(ForceReattemptException.class
+    IgnoredException exp = IgnoredException.addIgnoredException(ForceReattemptException.class
         .getName());
     try {
       AttributesFactory fact = new AttributesFactory();
@@ -1201,7 +1203,7 @@ public class WANTestBase extends DistributedTestCase{
       customerRegion = (PartitionedRegion)cache.createRegionFactory(
           fact.create()).create(customerRegionName);
       assertNotNull(customerRegion);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "Partitioned Region CUSTOMER created Successfully :"
               + customerRegion.toString());
 
@@ -1226,7 +1228,7 @@ public class WANTestBase extends DistributedTestCase{
       orderRegion = (PartitionedRegion)cache.createRegionFactory(fact.create())
           .create(orderRegionName);
       assertNotNull(orderRegion);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "Partitioned Region ORDER created Successfully :"
               + orderRegion.toString());
 
@@ -1251,7 +1253,7 @@ public class WANTestBase extends DistributedTestCase{
       shipmentRegion = (PartitionedRegion)cache.createRegionFactory(
           fact.create()).create(shipmentRegionName);
       assertNotNull(shipmentRegion);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "Partitioned Region SHIPMENT created Successfully :"
               + shipmentRegion.toString());
     }
@@ -1328,7 +1330,7 @@ public class WANTestBase extends DistributedTestCase{
   }
   
   public static void createCacheConserveSockets(Boolean conserveSockets,Integer locPort){
-    WANTestBase test = new WANTestBase(testName);
+    WANTestBase test = new WANTestBase(getTestMethodName());
     Properties props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
     props.setProperty(DistributionConfig.LOCATORS_NAME, "localhost[" + locPort + "]");
@@ -1338,7 +1340,7 @@ public class WANTestBase extends DistributedTestCase{
   }
   
   protected static void createCache(boolean management, Integer locPort) {
-    WANTestBase test = new WANTestBase(testName);
+    WANTestBase test = new WANTestBase(getTestMethodName());
     Properties props = new Properties();
     if (management) {
       props.setProperty(DistributionConfig.JMX_MANAGER_NAME, "true");
@@ -1353,7 +1355,7 @@ public class WANTestBase extends DistributedTestCase{
   }
   
   protected static void createCacheWithSSL(Integer locPort) {
-    WANTestBase test = new WANTestBase(testName);
+    WANTestBase test = new WANTestBase(getTestMethodName());
 
     boolean gatewaySslenabled = true;
     String  gatewaySslprotocols = "any";
@@ -1361,7 +1363,7 @@ public class WANTestBase extends DistributedTestCase{
     boolean gatewaySslRequireAuth = true;
     
     Properties gemFireProps = new Properties();
-    gemFireProps.put(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+    gemFireProps.put(DistributionConfig.LOG_LEVEL_NAME, LogWriterUtils.getDUnitLogLevel());
     gemFireProps.put(DistributionConfig.GATEWAY_SSL_ENABLED_NAME, String.valueOf(gatewaySslenabled));
     gemFireProps.put(DistributionConfig.GATEWAY_SSL_PROTOCOLS_NAME, gatewaySslprotocols);
     gemFireProps.put(DistributionConfig.GATEWAY_SSL_CIPHERS_NAME, gatewaySslciphers);
@@ -1378,14 +1380,14 @@ public class WANTestBase extends DistributedTestCase{
     gemFireProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
     gemFireProps.setProperty(DistributionConfig.LOCATORS_NAME, "localhost[" + locPort + "]");
     
-    getLogWriter().info("Starting cache ds with following properties \n" + gemFireProps);
+    LogWriterUtils.getLogWriter().info("Starting cache ds with following properties \n" + gemFireProps);
     
     InternalDistributedSystem ds = test.getSystem(gemFireProps);
     cache = CacheFactory.create(ds);    
   }
   
   public static void createCache_PDX(Integer locPort){
-    WANTestBase test = new WANTestBase(testName);
+    WANTestBase test = new WANTestBase(getTestMethodName());
     Properties props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
     props.setProperty(DistributionConfig.LOCATORS_NAME, "localhost[" + locPort + "]");
@@ -1402,7 +1404,7 @@ public class WANTestBase extends DistributedTestCase{
   }
   
   public static void createCache(Integer locPort1, Integer locPort2){
-    WANTestBase test = new WANTestBase(testName);
+    WANTestBase test = new WANTestBase(getTestMethodName());
     Properties props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
     props.setProperty(DistributionConfig.LOCATORS_NAME, "localhost[" + locPort1
@@ -1412,7 +1414,7 @@ public class WANTestBase extends DistributedTestCase{
   }
   
   public static void createCacheWithoutLocator(Integer mCastPort){
-    WANTestBase test = new WANTestBase(testName);
+    WANTestBase test = new WANTestBase(getTestMethodName());
     Properties props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME, ""+mCastPort);
     InternalDistributedSystem ds = test.getSystem(props);
@@ -1433,7 +1435,7 @@ public class WANTestBase extends DistributedTestCase{
       server1.start();
     }
     catch (IOException e) {
-      fail("Failed to start the Server", e);
+      com.gemstone.gemfire.test.dunit.Assert.fail("Failed to start the Server", e);
     }
     assertTrue(server1.isRunning());
 
@@ -1467,11 +1469,11 @@ public class WANTestBase extends DistributedTestCase{
   }
   
   public static void startSender(String senderId) {
-    final ExpectedException exln = addExpectedException("Could not connect");
+    final IgnoredException exln = IgnoredException.addIgnoredException("Could not connect");
 
-    ExpectedException exp = addExpectedException(ForceReattemptException.class
+    IgnoredException exp = IgnoredException.addIgnoredException(ForceReattemptException.class
         .getName());
-    ExpectedException exp1 = addExpectedException(InterruptedException.class
+    IgnoredException exp1 = IgnoredException.addIgnoredException(InterruptedException.class
         .getName());
     try {
       Set<GatewaySender> senders = cache.getGatewaySenders();
@@ -1569,7 +1571,7 @@ public class WANTestBase extends DistributedTestCase{
               + " but actual entries: " + regionQueue.size();
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 120000, 500, true);
+      Wait.waitForCriterion(wc, 120000, 500, true);
     }
     ArrayList<Integer> stats = new ArrayList<Integer>();
     stats.add(statistics.getEventQueueSize());
@@ -1871,7 +1873,7 @@ public class WANTestBase extends DistributedTestCase{
   }
   
   public static void waitForSenderRunningState(String senderId){
-    final ExpectedException exln = addExpectedException("Could not connect");
+    final IgnoredException exln = IgnoredException.addIgnoredException("Could not connect");
     try {
       Set<GatewaySender> senders = cache.getGatewaySenders();
       final GatewaySender sender = getGatewaySenderById(senders, senderId);
@@ -1888,7 +1890,7 @@ public class WANTestBase extends DistributedTestCase{
           return "Expected sender isRunning state to be true but is false";
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 300000, 500, true);
+      Wait.waitForCriterion(wc, 300000, 500, true);
     } finally {
       exln.remove();
     }
@@ -1909,7 +1911,7 @@ public class WANTestBase extends DistributedTestCase{
         return "Expected sender primary state to be true but is false";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 10000, 1000, true); 
+    Wait.waitForCriterion(wc, 10000, 1000, true); 
   }
   
   private static GatewaySender getGatewaySenderById(Set<GatewaySender> senders, String senderId) {
@@ -1951,7 +1953,7 @@ public class WANTestBase extends DistributedTestCase{
         return "Expected seconadry map to be " + primaryUpdatesMap + " but it is " + secondaryUpdatesMap;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 300000, 500, true); 
+    Wait.waitForCriterion(wc, 300000, 500, true); 
   }
   
   public static HashMap checkQueue2(){
@@ -2038,7 +2040,7 @@ public class WANTestBase extends DistributedTestCase{
   }
 
   public static void addListenerOnBucketRegion(String regionName, int numBuckets) {
-    WANTestBase test = new WANTestBase(testName);
+    WANTestBase test = new WANTestBase(getTestMethodName());
     test.addCacheListenerOnBucketRegion(regionName, numBuckets);
   }
   
@@ -2053,7 +2055,7 @@ public class WANTestBase extends DistributedTestCase{
   }
   
   public static void addListenerOnQueueBucketRegion(String senderId, int numBuckets) {
-    WANTestBase test = new WANTestBase(testName);
+    WANTestBase test = new WANTestBase(getTestMethodName());
     test.addCacheListenerOnQueueBucketRegion(senderId, numBuckets);
   }
   
@@ -2082,17 +2084,17 @@ public class WANTestBase extends DistributedTestCase{
   }
   
   public static void addQueueListener(String senderId, boolean isParallel){
-    WANTestBase test = new WANTestBase(testName);
+    WANTestBase test = new WANTestBase(getTestMethodName());
     test.addCacheQueueListener(senderId, isParallel);
   }
   
   public static void addSecondQueueListener(String senderId, boolean isParallel){
-    WANTestBase test = new WANTestBase(testName);
+    WANTestBase test = new WANTestBase(getTestMethodName());
     test.addSecondCacheQueueListener(senderId, isParallel);
   }
   
   public static void addListenerOnRegion(String regionName){
-    WANTestBase test = new WANTestBase(testName);
+    WANTestBase test = new WANTestBase(getTestMethodName());
     test.addCacheListenerOnRegion(regionName);
   }
   private void addCacheListenerOnRegion(String regionName){
@@ -2149,8 +2151,8 @@ public class WANTestBase extends DistributedTestCase{
   }
   
   public static void pauseSender(String senderId) {
-    final ExpectedException exln = addExpectedException("Could not connect");
-    ExpectedException exp = addExpectedException(ForceReattemptException.class
+    final IgnoredException exln = IgnoredException.addIgnoredException("Could not connect");
+    IgnoredException exp = IgnoredException.addIgnoredException(ForceReattemptException.class
         .getName());
     try {
       Set<GatewaySender> senders = cache.getGatewaySenders();
@@ -2170,10 +2172,31 @@ public class WANTestBase extends DistributedTestCase{
       exln.remove();
     }
   }
+      
+  public static void pauseSenderAndWaitForDispatcherToPause(String senderId) {
+    final IgnoredException exln = IgnoredException.addIgnoredException("Could not connect");
+    IgnoredException exp = IgnoredException.addIgnoredException(ForceReattemptException.class
+        .getName());
+    try {
+      Set<GatewaySender> senders = cache.getGatewaySenders();
+      GatewaySender sender = null;
+      for (GatewaySender s : senders) {
+        if (s.getId().equals(senderId)) {
+          sender = s;
+          break;
+        }
+      }
+      sender.pause();
+      ((AbstractGatewaySender)sender).getEventProcessor().waitForDispatcherToPause();
+    } finally {
+      exp.remove();
+      exln.remove();
+    }    
+  }
   
   public static void resumeSender(String senderId) {
-    final ExpectedException exln = addExpectedException("Could not connect");
-    ExpectedException exp = addExpectedException(ForceReattemptException.class
+    final IgnoredException exln = IgnoredException.addIgnoredException("Could not connect");
+    IgnoredException exp = IgnoredException.addIgnoredException(ForceReattemptException.class
         .getName());
     try {
       Set<GatewaySender> senders = cache.getGatewaySenders();
@@ -2193,8 +2216,8 @@ public class WANTestBase extends DistributedTestCase{
   }
 
   public static void stopSender(String senderId) {
-    final ExpectedException exln = addExpectedException("Could not connect");
-    ExpectedException exp = addExpectedException(ForceReattemptException.class
+    final IgnoredException exln = IgnoredException.addIgnoredException("Could not connect");
+    IgnoredException exp = IgnoredException.addIgnoredException(ForceReattemptException.class
         .getName());
     try {
       Set<GatewaySender> senders = cache.getGatewaySenders();
@@ -2235,7 +2258,7 @@ public class WANTestBase extends DistributedTestCase{
       boolean isParallel, Integer maxMemory,
       Integer batchSize, boolean isConflation, boolean isPersistent,
       GatewayEventFilter filter, boolean isManulaStart) {
-    final ExpectedException exln = addExpectedException("Could not connect");
+    final IgnoredException exln = IgnoredException.addIgnoredException("Could not connect");
     try {
       File persistentDirectory = new File(dsName + "_disk_"
           + System.currentTimeMillis() + "_" + VM.getCurrentVMNum());
@@ -2300,7 +2323,7 @@ public class WANTestBase extends DistributedTestCase{
 	boolean isParallel, Integer maxMemory,
 	Integer batchSize, boolean isConflation, boolean isPersistent,
 	GatewayEventFilter filter, boolean isManulaStart, int numDispatchers, OrderPolicy orderPolicy) {
-	  final ExpectedException exln = addExpectedException("Could not connect");
+	  final IgnoredException exln = IgnoredException.addIgnoredException("Could not connect");
 	  try {
 		File persistentDirectory = new File(dsName + "_disk_"
 			+ System.currentTimeMillis() + "_" + VM.getCurrentVMNum());
@@ -2485,7 +2508,7 @@ public class WANTestBase extends DistributedTestCase{
       List<GatewayEventFilter> eventfilters,
       List<GatewayTransportFilter> tranportFilters, boolean isManulaStart,
       boolean isDiskSync) {
-    ExpectedException exp1 = addExpectedException(RegionDestroyedException.class
+    IgnoredException exp1 = IgnoredException.addIgnoredException(RegionDestroyedException.class
         .getName());
     try {
       File persistentDirectory = new File(dsName + "_disk_"
@@ -2577,7 +2600,7 @@ public class WANTestBase extends DistributedTestCase{
     else {
       persistentDirectory = new File(dsStore);  
     }
-    getLogWriter().info("The ds is : " + persistentDirectory.getName());
+    LogWriterUtils.getLogWriter().info("The ds is : " + persistentDirectory.getName());
     
     persistentDirectory.mkdir();
     DiskStoreFactory dsf = cache.createDiskStoreFactory();
@@ -2599,12 +2622,12 @@ public class WANTestBase extends DistributedTestCase{
         gateway.setPersistenceEnabled(true);
         String dsname = dsf.setDiskDirs(dirs1).create(dsName).getName();
         gateway.setDiskStoreName(dsname);
-        getLogWriter().info("The DiskStoreName is : " + dsname);
+        LogWriterUtils.getLogWriter().info("The DiskStoreName is : " + dsname);
       }
       else {
         DiskStore store = dsf.setDiskDirs(dirs1).create(dsName);
         gateway.setDiskStoreName(store.getName());
-        getLogWriter().info("The ds is : " + store.getName());
+        LogWriterUtils.getLogWriter().info("The ds is : " + store.getName());
       }
       gateway.setBatchConflationEnabled(isConflation);
       gateway.create(dsName, remoteDsId);
@@ -2714,11 +2737,11 @@ public class WANTestBase extends DistributedTestCase{
         return "Expected to wait for " + millisec + " millisec.";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, millisec, 500, false); 
+    Wait.waitForCriterion(wc, millisec, 500, false); 
   }
   
   public static int createReceiver(int locPort) {
-    WANTestBase test = new WANTestBase(testName);
+    WANTestBase test = new WANTestBase(getTestMethodName());
     Properties props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
     props.setProperty(DistributionConfig.LOCATORS_NAME, "localhost[" + locPort
@@ -2744,10 +2767,10 @@ public class WANTestBase extends DistributedTestCase{
   }
   
   public static void createReceiverWithBindAddress(int locPort) {
-    WANTestBase test = new WANTestBase(testName);
+    WANTestBase test = new WANTestBase(getTestMethodName());
     Properties props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
-    props.setProperty(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+    props.setProperty(DistributionConfig.LOG_LEVEL_NAME, LogWriterUtils.getDUnitLogLevel());
     props.setProperty(DistributionConfig.LOCATORS_NAME, "localhost[" + locPort
         + "]");
 
@@ -2765,7 +2788,7 @@ public class WANTestBase extends DistributedTestCase{
       fail("Expected GatewayReciever Exception");
     }
     catch (GatewayReceiverException gRE){
-      getLogWriter().fine("KBKBKB : got the GatewayReceiverException", gRE);
+      LogWriterUtils.getLogWriter().fine("KBKBKB : got the GatewayReceiverException", gRE);
       assertTrue(gRE.getMessage().contains("Failed to create server socket on"));
     }
     catch (IOException e) {
@@ -2775,7 +2798,7 @@ public class WANTestBase extends DistributedTestCase{
     }
   }
   public static int createReceiverWithSSL(int locPort) {
-    WANTestBase test = new WANTestBase(testName);
+    WANTestBase test = new WANTestBase(getTestMethodName());
     boolean gatewaySslenabled = true;
     String  gatewaySslprotocols = "any";
     String  gatewaySslciphers = "any";
@@ -2783,7 +2806,7 @@ public class WANTestBase extends DistributedTestCase{
     
     Properties gemFireProps = new Properties();
 
-    gemFireProps.put(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+    gemFireProps.put(DistributionConfig.LOG_LEVEL_NAME, LogWriterUtils.getDUnitLogLevel());
     gemFireProps.put(DistributionConfig.GATEWAY_SSL_ENABLED_NAME, String.valueOf(gatewaySslenabled));
     gemFireProps.put(DistributionConfig.GATEWAY_SSL_PROTOCOLS_NAME, gatewaySslprotocols);
     gemFireProps.put(DistributionConfig.GATEWAY_SSL_CIPHERS_NAME, gatewaySslciphers);
@@ -2800,7 +2823,7 @@ public class WANTestBase extends DistributedTestCase{
     gemFireProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
     gemFireProps.setProperty(DistributionConfig.LOCATORS_NAME, "localhost[" + locPort + "]");
 
-    getLogWriter().info("Starting cache ds with following properties \n" + gemFireProps);
+    LogWriterUtils.getLogWriter().info("Starting cache ds with following properties \n" + gemFireProps);
     
     InternalDistributedSystem ds = test.getSystem(gemFireProps);
     cache = CacheFactory.create(ds);    
@@ -2831,7 +2854,7 @@ public class WANTestBase extends DistributedTestCase{
   }
   
   public static int createReceiverAfterCache(int locPort) {
-    WANTestBase test = new WANTestBase(testName);
+    WANTestBase test = new WANTestBase(getTestMethodName());
     GatewayReceiverFactory fact = cache.createGatewayReceiverFactory();
     int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
     fact.setStartPort(port);
@@ -2850,7 +2873,7 @@ public class WANTestBase extends DistributedTestCase{
   }
   
   public static void createReceiverAndServer(int locPort) {
-    WANTestBase test = new WANTestBase(testName);
+    WANTestBase test = new WANTestBase(getTestMethodName());
     Properties props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
     props.setProperty(DistributionConfig.LOCATORS_NAME, "localhost[" + locPort
@@ -2880,7 +2903,7 @@ public class WANTestBase extends DistributedTestCase{
     try {
       server.start();
     } catch (IOException e) {
-      fail("Failed to start server ", e);
+      com.gemstone.gemfire.test.dunit.Assert.fail("Failed to start server ", e);
     }
   }
   
@@ -2896,13 +2919,13 @@ public class WANTestBase extends DistributedTestCase{
         }
         catch (IOException e) {
           e.printStackTrace();
-          fail("Failed to start GatewayRecevier on port " + port, e);
+          com.gemstone.gemfire.test.dunit.Assert.fail("Failed to start GatewayRecevier on port " + port, e);
         }
 	return port;
   }
   
   public static int createServer(int locPort) {
-    WANTestBase test = new WANTestBase(testName);
+    WANTestBase test = new WANTestBase(getTestMethodName());
     Properties props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
     props.setProperty(DistributionConfig.LOCATORS_NAME, "localhost[" + locPort
@@ -2918,14 +2941,14 @@ public class WANTestBase extends DistributedTestCase{
     try {
       server.start();
     } catch (IOException e) {
-      fail("Failed to start server ", e);
+      com.gemstone.gemfire.test.dunit.Assert.fail("Failed to start server ", e);
     }
     return port;
   }
   
   public static void createClientWithLocator(int port0,String host, 
       String regionName) {
-    WANTestBase test = new WANTestBase(testName);
+    WANTestBase test = new WANTestBase(getTestMethodName());
     Properties props = new Properties();
     props = new Properties();
     props.setProperty("mcast-port", "0");
@@ -2954,13 +2977,13 @@ public class WANTestBase extends DistributedTestCase{
     region = cache.createRegion(regionName, attrs);
     region.registerInterest("ALL_KEYS");
     assertNotNull(region);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Distributed Region " + regionName + " created Successfully :"
             + region.toString());
   }
   
   public static int createReceiver_PDX(int locPort) {
-    WANTestBase test = new WANTestBase(testName);
+    WANTestBase test = new WANTestBase(getTestMethodName());
     Properties props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
     props.setProperty(DistributionConfig.LOCATORS_NAME, "localhost[" + locPort + "]");
@@ -2992,7 +3015,7 @@ public class WANTestBase extends DistributedTestCase{
   }
   
   public static void createReceiver2(int locPort) {
-    WANTestBase test = new WANTestBase(testName);
+    WANTestBase test = new WANTestBase(getTestMethodName());
     GatewayReceiverFactory fact = cache.createGatewayReceiverFactory();
     int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
     fact.setStartPort(port);
@@ -3013,9 +3036,9 @@ public class WANTestBase extends DistributedTestCase{
     CacheTransactionManager txMgr = cache.getCacheTransactionManager();
     txMgr.setDistributed(true);
     
-    ExpectedException exp1 = addExpectedException(InterruptedException.class
+    IgnoredException exp1 = IgnoredException.addIgnoredException(InterruptedException.class
         .getName());
-    ExpectedException exp2 = addExpectedException(GatewaySenderException.class
+    IgnoredException exp2 = IgnoredException.addIgnoredException(GatewaySenderException.class
         .getName());
     try {
       Region r = cache.getRegion(Region.SEPARATOR + regionName);
@@ -3035,9 +3058,9 @@ public class WANTestBase extends DistributedTestCase{
   }
   
   public static void doPuts(String regionName, int numPuts) {
-    ExpectedException exp1 = addExpectedException(InterruptedException.class
+    IgnoredException exp1 = IgnoredException.addIgnoredException(InterruptedException.class
         .getName());
-    ExpectedException exp2 = addExpectedException(GatewaySenderException.class
+    IgnoredException exp2 = IgnoredException.addIgnoredException(GatewaySenderException.class
         .getName());
     try {
       Region r = cache.getRegion(Region.SEPARATOR + regionName);
@@ -3138,7 +3161,7 @@ public class WANTestBase extends DistributedTestCase{
         return "Looking for min size of region to be " + min;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 30000, 5, false); 
+    Wait.waitForCriterion(wc, 30000, 5, false); 
     r.destroyRegion();
   }
 
@@ -3157,13 +3180,13 @@ public class WANTestBase extends DistributedTestCase{
         return "Looking for min size of region to be " + min;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 30000, 5, false); 
+    Wait.waitForCriterion(wc, 30000, 5, false); 
     r.destroyRegion();
     destroyFlag = false;
   }
   
   public static void localDestroyRegion(String regionName) {
-    ExpectedException exp = addExpectedException(PRLocallyDestroyedException.class
+    IgnoredException exp = IgnoredException.addIgnoredException(PRLocallyDestroyedException.class
         .getName());
     try {
       Region r = cache.getRegion(Region.SEPARATOR + regionName);
@@ -3200,11 +3223,11 @@ public class WANTestBase extends DistributedTestCase{
         custKeyValues.put(custid, customer);
       }
       catch (Exception e) {
-        fail(
+        com.gemstone.gemfire.test.dunit.Assert.fail(
             "putCustomerPartitionedRegion : failed while doing put operation in CustomerPartitionedRegion ",
             e);
       }
-      getLogWriter().info("Customer :- { " + custid + " : " + customer + " }");
+      LogWriterUtils.getLogWriter().info("Customer :- { " + custid + " : " + customer + " }");
     }
     return custKeyValues;
   }
@@ -3226,11 +3249,11 @@ public class WANTestBase extends DistributedTestCase{
 
       }
       catch (Exception e) {
-        fail(
+        com.gemstone.gemfire.test.dunit.Assert.fail(
             "putOrderPartitionedRegion : failed while doing put operation in OrderPartitionedRegion ",
             e);
       }
-      getLogWriter().info("Order :- { " + orderId + " : " + order + " }");
+      LogWriterUtils.getLogWriter().info("Order :- { " + orderId + " : " + order + " }");
     }
     return orderKeyValues;
   }
@@ -3249,11 +3272,11 @@ public class WANTestBase extends DistributedTestCase{
         assertEquals(order, orderRegion.get(custid));
 
       } catch (Exception e) {
-        fail(
+        com.gemstone.gemfire.test.dunit.Assert.fail(
             "putOrderPartitionedRegionUsingCustId : failed while doing put operation in OrderPartitionedRegion ",
             e);
       }
-      getLogWriter().info("Order :- { " + custid + " : " + order + " }");
+      LogWriterUtils.getLogWriter().info("Order :- { " + custid + " : " + order + " }");
     }
     return orderKeyValues;
   }
@@ -3276,11 +3299,11 @@ public class WANTestBase extends DistributedTestCase{
 
         }
         catch (Exception e) {
-          fail(
+          com.gemstone.gemfire.test.dunit.Assert.fail(
               "updateOrderPartitionedRegion : failed while doing put operation in OrderPartitionedRegion ",
               e);
         }
-        getLogWriter().info("Order :- { " + orderId + " : " + order + " }");
+        LogWriterUtils.getLogWriter().info("Order :- { " + orderId + " : " + order + " }");
       }
     }
     return orderKeyValues;
@@ -3299,11 +3322,11 @@ public class WANTestBase extends DistributedTestCase{
         assertEquals(order, orderRegion.get(custid));
         orderKeyValues.put(custid, order);
       } catch (Exception e) {
-        fail(
+        com.gemstone.gemfire.test.dunit.Assert.fail(
             "updateOrderPartitionedRegionUsingCustId : failed while doing put operation in OrderPartitionedRegion ",
             e);
       }
-      getLogWriter().info("Order :- { " + custid + " : " + order + " }");
+      LogWriterUtils.getLogWriter().info("Order :- { " + custid + " : " + order + " }");
     }
     return orderKeyValues;
   }
@@ -3328,11 +3351,11 @@ public class WANTestBase extends DistributedTestCase{
             shipmentKeyValue.put(shipmentId, shipment);
           }
           catch (Exception e) {
-            fail(
+            com.gemstone.gemfire.test.dunit.Assert.fail(
                 "putShipmentPartitionedRegion : failed while doing put operation in ShipmentPartitionedRegion ",
                 e);
           }
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "Shipment :- { " + shipmentId + " : " + shipment + " }");
         }
       }
@@ -3377,11 +3400,11 @@ public class WANTestBase extends DistributedTestCase{
         assertEquals(shipment, shipmentRegion.get(custid));
         shipmentKeyValue.put(custid, shipment);
       } catch (Exception e) {
-        fail(
+        com.gemstone.gemfire.test.dunit.Assert.fail(
             "putShipmentPartitionedRegionUsingCustId : failed while doing put operation in ShipmentPartitionedRegion ",
             e);
       }
-      getLogWriter().info("Shipment :- { " + custid + " : " + shipment + " }");
+      LogWriterUtils.getLogWriter().info("Shipment :- { " + custid + " : " + shipment + " }");
     }
     return shipmentKeyValue;
   }
@@ -3406,11 +3429,11 @@ public class WANTestBase extends DistributedTestCase{
             shipmentKeyValue.put(shipmentId, shipment);
           }
           catch (Exception e) {
-            fail(
+            com.gemstone.gemfire.test.dunit.Assert.fail(
                 "updateShipmentPartitionedRegion : failed while doing put operation in ShipmentPartitionedRegion ",
                 e);
           }
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "Shipment :- { " + shipmentId + " : " + shipment + " }");
         }
       }
@@ -3431,11 +3454,11 @@ public class WANTestBase extends DistributedTestCase{
         assertEquals(shipment, shipmentRegion.get(custid));
         shipmentKeyValue.put(custid, shipment);
       } catch (Exception e) {
-        fail(
+        com.gemstone.gemfire.test.dunit.Assert.fail(
             "updateShipmentPartitionedRegionUsingCustId : failed while doing put operation in ShipmentPartitionedRegion ",
             e);
       }
-      getLogWriter().info("Shipment :- { " + custid + " : " + shipment + " }");
+      LogWriterUtils.getLogWriter().info("Shipment :- { " + custid + " : " + shipment + " }");
     }
     return shipmentKeyValue;
   }
@@ -3471,7 +3494,7 @@ public class WANTestBase extends DistributedTestCase{
     
   public static void doNextPuts(String regionName, int start, int numPuts) {
     //waitForSitesToUpdate();
-    ExpectedException exp = addExpectedException(CacheClosedException.class
+    IgnoredException exp = IgnoredException.addIgnoredException(CacheClosedException.class
         .getName());
     try {
       Region r = cache.getRegion(Region.SEPARATOR + regionName);
@@ -3553,7 +3576,7 @@ public class WANTestBase extends DistributedTestCase{
         
       };
       
-      DistributedTestCase.waitForCriterion(wc, 120000, 500, true);
+      Wait.waitForCriterion(wc, 120000, 500, true);
     }
   }
   
@@ -3647,9 +3670,9 @@ public class WANTestBase extends DistributedTestCase{
   }
 
   public static void validateRegionSize(String regionName, final int regionSize) {
-    ExpectedException exp = addExpectedException(ForceReattemptException.class
+    IgnoredException exp = IgnoredException.addIgnoredException(ForceReattemptException.class
         .getName());
-    ExpectedException exp1 = addExpectedException(CacheClosedException.class
+    IgnoredException exp1 = IgnoredException.addIgnoredException(CacheClosedException.class
         .getName());
     try {
 
@@ -3669,7 +3692,7 @@ public class WANTestBase extends DistributedTestCase{
               + " present region keyset " + r.keySet();
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 240000, 500, true);
+      Wait.waitForCriterion(wc, 240000, 500, true);
     } finally {
       exp.remove();
       exp1.remove();
@@ -3733,7 +3756,7 @@ public class WANTestBase extends DistributedTestCase{
             + " but actual entries: " + eventsMap.size();
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60000, 500, true); //TODO:Yogs 
+    Wait.waitForCriterion(wc, 60000, 500, true); //TODO:Yogs 
   }
   
    public static void validateCustomAsyncEventListener(String asyncQueueId,
@@ -3762,7 +3785,7 @@ public class WANTestBase extends DistributedTestCase{
             + " but actual entries: " + eventsMap.size();
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60000, 500, true); // TODO:Yogs
+    Wait.waitForCriterion(wc, 60000, 500, true); // TODO:Yogs
     
    Iterator<AsyncEvent> itr = eventsMap.values().iterator();
    while (itr.hasNext()) {
@@ -3809,7 +3832,7 @@ public class WANTestBase extends DistributedTestCase{
               + size;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 60000, 500, true);
+      Wait.waitForCriterion(wc, 60000, 500, true);
 
     } else {
       WaitCriterion wc = new WaitCriterion() {
@@ -3837,7 +3860,7 @@ public class WANTestBase extends DistributedTestCase{
               + size;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 60000, 500, true);
+      Wait.waitForCriterion(wc, 60000, 500, true);
     }
   }
   
@@ -3860,7 +3883,7 @@ public class WANTestBase extends DistributedTestCase{
     for (int bucketId : bucketIds) {
       List<GatewaySenderEventImpl> eventsForBucket = bucketToEventsMap
           .get(bucketId);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "Events for bucket: " + bucketId + " is " + eventsForBucket);
       assertNotNull(eventsForBucket);
       for (int i = 0; i < batchSize; i++) {
@@ -3882,7 +3905,7 @@ public class WANTestBase extends DistributedTestCase{
 
     final Map eventsMap = ((MyAsyncEventListener)theListener).getEventsMap();
     assertNotNull(eventsMap);
-    getLogWriter().info("The events map size is " + eventsMap.size());
+    LogWriterUtils.getLogWriter().info("The events map size is " + eventsMap.size());
     return eventsMap.size();
   }
   
@@ -3916,9 +3939,9 @@ public class WANTestBase extends DistributedTestCase{
         return "Expected region entries: " + regionSize + " but actual entries: " + r.keySet().size() + " present region keyset " + r.keySet()  ;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 200000, 500, true); 
+    Wait.waitForCriterion(wc, 200000, 500, true); 
     for(int i = 0 ; i < regionSize; i++){
-      getLogWriter().info("For Key : Key_"+i + " : Values : " + r.get("Key_" + i));
+      LogWriterUtils.getLogWriter().info("For Key : Key_"+i + " : Values : " + r.get("Key_" + i));
       assertEquals(new SimpleClass(i, (byte)i), r.get("Key_" + i));
     }
   }
@@ -3938,9 +3961,9 @@ public class WANTestBase extends DistributedTestCase{
         return "Expected region entries: " + regionSize + " but actual entries: " + r.keySet().size() + " present region keyset " + r.keySet()  ;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 200000, 500, true); 
+    Wait.waitForCriterion(wc, 200000, 500, true); 
     for(int i = 0 ; i < regionSize; i++){
-      getLogWriter().info("For Key : Key_"+i + " : Values : " + r.get("Key_" + i));
+      LogWriterUtils.getLogWriter().info("For Key : Key_"+i + " : Values : " + r.get("Key_" + i));
       assertEquals(new SimpleClass1(false, (short) i, "" + i, i,"" +i ,""+ i,i, i), r.get("Key_" + i));
     }
   }
@@ -3948,7 +3971,7 @@ public class WANTestBase extends DistributedTestCase{
   public static void validateQueueSizeStat(String id, final int queueSize) {
     final AbstractGatewaySender sender = (AbstractGatewaySender)  cache.getGatewaySender(id);
     
-    waitForCriterion(new WaitCriterion() {
+    Wait.waitForCriterion(new WaitCriterion() {
       
       @Override
       public boolean done() {
@@ -4001,7 +4024,7 @@ public class WANTestBase extends DistributedTestCase{
         return "Expected region size to remain same below a specified limit but actual region size does not remain same or exceeded the specified limit " + sameRegionSizeCounter + " :regionSize " + previousSize;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 200000, 500, true); 
+    Wait.waitForCriterion(wc, 200000, 500, true); 
   }
   
   public static String getRegionFullPath(String regionName) {
@@ -4023,7 +4046,7 @@ public class WANTestBase extends DistributedTestCase{
       public boolean done() {
         for(Object key: keyValues.keySet()) {
           if (!r.get(key).equals(keyValues.get(key))) {
-            getLogWriter().info(
+            LogWriterUtils.getLogWriter().info(
                 "The values are for key " + "  " + key + " " + r.get(key)
                     + " in the map " + keyValues.get(key));
             return false;
@@ -4036,7 +4059,7 @@ public class WANTestBase extends DistributedTestCase{
         return "Expected region entries doesn't match";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 120000, 500, true); 
+    Wait.waitForCriterion(wc, 120000, 500, true); 
   }
   
   public static void CheckContent(String regionName, final int regionSize) {
@@ -4063,7 +4086,7 @@ public class WANTestBase extends DistributedTestCase{
         return "Expected region entries: " + regionSize + " but actual entries: " + r.keySet().size();
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 120000, 500, true); 
+    Wait.waitForCriterion(wc, 120000, 500, true); 
   }
   
   public static void verifyPrimaryStatus(final Boolean isPrimary) {
@@ -4083,7 +4106,7 @@ public class WANTestBase extends DistributedTestCase{
         return "Expected sender to be : " + isPrimary.booleanValue() + " but actually it is : " + sender.isPrimary();
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 120000, 500, true);
+    Wait.waitForCriterion(wc, 120000, 500, true);
   }
   
   public static Boolean getPrimaryStatus(){
@@ -4102,7 +4125,7 @@ public class WANTestBase extends DistributedTestCase{
         return "Checking Primary Status";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 10000, 500, false);
+    Wait.waitForCriterion(wc, 10000, 500, false);
     return sender.isPrimary();
   }
   
@@ -4170,10 +4193,10 @@ public class WANTestBase extends DistributedTestCase{
   }
   
   public static Boolean killSender(String senderId){
-    final ExpectedException exln = addExpectedException("Could not connect");
-    ExpectedException exp = addExpectedException(CacheClosedException.class
+    final IgnoredException exln = IgnoredException.addIgnoredException("Could not connect");
+    IgnoredException exp = IgnoredException.addIgnoredException(CacheClosedException.class
         .getName());
-    ExpectedException exp1 = addExpectedException(ForceReattemptException.class
+    IgnoredException exp1 = IgnoredException.addIgnoredException(ForceReattemptException.class
         .getName());
     try {
     Set<GatewaySender> senders = cache.getGatewaySenders();
@@ -4185,7 +4208,7 @@ public class WANTestBase extends DistributedTestCase{
       }
     }
     if (sender.isPrimary()) {
-      getLogWriter().info("Gateway sender is killed by a test");
+      LogWriterUtils.getLogWriter().info("Gateway sender is killed by a test");
       cache.getDistributedSystem().disconnect();
       return Boolean.TRUE;
     }
@@ -4207,7 +4230,7 @@ public class WANTestBase extends DistributedTestCase{
       }
     }
     if (queue.isPrimary()) {
-      getLogWriter().info("AsyncEventQueue is killed by a test");
+      LogWriterUtils.getLogWriter().info("AsyncEventQueue is killed by a test");
       cache.getDistributedSystem().disconnect();
       return Boolean.TRUE;
     }
@@ -4215,10 +4238,10 @@ public class WANTestBase extends DistributedTestCase{
   }
   
   public static void killSender(){
-    getLogWriter().info("Gateway sender is going to be killed by a test"); 
+    LogWriterUtils.getLogWriter().info("Gateway sender is going to be killed by a test"); 
     cache.close();
     cache.getDistributedSystem().disconnect();
-    getLogWriter().info("Gateway sender is killed by a test");
+    LogWriterUtils.getLogWriter().info("Gateway sender is killed by a test");
   }
   
   static void waitForSitesToUpdate() {
@@ -4230,7 +4253,7 @@ public class WANTestBase extends DistributedTestCase{
         return "Waiting for all sites to get updated";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 10000, 500, false);
+    Wait.waitForCriterion(wc, 10000, 500, false);
   }
   
   public static void checkAllSiteMetaData(     
@@ -4281,7 +4304,7 @@ public class WANTestBase extends DistributedTestCase{
         return "Making sure system is initialized";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 50000, 1000, true); 
+    Wait.waitForCriterion(wc, 50000, 1000, true); 
     assertNotNull(system);
     
 //    final Map<Integer,Set<DistributionLocatorId>> allSiteMetaData = ((DistributionConfigImpl)system
@@ -4320,7 +4343,7 @@ public class WANTestBase extends DistributedTestCase{
             + " but actual meta data: " + allSiteMetaData;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 300000, 500, true); 
+    Wait.waitForCriterion(wc, 300000, 500, true); 
     return System.currentTimeMillis();
   }
   
@@ -4340,7 +4363,7 @@ public class WANTestBase extends DistributedTestCase{
         .getLocatorDiscoveryCallback();
      
     boolean discovered = callback.waitForDiscovery(locatorToWaitFor, MAX_WAIT);
-    Assert.assertTrue(
+    assertTrue(
         "Waited " + MAX_WAIT + " for " + locatorToWaitFor
             + " to be discovered on client. List is now: "
             + callback.getDiscovered(), discovered);
@@ -4348,9 +4371,9 @@ public class WANTestBase extends DistributedTestCase{
   
   public static void validateQueueContents(final String senderId,
       final int regionSize) {
-    ExpectedException exp1 = addExpectedException(InterruptedException.class
+    IgnoredException exp1 = IgnoredException.addIgnoredException(InterruptedException.class
         .getName());
-    ExpectedException exp2 = addExpectedException(GatewaySenderException.class
+    IgnoredException exp2 = IgnoredException.addIgnoredException(GatewaySenderException.class
         .getName());
     try {
       Set<GatewaySender> senders = cache.getGatewaySenders();
@@ -4384,7 +4407,7 @@ public class WANTestBase extends DistributedTestCase{
                 + " but actual entries: " + size;
           }
         };
-        DistributedTestCase.waitForCriterion(wc, 120000, 500, true);
+        Wait.waitForCriterion(wc, 120000, 500, true);
 
       } else if (sender.isParallel()) {
         final RegionQueue regionQueue;
@@ -4403,7 +4426,7 @@ public class WANTestBase extends DistributedTestCase{
                 + " but actual entries: " + regionQueue.size();
           }
         };
-        DistributedTestCase.waitForCriterion(wc, 120000, 500, true);
+        Wait.waitForCriterion(wc, 120000, 500, true);
       }
     } finally {
       exp1.remove();
@@ -4468,7 +4491,7 @@ public class WANTestBase extends DistributedTestCase{
             + " but actual entries: " + size;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 120000, 500, true);
+    Wait.waitForCriterion(wc, 120000, 500, true);
   }
   
   public static Integer getQueueContentSize(final String senderId) {
@@ -4516,9 +4539,9 @@ public class WANTestBase extends DistributedTestCase{
 
   public static void validateParallelSenderQueueAllBucketsDrained(
       final String senderId) {
-    ExpectedException exp = addExpectedException(RegionDestroyedException.class
+    IgnoredException exp = IgnoredException.addIgnoredException(RegionDestroyedException.class
         .getName());
-    ExpectedException exp1 = addExpectedException(ForceReattemptException.class
+    IgnoredException exp1 = IgnoredException.addIgnoredException(ForceReattemptException.class
         .getName());
     try {
       Set<GatewaySender> senders = cache.getGatewaySenders();
@@ -4537,7 +4560,7 @@ public class WANTestBase extends DistributedTestCase{
         WaitCriterion wc = new WaitCriterion() {
           public boolean done() {
             if (bucket.keySet().size() == 0) {
-              getLogWriter().info("Bucket " + bucket.getId() + " is empty");
+              LogWriterUtils.getLogWriter().info("Bucket " + bucket.getId() + " is empty");
               return true;
             }
             return false;
@@ -4551,7 +4574,7 @@ public class WANTestBase extends DistributedTestCase{
                 + bucket.keySet();
           }
         };
-        DistributedTestCase.waitForCriterion(wc, 180000, 50, true);
+        Wait.waitForCriterion(wc, 180000, 50, true);
       }// for loop ends
     } finally {
       exp.remove();
@@ -5095,8 +5118,8 @@ public class WANTestBase extends DistributedTestCase{
     }
   }*/
   
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+ @Override
+ protected final void preTearDown() throws Exception {
     cleanupVM();
     vm0.invoke(WANTestBase.class, "cleanupVM");
     vm1.invoke(WANTestBase.class, "cleanupVM");
@@ -5122,7 +5145,7 @@ public class WANTestBase extends DistributedTestCase{
       cache.getDistributedSystem().disconnect();
       cache = null;
     } else {
-      WANTestBase test = new WANTestBase(testName);
+      WANTestBase test = new WANTestBase(getTestMethodName());
       if (test.isConnectedToDS()) {
         test.getSystem().disconnect();
       }
@@ -5135,7 +5158,7 @@ public class WANTestBase extends DistributedTestCase{
   }
   
   public static void shutdownLocator() {
-    WANTestBase test = new WANTestBase(testName);
+    WANTestBase test = new WANTestBase(getTestMethodName());
     test.getSystem().disconnect();
   }  
   


[31/62] [abbrv] [partial] incubator-geode git commit: GEODE-773: Extract static methods from DistributedTestCase

Posted by je...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionWithSameNameDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionWithSameNameDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionWithSameNameDUnitTest.java
index 6a7807f..d8eaf9c 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionWithSameNameDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionWithSameNameDUnitTest.java
@@ -37,7 +37,9 @@ import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.cache.RegionExistsException;
 import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -106,7 +108,7 @@ public class PartitionedRegionWithSameNameDUnitTest extends
     firstCreationFlag = true;
     createPartitionRegion(vmList, startIndexForRegion, endIndexForRegion,
         localMaxMemory, redundancy, firstCreationFlag, multipleVMFlag);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testNameWithPartitionRegionFirstOnSameVM() - Partition Regions successfully created ");
     // creating distributed region on same vm with same name as previouslu
@@ -117,7 +119,7 @@ public class PartitionedRegionWithSameNameDUnitTest extends
     vmList = addNodeToList(startIndexForNode, endIndexForNode);
     createDistributedRegion(vmList, startIndexForRegion, endIndexForRegion,
         Scope.DISTRIBUTED_ACK, firstCreationFlag, multipleVMFlag);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testNameWithPartitionRegionFirstOnSameVM() - test completed successfully ");
   }
@@ -158,7 +160,7 @@ public class PartitionedRegionWithSameNameDUnitTest extends
     createPartitionRegion(vmList, startIndexForRegion, endIndexForRegion,
         localMaxMemory, redundancy, firstCreationFlag, multipleVMFlag);
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testNameWithPartitionRegionFirstOnSameVM() - test completed successfully ");
   }
@@ -195,7 +197,7 @@ public class PartitionedRegionWithSameNameDUnitTest extends
     firstCreationFlag = true;
     createPartitionRegion(vmList, startIndexForRegion, endIndexForRegion,
         localMaxMemory, redundancy, firstCreationFlag, multipleVMFlag);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testNameWithPartitionRegionFirstOnDifferentVM() - Partition Regions successfully created ");
     // creating distrubuted region with the scope = DISTRIBUTED_ACK on
@@ -208,7 +210,7 @@ public class PartitionedRegionWithSameNameDUnitTest extends
     vmList = addNodeToList(startIndexForNode, endIndexForNode);
     createDistributedRegion(vmList, startIndexForRegion, endIndexForRegion,
         Scope.DISTRIBUTED_ACK, firstCreationFlag, multipleVMFlag);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testNameWithPartitionRegionFirstOnDifferentVM() - test completed successfully ");
   }
@@ -250,7 +252,7 @@ public class PartitionedRegionWithSameNameDUnitTest extends
     createPartitionRegion(vmList, startIndexForRegion, endIndexForRegion,
         localMaxMemory, redundancy, firstCreationFlag, multipleVMFlag);
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testNameWithLocalRegionFirstOnDifferentVM() - test completed successfully ");
   }
@@ -291,7 +293,7 @@ public class PartitionedRegionWithSameNameDUnitTest extends
     firstCreationFlag = true;
     createPartitionRegion(vmList, startIndexForRegion, endIndexForRegion,
         localMaxMemory, redundancy, firstCreationFlag, multipleVMFlag);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionRegionVsLocalRegionFirst() - test completed successfully ");
   }
@@ -333,7 +335,7 @@ public class PartitionedRegionWithSameNameDUnitTest extends
     firstCreationFlag = true;
     createDistributedRegion(vmList, startIndexForRegion, endIndexForRegion,
         Scope.LOCAL, firstCreationFlag, multipleVMFlag);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionRegionVsLocalRegionSecond() - test completed successfully ");
   }
@@ -365,12 +367,12 @@ public class PartitionedRegionWithSameNameDUnitTest extends
     firstCreationFlag = true;
     createPartitionRegion(vmList, startIndexForRegion, endIndexForRegion,
         localMaxMemory, redundancy, firstCreationFlag, multipleVMFlag);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testWithPartitionedRegionAsParentRegionAndDistributedSubRegion() - Parent region as partitioned region is created ");
     // create subregion of partition region
     createSubRegionOfPartitionedRegion(vmList, DISTRIBUTED_REGION);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testWithPartitionedRegionAsParentRegionAndDistributedSubRegion() completed Successfully ");
   }
@@ -403,12 +405,12 @@ public class PartitionedRegionWithSameNameDUnitTest extends
     firstCreationFlag = true;
     createPartitionRegion(vmList, startIndexForRegion, endIndexForRegion,
         localMaxMemory, redundancy, firstCreationFlag, multipleVMFlag);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testWithPartitionedRegionAsParentRegionAndPartitionedSubRegion() - Parent region as partitioned region is created ");
     // create subregion of partition region
     createSubRegionOfPartitionedRegion(vmList, PARTITIONED_REGION);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testWithPartitionedRegionAsParentRegionAndPartitionedSubRegion() completed Successfully ");
   }
@@ -440,7 +442,7 @@ public class PartitionedRegionWithSameNameDUnitTest extends
     firstCreationFlag = true;
     createDistributedRegion(vmList, startIndexForRegion, endIndexForRegion,
         Scope.DISTRIBUTED_ACK, firstCreationFlag, multipleVMFlag);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "testWithSubRegionPartitionedRegionFirst() - Parent region is created");
     // creating distributed region as subregion of parent on vm0
     prPrefix = "child_region";
@@ -448,7 +450,7 @@ public class PartitionedRegionWithSameNameDUnitTest extends
     endIndexForNode = 1;
     vmList = addNodeToList(startIndexForNode, endIndexForNode);
     createPartitionedSubRegion(vmList, firstCreationFlag);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testWithSubRegionPartitionedRegionFirst() - Partitioned sub region on vm0 ");
     // creating partiton region as subregion of parent region with the same name
@@ -457,7 +459,7 @@ public class PartitionedRegionWithSameNameDUnitTest extends
     endIndexForNode = 4;
     vmList = addNodeToList(startIndexForNode, endIndexForNode);
     createDistributedSubRegion(vmList, firstCreationFlag);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "testWithSubRegionPartitionedRegionFirst() completed successfully ");
 
   }
@@ -489,7 +491,7 @@ public class PartitionedRegionWithSameNameDUnitTest extends
     firstCreationFlag = true;
     createDistributedRegion(vmList, startIndexForRegion, endIndexForRegion,
         Scope.DISTRIBUTED_ACK, firstCreationFlag, multipleVMFlag);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "testWithSubRegionDistributedRegionFirst() - Parent region is created");
     // creating distributed region as subregion of parent on vm0
     prPrefix = "child_region";
@@ -497,7 +499,7 @@ public class PartitionedRegionWithSameNameDUnitTest extends
     endIndexForNode = 1;
     vmList = addNodeToList(startIndexForNode, endIndexForNode);
     createDistributedSubRegion(vmList, firstCreationFlag);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testWithSubRegionDistributedRegionFirst() - Distributed sub region on vm0 ");
     // creating partiton region as subregion of parent region with the same name
@@ -506,7 +508,7 @@ public class PartitionedRegionWithSameNameDUnitTest extends
     endIndexForNode = 4;
     vmList = addNodeToList(startIndexForNode, endIndexForNode);
     createPartitionedSubRegion(vmList, firstCreationFlag);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "testWithSubRegionDistributedRegionFirst() completed successfully ");
 
   }
@@ -617,7 +619,7 @@ public class PartitionedRegionWithSameNameDUnitTest extends
           case DISTRIBUTED_REGION: {
             Region childRegion = parentRegion.createSubregion("child_region",
                 ra);
-            getLogWriter().info(
+            LogWriterUtils.getLogWriter().info(
                 "Distributed Subregion is created as : "
                     + childRegion.getName());
           }
@@ -625,7 +627,7 @@ public class PartitionedRegionWithSameNameDUnitTest extends
           case PARTITIONED_REGION: {
             Region childRegion = parentRegion.createSubregion("child_region",
                 createRegionAttrsForPR(0, 200));
-            getLogWriter().info(
+            LogWriterUtils.getLogWriter().info(
                 "Partitioned Subregion is created as : "
                     + childRegion.getName());
 
@@ -748,7 +750,7 @@ public class PartitionedRegionWithSameNameDUnitTest extends
               cache.createRegion(innerPrPrefix + i, ra);
             }
             catch (RegionExistsException ex) {
-              fail(
+              Assert.fail(
                   "Got incorrect exception because the partition region being created prior to local region",
                   ex);
             }
@@ -879,7 +881,7 @@ public class PartitionedRegionWithSameNameDUnitTest extends
             }
           }
         }
-        getLogWriter()
+        LogWriterUtils.getLogWriter()
             .info(
                 "createMultiplePartitionRegion() - Partition Regions Successfully Completed ");
       }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PutAllDAckDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PutAllDAckDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PutAllDAckDUnitTest.java
index b192a43..7fd0c16 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PutAllDAckDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PutAllDAckDUnitTest.java
@@ -38,8 +38,10 @@ import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.cache.util.CacheWriterAdapter;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.distributed.DistributedSystem;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -71,15 +73,16 @@ public class PutAllDAckDUnitTest extends DistributedTestCase {
       VM vm1 = host.getVM(1);
       vm0.invoke(PutAllDAckDUnitTest.class, "createCacheForVM0");
       vm1.invoke(PutAllDAckDUnitTest.class, "createCacheForVM1");
-      getLogWriter().fine("Cache created successfully");
+      LogWriterUtils.getLogWriter().fine("Cache created successfully");
     }
     
-    public void tearDown2(){
-        Host host = Host.getHost(0);
-        VM vm0 = host.getVM(0);
-        VM vm1 = host.getVM(1);
-        vm0.invoke(PutAllDAckDUnitTest.class, "closeCache");
-        vm1.invoke(PutAllDAckDUnitTest.class, "closeCache");
+    @Override
+    protected final void preTearDown() throws Exception {
+      Host host = Host.getHost(0);
+      VM vm0 = host.getVM(0);
+      VM vm1 = host.getVM(1);
+      vm0.invoke(PutAllDAckDUnitTest.class, "closeCache");
+      vm1.invoke(PutAllDAckDUnitTest.class, "closeCache");
     }
     
     public static void createCacheForVM0() throws Exception {
@@ -144,7 +147,7 @@ public class PutAllDAckDUnitTest extends DistributedTestCase {
                 obj = region.put(ob, str);
             }
         }catch(Exception ex){
-            fail("Failed while region.put", ex);
+            Assert.fail("Failed while region.put", ex);
         }
         return obj;
     }//end of putMethod
@@ -162,7 +165,7 @@ public class PutAllDAckDUnitTest extends DistributedTestCase {
             region.putAll(m);
             
         }catch(Exception ex){
-            fail("Failed while region.putAll", ex);
+            Assert.fail("Failed while region.putAll", ex);
         }
     }//end of putAllMethod
     
@@ -214,7 +217,7 @@ public class PutAllDAckDUnitTest extends DistributedTestCase {
 //             }
             
             beforeCreateputAllcounter++;
-            getLogWriter().fine("*******BeforeCreate*****");
+            LogWriterUtils.getLogWriter().fine("*******BeforeCreate*****");
             beforeCreate = true;
         }
     }
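
Note (illustrative, not from the commit): the tearDown2 rewrite above is the pattern used across these DistributedTestCase subclasses -- the per-test cleanup moves into the new preTearDown() hook, presumably invoked by the refactored base class before its own tear-down. A hedged sketch of the target shape, with MyDAckDUnitTest and closeCache as hypothetical stand-ins:

  @Override
  protected final void preTearDown() throws Exception {
    Host host = Host.getHost(0);
    host.getVM(0).invoke(MyDAckDUnitTest.class, "closeCache");
    host.getVM(1).invoke(MyDAckDUnitTest.class, "closeCache");
  }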

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PutAllGlobalDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PutAllGlobalDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PutAllGlobalDUnitTest.java
index 0aa77f0..c8f0d39 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PutAllGlobalDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PutAllGlobalDUnitTest.java
@@ -44,10 +44,14 @@ import com.gemstone.gemfire.cache.util.CacheWriterAdapter;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.distributed.internal.locks.DLockGrantor;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -85,17 +89,18 @@ public class PutAllGlobalDUnitTest extends DistributedTestCase {
       VM vm1 = host.getVM(1);
       vm0.invoke(PutAllGlobalDUnitTest.class, "createCacheForVM0");
       vm1.invoke(PutAllGlobalDUnitTest.class, "createCacheForVM1");
-      getLogWriter().fine("Cache created successfully");
+      LogWriterUtils.getLogWriter().fine("Cache created successfully");
     }
     
-    public void tearDown2(){
-        Host host = Host.getHost(0);
-        VM vm0 = host.getVM(0);
-        VM vm1 = host.getVM(1);
-        vm0.invoke(PutAllGlobalDUnitTest.class, "closeCache");
-        vm1.invoke(PutAllGlobalDUnitTest.class, "closeCache");
-        cache = null;
-        invokeInEveryVM(new SerializableRunnable() { public void run() { cache = null; } });
+    @Override
+    protected final void preTearDown() throws Exception {
+      Host host = Host.getHost(0);
+      VM vm0 = host.getVM(0);
+      VM vm1 = host.getVM(1);
+      vm0.invoke(PutAllGlobalDUnitTest.class, "closeCache");
+      vm1.invoke(PutAllGlobalDUnitTest.class, "closeCache");
+      cache = null;
+      Invoke.invokeInEveryVM(new SerializableRunnable() { public void run() { cache = null; } });
     }
     
     public static void createCacheForVM0(){
@@ -181,36 +186,36 @@ public class PutAllGlobalDUnitTest extends DistributedTestCase {
               long startTime = 0;
                 try{
                     Thread.sleep(500);
-                    getLogWriter().info("async2 proceeding with put operation");
+                    LogWriterUtils.getLogWriter().info("async2 proceeding with put operation");
                     startTime = System.currentTimeMillis();
                     region.put(new Integer(1),"mapVal");
-                    getLogWriter().info("async2 done with put operation");
+                    LogWriterUtils.getLogWriter().info("async2 done with put operation");
                     fail("Should have thrown TimeoutException");
                 }catch(TimeoutException Tx){
                    // Tx.printStackTrace();
-                    getLogWriter().info("PASS: As expected Caught TimeoutException ");
+                    LogWriterUtils.getLogWriter().info("PASS: As expected Caught TimeoutException ");
                     if (startTime + TIMEOUT_PERIOD + DLockGrantor.GRANTOR_THREAD_MAX_WAIT /* slop of grantor max wait ms */ < System.currentTimeMillis()) {
-                      getLogWriter().warning("though this test passed, the put() timed out in "
+                      LogWriterUtils.getLogWriter().warning("though this test passed, the put() timed out in "
                           + (System.currentTimeMillis() - startTime) +
                           " instead of the expected " + TIMEOUT_PERIOD + " milliseconds");
                     }
                 }
                 catch(Exception ex){
-                  fail("async2 threw unexpected exception", ex);
+                  Assert.fail("async2 threw unexpected exception", ex);
                     //ex.printStackTrace();
                 } 
             }
         });
         
-        DistributedTestCase.join(async2, 30 * 1000, getLogWriter());
+        ThreadUtils.join(async2, 30 * 1000);
         if (async2.exceptionOccurred()) {
-          DistributedTestCase.join(async1, 30 * 1000, getLogWriter());
-          fail("async2 failed", async2.getException());
+          ThreadUtils.join(async1, 30 * 1000);
+          Assert.fail("async2 failed", async2.getException());
         }
         
-        DistributedTestCase.join(async1, 30 * 1000, getLogWriter());
+        ThreadUtils.join(async1, 30 * 1000);
         if (async1.exceptionOccurred()) {
-          fail("async1 failed", async1.getException());
+          Assert.fail("async1 failed", async1.getException());
         }
         
     }//end of test case1
@@ -220,18 +225,18 @@ public class PutAllGlobalDUnitTest extends DistributedTestCase {
     public static void putAllMethod() throws Exception {
         Map m = new HashMap();
         serverSocket.accept();
-        getLogWriter().info("async1 connection received - continuing with putAll operation");
+        LogWriterUtils.getLogWriter().info("async1 connection received - continuing with putAll operation");
         serverSocket.close();
         try{
           for (int i=1; i<2; i++) {
             m.put(new Integer(i), String.valueOf(i));
           }
             region.putAll(m);
-            getLogWriter().info("async1 done with putAll operation");
+            LogWriterUtils.getLogWriter().info("async1 done with putAll operation");
             
         }catch(Exception ex){
 //            ex.printStackTrace();
-            fail("Failed while region.putAll", ex);
+            Assert.fail("Failed while region.putAll", ex);
         }
     }//end of putAllMethod
     
@@ -276,13 +281,13 @@ public class PutAllGlobalDUnitTest extends DistributedTestCase {
     
     static class BeforeCreateCallback extends CacheWriterAdapter {
         public void beforeCreate(EntryEvent event){
-          getLogWriter().info("beforeCreate invoked for " + event.getKey());
+          LogWriterUtils.getLogWriter().info("beforeCreate invoked for " + event.getKey());
             try{
                 Thread.sleep(5000);
             }catch(InterruptedException ex) {
                 fail("interrupted");
             }
-          getLogWriter().info("beforeCreate done for " + event.getKey());
+          LogWriterUtils.getLogWriter().info("beforeCreate done for " + event.getKey());
             
         }
     }// end of BeforeCreateCallback
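
Note (illustrative, not from the commit): PutAllGlobalDUnitTest also shows the thread-handling half of the refactor -- DistributedTestCase.join(thread, timeout, logWriter) becomes ThreadUtils.join(thread, timeout), and the fail(String, Throwable) overload now lives on the dunit Assert class. Roughly, against an AsyncInvocation (the invoked class and method are placeholders):

  AsyncInvocation async = vm0.invokeAsync(MyGlobalDUnitTest.class, "putAllMethod");
  ThreadUtils.join(async, 30 * 1000);   // was: DistributedTestCase.join(async, 30 * 1000, getLogWriter())
  if (async.exceptionOccurred()) {
    Assert.fail("async invocation failed", async.getException());
  }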

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/RemoteTransactionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/RemoteTransactionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/RemoteTransactionDUnitTest.java
index 9afffe4..81b2a73 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/RemoteTransactionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/RemoteTransactionDUnitTest.java
@@ -96,12 +96,15 @@ import com.gemstone.gemfire.internal.cache.execute.data.Customer;
 import com.gemstone.gemfire.internal.cache.execute.data.Order;
 import com.gemstone.gemfire.internal.cache.execute.data.OrderId;
 import com.gemstone.gemfire.internal.cache.versions.VersionTag;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * @author sbawaska
@@ -123,7 +126,7 @@ public class RemoteTransactionDUnitTest extends CacheTestCase {
       //TXManagerImpl mgr = getGemfireCache().getTxManager();
       //assertEquals(0, mgr.hostedTransactionsInProgressForTest());
       final TXManagerImpl mgr = getGemfireCache().getTxManager();
-      waitForCriterion(new WaitCriterion() {
+      Wait.waitForCriterion(new WaitCriterion() {
         @Override
         public boolean done() {
           return mgr.hostedTransactionsInProgressForTest() == 0;
@@ -149,12 +152,11 @@ public class RemoteTransactionDUnitTest extends CacheTestCase {
   }
   
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
     try {
-      invokeInEveryVM(verifyNoTxState);
+      Invoke.invokeInEveryVM(verifyNoTxState);
     } finally {
       closeAllCache();
-      super.tearDown2();
     }
   }
   
@@ -251,7 +253,7 @@ public class RemoteTransactionDUnitTest extends CacheTestCase {
     }
     public Object call() throws Exception {
       CacheTransactionManager mgr = getGemfireCache().getTxManager();
-      getLogWriter().fine("testTXPut starting tx");
+      LogWriterUtils.getLogWriter().fine("testTXPut starting tx");
       mgr.begin();
       Region<CustId, Customer> custRegion = getCache().getRegion(CUSTOMER);
       Region<OrderId, Order> orderRegion = getCache().getRegion(ORDER);
@@ -2537,7 +2539,7 @@ public class RemoteTransactionDUnitTest extends CacheTestCase {
         PartitionedRegion pr = (PartitionedRegion)getGemfireCache().getRegion(CUSTOMER);
         Set filter = new HashSet();
         filter.add(expectedCustId);
-        getLogWriter().info("SWAP:inside NestedTxFunc calling func2:");
+        LogWriterUtils.getLogWriter().info("SWAP:inside NestedTxFunc calling func2:");
         r.put(expectedCustId, expectedCustomer);
         FunctionService.onRegion(pr).withFilter(filter).execute(new NestedTxFunction2()).getResult();
         assertNotNull(getGemfireCache().getTxManager().getTXState());
@@ -3463,7 +3465,7 @@ public class RemoteTransactionDUnitTest extends CacheTestCase {
         ClientCacheFactory ccf = new ClientCacheFactory();
         ccf.addPoolServer("localhost"/*getServerHostName(Host.getHost(0))*/, port);
         ccf.setPoolSubscriptionEnabled(true);
-        ccf.set("log-level", getDUnitLogLevel());
+        ccf.set("log-level", LogWriterUtils.getDUnitLogLevel());
         ClientCache cCache = getClientCache(ccf);
         ClientRegionFactory<Integer, String> crf = cCache
             .createClientRegionFactory(isEmpty ? ClientRegionShortcut.PROXY
@@ -3643,7 +3645,7 @@ protected static class ClientListener extends CacheListenerAdapter {
             return "listener was never invoked";
           }
         };
-        DistributedTestCase.waitForCriterion(waitForListenerInvocation, 10 * 1000, 10, true);
+        Wait.waitForCriterion(waitForListenerInvocation, 10 * 1000, 10, true);
         return null;
       }
     });
@@ -3675,7 +3677,7 @@ protected static class ClientListener extends CacheListenerAdapter {
         ClientCacheFactory ccf = new ClientCacheFactory();
         ccf.addPoolServer("localhost"/*getServerHostName(Host.getHost(0))*/, port);
         ccf.setPoolSubscriptionEnabled(true);
-        ccf.set("log-level", getDUnitLogLevel());
+        ccf.set("log-level", LogWriterUtils.getDUnitLogLevel());
         ClientCache cCache = getClientCache(ccf);
         ClientRegionFactory<Integer, String> crf = cCache
             .createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY);
@@ -3718,7 +3720,7 @@ protected static class ClientListener extends CacheListenerAdapter {
             return "listener invoked:"+l.invoked;
           }
         };
-        DistributedTestCase.waitForCriterion(wc, 10*1000, 200, true);
+        Wait.waitForCriterion(wc, 10*1000, 200, true);
         return null;
       }
     });
@@ -3764,7 +3766,7 @@ protected static class ClientListener extends CacheListenerAdapter {
             return "listener was never invoked";
           }
         };
-        DistributedTestCase.waitForCriterion(waitForListenerInvocation, 10 * 1000, 10, true);
+        Wait.waitForCriterion(waitForListenerInvocation, 10 * 1000, 10, true);
         return null;
       }
     });
@@ -3914,7 +3916,7 @@ protected static class ClientListener extends CacheListenerAdapter {
       
       //Putting a string key causes this, the partition resolver
       //doesn't handle it.
-      addExpectedException("IllegalStateException");
+      IgnoredException.addIgnoredException("IllegalStateException");
       assertEquals(Status.STATUS_ACTIVE, tx.getStatus());
       final CountDownLatch latch = new CountDownLatch(1);
       Thread t = new Thread(new Runnable() {
@@ -3953,12 +3955,12 @@ protected static class ClientListener extends CacheListenerAdapter {
       private int count;
       @Override
       public void afterCreate(EntryEvent event) {
-        getLogWriter().info("afterCreate invoked for " + event);
+        LogWriterUtils.getLogWriter().info("afterCreate invoked for " + event);
         count++;
       }
       @Override
       public void afterUpdate(EntryEvent event) {
-        getLogWriter().info("afterUpdate invoked for " + event);
+        LogWriterUtils.getLogWriter().info("afterUpdate invoked for " + event);
         count++;
       }
     }
@@ -4102,11 +4104,11 @@ protected static class ClientListener extends CacheListenerAdapter {
         // is not hosting the tx. But it will not allow an expiration
         // initiated on the hosting jvm.
         // tx is hosted in vm2 so expiration can happen in vm1.
-        DistributedTestCase.waitForCriterion(wc2, 30000, 5, true);
+        Wait.waitForCriterion(wc2, 30000, 5, true);
         getCache().getCacheTransactionManager().resume(tx);
         assertTrue(r.containsKey("key"));
         getCache().getCacheTransactionManager().commit();
-        DistributedTestCase.waitForCriterion(wc2, 30000, 5, true);
+        Wait.waitForCriterion(wc2, 30000, 5, true);
         return null;
       }
     });

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/RemoveAllDAckDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/RemoveAllDAckDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/RemoveAllDAckDUnitTest.java
index 7ccdd6a..9184d94 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/RemoveAllDAckDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/RemoveAllDAckDUnitTest.java
@@ -36,8 +36,10 @@ import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.cache.util.CacheWriterAdapter;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.distributed.DistributedSystem;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -68,15 +70,16 @@ public class RemoveAllDAckDUnitTest extends DistributedTestCase {
       VM vm1 = host.getVM(1);
       vm0.invoke(RemoveAllDAckDUnitTest.class, "createCacheForVM0");
       vm1.invoke(RemoveAllDAckDUnitTest.class, "createCacheForVM1");
-      getLogWriter().fine("Cache created successfully");
+      LogWriterUtils.getLogWriter().fine("Cache created successfully");
     }
     
-    public void tearDown2(){
-        Host host = Host.getHost(0);
-        VM vm0 = host.getVM(0);
-        VM vm1 = host.getVM(1);
-        vm0.invoke(RemoveAllDAckDUnitTest.class, "closeCache");
-        vm1.invoke(RemoveAllDAckDUnitTest.class, "closeCache");
+    @Override
+    protected final void preTearDown() throws Exception {
+      Host host = Host.getHost(0);
+      VM vm0 = host.getVM(0);
+      VM vm1 = host.getVM(1);
+      vm0.invoke(RemoveAllDAckDUnitTest.class, "closeCache");
+      vm1.invoke(RemoveAllDAckDUnitTest.class, "closeCache");
     }
     
     public static void createCacheForVM0() throws Exception {
@@ -139,7 +142,7 @@ public class RemoveAllDAckDUnitTest extends DistributedTestCase {
                 obj = region.put(ob, str);
             }
         }catch(Exception ex){
-            fail("Failed while region.put", ex);
+            Assert.fail("Failed while region.put", ex);
         }
         return obj;
     }//end of putMethod
@@ -159,7 +162,7 @@ public class RemoveAllDAckDUnitTest extends DistributedTestCase {
             beforeDestroyRemoveAllcounter++;
             assertEquals(true, event.getOperation().isRemoveAll());
             assertEquals("removeAllCallback", event.getCallbackArgument());
-            getLogWriter().fine("*******BeforeDestroy*****");
+            LogWriterUtils.getLogWriter().fine("*******BeforeDestroy*****");
             beforeDestroy = true;
         }
     }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/RemoveDAckDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/RemoveDAckDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/RemoveDAckDUnitTest.java
index b32f4f5..4b92161 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/RemoveDAckDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/RemoveDAckDUnitTest.java
@@ -35,6 +35,7 @@ import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -65,16 +66,16 @@ public class RemoveDAckDUnitTest extends DistributedTestCase {
       VM vm1 = host.getVM(1);
       vm0.invoke(RemoveDAckDUnitTest.class, "createCacheVM0");
       vm1.invoke(RemoveDAckDUnitTest.class, "createCacheVM1");
-      getLogWriter().fine("Cache created in successfully");
+      LogWriterUtils.getLogWriter().fine("Cache created in successfully");
     }
     
-    public void tearDown2(){
-        Host host = Host.getHost(0);
-        VM vm0 = host.getVM(0);
-        VM vm1 = host.getVM(1);
-        vm0.invoke(RemoveDAckDUnitTest.class, "closeCache");
-        vm1.invoke(RemoveDAckDUnitTest.class, "closeCache");
-        
+    @Override
+    protected final void preTearDown() throws Exception {
+      Host host = Host.getHost(0);
+      VM vm0 = host.getVM(0);
+      VM vm1 = host.getVM(1);
+      vm0.invoke(RemoveDAckDUnitTest.class, "closeCache");
+      vm1.invoke(RemoveDAckDUnitTest.class, "closeCache");
     }
     
     public static void createCacheVM0(){

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/RemoveGlobalDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/RemoveGlobalDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/RemoveGlobalDUnitTest.java
index 008d09b..5d6131c 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/RemoveGlobalDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/RemoveGlobalDUnitTest.java
@@ -39,7 +39,9 @@ import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -71,12 +73,12 @@ public class RemoveGlobalDUnitTest extends DistributedTestCase {
       vm1.invoke(RemoveGlobalDUnitTest.class, "createCache");
     }
     
-    public void tearDown2(){
-        vm0.invoke(RemoveGlobalDUnitTest.class, "resetFlag");
-        vm1.invoke(RemoveGlobalDUnitTest.class, "resetFlag");
-        vm0.invoke(RemoveGlobalDUnitTest.class, "closeCache");
-        vm1.invoke(RemoveGlobalDUnitTest.class, "closeCache");
-        
+    @Override
+    protected final void preTearDown() throws Exception {
+      vm0.invoke(RemoveGlobalDUnitTest.class, "resetFlag");
+      vm1.invoke(RemoveGlobalDUnitTest.class, "resetFlag");
+      vm0.invoke(RemoveGlobalDUnitTest.class, "closeCache");
+      vm1.invoke(RemoveGlobalDUnitTest.class, "closeCache");
     }
     
     public static void resetFlag()
@@ -150,7 +152,7 @@ public class RemoveGlobalDUnitTest extends DistributedTestCase {
             }
         });
         
-        DistributedTestCase.join(async, 30 * 1000, getLogWriter());
+        ThreadUtils.join(async, 30 * 1000);
         if(async.exceptionOccurred())
           throw async.getException();
         
@@ -223,7 +225,7 @@ public class RemoveGlobalDUnitTest extends DistributedTestCase {
             }
         });
         
-        DistributedTestCase.join(async, 30 * 1000, getLogWriter());
+        ThreadUtils.join(async, 30 * 1000);
         if(async.exceptionOccurred())
           throw async.getException();
         
@@ -241,7 +243,7 @@ public class RemoveGlobalDUnitTest extends DistributedTestCase {
             }catch(InterruptedException ex){
                 fail("interrupted");
             }
-            getLogWriter().fine("quitingfromcachewriter");
+            LogWriterUtils.getLogWriter().fine("quitingfromcachewriter");
         }
     }///////////    
     

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/SimpleDiskRegionJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/SimpleDiskRegionJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/SimpleDiskRegionJUnitTest.java
index 10bdab3..2f14119 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/SimpleDiskRegionJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/SimpleDiskRegionJUnitTest.java
@@ -34,7 +34,7 @@ import org.junit.experimental.categories.Category;
 import static org.junit.Assert.*;
 
 import com.gemstone.gemfire.StatisticsFactory;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
@@ -363,11 +363,11 @@ public class SimpleDiskRegionJUnitTest extends DiskRegionTestingBase
     thread4.start();
     thread5.start();
 
-    DistributedTestCase.join(thread1, 30 * 1000, null);
-    DistributedTestCase.join(thread2, 30 * 1000, null);
-    DistributedTestCase.join(thread3, 30 * 1000, null);
-    DistributedTestCase.join(thread4, 30 * 1000, null);
-    DistributedTestCase.join(thread5, 30 * 1000, null);
+    ThreadUtils.join(thread1, 30 * 1000);
+    ThreadUtils.join(thread2, 30 * 1000);
+    ThreadUtils.join(thread3, 30 * 1000);
+    ThreadUtils.join(thread4, 30 * 1000);
+    ThreadUtils.join(thread5, 30 * 1000);
 
     if (keyIds.size() != 50000) {
       fail("Size not equal to 5000 as expected but is " + keyIds.size());

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/SingleHopStatsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/SingleHopStatsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/SingleHopStatsDUnitTest.java
index a6023c7..4b016a4 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/SingleHopStatsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/SingleHopStatsDUnitTest.java
@@ -41,9 +41,15 @@ import com.gemstone.gemfire.internal.cache.execute.data.CustId;
 import com.gemstone.gemfire.internal.cache.execute.data.OrderId;
 import com.gemstone.gemfire.internal.cache.execute.data.ShipmentId;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheServerTestUtil;
+import com.gemstone.gemfire.test.dunit.Assert;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 public class SingleHopStatsDUnitTest extends CacheTestCase{
 
@@ -105,28 +111,29 @@ public class SingleHopStatsDUnitTest extends CacheTestCase{
     member3 = host.getVM(3);
   }
 
-  public void tearDown2() throws Exception {
+  @Override
+  protected final void preTearDownCacheTestCase() throws Exception {
+    // close the clients first
+    member0.invoke(SingleHopStatsDUnitTest.class, "closeCache");
+    member1.invoke(SingleHopStatsDUnitTest.class, "closeCache");
+    member2.invoke(SingleHopStatsDUnitTest.class, "closeCache");
+    member3.invoke(SingleHopStatsDUnitTest.class, "closeCache");
+    closeCache();
+  }
+  
+  @Override
+  protected final void postTearDownCacheTestCase() throws Exception {
     try {
-
-      // close the clients first
-      member0.invoke(SingleHopStatsDUnitTest.class, "closeCache");
-      member1.invoke(SingleHopStatsDUnitTest.class, "closeCache");
-      member2.invoke(SingleHopStatsDUnitTest.class, "closeCache");
-      member3.invoke(SingleHopStatsDUnitTest.class, "closeCache");
-      closeCache();
-
-      super.tearDown2();
-
       member0 = null;
       member1 = null;
       member2 = null;
       member3 = null;
       cache = null;
-      invokeInEveryVM(new SerializableRunnable() { public void run() { cache = null; } });
+      Invoke.invokeInEveryVM(new SerializableRunnable() { public void run() { cache = null; } });
 
     }
     finally {
-      unregisterAllDataSerializersFromAllVms();
+      DistributedTestUtils.unregisterAllDataSerializersFromAllVms();
     }
   }
 
@@ -239,7 +246,7 @@ public class SingleHopStatsDUnitTest extends CacheTestCase{
       server.start();
     }
     catch (IOException e) {
-      fail("Failed to start server ", e);
+      Assert.fail("Failed to start server ", e);
     }
 
     if (colocation.equals("No_Colocation")) {
@@ -249,7 +256,7 @@ public class SingleHopStatsDUnitTest extends CacheTestCase{
         attr.setDataPolicy(DataPolicy.REPLICATE);
         region = cache.createRegion(Region_Name, attr.create());
         assertNotNull(region);
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "Distributed Region " + Region_Name + " created Successfully :"
                 + region.toString());
       }else{
@@ -260,7 +267,7 @@ public class SingleHopStatsDUnitTest extends CacheTestCase{
       attr.setPartitionAttributes(paf.create());
       region = cache.createRegion(Region_Name, attr.create());
       assertNotNull(region);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "Partitioned Region " + Region_Name + " created Successfully :"
               + region.toString());
       }
@@ -274,7 +281,7 @@ public class SingleHopStatsDUnitTest extends CacheTestCase{
       attr.setPartitionAttributes(paf.create());
       customerRegion = cache.createRegion("CUSTOMER", attr.create());
       assertNotNull(customerRegion);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "Partitioned Region CUSTOMER created Successfully :"
               + customerRegion.toString());
 
@@ -286,7 +293,7 @@ public class SingleHopStatsDUnitTest extends CacheTestCase{
       attr.setPartitionAttributes(paf.create());
       orderRegion = cache.createRegion("ORDER", attr.create());
       assertNotNull(orderRegion);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "Partitioned Region ORDER created Successfully :"
               + orderRegion.toString());
 
@@ -298,7 +305,7 @@ public class SingleHopStatsDUnitTest extends CacheTestCase{
       attr.setPartitionAttributes(paf.create());
       shipmentRegion = cache.createRegion("SHIPMENT", attr.create());
       assertNotNull(shipmentRegion);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "Partitioned Region SHIPMENT created Successfully :"
               + shipmentRegion.toString());
     }
@@ -313,7 +320,7 @@ public class SingleHopStatsDUnitTest extends CacheTestCase{
       RegionAttributes attrs = factory.create();
       region = cache.createRegion(Region_Name, attrs);
       assertNotNull(region);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "Region " + Region_Name + " created Successfully :" + region.toString());
     }
     else {
@@ -322,7 +329,7 @@ public class SingleHopStatsDUnitTest extends CacheTestCase{
       RegionAttributes attrs = factory.create();
       customerRegion = cache.createRegion("CUSTOMER", attrs);
       assertNotNull(customerRegion);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "Partitioned Region CUSTOMER created Successfully :"
               + customerRegion.toString());
 
@@ -331,7 +338,7 @@ public class SingleHopStatsDUnitTest extends CacheTestCase{
       attrs = factory.create();
       orderRegion = cache.createRegion("ORDER", attrs);
       assertNotNull(orderRegion);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "Partitioned Region ORDER created Successfully :"
               + orderRegion.toString());
 
@@ -340,7 +347,7 @@ public class SingleHopStatsDUnitTest extends CacheTestCase{
       attrs = factory.create();
       shipmentRegion = cache.createRegion("SHIPMENT", attrs);
       assertNotNull(shipmentRegion);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "Partitioned Region SHIPMENT created Successfully :"
               + shipmentRegion.toString());
     }
@@ -368,7 +375,7 @@ public class SingleHopStatsDUnitTest extends CacheTestCase{
         cms = ((GemFireCacheImpl)cache).getClientMetadataService();
         // since PR metadata is fetched in a background executor thread
         // we need to wait for it to arrive for a bit
-        waitForCriterion(new WaitCriterion(){
+        Wait.waitForCriterion(new WaitCriterion(){
           public boolean done() {
             return regionMetaData.size() == 1;
           }
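
Note (illustrative, not from the commit): for tests extending CacheTestCase rather than DistributedTestCase directly (RemoteTransactionDUnitTest and SingleHopStatsDUnitTest above), the old tearDown2() body is split across two new hooks relative to the old super.tearDown2() call -- preTearDownCacheTestCase() for the work that ran before it, postTearDownCacheTestCase() for the work that ran after it. A rough sketch under those assumptions; MyCacheDUnitTest, closeCache and the static cache field are hypothetical:

  @Override
  protected final void preTearDownCacheTestCase() throws Exception {
    // close the remote caches before the framework tears down the local one
    Host.getHost(0).getVM(0).invoke(MyCacheDUnitTest.class, "closeCache");
  }

  @Override
  protected final void postTearDownCacheTestCase() throws Exception {
    // clear static state in every VM once the cache tear-down has finished
    Invoke.invokeInEveryVM(new SerializableRunnable() { public void run() { cache = null; } });
  }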

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/SizingFlagDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/SizingFlagDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/SizingFlagDUnitTest.java
index 19170f2..90360ef 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/SizingFlagDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/SizingFlagDUnitTest.java
@@ -46,6 +46,7 @@ import com.gemstone.gemfire.distributed.DistributedMember;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
@@ -358,7 +359,7 @@ public class SizingFlagDUnitTest extends CacheTestCase {
         try {
           region.getAttributesMutator().addCacheListener(new TestCacheListener());
         } catch (Exception e) {
-          fail("couldn't create index", e);
+          Assert.fail("couldn't create index", e);
         }
       }
     });

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/SystemFailureDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/SystemFailureDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/SystemFailureDUnitTest.java
index e3858fb..e47892e 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/SystemFailureDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/SystemFailureDUnitTest.java
@@ -56,7 +56,7 @@ public class SystemFailureDUnitTest extends DistributedCacheTestCase {
   /////////  Public test methods
 
   public void testNullFailure() {
-    getLogWriter().info("TODO: this test needs to use VM#bounce.");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("TODO: this test needs to use VM#bounce.");
     try {
       SystemFailure.initiateFailure(null);
       fail("Null failure set allowed");
@@ -308,7 +308,7 @@ public class SystemFailureDUnitTest extends DistributedCacheTestCase {
   
   static private final Runnable listener1 = new Runnable() {
     public void run() {
-      getLogWriter().info("Inside of preListener1");
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Inside of preListener1");
       listenerCount.addAndGet(1);
     }
   };
@@ -398,7 +398,7 @@ public class SystemFailureDUnitTest extends DistributedCacheTestCase {
   
   protected static Boolean verifyConnected() {
     if (SystemFailure.getFailure() != null) {
-      fail("System failure present!", SystemFailure.getFailure());
+      com.gemstone.gemfire.test.dunit.Assert.fail("System failure present!", SystemFailure.getFailure());
       return Boolean.FALSE;
     }
     GemFireCacheImpl gfc = (GemFireCacheImpl)cache;
@@ -495,7 +495,7 @@ public class SystemFailureDUnitTest extends DistributedCacheTestCase {
   protected static void message(String s) {
     System.out.println(s);
     System.err.println(s);
-    getLogWriter().info(s);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(s);
     cache.getLogger().info(s);
   }
   
@@ -545,7 +545,7 @@ public class SystemFailureDUnitTest extends DistributedCacheTestCase {
       }
     }
     public void afterCreate(EntryEvent event) {
-      getLogWriter().info("Invoking afterCreate on listener; name=" +
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Invoking afterCreate on listener; name=" +
                           event.getKey());
       forceOutOfMemory();
     }
@@ -590,7 +590,7 @@ public class SystemFailureDUnitTest extends DistributedCacheTestCase {
         }
       }
       public void afterCreate(EntryEvent event) {
-        getLogWriter().info("Invoking afterCreate on listener; name=" +
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Invoking afterCreate on listener; name=" +
                             event.getKey());
         forceOutOfMemory();
       }
@@ -660,7 +660,7 @@ public class SystemFailureDUnitTest extends DistributedCacheTestCase {
     }
 
     public void afterCreate(EntryEvent event) {
-      getLogWriter().info("Invoking afterCreate on listener; name=" +
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Invoking afterCreate on listener; name=" +
                           event.getKey());
       forceLowMemory();
     }
@@ -676,7 +676,7 @@ public class SystemFailureDUnitTest extends DistributedCacheTestCase {
     }
     
     public void afterCreate(EntryEvent event) {
-      getLogWriter().info("Invoking afterCreate on listener; name=" +
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Invoking afterCreate on listener; name=" +
                           event.getKey());
       forceInternalError();
     }
@@ -692,7 +692,7 @@ public class SystemFailureDUnitTest extends DistributedCacheTestCase {
     }
     
     public void afterCreate(EntryEvent event) {
-      getLogWriter().info("Invoking afterCreate on listener; name=" +
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Invoking afterCreate on listener; name=" +
                           event.getKey());
       forceInternalError();
     }
@@ -704,7 +704,7 @@ public class SystemFailureDUnitTest extends DistributedCacheTestCase {
     }
     
     public void afterCreate(EntryEvent event) {
-      getLogWriter().info("Invoking afterCreate on listener; name=" +
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Invoking afterCreate on listener; name=" +
                           event.getKey());
       forceError();
     }
@@ -756,7 +756,7 @@ public class SystemFailureDUnitTest extends DistributedCacheTestCase {
   }
   
   protected void doCreateEntry(String name) {
-    LogWriter log = getLogWriter();
+    LogWriter log = com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter();
     log.info(
         "<ExpectedException action=add>" +
         "dunit.RMIException"

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/TXReservationMgrJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/TXReservationMgrJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/TXReservationMgrJUnitTest.java
index 663ef61..b1d1e64 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/TXReservationMgrJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/TXReservationMgrJUnitTest.java
@@ -30,7 +30,7 @@ import com.gemstone.gemfire.cache.CacheFactory;
 import com.gemstone.gemfire.cache.CommitConflictException;
 import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.distributed.DistributedSystem;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 @Category(IntegrationTest.class)
@@ -124,7 +124,7 @@ public class TXReservationMgrJUnitTest {
       threads[i].start();
     }
     for (int i=0; i < THREAD_COUNT; i++) {
-      DistributedTestCase.join(threads[i], 60 * 1000, null); // increased from 30 to 60 for parallel junit runs
+      ThreadUtils.join(threads[i], 60 * 1000); // increased from 30 to 60 for parallel junit runs
     }
     int invalidCount = 0;
     for (int i=0; i < KEY_COUNT; i++) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/TransactionsWithDeltaDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/TransactionsWithDeltaDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/TransactionsWithDeltaDUnitTest.java
index 06a8f09..fdb7ada 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/TransactionsWithDeltaDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/TransactionsWithDeltaDUnitTest.java
@@ -44,6 +44,7 @@ import com.gemstone.gemfire.internal.cache.execute.data.CustId;
 import com.gemstone.gemfire.internal.cache.execute.data.Order;
 import com.gemstone.gemfire.internal.cache.execute.data.OrderId;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -114,7 +115,7 @@ public class TransactionsWithDeltaDUnitTest extends CacheTestCase {
         ClientCacheFactory ccf = new ClientCacheFactory();
         ccf.addPoolServer("localhost"/*getServerHostName(Host.getHost(0))*/, port);
         ccf.setPoolSubscriptionEnabled(false);
-        ccf.set("log-level", getDUnitLogLevel());
+        ccf.set("log-level", LogWriterUtils.getDUnitLogLevel());
         ClientCache cCache = getClientCache(ccf);
         ClientRegionFactory<Integer, String> crf = cCache
             .createClientRegionFactory(isEmpty ? ClientRegionShortcut.PROXY
@@ -330,7 +331,7 @@ public class TransactionsWithDeltaDUnitTest extends CacheTestCase {
         pr.put(cust1, new Customer(1, "name1"));
         Iterator<CustId> it = pr.keySet().iterator();
         while (it.hasNext()) {
-          getLogWriter().info("SWAP:iterator1:"+pr.get(it.next()));
+          LogWriterUtils.getLogWriter().info("SWAP:iterator1:"+pr.get(it.next()));
         }
         Customer c = pr.get(cust1);
         assertNotNull(c);
@@ -348,10 +349,10 @@ public class TransactionsWithDeltaDUnitTest extends CacheTestCase {
         mgr.begin();
         Customer c = pr.get(cust1);
         c.setName("updatedName");
-        getLogWriter().info("SWAP:doingPut");
+        LogWriterUtils.getLogWriter().info("SWAP:doingPut");
         pr.put(cust1, c);
-        getLogWriter().info("SWAP:getfromtx:"+pr.get(cust1));
-        getLogWriter().info("SWAP:doingCommit");
+        LogWriterUtils.getLogWriter().info("SWAP:getfromtx:"+pr.get(cust1));
+        LogWriterUtils.getLogWriter().info("SWAP:doingCommit");
         assertEquals("updatedName", pr.get(cust1).getName());
         TXStateProxy tx = mgr.internalSuspend();
         assertEquals("name1", pr.get(cust1).getName());

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/control/RebalanceOperationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/control/RebalanceOperationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/control/RebalanceOperationDUnitTest.java
index 1ce05c7..6c2cc98 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/control/RebalanceOperationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/control/RebalanceOperationDUnitTest.java
@@ -73,11 +73,16 @@ import com.gemstone.gemfire.internal.cache.PartitionedRegionDataStore;
 import com.gemstone.gemfire.internal.cache.control.InternalResourceManager.ResourceObserverAdapter;
 import com.gemstone.gemfire.internal.cache.partitioned.BucketCountLoadProbe;
 import com.gemstone.gemfire.internal.cache.partitioned.LoadProbe;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * @author dsmith
@@ -88,12 +93,9 @@ public class RebalanceOperationDUnitTest extends CacheTestCase {
 
   private static final long MAX_WAIT = 60;
   
-  
-  
   @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-    invokeInEveryVM(new SerializableRunnable() {
+  protected final void postTearDownCacheTestCase() throws Exception {
+    Invoke.invokeInEveryVM(new SerializableRunnable() {
       public void run() {
         InternalResourceManager.setResourceObserver(null);
         System.clearProperty("gemfire.resource.manager.threads");
@@ -328,7 +330,7 @@ public class RebalanceOperationDUnitTest extends CacheTestCase {
   }
   
   public void enforceIp(final boolean simulate) {
-    invokeInEveryVM(new SerializableRunnable() {
+    Invoke.invokeInEveryVM(new SerializableRunnable() {
       public void run() {
         Properties props = new Properties();
         props.setProperty(DistributionConfig.ENFORCE_UNIQUE_HOST_NAME, "true");
@@ -426,7 +428,7 @@ public class RebalanceOperationDUnitTest extends CacheTestCase {
 
     } finally {
       disconnectFromDS();
-      invokeInEveryVM(new SerializableRunnable() {
+      Invoke.invokeInEveryVM(new SerializableRunnable() {
         public void run() {
           disconnectFromDS(); 
         }
@@ -559,7 +561,7 @@ public class RebalanceOperationDUnitTest extends CacheTestCase {
     
     } finally {
       disconnectFromDS();
-      invokeInEveryVM(new SerializableRunnable() {
+      Invoke.invokeInEveryVM(new SerializableRunnable() {
         public void run() {
           //clear the redundancy zone setting
           disconnectFromDS(); 
@@ -610,7 +612,7 @@ public class RebalanceOperationDUnitTest extends CacheTestCase {
         try {
           barrier.await(MAX_WAIT, TimeUnit.SECONDS);
         } catch (Exception e) {
-          fail("failed waiting for barrier", e);
+          Assert.fail("failed waiting for barrier", e);
         }
         observerCalled = true;
       } else {
@@ -764,7 +766,7 @@ public class RebalanceOperationDUnitTest extends CacheTestCase {
     checkBucketCount(vm2, "region2", 6);
     } finally {
       disconnectFromDS();
-      invokeInEveryVM(new SerializableRunnable() {
+      Invoke.invokeInEveryVM(new SerializableRunnable() {
         public void run() {
           //clear the redundancy zone setting
           disconnectFromDS(); 
@@ -819,9 +821,9 @@ public class RebalanceOperationDUnitTest extends CacheTestCase {
           .simulate()
           .getResults(MAX_WAIT, TimeUnit.SECONDS);
       } catch (InterruptedException e) {
-        fail("Interrupted waiting on rebalance", e);
+        Assert.fail("Interrupted waiting on rebalance", e);
       } catch (TimeoutException e) {
-        fail("Timeout waiting on rebalance", e);
+        Assert.fail("Timeout waiting on rebalance", e);
       }
     } else {
       try {
@@ -831,9 +833,9 @@ public class RebalanceOperationDUnitTest extends CacheTestCase {
         .start()
         .getResults(MAX_WAIT, TimeUnit.SECONDS);
       } catch (InterruptedException e) {
-        fail("Interrupted waiting on rebalance", e);
+        Assert.fail("Interrupted waiting on rebalance", e);
       } catch (TimeoutException e) {
-        fail("Timeout waiting on rebalance", e);
+        Assert.fail("Timeout waiting on rebalance", e);
       }
     }
     assertEquals(Collections.emptySet(), manager.getRebalanceOperations());
@@ -1098,7 +1100,7 @@ public class RebalanceOperationDUnitTest extends CacheTestCase {
   }
   
   public void testRecoverRedundancyParallelAsyncEventQueueSimulation() throws NoSuchFieldException, SecurityException {
-    invokeInEveryVM(new SerializableRunnable() {
+    Invoke.invokeInEveryVM(new SerializableRunnable() {
 
       @Override
       public void run () {
@@ -1867,7 +1869,7 @@ public class RebalanceOperationDUnitTest extends CacheTestCase {
           assertEquals(12, details.getCreatedBucketCount());
           assertEquals(1,details.getActualRedundantCopies());
           assertEquals(0,details.getLowRedundancyBucketCount());
-          getLogWriter().info("details=" + details.getPartitionMemberInfo());
+          LogWriterUtils.getLogWriter().info("details=" + details.getPartitionMemberInfo());
           long afterSize = 0;
           for(PartitionMemberInfo memberDetails: details.getPartitionMemberInfo()) {
             assertEquals(8, memberDetails.getBucketCount());
@@ -2009,7 +2011,7 @@ public class RebalanceOperationDUnitTest extends CacheTestCase {
         assertEquals(12, details.getCreatedBucketCount());
         assertEquals(1,details.getActualRedundantCopies());
         assertEquals(0,details.getLowRedundancyBucketCount());
-        getLogWriter().info("details=" + details.getPartitionMemberInfo());
+        LogWriterUtils.getLogWriter().info("details=" + details.getPartitionMemberInfo());
         long afterSize = 0;
         for(PartitionMemberInfo memberDetails: details.getPartitionMemberInfo()) {
           assertEquals(8, memberDetails.getBucketCount());
@@ -2078,7 +2080,7 @@ public class RebalanceOperationDUnitTest extends CacheTestCase {
           assertEquals(12, details.getCreatedBucketCount());
           assertEquals(1,details.getActualRedundantCopies());
           assertEquals(0,details.getLowRedundancyBucketCount());
-          getLogWriter().info("details=" + details.getPartitionMemberInfo());
+          LogWriterUtils.getLogWriter().info("details=" + details.getPartitionMemberInfo());
           long afterSize = 0;
           for(PartitionMemberInfo memberDetails: details.getPartitionMemberInfo()) {
             assertEquals(6, memberDetails.getBucketCount());
@@ -2193,7 +2195,7 @@ public class RebalanceOperationDUnitTest extends CacheTestCase {
         assertEquals(12, details.getCreatedBucketCount());
         assertEquals(1,details.getActualRedundantCopies());
         assertEquals(0,details.getLowRedundancyBucketCount());
-        getLogWriter().info("details=" + details.getPartitionMemberInfo());
+        LogWriterUtils.getLogWriter().info("details=" + details.getPartitionMemberInfo());
         long afterSize = 0;
         for(PartitionMemberInfo memberDetails: details.getPartitionMemberInfo()) {
           assertEquals(8, memberDetails.getBucketCount());
@@ -2503,7 +2505,7 @@ public class RebalanceOperationDUnitTest extends CacheTestCase {
     }
     cacheWriter.release();
    
-    getLogWriter().info("starting wait for rebalance.  Will wait for " + MAX_WAIT + " seconds");
+    LogWriterUtils.getLogWriter().info("starting wait for rebalance.  Will wait for " + MAX_WAIT + " seconds");
     RebalanceResults results = rebalance.getResults(MAX_WAIT, TimeUnit.SECONDS);
     assertEquals(2, results.getTotalBucketCreatesCompleted());
     assertEquals(1, results.getTotalPrimaryTransfersCompleted());
@@ -2744,7 +2746,7 @@ public class RebalanceOperationDUnitTest extends CacheTestCase {
     // the rebalance op may think that the other member doesn't have buckets, then
     // ask it to create them and get a negative reply because it actually does
     // have the buckets, causing the test to fail
-    pause(10000);  
+    Wait.pause(10000);  
     
     //Try to rebalance again. This shouldn't do anything, because
     //we already recovered redundancy earlier.
@@ -3164,7 +3166,7 @@ public class RebalanceOperationDUnitTest extends CacheTestCase {
         Cache cache = getCache();
         final PartitionedRegion region = (PartitionedRegion) cache.getRegion(regionName);
         
-        waitForCriterion(new WaitCriterion() {
+        Wait.waitForCriterion(new WaitCriterion() {
           
           @Override
           public boolean done() {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowAsyncRollingOpLogJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowAsyncRollingOpLogJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowAsyncRollingOpLogJUnitTest.java
index 10a7df7..5f0a249 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowAsyncRollingOpLogJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowAsyncRollingOpLogJUnitTest.java
@@ -29,8 +29,8 @@ import com.gemstone.gemfire.*;
 import com.gemstone.gemfire.cache.*;
 import com.gemstone.gemfire.internal.cache.lru.LRUStatistics;
 import com.gemstone.gemfire.internal.cache.*;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
@@ -168,7 +168,7 @@ public class DiskRegionOverflowAsyncRollingOpLogJUnitTest extends
             return null;
           }
         };
-      DistributedTestCase.waitForCriterion(ev, 30 * 1000, 200, true);
+      Wait.waitForCriterion(ev, 30 * 1000, 200, true);
     }
 
     //Now get 0-9999 entries

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowSyncRollingOpLogJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowSyncRollingOpLogJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowSyncRollingOpLogJUnitTest.java
index df09e2c..d94cf73 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowSyncRollingOpLogJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/DiskRegionOverflowSyncRollingOpLogJUnitTest.java
@@ -30,8 +30,8 @@ import static org.junit.Assert.*;
 import com.gemstone.gemfire.*;
 import com.gemstone.gemfire.cache.*;
 import com.gemstone.gemfire.internal.cache.lru.LRUStatistics;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
@@ -163,7 +163,7 @@ public class DiskRegionOverflowSyncRollingOpLogJUnitTest extends
             return null;
           }
         };
-      DistributedTestCase.waitForCriterion(ev, 30 * 1000, 200, true);
+      Wait.waitForCriterion(ev, 30 * 1000, 200, true);
     }
     
     //Now get 0-9999 entries

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/MultiThreadedOplogPerJUnitPerformanceTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/MultiThreadedOplogPerJUnitPerformanceTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/MultiThreadedOplogPerJUnitPerformanceTest.java
index 719a9b2..5e2da16 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/MultiThreadedOplogPerJUnitPerformanceTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/diskPerf/MultiThreadedOplogPerJUnitPerformanceTest.java
@@ -34,7 +34,7 @@ import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.cache.DiskStoreFactoryImpl;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 @Category(IntegrationTest.class)
@@ -142,7 +142,7 @@ public class MultiThreadedOplogPerJUnitPerformanceTest
     }
 
     for (int i = 0; i < numberOfThreads; i++) {
-      DistributedTestCase.join(threads[i], 30 * 1000, null);
+      ThreadUtils.join(threads[i], 30 * 1000);
     }
 
     long totalPuts = ((long)numberOfIterations * numberOfKeysPerThread * numberOfThreads);
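
A small sketch of the join-helper change, assuming ThreadUtils.join keeps the old semantics of failing the test when the thread outlives the timeout; the surrounding method is invented for illustration:

  import com.gemstone.gemfire.test.dunit.ThreadUtils;

  public class JoinExample {
    public static void joinAll(Thread[] threads) {
      for (int i = 0; i < threads.length; i++) {
        // The old DistributedTestCase.join(thread, ms, logWriter) took a third
        // LogWriter argument; the new helper only needs the thread and timeout.
        ThreadUtils.join(threads[i], 30 * 1000);
      }
    }
  }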

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/Bug51193DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/Bug51193DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/Bug51193DUnitTest.java
index 63fdf64..74f5e7f 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/Bug51193DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/Bug51193DUnitTest.java
@@ -42,6 +42,7 @@ import com.gemstone.gemfire.internal.cache.tier.ClientHandShake;
 import com.gemstone.gemfire.internal.cache.tier.sockets.AcceptorImpl;
 import com.gemstone.gemfire.internal.cache.tier.sockets.ServerConnection;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -69,7 +70,8 @@ public class Bug51193DUnitTest extends DistributedTestCase {
     
   }
 
-  public void tearDown2() {
+  @Override
+  protected final void preTearDown() throws Exception {
     closeCache();
     server0.invoke(Bug51193DUnitTest.class, "closeCache");
     client0.invoke(Bug51193DUnitTest.class, "closeCache");
@@ -117,7 +119,7 @@ public class Bug51193DUnitTest extends DistributedTestCase {
   public static Integer createServerCache(Boolean createPR)
       throws Exception {
     Properties props = new Properties();
-    props.setProperty("locators", "localhost["+getDUnitLocatorPort()+"]");
+    props.setProperty("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
 
     Bug51193DUnitTest test = new Bug51193DUnitTest("Bug51193DUnitTest");
     DistributedSystem ds = test.getSystem(props);


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/disttx/DistTXDebugDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/disttx/DistTXDebugDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/disttx/DistTXDebugDUnitTest.java
index 58e76a6..db36aac 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/disttx/DistTXDebugDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/disttx/DistTXDebugDUnitTest.java
@@ -43,6 +43,8 @@ import com.gemstone.gemfire.internal.cache.PartitionedRegion;
 import com.gemstone.gemfire.internal.cache.control.InternalResourceManager;
 import com.gemstone.gemfire.internal.cache.execute.CustomerIDPartitionResolver;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
@@ -68,9 +70,8 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
   }
 
   @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-    invokeInEveryVM(new SerializableRunnable() {
+  protected final void postTearDownCacheTestCase() throws Exception {
+    Invoke.invokeInEveryVM(new SerializableRunnable() {
       public void run() {
         InternalResourceManager.setResourceObserver(null);
       }
@@ -126,7 +127,7 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
     assertNotNull(cache);
     Region pr = cache.createRegion(partitionedRegionName, attr.create());
     assertNotNull(pr);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region " + partitionedRegionName
             + " created Successfully :" + pr.toString());
   }
@@ -147,7 +148,7 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
     assertNotNull(cache);
     Region pr = cache.getRegion(partitionedRegionName);
     assertNotNull(pr);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Destroying Partitioned Region " + partitionedRegionName);
     pr.destroyRegion();
   }
@@ -164,7 +165,7 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
     // af.create());
     Region rr = cache.createRegion(replicatedRegionName, af.create());
     assertNotNull(rr);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Replicated Region " + replicatedRegionName + " created Successfully :"
             + rr.toString());
   }
@@ -195,7 +196,7 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
         for (int i = 1; i <= 3; i++) {
           DummyKeyBasedRoutingResolver dummy = new DummyKeyBasedRoutingResolver(
               i);
-          getLogWriter().info(" calling pr.put");
+          LogWriterUtils.getLogWriter().info(" calling pr.put");
           pr1.put(dummy, "1_entry__" + i);
         }
 
@@ -208,7 +209,7 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
         for (int i = 1; i <= 3; i++) {
           DummyKeyBasedRoutingResolver dummy = new DummyKeyBasedRoutingResolver(
               i);
-          getLogWriter().info(" calling pr.put in tx 1");
+          LogWriterUtils.getLogWriter().info(" calling pr.put in tx 1");
           pr1.put(dummy, "2_entry__" + i);
         }
         ctx.commit();
@@ -217,7 +218,7 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
         for (int i = 1; i <= 3; i++) {
           DummyKeyBasedRoutingResolver dummy = new DummyKeyBasedRoutingResolver(
               i);
-          getLogWriter().info(" calling pr.get");
+          LogWriterUtils.getLogWriter().info(" calling pr.get");
           assertEquals("2_entry__" + i, pr1.get(dummy));
         }
 
@@ -226,7 +227,7 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
         for (int i = 1; i <= 3; i++) {
           DummyKeyBasedRoutingResolver dummy = new DummyKeyBasedRoutingResolver(
               i);
-          getLogWriter().info(" calling pr.put in tx 2");
+          LogWriterUtils.getLogWriter().info(" calling pr.put in tx 2");
           pr1.put(dummy, "3_entry__" + i);
         }
         ctx.rollback();
@@ -235,7 +236,7 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
         for (int i = 1; i <= 3; i++) {
           DummyKeyBasedRoutingResolver dummy = new DummyKeyBasedRoutingResolver(
               i);
-          getLogWriter().info(" calling pr.get");
+          LogWriterUtils.getLogWriter().info(" calling pr.get");
           assertEquals("2_entry__" + i, pr1.get(dummy));
         }
 
@@ -244,7 +245,7 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
         for (int i = 1; i <= 3; i++) {
           DummyKeyBasedRoutingResolver dummy = new DummyKeyBasedRoutingResolver(
               i);
-          getLogWriter().info(" calling pr.destroy in tx 3");
+          LogWriterUtils.getLogWriter().info(" calling pr.destroy in tx 3");
           pr1.destroy(dummy);
         }
         ctx.commit();
@@ -253,7 +254,7 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
         for (int i = 1; i <= 3; i++) {
           DummyKeyBasedRoutingResolver dummy = new DummyKeyBasedRoutingResolver(
               i);
-          getLogWriter().info(" calling pr.get");
+          LogWriterUtils.getLogWriter().info(" calling pr.get");
           assertEquals(null, pr1.get(dummy));
         }
 
@@ -263,7 +264,7 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
           public Object call() throws CacheException {
             PartitionedRegion pr1 = (PartitionedRegion) cache
                 .getRegion("pregion1");
-            getLogWriter().info(
+            LogWriterUtils.getLogWriter().info(
                 " calling pr.getLocalSize " + pr1.getLocalSize());
             assertEquals(0, pr1.getLocalSize());
             return null;
@@ -302,7 +303,7 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
         for (int i = 1; i <= 6; i++) {
           DummyKeyBasedRoutingResolver dummy = new DummyKeyBasedRoutingResolver(
               i);
-          getLogWriter().info(" calling non-tx put");
+          LogWriterUtils.getLogWriter().info(" calling non-tx put");
           pr1.put(dummy, "1_entry__" + i);
           rr1.put(dummy, "1_entry__" + i);
         }
@@ -314,19 +315,19 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
         for (int i = 1; i <= 3; i++) {
           DummyKeyBasedRoutingResolver dummy = new DummyKeyBasedRoutingResolver(
               i);
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               " calling pr1.destroy in tx key=" + dummy);
           pr1.destroy(dummy);
-          getLogWriter().info(" calling rr1.destroy in tx key=" + i);
+          LogWriterUtils.getLogWriter().info(" calling rr1.destroy in tx key=" + i);
           rr1.destroy(dummy);
         }
         for (int i = 4; i <= 6; i++) {
           DummyKeyBasedRoutingResolver dummy = new DummyKeyBasedRoutingResolver(
               i);
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               " calling pr1.invalidate in tx key=" + dummy);
           pr1.invalidate(dummy);
-          getLogWriter().info(" calling rr1.invalidate in tx key=" + i);
+          LogWriterUtils.getLogWriter().info(" calling rr1.invalidate in tx key=" + i);
           rr1.invalidate(dummy);
         }
         ctx.commit();
@@ -335,9 +336,9 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
         for (int i = 1; i <= 6; i++) {
           DummyKeyBasedRoutingResolver dummy = new DummyKeyBasedRoutingResolver(
               i);
-          getLogWriter().info(" calling pr1.get");
+          LogWriterUtils.getLogWriter().info(" calling pr1.get");
           assertEquals(null, pr1.get(dummy));
-          getLogWriter().info(" calling rr1.get");
+          LogWriterUtils.getLogWriter().info(" calling rr1.get");
           assertEquals(null, rr1.get(i));
         }
         return null;
@@ -352,10 +353,10 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
       public Object call() throws CacheException {
         PartitionedRegion pr1 = (PartitionedRegion) cache.getRegion("pregion1");
         Region rr1 = cache.getRegion("rregion1");
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             " calling pr1.getLocalSize " + pr1.getLocalSize());
         assertEquals(2, pr1.getLocalSize());
-        getLogWriter().info(" calling rr1.size " + rr1.size());
+        LogWriterUtils.getLogWriter().info(" calling rr1.size " + rr1.size());
         assertEquals(3, rr1.size());
         return null;
       }
@@ -390,9 +391,9 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
         for (int i = 1; i <= 3; i++) {
           DummyKeyBasedRoutingResolver dummy = new DummyKeyBasedRoutingResolver(
               i);
-          getLogWriter().info(" calling pr.put non-tx PR1_entry__" + i);
+          LogWriterUtils.getLogWriter().info(" calling pr.put non-tx PR1_entry__" + i);
           pr1.put(dummy, "PR1_entry__" + i);
-          getLogWriter().info(" calling rr.put non-tx RR1_entry__" + i);
+          LogWriterUtils.getLogWriter().info(" calling rr.put non-tx RR1_entry__" + i);
           rr1.put(new Integer(i), "RR1_entry__" + i);
         }
 
@@ -405,9 +406,9 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
         for (int i = 1; i <= 3; i++) {
           DummyKeyBasedRoutingResolver dummy = new DummyKeyBasedRoutingResolver(
               i);
-          getLogWriter().info(" calling pr.put in tx PR2_entry__" + i);
+          LogWriterUtils.getLogWriter().info(" calling pr.put in tx PR2_entry__" + i);
           pr1.put(dummy, "PR2_entry__" + i);
-          getLogWriter().info(" calling rr.put in tx RR2_entry__" + i);
+          LogWriterUtils.getLogWriter().info(" calling rr.put in tx RR2_entry__" + i);
           rr1.put(new Integer(i), "RR2_entry__" + i);
         }
         ctx.commit();
@@ -416,9 +417,9 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
         for (int i = 1; i <= 3; i++) {
           DummyKeyBasedRoutingResolver dummy = new DummyKeyBasedRoutingResolver(
               i);
-          getLogWriter().info(" calling pr.get PR2_entry__" + i);
+          LogWriterUtils.getLogWriter().info(" calling pr.get PR2_entry__" + i);
           assertEquals("PR2_entry__" + i, pr1.get(dummy));
-          getLogWriter().info(" calling rr.get RR2_entry__" + i);
+          LogWriterUtils.getLogWriter().info(" calling rr.get RR2_entry__" + i);
           assertEquals("RR2_entry__" + i, rr1.get(new Integer(i)));
         }
         return null;
@@ -432,12 +433,12 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
       @Override
       public Object call() throws CacheException {
         PartitionedRegion pr1 = (PartitionedRegion) cache.getRegion("pregion1");
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             " calling pr.getLocalSize " + pr1.getLocalSize());
         assertEquals(2, pr1.getLocalSize());
 
         Region rr1 = cache.getRegion("rregion1");
-        getLogWriter()
+        LogWriterUtils.getLogWriter()
             .info(" calling rr.getLocalSize " + rr1.size());
         assertEquals(3, rr1.size());
         return null;
@@ -474,7 +475,7 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
         for (int i = 1; i <= 3; i++) {
           DummyKeyBasedRoutingResolver dummy = new DummyKeyBasedRoutingResolver(
               i);
-          getLogWriter().info(" calling pr.put in tx 1");
+          LogWriterUtils.getLogWriter().info(" calling pr.put in tx 1");
           pr1.put(dummy, "2_entry__" + i);
         }
         ctx.commit();
@@ -483,7 +484,7 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
         for (int i = 1; i <= 3; i++) {
           DummyKeyBasedRoutingResolver dummy = new DummyKeyBasedRoutingResolver(
               i);
-          getLogWriter().info(" calling pr.get " + pr1.get(dummy));
+          LogWriterUtils.getLogWriter().info(" calling pr.get " + pr1.get(dummy));
           assertEquals("2_entry__" + i, pr1.get(dummy));
         }
         return null;
@@ -497,7 +498,7 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
       public Object call() throws CacheException {
         PartitionedRegion pr1 = (PartitionedRegion) cache.getRegion("pregion1");
         CacheTransactionManager ctx = cache.getCacheTransactionManager();
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             " calling pr.getLocalSize " + pr1.getLocalSize());
         assertEquals(2, pr1.getLocalSize());
         return null;
@@ -525,7 +526,7 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
         for (int i = 1; i <= 3; i++) {
           DummyKeyBasedRoutingResolver dummy = new DummyKeyBasedRoutingResolver(
               i);
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               " calling pr.put in tx for rollback no_entry__" + i);
           pr1.put(dummy, "no_entry__" + i);
         }
@@ -535,7 +536,7 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
         for (int i = 1; i <= 3; i++) {
           DummyKeyBasedRoutingResolver dummy = new DummyKeyBasedRoutingResolver(
               i);
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               " calling pr.get after rollback " + pr1.get(dummy));
           assertEquals("2_entry__" + i, pr1.get(dummy));
         }
@@ -569,10 +570,10 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
         for (int i = 1; i <= 3; i++) {
           DummyKeyBasedRoutingResolver dummy = new DummyKeyBasedRoutingResolver(
               i);
-          getLogWriter().info(" calling pr.create in tx 1");
+          LogWriterUtils.getLogWriter().info(" calling pr.create in tx 1");
           pr1.create(dummy, "2_entry__" + i);
           
-          getLogWriter().info(" calling rr.create " + "2_entry__" + i);
+          LogWriterUtils.getLogWriter().info(" calling rr.create " + "2_entry__" + i);
           rr1.create(new Integer(i), "2_entry__" + i);
         }
         ctx.commit();
@@ -581,10 +582,10 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
         for (int i = 1; i <= 3; i++) {
           DummyKeyBasedRoutingResolver dummy = new DummyKeyBasedRoutingResolver(
               i);
-          getLogWriter().info(" calling pr.get " + pr1.get(dummy));
+          LogWriterUtils.getLogWriter().info(" calling pr.get " + pr1.get(dummy));
           assertEquals("2_entry__" + i, pr1.get(dummy));
           
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               " calling rr.get " + rr1.get(new Integer(i)));
           assertEquals("2_entry__" + i, rr1.get(new Integer(i)));
         }
@@ -599,12 +600,12 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
       @Override
       public Object call() throws CacheException {
         Region rr1 = cache.getRegion("rregion1");
-        getLogWriter()
+        LogWriterUtils.getLogWriter()
             .info(" calling rr.getLocalSize " + rr1.size());
         assertEquals(3, rr1.size());
         
         PartitionedRegion pr1 = (PartitionedRegion) cache.getRegion("pregion1");
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             " calling pr.getLocalSize " + pr1.getLocalSize());
         assertEquals(2, pr1.getLocalSize());
         return null;
@@ -649,10 +650,10 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
         for (int i = 1; i <= 3; i++) {
           DummyKeyBasedRoutingResolver dummy = new DummyKeyBasedRoutingResolver(
               i);
-          getLogWriter().info(" calling pr.get " + pr1.get(dummy));
+          LogWriterUtils.getLogWriter().info(" calling pr.get " + pr1.get(dummy));
           assertEquals("2_entry__" + i, pr1.get(dummy));
           
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               " calling rr.get " + rr1.get(new Integer(i)));
           assertEquals("2_entry__" + i, rr1.get(new Integer(i)));
         }
@@ -667,12 +668,12 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
       @Override
       public Object call() throws CacheException {
         Region rr1 = cache.getRegion("rregion1");
-        getLogWriter()
+        LogWriterUtils.getLogWriter()
             .info(" calling rr.getLocalSize " + rr1.size());
         assertEquals(3, rr1.size());
         
         PartitionedRegion pr1 = (PartitionedRegion) cache.getRegion("pregion1");
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             " calling pr.getLocalSize " + pr1.getLocalSize());
         assertEquals(2, pr1.getLocalSize());
         return null;
@@ -713,7 +714,7 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
         for (int i = 1; i <= 3; i++) {
           DummyKeyBasedRoutingResolver dummy = new DummyKeyBasedRoutingResolver(
               i);
-          getLogWriter().info(" calling pr.get " + pr1.get(dummy));
+          LogWriterUtils.getLogWriter().info(" calling pr.get " + pr1.get(dummy));
           assertEquals("2_entry__" + i, pr1.get(dummy));
           
         }
@@ -729,7 +730,7 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
       @Override
       public Object call() throws CacheException {
         PartitionedRegion pr1 = (PartitionedRegion) cache.getRegion("pregion1");
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             " calling pr.getLocalSize " + pr1.getLocalSize());
         assertEquals(2, pr1.getLocalSize());
         return null;
@@ -780,7 +781,7 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
 
         // verify the data
         for (int i = 1; i <= 3; i++) {
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               " calling rr.get " + rr1.get(new Integer(i)));
           assertEquals(null, rr1.get(new Integer(i)));
         }
@@ -799,7 +800,7 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
       @Override
       public Object call() throws CacheException {
         Region rr1 = cache.getRegion("rregion1");
-        getLogWriter()
+        LogWriterUtils.getLogWriter()
             .info(" calling rr.getLocalSize " + rr1.size());
         assertEquals(0, rr1.size());
         return null;
@@ -840,7 +841,7 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
         for (int i = 1; i <= 3; i++) {
           DummyKeyBasedRoutingResolver dummy = new DummyKeyBasedRoutingResolver(
               i);
-          getLogWriter().info(" calling pr.get " + pr1.get(dummy));
+          LogWriterUtils.getLogWriter().info(" calling pr.get " + pr1.get(dummy));
           assertEquals(null, pr1.get(dummy));
         }
         return null;
@@ -854,7 +855,7 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
       @Override
       public Object call() throws CacheException {
         PartitionedRegion pr1 = (PartitionedRegion) cache.getRegion("pregion1");
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             " calling pr.getLocalSize " + pr1.getLocalSize());
         assertEquals(0, pr1.getLocalSize());
         return null;
@@ -885,14 +886,14 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
         ctx.setDistributed(true);
         ctx.begin();
         for (int i = 1; i <= 3; i++) {
-          getLogWriter().info(" calling rr.put " + "2_entry__" + i);
+          LogWriterUtils.getLogWriter().info(" calling rr.put " + "2_entry__" + i);
           rr1.put(new Integer(i), "2_entry__" + i);
         }
         ctx.commit();
 
         // verify the data
         for (int i = 1; i <= 3; i++) {
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               " calling rr.get " + rr1.get(new Integer(i)));
           assertEquals("2_entry__" + i, rr1.get(new Integer(i)));
         }
@@ -911,7 +912,7 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
       @Override
       public Object call() throws CacheException {
         Region rr1 = cache.getRegion("rregion1");
-        getLogWriter()
+        LogWriterUtils.getLogWriter()
             .info(" calling rr.getLocalSize " + rr1.size());
         assertEquals(3, rr1.size());
         return null;
@@ -929,7 +930,7 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
         ctx.setDistributed(true);
         ctx.begin();
         for (int i = 1; i <= 3; i++) {
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               " calling rr.put for rollback no_entry__" + i);
           rr1.put(new Integer(i), "no_entry__" + i);
         }
@@ -938,7 +939,7 @@ public class DistTXDebugDUnitTest extends CacheTestCase {
 
         // verify the data
         for (int i = 1; i <= 3; i++) {
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               " calling rr.get after rollback "
                   + rr1.get(new Integer(i)));
           assertEquals("2_entry__" + i, rr1.get(new Integer(i)));

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/disttx/DistTXPersistentDebugDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/disttx/DistTXPersistentDebugDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/disttx/DistTXPersistentDebugDUnitTest.java
index 5a7375a..2cfeae7 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/disttx/DistTXPersistentDebugDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/disttx/DistTXPersistentDebugDUnitTest.java
@@ -27,6 +27,8 @@ import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.internal.cache.TXManagerImpl;
 import com.gemstone.gemfire.internal.cache.execute.data.CustId;
 import com.gemstone.gemfire.internal.cache.execute.data.Customer;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 
 public class DistTXPersistentDebugDUnitTest extends DistTXDebugDUnitTest {
@@ -38,7 +40,7 @@ public class DistTXPersistentDebugDUnitTest extends DistTXDebugDUnitTest {
   @Override
   public void setUp() throws Exception {
     super.setUp();
-    this.invokeInEveryVM(new SerializableCallable() {
+    Invoke.invokeInEveryVM(new SerializableCallable() {
       @Override
       public Object call() throws Exception {
         //System.setProperty("gemfire.ALLOW_PERSISTENT_TRANSACTIONS", "true");
@@ -49,8 +51,8 @@ public class DistTXPersistentDebugDUnitTest extends DistTXDebugDUnitTest {
   }
   
   @Override
-  public void tearDown2() throws Exception {
-    this.invokeInEveryVM(new SerializableCallable() {
+  protected final void preTearDownCacheTestCase() throws Exception {
+    Invoke.invokeInEveryVM(new SerializableCallable() {
       @Override
       public Object call() throws Exception {
         //System.setProperty("gemfire.ALLOW_PERSISTENT_TRANSACTIONS", "false");
@@ -58,7 +60,6 @@ public class DistTXPersistentDebugDUnitTest extends DistTXDebugDUnitTest {
         return null;
       }
     }); 
-    super.tearDown2();
   }
   
   protected void createPesistentPR(Object[] attributes) {
@@ -106,21 +107,21 @@ public class DistTXPersistentDebugDUnitTest extends DistTXDebugDUnitTest {
       public Object call() throws Exception {
         CacheTransactionManager mgr = cache.getCacheTransactionManager();
         mgr.setDistributed(true);
-        getLogWriter().fine("SJ:TX BEGIN");
+        LogWriterUtils.getLogWriter().fine("SJ:TX BEGIN");
         mgr.begin();
         Region<CustId, Customer> prRegion = cache.getRegion(regionName);
 
         CustId custIdOne = new CustId(1);
         Customer customerOne = new Customer("name1", "addr1");
-        getLogWriter().fine("SJ:TX PUT 1");
+        LogWriterUtils.getLogWriter().fine("SJ:TX PUT 1");
         prRegion.put(custIdOne, customerOne);
 
         CustId custIdTwo = new CustId(2);
         Customer customerTwo = new Customer("name2", "addr2");
-        getLogWriter().fine("SJ:TX PUT 2");
+        LogWriterUtils.getLogWriter().fine("SJ:TX PUT 2");
         prRegion.put(custIdTwo, customerTwo);
 
-        getLogWriter().fine("SJ:TX COMMIT");
+        LogWriterUtils.getLogWriter().fine("SJ:TX COMMIT");
         mgr.commit();
         return null;
       }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/disttx/DistributedTransactionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/disttx/DistributedTransactionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/disttx/DistributedTransactionDUnitTest.java
index 61c31d4..2eaef9c 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/disttx/DistributedTransactionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/disttx/DistributedTransactionDUnitTest.java
@@ -57,7 +57,10 @@ import com.gemstone.gemfire.internal.cache.execute.data.CustId;
 import com.gemstone.gemfire.internal.cache.execute.data.Customer;
 import com.gemstone.gemfire.internal.cache.execute.data.Order;
 import com.gemstone.gemfire.internal.cache.execute.data.OrderId;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -79,7 +82,7 @@ public class DistributedTransactionDUnitTest extends CacheTestCase {
   @Override
   public void setUp() throws Exception{
     super.setUp();
-    this.invokeInEveryVM(new SerializableCallable() {
+    Invoke.invokeInEveryVM(new SerializableCallable() {
       @Override
       public Object call() throws Exception {
         System.setProperty("gemfire.sync-commits", "true");
@@ -95,7 +98,7 @@ public class DistributedTransactionDUnitTest extends CacheTestCase {
 //      }
 //    });
 
-    this.invokeInEveryVM(new SerializableCallable() {
+    Invoke.invokeInEveryVM(new SerializableCallable() {
       @Override
       public Object call() throws Exception {
         //System.setProperty("gemfire.ALLOW_PERSISTENT_TRANSACTIONS", "true");
@@ -105,15 +108,16 @@ public class DistributedTransactionDUnitTest extends CacheTestCase {
     }); 
   }
   
-  public void tearDown2() throws Exception {
-    this.invokeInEveryVM(new SerializableCallable() {
+  @Override
+  protected final void preTearDownCacheTestCase() throws Exception {
+    Invoke.invokeInEveryVM(new SerializableCallable() {
       @Override
       public Object call() throws Exception {
         System.setProperty("gemfire.sync-commits", "false");
         return null;
       }
     });
-    this.invokeInEveryVM(new SerializableCallable() {
+    Invoke.invokeInEveryVM(new SerializableCallable() {
       @Override
       public Object call() throws Exception {
         //System.setProperty("gemfire.ALLOW_PERSISTENT_TRANSACTIONS", "false");
@@ -121,8 +125,6 @@ public class DistributedTransactionDUnitTest extends CacheTestCase {
         return null;
       }
     }); 
-    
-    super.tearDown2();
   }
   
   public DistributedTransactionDUnitTest(String name) {
@@ -1950,7 +1952,7 @@ public class DistributedTransactionDUnitTest extends CacheTestCase {
         public Exception ex = new Exception();
         
         public void run() {
-          getLogWriter().info("Inside TxConflictRunnable.TxThread after aquiring locks");
+          LogWriterUtils.getLogWriter().info("Inside TxConflictRunnable.TxThread after aquiring locks");
           CacheTransactionManager mgr = getGemfireCache().getTxManager();
           mgr.setDistributed(true);
           mgr.begin();
@@ -1963,10 +1965,10 @@ public class DistributedTransactionDUnitTest extends CacheTestCase {
             mgr.commit();
           } catch (CommitConflictException ce) {
             gotConflict = true;
-            getLogWriter().info("Received exception ", ce);
+            LogWriterUtils.getLogWriter().info("Received exception ", ce);
           } catch (Exception e) {
             gotOtherException = true;
-            getLogWriter().info("Received exception ", e);
+            LogWriterUtils.getLogWriter().info("Received exception ", e);
             ex.initCause(e);
           }
         }
@@ -1984,7 +1986,7 @@ public class DistributedTransactionDUnitTest extends CacheTestCase {
       assertTrue("This test should fail with CommitConflictException",
           txThread.gotConflict);
       if (txThread.gotOtherException) {
-        fail("Received unexpected exception ", txThread.ex);
+        Assert.fail("Received unexpected exception ", txThread.ex);
       }
     }
   }
@@ -2085,7 +2087,7 @@ public class DistributedTransactionDUnitTest extends CacheTestCase {
         public Exception ex = new Exception();
 
         public void run() {
-          getLogWriter()
+          LogWriterUtils.getLogWriter()
               .info("Inside TxRunnable.TxThread after aquiring locks");
           CacheTransactionManager mgr = getGemfireCache().getTxManager();
           mgr.setDistributed(true);
@@ -2099,7 +2101,7 @@ public class DistributedTransactionDUnitTest extends CacheTestCase {
             mgr.commit();
           } catch (Exception e) {
             gotException = true;
-            getLogWriter().info("Received exception ", e);
+            LogWriterUtils.getLogWriter().info("Received exception ", e);
             ex.initCause(e);
           }
         }
@@ -2114,7 +2116,7 @@ public class DistributedTransactionDUnitTest extends CacheTestCase {
         e.printStackTrace();
       }
       if (txThread.gotException) {
-        fail("Received exception ", txThread.ex);
+        Assert.fail("Received exception ", txThread.ex);
       }
     }
   }
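
Taken together, the hunks above show the shape of the migrated lifecycle hooks: the tearDown2() override is replaced by the preTearDownCacheTestCase() hook (the base class now runs its own teardown, so the explicit super.tearDown2() call disappears), per-VM system properties are set through the static Invoke.invokeInEveryVM helper instead of the inherited instance method, and failures that carry a cause go through Assert.fail(String, Throwable). A minimal sketch of the resulting pattern, assuming the utility signatures shown in these hunks; the class name and the system property are illustrative only:

    import com.gemstone.gemfire.cache30.CacheTestCase;
    import com.gemstone.gemfire.test.dunit.Invoke;
    import com.gemstone.gemfire.test.dunit.SerializableCallable;

    public class ExampleTxDUnitTest extends CacheTestCase {

      public ExampleTxDUnitTest(String name) {
        super(name);
      }

      @Override
      public void setUp() throws Exception {
        super.setUp();
        // was: this.invokeInEveryVM(...); now a static utility call
        Invoke.invokeInEveryVM(new SerializableCallable() {
          @Override
          public Object call() throws Exception {
            System.setProperty("gemfire.sync-commits", "true");
            return null;
          }
        });
      }

      @Override
      protected final void preTearDownCacheTestCase() throws Exception {
        // was: tearDown2(); no explicit super.tearDown2() call is needed anymore
        Invoke.invokeInEveryVM(new SerializableCallable() {
          @Override
          public Object call() throws Exception {
            System.setProperty("gemfire.sync-commits", "false");
            return null;
          }
        });
      }
    }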

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/BackwardCompatibilitySerializationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/BackwardCompatibilitySerializationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/BackwardCompatibilitySerializationDUnitTest.java
index 35269f7..f1e5dda 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/BackwardCompatibilitySerializationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/BackwardCompatibilitySerializationDUnitTest.java
@@ -70,7 +70,7 @@ public class BackwardCompatibilitySerializationDUnitTest extends CacheTestCase {
   }
 
   @After
-  public void tearDown2() {
+  protected final void preTearDownCacheTestCase() {
     resetFlags();
     // reset the class mapped to the dsfid
     DSFIDFactory.registerDSFID(DataSerializableFixedID.PUTALL_VERSIONS_LIST,

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/ClassNotFoundExceptionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/ClassNotFoundExceptionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/ClassNotFoundExceptionDUnitTest.java
index 185ae2d..8ab0b2c 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/ClassNotFoundExceptionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/ClassNotFoundExceptionDUnitTest.java
@@ -39,6 +39,8 @@ import com.gemstone.gemfire.pdx.PdxReader;
 import com.gemstone.gemfire.pdx.PdxSerializable;
 import com.gemstone.gemfire.pdx.PdxWriter;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
@@ -65,7 +67,7 @@ public class ClassNotFoundExceptionDUnitTest extends CacheTestCase {
   }
   
   public void doTest(final ObjectFactory objectFactory) throws InterruptedException {
-    addExpectedException("SerializationException");
+    IgnoredException.addIgnoredException("SerializationException");
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
@@ -189,7 +191,7 @@ public class ClassNotFoundExceptionDUnitTest extends CacheTestCase {
       public Object call() throws Exception {
         disconnectFromDS();
         ClientCacheFactory cf = new ClientCacheFactory();
-        cf.addPoolServer(getServerHostName(vm.getHost()), port);
+        cf.addPoolServer(NetworkUtils.getServerHostName(vm.getHost()), port);
         cf.setPoolSubscriptionEnabled(true);
         ClientCache cache = getClientCache(cf);
         cache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY)
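
The same commit swaps the inherited expected-exception and host-name helpers for their extracted counterparts. A minimal sketch of the client-connection step after the change, assuming it lives inside a CacheTestCase subclass as in the test above; the method name and parameters are illustrative:

    // was: addExpectedException("SerializationException")
    // was: cf.addPoolServer(getServerHostName(vm.getHost()), port)
    private ClientCache connectClient(VM serverVM, int port) {
      IgnoredException.addIgnoredException("SerializationException");
      ClientCacheFactory cf = new ClientCacheFactory();
      cf.addPoolServer(NetworkUtils.getServerHostName(serverVM.getHost()), port);
      cf.setPoolSubscriptionEnabled(true);
      return getClientCache(cf);
    }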

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/JSSESocketJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/JSSESocketJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/JSSESocketJUnitTest.java
index 54275dc..c8927b6 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/JSSESocketJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/JSSESocketJUnitTest.java
@@ -48,7 +48,7 @@ import org.junit.rules.TestName;
 
 import com.gemstone.gemfire.internal.logging.LogService;
 import com.gemstone.gemfire.util.test.TestUtil;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
@@ -122,7 +122,7 @@ public class JSSESocketJUnitTest {
     oos.writeObject( expected );
     oos.flush();
     
-    DistributedTestCase.join(serverThread, 30 * 1000, null);
+    ThreadUtils.join(serverThread, 30 * 1000);
     
     client.close();
     if ( expected.equals( receiver[0] ) ) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/JarDeployerDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/JarDeployerDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/JarDeployerDUnitTest.java
index 99a3053..5895a7d 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/JarDeployerDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/JarDeployerDUnitTest.java
@@ -45,6 +45,7 @@ import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
@@ -65,7 +66,7 @@ public class JarDeployerDUnitTest extends CacheTestCase {
   }
 
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
     JarDeployer jarDeployer = new JarDeployer();
     for (JarClassLoader jarClassLoader : jarDeployer.findJarClassLoaders()) {
       if (jarClassLoader.getJarName().startsWith("JarDeployerDUnit")) {
@@ -79,7 +80,6 @@ public class JarDeployerDUnitTest extends CacheTestCase {
     }
     disconnectAllFromDS();
     deleteSavedJarFiles();
-    super.tearDown2();
   }
   
   @Test
@@ -194,9 +194,9 @@ public class JarDeployerDUnitTest extends CacheTestCase {
             fail("Should not have been able to obtain exclusive lock on file:" + jarFile1.getAbsolutePath());
           }
         } catch (FileNotFoundException fnfex) {
-          fail("JAR file not found where expected", fnfex);
+          Assert.fail("JAR file not found where expected", fnfex);
         } catch (IOException ioex) {
-          fail("IOException when trying to obtain exclusive lock", ioex);
+          Assert.fail("IOException when trying to obtain exclusive lock", ioex);
         } finally {
           if (outStream != null) {
             try {
@@ -628,7 +628,7 @@ public class JarDeployerDUnitTest extends CacheTestCase {
         randomAccessFile.write("GARBAGE".getBytes(), 0, 7);
         randomAccessFile.close();
         } catch (IOException ioex) {
-          fail("Error trying to create garbage file for test", ioex);
+          Assert.fail("Error trying to create garbage file for test", ioex);
         }
         
         getSystem();

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/PdxDeleteFieldDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/PdxDeleteFieldDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/PdxDeleteFieldDUnitTest.java
index d805a38..f7b6529 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/PdxDeleteFieldDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/PdxDeleteFieldDUnitTest.java
@@ -39,6 +39,7 @@ import com.gemstone.gemfire.pdx.PdxWriter;
 import com.gemstone.gemfire.pdx.internal.PdxType;
 import com.gemstone.gemfire.pdx.internal.PdxUnreadData;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -171,16 +172,15 @@ public class PdxDeleteFieldDUnitTest  extends CacheTestCase{
   }
 
   @Override
-  public void tearDown2() throws Exception {
+  public void preTearDownCacheTestCase() throws Exception {
     for (String path : this.filesToBeDeleted) {
       try {
         FileUtil.delete(new File(path));
       } catch (IOException e) {
-        getLogWriter().error("Unable to delete file", e);
+        LogWriterUtils.getLogWriter().error("Unable to delete file", e);
       }
     }
     this.filesToBeDeleted.clear();
-    super.tearDown2();
   }
   
   public static class PdxValue implements PdxSerializable {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/PdxRenameDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/PdxRenameDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/PdxRenameDUnitTest.java
index 6303719..f10d953 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/PdxRenameDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/PdxRenameDUnitTest.java
@@ -40,6 +40,7 @@ import com.gemstone.gemfire.pdx.internal.EnumInfo;
 import com.gemstone.gemfire.pdx.internal.PdxInstanceImpl;
 import com.gemstone.gemfire.pdx.internal.PdxType;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -178,16 +179,15 @@ public class PdxRenameDUnitTest  extends CacheTestCase{
   }
 
   @Override
-  public void tearDown2() throws Exception {
+  public void preTearDownCacheTestCase() throws Exception {
     for (String path : this.filesToBeDeleted) {
       try {
         FileUtil.delete(new File(path));
       } catch (IOException e) {
-        getLogWriter().error("Unable to delete file", e);
+        LogWriterUtils.getLogWriter().error("Unable to delete file", e);
       }
     }
     this.filesToBeDeleted.clear();
-    super.tearDown2();
   }
   
   enum Day {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/SocketCloserJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/SocketCloserJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/SocketCloserJUnitTest.java
index 7335528..83f4a08 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/SocketCloserJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/SocketCloserJUnitTest.java
@@ -30,8 +30,8 @@ import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.test.junit.categories.UnitTest;
 
 /**
@@ -121,7 +121,7 @@ public class SocketCloserJUnitTest {
           return "expected " + 2*maxThreads + " waiters but found only " + waitingToClose.get();
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 5000, 10, true);
+      Wait.waitForCriterion(wc, 5000, 10, true);
     }
     // now count down the latch that allows the sockets to close
     cdl.countDown();
@@ -141,7 +141,7 @@ public class SocketCloserJUnitTest {
           return "one or more sockets did not close";
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 5000, 10, true);
+      Wait.waitForCriterion(wc, 5000, 10, true);
     }
   }
   
@@ -162,7 +162,7 @@ public class SocketCloserJUnitTest {
     Socket s = createClosableSocket();
     s.close();
     this.socketCloser.asyncClose(s, "A", r);
-    DistributedTestCase.pause(10);
+    Wait.pause(10);
     assertEquals(false, runnableCalled.get());
   }
   
@@ -190,6 +190,6 @@ public class SocketCloserJUnitTest {
         return "runnable was not called or socket was not closed";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 5000, 10, true);
+    Wait.waitForCriterion(wc, 5000, 10, true);
   }
 }
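
The static extraction also matters outside dunit tests: plain JUnit classes such as this one now poll through Wait rather than reaching into DistributedTestCase. A minimal sketch of the polling idiom, assuming WaitCriterion keeps the done()/description() contract used above; the condition being polled is illustrative:

    final AtomicBoolean closed = new AtomicBoolean();   // java.util.concurrent.atomic
    WaitCriterion wc = new WaitCriterion() {
      @Override
      public boolean done() {
        return closed.get();                            // condition under test
      }
      @Override
      public String description() {
        return "socket was not closed";                 // reported on timeout
      }
    };
    Wait.waitForCriterion(wc, 5000, 10, true);          // poll up to 5 s at 10 ms steps, throw on timeout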

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/BackupDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/BackupDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/BackupDUnitTest.java
index 6b0493e..a91fb8e 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/BackupDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/BackupDUnitTest.java
@@ -50,9 +50,12 @@ import com.gemstone.gemfire.distributed.internal.DistributionMessageObserver;
 import com.gemstone.gemfire.distributed.internal.ReplyMessage;
 import com.gemstone.gemfire.internal.FileUtil;
 import com.gemstone.gemfire.internal.cache.partitioned.PersistentPartitionedRegionTestBase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.DUnitEnv;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
@@ -72,13 +75,12 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
   }
   
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
     StringBuilder failures = new StringBuilder();
     FileUtil.delete(getBackupDir(), failures);
     if (failures.length() > 0) {
-      getLogWriter().error(failures.toString());
+      LogWriterUtils.getLogWriter().error(failures.toString());
     }
-    super.tearDown2();
   }
   
   public void testBackupPR() throws Throwable {
@@ -87,9 +89,9 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     
-    getLogWriter().info("Creating region in VM0");
+    LogWriterUtils.getLogWriter().info("Creating region in VM0");
     createPersistentRegion(vm0);
-    getLogWriter().info("Creating region in VM1");
+    LogWriterUtils.getLogWriter().info("Creating region in VM1");
     createPersistentRegion(vm1);
     
     long lm0 = setBackupFiles(vm0);
@@ -119,7 +121,7 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
     closeCache(vm1);
     
     //Destroy the current data
-    invokeInEveryVM(new SerializableRunnable("Clean disk dirs") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("Clean disk dirs") {
       public void run() {
         try {
           cleanDiskDirs();
@@ -131,9 +133,9 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
     
     restoreBackup(2);
     
-    getLogWriter().info("Creating region in VM0");
+    LogWriterUtils.getLogWriter().info("Creating region in VM0");
     AsyncInvocation async0 = createPersistentRegionAsync(vm0);
-    getLogWriter().info("Creating region in VM1");
+    LogWriterUtils.getLogWriter().info("Creating region in VM1");
     AsyncInvocation async1 = createPersistentRegionAsync(vm1);
     
     async0.getResult(MAX_WAIT);
@@ -154,9 +156,9 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
     
-    getLogWriter().info("Creating region in VM0");
+    LogWriterUtils.getLogWriter().info("Creating region in VM0");
     createPersistentRegion(vm0);
-    getLogWriter().info("Creating region in VM1");
+    LogWriterUtils.getLogWriter().info("Creating region in VM1");
     createPersistentRegion(vm1);
     
     createData(vm0, 0, 5, "A", "region1");
@@ -176,7 +178,7 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
     closeCache(vm1);
     
     //Destroy the current data
-    invokeInEveryVM(new SerializableRunnable("Clean disk dirs") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("Clean disk dirs") {
       public void run() {
         try {
           cleanDiskDirs();
@@ -188,9 +190,9 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
     
     restoreBackup(2);
     
-    getLogWriter().info("Creating region in VM0");
+    LogWriterUtils.getLogWriter().info("Creating region in VM0");
     AsyncInvocation async0 = createPersistentRegionAsync(vm0);
-    getLogWriter().info("Creating region in VM1");
+    LogWriterUtils.getLogWriter().info("Creating region in VM1");
     AsyncInvocation async1 = createPersistentRegionAsync(vm1);
     
     async0.getResult(MAX_WAIT);
@@ -219,14 +221,14 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
     final VM vm2 = host.getVM(2);
     
       
-      getLogWriter().info("Creating region in VM0");
+      LogWriterUtils.getLogWriter().info("Creating region in VM0");
       createPersistentRegion(vm0);
 
       //create a bucket on vm0
       createData(vm0, 0, 1, "A", "region1");
 
       //create the pr on vm1, which won't have any buckets
-      getLogWriter().info("Creating region in VM1");
+      LogWriterUtils.getLogWriter().info("Creating region in VM1");
       createPersistentRegion(vm1);
 
 
@@ -266,7 +268,7 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
       closeCache(vm1);
 
       //Destroy the current data
-      invokeInEveryVM(new SerializableRunnable("Clean disk dirs") {
+      Invoke.invokeInEveryVM(new SerializableRunnable("Clean disk dirs") {
         public void run() {
           try {
             cleanDiskDirs();
@@ -278,9 +280,9 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
 
       restoreBackup(2);
 
-      getLogWriter().info("Creating region in VM0");
+      LogWriterUtils.getLogWriter().info("Creating region in VM0");
       AsyncInvocation async0 = createPersistentRegionAsync(vm0);
-      getLogWriter().info("Creating region in VM1");
+      LogWriterUtils.getLogWriter().info("Creating region in VM1");
       AsyncInvocation async1 = createPersistentRegionAsync(vm1);
 
       async0.getResult(MAX_WAIT);
@@ -379,14 +381,14 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
     });
     try {
       
-    getLogWriter().info("Creating region in VM0");
+    LogWriterUtils.getLogWriter().info("Creating region in VM0");
     createPersistentRegion(vm0);
 
     //create twos bucket on vm0
     createData(vm0, 0, 2, "A", "region1");
 
     //create the pr on vm1, which won't have any buckets
-    getLogWriter().info("Creating region in VM1");
+    LogWriterUtils.getLogWriter().info("Creating region in VM1");
 
     createPersistentRegion(vm1);
     
@@ -402,7 +404,7 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
           results = op.getResults();
           assertEquals(1, results.getTotalBucketTransfersCompleted());
         } catch (Exception e) {
-          fail("interupted", e);
+          Assert.fail("interupted", e);
         }
       }
     });
@@ -416,7 +418,7 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
     closeCache(vm1);
 
     //Destroy the current data
-    invokeInEveryVM(new SerializableRunnable("Clean disk dirs") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("Clean disk dirs") {
       public void run() {
         try {
           cleanDiskDirs();
@@ -428,9 +430,9 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
 
     restoreBackup(2);
 
-    getLogWriter().info("Creating region in VM0");
+    LogWriterUtils.getLogWriter().info("Creating region in VM0");
     AsyncInvocation async0 = createPersistentRegionAsync(vm0);
-    getLogWriter().info("Creating region in VM1");
+    LogWriterUtils.getLogWriter().info("Creating region in VM1");
     AsyncInvocation async1 = createPersistentRegionAsync(vm1);
 
     async0.getResult(MAX_WAIT);
@@ -459,9 +461,9 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     
-    getLogWriter().info("Creating region in VM0");
+    LogWriterUtils.getLogWriter().info("Creating region in VM0");
     createPersistentRegion(vm0);
-    getLogWriter().info("Creating region in VM1");
+    LogWriterUtils.getLogWriter().info("Creating region in VM1");
     createOverflowRegion(vm1);
     
     createData(vm0, 0, 5, "A", "region1");
@@ -484,11 +486,11 @@ public class BackupDUnitTest extends PersistentPartitionedRegionTestBase {
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
     
-    getLogWriter().info("Creating region in VM0");
+    LogWriterUtils.getLogWriter().info("Creating region in VM0");
     createPersistentRegion(vm0);
-    getLogWriter().info("Creating region in VM1");
+    LogWriterUtils.getLogWriter().info("Creating region in VM1");
     createPersistentRegion(vm1);
-    getLogWriter().info("Creating region in VM2");
+    LogWriterUtils.getLogWriter().info("Creating region in VM2");
     createPersistentRegion(vm2);
     
     createData(vm0, 0, 5, "A", "region1");
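
The "Clean disk dirs" step repeated throughout this test is the SerializableRunnable flavour of the same Invoke migration. A minimal sketch, hedged where the hunks cut off: cleanDiskDirs() is the test-base helper shown above, but the error handling in the catch block is illustrative only:

    Invoke.invokeInEveryVM(new SerializableRunnable("Clean disk dirs") {
      public void run() {
        try {
          cleanDiskDirs();
        } catch (Exception e) {
          // illustrative handling; the test's own catch block is not shown in the hunk
          throw new RuntimeException("failed to clean disk dirs", e);
        }
      }
    });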

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug33359DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug33359DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug33359DUnitTest.java
index aa2a9b2..d6ff35c 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug33359DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug33359DUnitTest.java
@@ -36,6 +36,7 @@ import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -67,16 +68,16 @@ public class Bug33359DUnitTest extends DistributedTestCase {
       VM vm1 = host.getVM(1);
       vm0.invoke(Bug33359DUnitTest.class, "createCacheVM0");
       vm1.invoke(Bug33359DUnitTest.class, "createCacheVM1");
-      getLogWriter().fine("Cache created in successfully");
+      LogWriterUtils.getLogWriter().fine("Cache created in successfully");
     }
     
-    public void tearDown2(){
-        Host host = Host.getHost(0);
-        VM vm0 = host.getVM(0);
-        VM vm1 = host.getVM(1);
-        vm0.invoke(Bug33359DUnitTest.class, "closeCache");
-        vm1.invoke(Bug33359DUnitTest.class, "closeCache");
-        
+    @Override
+    protected final void preTearDown(){
+      Host host = Host.getHost(0);
+      VM vm0 = host.getVM(0);
+      VM vm1 = host.getVM(1);
+      vm0.invoke(Bug33359DUnitTest.class, "closeCache");
+      vm1.invoke(Bug33359DUnitTest.class, "closeCache");
     }
     
     public static void createCacheVM0(){
@@ -140,7 +141,7 @@ public class Bug33359DUnitTest extends DistributedTestCase {
                 for(int i=0; i<10; i++){
                     region.put(new Integer(i), Integer.toString(i));
                 }                
-                getLogWriter().fine("Did all puts successfully");
+                LogWriterUtils.getLogWriter().fine("Did all puts successfully");
             }
         }
         );        
@@ -148,7 +149,7 @@ public class Bug33359DUnitTest extends DistributedTestCase {
         vm0.invoke(new CacheSerializableRunnable("perform clear on region"){
             public void run2() throws CacheException {
                 region.clear();
-                getLogWriter().fine("region is cleared");
+                LogWriterUtils.getLogWriter().fine("region is cleared");
             }
         }
         );        

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug33726DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug33726DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug33726DUnitTest.java
index 08cb340..5e7903e 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug33726DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug33726DUnitTest.java
@@ -44,7 +44,8 @@ public class Bug33726DUnitTest extends DistributedTestCase {
 	super(name);
   }
 
-  public void tearDown2() {
+  @Override
+  protected final void preTearDown() throws Exception {
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug37241DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug37241DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug37241DUnitTest.java
index 4ad5133..3726ae8 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug37241DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug37241DUnitTest.java
@@ -31,6 +31,7 @@ import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.distributed.internal.ReplyException;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /*
@@ -155,10 +156,10 @@ public class Bug37241DUnitTest extends DistributedTestCase
                              .getName());
 
     //added for not to log exepected IllegalStateExcepion.
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "<ExpectedException action=add>" + expectedReplyException
             + "</ExpectedException>");
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
             "<ExpectedException action=add>" + expectedException
             + "</ExpectedException>");
     cache.getLogger().info(
@@ -199,10 +200,10 @@ public class Bug37241DUnitTest extends DistributedTestCase
     cache.getLogger().info(
         "<ExpectedException action=remove>" + expectedReplyException
         + "</ExpectedException>");
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "<ExpectedException action=remove>" + expectedException
         + "</ExpectedException>");
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "<ExpectedException action=remove>" + expectedReplyException
         + "</ExpectedException>");
   }  
@@ -222,12 +223,11 @@ public class Bug37241DUnitTest extends DistributedTestCase
     }
   }
 
-  public void tearDown2() throws Exception
-  {
+  @Override
+  protected final void preTearDown() throws Exception {
     server1.invoke(Bug37241DUnitTest.class, "closeCache");
     server2.invoke(Bug37241DUnitTest.class, "closeCache");
   }
-
 }
 
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug37377DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug37377DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug37377DUnitTest.java
index f7a2911..766ecd4 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug37377DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug37377DUnitTest.java
@@ -34,10 +34,11 @@ import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.cache.lru.EnableLRU;
 import com.gemstone.gemfire.internal.util.concurrent.CustomEntryConcurrentHashMap.HashEntry;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 /**
  * Bug37377 DUNIT Test: The Clear operation during a GII in progress can leave a
@@ -94,13 +95,12 @@ public class Bug37377DUnitTest extends CacheTestCase
 
   }
 
-  public void tearDown2() throws Exception
+  @Override
+  protected final void preTearDownCacheTestCase() throws Exception
   {
 
     vm1.invoke(destroyRegion());
     vm0.invoke(destroyRegion());
-
-    super.tearDown2();
   }
 
   /**
@@ -313,8 +313,8 @@ public class Bug37377DUnitTest extends CacheTestCase
     vm0.invoke(createCacheForVM0());
     vm0.invoke(putSomeEntries());
     AsyncInvocation as1 = vm1.invokeAsync(createCacheForVM1());
-    pause(10000);
-    DistributedTestCase.join(as1, 30 * 1000, getLogWriter());
+    Wait.pause(10000);
+    ThreadUtils.join(as1, 30 * 1000);
     vm0.invoke(closeCacheForVM(0));
     vm1.invoke(closeCacheForVM(1));
     vm1.invoke(createCacheForVM1());
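
This hunk also shows how asynchronous invocations are now joined: Wait.pause replaces the inherited pause, and ThreadUtils.join drops the trailing log-writer argument. A minimal sketch of that fragment, assuming it runs inside the test method shown above:

    AsyncInvocation gii = vm1.invokeAsync(createCacheForVM1());  // start GII in vm1
    Wait.pause(10000);                                           // was: pause(10000)
    ThreadUtils.join(gii, 30 * 1000);                            // was: DistributedTestCase.join(gii, 30 * 1000, getLogWriter())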

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug39079DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug39079DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug39079DUnitTest.java
index cc07a3a..532b806 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug39079DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug39079DUnitTest.java
@@ -43,7 +43,9 @@ import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.persistence.UninterruptibleFileChannel;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -175,12 +177,9 @@ public class Bug39079DUnitTest extends CacheTestCase {
     };
     return (CacheSerializableRunnable)createCache;
   }
-  
-  
 
- 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  @Override
+  protected final void postTearDownCacheTestCase() throws Exception {
     disconnectAllFromDS();
 
     vm0.invoke(Bug39079DUnitTest.class, "ignorePreAllocate", new Object[] { Boolean.FALSE });
@@ -307,7 +306,7 @@ public class Bug39079DUnitTest extends CacheTestCase {
    Integer port = (Integer)vm0.invoke(Bug39079DUnitTest.class, "createServerCache");
    //create cache client
    vm1.invoke(Bug39079DUnitTest.class, "createClientCache",
-       new Object[] { getServerHostName(vm0.getHost()), port});
+       new Object[] { NetworkUtils.getServerHostName(vm0.getHost()), port});
    
    // validate 
    vm0.invoke(Bug39079DUnitTest.class, "validateRuningBridgeServerList");
@@ -370,7 +369,7 @@ public class Bug39079DUnitTest extends CacheTestCase {
       }catch(DiskAccessException dae) {
         //OK expected
       }catch (IOException e) {
-        fail("test failed due to ", e);
+        Assert.fail("test failed due to ", e);
       }
       
       ((LocalRegion) region).getDiskStore().waitForClose();

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug40299DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug40299DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug40299DUnitTest.java
index e2d7eb7..60f9bd5 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug40299DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug40299DUnitTest.java
@@ -41,6 +41,7 @@ import com.gemstone.gemfire.internal.cache.SearchLoadAndWriteProcessor;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -83,10 +84,10 @@ public class Bug40299DUnitTest extends CacheTestCase
     vm0 = host.getVM(0);
   }
 
-  public void tearDown2() throws Exception
+  @Override
+  protected final void preTearDownCacheTestCase() throws Exception
   {
     vm0.invoke(destroyRegion());
-    super.tearDown2();
   }
 
   /**
@@ -290,7 +291,7 @@ public class Bug40299DUnitTest extends CacheTestCase
 
   public void testQueryGetWithClear()
   {
-    addExpectedException("Entry has been cleared and is not present on disk");
+    IgnoredException.addIgnoredException("Entry has been cleared and is not present on disk");
 	// create region in VM0 
 	vm0.invoke(createCacheForVM0());
 	// Do puts to region.

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug41091DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug41091DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug41091DUnitTest.java
index e023ed4..a03f035 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug41091DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug41091DUnitTest.java
@@ -34,7 +34,10 @@ import com.gemstone.gemfire.distributed.internal.DistributionMessage;
 import com.gemstone.gemfire.distributed.internal.DistributionMessageObserver;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.InitialImageOperation.RequestImageMessage;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -55,8 +58,7 @@ public class Bug41091DUnitTest extends CacheTestCase {
   }
   
   @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  protected final void postTearDownCacheTestCase() throws Exception {
     disconnectAllFromDS();
   }
   
@@ -96,7 +98,7 @@ public class Bug41091DUnitTest extends CacheTestCase {
    
         Properties props = new Properties();
         props.setProperty(DistributionConfig.ENABLE_NETWORK_PARTITION_DETECTION_NAME, "true");
-        props.setProperty(DistributionConfig.LOCATORS_NAME, getServerHostName(host) + "[" + locatorPort + "]");
+        props.setProperty(DistributionConfig.LOCATORS_NAME, NetworkUtils.getServerHostName(host) + "[" + locatorPort + "]");
         getSystem(props);
         
         
@@ -116,7 +118,7 @@ public class Bug41091DUnitTest extends CacheTestCase {
       public void run() {
         Properties props = new Properties();
         props.setProperty(DistributionConfig.ENABLE_NETWORK_PARTITION_DETECTION_NAME, "true");
-        props.setProperty(DistributionConfig.LOCATORS_NAME, getServerHostName(host) + "[" + locatorPort + "]");
+        props.setProperty(DistributionConfig.LOCATORS_NAME, NetworkUtils.getServerHostName(host) + "[" + locatorPort + "]");
         getSystem(props);
         Cache cache = getCache();
         AttributesFactory af = new AttributesFactory();
@@ -150,7 +152,7 @@ public class Bug41091DUnitTest extends CacheTestCase {
         disconnectFromDS();
         Properties props = new Properties();
         props.setProperty(DistributionConfig.MCAST_PORT_NAME, String.valueOf(0));
-        props.setProperty(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+        props.setProperty(DistributionConfig.LOG_LEVEL_NAME, LogWriterUtils.getDUnitLogLevel());
         props.setProperty(DistributionConfig.ENABLE_NETWORK_PARTITION_DETECTION_NAME, "true");
         props.setProperty(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "false");
         try {
@@ -158,13 +160,13 @@ public class Bug41091DUnitTest extends CacheTestCase {
               + ".log");
           InetAddress bindAddr = null;
           try {
-            bindAddr = InetAddress.getByName(getServerHostName(vm.getHost()));
+            bindAddr = InetAddress.getByName(NetworkUtils.getServerHostName(vm.getHost()));
           } catch (UnknownHostException uhe) {
-            fail("While resolving bind address ", uhe);
+            Assert.fail("While resolving bind address ", uhe);
           }
           Locator locator = Locator.startLocatorAndDS(locatorPort, logFile, bindAddr, props);
         } catch (IOException ex) {
-          fail("While starting locator on port " + locatorPort, ex);
+          Assert.fail("While starting locator on port " + locatorPort, ex);
         }
       }
     });
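
The member setup in this test collects several of the extracted helpers in one place; the locator-side properties additionally read the dunit log level through LogWriterUtils.getDUnitLogLevel() rather than the inherited getDUnitLogLevel(). A minimal sketch of the member connection properties after the change, assuming host and locatorPort are supplied by the surrounding test as above:

    Properties props = new Properties();
    props.setProperty(DistributionConfig.ENABLE_NETWORK_PARTITION_DETECTION_NAME, "true");
    props.setProperty(DistributionConfig.LOCATORS_NAME,
        NetworkUtils.getServerHostName(host) + "[" + locatorPort + "]");
    getSystem(props);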

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug41733DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug41733DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug41733DUnitTest.java
index 2679d45..58179f2 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug41733DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug41733DUnitTest.java
@@ -58,9 +58,8 @@ public class Bug41733DUnitTest extends CacheTestCase {
   
 
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
     disconnectAllFromDS();
-    super.tearDown2();
   }
 
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug41957DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug41957DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug41957DUnitTest.java
index 1cdf92e..a16a9c3 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug41957DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug41957DUnitTest.java
@@ -34,7 +34,10 @@ import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.cache30.ClientServerTestCase;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.AvailablePort;
+import com.gemstone.gemfire.test.dunit.Assert;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -52,8 +55,8 @@ import com.gemstone.gemfire.test.dunit.VM;
     super(name);
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  @Override
+  protected final void postTearDownCacheTestCase() throws Exception {
     disconnectAllFromDS();
   }
 
@@ -63,7 +66,7 @@ import com.gemstone.gemfire.test.dunit.VM;
     final VM client = host.getVM(1);
     final String regionName = getUniqueName();
     final int serverPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String serverHost = getServerHostName(server.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server.getHost());
 
     createBridgeServer(server, regionName, serverPort, false);
 
@@ -88,7 +91,7 @@ import com.gemstone.gemfire.test.dunit.VM;
       public void run2() throws CacheException {
         // Create DS
         Properties config = new Properties();
-        config.setProperty("locators", "localhost["+getDUnitLocatorPort()+"]");
+        config.setProperty("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
         getSystem(config);
 
         // Create Region
@@ -112,7 +115,7 @@ import com.gemstone.gemfire.test.dunit.VM;
         try {
           startBridgeServer(serverPort);
         } catch (Exception e) {
-          fail("While starting CacheServer", e);
+          Assert.fail("While starting CacheServer", e);
         }
       }
     });
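
Client-side distributed systems now read the shared dunit locator port from DistributedTestUtils instead of the inherited getDUnitLocatorPort(). A minimal sketch of that step, mirroring the hunk above:

    Properties config = new Properties();
    config.setProperty("locators", "localhost[" + DistributedTestUtils.getDUnitLocatorPort() + "]");
    getSystem(config);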

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug45164DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug45164DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug45164DUnitTest.java
index a2e9c5d..371df3f 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug45164DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug45164DUnitTest.java
@@ -25,6 +25,7 @@ import com.gemstone.gemfire.cache.RegionShortcut;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 
 public class Bug45164DUnitTest extends CacheTestCase {
@@ -78,7 +79,7 @@ public class Bug45164DUnitTest extends CacheTestCase {
         Cache cache = getCache(new CacheFactory());
         Region<Integer, Object> region = cache.<Integer, Object>createRegionFactory(RegionShortcut.PARTITION).create("test");
         if (region == null) {
-          getLogWriter().error("oops!");
+          LogWriterUtils.getLogWriter().error("oops!");
         }
       }
     };

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug47667DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug47667DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug47667DUnitTest.java
index 40ba9ea..097aeb5 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug47667DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/Bug47667DUnitTest.java
@@ -25,6 +25,7 @@ import com.gemstone.gemfire.cache.client.PoolManager;
 import com.gemstone.gemfire.cache.client.internal.LocatorTestBase;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -43,8 +44,7 @@ public class Bug47667DUnitTest extends LocatorTestBase {
   }
 
   @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  protected final void postTearDownLocatorTestBase() throws Exception {
     disconnectAllFromDS();
   }
 
@@ -56,7 +56,7 @@ public class Bug47667DUnitTest extends LocatorTestBase {
     VM client = host.getVM(3);
 
     final int locatorPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String locatorHost = getServerHostName(host);
+    final String locatorHost = NetworkUtils.getServerHostName(host);
     startLocatorInVM(locator, locatorPort, "");
 
     String locString = getLocatorString(host, locatorPort);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/CacheAdvisorDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/CacheAdvisorDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/CacheAdvisorDUnitTest.java
index 66a7880..eb64e93 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/CacheAdvisorDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/CacheAdvisorDUnitTest.java
@@ -40,6 +40,7 @@ import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
 import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedMember;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -174,7 +175,7 @@ public class CacheAdvisorDUnitTest extends CacheTestCase {
     RegionAttributes attrs = fac.create();
     DistributedRegion rgn = (DistributedRegion)createRegion(rgnName, attrs);
     
-    invokeInEveryVM(new CacheSerializableRunnable("CachAdvisorTest.testNetLoadAdviceWithAttributesMutator;createRegion") {
+    Invoke.invokeInEveryVM(new CacheSerializableRunnable("CachAdvisorTest.testNetLoadAdviceWithAttributesMutator;createRegion") {
       public void run2() throws CacheException {
         AttributesFactory f = new AttributesFactory();
         f.setScope(Scope.DISTRIBUTED_ACK);
@@ -231,7 +232,7 @@ public class CacheAdvisorDUnitTest extends CacheTestCase {
     final InternalDistributedMember myMemberId = getSystem().getDistributionManager().getId();
     
     // assert that other VMs advisors have test member id 
-    invokeInEveryVM(new CacheSerializableRunnable("CacheAdvisorDUnitTest.basicTestClose;verify1") {
+    Invoke.invokeInEveryVM(new CacheSerializableRunnable("CacheAdvisorDUnitTest.basicTestClose;verify1") {
       public void run2() throws CacheException {
         DistributedRegion rgn1 = (DistributedRegion)getRootRegion();
         assertTrue(rgn1.getDistributionAdvisor().adviseGeneric().contains(myMemberId));
@@ -249,7 +250,7 @@ public class CacheAdvisorDUnitTest extends CacheTestCase {
       fail("expected op(" + op + ") to be CACHE_CLOSE, REGION_CLOSE, or REGION_LOCAL_DESTROY");
     }
     final InternalDistributedMember closedMemberId = getSystem().getDistributionManager().getId();
-    invokeInEveryVM(new CacheSerializableRunnable("CacheAdvisorDUnitTest.basicTestClose;verify") {
+    Invoke.invokeInEveryVM(new CacheSerializableRunnable("CacheAdvisorDUnitTest.basicTestClose;verify") {
       public void run2() throws CacheException {
         DistributedRegion rgn1 = (DistributedRegion)getRootRegion();
         assertTrue(!rgn1.getDistributionAdvisor().adviseGeneric().contains(closedMemberId));

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClearDAckDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClearDAckDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClearDAckDUnitTest.java
index 39e7604..dabc277 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClearDAckDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClearDAckDUnitTest.java
@@ -38,6 +38,8 @@ import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.cache.versions.VersionSource;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -71,20 +73,21 @@ public class ClearDAckDUnitTest extends DistributedTestCase {
       VM vm1 = host.getVM(1);
       vm0ID = (DistributedMember)vm0.invoke(ClearDAckDUnitTest.class, "createCacheVM0");
       vm1ID = (DistributedMember)vm1.invoke(ClearDAckDUnitTest.class, "createCacheVM1");
-      getLogWriter().info("Cache created in successfully");
+      LogWriterUtils.getLogWriter().info("Cache created in successfully");
     }
     
-    public void tearDown2(){
-        Host host = Host.getHost(0);
-        VM vm0 = host.getVM(0);
-        VM vm1 = host.getVM(1);
-        VM vm2 = host.getVM(2);
-        vm0.invoke(ClearDAckDUnitTest.class, "closeCache");
-        vm1.invoke(ClearDAckDUnitTest.class, "resetClearCallBack");
-        vm1.invoke(ClearDAckDUnitTest.class, "closeCache");
-        vm2.invoke(ClearDAckDUnitTest.class, "closeCache");
-        cache = null;
-        invokeInEveryVM(new SerializableRunnable() { public void run() { cache = null; } });
+    @Override
+    protected final void preTearDown() throws Exception {
+      Host host = Host.getHost(0);
+      VM vm0 = host.getVM(0);
+      VM vm1 = host.getVM(1);
+      VM vm2 = host.getVM(2);
+      vm0.invoke(ClearDAckDUnitTest.class, "closeCache");
+      vm1.invoke(ClearDAckDUnitTest.class, "resetClearCallBack");
+      vm1.invoke(ClearDAckDUnitTest.class, "closeCache");
+      vm2.invoke(ClearDAckDUnitTest.class, "closeCache");
+      cache = null;
+      Invoke.invokeInEveryVM(new SerializableRunnable() { public void run() { cache = null; } });
     }
     
     public static long getRegionVersion(DistributedMember memberID) {
@@ -102,7 +105,7 @@ public class ClearDAckDUnitTest extends DistributedTestCase {
         try{
             //            props.setProperty("mcast-port", "1234");
             //            ds = DistributedSystem.connect(props);
-            getLogWriter().info("I am vm0");
+            LogWriterUtils.getLogWriter().info("I am vm0");
             ds = (new ClearDAckDUnitTest("temp")).getSystem(props);
             cache = CacheFactory.create(ds);
             
@@ -116,7 +119,7 @@ public class ClearDAckDUnitTest extends DistributedTestCase {
             RegionAttributes attr = factory.create();
             
             region = cache.createRegion("map", attr);
-            getLogWriter().info("vm0 map region: " + region);
+            LogWriterUtils.getLogWriter().info("vm0 map region: " + region);
             paperWork = cache.createRegion("paperWork", attr);
             return cache.getDistributedSystem().getDistributedMember();
         } catch (CacheException ex){
@@ -127,7 +130,7 @@ public class ClearDAckDUnitTest extends DistributedTestCase {
         try{
             //   props.setProperty("mcast-port", "1234");
             //   ds = DistributedSystem.connect(props);
-            getLogWriter().info("I am vm1");
+            LogWriterUtils.getLogWriter().info("I am vm1");
             ds = (new ClearDAckDUnitTest("temp")).getSystem(props);
             //DistributedSystem.setThreadsSocketPolicy(false);
             CacheObserverImpl observer = new CacheObserverImpl();
@@ -143,7 +146,7 @@ public class ClearDAckDUnitTest extends DistributedTestCase {
             RegionAttributes attr = factory.create();
             
             region = cache.createRegion("map", attr);
-            getLogWriter().info("vm1 map region: " + region);
+            LogWriterUtils.getLogWriter().info("vm1 map region: " + region);
             paperWork = cache.createRegion("paperWork", attr);
             return cache.getDistributedSystem().getDistributedMember();
             
@@ -156,7 +159,7 @@ public class ClearDAckDUnitTest extends DistributedTestCase {
       try{
           //   props.setProperty("mcast-port", "1234");
           //   ds = DistributedSystem.connect(props);
-          getLogWriter().info("I am vm2");
+          LogWriterUtils.getLogWriter().info("I am vm2");
           ds = (new ClearDAckDUnitTest("temp")).getSystem(props);
           //DistributedSystem.setThreadsSocketPolicy(false);
           CacheObserverImpl observer = new CacheObserverImpl();
@@ -172,7 +175,7 @@ public class ClearDAckDUnitTest extends DistributedTestCase {
           RegionAttributes attr = factory.create();
           
           region = cache.createRegion("map", attr);
-          getLogWriter().info("vm2 map region: " + region);
+          LogWriterUtils.getLogWriter().info("vm2 map region: " + region);
           paperWork = cache.createRegion("paperWork", attr);
           
           region.put("vm2Key", "vm2Value");
@@ -207,14 +210,14 @@ public class ClearDAckDUnitTest extends DistributedTestCase {
             vm0.invoke(ClearDAckDUnitTest.class, "putMethod", objArr);
             
         }
-        getLogWriter().info("Did all puts successfully");
+        LogWriterUtils.getLogWriter().info("Did all puts successfully");
         
         long regionVersion = (Long)vm1.invoke(ClearDAckDUnitTest.class, "getRegionVersion", new Object[]{vm0ID});
         
         vm0.invoke(ClearDAckDUnitTest.class,"clearMethod");
         
         boolean flag = vm1.invokeBoolean(ClearDAckDUnitTest.class,"getVM1Flag");
-        getLogWriter().fine("Flag in VM1="+ flag);
+        LogWriterUtils.getLogWriter().fine("Flag in VM1="+ flag);
         
         assertTrue(flag);
         
@@ -226,7 +229,7 @@ public class ClearDAckDUnitTest extends DistributedTestCase {
         vm2.invoke(ClearDAckDUnitTest.class, "createCacheVM2AndLocalClear");
         
         flag = vm1.invokeBoolean(ClearDAckDUnitTest.class,"getVM1Flag");
-        getLogWriter().fine("Flag in VM1="+ flag);
+        LogWriterUtils.getLogWriter().fine("Flag in VM1="+ flag);
         assertFalse(flag);
         
     }//end of test case
@@ -255,7 +258,7 @@ public class ClearDAckDUnitTest extends DistributedTestCase {
             long end = System.currentTimeMillis();
             
             long diff = end - start;
-            getLogWriter().info("Clear Thread proceeded before receiving the ack message in (milli seconds): "+diff);
+            LogWriterUtils.getLogWriter().info("Clear Thread proceeded before receiving the ack message in (milli seconds): "+diff);
               
         }catch (Exception e){
             e.printStackTrace();

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClearGlobalDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClearGlobalDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClearGlobalDUnitTest.java
index 29b54df..54df822 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClearGlobalDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClearGlobalDUnitTest.java
@@ -35,6 +35,8 @@ import com.gemstone.gemfire.internal.cache.CacheObserverAdapter;
 import com.gemstone.gemfire.internal.cache.CacheObserverHolder;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 import java.util.Properties;
@@ -80,11 +82,11 @@ public class ClearGlobalDUnitTest extends DistributedTestCase
     server1 = host.getVM(0);    
     server1.invoke(ClearGlobalDUnitTest.class, "createCacheServer1");
     createCacheServer2();
-    getLogWriter().fine("Cache created in successfully");
+    LogWriterUtils.getLogWriter().fine("Cache created in successfully");
   }
 
-  public void tearDown2()
-  {        
+  @Override
+  protected final void preTearDown() throws Exception {
     server1.invoke(ClearGlobalDUnitTest.class, "closeCache");
     resetClearCallBack();
     closeCache();
@@ -172,7 +174,7 @@ public class ClearGlobalDUnitTest extends DistributedTestCase
       throw new Exception("Test Failed: " + exceptionMsg);
     }   
     else{  
-      getLogWriter().info("Test Passed Successfully ");
+      LogWriterUtils.getLogWriter().info("Test Passed Successfully ");
     } 
   }
   
@@ -182,7 +184,7 @@ public class ClearGlobalDUnitTest extends DistributedTestCase
     {
       Thread th = new PutThread();
       th.start();
-      DistributedTestCase.join(th, 5 * 60 * 1000, getLogWriter());
+      ThreadUtils.join(th, 5 * 60 * 1000);
       synchronized (lock) {    
         testComplete = true;
         lock.notify();
@@ -202,7 +204,7 @@ public class ClearGlobalDUnitTest extends DistributedTestCase
       catch (TimeoutException ex) {
         //pass
         testFailed = false;        
-        getLogWriter().info("Expected TimeoutException in thread ");        
+        LogWriterUtils.getLogWriter().info("Expected TimeoutException in thread ");        
       }
       catch (Exception ex) {        
         exceptionMsg.append(" Exception occurred while region.put(key,value)");                   
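
This file shows the two recurring changes in the commit: per-test cleanup moves from a hand-written tearDown2() override into the preTearDown() hook that the framework invokes itself, and thread joins go through ThreadUtils.join, which no longer takes a LogWriter argument. A minimal sketch under those assumptions; the class name, constructor and test body are hypothetical, while the hook signature and the join call come from the hunks above:

    import com.gemstone.gemfire.test.dunit.DistributedTestCase;
    import com.gemstone.gemfire.test.dunit.LogWriterUtils;
    import com.gemstone.gemfire.test.dunit.ThreadUtils;

    public class ExampleTearDownDUnitTest extends DistributedTestCase {

      public ExampleTearDownDUnitTest(String name) {
        super(name);
      }

      @Override
      protected final void preTearDown() throws Exception {
        // cleanup that used to live in tearDown2(); the framework calls this hook,
        // so there is no super.tearDown2() to remember
        LogWriterUtils.getLogWriter().info("cleaning up before framework tear-down");
      }

      public void testWorkerThreadCompletes() {
        Thread worker = new Thread(new Runnable() {
          public void run() {
            // hypothetical background work
          }
        });
        worker.start();
        // the LogWriter argument of the old DistributedTestCase.join(...) is gone
        ThreadUtils.join(worker, 60 * 1000);
      }
    }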



[43/62] [abbrv] [partial] incubator-geode git commit: GEODE-773: Extract static methods from DistributedTestCase

Posted by je...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRemoteNodeExceptionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRemoteNodeExceptionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRemoteNodeExceptionDUnitTest.java
index 830d35b..07807c8 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRemoteNodeExceptionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRemoteNodeExceptionDUnitTest.java
@@ -41,11 +41,13 @@ import com.gemstone.gemfire.distributed.internal.DistributionMessageObserver;
 import com.gemstone.gemfire.internal.cache.BucketRegion;
 import com.gemstone.gemfire.internal.cache.PartitionedRegion;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionDUnitTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
 
 /**
  * This test verifies exception handling on coordinator node for remote as
@@ -88,9 +90,8 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
   private int numOfBuckets = 10;
 
   @Override
-  public void tearDown2() throws Exception {
-    invokeInEveryVM(QueryObserverHolder.class, "reset");
-    super.tearDown2();
+  protected final void preTearDownPartitionedRegionDUnitTest() throws Exception {
+    Invoke.invokeInEveryVM(QueryObserverHolder.class, "reset");
   }
 
   /**
@@ -108,7 +109,7 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
   public void testPRWithLocalAndRemoteException()
       throws Exception {
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Querying with PR Local/Remote Exception test Started");
     Host host = Host.getHost(0);
@@ -119,7 +120,7 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
     vmList.add(vm1);
     vmList.add(vm0);
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Creating PR's across all VM0 , VM1");
     vm0.invoke(PRQHelp
@@ -127,17 +128,17 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
     vm1.invoke(PRQHelp
         .getCacheSerializableRunnableForPRCreateLimitedBuckets(name, redundancy, numOfBuckets));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Created PR on VM0 , VM1");
 
     // creating a local region on one of the JVM's
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Creating Local Region on VM0");
     vm0.invoke(PRQHelp
         .getCacheSerializableRunnableForLocalRegionCreation(localName));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Created Local Region on VM0");
 
@@ -147,29 +148,29 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
     final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
 
     // Putting the data into the accessor node
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Inserting Portfolio data through the accessor node");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Inserted Portfolio data through the accessor node");
 
     // Putting the same data in the local region created
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Inserting Portfolio data on local node  VM0 for result Set Comparison");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName,
         portfolio, cnt, cntDest));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Inserted Portfolio data on local node  VM0 for result Set Comparison");
 
     // Execute query first time. This is to make sure all the buckets are
     // created
     // (lazy bucket creation).
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Querying on VM0 First time");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRQueryAndCompareResults(
@@ -219,9 +220,9 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
               gotException = true;
               if (ex.getMessage().contains("local node")) {
 //                ex.printStackTrace();
-                getLogWriter().info("PRQueryRemoteNodeExceptionDUnitTest: Test received Exception from local node successfully.");
+                LogWriterUtils.getLogWriter().info("PRQueryRemoteNodeExceptionDUnitTest: Test received Exception from local node successfully.");
               } else {
-                fail("PRQueryRemoteNodeExceptionDUnitTest: Test did not receive Exception as expected from local node rather received", ex);
+                Assert.fail("PRQueryRemoteNodeExceptionDUnitTest: Test did not receive Exception as expected from local node rather received", ex);
               }
             }
             if (!gotException) {
@@ -231,14 +232,14 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
         }
       );
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRemoteNodeExceptionDUnitTest#testPRWithLocalAndRemoteException: Querying with PR Local/Remote Exception Test ENDED");
   }
   
   public void testRemoteException() throws Exception {
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Querying with PR Local/Remote Exception test Started");
     Host host = Host.getHost(0);
@@ -249,7 +250,7 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
     vmList.add(vm1);
     vmList.add(vm0);
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Creating PR's across all VM0 , VM1");
     vm0.invoke(PRQHelp
@@ -257,17 +258,17 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
     vm1.invoke(PRQHelp
         .getCacheSerializableRunnableForPRCreateLimitedBuckets(name, redundancy, numOfBuckets));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Created PR on VM0 , VM1");
 
     // creating a local region on one of the JVM's
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Creating Local Region on VM0");
     vm0.invoke(PRQHelp
         .getCacheSerializableRunnableForLocalRegionCreation(localName));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Created Local Region on VM0");
 
@@ -277,22 +278,22 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
     final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
 
     // Putting the data into the accessor node
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Inserting Portfolio data through the accessor node");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Inserted Portfolio data through the accessor node");
 
     // Putting the same data in the local region created
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Inserting Portfolio data on local node  VM0 for result Set Comparison");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName,
         portfolio, cnt, cntDest));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Inserted Portfolio data on local node  VM0 for result Set Comparison");
 
@@ -350,9 +351,9 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
               gotException = true;
               if (ex.getMessage().contains("remote node")) {
                 ex.printStackTrace();
-                getLogWriter().info("PRQueryRemoteNodeExceptionDUnitTest: Test received Exception from remote node successfully.");
+                LogWriterUtils.getLogWriter().info("PRQueryRemoteNodeExceptionDUnitTest: Test received Exception from remote node successfully.");
               } else {
-                fail("PRQueryRemoteNodeExceptionDUnitTest: Test did not receive Exception as expected from remote node rather received", ex);
+                Assert.fail("PRQueryRemoteNodeExceptionDUnitTest: Test did not receive Exception as expected from remote node rather received", ex);
               }
             }
             if (!gotException) {
@@ -362,14 +363,14 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
         }
       );
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRemoteNodeExceptionDUnitTest#testPRWithLocalAndRemoteException: Querying with PR Local/Remote Exception Test ENDED");
   }
   
   public void testCacheCloseExceptionFromLocalAndRemote() throws Exception {
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Querying with PR Local/Remote Exception test Started");
     Host host = Host.getHost(0);
@@ -380,7 +381,7 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
     vmList.add(vm1);
     vmList.add(vm0);
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Creating PR's across all VM0 , VM1");
     vm0.invoke(PRQHelp
@@ -388,17 +389,17 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
     vm1.invoke(PRQHelp
         .getCacheSerializableRunnableForPRCreateLimitedBuckets(name, redundancy, numOfBuckets));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Created PR on VM0 , VM1");
 
     // creating a local region on one of the JVM's
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Creating Local Region on VM0");
     vm0.invoke(PRQHelp
         .getCacheSerializableRunnableForLocalRegionCreation(localName));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Created Local Region on VM0");
 
@@ -408,22 +409,22 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
     final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
 
     // Putting the data into the accessor node
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Inserting Portfolio data through the accessor node");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Inserted Portfolio data through the accessor node");
 
     // Putting the same data in the local region created
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Inserting Portfolio data on local node  VM0 for result Set Comparison");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName,
         portfolio, cnt, cntDest));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Inserted Portfolio data on local node  VM0 for result Set Comparison");
 
@@ -439,7 +440,7 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
               
               @Override
               public void afterIterationEvaluation(Object result) {
-                getLogWriter().info("Calling after IterationEvaluation :" + noOfAccess);
+                LogWriterUtils.getLogWriter().info("Calling after IterationEvaluation :" + noOfAccess);
                 if (noOfAccess > 2) {
                   PRQHelp.getCache().getRegion(name).destroyRegion();
                 }
@@ -465,7 +466,7 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
               @Override
               public void afterIterationEvaluation(Object result) {
                 //Object region = ((DefaultQuery)query).getRegionsInQuery(null).iterator().next();
-                getLogWriter().info("Calling after IterationEvaluation :" + noOfAccess);
+                LogWriterUtils.getLogWriter().info("Calling after IterationEvaluation :" + noOfAccess);
                 if (noOfAccess > 2) {
                   PRQHelp.getCache().close();
                 }
@@ -482,10 +483,10 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
             } catch (Exception ex) {
               gotException = true;
               if (ex instanceof CacheClosedException || ex instanceof QueryInvocationTargetException) {
-                getLogWriter().info(ex.getMessage());
-                getLogWriter().info("PRQueryRemoteNodeExceptionDUnitTest: Test received Exception from local node successfully.");
+                LogWriterUtils.getLogWriter().info(ex.getMessage());
+                LogWriterUtils.getLogWriter().info("PRQueryRemoteNodeExceptionDUnitTest: Test received Exception from local node successfully.");
               } else {
-                fail("PRQueryRemoteNodeExceptionDUnitTest: Test did not receive Exception as expected from local node rather received", ex);
+                Assert.fail("PRQueryRemoteNodeExceptionDUnitTest: Test did not receive Exception as expected from local node rather received", ex);
               }
             }
             if (!gotException) {
@@ -495,14 +496,14 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
         }
       );
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRemoteNodeExceptionDUnitTest#testPRWithLocalAndRemoteException: Querying with PR Local/Remote Exception Test ENDED");
   }
   
   public void testCacheCloseExceptionFromLocalAndRemote2() throws Exception {
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Querying with PR Local/Remote Exception test Started");
     Host host = Host.getHost(0);
@@ -513,7 +514,7 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
     vmList.add(vm1);
     vmList.add(vm0);
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Creating PR's across all VM0 , VM1");
     vm0.invoke(PRQHelp
@@ -521,17 +522,17 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
     vm1.invoke(PRQHelp
         .getCacheSerializableRunnableForPRCreateLimitedBuckets(name, redundancy, numOfBuckets));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Created PR on VM0 , VM1");
 
     // creating a local region on one of the JVM's
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Creating Local Region on VM0");
     vm0.invoke(PRQHelp
         .getCacheSerializableRunnableForLocalRegionCreation(localName));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Created Local Region on VM0");
 
@@ -541,22 +542,22 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
     final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
 
     // Putting the data into the accessor node
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Inserting Portfolio data through the accessor node");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Inserted Portfolio data through the accessor node");
 
     // Putting the same data in the local region created
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Inserting Portfolio data on local node  VM0 for result Set Comparison");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName,
         portfolio, cnt, cntDest));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Inserted Portfolio data on local node  VM0 for result Set Comparison");
 
@@ -572,7 +573,7 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
               
               @Override
               public void afterIterationEvaluation(Object result) {
-                getLogWriter().info("Calling after IterationEvaluation :" + noOfAccess);
+                LogWriterUtils.getLogWriter().info("Calling after IterationEvaluation :" + noOfAccess);
                 if (noOfAccess > 1) {
                   PRQHelp.getCache().getRegion(name).destroyRegion();
                 }
@@ -625,10 +626,10 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
             } catch (Exception ex) {
               gotException = true;
               if (ex instanceof QueryInvocationTargetException) {
-                getLogWriter().info(ex.getMessage());
-                getLogWriter().info("PRQueryRemoteNodeExceptionDUnitTest: Test received Exception from remote node successfully as region.destroy happened before cache.close().");
+                LogWriterUtils.getLogWriter().info(ex.getMessage());
+                LogWriterUtils.getLogWriter().info("PRQueryRemoteNodeExceptionDUnitTest: Test received Exception from remote node successfully as region.destroy happened before cache.close().");
               } else {
-                fail("PRQueryRemoteNodeExceptionDUnitTest: Test did not receive Exception as expected from local node rather received", ex);
+                Assert.fail("PRQueryRemoteNodeExceptionDUnitTest: Test did not receive Exception as expected from local node rather received", ex);
               }
             }
             if (!gotException) {
@@ -638,14 +639,14 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
         }
       );
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRemoteNodeExceptionDUnitTest#testPRWithLocalAndRemoteException: Querying with PR Local/Remote Exception Test ENDED");
   }
   
   public void testForceReattemptExceptionFromLocal() throws Exception {
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Querying with PR Local/Remote Exception test Started");
     Host host = Host.getHost(0);
@@ -658,7 +659,7 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
     vmList.add(vm0);
     vmList.add(vm2);
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Creating PR's across all VM0 , VM1");
     vm0.invoke(PRQHelp
@@ -668,17 +669,17 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
     vm2.invoke(PRQHelp
         .getCacheSerializableRunnableForPRCreateLimitedBuckets(name, 1/*redundancy*/, numOfBuckets));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Created PR on VM0 , VM1");
 
     // creating a local region on one of the JVM's
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Creating Local Region on VM0");
     vm0.invoke(PRQHelp
         .getCacheSerializableRunnableForLocalRegionCreation(localName));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Created Local Region on VM0");
 
@@ -688,22 +689,22 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
     final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
 
     // Putting the data into the accessor node
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Inserting Portfolio data through the accessor node");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Inserted Portfolio data through the accessor node");
 
     // Putting the same data in the local region created
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Inserting Portfolio data on local node  VM0 for result Set Comparison");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName,
         portfolio, cnt, cntDest));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithLocalAndRemoteException: Successfully Inserted Portfolio data on local node  VM0 for result Set Comparison");
 
@@ -720,13 +721,13 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
               @Override
               public void startQuery(Query query) {
                 Object region = ((DefaultQuery)query).getRegionsInQuery(null).iterator().next();
-                getLogWriter().info("Region type on VM1:"+region);
+                LogWriterUtils.getLogWriter().info("Region type on VM1:"+region);
                 if (noOfAccess == 1) {
                   PartitionedRegion pr = (PartitionedRegion)PRQHelp.getCache().getRegion(name);
                   List buks = pr.getLocalPrimaryBucketsListTestOnly();
-                  getLogWriter().info("Available buckets:"+buks);
+                  LogWriterUtils.getLogWriter().info("Available buckets:"+buks);
                   int bukId = ((Integer)(buks.get(0))).intValue();
-                  getLogWriter().info("Destroying bucket id:"+bukId);
+                  LogWriterUtils.getLogWriter().info("Destroying bucket id:"+bukId);
                   pr.getDataStore().getLocalBucketById(bukId).destroyRegion();
                 }
                 ++noOfAccess;
@@ -751,13 +752,13 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
               @Override
               public void startQuery(Query query) {
                 Object region = ((DefaultQuery)query).getRegionsInQuery(null).iterator().next();
-                getLogWriter().info("Region type on VM0:"+region);
+                LogWriterUtils.getLogWriter().info("Region type on VM0:"+region);
                 if (noOfAccess == 2) {
                   PartitionedRegion pr = (PartitionedRegion)PRQHelp.getCache().getRegion(name);
                   List buks = pr.getLocalPrimaryBucketsListTestOnly();
-                  getLogWriter().info("Available buckets:"+buks);
+                  LogWriterUtils.getLogWriter().info("Available buckets:"+buks);
                   int bukId = ((Integer)(buks.get(0))).intValue();
-                  getLogWriter().info("Destroying bucket id:"+bukId);
+                  LogWriterUtils.getLogWriter().info("Destroying bucket id:"+bukId);
                   pr.getDataStore().getLocalBucketById(bukId).destroyRegion();
                 }
                 ++noOfAccess;
@@ -769,16 +770,16 @@ public class PRQueryRemoteNodeExceptionDUnitTest extends PartitionedRegionDUnitT
 
             try {
               query.execute();
-              getLogWriter().info("PRQueryRemoteNodeExceptionDUnitTest: Query executed successfully with ForceReattemptException on local and remote both.");
+              LogWriterUtils.getLogWriter().info("PRQueryRemoteNodeExceptionDUnitTest: Query executed successfully with ForceReattemptException on local and remote both.");
             } catch (Exception ex) {
               gotException = true;
-              fail("PRQueryRemoteNodeExceptionDUnitTest#testPRWithLocalAndRemoteException: Test received Exception", ex);
+              Assert.fail("PRQueryRemoteNodeExceptionDUnitTest#testPRWithLocalAndRemoteException: Test received Exception", ex);
             }
           }
         }
       );
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRemoteNodeExceptionDUnitTest#testPRWithLocalAndRemoteException: Querying with PR Local/Remote Exception Test ENDED");
   }
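
The query tests switch from the inherited fail(String, Throwable) and invokeInEveryVM helpers to the static Assert and Invoke utilities. A small sketch of both calls; the holder class and messages are stand-ins, only the two utility signatures are taken from the hunks above:

    import com.gemstone.gemfire.test.dunit.Assert;
    import com.gemstone.gemfire.test.dunit.Invoke;

    public class DUnitFailureSketch {

      public static void resetObserversInAllVMs() {
        // invokes the named static method of the given class in every DUnit VM
        Invoke.invokeInEveryVM(ExampleObserverHolder.class, "reset");
      }

      public static void rethrowAsFailure(Exception cause) {
        // Assert.fail keeps the original exception attached as the cause,
        // unlike JUnit's single-argument fail(String)
        Assert.fail("operation failed unexpectedly", cause);
      }

      // hypothetical stand-in for QueryObserverHolder
      public static class ExampleObserverHolder {
        public static void reset() {
        }
      }
    }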

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/snapshot/ParallelSnapshotDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/snapshot/ParallelSnapshotDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/snapshot/ParallelSnapshotDUnitTest.java
index ea1195e..ea95558 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/snapshot/ParallelSnapshotDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/snapshot/ParallelSnapshotDUnitTest.java
@@ -161,9 +161,8 @@ public class ParallelSnapshotDUnitTest extends CacheTestCase {
     loadCache();
   }
   
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-    
+  @Override
+  protected final void postTearDownCacheTestCase() throws Exception {
     File[] snaps = new File(".").listFiles(new FilenameFilter() {
       @Override
       public boolean accept(File dir, String name) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/snapshot/SnapshotByteArrayDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/snapshot/SnapshotByteArrayDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/snapshot/SnapshotByteArrayDUnitTest.java
index e43d39b..43fc2e3 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/snapshot/SnapshotByteArrayDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/snapshot/SnapshotByteArrayDUnitTest.java
@@ -28,6 +28,7 @@ import com.gemstone.gemfire.cache.snapshot.SnapshotOptions.SnapshotFormat;
 import com.gemstone.gemfire.cache.util.CacheListenerAdapter;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 
 public class SnapshotByteArrayDUnitTest extends CacheTestCase {
@@ -80,13 +81,13 @@ public class SnapshotByteArrayDUnitTest extends CacheTestCase {
           }
           
           private void dump(EntryEvent<Integer, Object> event) {
-            getLogWriter().info("op = " + event.getOperation());
+            LogWriterUtils.getLogWriter().info("op = " + event.getOperation());
             
             Object obj1 = event.getNewValue();
-            getLogWriter().info("new = " + obj1);
+            LogWriterUtils.getLogWriter().info("new = " + obj1);
 
             Object obj2 = event.getOldValue();
-            getLogWriter().info("old = " + obj2);
+            LogWriterUtils.getLogWriter().info("old = " + obj2);
           }
         });
         
@@ -109,11 +110,11 @@ public class SnapshotByteArrayDUnitTest extends CacheTestCase {
     loadCache();
   }
   
-  public void tearDown2() throws Exception {
+  @Override
+  protected final void preTearDownCacheTestCase() throws Exception {
     if (snap.exists()) {
       snap.delete();
     }
-    super.tearDown2();
   }
   
   public void loadCache() throws Exception {
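
The two snapshot tests illustrate the CacheTestCase variants of the new hooks: preTearDownCacheTestCase() runs before the base class performs its own tear-down, postTearDownCacheTestCase() runs after it, and neither needs a super.tearDown2() call. A hedged sketch, assuming the usual String-name constructor of CacheTestCase; the file name and method bodies are hypothetical:

    import java.io.File;

    import com.gemstone.gemfire.cache30.CacheTestCase;

    public class ExampleSnapshotDUnitTest extends CacheTestCase {

      private final File snapshotFile = new File("example.snapshot"); // hypothetical file

      public ExampleSnapshotDUnitTest(String name) {
        super(name);
      }

      @Override
      protected final void preTearDownCacheTestCase() throws Exception {
        // runs before CacheTestCase closes caches and cleans up
        if (snapshotFile.exists()) {
          snapshotFile.delete();
        }
      }

      @Override
      protected final void postTearDownCacheTestCase() throws Exception {
        // runs after the base class tear-down has completed
      }
    }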

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/snapshot/SnapshotPerformanceDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/snapshot/SnapshotPerformanceDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/snapshot/SnapshotPerformanceDUnitTest.java
index 309363e..83fc231 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/snapshot/SnapshotPerformanceDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/snapshot/SnapshotPerformanceDUnitTest.java
@@ -31,6 +31,7 @@ import com.gemstone.gemfire.cache.snapshot.RegionGenerator.SerializationType;
 import com.gemstone.gemfire.cache.snapshot.SnapshotOptions.SnapshotFormat;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 
 public class SnapshotPerformanceDUnitTest extends CacheTestCase {
@@ -52,7 +53,7 @@ public class SnapshotPerformanceDUnitTest extends CacheTestCase {
       for (SerializationType st : sts) {
         for (int i = 0; i < iterations; i++) {
           Region<Integer, MyObject> region = createRegion(rt, st);
-          getLogWriter().info("SNP: Testing region " + region.getName() + ", iteration = " + i);
+          LogWriterUtils.getLogWriter().info("SNP: Testing region " + region.getName() + ", iteration = " + i);
 
           loadData(region, st, dataCount);
           doExport(region);
@@ -77,9 +78,9 @@ public class SnapshotPerformanceDUnitTest extends CacheTestCase {
     double eps = 1000.0 * size / elapsed;
     double mbps = 1000.0 * bytes / elapsed / (1024 * 1024);
 
-    getLogWriter().info("SNP: Exported " + size + " entries (" + bytes + " bytes) in " + elapsed + " ms");
-    getLogWriter().info("SNP: Export entry rate: " + eps + " entries / sec");
-    getLogWriter().info("SNP: Export data rate: " + mbps + " MB / sec");
+    LogWriterUtils.getLogWriter().info("SNP: Exported " + size + " entries (" + bytes + " bytes) in " + elapsed + " ms");
+    LogWriterUtils.getLogWriter().info("SNP: Export entry rate: " + eps + " entries / sec");
+    LogWriterUtils.getLogWriter().info("SNP: Export data rate: " + mbps + " MB / sec");
   }
   
   private void doImport(Region<Integer, MyObject> region) throws Exception {
@@ -95,9 +96,9 @@ public class SnapshotPerformanceDUnitTest extends CacheTestCase {
     double eps = 1000.0 * size / elapsed;
     double mbps = 1000.0 * bytes / elapsed / (1024 * 1024);
 
-    getLogWriter().info("SNP: Imported " + size + " entries (" + bytes + " bytes) in " + elapsed + " ms");
-    getLogWriter().info("SNP: Import entry rate: " + eps + " entries / sec");
-    getLogWriter().info("SNP: Import data rate: " + mbps + " MB / sec");
+    LogWriterUtils.getLogWriter().info("SNP: Imported " + size + " entries (" + bytes + " bytes) in " + elapsed + " ms");
+    LogWriterUtils.getLogWriter().info("SNP: Import entry rate: " + eps + " entries / sec");
+    LogWriterUtils.getLogWriter().info("SNP: Import data rate: " + mbps + " MB / sec");
   }
   
   public void setUp() throws Exception {
@@ -105,10 +106,6 @@ public class SnapshotPerformanceDUnitTest extends CacheTestCase {
     createCache();
   }
   
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-  }
-  
   private void createCache() throws Exception {
     SerializableCallable setup = new SerializableCallable() {
       @Override
@@ -163,7 +160,7 @@ public class SnapshotPerformanceDUnitTest extends CacheTestCase {
     }
     
     long elapsed = System.currentTimeMillis() - start;
-    getLogWriter().info("SNP: loaded " + count + " entries in " + elapsed + " ms");
+    LogWriterUtils.getLogWriter().info("SNP: loaded " + count + " entries in " + elapsed + " ms");
     
     assertEquals(count, region.size());
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/Bug35214DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/Bug35214DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/Bug35214DUnitTest.java
index 04b214f..7f6d29a 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/Bug35214DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/Bug35214DUnitTest.java
@@ -29,10 +29,14 @@ import com.gemstone.gemfire.cache.RegionEvent;
 import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.cache.util.CacheListenerAdapter;
 import com.gemstone.gemfire.internal.cache.LocalRegion;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Make sure entry expiration does not happen during gii for bug 35214
@@ -93,7 +97,7 @@ public class Bug35214DUnitTest extends CacheTestCase {
                 //pause(5);
               }
               else {
-                getLogWriter().info("PROFILE CHECK: Found " + numProfiles + " getInitialImage Profiles (OK)");
+                LogWriterUtils.getLogWriter().info("PROFILE CHECK: Found " + numProfiles + " getInitialImage Profiles (OK)");
                 break;
               }
             }
@@ -156,7 +160,7 @@ public class Bug35214DUnitTest extends CacheTestCase {
       throw e;
     }
     catch (Throwable e1) {
-      fail("failed due to "+e1, e1);
+      Assert.fail("failed due to "+e1, e1);
     }
     System.setProperty(LocalRegion.EXPIRY_MS_PROPERTY, "true");
     com.gemstone.gemfire.internal.cache.InitialImageOperation.slowImageProcessing = 30;
@@ -184,7 +188,7 @@ public class Bug35214DUnitTest extends CacheTestCase {
         };
       af.addCacheListener(cl1);
       final Region r1 = createRootRegion("r1", af.create());
-      DistributedTestCase.join(updater, 60 * 1000, getLogWriter());
+      ThreadUtils.join(updater, 60 * 1000);
       WaitCriterion ev = new WaitCriterion() {
         public boolean done() {
           return r1.values().size() == 0;
@@ -193,7 +197,7 @@ public class Bug35214DUnitTest extends CacheTestCase {
           return "region never became empty";
         }
       };
-      DistributedTestCase.waitForCriterion(ev, 2 * 1000, 200, true);
+      Wait.waitForCriterion(ev, 2 * 1000, 200, true);
       {
         assertEquals(0, r1.values().size());
         assertEquals(ENTRY_COUNT, r1.keys().size());
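
Bug35214DUnitTest shows the polling pattern after the refactoring: the condition is still an anonymous WaitCriterion with done() and description(), but it is now passed to the static Wait.waitForCriterion rather than a method inherited from DistributedTestCase. A minimal sketch, using a hypothetical flag as the condition:

    import java.util.concurrent.atomic.AtomicBoolean;

    import com.gemstone.gemfire.test.dunit.Wait;
    import com.gemstone.gemfire.test.dunit.WaitCriterion;

    public class WaitCriterionSketch {

      public static void awaitFlag(final AtomicBoolean flag) {
        WaitCriterion ev = new WaitCriterion() {
          public boolean done() {
            // polled until it returns true or the timeout elapses
            return flag.get();
          }
          public String description() {
            return "flag was never set";
          }
        };
        // wait up to 60s, polling every 200ms, and fail the test on timeout
        Wait.waitForCriterion(ev, 60 * 1000, 200, true);
      }
    }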

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/Bug38741DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/Bug38741DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/Bug38741DUnitTest.java
index 417456f..d1a8bb4 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/Bug38741DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/Bug38741DUnitTest.java
@@ -50,9 +50,12 @@ import com.gemstone.gemfire.internal.cache.ha.HARegionQueue;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheClientProxy;
 import com.gemstone.gemfire.internal.cache.tier.sockets.ClientProxyMembershipID;
 import com.gemstone.gemfire.internal.cache.tier.sockets.ClientUpdateMessageImpl;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  *
@@ -153,7 +156,7 @@ public class Bug38741DUnitTest extends ClientServerTestCase {
     // Setup a client which subscribes to the server region, registers (aka pulls)
     // interest in keys which creates an assumed HARegionQueue on the server
     // (in the event that the above code didn't already create a HARegion)
-    final String serverHostName = getServerHostName(server.getHost());
+    final String serverHostName = NetworkUtils.getServerHostName(server.getHost());
     client.invoke(new CacheSerializableRunnable("Assert server copy behavior from client") {
       public void run2() throws CacheException {
         getCache();
@@ -200,7 +203,7 @@ public class Bug38741DUnitTest extends ClientServerTestCase {
             return "region queue never became empty";
           }
         };
-        DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+        Wait.waitForCriterion(ev, 60 * 1000, 200, true);
         
         // Capture the current processed message count to know
         // when the next message has been serialized
@@ -221,7 +224,7 @@ public class Bug38741DUnitTest extends ClientServerTestCase {
             return null;
           }
         };
-        DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+        Wait.waitForCriterion(ev, 60 * 1000, 200, true);
         
         // assert one serialization to send value to interested client
         // more than one implies copy-on-read behavior (bad)
@@ -242,7 +245,7 @@ public class Bug38741DUnitTest extends ClientServerTestCase {
           long start = NanoTimer.getTime();
           final int maxSecs = 30;
           while(!r.containsKey(ks2)) {
-            pause(100);
+            Wait.pause(100);
             if ((NanoTimer.getTime() - start) > TimeUnit.SECONDS.toNanos(maxSecs)) {
               fail("Waited over " + maxSecs + "s");
             }
@@ -313,7 +316,7 @@ public class Bug38741DUnitTest extends ClientServerTestCase {
               SerializationCountingValue scv = (SerializationCountingValue)cd.getDeserializedForReading();
               assertEquals(1, scv.count.get());
             } catch (IOException fail) {
-              fail("Unexpected IOException", fail);
+              Assert.fail("Unexpected IOException", fail);
             }
           }
         });
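
Bug38741DUnitTest also picks up NetworkUtils.getServerHostName for resolving the host a client should connect to and Wait.pause in place of the inherited pause. A short sketch of both helpers; the wrapper methods here are hypothetical:

    import com.gemstone.gemfire.test.dunit.Host;
    import com.gemstone.gemfire.test.dunit.NetworkUtils;
    import com.gemstone.gemfire.test.dunit.Wait;

    public class ClientSetupSketch {

      public static String serverHostFor(Host host) {
        // host name that client VMs should use to reach a server on this DUnit host
        return NetworkUtils.getServerHostName(host);
      }

      public static void briefPause() {
        // replaces the pause(int) method the tests used to inherit
        Wait.pause(100);
      }
    }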

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheListenerTestCase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheListenerTestCase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheListenerTestCase.java
index 4e4147f..a8df712 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheListenerTestCase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheListenerTestCase.java
@@ -16,10 +16,16 @@
  */
 package com.gemstone.gemfire.cache30;
 
-import com.gemstone.gemfire.cache.*;
-//import com.gemstone.gemfire.cache.util.*;
-//import java.util.*;
+import com.gemstone.gemfire.cache.AttributesFactory;
+import com.gemstone.gemfire.cache.CacheException;
+import com.gemstone.gemfire.cache.CacheListener;
+import com.gemstone.gemfire.cache.EntryEvent;
+import com.gemstone.gemfire.cache.EntryNotFoundException;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionAttributes;
+import com.gemstone.gemfire.cache.RegionEvent;
 import com.gemstone.gemfire.internal.cache.AbstractRegionMap;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 /**
  * An abstract class whose test methods test the functionality of
@@ -359,7 +365,7 @@ public abstract class CacheListenerTestCase
     assertTrue(region.getAttributes().getCacheListener() != null);
 //    com.gemstone.gemfire.internal.util.DebuggerSupport.waitForJavaDebugger(getLogWriter());
     region.destroyRegion();
-    pause(100); // extra pause
+    Wait.pause(100); // extra pause
     assertTrue(region.isDestroyed());
     assertTrue(listener.wasInvoked());
 
@@ -414,7 +420,7 @@ public abstract class CacheListenerTestCase
 
     region = createRegion(name, attrs);
     region.invalidateRegion();
-    pause(500);
+    Wait.pause(500);
     assertTrue(listener.wasInvoked());
     assertEquals(0, region.values().size());
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheLoaderTestCase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheLoaderTestCase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheLoaderTestCase.java
index fc23365..2e1def8 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheLoaderTestCase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheLoaderTestCase.java
@@ -26,6 +26,8 @@ import com.gemstone.gemfire.cache.LoaderHelper;
 import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.cache.TimeoutException;
+import com.gemstone.gemfire.test.dunit.Assert;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 /**
  * An abstract class whose test methods test the functionality of
@@ -68,7 +70,7 @@ public abstract class CacheLoaderTestCase
             }
 
           } catch (TimeoutException ex) {
-            fail("Why did I time out?", ex);
+            Assert.fail("Why did I time out?", ex);
           }
 
           Object argument = helper.getArgument();
@@ -303,7 +305,7 @@ public abstract class CacheLoaderTestCase
 
     assertEquals(oldValue, region.get(key));
     assertTrue(loader.wasInvoked());
-    pause(500);
+    Wait.pause(500);
     assertTrue(listener.wasInvoked());
 
     listener = new TestCacheListener() {
@@ -320,7 +322,7 @@ public abstract class CacheLoaderTestCase
     region.getAttributesMutator().setCacheListener(listener);
 
     region.put(key, newValue);
-    pause(500);
+    Wait.pause(500);
     assertFalse(loader.wasInvoked());
     assertTrue(listener.wasInvoked());
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheMapTxnDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheMapTxnDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheMapTxnDUnitTest.java
index 588ec7e..b889bc7 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheMapTxnDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheMapTxnDUnitTest.java
@@ -35,9 +35,11 @@ import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.cache.UnsupportedOperationInTransactionException;
 import com.gemstone.gemfire.distributed.DistributedSystem;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 public class CacheMapTxnDUnitTest extends DistributedTestCase{
@@ -64,13 +66,13 @@ public class CacheMapTxnDUnitTest extends DistributedTestCase{
       vm1.invoke(CacheMapTxnDUnitTest.class, "createCache");
     }
     
-    public void tearDown2(){
-        Host host = Host.getHost(0);
-        VM vm0 = host.getVM(0);
-        VM vm1 = host.getVM(1);
-        vm0.invoke(CacheMapTxnDUnitTest.class, "closeCache");
-        vm1.invoke(CacheMapTxnDUnitTest.class, "closeCache");
-        
+    @Override
+    protected final void preTearDown() throws Exception {
+      Host host = Host.getHost(0);
+      VM vm0 = host.getVM(0);
+      VM vm1 = host.getVM(1);
+      vm0.invoke(CacheMapTxnDUnitTest.class, "closeCache");
+      vm1.invoke(CacheMapTxnDUnitTest.class, "closeCache");
     }
     
     public static void createCache(){
@@ -145,15 +147,15 @@ public class CacheMapTxnDUnitTest extends DistributedTestCase{
         vm0.invoke(CacheMapTxnDUnitTest.class, "miscMethodsOwner");
         AsyncInvocation o2 = vm0.invokeAsync(CacheMapTxnDUnitTest.class, "miscMethodsNotOwner");//invoke in same vm but in seperate thread
         AsyncInvocation o3 = vm1.invokeAsync(CacheMapTxnDUnitTest.class, "miscMethodsNotOwner");//invoke in another vm
-        DistributedTestCase.join(o2, 30 * 1000, getLogWriter());
-        DistributedTestCase.join(o3, 30 * 1000, getLogWriter());
+        ThreadUtils.join(o2, 30 * 1000);
+        ThreadUtils.join(o3, 30 * 1000);
         
         if(o2.exceptionOccurred()){
-          fail("o2 failed", o2.getException());
+          Assert.fail("o2 failed", o2.getException());
         }
         
         if(o3.exceptionOccurred()){
-          fail("o3 failed", o3.getException());
+          Assert.fail("o3 failed", o3.getException());
         }
         
     }//end of testMiscMethods
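
CacheMapTxnDUnitTest shows how AsyncInvocation results are handled after the extraction: the invocation is joined through ThreadUtils and a remote failure is rethrown with Assert.fail so the original cause is preserved. A sketch under those assumptions, with a hypothetical workload method:

    import com.gemstone.gemfire.test.dunit.Assert;
    import com.gemstone.gemfire.test.dunit.AsyncInvocation;
    import com.gemstone.gemfire.test.dunit.ThreadUtils;
    import com.gemstone.gemfire.test.dunit.VM;

    public class AsyncInvocationSketch {

      public static void runAndCheck(VM vm) throws Exception {
        // invokeAsync returns immediately; the AsyncInvocation is joined like a thread
        AsyncInvocation async = vm.invokeAsync(AsyncInvocationSketch.class, "doWork");
        ThreadUtils.join(async, 30 * 1000);
        if (async.exceptionOccurred()) {
          // surface the remote failure with its cause attached
          Assert.fail("async invocation failed", async.getException());
        }
      }

      // hypothetical workload executed in the remote VM
      public static void doWork() {
      }
    }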

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheRegionsReliablityStatsCheckDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheRegionsReliablityStatsCheckDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheRegionsReliablityStatsCheckDUnitTest.java
index 3834f83..1efae19 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheRegionsReliablityStatsCheckDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheRegionsReliablityStatsCheckDUnitTest.java
@@ -33,6 +33,7 @@ import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.cache.CachePerfStats;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -101,7 +102,7 @@ public class CacheRegionsReliablityStatsCheckDUnitTest extends CacheTestCase {
 	      {
 	        
 	        Properties props = new Properties();
-	        props.setProperty(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+	        props.setProperty(DistributionConfig.LOG_LEVEL_NAME, LogWriterUtils.getDUnitLogLevel());
 	        props.setProperty(DistributionConfig.ROLES_NAME, rr1);
 
 	        getSystem(props);
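
The reliability-stats test now passes the DUnit run's log level into the distributed system properties through LogWriterUtils.getDUnitLogLevel() instead of the inherited getDUnitLogLevel(). A minimal sketch of building such properties; the helper class is hypothetical, the property key and both calls come from the hunk above:

    import java.util.Properties;

    import com.gemstone.gemfire.distributed.internal.DistributionConfig;
    import com.gemstone.gemfire.test.dunit.LogWriterUtils;

    public class LogLevelPropsSketch {

      public static Properties dunitLogLevelProps() {
        Properties props = new Properties();
        // propagate the DUnit run's configured log level to the distributed system
        props.setProperty(DistributionConfig.LOG_LEVEL_NAME, LogWriterUtils.getDUnitLogLevel());
        return props;
      }
    }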

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheStatisticsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheStatisticsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheStatisticsDUnitTest.java
index a07a749..422dbe9 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheStatisticsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheStatisticsDUnitTest.java
@@ -27,6 +27,7 @@ import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 /**
  * Tests the {@link CacheStatistics} that are maintained by a {@link
@@ -213,7 +214,7 @@ public class CacheStatisticsDUnitTest extends CacheTestCase {
 
     oldBefore = before;
     oldAfter = after;
-    pause(150);
+    Wait.pause(150);
     before = ((GemFireCacheImpl)getCache()).cacheTimeMillis();
     region.get(key);
     after = ((GemFireCacheImpl)getCache()).cacheTimeMillis();
@@ -222,7 +223,7 @@ public class CacheStatisticsDUnitTest extends CacheTestCase {
     assertInRange(before, after,  rootStats.getLastAccessedTime());
     assertInRange(oldBefore, oldAfter,  rootStats.getLastModifiedTime());
 
-    pause(150);
+    Wait.pause(150);
     before = ((GemFireCacheImpl)getCache()).cacheTimeMillis();
     region.put(key, value);
     CacheStatistics eStats = region.getEntry(key).getStatistics();
@@ -237,7 +238,7 @@ public class CacheStatisticsDUnitTest extends CacheTestCase {
 
     oldBefore = before;
     oldAfter = after;
-    pause(150);
+    Wait.pause(150);
     before = ((GemFireCacheImpl)getCache()).cacheTimeMillis();
     region.get(key);
     after = ((GemFireCacheImpl)getCache()).cacheTimeMillis();
@@ -252,7 +253,7 @@ public class CacheStatisticsDUnitTest extends CacheTestCase {
     long oldOldAfter = oldAfter;
     oldBefore = before;
     oldAfter = after;
-    pause(150);
+    Wait.pause(150);
     before = ((GemFireCacheImpl)getCache()).cacheTimeMillis();
     region.create(key2, null);
     CacheStatistics eStats2 = region.getEntry(key2).getStatistics();
@@ -272,7 +273,7 @@ public class CacheStatisticsDUnitTest extends CacheTestCase {
     // times
     oldBefore = before;
     oldAfter = after;
-    pause(150);
+    Wait.pause(150);
     before = ((GemFireCacheImpl)getCache()).cacheTimeMillis();
     region.invalidate(key2);
     after = ((GemFireCacheImpl)getCache()).cacheTimeMillis();
@@ -283,7 +284,7 @@ public class CacheStatisticsDUnitTest extends CacheTestCase {
     assertInRange(oldBefore, oldAfter,  rootStats.getLastAccessedTime());
     assertInRange(oldBefore, oldAfter,  rootStats.getLastModifiedTime());
 
-    pause(150);
+    Wait.pause(150);
     before = ((GemFireCacheImpl)getCache()).cacheTimeMillis();
     region.destroy(key2);
     after = ((GemFireCacheImpl)getCache()).cacheTimeMillis();
@@ -397,7 +398,7 @@ public class CacheStatisticsDUnitTest extends CacheTestCase {
 
     // make sure at least 100ms have passed; otherwise, the update
     // may not actually bump the statistics
-    pause(100);
+    Wait.pause(100);
 
     vm1.invoke(new CacheSerializableRunnable("Update") {
         public void run2() throws CacheException {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheTestCase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheTestCase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheTestCase.java
index 7f32a46..c228e5c 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheTestCase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheTestCase.java
@@ -54,9 +54,15 @@ import com.gemstone.gemfire.internal.cache.PartitionedRegion;
 import com.gemstone.gemfire.internal.cache.xmlcache.CacheCreation;
 import com.gemstone.gemfire.internal.cache.xmlcache.CacheXmlGenerator;
 import com.gemstone.gemfire.internal.logging.LogService;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * The abstract superclass of tests that require the creation of a
@@ -113,13 +119,13 @@ public abstract class CacheTestCase extends DistributedTestCase {
         }
         cache = c;
       } catch (CacheExistsException e) {
-        fail("the cache already exists", e);
+        Assert.fail("the cache already exists", e);
 
       } catch (RuntimeException ex) {
         throw ex;
 
       } catch (Exception ex) {
-        fail("Checked exception while initializing cache??", ex);
+        Assert.fail("Checked exception while initializing cache??", ex);
       } finally {
         System.clearProperty("gemfire.DISABLE_DISCONNECT_DS_ON_CACHE_CLOSE");
       }
@@ -137,13 +143,13 @@ public abstract class CacheTestCase extends DistributedTestCase {
         Cache c = CacheFactory.create(getLonerSystem()); 
         cache = c;
       } catch (CacheExistsException e) {
-        fail("the cache already exists", e);
+        Assert.fail("the cache already exists", e);
 
       } catch (RuntimeException ex) {
         throw ex;
 
       } catch (Exception ex) {
-        fail("Checked exception while initializing cache??", ex);
+        Assert.fail("Checked exception while initializing cache??", ex);
       } finally {
         System.clearProperty("gemfire.DISABLE_DISCONNECT_DS_ON_CACHE_CLOSE");
       }
@@ -163,13 +169,13 @@ public abstract class CacheTestCase extends DistributedTestCase {
         Cache c = CacheFactory.create(getLonerSystemWithEnforceUniqueHost()); 
         cache = c;
       } catch (CacheExistsException e) {
-        fail("the cache already exists", e);
+        Assert.fail("the cache already exists", e);
 
       } catch (RuntimeException ex) {
         throw ex;
 
       } catch (Exception ex) {
-        fail("Checked exception while initializing cache??", ex);
+        Assert.fail("Checked exception while initializing cache??", ex);
       } finally {
         System.clearProperty("gemfire.DISABLE_DISCONNECT_DS_ON_CACHE_CLOSE");
       }
@@ -200,7 +206,7 @@ public abstract class CacheTestCase extends DistributedTestCase {
         CacheXmlGenerator.generate(cache, pw);
         pw.close();
       } catch (IOException ex) {
-        fail("IOException during cache.xml generation to " + file, ex);
+        Assert.fail("IOException during cache.xml generation to " + file, ex);
       }
       cache = null;
       GemFireCacheImpl.testCacheXml = file;
@@ -226,7 +232,7 @@ public abstract class CacheTestCase extends DistributedTestCase {
         CacheXmlGenerator.generate(cache, pw, useSchema, xmlVersion);
         pw.close();
       } catch (IOException ex) {
-        fail("IOException during cache.xml generation to " + file, ex);
+        Assert.fail("IOException during cache.xml generation to " + file, ex);
       }
       cache = null;
       GemFireCacheImpl.testCacheXml = file;
@@ -258,7 +264,7 @@ public abstract class CacheTestCase extends DistributedTestCase {
       final GemFireCacheImpl gfCache = GemFireCacheImpl.getInstance();
       if (gfCache != null && !gfCache.isClosed()
           && gfCache.getCancelCriterion().cancelInProgress() != null) {
-        waitForCriterion(new WaitCriterion() {
+        Wait.waitForCriterion(new WaitCriterion() {
 
           public boolean done() {
             return gfCache.isClosed();
@@ -274,7 +280,7 @@ public abstract class CacheTestCase extends DistributedTestCase {
         createCache(client, cf);
       }
       if (client && cache != null) {
-        addExpectedException("java.net.ConnectException");
+        IgnoredException.addIgnoredException("java.net.ConnectException");
       }
       return cache;
     }
@@ -291,7 +297,7 @@ public abstract class CacheTestCase extends DistributedTestCase {
       final GemFireCacheImpl gfCache = GemFireCacheImpl.getInstance();
       if (gfCache != null && !gfCache.isClosed()
           && gfCache.getCancelCriterion().cancelInProgress() != null) {
-        waitForCriterion(new WaitCriterion() {
+        Wait.waitForCriterion(new WaitCriterion() {
 
           public boolean done() {
             return gfCache.isClosed();
@@ -308,7 +314,7 @@ public abstract class CacheTestCase extends DistributedTestCase {
         cache = (Cache)factory.create();
       }
       if (cache != null) {
-        addExpectedException("java.net.ConnectException");
+        IgnoredException.addIgnoredException("java.net.ConnectException");
       }
       return (ClientCache)cache;
     }
@@ -377,11 +383,13 @@ public abstract class CacheTestCase extends DistributedTestCase {
   /** Closed the cache in all VMs. */
   protected void closeAllCache() {
     closeCache();
-    invokeInEveryVM(CacheTestCase.class, "closeCache");
+    Invoke.invokeInEveryVM(CacheTestCase.class, "closeCache");
   }
 
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDown() throws Exception {
+    preTearDownCacheTestCase();
+    
     // locally destroy all root regions and close the cache
     remoteTearDown();
     // Now invoke it in every VM
@@ -389,10 +397,17 @@ public abstract class CacheTestCase extends DistributedTestCase {
       Host host = Host.getHost(h);
       for (int v = 0; v < host.getVMCount(); v++) {
         VM vm = host.getVM(v);
-        vm.invoke(CacheTestCase.class, "remoteTearDown");
+        vm.invoke(()->remoteTearDown());
       }
     }
-    super.tearDown2(); 
+    
+    postTearDownCacheTestCase();
+  }
+  
+  protected void preTearDownCacheTestCase() throws Exception {
+  }
+
+  protected void postTearDownCacheTestCase() throws Exception {
   }
 
   /**
@@ -411,7 +426,7 @@ public abstract class CacheTestCase extends DistributedTestCase {
         try {
           cleanDiskDirs();
         } catch(Exception e) {
-          getLogWriter().error("Error cleaning disk dirs", e);
+          LogWriterUtils.getLogWriter().error("Error cleaning disk dirs", e);
         }
       }
     }

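In the CacheTestCase hunk above, the tearDown2() override is replaced by a final preTearDown() that brackets the cache teardown with two new empty hooks, and the per-VM cleanup switches from reflective invocation to a lambda. Subclasses therefore extend teardown by overriding preTearDownCacheTestCase() or postTearDownCacheTestCase() rather than tearDown2(). A minimal sketch of a subclass written against the new hooks (the class name and log messages are illustrative only):

import com.gemstone.gemfire.cache30.CacheTestCase;
import com.gemstone.gemfire.test.dunit.LogWriterUtils;

public class ExampleRegionDUnitTest extends CacheTestCase {

  public ExampleRegionDUnitTest(String name) {
    super(name);
  }

  @Override
  protected void preTearDownCacheTestCase() throws Exception {
    // Runs before CacheTestCase locally destroys root regions and closes the
    // cache in this and every other VM; cleanup that used to live at the top
    // of a tearDown2() override moves here.
    LogWriterUtils.getLogWriter().info("cleaning up test-specific state");
  }

  @Override
  protected void postTearDownCacheTestCase() throws Exception {
    // Runs after remoteTearDown() has executed in every VM.
    LogWriterUtils.getLogWriter().info("teardown complete");
  }
}
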
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml30DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml30DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml30DUnitTest.java
index 6df49b2..85fa8a4 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml30DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml30DUnitTest.java
@@ -61,7 +61,8 @@ import com.gemstone.gemfire.internal.cache.xmlcache.ClientCacheCreation;
 import com.gemstone.gemfire.internal.cache.xmlcache.Declarable2;
 import com.gemstone.gemfire.internal.cache.xmlcache.RegionAttributesCreation;
 import com.gemstone.gemfire.internal.cache.xmlcache.RegionCreation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.util.test.TestUtil;
 
 /**
@@ -96,7 +97,7 @@ public class CacheXml30DUnitTest extends CacheXmlTestCase {
     setXmlFile(nonExistent);
 //    System.out.println("testNonExistentFile - set: " + System.currentTimeMillis());
 
-    ExpectedException expectedException = CacheTestCase.addExpectedException(LocalizedStrings.
+    IgnoredException expectedException = IgnoredException.addIgnoredException(LocalizedStrings.
         GemFireCache_DECLARATIVE_CACHE_XML_FILERESOURCE_0_DOES_NOT_EXIST.toLocalizedString(nonExistent.getPath()));
     try {
       getCache();
@@ -119,7 +120,7 @@ public class CacheXml30DUnitTest extends CacheXmlTestCase {
     dir.deleteOnExit();
     setXmlFile(dir);
 
-    ExpectedException expectedException = CacheTestCase.addExpectedException(LocalizedStrings.
+    IgnoredException expectedException = IgnoredException.addIgnoredException(LocalizedStrings.
         GemFireCache_DECLARATIVE_XML_FILE_0_IS_NOT_A_FILE.toLocalizedString(dir));
     try {
       getCache();
@@ -293,7 +294,7 @@ public class CacheXml30DUnitTest extends CacheXmlTestCase {
   public void testMalformed() {
     setXmlFile(findFile("malformed.xml"));
 
-    ExpectedException expectedException = DistributedTestCase.addExpectedException("While reading Cache XML file");
+    IgnoredException expectedException = IgnoredException.addIgnoredException("While reading Cache XML file");
     try {
       getCache();
       fail("Should have thrown a CacheXmlException");
@@ -311,7 +312,7 @@ public class CacheXml30DUnitTest extends CacheXmlTestCase {
   public void testBadInt() {
     setXmlFile(findFile("badInt.xml"));
 
-    ExpectedException expectedException = DistributedTestCase.addExpectedException("While reading Cache XML file");
+    IgnoredException expectedException = IgnoredException.addIgnoredException("While reading Cache XML file");
     try {
       getCache();
       fail("Should have thrown a CacheXmlException");
@@ -333,7 +334,7 @@ public class CacheXml30DUnitTest extends CacheXmlTestCase {
   public void testBadFloat() {
     setXmlFile(findFile("badFloat.xml"));
 
-    ExpectedException expectedException = DistributedTestCase.addExpectedException("While reading Cache XML file");
+    IgnoredException expectedException = IgnoredException.addIgnoredException("While reading Cache XML file");
     try {
       getCache();
       fail("Should have thrown a CacheXmlException");
@@ -352,7 +353,7 @@ public class CacheXml30DUnitTest extends CacheXmlTestCase {
   public void testBadScope() {
     setXmlFile(findFile("badScope.xml"));
 
-    ExpectedException expectedException = DistributedTestCase.addExpectedException("While reading Cache XML file");
+    IgnoredException expectedException = IgnoredException.addIgnoredException("While reading Cache XML file");
     try {
       getCache();
       fail("Should have thrown a CacheXmlException");
@@ -371,7 +372,7 @@ public class CacheXml30DUnitTest extends CacheXmlTestCase {
   public void testBadKeyConstraintClass() {
     setXmlFile(findFile("badKeyConstraintClass.xml"));
 
-    ExpectedException expectedException = DistributedTestCase.addExpectedException("While reading Cache XML file");
+    IgnoredException expectedException = IgnoredException.addIgnoredException("While reading Cache XML file");
     try {
       getCache();
       fail("Should have thrown a CacheXmlException");
@@ -390,7 +391,7 @@ public class CacheXml30DUnitTest extends CacheXmlTestCase {
   public void testCallbackNotDeclarable() {
     setXmlFile(findFile("callbackNotDeclarable.xml"));
 
-    ExpectedException expectedException = DistributedTestCase.addExpectedException("While reading Cache XML file");
+    IgnoredException expectedException = IgnoredException.addIgnoredException("While reading Cache XML file");
     try {
       getCache();
       fail("Should have thrown a CacheXmlException");
@@ -412,7 +413,7 @@ public class CacheXml30DUnitTest extends CacheXmlTestCase {
   public void testCallbackWithException() {
     setXmlFile(findFile("callbackWithException.xml"));
 
-    ExpectedException expectedException = DistributedTestCase.addExpectedException("While reading Cache XML file");
+    IgnoredException expectedException = IgnoredException.addIgnoredException("While reading Cache XML file");
     try {
       getCache();
       fail("Should have thrown a CacheXmlException");
@@ -434,7 +435,7 @@ public class CacheXml30DUnitTest extends CacheXmlTestCase {
   public void testLoaderNotLoader() {
     setXmlFile(findFile("loaderNotLoader.xml"));
 
-    ExpectedException expectedException = DistributedTestCase.addExpectedException("While reading Cache XML file");
+    IgnoredException expectedException = IgnoredException.addIgnoredException("While reading Cache XML file");
     try {
       getCache();
       fail("Should have thrown a CacheXmlException");
@@ -595,7 +596,7 @@ public class CacheXml30DUnitTest extends CacheXmlTestCase {
       assertTrue("No XML files in " + dirName, xmlFiles.length > 0);
       for (int i = 0; i < xmlFiles.length; i++) {
         File xmlFile = xmlFiles[i];
-        getLogWriter().info("Parsing " + xmlFile);
+        LogWriterUtils.getLogWriter().info("Parsing " + xmlFile);
 
         FileInputStream fis = new FileInputStream(xmlFile);
         CacheXmlParser.parse(fis);

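These CacheXml30DUnitTest hunks swap the ExpectedException helper formerly reached through DistributedTestCase/CacheTestCase for the standalone IgnoredException class. A minimal sketch of the add/remove pattern the updated tests follow; the message string is the one used in the hunks above, while the remove() call in the finally block is an assumption about the elided cleanup rather than something shown in this diff:

import com.gemstone.gemfire.test.dunit.IgnoredException;

public class IgnoredExceptionSketch {
  void expectCacheXmlFailure() {
    IgnoredException ignored =
        IgnoredException.addIgnoredException("While reading Cache XML file");
    try {
      // Perform the operation that is expected to log the suppressed error,
      // e.g. getCache() against a malformed cache.xml.
    } finally {
      ignored.remove(); // assumed cleanup: ends the suppression window
    }
  }
}
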
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml41DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml41DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml41DUnitTest.java
index bc1f7e4..7fffce5 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml41DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml41DUnitTest.java
@@ -42,7 +42,8 @@ import com.gemstone.gemfire.internal.cache.xmlcache.CacheXml;
 import com.gemstone.gemfire.internal.cache.xmlcache.CacheXmlGenerator;
 import com.gemstone.gemfire.internal.cache.xmlcache.RegionAttributesCreation;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
 
 /**
  * Tests the declarative caching functionality introduced in GemFire 4.1.
@@ -162,7 +163,7 @@ public class CacheXml41DUnitTest extends CacheXml40DUnitTest
   {
     setXmlFile(findFile("unknownNamedAttributes.xml"));
 
-    ExpectedException expectedException = CacheTestCase.addExpectedException(LocalizedStrings.RegionAttributesCreation_CANNOT_REFERENCE_NONEXISTING_REGION_ATTRIBUTES_NAMED_0.toLocalizedString());
+    IgnoredException expectedException = IgnoredException.addIgnoredException(LocalizedStrings.RegionAttributesCreation_CANNOT_REFERENCE_NONEXISTING_REGION_ATTRIBUTES_NAMED_0.toLocalizedString());
     try {
       getCache();
       fail("Should have thrown an IllegalStateException");
@@ -198,7 +199,7 @@ public class CacheXml41DUnitTest extends CacheXml40DUnitTest
 
     setXmlFile(findFile("sameRootRegion.xml"));
 
-    ExpectedException expectedException = DistributedTestCase.addExpectedException("While reading Cache XML file");
+    IgnoredException expectedException = IgnoredException.addIgnoredException("While reading Cache XML file");
     try {
       getCache();
       fail("Should have thrown a CacheXmlException");
@@ -209,7 +210,7 @@ public class CacheXml41DUnitTest extends CacheXml40DUnitTest
       assertTrue(cause instanceof SAXException);
       cause = ((SAXException)cause).getException();
       if (!(cause instanceof RegionExistsException)) {
-        fail("Expected a RegionExistsException, not a "
+        Assert.fail("Expected a RegionExistsException, not a "
             + cause.getClass().getName(), cause);
       }
     } finally {
@@ -242,7 +243,7 @@ public class CacheXml41DUnitTest extends CacheXml40DUnitTest
 
     setXmlFile(findFile("sameSubregion.xml"));
 
-    ExpectedException expectedException = DistributedTestCase.addExpectedException("While reading Cache XML file");
+    IgnoredException expectedException = IgnoredException.addIgnoredException("While reading Cache XML file");
     try {
       getCache();
       fail("Should have thrown a CacheXmlException");
@@ -253,7 +254,7 @@ public class CacheXml41DUnitTest extends CacheXml40DUnitTest
       assertTrue(cause instanceof SAXException);
       cause = ((SAXException)cause).getException();
       if (!(cause instanceof RegionExistsException)) {
-        fail("Expected a RegionExistsException, not a "
+        Assert.fail("Expected a RegionExistsException, not a "
             + cause.getClass().getName(), cause);
       }
     } finally {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml45DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml45DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml45DUnitTest.java
index 30284a4..19dc89f 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml45DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml45DUnitTest.java
@@ -169,7 +169,7 @@ public class CacheXml45DUnitTest extends CacheXml41DUnitTest {
     } finally {
       this.xmlProps = null;
       try {
-        tearDown2();
+        preTearDown();
       } finally {
         DistributedRegion.ignoreReconnect = false;
       }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml57DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml57DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml57DUnitTest.java
index be78e26..f2eddd6 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml57DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml57DUnitTest.java
@@ -53,6 +53,8 @@ import com.gemstone.gemfire.internal.cache.xmlcache.Declarable2;
 import com.gemstone.gemfire.internal.cache.xmlcache.RegionAttributesCreation;
 import com.gemstone.gemfire.internal.cache.xmlcache.RegionCreation;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.VM;
@@ -207,7 +209,7 @@ public class CacheXml57DUnitTest extends CacheXml55DUnitTest
     RegionAttributesCreation attrs = new RegionAttributesCreation(cache);
     attrs.setPoolName("mypool");
     cache.createVMRegion("rootNORMAL", attrs);
-    addExpectedException("Connection refused: connect");
+    IgnoredException.addIgnoredException("Connection refused: connect");
     testXml(cache);
     Cache c = getCache();
     assertNotNull(c);
@@ -326,7 +328,7 @@ public class CacheXml57DUnitTest extends CacheXml55DUnitTest
     RegionAttributesCreation attrs = new RegionAttributesCreation(cache);
     attrs.setPoolName("mypool");
     cache.createVMRegion("rootNORMAL", attrs);
-    ExpectedException expectedException = CacheTestCase.addExpectedException(LocalizedStrings.AbstractRegion_THE_CONNECTION_POOL_0_HAS_NOT_BEEN_CREATED.toLocalizedString("mypool"));
+    IgnoredException expectedException = IgnoredException.addIgnoredException(LocalizedStrings.AbstractRegion_THE_CONNECTION_POOL_0_HAS_NOT_BEEN_CREATED.toLocalizedString("mypool"));
     try {
       testXml(cache);
       fail("expected IllegalStateException");
@@ -344,7 +346,7 @@ public class CacheXml57DUnitTest extends CacheXml55DUnitTest
       // now make sure declarative cache can't create the same pool
       CacheCreation cache = new CacheCreation();
       cache.createPoolFactory().addLocator(ALIAS2, 12345).create("mypool");
-      ExpectedException expectedException = CacheTestCase.addExpectedException(LocalizedStrings.PoolManagerImpl_POOL_NAMED_0_ALREADY_EXISTS.toLocalizedString("mypool"));
+      IgnoredException expectedException = IgnoredException.addIgnoredException(LocalizedStrings.PoolManagerImpl_POOL_NAMED_0_ALREADY_EXISTS.toLocalizedString("mypool"));
       try {
         testXml(cache);
         fail("expected IllegalStateException");
@@ -358,10 +360,10 @@ public class CacheXml57DUnitTest extends CacheXml55DUnitTest
   }
 
   public void testDynamicRegionFactoryConnectionPool() throws CacheException, IOException {
-    addExpectedException("Connection reset");
-    addExpectedException("SocketTimeoutException");
-    addExpectedException("ServerConnectivityException");
-    addExpectedException("Socket Closed");
+    IgnoredException.addIgnoredException("Connection reset");
+    IgnoredException.addIgnoredException("SocketTimeoutException");
+    IgnoredException.addIgnoredException("ServerConnectivityException");
+    IgnoredException.addIgnoredException("Socket Closed");
     getSystem();
     VM vm0 = Host.getHost(0).getVM(0);
     final int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
@@ -378,7 +380,7 @@ public class CacheXml57DUnitTest extends CacheXml55DUnitTest
     });
     CacheCreation cache = new CacheCreation();
     cache.createPoolFactory()
-    .addServer(getServerHostName(vm0.getHost()), port)
+    .addServer(NetworkUtils.getServerHostName(vm0.getHost()), port)
       .setSubscriptionEnabled(true)
     .create("connectionPool");
     cache.setDynamicRegionFactoryConfig(new DynamicRegionFactory.Config(null, "connectionPool", false, false));

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml60DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml60DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml60DUnitTest.java
index 3a20c07..39edee3 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml60DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml60DUnitTest.java
@@ -37,7 +37,7 @@ import com.gemstone.gemfire.internal.cache.xmlcache.RegionAttributesCreation;
 import com.gemstone.gemfire.internal.cache.xmlcache.ResourceManagerCreation;
 import com.gemstone.gemfire.internal.cache.xmlcache.SerializerCreation;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
 
 /**
  * Tests 6.0 cache.xml features.
@@ -192,7 +192,7 @@ public class CacheXml60DUnitTest extends CacheXml58DUnitTest
     rmc.setEvictionHeapPercentage(high);
     rmc.setCriticalHeapPercentage(low);
     cache.setResourceManagerCreation(rmc);
-    ExpectedException expectedException = CacheTestCase.addExpectedException(LocalizedStrings.MemoryMonitor_EVICTION_PERCENTAGE_LTE_CRITICAL_PERCENTAGE.toLocalizedString());
+    IgnoredException expectedException = IgnoredException.addIgnoredException(LocalizedStrings.MemoryMonitor_EVICTION_PERCENTAGE_LTE_CRITICAL_PERCENTAGE.toLocalizedString());
     try {
       testXml(cache);
       assertTrue(false);
@@ -310,7 +310,7 @@ public class CacheXml60DUnitTest extends CacheXml58DUnitTest
     closeCache();
     cc.setSerializerCreation(sc);
 
-    ExpectedException expectedException = DistributedTestCase.addExpectedException("While reading Cache XML file");
+    IgnoredException expectedException = IgnoredException.addIgnoredException("While reading Cache XML file");
     try {
       testXml(cc);
       fail("Instantiator should not have registered due to bad class.");
@@ -324,7 +324,7 @@ public class CacheXml60DUnitTest extends CacheXml58DUnitTest
     closeCache();
     cc.setSerializerCreation(sc);
 
-    ExpectedException expectedException1 = DistributedTestCase.addExpectedException("While reading Cache XML file");
+    IgnoredException expectedException1 = IgnoredException.addIgnoredException("While reading Cache XML file");
     try {
       testXml(cc);
       fail("Serializer should not have registered due to bad class.");

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml65DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml65DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml65DUnitTest.java
index e4d62f0..40718e1 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml65DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml65DUnitTest.java
@@ -60,6 +60,8 @@ import com.gemstone.gemfire.internal.cache.xmlcache.RegionAttributesCreation;
 import com.gemstone.gemfire.internal.cache.xmlcache.RegionCreation;
 import com.gemstone.gemfire.internal.cache.xmlcache.ResourceManagerCreation;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.test.dunit.Assert;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
 
 /**
  * Tests 6.5 cache.xml features.
@@ -247,7 +249,7 @@ public class CacheXml65DUnitTest extends CacheXml61DUnitTest {
     attrs.setPoolName("mypool");
     attrs.setDataPolicy(DataPolicy.EMPTY); // required for multiuser mode
     cache.createVMRegion("rootNORMAL", attrs);
-    addExpectedException("Connection refused: connect");
+    IgnoredException.addIgnoredException("Connection refused: connect");
     testXml(cache);
     Cache c = getCache();
     assertNotNull(c);
@@ -296,7 +298,7 @@ public class CacheXml65DUnitTest extends CacheXml61DUnitTest {
     } catch (IllegalStateException e) {
       assertTrue(e.getMessage().contains(LocalizedStrings.DiskStore_IS_USED_IN_NONPERSISTENT_REGION.toLocalizedString()));
     } catch (Exception ex) {
-      fail("Unexpected exception", ex);
+      Assert.fail("Unexpected exception", ex);
     }
 
     EvictionAttributes ea = EvictionAttributes.createLRUEntryAttributes(1000, EvictionAction.OVERFLOW_TO_DISK);
@@ -304,9 +306,9 @@ public class CacheXml65DUnitTest extends CacheXml61DUnitTest {
     try {
       root = (RegionCreation)cache.createRegion("root", attrs);
     } catch (IllegalStateException e) {
-      fail("With eviction of overflow to disk, region can specify disk store name", e);
+      Assert.fail("With eviction of overflow to disk, region can specify disk store name", e);
     } catch (Exception ex) {
-      fail("Unexpected exception", ex);
+      Assert.fail("Unexpected exception", ex);
     }
     
     File dir = new File("testDiskStoreValidation");
@@ -323,7 +325,7 @@ public class CacheXml65DUnitTest extends CacheXml61DUnitTest {
       assertTrue(e.getMessage().contains(LocalizedStrings.DiskStore_Deprecated_API_0_Cannot_Mix_With_DiskStore_1
           .toLocalizedString(new Object[] {"setDiskDirs or setDiskWriteAttributes", getUniqueName()})));
     } catch (Exception ex) { 
-      fail("Unexpected exception", ex);
+      Assert.fail("Unexpected exception", ex);
     }
 
     try {
@@ -335,7 +337,7 @@ public class CacheXml65DUnitTest extends CacheXml61DUnitTest {
       assertTrue(e.getMessage().contains(LocalizedStrings.DiskStore_Deprecated_API_0_Cannot_Mix_With_DiskStore_1
           .toLocalizedString(new Object[] {"setDiskDirs", getUniqueName()})));
     } catch (Exception ex) {
-      fail("Unexpected exception", ex);
+      Assert.fail("Unexpected exception", ex);
     }
 
     testXml(cache);
@@ -369,7 +371,7 @@ public class CacheXml65DUnitTest extends CacheXml61DUnitTest {
     try {
       root = (RegionCreation)cache.createRegion("root", ra);
     } catch (Exception ex) {
-      fail("Unexpected exception", ex);
+      Assert.fail("Unexpected exception", ex);
     }
     
     factory = new AttributesFactory();
@@ -383,7 +385,7 @@ public class CacheXml65DUnitTest extends CacheXml61DUnitTest {
     try {
       root2 = (RegionCreation)cache.createRegion("root2", ra);
     } catch (Exception ex) {
-      fail("Unexpected exception", ex);
+      Assert.fail("Unexpected exception", ex);
     }
 
     factory = new AttributesFactory();
@@ -397,7 +399,7 @@ public class CacheXml65DUnitTest extends CacheXml61DUnitTest {
     try {
       root3 = (RegionCreation)cache.createRegion("root3", ra);
     } catch (Exception ex) {
-      fail("Unexpected exception", ex);
+      Assert.fail("Unexpected exception", ex);
     }
 
     testXml(cache);
@@ -418,7 +420,7 @@ public class CacheXml65DUnitTest extends CacheXml61DUnitTest {
     try {
       root = (RegionCreation)cache.createRegion("root", ra);
     } catch (Exception ex) {
-      fail("Unexpected exception", ex);
+      Assert.fail("Unexpected exception", ex);
     }
 
     testXml(cache);
@@ -481,7 +483,7 @@ public class CacheXml65DUnitTest extends CacheXml61DUnitTest {
           LocalizedStrings.DiskStore_Deprecated_API_0_Cannot_Mix_With_DiskStore_1
           .toLocalizedString(new Object[] {"setOverflowDirectory", getUniqueName()})));
     } catch (Exception ex) {
-      fail("Unexpected exception", ex);
+      Assert.fail("Unexpected exception", ex);
     }
 
     cache.getLogger().config(

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml66DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml66DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml66DUnitTest.java
index ce07ecb..63a9202 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml66DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml66DUnitTest.java
@@ -41,6 +41,8 @@ import com.gemstone.gemfire.internal.cache.xmlcache.CacheXml;
 import com.gemstone.gemfire.internal.cache.xmlcache.ClientCacheCreation;
 import com.gemstone.gemfire.internal.cache.xmlcache.RegionAttributesCreation;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.test.dunit.Assert;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
 
 /**
  * Tests 7.0 cache.xml feature : Fixed Partitioning.
@@ -157,7 +159,7 @@ public class CacheXml66DUnitTest extends CacheXml65DUnitTest{
     catch (Exception illegal) {
       if (!((illegal instanceof IllegalStateException) && (illegal.getMessage()
           .contains("can not be specified in PartitionAttributesFactory")))) {
-        fail("Expected IllegalStateException ", illegal);
+        Assert.fail("Expected IllegalStateException ", illegal);
       }
 
       RegionAttributesCreation attrs = new RegionAttributesCreation();
@@ -353,7 +355,7 @@ public class CacheXml66DUnitTest extends CacheXml65DUnitTest{
     CacheTransactionManagerCreation txMgrCreation = new CacheTransactionManagerCreation();
     txMgrCreation.setWriter(new TestTransactionWriter());
     cc.addCacheTransactionManagerCreation(txMgrCreation);
-    ExpectedException expectedException = addExpectedException(LocalizedStrings.TXManager_NO_WRITER_ON_CLIENT.toLocalizedString());
+    IgnoredException expectedException = IgnoredException.addIgnoredException(LocalizedStrings.TXManager_NO_WRITER_ON_CLIENT.toLocalizedString());
     try {
       testXml(cc);
       fail("expected exception not thrown");

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml80DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml80DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml80DUnitTest.java
index 27d7bf2..98134ce 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml80DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml80DUnitTest.java
@@ -47,6 +47,7 @@ import com.gemstone.gemfire.internal.cache.xmlcache.CacheXml;
 import com.gemstone.gemfire.internal.cache.xmlcache.CacheXmlGenerator;
 import com.gemstone.gemfire.internal.cache.xmlcache.DiskStoreAttributesCreation;
 import com.gemstone.gemfire.internal.cache.xmlcache.RegionAttributesCreation;
+import com.gemstone.gemfire.test.dunit.Assert;
 
 /**
  * @author dsmith
@@ -121,7 +122,7 @@ public class CacheXml80DUnitTest extends CacheXml70DUnitTest {
       pw.close();
     }
     catch (IOException ex) {
-      fail("IOException during cache.xml generation to " + file, ex);
+      Assert.fail("IOException during cache.xml generation to " + file, ex);
     }
     
     // Get index info before closing cache.

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml81DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml81DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml81DUnitTest.java
index c94505f..0a41b10 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml81DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml81DUnitTest.java
@@ -31,7 +31,7 @@ import com.gemstone.gemfire.internal.cache.xmlcache.CacheCreation;
 import com.gemstone.gemfire.internal.cache.xmlcache.CacheXml;
 import com.gemstone.gemfire.internal.cache.xmlcache.RegionAttributesCreation;
 import com.gemstone.gemfire.internal.cache.xmlcache.XmlParser;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
 
 /**
  * Tests 8.1 schema based configuration. From this point all config test cases
@@ -140,7 +140,7 @@ public class CacheXml81DUnitTest extends CacheXml80DUnitTest {
     assertEquals(0, extension.onCreateCounter.get());
     assertEquals(0, extension.getXmlGeneratorCounter.get());
 
-    ExpectedException expectedException = DistributedTestCase.addExpectedException("While reading Cache XML file");
+    IgnoredException expectedException = IgnoredException.addIgnoredException("While reading Cache XML file");
     try {
       testXml(cache);
       fail("Excepted CacheXmlException");



[25/62] [abbrv] [partial] incubator-geode git commit: GEODE-773: Extract static methods from DistributedTestCase

Posted by je...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/fixed/FixedPartitioningTestBase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/fixed/FixedPartitioningTestBase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/fixed/FixedPartitioningTestBase.java
index f48ab61..95ea49f 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/fixed/FixedPartitioningTestBase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/fixed/FixedPartitioningTestBase.java
@@ -73,8 +73,10 @@ import com.gemstone.gemfire.internal.cache.tier.sockets.CacheClientProxy;
 import com.gemstone.gemfire.internal.cache.tier.sockets.Message;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * This is the base class to do operations
@@ -154,7 +156,7 @@ public class FixedPartitioningTestBase extends DistributedTestCase {
       assertNotNull(cache);
     }
     catch (Exception e) {
-      fail("Failed while creating the cache", e);
+      com.gemstone.gemfire.test.dunit.Assert.fail("Failed while creating the cache", e);
     }
   }
 
@@ -195,7 +197,7 @@ public class FixedPartitioningTestBase extends DistributedTestCase {
     try {
       RebalanceResults result = operation.getResults();
     } catch (InterruptedException e) {
-      fail("Not expecting exception", e);
+      com.gemstone.gemfire.test.dunit.Assert.fail("Not expecting exception", e);
     }
     
   }
@@ -342,7 +344,7 @@ public class FixedPartitioningTestBase extends DistributedTestCase {
           assertEquals(customer, region_Cust.get(custid));
         }
         catch (Exception e) {
-          fail(
+          com.gemstone.gemfire.test.dunit.Assert.fail(
               "getForColocation : failed while doing get operation in CustomerPartitionedRegion ",
               e);
         }
@@ -457,11 +459,11 @@ public class FixedPartitioningTestBase extends DistributedTestCase {
         partitionedregion.put(custid, customer);
       }
       catch (Exception e) {
-        fail(
+        com.gemstone.gemfire.test.dunit.Assert.fail(
             "putCustomerPartitionedRegion : failed while doing put operation in CustomerPartitionedRegion ",
             e);
       }
-      getLogWriter().info("Customer :- { " + custid + " : " + customer + " }");
+      LogWriterUtils.getLogWriter().info("Customer :- { " + custid + " : " + customer + " }");
     }
   }
   
@@ -481,11 +483,11 @@ public class FixedPartitioningTestBase extends DistributedTestCase {
           partitionedregion.put(orderId, order);
         }
         catch (Exception e) {
-          fail(
+          com.gemstone.gemfire.test.dunit.Assert.fail(
               "putOrderPartitionedRegion : failed while doing put operation in OrderPartitionedRegion ",
               e);
         }
-        getLogWriter().info("Order :- { " + orderId + " : " + order + " }");
+        LogWriterUtils.getLogWriter().info("Order :- { " + orderId + " : " + order + " }");
       }
     }
   }
@@ -508,11 +510,11 @@ public class FixedPartitioningTestBase extends DistributedTestCase {
             partitionedregion.put(shipmentId, shipment);
           }
           catch (Exception e) {
-            fail(
+            com.gemstone.gemfire.test.dunit.Assert.fail(
                 "putShipmentPartitionedRegion : failed while doing put operation in ShipmentPartitionedRegion ",
                 e);
           }
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "Shipment :- { " + shipmentId + " : " + shipment + " }");
         }
       }
@@ -532,11 +534,11 @@ public class FixedPartitioningTestBase extends DistributedTestCase {
         partitionedregion.put(custid, customer);
       }
       catch (Exception e) {
-        fail(
+        com.gemstone.gemfire.test.dunit.Assert.fail(
             "putCustomerPartitionedRegion : failed while doing put operation in CustomerPartitionedRegion ",
             e);
       }
-      getLogWriter().info("Customer :- { " + custid + " : " + customer + " }");
+      LogWriterUtils.getLogWriter().info("Customer :- { " + custid + " : " + customer + " }");
     }
   }
   
@@ -556,11 +558,11 @@ public class FixedPartitioningTestBase extends DistributedTestCase {
           partitionedregion.put(orderId, order);
         }
         catch (Exception e) {
-          fail(
+          com.gemstone.gemfire.test.dunit.Assert.fail(
               "putOrderPartitionedRegion : failed while doing put operation in OrderPartitionedRegion ",
               e);
         }
-        getLogWriter().info("Order :- { " + orderId + " : " + order + " }");
+        LogWriterUtils.getLogWriter().info("Order :- { " + orderId + " : " + order + " }");
       }
     }
   }
@@ -583,11 +585,11 @@ public class FixedPartitioningTestBase extends DistributedTestCase {
             partitionedregion.put(shipmentId, shipment);
           }
           catch (Exception e) {
-            fail(
+            com.gemstone.gemfire.test.dunit.Assert.fail(
                 "putShipmentPartitionedRegion : failed while doing put operation in ShipmentPartitionedRegion ",
                 e);
           }
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "Shipment :- { " + shipmentId + " : " + shipment + " }");
         }
       }
@@ -608,11 +610,11 @@ public class FixedPartitioningTestBase extends DistributedTestCase {
           partitionedregion.put(custid, customer);
         }
         catch (Exception e) {
-          fail(
+          com.gemstone.gemfire.test.dunit.Assert.fail(
               "putCustomerPartitionedRegion : failed while doing put operation in CustomerPartitionedRegion ",
               e);
         }
-        getLogWriter()
+        LogWriterUtils.getLogWriter()
             .info("Customer :- { " + custid + " : " + customer + " }");
       }
     }
@@ -635,11 +637,11 @@ public class FixedPartitioningTestBase extends DistributedTestCase {
             partitionedregion.put(orderId, order);
           }
           catch (Exception e) {
-            fail(
+            com.gemstone.gemfire.test.dunit.Assert.fail(
                 "putOrderPartitionedRegion : failed while doing put operation in OrderPartitionedRegion ",
                 e);
           }
-          getLogWriter().info("Order :- { " + orderId + " : " + order + " }");
+          LogWriterUtils.getLogWriter().info("Order :- { " + orderId + " : " + order + " }");
         }
       }
     }
@@ -664,11 +666,11 @@ public class FixedPartitioningTestBase extends DistributedTestCase {
               partitionedregion.put(shipmentId, shipment);
             }
             catch (Exception e) {
-              fail(
+              com.gemstone.gemfire.test.dunit.Assert.fail(
                   "putShipmentPartitionedRegion : failed while doing put operation in ShipmentPartitionedRegion ",
                   e);
             }
-            getLogWriter().info(
+            LogWriterUtils.getLogWriter().info(
                 "Shipment :- { " + shipmentId + " : " + shipment + " }");
           }
         }
@@ -690,11 +692,11 @@ public class FixedPartitioningTestBase extends DistributedTestCase {
           partitionedregion.put(custid, customer);
         }
         catch (Exception e) {
-          fail(
+          com.gemstone.gemfire.test.dunit.Assert.fail(
               "putCustomerPartitionedRegion : failed while doing put operation in CustomerPartitionedRegion ",
               e);
         }
-        getLogWriter()
+        LogWriterUtils.getLogWriter()
             .info("Customer :- { " + custid + " : " + customer + " }");
       }
     }
@@ -717,11 +719,11 @@ public class FixedPartitioningTestBase extends DistributedTestCase {
             partitionedregion.put(orderId, order);
           }
           catch (Exception e) {
-            fail(
+            com.gemstone.gemfire.test.dunit.Assert.fail(
                 "putOrderPartitionedRegion : failed while doing put operation in OrderPartitionedRegion ",
                 e);
           }
-          getLogWriter().info("Order :- { " + orderId + " : " + order + " }");
+          LogWriterUtils.getLogWriter().info("Order :- { " + orderId + " : " + order + " }");
         }
       }
     }
@@ -746,11 +748,11 @@ public class FixedPartitioningTestBase extends DistributedTestCase {
               partitionedregion.put(shipmentId, shipment);
             }
             catch (Exception e) {
-              fail(
+              com.gemstone.gemfire.test.dunit.Assert.fail(
                   "putShipmentPartitionedRegion : failed while doing put operation in ShipmentPartitionedRegion ",
                   e);
             }
-            getLogWriter().info(
+            LogWriterUtils.getLogWriter().info(
                 "Shipment :- { " + shipmentId + " : " + shipment + " }");
           }
         }
@@ -800,7 +802,7 @@ public class FixedPartitioningTestBase extends DistributedTestCase {
       }
     }
     catch (ParseException e) {
-      FixedPartitioningTestBase.fail("Exception Occured while parseing date", e);
+      com.gemstone.gemfire.test.dunit.Assert.fail("Exception Occured while parseing date", e);
     }
     return null;
   }
@@ -822,7 +824,7 @@ public class FixedPartitioningTestBase extends DistributedTestCase {
           .getRegion(Region.SEPARATOR + shipmentPartitionedRegionName);
     }
     catch (Exception e) {
-      fail(
+      com.gemstone.gemfire.test.dunit.Assert.fail(
           "validateAfterPutPartitionedRegion : failed while getting the region",
           e);
     }
@@ -851,7 +853,7 @@ public class FixedPartitioningTestBase extends DistributedTestCase {
           // assertNotNull(orderPartitionedregion.get(orderId));
 
           if (custId.equals(orderId.getCustId())) {
-            getLogWriter().info(
+            LogWriterUtils.getLogWriter().info(
                 orderId + "belongs to node " + idmForCustomer + " "
                     + idmForOrder);
             assertEquals(idmForCustomer, idmForOrder);
@@ -863,7 +865,7 @@ public class FixedPartitioningTestBase extends DistributedTestCase {
             ShipmentId shipmentId = (ShipmentId)shipmentIterator.next();
             // assertNotNull(shipmentPartitionedregion.get(shipmentId));
             if (orderId.equals(shipmentId.getOrderId())) {
-              getLogWriter().info(
+              LogWriterUtils.getLogWriter().info(
                   shipmentId + "belongs to node " + idmForOrder + " "
                       + idmForShipment);
             }
@@ -1087,15 +1089,15 @@ public class FixedPartitioningTestBase extends DistributedTestCase {
       Integer primaryBuckets) {
     HashMap localBucket2RegionMap = (HashMap)region_FPR.getDataStore()
         .getSizeLocally();
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Size of the " + region_FPR + " in this VM :- "
             + localBucket2RegionMap.size() + "List of buckets : "
             + localBucket2RegionMap.keySet());
     assertEquals(numBuckets.intValue(), localBucket2RegionMap.size());
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Size of primary buckets the " + region_FPR + " in this VM :- "
             + region_FPR.getDataStore().getNumberOfPrimaryBucketsManaged());
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Lit of Primaries in this VM :- "
             + region_FPR.getDataStore().getAllLocalPrimaryBucketIds());
     
@@ -1107,15 +1109,15 @@ public class FixedPartitioningTestBase extends DistributedTestCase {
       Integer numBuckets, Integer primaryBuckets) {
     HashMap localBucket2RegionMap = (HashMap)region_FPR.getDataStore()
         .getSizeLocally();
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Size of the " + region_FPR + " in this VM :- "
             + localBucket2RegionMap.size() + "List of buckets : "
             + localBucket2RegionMap.keySet());
     assertEquals(numBuckets.intValue(), localBucket2RegionMap.size());
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Size of primary buckets the " + region_FPR + " in this VM :- "
             + region_FPR.getDataStore().getNumberOfPrimaryBucketsManaged());
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Lit of Primaries in this VM :- "
             + region_FPR.getDataStore().getAllLocalPrimaryBucketIds());
 
@@ -1131,7 +1133,7 @@ public class FixedPartitioningTestBase extends DistributedTestCase {
           .getRegion(Region.SEPARATOR + customerPartitionedRegionName);
     }
     catch (Exception e) {
-      fail(
+      com.gemstone.gemfire.test.dunit.Assert.fail(
           "validateAfterPutPartitionedRegion : failed while getting the region",
           e);
     }
@@ -1159,7 +1161,7 @@ public class FixedPartitioningTestBase extends DistributedTestCase {
           .getRegion(Region.SEPARATOR + shipmentPartitionedRegionName);
     }
     catch (Exception e) {
-      fail(
+      com.gemstone.gemfire.test.dunit.Assert.fail(
           "validateAfterPutPartitionedRegion : failed while getting the region",
           e);
     }
@@ -1210,7 +1212,7 @@ public class FixedPartitioningTestBase extends DistributedTestCase {
           .getRegion(Region.SEPARATOR + shipmentPartitionedRegionName);
     }
     catch (Exception e) {
-      fail(
+      com.gemstone.gemfire.test.dunit.Assert.fail(
           "validateAfterPutPartitionedRegion : failed while getting the region",
           e);
     }
@@ -1304,8 +1306,8 @@ public class FixedPartitioningTestBase extends DistributedTestCase {
                 return excuse;
               }
             };
-            DistributedTestCase.waitForCriterion(wc, 20000, 500, false);
-            getLogWriter().info("end of beforeCalculatingStartingBucketId");
+            Wait.waitForCriterion(wc, 20000, 500, false);
+            LogWriterUtils.getLogWriter().info("end of beforeCalculatingStartingBucketId");
           }
         });
   }
@@ -1315,7 +1317,8 @@ public class FixedPartitioningTestBase extends DistributedTestCase {
     PartitionedRegion.BEFORE_CALCULATE_STARTING_BUCKET_FLAG = false;
   }
   
-  public void tearDown2() throws Exception {
+  @Override
+  protected final void preTearDown() throws Exception {
     try {
       closeCache();
       member1.invoke(FixedPartitioningTestBase.class, "closeCache");
@@ -1334,7 +1337,6 @@ public class FixedPartitioningTestBase extends DistributedTestCase {
           vm.invoke(FixedPartitioningTestBase.class, "remoteTearDown");
         }
       }
-      super.tearDown2();
     }
   }
 
@@ -1362,7 +1364,7 @@ public class FixedPartitioningTestBase extends DistributedTestCase {
             throw e;
           }
           catch (Throwable t) {
-            getLogWriter().error(t);
+            LogWriterUtils.getLogWriter().error(t);
           }
         }
       }
@@ -1376,7 +1378,7 @@ public class FixedPartitioningTestBase extends DistributedTestCase {
         throw e;
       }
       catch (Throwable t) {
-        getLogWriter().error("Error in closing the cache ", t);
+        LogWriterUtils.getLogWriter().error("Error in closing the cache ", t);
         
       }
     }
@@ -1384,7 +1386,7 @@ public class FixedPartitioningTestBase extends DistributedTestCase {
     try {
       cleanDiskDirs();
     } catch(IOException e) {
-      getLogWriter().error("Error cleaning disk dirs", e);
+      LogWriterUtils.getLogWriter().error("Error cleaning disk dirs", e);
     }
   }
   

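FixedPartitioningTestBase now imports the top-level WaitCriterion interface and calls the static Wait.waitForCriterion instead of the nested DistributedTestCase variants. A minimal sketch of that polling pattern, reusing the 20000 ms timeout, 500 ms poll interval, and no-throw-on-timeout arguments seen above (the condition being polled is illustrative only):

import java.util.concurrent.atomic.AtomicBoolean;

import com.gemstone.gemfire.test.dunit.Wait;
import com.gemstone.gemfire.test.dunit.WaitCriterion;

public class WaitCriterionSketch {
  void waitUntilRecovered(final AtomicBoolean recovered) {
    WaitCriterion wc = new WaitCriterion() {
      public boolean done() {
        return recovered.get();            // poll until the condition holds
      }
      public String description() {
        return "waiting for recovery to complete";
      }
    };
    // timeout 20000 ms, poll every 500 ms, false = do not throw on timeout
    Wait.waitForCriterion(wc, 20000, 500, false);
  }
}
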
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/fixed/FixedPartitioningWithColocationAndPersistenceDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/fixed/FixedPartitioningWithColocationAndPersistenceDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/fixed/FixedPartitioningWithColocationAndPersistenceDUnitTest.java
index 8f2dceb..8a06f7e 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/fixed/FixedPartitioningWithColocationAndPersistenceDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/fixed/FixedPartitioningWithColocationAndPersistenceDUnitTest.java
@@ -20,8 +20,10 @@ import java.util.ArrayList;
 import java.util.List;
 
 import com.gemstone.gemfire.cache.FixedPartitionAttributes;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 public class FixedPartitioningWithColocationAndPersistenceDUnitTest extends
     FixedPartitioningTestBase {
@@ -42,10 +44,6 @@ public class FixedPartitioningWithColocationAndPersistenceDUnitTest extends
 
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-  }
-
   /**
    * This tests validates that in colocation of FPRs child region cannot specify
    * FixedPartitionAttributes
@@ -75,7 +73,7 @@ public class FixedPartitioningWithColocationAndPersistenceDUnitTest extends
       if (!((illegal.getCause() instanceof IllegalStateException) && (illegal
           .getCause().getMessage()
           .contains("not be specified in PartitionAttributesFactory if colocated-with is specified")))) {
-        fail("Expected IllegalStateException ", illegal);
+        Assert.fail("Expected IllegalStateException ", illegal);
       }
     }
   }
@@ -232,7 +230,7 @@ public class FixedPartitioningWithColocationAndPersistenceDUnitTest extends
 
     }
     catch (Exception e) {
-      fail("Unexpected Exception ", e);
+      Assert.fail("Unexpected Exception ", e);
     }
   }
 
@@ -373,7 +371,7 @@ public class FixedPartitioningWithColocationAndPersistenceDUnitTest extends
 
     }
     catch (Exception e) {
-      fail("Unexpected Exception ", e);
+      Assert.fail("Unexpected Exception ", e);
     }
   }
 
@@ -491,7 +489,7 @@ public class FixedPartitioningWithColocationAndPersistenceDUnitTest extends
               "Customer", "Order", "Shipment" });
 
       member3.invoke(FixedPartitioningTestBase.class, "closeCache");
-      pause(4000);
+      Wait.pause(4000);
 
       member1.invoke(FixedPartitioningTestBase.class,
           "checkPrimaryBucketsForColocationAfterCacheClosed", new Object[] {
@@ -521,7 +519,7 @@ public class FixedPartitioningWithColocationAndPersistenceDUnitTest extends
               null, 2, 50, 20, new CustomerFixedPartitionResolver(), "Order",
               false });
 
-      pause(4000);
+      Wait.pause(4000);
 
       member1.invoke(FixedPartitioningTestBase.class,
           "validateAfterPutPartitionedRegion", new Object[] { "Customer",
@@ -552,7 +550,7 @@ public class FixedPartitioningWithColocationAndPersistenceDUnitTest extends
               null, 2, 50, 20, new CustomerFixedPartitionResolver(), "Order",
               false });
 
-      pause(4000);
+      Wait.pause(4000);
 
       member1.invoke(FixedPartitioningTestBase.class,
           "validateAfterPutPartitionedRegion", new Object[] { "Customer",
@@ -573,7 +571,7 @@ public class FixedPartitioningWithColocationAndPersistenceDUnitTest extends
 
     }
     catch (Exception e) {
-      fail("Unexpected Exception ", e);
+      Assert.fail("Unexpected Exception ", e);
     }
   }
 
@@ -686,7 +684,7 @@ public class FixedPartitioningWithColocationAndPersistenceDUnitTest extends
               "Customer", "Order", "Shipment" });
 
       member3.invoke(FixedPartitioningTestBase.class, "closeCache");
-      pause(4000);
+      Wait.pause(4000);
 
       member1.invoke(FixedPartitioningTestBase.class,
           "checkPrimaryBucketsForColocationAfterCacheClosed", new Object[] {
@@ -716,7 +714,7 @@ public class FixedPartitioningWithColocationAndPersistenceDUnitTest extends
               null, 2, 50, 20, new CustomerFixedPartitionResolver(), "Order",
               false });
 
-      pause(4000);
+      Wait.pause(4000);
 
       member1.invoke(FixedPartitioningTestBase.class,
           "validateAfterPutPartitionedRegion", new Object[] { "Customer",
@@ -747,7 +745,7 @@ public class FixedPartitioningWithColocationAndPersistenceDUnitTest extends
               null, 2, 50, 20, new CustomerFixedPartitionResolver(), "Order",
               false });
 
-      pause(4000);
+      Wait.pause(4000);
 
       member1.invoke(FixedPartitioningTestBase.class,
           "validateAfterPutPartitionedRegion", new Object[] { "Customer",
@@ -768,7 +766,7 @@ public class FixedPartitioningWithColocationAndPersistenceDUnitTest extends
 
     }
     catch (Exception e) {
-      fail("Unexpected Exception ", e);
+      Assert.fail("Unexpected Exception ", e);
     }
   }
 
@@ -801,7 +799,7 @@ public class FixedPartitioningWithColocationAndPersistenceDUnitTest extends
 
     member2.invoke(FixedPartitioningTestBase.class, "closeCache");
 
-    pause(1000);
+    Wait.pause(1000);
 
     member2.invoke(FixedPartitioningTestBase.class, "createCacheOnMember");
     fpa1 = FixedPartitionAttributes.createFixedPartition(Quarter2, true, 3);
@@ -909,14 +907,14 @@ public class FixedPartitioningWithColocationAndPersistenceDUnitTest extends
         "createRegionWithPartitionAttributes", new Object[] { "Quarter",
             fpaList, 1, 40, 12, new QuarterPartitionResolver(), null, true });
 
-    pause(4000);
+    Wait.pause(4000);
     member2.invoke(FixedPartitioningTestBase.class, "getForQuarter",
         new Object[] { "Quarter", Quarter1 });
     member2.invoke(FixedPartitioningTestBase.class, "getForQuarter",
         new Object[] { "Quarter", Quarter2 });
     member2.invoke(FixedPartitioningTestBase.class,
         "checkPrimaryDataPersistence", new Object[] { Quarter2 });
-    pause(2000);
+    Wait.pause(2000);
     member2.invoke(FixedPartitioningTestBase.class,
         "checkPrimaryBucketsForQuarter", new Object[] { 6, 6 });
 
@@ -930,7 +928,7 @@ public class FixedPartitioningWithColocationAndPersistenceDUnitTest extends
         "createRegionWithPartitionAttributes", new Object[] { "Quarter",
             fpaList, 1, 40, 12, new QuarterPartitionResolver(), null, true });
 
-    pause(4000);
+    Wait.pause(4000);
 
     member1.invoke(FixedPartitioningTestBase.class,
         "checkPrimaryDataPersistence", new Object[] { Quarter1 });
@@ -1064,7 +1062,7 @@ public class FixedPartitioningWithColocationAndPersistenceDUnitTest extends
               null, 1, 50, 20, new CustomerFixedPartitionResolver(), "Order",
               false });
 
-      pause(4000);
+      Wait.pause(4000);
       member1.invoke(FixedPartitioningTestBase.class,
           "checkPrimaryBucketsForColocation", new Object[] { 10, 5, "Customer",
               "Order", "Shipment" });
@@ -1080,7 +1078,7 @@ public class FixedPartitioningWithColocationAndPersistenceDUnitTest extends
 
     }
     catch (Exception e) {
-      fail("Unexpected Exception ", e);
+      Assert.fail("Unexpected Exception ", e);
     }
   }
 
@@ -1178,13 +1176,13 @@ public class FixedPartitioningWithColocationAndPersistenceDUnitTest extends
               fpaList, 1, 50, 20, new CustomerFixedPartitionResolver(), null,
               true });
 
-      pause(4000);
+      Wait.pause(4000);
       member2.invoke(FixedPartitioningTestBase.class, "getForColocation",
           new Object[] { "Customer", "Order", "Shipment" });
 
     }
     catch (Exception e) {
-      fail("Unexpected Exception ", e);
+      Assert.fail("Unexpected Exception ", e);
     }
   }
 
@@ -1247,7 +1245,7 @@ public class FixedPartitioningWithColocationAndPersistenceDUnitTest extends
         "createRegionWithPartitionAttributes", new Object[] { "Quarter",
             fpaList, 0, 40, 12, new QuarterPartitionResolver(), null, true });
 
-    pause(4000);
+    Wait.pause(4000);
     member2.invoke(FixedPartitioningTestBase.class,
         "checkPrimarySecondaryData", new Object[] { Quarter3, false });
 
@@ -1374,7 +1372,7 @@ public class FixedPartitioningWithColocationAndPersistenceDUnitTest extends
         "createRegionWithPartitionAttributes", new Object[] { "Quarter",
             fpaList, 1, 40, 12, new QuarterPartitionResolver(), null, true });
 
-    pause(4000);
+    Wait.pause(4000);
     member4.invoke(FixedPartitioningTestBase.class,
         "checkPrimarySecondaryData", new Object[] { Quarter4, false });
 
@@ -1424,7 +1422,7 @@ public class FixedPartitioningWithColocationAndPersistenceDUnitTest extends
     member1.invoke(FixedPartitioningTestBase.class, "closeCache");
     member2.invoke(FixedPartitioningTestBase.class, "closeCache");
 
-    pause(1000);
+    Wait.pause(1000);
 
     member2.invoke(FixedPartitioningTestBase.class, "createCacheOnMember");
     member2.invoke(FixedPartitioningTestBase.class,
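
The churn in this file is one mechanical substitution repeated at each call site: the pause and fail helpers these tests used to inherit from the shared test base class are now reached through the extracted Wait and Assert utilities. A minimal sketch of the new call shape, assuming the dunit test classes are on the classpath; the wrapper class and method below are placeholders for illustration, not part of the commit:

    import com.gemstone.gemfire.test.dunit.Assert;
    import com.gemstone.gemfire.test.dunit.Wait;

    public class PauseMigrationSketch {
      // was: pause(4000) and fail("Unexpected Exception ", e), both inherited statics
      public static void pauseThenCheck() {
        try {
          Wait.pause(4000);                         // fixed sleep, in milliseconds
        } catch (Exception e) {
          Assert.fail("Unexpected Exception ", e);  // fail the test with message and cause
        }
      }
    }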

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/persistence/PersistentRVVRecoveryDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/persistence/PersistentRVVRecoveryDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/persistence/PersistentRVVRecoveryDUnitTest.java
index ed0b6dc..ecaa1ba 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/persistence/PersistentRVVRecoveryDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/persistence/PersistentRVVRecoveryDUnitTest.java
@@ -66,10 +66,15 @@ import com.gemstone.gemfire.internal.cache.TombstoneService;
 import com.gemstone.gemfire.internal.cache.versions.RegionVersionVector;
 import com.gemstone.gemfire.internal.cache.versions.VersionTag;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 public class PersistentRVVRecoveryDUnitTest extends PersistentReplicatedTestBase {
   
@@ -80,9 +85,8 @@ public class PersistentRVVRecoveryDUnitTest extends PersistentReplicatedTestBase
   }
   
   @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-    invokeInEveryVM(PersistentRecoveryOrderDUnitTest.class, "resetAckWaitThreshold");
+  protected final void postTearDownPersistentReplicatedTestBase() throws Exception {
+    Invoke.invokeInEveryVM(PersistentRecoveryOrderDUnitTest.class, "resetAckWaitThreshold");
   }
   
   public void testNoConcurrencyChecks () {
@@ -215,7 +219,7 @@ public class PersistentRVVRecoveryDUnitTest extends PersistentReplicatedTestBase
       }
       
     });
-    ExpectedException ex = addExpectedException("DiskAccessException");
+    IgnoredException ex = IgnoredException.addIgnoredException("DiskAccessException");
     try {
       //Force expiration, with our test hook that should close the cache
       tombstoneService = cache.getTombstoneService();
@@ -285,7 +289,7 @@ public class PersistentRVVRecoveryDUnitTest extends PersistentReplicatedTestBase
 
           // We should wait for timeout time so that tomstones are expired
           // right away when they are gIId based on their original timestamp.
-          pause((int) TEST_REPLICATED_TOMBSTONE_TIMEOUT);
+          Wait.pause((int) TEST_REPLICATED_TOMBSTONE_TIMEOUT);
 
           TombstoneService.REPLICATED_TOMBSTONE_TIMEOUT = TEST_REPLICATED_TOMBSTONE_TIMEOUT;
           TombstoneService.EXPIRED_TOMBSTONE_LIMIT = entryCount;
@@ -295,7 +299,7 @@ public class PersistentRVVRecoveryDUnitTest extends PersistentReplicatedTestBase
           assertEquals(entryCount, getTombstoneCount(region));
 
           getCache().getLogger().fine("Waiting for maximumSleepTime ms");
-          pause(10000); // maximumSleepTime+500 in TombstoneSweeper GC thread
+          Wait.pause(10000); // maximumSleepTime+500 in TombstoneSweeper GC thread
 
           // Tombstones should have been expired and garbage collected by now by
           // TombstoneService.
@@ -390,7 +394,7 @@ public class PersistentRVVRecoveryDUnitTest extends PersistentReplicatedTestBase
         Cache cache = getCache();
         Region region = cache.getRegion("prRegion");
         while (!region.get("testKey").equals("testValue2")) {
-          pause(100);
+          Wait.pause(100);
         }
         region.destroy("testKey");
       }
@@ -411,7 +415,7 @@ public class PersistentRVVRecoveryDUnitTest extends PersistentReplicatedTestBase
         
         Region.Entry entry = ((PartitionedRegion)region).getEntry("testKey", true /*Entry is destroyed*/);
         RegionEntry re = ((EntrySnapshot)entry).getRegionEntry();
-        getLogWriter().fine("RegionEntry for testKey: " + re.getKey() + " " + re.getValueInVM((LocalRegion) region));
+        LogWriterUtils.getLogWriter().fine("RegionEntry for testKey: " + re.getKey() + " " + re.getValueInVM((LocalRegion) region));
         assertTrue(re.getValueInVM((LocalRegion) region) instanceof Tombstone);
         
         VersionTag tag = re.getVersionStamp().asVersionTag();
@@ -751,7 +755,7 @@ public class PersistentRVVRecoveryDUnitTest extends PersistentReplicatedTestBase
     //and then do the wait in the flusher thread.
     
     //Setup the callbacks to wait for krf creation and throw an exception
-    ExpectedException ex = addExpectedException("DiskAccessException");
+    IgnoredException ex = IgnoredException.addIgnoredException("DiskAccessException");
     LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER=true;
     try {
       final CountDownLatch krfCreated = new CountDownLatch(1);
@@ -808,7 +812,7 @@ public class PersistentRVVRecoveryDUnitTest extends PersistentReplicatedTestBase
 
       //Wait for the region to be destroyed. The region won't be destroyed
       //until the async flusher thread ends up switching oplogs
-      waitForCriterion(new WaitCriterion() {
+      Wait.waitForCriterion(new WaitCriterion() {
 
         @Override
         public boolean done() {
@@ -835,7 +839,7 @@ public class PersistentRVVRecoveryDUnitTest extends PersistentReplicatedTestBase
     for(int i = 0; i < 3; i++) {
       NonTXEntry entry = (NonTXEntry) recoveredRegion.getEntry("key" + i);
       tagsFromKrf[i] = entry.getRegionEntry().getVersionStamp().asVersionTag();
-      getLogWriter().info("krfTag[" + i + "]="+ tagsFromKrf[i] + ",value=" + entry.getValue());
+      LogWriterUtils.getLogWriter().info("krfTag[" + i + "]="+ tagsFromKrf[i] + ",value=" + entry.getValue());
     }
     
     closeCache();
@@ -850,7 +854,7 @@ public class PersistentRVVRecoveryDUnitTest extends PersistentReplicatedTestBase
       for(int i = 0; i < 3; i++) {
         NonTXEntry entry = (NonTXEntry) recoveredRegion.getEntry("key" + i);
         tagsFromCrf[i] = entry.getRegionEntry().getVersionStamp().asVersionTag();
-        getLogWriter().info("crfTag[" + i + "]="+ tagsFromCrf[i] + ",value=" + entry.getValue());
+        LogWriterUtils.getLogWriter().info("crfTag[" + i + "]="+ tagsFromCrf[i] + ",value=" + entry.getValue());
       }
       
       //Make sure the version tags from the krf and the crf match.
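
Here the handle returned by the old addExpectedException helper becomes an IgnoredException, which the tests remove once the phase that is expected to log the error has finished (the ex.remove() calls appear in the next file). A minimal add/remove sketch; the wrapper class, method name and Runnable are placeholders:

    import com.gemstone.gemfire.test.dunit.IgnoredException;

    public class IgnoredExceptionSketch {
      // Runs work that is expected to log a DiskAccessException, suppressing the
      // suspect string only for the duration of that work.
      public static void runExpectingDiskAccessException(Runnable work) {
        IgnoredException ex = IgnoredException.addIgnoredException("DiskAccessException");
        try {
          work.run();
        } finally {
          ex.remove();
        }
      }
    }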

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/persistence/PersistentRecoveryOrderDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/persistence/PersistentRecoveryOrderDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/persistence/PersistentRecoveryOrderDUnitTest.java
index cd6118a..f20762c 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/persistence/PersistentRecoveryOrderDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/persistence/PersistentRecoveryOrderDUnitTest.java
@@ -70,11 +70,16 @@ import com.gemstone.gemfire.internal.cache.LocalRegion;
 import com.gemstone.gemfire.internal.cache.TXManagerImpl;
 import com.gemstone.gemfire.internal.cache.versions.RegionVersionHolder;
 import com.gemstone.gemfire.internal.cache.versions.RegionVersionVector;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * This is a test of how persistent distributed
@@ -106,31 +111,31 @@ public class PersistentRecoveryOrderDUnitTest extends PersistentReplicatedTestBa
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
     
-    getLogWriter().info("Creating region in VM0");
+    LogWriterUtils.getLogWriter().info("Creating region in VM0");
     createPersistentRegion(vm0);
-    getLogWriter().info("Creating region in VM1");
+    LogWriterUtils.getLogWriter().info("Creating region in VM1");
     createPersistentRegion(vm1);
     
     putAnEntry(vm0);
     
-    getLogWriter().info("closing region in vm0");
+    LogWriterUtils.getLogWriter().info("closing region in vm0");
     closeRegion(vm0);
     
     updateTheEntry(vm1);
     
-    getLogWriter().info("closing region in vm1");
+    LogWriterUtils.getLogWriter().info("closing region in vm1");
     closeRegion(vm1);
     
     
     //This ought to wait for VM1 to come back
-    getLogWriter().info("Creating region in VM0");
+    LogWriterUtils.getLogWriter().info("Creating region in VM0");
     AsyncInvocation future = createPersistentRegionAsync(vm0);
     
     waitForBlockedInitialization(vm0);
     
     assertTrue(future.isAlive());
     
-    getLogWriter().info("Creating region in VM1");
+    LogWriterUtils.getLogWriter().info("Creating region in VM1");
     createPersistentRegion(vm1);
     
     future.join(MAX_WAIT);
@@ -159,9 +164,9 @@ public class PersistentRecoveryOrderDUnitTest extends PersistentReplicatedTestBa
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     
-    getLogWriter().info("Creating region in VM0");
+    LogWriterUtils.getLogWriter().info("Creating region in VM0");
     createPersistentRegion(vm0);
-    getLogWriter().info("Creating region in VM1");
+    LogWriterUtils.getLogWriter().info("Creating region in VM1");
     createPersistentRegion(vm1);
     
     putAnEntry(vm0);
@@ -176,17 +181,17 @@ public class PersistentRecoveryOrderDUnitTest extends PersistentReplicatedTestBa
       }
     });
     
-    getLogWriter().info("closing region in vm0");
+    LogWriterUtils.getLogWriter().info("closing region in vm0");
     closeRegion(vm0);
     
     updateTheEntry(vm1);
     
-    getLogWriter().info("closing region in vm1");
+    LogWriterUtils.getLogWriter().info("closing region in vm1");
     closeCache(vm1);
     
     
     //This ought to wait for VM1 to come back
-    getLogWriter().info("Creating region in VM0");
+    LogWriterUtils.getLogWriter().info("Creating region in VM0");
     AsyncInvocation future = createPersistentRegionAsync(vm0);
     
     waitForBlockedInitialization(vm0);
@@ -204,7 +209,7 @@ public class PersistentRecoveryOrderDUnitTest extends PersistentReplicatedTestBa
           adminDS = AdminDistributedSystemFactory.getDistributedSystem(config);
           adminDS.connect();
           Set<PersistentID> missingIds = adminDS.getMissingPersistentMembers();
-          getLogWriter().info("waiting members=" + missingIds);
+          LogWriterUtils.getLogWriter().info("waiting members=" + missingIds);
           assertEquals(1, missingIds.size());
           PersistentID missingMember = missingIds.iterator().next();
           adminDS.revokePersistentMember(
@@ -247,8 +252,8 @@ public class PersistentRecoveryOrderDUnitTest extends PersistentReplicatedTestBa
     
     //Now, we should not be able to create a region
     //in vm1, because the this member was revoked
-    getLogWriter().info("Creating region in VM1");
-    ExpectedException e = addExpectedException(RevokedPersistentDataException.class.getSimpleName(), vm1);
+    LogWriterUtils.getLogWriter().info("Creating region in VM1");
+    IgnoredException e = IgnoredException.addIgnoredException(RevokedPersistentDataException.class.getSimpleName(), vm1);
     try {
       createPersistentRegion(vm1);
       fail("We should have received a split distributed system exception");
@@ -294,9 +299,9 @@ public class PersistentRecoveryOrderDUnitTest extends PersistentReplicatedTestBa
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     
-    getLogWriter().info("Creating region in VM0");
+    LogWriterUtils.getLogWriter().info("Creating region in VM0");
     createPersistentRegion(vm0);
-    getLogWriter().info("Creating region in VM1");
+    LogWriterUtils.getLogWriter().info("Creating region in VM1");
     createPersistentRegion(vm1);
     
     putAnEntry(vm0);
@@ -311,12 +316,12 @@ public class PersistentRecoveryOrderDUnitTest extends PersistentReplicatedTestBa
       }
     });
     
-    getLogWriter().info("closing region in vm0");
+    LogWriterUtils.getLogWriter().info("closing region in vm0");
     closeRegion(vm0);
     
     updateTheEntry(vm1);
     
-    getLogWriter().info("closing region in vm1");
+    LogWriterUtils.getLogWriter().info("closing region in vm1");
     closeRegion(vm1);
     
     final File dirToRevoke = getDiskDirForVM(vm1);
@@ -332,7 +337,7 @@ public class PersistentRecoveryOrderDUnitTest extends PersistentReplicatedTestBa
         adminDS.connect();
         adminDS.revokePersistentMember(InetAddress.getLocalHost(), dirToRevoke.getCanonicalPath());
         } catch(Exception e) {
-          fail("Unexpected exception", e);
+          Assert.fail("Unexpected exception", e);
         } finally {
           if(adminDS != null) {
             adminDS.disconnect();
@@ -342,7 +347,7 @@ public class PersistentRecoveryOrderDUnitTest extends PersistentReplicatedTestBa
     });
     
     //This shouldn't wait, because we revoked the member
-    getLogWriter().info("Creating region in VM0");
+    LogWriterUtils.getLogWriter().info("Creating region in VM0");
     createPersistentRegion(vm0);
     
     checkForRecoveryStat(vm0, true);
@@ -361,8 +366,8 @@ public class PersistentRecoveryOrderDUnitTest extends PersistentReplicatedTestBa
     
     //Now, we should not be able to create a region
     //in vm1, because the this member was revoked
-    getLogWriter().info("Creating region in VM1");
-    ExpectedException e = addExpectedException(RevokedPersistentDataException.class.getSimpleName(), vm1);
+    LogWriterUtils.getLogWriter().info("Creating region in VM1");
+    IgnoredException e = IgnoredException.addIgnoredException(RevokedPersistentDataException.class.getSimpleName(), vm1);
     try {
       createPersistentRegion(vm1);
       fail("We should have received a split distributed system exception");
@@ -387,9 +392,9 @@ public class PersistentRecoveryOrderDUnitTest extends PersistentReplicatedTestBa
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
     
-    getLogWriter().info("Creating region in VM0");
+    LogWriterUtils.getLogWriter().info("Creating region in VM0");
     createPersistentRegion(vm0);
-    getLogWriter().info("Creating region in VM1");
+    LogWriterUtils.getLogWriter().info("Creating region in VM1");
     createPersistentRegion(vm1);
     createPersistentRegion(vm2);
     
@@ -405,28 +410,28 @@ public class PersistentRecoveryOrderDUnitTest extends PersistentReplicatedTestBa
       }
     });
     
-    getLogWriter().info("closing region in vm0");
+    LogWriterUtils.getLogWriter().info("closing region in vm0");
     closeRegion(vm0);
     
     updateTheEntry(vm1);
     
-    getLogWriter().info("closing region in vm1");
+    LogWriterUtils.getLogWriter().info("closing region in vm1");
     closeRegion(vm1);
     
     updateTheEntry(vm2, "D");
     
-    getLogWriter().info("closing region in vm2");
+    LogWriterUtils.getLogWriter().info("closing region in vm2");
     closeRegion(vm2);
     
     
     //These ought to wait for VM2 to come back
-    getLogWriter().info("Creating region in VM0");
+    LogWriterUtils.getLogWriter().info("Creating region in VM0");
     AsyncInvocation future0 = createPersistentRegionAsync(vm0);
     
     waitForBlockedInitialization(vm0);
     assertTrue(future0.isAlive());
     
-    getLogWriter().info("Creating region in VM1");
+    LogWriterUtils.getLogWriter().info("Creating region in VM1");
     final AsyncInvocation future1 = createPersistentRegionAsync(vm1);
     waitForBlockedInitialization(vm1);
     assertTrue(future1.isAlive());
@@ -442,7 +447,7 @@ public class PersistentRecoveryOrderDUnitTest extends PersistentReplicatedTestBa
           adminDS = AdminDistributedSystemFactory.getDistributedSystem(config);
           adminDS.connect();
           Set<PersistentID> missingIds = adminDS.getMissingPersistentMembers();
-          getLogWriter().info("waiting members=" + missingIds);
+          LogWriterUtils.getLogWriter().info("waiting members=" + missingIds);
           assertEquals(1, missingIds.size());
         } catch (AdminException e) {
           throw new RuntimeException(e);
@@ -461,7 +466,7 @@ public class PersistentRecoveryOrderDUnitTest extends PersistentReplicatedTestBa
       }
     });
     
-    waitForCriterion(new WaitCriterion() {
+    Wait.waitForCriterion(new WaitCriterion() {
       
       public boolean done() {
         return !future1.isAlive();
@@ -484,7 +489,7 @@ public class PersistentRecoveryOrderDUnitTest extends PersistentReplicatedTestBa
           adminDS = AdminDistributedSystemFactory.getDistributedSystem(config);
           adminDS.connect();
           final AdminDistributedSystem connectedDS = adminDS;
-          waitForCriterion(new WaitCriterion() {
+          Wait.waitForCriterion(new WaitCriterion() {
 
             public String description() {
               return "Waiting for waiting members to have 2 members";
@@ -577,12 +582,12 @@ public class PersistentRecoveryOrderDUnitTest extends PersistentReplicatedTestBa
     restoreBackup(vm1);
     
   //This ought to wait for VM1 to come back
-    getLogWriter().info("Creating region in VM0");
+    LogWriterUtils.getLogWriter().info("Creating region in VM0");
     AsyncInvocation future = createPersistentRegionAsync(vm0);
     waitForBlockedInitialization(vm0);
     assertTrue(future.isAlive());
     
-    getLogWriter().info("Creating region in VM1");
+    LogWriterUtils.getLogWriter().info("Creating region in VM1");
     createPersistentRegion(vm1);
     
     future.join(MAX_WAIT);
@@ -1045,7 +1050,7 @@ public class PersistentRecoveryOrderDUnitTest extends PersistentReplicatedTestBa
     //so it will start up.
     createPersistentRegion(vm0);
 
-    ExpectedException e = addExpectedException(ConflictingPersistentDataException.class.getSimpleName(), vm1);
+    IgnoredException e = IgnoredException.addIgnoredException(ConflictingPersistentDataException.class.getSimpleName(), vm1);
     try {
       //VM1 should not start up, because we should detect that vm1
       //was never in the same distributed system as vm0
@@ -1072,24 +1077,24 @@ public class PersistentRecoveryOrderDUnitTest extends PersistentReplicatedTestBa
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
     
-    getLogWriter().info("Creating region in VM0");
+    LogWriterUtils.getLogWriter().info("Creating region in VM0");
     createPersistentRegion(vm0);
-    getLogWriter().info("Creating region in VM1");
+    LogWriterUtils.getLogWriter().info("Creating region in VM1");
     createPersistentRegion(vm1);
     
     putAnEntry(vm0);
     
-    getLogWriter().info("closing region in vm0");
+    LogWriterUtils.getLogWriter().info("closing region in vm0");
     closeRegion(vm0);
     
     updateTheEntry(vm1);
     
-    getLogWriter().info("closing region in vm1");
+    LogWriterUtils.getLogWriter().info("closing region in vm1");
     closeRegion(vm1);
     
     
     //This ought to wait for VM1 to come back
-    getLogWriter().info("Creating region in VM0");
+    LogWriterUtils.getLogWriter().info("Creating region in VM0");
     AsyncInvocation future = createPersistentRegionAsync(vm0);
     
     waitForBlockedInitialization(vm0);
@@ -1175,7 +1180,7 @@ public class PersistentRecoveryOrderDUnitTest extends PersistentReplicatedTestBa
     final VM vm1 = host.getVM(1);
     final VM vm2 = host.getVM(2);
     
-    getLogWriter().info("Creating region in VM0");
+    LogWriterUtils.getLogWriter().info("Creating region in VM0");
     createPersistentRegion(vm0);
     
     //Add a hook which will disconnect from the distributed
@@ -1240,7 +1245,7 @@ public class PersistentRecoveryOrderDUnitTest extends PersistentReplicatedTestBa
       
       public void run() {
        final  Cache cache = getCache();
-        waitForCriterion(new WaitCriterion() {
+        Wait.waitForCriterion(new WaitCriterion() {
 
           public String description() {
             return "Waiting for creation of region " + REGION_NAME;
@@ -1341,7 +1346,7 @@ public class PersistentRecoveryOrderDUnitTest extends PersistentReplicatedTestBa
     
     createNonPersistentRegion(vm0);
     
-    ExpectedException e = addExpectedException(IllegalStateException.class.getSimpleName(), vm1);
+    IgnoredException e = IgnoredException.addIgnoredException(IllegalStateException.class.getSimpleName(), vm1);
     try {
       createPersistentRegion(vm1);
       fail("Should have received an IllegalState exception");
@@ -1381,10 +1386,10 @@ public class PersistentRecoveryOrderDUnitTest extends PersistentReplicatedTestBa
              Cache cache = getCache();
              Region region = cache.getRegion(REGION_NAME);
              if (region == null) {
-               getLogWriter().severe("removing listener for PersistentRecoveryOrderDUnitTest because region was not found: " + REGION_NAME);
+               LogWriterUtils.getLogWriter().severe("removing listener for PersistentRecoveryOrderDUnitTest because region was not found: " + REGION_NAME);
                Object old = DistributionMessageObserver.setInstance(null);
                if (old != this) {
-                 getLogWriter().severe("removed listener was not the invoked listener", new Exception("stack trace"));
+                 LogWriterUtils.getLogWriter().severe("removed listener was not the invoked listener", new Exception("stack trace"));
                }
                return;
              }
@@ -1668,18 +1673,18 @@ public class PersistentRecoveryOrderDUnitTest extends PersistentReplicatedTestBa
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
     
-    getLogWriter().info("Creating region in VM0");
+    LogWriterUtils.getLogWriter().info("Creating region in VM0");
     createPersistentRegion(vm0);
     putAnEntry(vm0);
-    getLogWriter().info("closing region in vm0");
+    LogWriterUtils.getLogWriter().info("closing region in vm0");
     closeCache(vm0);
     
-    getLogWriter().info("Creating region in VM1");
+    LogWriterUtils.getLogWriter().info("Creating region in VM1");
     createPersistentRegion(vm1);
     putAnEntry(vm1);
     
-    getLogWriter().info("Creating region in VM0");
-    ExpectedException ex = addExpectedException("ConflictingPersistentDataException", vm0);
+    LogWriterUtils.getLogWriter().info("Creating region in VM0");
+    IgnoredException ex = IgnoredException.addIgnoredException("ConflictingPersistentDataException", vm0);
     try {
       //this should cause a conflict
       createPersistentRegion(vm0);
@@ -1692,7 +1697,7 @@ public class PersistentRecoveryOrderDUnitTest extends PersistentReplicatedTestBa
       ex.remove();
     }
     
-    getLogWriter().info("closing region in vm1");
+    LogWriterUtils.getLogWriter().info("closing region in vm1");
     closeCache(vm1);
     
     //This should work now
@@ -1700,9 +1705,9 @@ public class PersistentRecoveryOrderDUnitTest extends PersistentReplicatedTestBa
     
     updateTheEntry(vm0);
     
-    ex = addExpectedException("ConflictingPersistentDataException", vm1);
+    ex = IgnoredException.addIgnoredException("ConflictingPersistentDataException", vm1);
     //Now make sure vm1 gets a conflict
-    getLogWriter().info("Creating region in VM1");
+    LogWriterUtils.getLogWriter().info("Creating region in VM1");
     try {
       //this should cause a conflict
       createPersistentRegion(vm1);
@@ -1775,11 +1780,11 @@ public class PersistentRecoveryOrderDUnitTest extends PersistentReplicatedTestBa
 
   @Override
   public Properties getDistributedSystemProperties() {
-    getLogWriter().info("Looking for ack-wait-threshold");
+    LogWriterUtils.getLogWriter().info("Looking for ack-wait-threshold");
     String s = System.getProperty("gemfire.ack-wait-threshold");
     if (s != null) {
       SAVED_ACK_WAIT_THRESHOLD = s;
-      getLogWriter().info("removing system property gemfire.ack-wait-threshold");
+      LogWriterUtils.getLogWriter().info("removing system property gemfire.ack-wait-threshold");
       System.getProperties().remove("gemfire.ack-wait-threshold");
     }
     Properties props = super.getDistributedSystemProperties();
@@ -1827,9 +1832,9 @@ public class PersistentRecoveryOrderDUnitTest extends PersistentReplicatedTestBa
         try {
           ((GemFireCacheImpl)cache).createVMRegion(REGION_NAME, rf.create(), internalArgs);
         } catch (ClassNotFoundException e) {
-          fail("error", e);
+          Assert.fail("error", e);
         } catch (IOException e) {
-          fail("error", e);
+          Assert.fail("error", e);
         }
       }
     };
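
This test also uses the two-argument overload, IgnoredException.addIgnoredException(String, VM), passing the one VM (vm0 or vm1 above) that is expected to raise the error rather than registering it everywhere. A one-method sketch of that overload; the wrapper class and method name are placeholders:

    import com.gemstone.gemfire.test.dunit.IgnoredException;
    import com.gemstone.gemfire.test.dunit.VM;

    public class VmScopedIgnoredExceptionSketch {
      // Registers the expected error only for the VM that will actually throw it;
      // the caller is responsible for calling remove() on the returned handle.
      public static IgnoredException expectIn(VM vm, String errorText) {
        return IgnoredException.addIgnoredException(errorText, vm);
      }
    }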

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/persistence/PersistentReplicatedTestBase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/persistence/PersistentReplicatedTestBase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/persistence/PersistentReplicatedTestBase.java
index ff82082..e07b145 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/persistence/PersistentReplicatedTestBase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/persistence/PersistentReplicatedTestBase.java
@@ -37,8 +37,11 @@ import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.InternalRegionArguments;
 import com.gemstone.gemfire.internal.cache.RegionFactoryImpl;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 public abstract class PersistentReplicatedTestBase extends CacheTestCase {
 
@@ -54,7 +57,7 @@ public abstract class PersistentReplicatedTestBase extends CacheTestCase {
   @Override
   public void setUp() throws Exception {
     super.setUp();
-    invokeInEveryVM(PersistentReplicatedTestBase.class,"setRegionName", new Object[]{getUniqueName()});
+    Invoke.invokeInEveryVM(PersistentReplicatedTestBase.class,"setRegionName", new Object[]{getUniqueName()});
     setRegionName(getUniqueName());
     diskDir = new File("diskDir-" + getName()).getAbsoluteFile();
     com.gemstone.gemfire.internal.FileUtil.delete(diskDir);
@@ -67,16 +70,19 @@ public abstract class PersistentReplicatedTestBase extends CacheTestCase {
   }
   
   @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  protected final void postTearDownCacheTestCase() throws Exception {
     com.gemstone.gemfire.internal.FileUtil.delete(diskDir);
+    postTearDownPersistentReplicatedTestBase();
+  }
+  
+  protected void postTearDownPersistentReplicatedTestBase() throws Exception {
   }
 
   protected void waitForBlockedInitialization(VM vm) {
     vm.invoke(new SerializableRunnable() {
   
       public void run() {
-        waitForCriterion(new WaitCriterion() {
+        Wait.waitForCriterion(new WaitCriterion() {
   
           public String description() {
             return "Waiting for another persistent member to come online";

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/Bug40396DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/Bug40396DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/Bug40396DUnitTest.java
index ed85295..c28f9e7 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/Bug40396DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/Bug40396DUnitTest.java
@@ -32,6 +32,7 @@ import com.gemstone.gemfire.internal.cache.tier.sockets.DeltaEOFException;
 import com.gemstone.gemfire.internal.cache.tier.sockets.FaultyDelta;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 /**
@@ -201,13 +202,13 @@ public class Bug40396DUnitTest extends DistributedTestCase {
     assertTrue("pattern not found", matched);
   }
   
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     // then close the servers
     server.invoke(Bug40396DUnitTest.class, "removeExceptions");
     server.invoke(Bug40396DUnitTest.class, "closeCache");
     server2.invoke(Bug40396DUnitTest.class, "closeCache");
     cache = null;
-    invokeInEveryVM(new SerializableRunnable() { public void run() { cache = null; } });
+    Invoke.invokeInEveryVM(new SerializableRunnable() { public void run() { cache = null; } });
   }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/BackwardCompatibilityHigherVersionClientDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/BackwardCompatibilityHigherVersionClientDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/BackwardCompatibilityHigherVersionClientDUnitTest.java
index 27240e5..3d53df9 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/BackwardCompatibilityHigherVersionClientDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/BackwardCompatibilityHigherVersionClientDUnitTest.java
@@ -30,8 +30,10 @@ import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.tier.ConnectionProxy;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 import com.gemstone.gemfire.cache.client.PoolManager;
 import com.gemstone.gemfire.cache.client.internal.ConnectionFactoryImpl;
@@ -133,8 +135,7 @@ public class BackwardCompatibilityHigherVersionClientDUnitTest extends
   }
 
   @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  protected final void postTearDown() throws Exception {
     client1.invoke(BackwardCompatibilityHigherVersionClientDUnitTest.class,
         "unsetHandshakeVersionForTesting");
     client1.invoke(BackwardCompatibilityHigherVersionClientDUnitTest.class,
@@ -168,7 +169,7 @@ public class BackwardCompatibilityHigherVersionClientDUnitTest extends
         "setHandshakeVersionForTesting");
     client1.invoke(BackwardCompatibilityHigherVersionClientDUnitTest.class,
         "createClientCache", new Object[] {
-            getServerHostName(server1.getHost()), port1 });
+            NetworkUtils.getServerHostName(server1.getHost()), port1 });
     client1.invoke(BackwardCompatibilityHigherVersionClientDUnitTest.class,
         "verifyConnectionToServerFailed");
   }
@@ -250,7 +251,7 @@ public class BackwardCompatibilityHigherVersionClientDUnitTest extends
       r.destroyRegion();
     }
     catch (Exception ex) {
-      fail("failed while destroy region ", ex);
+      Assert.fail("failed while destroy region ", ex);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug36269DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug36269DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug36269DUnitTest.java
index 6198ad2..c8ac91e 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug36269DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug36269DUnitTest.java
@@ -34,9 +34,13 @@ import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.distributed.internal.ServerLocation;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.EventID;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * The Region Destroy Operation from Cache Client does not pass the Client side
@@ -104,10 +108,10 @@ public class Bug36269DUnitTest extends DistributedTestCase
   {
     try {
       createClientCache();
-      acquireConnectionsAndDestroyRegion(getServerHostName(Host.getHost(0)));
+      acquireConnectionsAndDestroyRegion(NetworkUtils.getServerHostName(Host.getHost(0)));
       server1.invoke(Bug36269DUnitTest.class, "verifyRegionDestroy");
       server2.invoke(Bug36269DUnitTest.class, "verifyRegionDestroy");
-      pause(5000);
+      Wait.pause(5000);
       verifyNoRegionDestroyOnOriginator();
     }
     catch (Exception ex) {
@@ -124,7 +128,7 @@ public class Bug36269DUnitTest extends DistributedTestCase
       srp.destroyRegionOnForTestsOnly(desCon, new EventID(new byte[] {1}, 1, 1), null);
     }
     catch (Exception ex) {
-      fail("while setting acquireConnections", ex);
+      Assert.fail("while setting acquireConnections", ex);
     }
   }
 
@@ -137,7 +141,7 @@ public class Bug36269DUnitTest extends DistributedTestCase
     new Bug36269DUnitTest("temp").createCache(props);
     CacheServerTestUtil.disableShufflingOfEndpoints();
     PoolImpl p;
-    String host = getServerHostName(Host.getHost(0));
+    String host = NetworkUtils.getServerHostName(Host.getHost(0));
     try {
       p = (PoolImpl)PoolManager.createFactory()
         .addServer(host, PORT1)
@@ -184,7 +188,7 @@ public class Bug36269DUnitTest extends DistributedTestCase
       assertNotNull(r);
     }
     catch (Exception ex) {
-      fail("failed while verifyNoRegionDestroyOnOriginator()", ex);
+      Assert.fail("failed while verifyNoRegionDestroyOnOriginator()", ex);
     }
   }
 
@@ -199,10 +203,10 @@ public class Bug36269DUnitTest extends DistributedTestCase
           return null;
         }
       };
-      DistributedTestCase.waitForCriterion(ev, 40 * 1000, 200, true);
+      Wait.waitForCriterion(ev, 40 * 1000, 200, true);
     }
     catch (Exception ex) {
-      fail("failed while verifyRegionDestroy", ex);
+      Assert.fail("failed while verifyRegionDestroy", ex);
     }
   }
 
@@ -214,13 +218,11 @@ public class Bug36269DUnitTest extends DistributedTestCase
     }
   }
 
-  public void tearDown2() throws Exception
-  {
+  @Override
+  protected final void preTearDown() throws Exception {
     closeCache();
     // close server
     server1.invoke(Bug36269DUnitTest.class, "closeCache");
     server2.invoke(Bug36269DUnitTest.class, "closeCache");
-
   }
-
 }
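
Polling waits now go through Wait.waitForCriterion(criterion, timeoutMs, intervalMs, throwOnTimeout) rather than the inherited helper, as in the verifyRegionDestroy change above. A minimal sketch with a placeholder condition:

    import java.util.concurrent.atomic.AtomicBoolean;

    import com.gemstone.gemfire.test.dunit.Wait;
    import com.gemstone.gemfire.test.dunit.WaitCriterion;

    public class WaitCriterionSketch {
      // Polls the flag every 200 ms for up to 40 seconds and fails the calling
      // test (throwOnTimeout = true) if it never becomes true.
      public static void awaitFlag(final AtomicBoolean flag) {
        WaitCriterion ev = new WaitCriterion() {
          public boolean done() {
            return flag.get();
          }
          public String description() {
            return "waiting for flag to become true";
          }
        };
        Wait.waitForCriterion(ev, 40 * 1000, 200, true);
      }
    }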

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug36457DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug36457DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug36457DUnitTest.java
index 0124e92..cff2266 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug36457DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug36457DUnitTest.java
@@ -37,8 +37,11 @@ import com.gemstone.gemfire.distributed.internal.ServerLocation;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.ClientServerObserverAdapter;
 import com.gemstone.gemfire.internal.cache.ClientServerObserverHolder;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -136,9 +139,8 @@ public class Bug36457DUnitTest extends DistributedTestCase
     return new Integer(server1.getPort());
   }
 
-  public void tearDown2() throws Exception
-  {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     // close the clients first
     client1.invoke(Bug36457DUnitTest.class, "closeCache");
     client2.invoke(Bug36457DUnitTest.class, "closeCache");
@@ -162,9 +164,9 @@ public class Bug36457DUnitTest extends DistributedTestCase
     Integer port2 = ((Integer)server2.invoke(Bug36457DUnitTest.class,
         "createServerCache"));
     client1.invoke(Bug36457DUnitTest.class, "createClientCache", new Object[] {
-        getServerHostName(server1.getHost()), port1, port2 });
+        NetworkUtils.getServerHostName(server1.getHost()), port1, port2 });
     client2.invoke(Bug36457DUnitTest.class, "createClientCache", new Object[] {
-        getServerHostName(server1.getHost()), port1, port2 });
+        NetworkUtils.getServerHostName(server1.getHost()), port1, port2 });
     //set a cllabck so that we come to know that whether a failover is called or not
     // if failover is called means this bug is present.
     client2.invoke(Bug36457DUnitTest.class, "setClientServerObserver");
@@ -188,7 +190,7 @@ public class Bug36457DUnitTest extends DistributedTestCase
         .setInstance(new ClientServerObserverAdapter() {
           public void afterPrimaryIdentificationFromBackup(ServerLocation primaryEndpoint)
           {
-            getLogWriter().fine("TEST FAILED HERE YOGI ");
+            LogWriterUtils.getLogWriter().fine("TEST FAILED HERE YOGI ");
             isFaileoverHappened = true;
           }
         });
@@ -207,7 +209,7 @@ public class Bug36457DUnitTest extends DistributedTestCase
       r.destroyRegion();
     }
     catch (Exception ex) {
-      fail("failed while destroy region ", ex);
+      Assert.fail("failed while destroy region ", ex);
     }
   }
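
The remaining substitutions in this test are of the same kind: getLogWriter() becomes LogWriterUtils.getLogWriter(), and the host name handed to client pools comes from NetworkUtils.getServerHostName(...). A small combined sketch; the class name and log message are placeholders:

    import com.gemstone.gemfire.test.dunit.Host;
    import com.gemstone.gemfire.test.dunit.LogWriterUtils;
    import com.gemstone.gemfire.test.dunit.NetworkUtils;

    public class HostNameSketch {
      // Resolves the host name clients should use for the first dunit host and
      // logs it through the extracted log-writer accessor.
      public static String serverHostName() {
        String host = NetworkUtils.getServerHostName(Host.getHost(0));
        LogWriterUtils.getLogWriter().info("server host is " + host);
        return host;
      }
    }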
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug36805DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug36805DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug36805DUnitTest.java
index fa1bed1..d9f1a2e 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug36805DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug36805DUnitTest.java
@@ -36,7 +36,10 @@ import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * bug test for bug 36805
@@ -130,9 +133,8 @@ public class Bug36805DUnitTest extends DistributedTestCase
     return new Integer(server1.getPort());
   }
 
-  public void tearDown2() throws Exception
-  {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     // close the clients first
     client1.invoke(Bug36805DUnitTest.class, "closeCache");
     client2.invoke(Bug36805DUnitTest.class, "closeCache");
@@ -156,9 +158,9 @@ public class Bug36805DUnitTest extends DistributedTestCase
     Integer port2 = ((Integer)server2.invoke(Bug36805DUnitTest.class,
         "createServerCache"));
     client1.invoke(Bug36805DUnitTest.class, "createClientCache", new Object[] {
-        getServerHostName(server1.getHost()), port1, port2 });
+        NetworkUtils.getServerHostName(server1.getHost()), port1, port2 });
     client2.invoke(Bug36805DUnitTest.class, "createClientCache", new Object[] {
-        getServerHostName(server1.getHost()), port1, port2 });
+        NetworkUtils.getServerHostName(server1.getHost()), port1, port2 });
     // set a cllabck so that we come to know that whether a failover is called
     // or not
     // if failover is called means this bug is present.
@@ -215,7 +217,7 @@ public class Bug36805DUnitTest extends DistributedTestCase
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 3 * 60 * 1000, 1000, true);
+    Wait.waitForCriterion(wc, 3 * 60 * 1000, 1000, true);
 
     // we no longer verify dead servers; live is good enough
 //     start = System.currentTimeMillis();

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug36829DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug36829DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug36829DUnitTest.java
index 74139bb..ceac13b 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug36829DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug36829DUnitTest.java
@@ -27,8 +27,10 @@ import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.cache.RegionDestroyedException;
 import com.gemstone.gemfire.cache.client.*;
 import com.gemstone.gemfire.internal.cache.PoolFactoryImpl;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 public class Bug36829DUnitTest extends DistributedTestCase {
@@ -66,7 +68,7 @@ public class Bug36829DUnitTest extends DistributedTestCase {
 
     this.ClientVM.invoke(CacheServerTestUtil.class, "createCacheClient",
         new Object[] {
-            getClientPool(getServerHostName(ClientVM.getHost()), PORT, true, 0),
+            getClientPool(NetworkUtils.getServerHostName(ClientVM.getHost()), PORT, true, 0),
             regionName,
             getClientDistributedSystemProperties(durableClientId,
                 durableClientTimeout), Boolean.TRUE });
@@ -117,7 +119,7 @@ public class Bug36829DUnitTest extends DistributedTestCase {
       }
     }
     catch (Exception ex) {
-      fail("failed while registering interest in registerKey function", ex);
+      Assert.fail("failed while registering interest in registerKey function", ex);
     }
   }
 
@@ -141,7 +143,7 @@ public class Bug36829DUnitTest extends DistributedTestCase {
     }
 
     catch (Exception ex) {
-      fail("failed while registering interest in registerKey function", ex);
+      Assert.fail("failed while registering interest in registerKey function", ex);
     }
   }
 
@@ -166,9 +168,8 @@ public class Bug36829DUnitTest extends DistributedTestCase {
     return properties;
   }
   
-  public void tearDown2() throws Exception
-  {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     CacheServerTestUtil.resetDisableShufflingOfEndpointsFlag();
   }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug36995DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug36995DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug36995DUnitTest.java
index 3eb924e..872f95c 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug36995DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug36995DUnitTest.java
@@ -29,7 +29,11 @@ import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.cache.client.*;
 import com.gemstone.gemfire.cache.client.internal.PoolImpl;
 
@@ -135,9 +139,8 @@ public class Bug36995DUnitTest extends DistributedTestCase
     return new Integer(server1.getPort());
   }
 
-  public void tearDown2() throws Exception
-  {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     closeCache();
     server1.invoke(Bug36995DUnitTest.class, "closeCache");
     server2.invoke(Bug36995DUnitTest.class, "closeCache");
@@ -164,7 +167,7 @@ public class Bug36995DUnitTest extends DistributedTestCase
     Integer port3 = ((Integer)server3.invoke(Bug36995DUnitTest.class,
         "createServerCache"));
     createClientCacheWithDefaultMessageTrackingTimeout(
-        getServerHostName(server1.getHost()), port1.intValue(), port2
+        NetworkUtils.getServerHostName(server1.getHost()), port1.intValue(), port2
         .intValue(), port3.intValue());
     assertEquals(PoolFactory.DEFAULT_SUBSCRIPTION_MESSAGE_TRACKING_TIMEOUT,
                  pool.getSubscriptionMessageTrackingTimeout());
@@ -176,14 +179,14 @@ public class Bug36995DUnitTest extends DistributedTestCase
   public void testBug36995_UserSpecified()
   {
     //work around GEODE-507
-    addExpectedException("Connection reset");
+    IgnoredException.addIgnoredException("Connection reset");
     Integer port1 = ((Integer)server1.invoke(Bug36995DUnitTest.class,
         "createServerCache"));
     Integer port2 = ((Integer)server2.invoke(Bug36995DUnitTest.class,
         "createServerCache"));
     Integer port3 = ((Integer)server3.invoke(Bug36995DUnitTest.class,
         "createServerCache"));
-    createClientCache(getServerHostName(server1.getHost()),
+    createClientCache(NetworkUtils.getServerHostName(server1.getHost()),
         port1.intValue(), port2.intValue(), port3.intValue());
     assertEquals(54321, pool.getSubscriptionMessageTrackingTimeout());
   }
@@ -199,7 +202,7 @@ public class Bug36995DUnitTest extends DistributedTestCase
         "createServerCache"));
     Integer port3 = ((Integer)server3.invoke(Bug36995DUnitTest.class,
         "createServerCache"));
-    createClientCache(getServerHostName(server1.getHost()),
+    createClientCache(NetworkUtils.getServerHostName(server1.getHost()),
         port1.intValue(), port2.intValue(), port3.intValue());
     verifyDeadAndLiveServers(0, 3);
     server2.invoke(Bug36995DUnitTest.class, "stopServer");
@@ -232,7 +235,7 @@ public class Bug36995DUnitTest extends DistributedTestCase
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 3 * 60 * 1000, 1000, true);
+    Wait.waitForCriterion(wc, 3 * 60 * 1000, 1000, true);
     
     // we no longer verify dead servers; just live
 //     while (proxy.getDeadServers().size() != expectedDeadServers) { // wait

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug37210DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug37210DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug37210DUnitTest.java
index 3a9ab10..050a8fc 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug37210DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug37210DUnitTest.java
@@ -35,6 +35,9 @@ import com.gemstone.gemfire.internal.cache.CacheServerImpl;
 import com.gemstone.gemfire.internal.cache.ha.HARegionQueue;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 import com.gemstone.gemfire.cache.client.*;
 
@@ -83,7 +86,7 @@ public class Bug37210DUnitTest extends DistributedTestCase
   {
     super.setUp();
     
-    addExpectedException("java.io.IOException");
+    IgnoredException.addIgnoredException("java.io.IOException");
     
     final Host host = Host.getHost(0);
     server = host.getVM(0);
@@ -120,9 +123,8 @@ public class Bug37210DUnitTest extends DistributedTestCase
    * @throws Exception
    *                 thrown if any problem occurs in closing cache
    */
-  public void tearDown2() throws Exception
-  {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     // close client
     client.invoke(Bug37210DUnitTest.class, "closeCache");
 
@@ -144,11 +146,11 @@ public class Bug37210DUnitTest extends DistributedTestCase
    */
   public void testHAStatsCleanup() throws Exception
   {
-    getLogWriter().info("testHAStatsCleanup : BEGIN");
-    addExpectedException("java.net.SocketException");
-    addExpectedException("Unexpected IOException");
+    LogWriterUtils.getLogWriter().info("testHAStatsCleanup : BEGIN");
+    IgnoredException.addIgnoredException("java.net.SocketException");
+    IgnoredException.addIgnoredException("Unexpected IOException");
     client.invoke(Bug37210DUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(Host.getHost(0)), new Integer(PORT) });
+        new Object[] { NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT) });
     server.invoke(Bug37210DUnitTest.class, "doEntryOperations");
     
     server.invoke(Bug37210DUnitTest.class,
@@ -158,7 +160,7 @@ public class Bug37210DUnitTest extends DistributedTestCase
     Thread.currentThread().sleep(1000);
     server.invoke(Bug37210DUnitTest.class,
             "closeCacheClientProxyAndVerifyStats2");
-    getLogWriter().info("testHAStatsCleanup : END");
+    LogWriterUtils.getLogWriter().info("testHAStatsCleanup : END");
   }
 
   /**
@@ -187,7 +189,7 @@ public class Bug37210DUnitTest extends DistributedTestCase
     server.setSocketBufferSize(32768);
     server.setMaximumTimeBetweenPings(1000000);
     server.start();
-    getLogWriter().info("Server started at PORT = " + port);
+    LogWriterUtils.getLogWriter().info("Server started at PORT = " + port);
     return new Integer(server.getPort());
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug37805DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug37805DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug37805DUnitTest.java
index 24674fe..f7adf6a 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug37805DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/Bug37805DUnitTest.java
@@ -30,6 +30,7 @@ import com.gemstone.gemfire.internal.cache.HARegion;
 import com.gemstone.gemfire.internal.cache.PoolFactoryImpl;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -64,7 +65,8 @@ public class Bug37805DUnitTest extends DistributedTestCase{
     CacheServerTestUtil.disableShufflingOfEndpoints();
   }
   
-  public void tearDown2() throws Exception {
+  @Override
+  protected final void preTearDown() throws Exception {
     // Stop server 1
     this.server1VM.invoke(CacheServerTestUtil.class, "closeCache");
     CacheServerTestUtil.resetDisableShufflingOfEndpointsFlag();
@@ -84,7 +86,7 @@ public class Bug37805DUnitTest extends DistributedTestCase{
 
     this.durableClientVM.invoke(CacheServerTestUtil.class, "createCacheClient",
         new Object[] {
-            getClientPool(getServerHostName(durableClientVM.getHost()), PORT1, true, 0),
+            getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), PORT1, true, 0),
             regionName,
             getDurableClientDistributedSystemProperties(durableClientId,
                 durableClientTimeout), Boolean.TRUE });
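
Host-name lookup follows the same extraction: the inherited getServerHostName(Host)
helper becomes NetworkUtils.getServerHostName(Host). A one-method sketch with
stand-in names:

    import com.gemstone.gemfire.test.dunit.Host;
    import com.gemstone.gemfire.test.dunit.NetworkUtils;
    import com.gemstone.gemfire.test.dunit.VM;

    // Illustrative only; not a class from the commit.
    public class ServerHostNameSketch {
      public String hostFor(VM clientVm) {
        // replaces the inherited getServerHostName(...) call
        return NetworkUtils.getServerHostName(clientVm.getHost());
      }

      public String hostForHostZero() {
        return NetworkUtils.getServerHostName(Host.getHost(0));
      }
    }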

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/CacheServerMaxConnectionsJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/CacheServerMaxConnectionsJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/CacheServerMaxConnectionsJUnitTest.java
index 3418ae2..ed0d7fd 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/CacheServerMaxConnectionsJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/CacheServerMaxConnectionsJUnitTest.java
@@ -39,8 +39,8 @@ import com.gemstone.gemfire.cache.client.internal.Connection;
 import com.gemstone.gemfire.cache.server.CacheServer;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.AvailablePort;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
@@ -177,7 +177,7 @@ public class CacheServerMaxConnectionsJUnitTest
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 1000, 200, true);
+    Wait.waitForCriterion(ev, 1000, 200, true);
     assertEquals(MAX_CNXS, s.getInt("currentClientConnections"));
     assertEquals(1, s.getInt("currentClients"));
     this.system.getLogWriter().info("<ExpectedException action=add>" 
@@ -214,7 +214,7 @@ public class CacheServerMaxConnectionsJUnitTest
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 3 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 3 * 1000, 200, true);
     this.system.getLogWriter().info("currentClients="
         + s.getInt("currentClients")
         + " currentClientConnections="

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/CacheServerTestUtil.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/CacheServerTestUtil.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/CacheServerTestUtil.java
index f70a808..4a3cf5f 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/CacheServerTestUtil.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/CacheServerTestUtil.java
@@ -50,7 +50,11 @@ import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.PoolFactoryImpl;
 import com.gemstone.gemfire.internal.cache.PoolFactoryImpl.PoolAttributes;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 /**
  *
  * @author Yogesh Mahajan
@@ -59,7 +63,7 @@ import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 public class CacheServerTestUtil extends DistributedTestCase
 {
   private static Cache cache = null;
-  private static ExpectedException expected;
+  private static IgnoredException expected;
 
   private static PoolImpl pool = null;
 
@@ -156,7 +160,7 @@ public class CacheServerTestUtil extends DistributedTestCase
   public static void createCacheClient(Pool poolAttr, String regionName,
       Properties dsProperties, Boolean addControlListener, Properties javaSystemProperties) throws Exception {  		
       new CacheServerTestUtil("temp").createCache(dsProperties);
-      addExpectedException("java.net.ConnectException||java.net.SocketException");
+      IgnoredException.addIgnoredException("java.net.ConnectException||java.net.SocketException");
       
       if (javaSystemProperties != null && javaSystemProperties.size() > 0) {
       	Enumeration e = javaSystemProperties.propertyNames();
@@ -222,9 +226,9 @@ public class CacheServerTestUtil extends DistributedTestCase
     ccf.set(DistributionConfig.DURABLE_CLIENT_ID_NAME, durableClientId);
     ccf.set(DistributionConfig.DURABLE_CLIENT_TIMEOUT_NAME, String.valueOf(timeout));
     ccf.set("log-file", "abs_client_system.log");
-    ccf.set("log-level", getDUnitLogLevel());
+    ccf.set("log-level", LogWriterUtils.getDUnitLogLevel());
     cache = (Cache)ccf.create();
-    expected = addExpectedException("java.net.ConnectionException||java.net.SocketException");
+    expected = IgnoredException.addIgnoredException("java.net.ConnectionException||java.net.SocketException");
     pool = (PoolImpl)PoolManager.find(poolName);
     
   }
@@ -243,7 +247,7 @@ public class CacheServerTestUtil extends DistributedTestCase
     ccf.set(DistributionConfig.DURABLE_CLIENT_ID_NAME, durableClientId);
     ccf.set(DistributionConfig.DURABLE_CLIENT_TIMEOUT_NAME, String.valueOf(timeout));
     cache = (Cache)ccf.create();
-    expected = addExpectedException("java.net.ConnectionException||java.net.SocketException");
+    expected = IgnoredException.addIgnoredException("java.net.ConnectionException||java.net.SocketException");
     pool = (PoolImpl)PoolManager.find(poolName);
     
   }
@@ -254,9 +258,9 @@ public class CacheServerTestUtil extends DistributedTestCase
       File cacheXmlFile = new File(url.toURI().getPath());
       ccf.set("cache-xml-file", cacheXmlFile.toURI().getPath());
       ccf.set("mcast-port", "0");
-      ccf.set("locators", "localhost["+DistributedTestCase.getDUnitLocatorPort()+"]");
+      ccf.set("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
       ccf.set("log-file", "abs_server_system.log");
-      ccf.set("log-level", getDUnitLogLevel());
+      ccf.set("log-level", LogWriterUtils.getDUnitLogLevel());
     }
     catch (URISyntaxException e) {
       throw new ExceptionInInitializerError(e);
@@ -271,7 +275,7 @@ public class CacheServerTestUtil extends DistributedTestCase
       File cacheXmlFile = new File(url.toURI().getPath());
       ccf.set("cache-xml-file", cacheXmlFile.toURI().getPath());
       ccf.set("mcast-port", "0");
-      ccf.set("locators", "localhost["+DistributedTestCase.getDUnitLocatorPort()+"]");
+      ccf.set("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
     }
     catch (URISyntaxException e) {
       throw new ExceptionInInitializerError(e);
@@ -326,7 +330,7 @@ public class CacheServerTestUtil extends DistributedTestCase
   {
     Properties props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
-    props.setProperty(DistributionConfig.LOCATORS_NAME, "localhost["+DistributedTestCase.getDUnitLocatorPort()+"]");
+    props.setProperty(DistributionConfig.LOCATORS_NAME, "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
     new CacheServerTestUtil("temp").createCache(props);
     AttributesFactory factory = new AttributesFactory();
     factory.setScope(Scope.DISTRIBUTED_ACK);
@@ -348,7 +352,7 @@ public class CacheServerTestUtil extends DistributedTestCase
     Properties props = new Properties();
 //    int mcastPort = AvailablePort.getRandomAvailablePort(AvailablePort.JGROUPS);
     props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
-    props.setProperty(DistributionConfig.LOCATORS_NAME, "localhost["+DistributedTestCase.getDUnitLocatorPort()+"]");
+    props.setProperty(DistributionConfig.LOCATORS_NAME, "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
     new CacheServerTestUtil("temp").createCache(props);
     AttributesFactory factory = new AttributesFactory();
     factory.setScope(Scope.DISTRIBUTED_ACK);
@@ -369,7 +373,7 @@ public class CacheServerTestUtil extends DistributedTestCase
       throws Exception {
     Properties props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
-    props.setProperty(DistributionConfig.LOCATORS_NAME, "localhost["+DistributedTestCase.getDUnitLocatorPort()+"]");
+    props.setProperty(DistributionConfig.LOCATORS_NAME, "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
     new CacheServerTestUtil("temp").createCache(props);
     AttributesFactory factory = new AttributesFactory();
     factory.setScope(Scope.DISTRIBUTED_ACK);
@@ -425,7 +429,7 @@ public class CacheServerTestUtil extends DistributedTestCase
     setSystem(props, cc.getDistributedSystem());
     cache = (Cache)cc;
     assertNotNull(cache);
-    expected = addExpectedException("java.net.ConnectionException||java.net.SocketException");
+    expected = IgnoredException.addIgnoredException("java.net.ConnectionException||java.net.SocketException");
   }
 
   public static void closeCache()
@@ -489,7 +493,7 @@ public class CacheServerTestUtil extends DistributedTestCase
       try {
         server.start();
       } catch(Exception e) {
-        fail("Unexpected exception", e);
+        Assert.fail("Unexpected exception", e);
       }
       assertTrue(server.isRunning());
     }



http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/locks/TXLockServiceDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/locks/TXLockServiceDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/locks/TXLockServiceDUnitTest.java
index 6d60816..01e7eca 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/locks/TXLockServiceDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/locks/TXLockServiceDUnitTest.java
@@ -37,7 +37,10 @@ import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedM
 import com.gemstone.gemfire.internal.cache.TXRegionLockRequestImpl;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 
 /**
  * This class tests distributed ownership via the DistributedLockService api.
@@ -85,11 +88,12 @@ public class TXLockServiceDUnitTest extends DistributedTestCase {
     com.gemstone.gemfire.internal.OSProcess.printStacks(0);
   }
 
-  public void tearDown2() throws Exception {
+  @Override
+  protected final void preTearDown() throws Exception {
 //    invokeInEveryVM(TXLockServiceDUnitTest.class,
 //                    "remoteDumpAllDLockServices");
                     
-    invokeInEveryVM(TXLockServiceDUnitTest.class,
+    Invoke.invokeInEveryVM(TXLockServiceDUnitTest.class,
                     "destroyServices"); 
     
     destroyServices();
@@ -102,9 +106,6 @@ public class TXLockServiceDUnitTest extends DistributedTestCase {
 
     testTXRecoverGrantor_replyCode_PASS = false;
     testTXRecoverGrantor_heldLocks_PASS = false;
-  
-    // Disconnects from GemFire if using shared memory
-    super.tearDown2();
   }
   
   // -------------------------------------------------------------------------
@@ -123,7 +124,7 @@ public class TXLockServiceDUnitTest extends DistributedTestCase {
   }
   
   public void disable_testTXRecoverGrantorMessageProcessor() throws Exception {
-    getLogWriter().info("[testTXOriginatorRecoveryProcessor]");
+    LogWriterUtils.getLogWriter().info("[testTXOriginatorRecoveryProcessor]");
     TXLockService.createDTLS();
     checkDLockRecoverGrantorMessageProcessor();
     
@@ -173,7 +174,7 @@ public class TXLockServiceDUnitTest extends DistributedTestCase {
     dtls.release(txLockId);
     
     // check results to verify no locks were provided in reply
-    DistributedTestCase.join(thread, 30 * 1000, getLogWriter());
+    ThreadUtils.join(thread, 30 * 1000);
     assertEquals("testTXRecoverGrantor_replyCode_PASS is false", true, 
         testTXRecoverGrantor_replyCode_PASS);
     assertEquals("testTXRecoverGrantor_heldLocks_PASS is false", true, 
@@ -182,7 +183,7 @@ public class TXLockServiceDUnitTest extends DistributedTestCase {
   
   protected static volatile TXLockId testTXLock_TXLockId;
   public void testTXLock() {
-    getLogWriter().info("[testTXLock]");
+    LogWriterUtils.getLogWriter().info("[testTXLock]");
     final int grantorVM = 0;
     final int clientA = 1;
     final int clientB = 2;
@@ -200,7 +201,7 @@ public class TXLockServiceDUnitTest extends DistributedTestCase {
         ));
     
     // create grantor
-    getLogWriter().info("[testTXLock] create grantor");
+    LogWriterUtils.getLogWriter().info("[testTXLock] create grantor");
     
     Host.getHost(0).getVM(grantorVM).invoke(new SerializableRunnable() {
       public void run() {
@@ -210,7 +211,7 @@ public class TXLockServiceDUnitTest extends DistributedTestCase {
     sleep(20);
     
     // create client and request txLock
-    getLogWriter().info("[testTXLock] create clientA and request txLock");
+    LogWriterUtils.getLogWriter().info("[testTXLock] create clientA and request txLock");
     
     Host.getHost(0).getVM(clientA).invoke(new SerializableRunnable() {
       public void run() {
@@ -228,7 +229,7 @@ public class TXLockServiceDUnitTest extends DistributedTestCase {
     });
     
     // create nuther client and request overlapping txLock... verify fails
-    getLogWriter().info("[testTXLock] create clientB and fail txLock");
+    LogWriterUtils.getLogWriter().info("[testTXLock] create clientB and fail txLock");
     
     Host.getHost(0).getVM(clientB).invoke(new SerializableRunnable() {
       public void run() {
@@ -258,7 +259,7 @@ public class TXLockServiceDUnitTest extends DistributedTestCase {
     */
     
     // release txLock
-    getLogWriter().info("[testTXLock] clientA releases txLock");
+    LogWriterUtils.getLogWriter().info("[testTXLock] clientA releases txLock");
     
     Host.getHost(0).getVM(clientA).invoke(
         new SerializableRunnable("[testTXLock] clientA releases txLock") {
@@ -270,7 +271,7 @@ public class TXLockServiceDUnitTest extends DistributedTestCase {
     sleep(20);
     
     // try nuther client again and verify success
-    getLogWriter().info("[testTXLock] clientB requests txLock");
+    LogWriterUtils.getLogWriter().info("[testTXLock] clientB requests txLock");
     
     Host.getHost(0).getVM(clientB).invoke(
         new SerializableRunnable("[testTXLock] clientB requests txLock") {
@@ -282,7 +283,7 @@ public class TXLockServiceDUnitTest extends DistributedTestCase {
     });
 
     // release txLock
-    getLogWriter().info("[testTXLock] clientB releases txLock");
+    LogWriterUtils.getLogWriter().info("[testTXLock] clientB releases txLock");
     
     Host.getHost(0).getVM(clientB).invoke(
         new SerializableRunnable("[testTXLock] clientB releases txLock") {
@@ -295,7 +296,7 @@ public class TXLockServiceDUnitTest extends DistributedTestCase {
   
   protected static volatile TXLockId testTXOriginatorRecoveryProcessor_TXLockId;
   public void testTXOriginatorRecoveryProcessor() {
-    getLogWriter().info("[testTXOriginatorRecoveryProcessor]");
+    LogWriterUtils.getLogWriter().info("[testTXOriginatorRecoveryProcessor]");
     final int originatorVM = 0;
     final int grantorVM = 1;
     final int particpantA = 2;
@@ -321,7 +322,7 @@ public class TXLockServiceDUnitTest extends DistributedTestCase {
     }
     
     // create grantor
-    getLogWriter().info("[testTXOriginatorRecoveryProcessor] grantorVM becomes grantor");
+    LogWriterUtils.getLogWriter().info("[testTXOriginatorRecoveryProcessor] grantorVM becomes grantor");
     
     Host.getHost(0).getVM(grantorVM).invoke(new SerializableRunnable() {
       public void run() {
@@ -338,7 +339,7 @@ public class TXLockServiceDUnitTest extends DistributedTestCase {
                  Boolean.TRUE, isGrantor);
     
     // have a originatorVM get a txLock with three participants including grantor
-    getLogWriter().info("[testTXOriginatorRecoveryProcessor] originatorVM requests txLock");
+    LogWriterUtils.getLogWriter().info("[testTXOriginatorRecoveryProcessor] originatorVM requests txLock");
     
     Host.getHost(0).getVM(originatorVM).invoke(new SerializableRunnable() {
       public void run() {
@@ -422,14 +423,14 @@ public class TXLockServiceDUnitTest extends DistributedTestCase {
   }
   
   public void testDTLSIsDistributed() {
-    getLogWriter().info("[testDTLSIsDistributed]");
+    LogWriterUtils.getLogWriter().info("[testDTLSIsDistributed]");
     
     // have all vms lock and hold the same LTLS lock simultaneously
     final Host host = Host.getHost(0);
     int vmCount = host.getVMCount();
     for (int vm = 0; vm < vmCount; vm++) {
       final int finalvm = vm;
-      getLogWriter().info("[testDTLSIsDistributed] testing vm " + finalvm);
+      LogWriterUtils.getLogWriter().info("[testDTLSIsDistributed] testing vm " + finalvm);
     
       Host.getHost(0).getVM(finalvm).invoke(new SerializableRunnable() {
         public void run() {
@@ -442,21 +443,21 @@ public class TXLockServiceDUnitTest extends DistributedTestCase {
           TXLockServiceDUnitTest.class, "isDistributed_DTLS", new Object[] {});
       assertEquals("isDistributed should be true for DTLS", 
                    Boolean.TRUE, isDistributed);
-      getLogWriter().info("[testDTLSIsDistributed] isDistributed=" + isDistributed);
+      LogWriterUtils.getLogWriter().info("[testDTLSIsDistributed] isDistributed=" + isDistributed);
                    
       // lock a key...                
       Boolean gotLock = (Boolean)host.getVM(finalvm).invoke(
           TXLockServiceDUnitTest.class, "lock_DTLS", new Object[] {"KEY"});
       assertEquals("gotLock is false after calling lock_DTLS", 
                    Boolean.TRUE, gotLock);
-      getLogWriter().info("[testDTLSIsDistributed] gotLock=" + gotLock);
+      LogWriterUtils.getLogWriter().info("[testDTLSIsDistributed] gotLock=" + gotLock);
       
       // unlock it...                
       Boolean unlock = (Boolean)host.getVM(finalvm).invoke(
           TXLockServiceDUnitTest.class, "unlock_DTLS", new Object[] {"KEY"});
       assertEquals("unlock is false after calling unlock_DTLS", 
                    Boolean.TRUE, unlock);
-      getLogWriter().info("[testDTLSIsDistributed] unlock=" + unlock);
+      LogWriterUtils.getLogWriter().info("[testDTLSIsDistributed] unlock=" + unlock);
     }
   }
   
@@ -667,14 +668,14 @@ public class TXLockServiceDUnitTest extends DistributedTestCase {
     Host host = Host.getHost(0);
     int vmCount = host.getVMCount();
     for (int i=0; i<vmCount; i++) {
-      getLogWriter().info("Invoking " + methodName + "on VM#" + i);
+      LogWriterUtils.getLogWriter().info("Invoking " + methodName + "on VM#" + i);
       host.getVM(i).invoke(this.getClass(), methodName, args);
     }
   }
   
   public Properties getDistributedSystemProperties() {
     Properties props = super.getDistributedSystemProperties();
-    props.setProperty("log-level", getDUnitLogLevel());
+    props.setProperty("log-level", LogWriterUtils.getDUnitLogLevel());
     return props;
   }
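
This file also shows the new tear-down contract: instead of overriding tearDown2()
and remembering to call super.tearDown2(), a test overrides the preTearDown() hook
and leaves the final disconnect to the framework's own tear-down; whole-cluster
invocations go through Invoke, and thread joins drop the LogWriter argument in favor
of ThreadUtils.join. An illustrative skeleton, with stand-in class and method names:

    import com.gemstone.gemfire.test.dunit.DistributedTestCase;
    import com.gemstone.gemfire.test.dunit.Invoke;
    import com.gemstone.gemfire.test.dunit.ThreadUtils;

    // Illustrative skeleton; not a test from the commit.
    public abstract class TearDownSketch extends DistributedTestCase {

      public TearDownSketch(String name) {
        super(name);
      }

      @Override
      protected final void preTearDown() throws Exception {
        // runs before the base class tears down; no super.tearDown2() call is needed
        Invoke.invokeInEveryVM(TearDownSketch.class, "cleanUpStatics");
        cleanUpStatics();
      }

      public static void cleanUpStatics() {
        // reset whatever statics the test populated in each VM
      }

      protected void joinWorker(Thread worker) {
        // replaces DistributedTestCase.join(worker, 30 * 1000, getLogWriter())
        ThreadUtils.join(worker, 30 * 1000);
      }
    }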
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/Bug39356DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/Bug39356DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/Bug39356DUnitTest.java
index 13adb98..0371df7 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/Bug39356DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/Bug39356DUnitTest.java
@@ -44,6 +44,7 @@ import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 
 /**
@@ -110,7 +111,7 @@ public class Bug39356DUnitTest extends CacheTestCase {
     SerializableRunnable verifyBuckets = new SerializableRunnable("Verify buckets") {
 
       public void run() {
-        LogWriter log = getLogWriter();
+        LogWriter log = com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter();
         Cache cache = getCache();
         PartitionedRegion r = (PartitionedRegion) cache.getRegion(REGION_NAME);
         for(int i = 0; i < r.getAttributes().getPartitionAttributes().getTotalNumBuckets(); i++) {
@@ -120,7 +121,7 @@ public class Bug39356DUnitTest extends CacheTestCase {
               owners = r.getBucketOwnersForValidation(i);
             } catch (ForceReattemptException e) {
               log.info(Bug39356DUnitTest.class + " verify buckets Caught a ForceReattemptException");
-              pause(1000);
+              Wait.pause(1000);
             }
           }
           if(owners.isEmpty()) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/Bug43684DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/Bug43684DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/Bug43684DUnitTest.java
index 8410030..eca9bca 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/Bug43684DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/Bug43684DUnitTest.java
@@ -36,7 +36,9 @@ import com.gemstone.gemfire.internal.cache.LocalRegion;
 import com.gemstone.gemfire.internal.cache.RegionEntry;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -76,10 +78,11 @@ public class Bug43684DUnitTest extends DistributedTestCase {
     server2 = host.getVM(1);
     server3 = host.getVM(2);
     client1 = host.getVM(3);
-    addExpectedException("Connection refused: connect");
+    IgnoredException.addIgnoredException("Connection refused: connect");
   }
 
-  public void tearDown2() throws Exception {
+  @Override
+  protected final void preTearDown() throws Exception {
     closeCache();
     client1.invoke(Bug43684DUnitTest.class, "closeCache");
     server1.invoke(Bug43684DUnitTest.class, "closeCache");
@@ -235,7 +238,7 @@ public class Bug43684DUnitTest extends DistributedTestCase {
   public static Integer createServerCache(Boolean isReplicated, Boolean isPrimaryEmpty) throws Exception {
     DistributedTestCase.disconnectFromDS();
     Properties props = new Properties();
-    props.setProperty("locators", "localhost["+getDUnitLocatorPort()+"]");
+    props.setProperty("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
 //    props.setProperty("log-file", "server_" + OSProcess.getId() + ".log");
 //    props.setProperty("log-level", "fine");
     props.setProperty("statistic-archive-file", "server_" + OSProcess.getId()

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/Bug47388DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/Bug47388DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/Bug47388DUnitTest.java
index 3512385..1a02b90 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/Bug47388DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/Bug47388DUnitTest.java
@@ -44,6 +44,8 @@ import com.gemstone.gemfire.internal.cache.tier.sockets.CacheClientProxy;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * The test creates two datastores with a partitioned region, and also running a
@@ -101,7 +103,8 @@ public class Bug47388DUnitTest extends DistributedTestCase {
 
   }
 
-  public void tearDown2() throws Exception {
+  @Override
+  protected final void preTearDown() throws Exception {
     closeCache();
 
     vm2.invoke(Bug47388DUnitTest.class, "closeCache");
@@ -244,7 +247,7 @@ public class Bug47388DUnitTest extends DistributedTestCase {
       }
     };
     
-    DistributedTestCase.waitForCriterion(wc, 60 * 1000, 500, true);
+    Wait.waitForCriterion(wc, 60 * 1000, 500, true);
   }
 
   public static void waitForLastKeyDestroyed() throws Exception {
@@ -261,7 +264,7 @@ public class Bug47388DUnitTest extends DistributedTestCase {
 
     };
 
-    DistributedTestCase.waitForCriterion(wc, 60 * 1000, 500, true);
+    Wait.waitForCriterion(wc, 60 * 1000, 500, true);
   }
 
   public void bug51931_testQRMOfExpiredEventsProcessedSuccessfully() throws Exception {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/Bug51400DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/Bug51400DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/Bug51400DUnitTest.java
index 9234db4..5b08a3e 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/Bug51400DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/Bug51400DUnitTest.java
@@ -37,7 +37,9 @@ import com.gemstone.gemfire.internal.cache.tier.sockets.CacheClientProxy;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheClientProxyStats;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -73,7 +75,8 @@ public class Bug51400DUnitTest extends DistributedTestCase {
 
   }
 
-  public void tearDown2() throws Exception {
+  @Override
+  protected final void preTearDown() throws Exception {
     closeCache();
 
     client0.invoke(Bug51400DUnitTest.class, "closeCache");
@@ -92,7 +95,7 @@ public class Bug51400DUnitTest extends DistributedTestCase {
   public static Integer createServerCache(Integer mcastPort,
       Integer maxMessageCount) throws Exception {
     Properties props = new Properties();
-    props.setProperty("locators", "localhost["+getDUnitLocatorPort()+"]");
+    props.setProperty("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
 //    props.setProperty("log-file", "server_" + OSProcess.getId() + ".log");
 //    props.setProperty("log-level", "fine");
 //    props.setProperty("statistic-archive-file", "server_" + OSProcess.getId()
@@ -175,7 +178,7 @@ public class Bug51400DUnitTest extends DistributedTestCase {
         "createServerCache", new Object[] { maxQSize });
 
     client1.invoke(Bug51400DUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(Host.getHost(0)), new Integer[]{port1}, ackInterval});
+        new Object[] { NetworkUtils.getServerHostName(Host.getHost(0)), new Integer[]{port1}, ackInterval});
 
     // Do puts from server as well as from client on the same key.
     AsyncInvocation ai1 = server0.invokeAsync(Bug51400DUnitTest.class,

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/ElidedPutAllDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/ElidedPutAllDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/ElidedPutAllDUnitTest.java
index 681d0d9..26103c9 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/ElidedPutAllDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/ElidedPutAllDUnitTest.java
@@ -35,6 +35,7 @@ import com.gemstone.gemfire.internal.cache.RegionEntry;
 import com.gemstone.gemfire.internal.cache.tier.sockets.VersionedObjectList;
 import com.gemstone.gemfire.internal.cache.versions.ConcurrentCacheModificationException;
 import com.gemstone.gemfire.internal.cache.versions.VersionTag;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
@@ -98,14 +99,14 @@ public class ElidedPutAllDUnitTest extends CacheTestCase {
           try {
             region.postPutAllSend(dpao, successfulPuts);
           } catch (ConcurrentCacheModificationException e) {
-            fail("Should not have received an exception for an elided operation", e);
+            Assert.fail("Should not have received an exception for an elided operation", e);
           } finally {
             event.release();
             dpao.getBaseEvent().release();
             dpao.freeOffHeapResources();
           }
         } catch (Exception e) {
-          fail("caught unexpected exception", e);
+          Assert.fail("caught unexpected exception", e);
         }
       }
     });
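
The two-argument fail calls above cannot stay on JUnit 3's Assert, whose fail(String)
takes no cause; they move to the dunit Assert helper, which keeps the original stack
trace attached to the test failure. An illustrative sketch:

    import com.gemstone.gemfire.test.dunit.Assert;

    // Illustrative only; the operation that throws is a stand-in.
    public class FailWithCauseSketch {
      public void runStep() {
        try {
          riskyOperation();
        } catch (Exception e) {
          // fail(String, Throwable) preserves the cause in the reported failure
          Assert.fail("caught unexpected exception", e);
        }
      }

      private void riskyOperation() throws Exception {
        // stand-in for the operation under test
      }
    }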

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PartitionResolverDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PartitionResolverDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PartitionResolverDUnitTest.java
index e35b014..1ba824e 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PartitionResolverDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PartitionResolverDUnitTest.java
@@ -77,11 +77,12 @@ public class PartitionResolverDUnitTest extends CacheTestCase {
     datastore1 = host.getVM(1);
     datastore2 = host.getVM(2);
   }
+  
   @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  protected final void postTearDownCacheTestCase() throws Exception {
     CountingResolver.resetResolverCount();
   }
+  
   void createRegion(boolean isAccessor, int redundantCopies) {
     AttributesFactory af = new AttributesFactory();
     af.setScope(Scope.DISTRIBUTED_ACK);
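
CacheTestCase subclasses get the analogous lifecycle hooks, preTearDownCacheTestCase()
and postTearDownCacheTestCase(), in place of a tearDown2() override that had to call
super. A bare skeleton with stand-in names:

    import com.gemstone.gemfire.cache30.CacheTestCase;

    // Illustrative skeleton; not a test from the commit.
    public abstract class CacheLifecycleSketch extends CacheTestCase {

      public CacheLifecycleSketch(String name) {
        super(name);
      }

      @Override
      protected final void preTearDownCacheTestCase() throws Exception {
        // runs before the framework closes the cache, e.g. delete backup directories
      }

      @Override
      protected final void postTearDownCacheTestCase() throws Exception {
        // runs after the cache has been torn down, e.g. reset static counters
      }
    }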

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PartitionedRegionLoaderWriterDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PartitionedRegionLoaderWriterDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PartitionedRegionLoaderWriterDUnitTest.java
index 2678621..e06e49a 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PartitionedRegionLoaderWriterDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PartitionedRegionLoaderWriterDUnitTest.java
@@ -32,6 +32,7 @@ import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.cache.util.CacheWriterAdapter;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.distributed.DistributedSystem;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -159,7 +160,7 @@ public class PartitionedRegionLoaderWriterDUnitTest extends CacheTestCase {
       cache.createRegion(PartitionedRegionName, attrs);
     }
     catch (Exception e) {
-      fail("Not Expected : " , e);
+      Assert.fail("Not Expected : " , e);
     }
   }
 
@@ -200,7 +201,7 @@ public class PartitionedRegionLoaderWriterDUnitTest extends CacheTestCase {
       assertNotNull(cache);
     }
     catch (Exception e) {
-      fail("Failed while creating the cache", e);
+      Assert.fail("Failed while creating the cache", e);
     }
   }
 
@@ -224,9 +225,4 @@ public class PartitionedRegionLoaderWriterDUnitTest extends CacheTestCase {
     public CacheWriter2() {
     }
   }
-
-  @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-  }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PartitionedRegionMetaDataCleanupDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PartitionedRegionMetaDataCleanupDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PartitionedRegionMetaDataCleanupDUnitTest.java
index 825774d..99b4ba2 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PartitionedRegionMetaDataCleanupDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PartitionedRegionMetaDataCleanupDUnitTest.java
@@ -21,10 +21,13 @@ import com.gemstone.gemfire.cache.RegionFactory;
 import com.gemstone.gemfire.cache.RegionShortcut;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
+import com.gemstone.gemfire.test.dunit.Assert;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.RMIException;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 /**
  * @author dsmith
@@ -43,7 +46,7 @@ public class PartitionedRegionMetaDataCleanupDUnitTest extends CacheTestCase {
     createPR(vm0, "region1", 5);
     createPR(vm1, "region2", 10);
     //This should fail
-    ExpectedException ex = addExpectedException( "IllegalStateException", vm1);
+    IgnoredException ex = IgnoredException.addIgnoredException( "IllegalStateException", vm1);
     try {
       createPR(vm1, "region1", 10);
       fail("Should have received an exception");
@@ -63,7 +66,7 @@ public class PartitionedRegionMetaDataCleanupDUnitTest extends CacheTestCase {
     createPR(vm0, "region1", 5);
     createPR(vm1, "region2", 10);
     //This should fail
-    ExpectedException ex = addExpectedException( "IllegalStateException", vm1);
+    IgnoredException ex = IgnoredException.addIgnoredException( "IllegalStateException", vm1);
     try {
       createPR(vm1, "region1", 10);
       fail("Should have received an exception");
@@ -83,7 +86,7 @@ public class PartitionedRegionMetaDataCleanupDUnitTest extends CacheTestCase {
     createPR(vm0, "region1", 5);
     createPR(vm1, "region2", 10);
     //This should fail
-    ExpectedException ex = addExpectedException("IllegalStateException", vm1);
+    IgnoredException ex = IgnoredException.addIgnoredException("IllegalStateException", vm1);
     try {
       createPR(vm1, "region1", 10);
       fail("Should have received an exception");
@@ -93,7 +96,7 @@ public class PartitionedRegionMetaDataCleanupDUnitTest extends CacheTestCase {
       ex.remove();
     }
     
-    ex = addExpectedException("DistributedSystemDisconnectedException", vm0);
+    ex = IgnoredException.addIgnoredException("DistributedSystemDisconnectedException", vm0);
     try {
       fakeCrash(vm0);
     } finally {
@@ -161,7 +164,7 @@ public class PartitionedRegionMetaDataCleanupDUnitTest extends CacheTestCase {
           .setEntryTimeToLive(new ExpirationAttributes(expirationTime));
 
         //We may log an exception if the create fails. Ignore thse.
-          ExpectedException ex = addExpectedException("IllegalStateException");
+          IgnoredException ex = IgnoredException.addIgnoredException("IllegalStateException");
           try {
             int i= 0;
             //Loop until a successful create
@@ -174,11 +177,11 @@ public class PartitionedRegionMetaDataCleanupDUnitTest extends CacheTestCase {
               } catch(IllegalStateException expected) {
                 //give up if we can't create the region in 20 tries
                 if(i == 20) {
-                  fail("Metadata was never cleaned up in 20 tries", expected);
+                  Assert.fail("Metadata was never cleaned up in 20 tries", expected);
                 }
                 
                 //wait a bit before the next attempt.
-                pause(500);
+                Wait.pause(500);
               }
             }
           } finally {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PersistPRKRFDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PersistPRKRFDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PersistPRKRFDUnitTest.java
index c0a0149..d74a94b 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PersistPRKRFDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PersistPRKRFDUnitTest.java
@@ -28,9 +28,11 @@ import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.internal.cache.DiskStoreImpl;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 /**
  * Tests the basic use cases for PR persistence.
@@ -73,7 +75,7 @@ public class PersistPRKRFDUnitTest extends PersistentPartitionedRegionTestBase {
     AsyncInvocation async1 = vm0.invokeAsync(new CacheSerializableRunnable(title+"async create") {
       public void run2() throws CacheException {
         Region region = getRootRegion(PR_REGION_NAME);
-        ExpectedException expect = addExpectedException("CacheClosedException");
+        IgnoredException expect = IgnoredException.addIgnoredException("CacheClosedException");
         try {
           region.put(10, "b");
           fail("Expect CacheClosedException here");
@@ -92,14 +94,14 @@ public class PersistPRKRFDUnitTest extends PersistentPartitionedRegionTestBase {
     vm0.invoke(new CacheSerializableRunnable(title+"close disk store") {
       public void run2() throws CacheException {
         GemFireCacheImpl gfc = (GemFireCacheImpl)getCache();
-        pause(500);
+        Wait.pause(500);
         gfc.closeDiskStores();
         synchronized(lockObject) {
           lockObject.notify();
         }
       }
     });
-    DistributedTestCase.join(async1, MAX_WAIT, getLogWriter());
+    ThreadUtils.join(async1, MAX_WAIT);
     closeCache(vm0);
     
     // update
@@ -116,7 +118,7 @@ public class PersistPRKRFDUnitTest extends PersistentPartitionedRegionTestBase {
     async1 = vm0.invokeAsync(new CacheSerializableRunnable(title+"async update") {
       public void run2() throws CacheException {
         Region region = getRootRegion(PR_REGION_NAME);
-        ExpectedException expect = addExpectedException("CacheClosedException");
+        IgnoredException expect = IgnoredException.addIgnoredException("CacheClosedException");
         try {
           region.put(1, "b");
           fail("Expect CacheClosedException here");
@@ -135,14 +137,14 @@ public class PersistPRKRFDUnitTest extends PersistentPartitionedRegionTestBase {
     vm0.invoke(new CacheSerializableRunnable(title+"close disk store") {
       public void run2() throws CacheException {
         GemFireCacheImpl gfc = (GemFireCacheImpl)getCache();
-        pause(500);
+        Wait.pause(500);
         gfc.closeDiskStores();
         synchronized(lockObject) {
           lockObject.notify();
         }
       }
     });
-    DistributedTestCase.join(async1, MAX_WAIT, getLogWriter());
+    ThreadUtils.join(async1, MAX_WAIT);
     closeCache(vm0);
 
     // destroy
@@ -159,7 +161,7 @@ public class PersistPRKRFDUnitTest extends PersistentPartitionedRegionTestBase {
     async1 = vm0.invokeAsync(new CacheSerializableRunnable(title+"async destroy") {
       public void run2() throws CacheException {
         Region region = getRootRegion(PR_REGION_NAME);
-        ExpectedException expect = addExpectedException("CacheClosedException");
+        IgnoredException expect = IgnoredException.addIgnoredException("CacheClosedException");
         try {
           region.destroy(2, "b");
           fail("Expect CacheClosedException here");
@@ -178,14 +180,14 @@ public class PersistPRKRFDUnitTest extends PersistentPartitionedRegionTestBase {
     vm0.invoke(new CacheSerializableRunnable(title+"close disk store") {
       public void run2() throws CacheException {
         GemFireCacheImpl gfc = (GemFireCacheImpl)getCache();
-        pause(500);
+        Wait.pause(500);
         gfc.closeDiskStores();
         synchronized(lockObject) {
           lockObject.notify();
         }
       }
     });
-    DistributedTestCase.join(async1, MAX_WAIT, getLogWriter());
+    ThreadUtils.join(async1, MAX_WAIT);
     
     checkData(vm0, 0, 10, "a");
     checkData(vm0, 10, 11, null);
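
For asynchronous invocations the join helper loses its LogWriter argument as well:
DistributedTestCase.join(asyncInvocation, ms, log) becomes
ThreadUtils.join(asyncInvocation, ms), and the fixed sleeps go through Wait.pause.
A sketch with stand-in class and method names:

    import com.gemstone.gemfire.test.dunit.AsyncInvocation;
    import com.gemstone.gemfire.test.dunit.ThreadUtils;
    import com.gemstone.gemfire.test.dunit.VM;
    import com.gemstone.gemfire.test.dunit.Wait;

    // Illustrative only; not a test from the commit.
    public class AsyncJoinSketch {

      public void runAsyncStep(VM vm) throws Exception {
        AsyncInvocation async = vm.invokeAsync(AsyncJoinSketch.class, "doWorkInVm");
        Wait.pause(500);                    // brief unconditional pause, as in the hunks above
        ThreadUtils.join(async, 60 * 1000); // bounded join without a LogWriter
      }

      public static void doWorkInVm() {
        // stand-in for the work performed inside the remote VM
      }
    }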

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PersistentColocatedPartitionedRegionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PersistentColocatedPartitionedRegionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PersistentColocatedPartitionedRegionDUnitTest.java
index cacdc61..901d2d6 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PersistentColocatedPartitionedRegionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PersistentColocatedPartitionedRegionDUnitTest.java
@@ -43,11 +43,15 @@ import com.gemstone.gemfire.internal.cache.InitialImageOperation.RequestImageMes
 import com.gemstone.gemfire.internal.cache.PartitionedRegion;
 import com.gemstone.gemfire.internal.cache.control.InternalResourceManager;
 import com.gemstone.gemfire.internal.cache.control.InternalResourceManager.ResourceObserver;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * @author dsmith
@@ -67,9 +71,8 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends
   }
   
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
     FileUtil.delete(getBackupDir());
-    super.tearDown2();
   }
   
   public void testColocatedPRAttributes() {
@@ -107,7 +110,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends
         af.setPartitionAttributes(paf.create());
         
         //Try to colocate a persistent PR with the non persistent PR. This should fail.
-        ExpectedException exp = addExpectedException("IllegalStateException");
+        IgnoredException exp = IgnoredException.addIgnoredException("IllegalStateException");
         try {
           cache.createRegion("colocated", af.create());
           fail("should not have been able to create a persistent region colocated with a non persistent region");
@@ -290,7 +293,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends
     vm1.invoke(createChildPR);
     vm2.invoke(createChildPR);
     
-    pause(4000);
+    Wait.pause(4000);
     
     assertEquals(vm0Buckets, getBucketList(vm0, PR_REGION_NAME));
     assertEquals(vm0Buckets, getBucketList(vm0, "region2"));
@@ -351,7 +354,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends
         try {
           recoveryDone.await(MAX_WAIT, TimeUnit.MILLISECONDS);
         } catch (InterruptedException e) {
-          fail("interrupted", e);
+          Assert.fail("interrupted", e);
         } 
       }
     };
@@ -498,7 +501,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends
             fail("timed out");
           }
         } catch (InterruptedException e) {
-          fail("interrupted", e);
+          Assert.fail("interrupted", e);
         }
       }
     };
@@ -556,7 +559,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends
             fail("timed out");
           }
         } catch (InterruptedException e) {
-          fail("interrupted", e);
+          Assert.fail("interrupted", e);
         }
       }
     };
@@ -602,7 +605,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends
     
     createData(vm0, 0, NUM_BUCKETS, "b");
     createData(vm0, 0, NUM_BUCKETS, "b", "region2");
-    ExpectedException expected = addExpectedException("PartitionOfflineException");
+    IgnoredException expected = IgnoredException.addIgnoredException("PartitionOfflineException");
     try {
     
     //Close the remaining members.
@@ -630,7 +633,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends
     //by starting it last.
     AsyncInvocation async0 = vm0.invokeAsync(createPRs);
     AsyncInvocation async1 = vm1.invokeAsync(createPRs);
-    pause(2000);
+    Wait.pause(2000);
     AsyncInvocation async2 = vm2.invokeAsync(createPRs);
     async0.getResult(MAX_WAIT);
     async1.getResult(MAX_WAIT);
@@ -714,7 +717,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends
             fail("timed out");
           }
         } catch (InterruptedException e) {
-          fail("interrupted", e);
+          Assert.fail("interrupted", e);
         }
       }
     };
@@ -779,7 +782,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends
             fail("timed out");
           }
         } catch (InterruptedException e) {
-          fail("interrupted", e);
+          Assert.fail("interrupted", e);
         }
       }
     };
@@ -804,8 +807,8 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends
    * @throws Throwable 
    */
   public void replaceOfflineMemberAndRestartCreateColocatedPRLate(SerializableRunnable createParentPR, SerializableRunnable createChildPR) throws Throwable {
-    addExpectedException("PartitionOfflineException");
-    addExpectedException("RegionDestroyedException");
+    IgnoredException.addIgnoredException("PartitionOfflineException");
+    IgnoredException.addIgnoredException("RegionDestroyedException");
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
@@ -860,14 +863,14 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends
     //by starting it last.
     AsyncInvocation async2 = vm2.invokeAsync(createParentPR);
     AsyncInvocation async1 = vm1.invokeAsync(createParentPR);
-    pause(2000);
+    Wait.pause(2000);
     AsyncInvocation async0 = vm0.invokeAsync(createParentPR);
     async0.getResult(MAX_WAIT);
     async1.getResult(MAX_WAIT);
     async2.getResult(MAX_WAIT);
     
     //Wait for async tasks
-    pause(2000);
+    Wait.pause(2000);
     
     //Recreate the child region. 
     async2 = vm2.invokeAsync(createChildPR);
@@ -975,7 +978,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends
       }
     });
     
-    ExpectedException ex = addExpectedException("PartitionOfflineException", vm1);
+    IgnoredException ex = IgnoredException.addIgnoredException("PartitionOfflineException", vm1);
     try {
       
     //Do a rebalance to create buckets in vm1. THis will cause vm0 to disconnect
@@ -995,7 +998,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends
     vm0.invoke(new SerializableCallable() {
       
       public Object call() throws Exception {
-        waitForCriterion(new WaitCriterion() {
+        Wait.waitForCriterion(new WaitCriterion() {
           public boolean done() {
             InternalDistributedSystem ds = system;
             return ds == null || !ds.isConnected();
@@ -1292,7 +1295,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends
         try {
           observer.waitForCreate();
         } catch (InterruptedException e) {
-          fail("interrupted", e);
+          Assert.fail("interrupted", e);
         }
       }
     };
@@ -1415,7 +1418,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends
     closeCache();
     
     //Restart colocated with "region2"
-    ExpectedException ex = addExpectedException("DiskAccessException|IllegalStateException");
+    IgnoredException ex = IgnoredException.addIgnoredException("DiskAccessException|IllegalStateException");
     try {
       createColocatedPRs("region2");
       fail("Should have received an illegal state exception");
@@ -1437,7 +1440,7 @@ public class PersistentColocatedPartitionedRegionDUnitTest extends
     
     //Restart uncolocated. We don't allow changing
     //from uncolocated to colocated.
-    ex = addExpectedException("DiskAccessException|IllegalStateException");
+    ex = IgnoredException.addIgnoredException("DiskAccessException|IllegalStateException");
     try {
       createColocatedPRs(null);
       fail("Should have received an illegal state exception");

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PersistentPartitionedRegionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PersistentPartitionedRegionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PersistentPartitionedRegionDUnitTest.java
index 794d418..abf546d 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PersistentPartitionedRegionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PersistentPartitionedRegionDUnitTest.java
@@ -74,13 +74,19 @@ import com.gemstone.gemfire.internal.cache.InitialImageOperation.RequestImageMes
 import com.gemstone.gemfire.internal.cache.control.InternalResourceManager;
 import com.gemstone.gemfire.internal.cache.partitioned.ManageBucketMessage.ManageBucketReplyMessage;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.RMIException;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Tests the basic use cases for PR persistence.
@@ -143,8 +149,8 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
     createPR(vm0, 0, 0, 5);
     createData(vm0, 0, 5, "a");
     closeCache(vm0);
-    ExpectedException expect = addExpectedException("IllegalStateException", vm0);
-    expect = addExpectedException("DiskAccessException", vm0);
+    IgnoredException expect = IgnoredException.addIgnoredException("IllegalStateException", vm0);
+    expect = IgnoredException.addIgnoredException("DiskAccessException", vm0);
     try {
       createPR(vm0, 0, 0, 2);
       fail("Expect to get java.lang.IllegalStateException, but it did not");
@@ -254,8 +260,8 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
       }
     });
 
-    ExpectedException expected1 = addExpectedException("Fatal error from asynch");
-    ExpectedException expected2 = addExpectedException("ToDataException");
+    IgnoredException expected1 = IgnoredException.addIgnoredException("Fatal error from asynch");
+    IgnoredException expected2 = IgnoredException.addIgnoredException("ToDataException");
     try {
       int redundancy=1;
       createPR(vm0, redundancy, -1, 113, false);
@@ -422,7 +428,7 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
     createData(vm0, numBuckets, 113, "b");
     checkData(vm0, numBuckets, 113, "b");
     
-    ExpectedException ex = addExpectedException(RevokedPersistentDataException.class.getName(), vm1);
+    IgnoredException ex = IgnoredException.addIgnoredException(RevokedPersistentDataException.class.getName(), vm1);
     try {
       createPR(vm1, 1);
       fail("Should have recieved a SplitDistributedSystemException");
@@ -437,7 +443,7 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
   }
   
   public void testRevokeBeforeStartup() throws Throwable {
-    addExpectedException("RevokeFailedException");
+    IgnoredException.addIgnoredException("RevokeFailedException");
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
@@ -491,7 +497,7 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
     createData(vm0, numBuckets, 113, "b");
     checkData(vm0, numBuckets, 113, "b");
     
-    ExpectedException ex = addExpectedException(RevokedPersistentDataException.class.getName(), vm1);
+    IgnoredException ex = IgnoredException.addIgnoredException(RevokedPersistentDataException.class.getName(), vm1);
     try {
       createPR(vm1, 1);
       fail("Should have recieved a SplitDistributedSystemException");
@@ -537,7 +543,7 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
     final int aVM1Bucket = vm1Buckets.iterator().next();
     closeCache(vm1);
 
-    ExpectedException ex = addExpectedException("PartitionOfflineException");
+    IgnoredException ex = IgnoredException.addIgnoredException("PartitionOfflineException");
     try { 
       checkReadWriteOperationsWithOfflineMember(vm0, aVM0Bucket, aVM1Bucket);
       //Make sure that a newly created member is informed about the offline member
@@ -574,7 +580,7 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
       }
     }
 
-    ExpectedException expect = addExpectedException("PartitionOfflineException", vm0);
+    IgnoredException expect = IgnoredException.addIgnoredException("PartitionOfflineException", vm0);
     //Try a function execution
     vm0.invoke(new SerializableRunnable("Test ways to read") {
       public void run() {
@@ -803,7 +809,7 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
       //This should work, because this bucket is still available.
       checkData(vm0, aVM0Bucket, aVM0Bucket + 1, "a");
       
-      ExpectedException expect = addExpectedException("PartitionOfflineException", vm0);
+      IgnoredException expect = IgnoredException.addIgnoredException("PartitionOfflineException", vm0);
       try {
         checkData(vm0, aVM1Bucket, aVM1Bucket + 1, "a");
         fail("Should not have been able to read from missing buckets!");
@@ -836,7 +842,7 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
       createData(vm2, aVM1Bucket, aVM1Bucket + 1, "a");
       checkData(vm2, aVM1Bucket, aVM1Bucket + 1, "a");
       
-      ExpectedException ex = addExpectedException(RevokedPersistentDataException.class.getName(), vm1);
+      IgnoredException ex = IgnoredException.addIgnoredException(RevokedPersistentDataException.class.getName(), vm1);
       try {
         createPR(vm1, 0);
         fail("Should have recieved a RevokedPersistentDataException");
@@ -884,7 +890,7 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
       Set<Integer> vm2Buckets = getBucketList(vm2);
       assertEquals(vm1Buckets, vm2Buckets);
       
-      ExpectedException ex = addExpectedException(RevokedPersistentDataException.class.getName(), vm1);
+      IgnoredException ex = IgnoredException.addIgnoredException(RevokedPersistentDataException.class.getName(), vm1);
       try {
         createPR(vm1, 1);
         fail("Should have recieved a SplitDistributedSystemException");
@@ -952,7 +958,7 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
       
       //VM2 should pick up the slack
       
-      waitForCriterion(new WaitCriterion() {
+      Wait.waitForCriterion(new WaitCriterion() {
         
         public boolean done() {
           Set<Integer> vm2Buckets = getBucketList(vm2);
@@ -1336,11 +1342,11 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
   
   public void testRegisterInterestNoDataStores() {
     //Closing the client may log a warning on the server
-    addExpectedException("Connection reset");
-    addExpectedException("SocketTimeoutException");
-    addExpectedException("ServerConnectivityException");
-    addExpectedException("Socket Closed");
-    addExpectedException("Unexpected IOException");
+    IgnoredException.addIgnoredException("Connection reset");
+    IgnoredException.addIgnoredException("SocketTimeoutException");
+    IgnoredException.addIgnoredException("ServerConnectivityException");
+    IgnoredException.addIgnoredException("Socket Closed");
+    IgnoredException.addIgnoredException("Unexpected IOException");
     final Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
@@ -1381,7 +1387,7 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
           Cache cache = getCache();
 
           PoolFactory pf = PoolManager.createFactory();
-          pf.addServer(getServerHostName(host), serverPort);
+          pf.addServer(NetworkUtils.getServerHostName(host), serverPort);
           pf.setSubscriptionEnabled(true);
           pf.create("pool");
           AttributesFactory af = new AttributesFactory();
@@ -1444,7 +1450,7 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
               DistributedTestCase.disconnectFromDS();
               
               await().atMost(30, SECONDS).until(() -> {return (cache == null || cache.isClosed());});
-              getLogWriter().info("Cache is confirmed closed");
+              LogWriterUtils.getLogWriter().info("Cache is confirmed closed");
             }
           }
         });
@@ -1655,7 +1661,7 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
       }
     });
     
-    getLogWriter().info("Creating region in VM0");
+    LogWriterUtils.getLogWriter().info("Creating region in VM0");
     createPR(vm0, 1, 0, 1);
     
     //Make sure we create a bucket
@@ -1663,9 +1669,9 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
     
     //This should recover redundancy, which should cause vm0 to disconnect
     
-    ExpectedException ex = addExpectedException("PartitionOfflineException");
+    IgnoredException ex = IgnoredException.addIgnoredException("PartitionOfflineException");
     try { 
-    getLogWriter().info("Creating region in VM1");
+    LogWriterUtils.getLogWriter().info("Creating region in VM1");
     createPR(vm1, 1, 0, 1);
     
     //Make sure get a partition offline exception
@@ -1746,7 +1752,7 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
         try {
           recoveryObserver.recoveryDone.await();
         } catch (InterruptedException e) {
-          fail("Interrupted", e);
+          Assert.fail("Interrupted", e);
         }
         InternalResourceManager.setResourceObserver(null);
       }
@@ -1821,7 +1827,7 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
    * to make sure that later we can recover redundancy.
    */
   public void testCrashDuringBucketGII() {
-    addExpectedException("PartitionOfflineException");
+    IgnoredException.addIgnoredException("PartitionOfflineException");
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
@@ -1875,7 +1881,7 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
    * @throws InterruptedException 
    */
   public void testCrashDuringBucketGII2() throws InterruptedException {
-    addExpectedException("PartitionOfflineException");
+    IgnoredException.addIgnoredException("PartitionOfflineException");
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     final VM vm1 = host.getVM(1);
@@ -1933,7 +1939,7 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 30 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 30 * 1000, 200, true);
     assertEquals(Collections.singleton(0), getBucketList(vm1));
   }
   
@@ -1956,7 +1962,7 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
     createData(vm1, 1, 2, "a");
     
     //this should throw a conflicting data exception.
-    ExpectedException expect = addExpectedException("ConflictingPersistentDataException", vm0);
+    IgnoredException expect = IgnoredException.addIgnoredException("ConflictingPersistentDataException", vm0);
     try {
       createPR(vm0, 0);
       fail("should have seen a conflicting data exception");
@@ -1974,7 +1980,7 @@ public class PersistentPartitionedRegionDUnitTest extends PersistentPartitionedR
     //view from vm0 because vm0 was in conflict!
     //In fact, this is a bit of a problem, because now vm1 is dependent
     //on vm vm0.
-    expect = addExpectedException("PartitionOfflineException", vm1);
+    expect = IgnoredException.addIgnoredException("PartitionOfflineException", vm1);
     try {
       createData(vm1, 0, 1, "a");
       fail("Should have seen a PartitionOfflineException for bucket 0");

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PersistentPartitionedRegionTestBase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PersistentPartitionedRegionTestBase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PersistentPartitionedRegionTestBase.java
index 267136a..3b89271 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PersistentPartitionedRegionTestBase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PersistentPartitionedRegionTestBase.java
@@ -56,10 +56,15 @@ import com.gemstone.gemfire.internal.cache.control.InternalResourceManager;
 import com.gemstone.gemfire.internal.cache.control.InternalResourceManager.ResourceObserver;
 import com.gemstone.gemfire.internal.cache.persistence.PersistenceAdvisor;
 import com.gemstone.gemfire.internal.cache.persistence.PersistentMemberID;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * @author dsmith
@@ -84,7 +89,7 @@ public abstract class PersistentPartitionedRegionTestBase extends CacheTestCase
   public void setUp() throws Exception {
     super.setUp();
     disconnectAllFromDS();
-    invokeInEveryVM(PersistentPartitionedRegionTestBase.class,"setRegionName", new Object[]{getUniqueName()});
+    Invoke.invokeInEveryVM(PersistentPartitionedRegionTestBase.class,"setRegionName", new Object[]{getUniqueName()});
     setRegionName(getUniqueName());
   }
   
@@ -154,7 +159,7 @@ public abstract class PersistentPartitionedRegionTestBase extends CacheTestCase
         try {
           rf.start().getResults();
         } catch (Exception e) {
-          fail("interupted", e);
+          Assert.fail("interupted", e);
         }
       }
     });
@@ -198,7 +203,7 @@ public abstract class PersistentPartitionedRegionTestBase extends CacheTestCase
 
   protected void createData(VM vm, final int startKey, final int endKey,
       final String value) {
-    getLogWriter().info("createData invoked.  PR_REGION_NAME is " + PR_REGION_NAME);
+    LogWriterUtils.getLogWriter().info("createData invoked.  PR_REGION_NAME is " + PR_REGION_NAME);
         createData(vm, startKey, endKey,value, PR_REGION_NAME);
       }
 
@@ -208,7 +213,7 @@ public abstract class PersistentPartitionedRegionTestBase extends CacheTestCase
           
           public void run() {
             Cache cache = getCache();
-            getLogWriter().info("creating data in " + regionName);
+            LogWriterUtils.getLogWriter().info("creating data in " + regionName);
             Region region = cache.getRegion(regionName);
             
             for(int i =startKey; i < endKey; i++) {
@@ -367,7 +372,7 @@ public abstract class PersistentPartitionedRegionTestBase extends CacheTestCase
         try {
           recoveryDone.await(MAX_WAIT, TimeUnit.MILLISECONDS);
         } catch (InterruptedException e) {
-          fail("interrupted", e);
+          Assert.fail("interrupted", e);
         }
       }
     };
@@ -410,7 +415,7 @@ public abstract class PersistentPartitionedRegionTestBase extends CacheTestCase
           try {
             recoveryDone.await();
           } catch (InterruptedException e) {
-            fail("Interrupted", e);
+            Assert.fail("Interrupted", e);
           }
         }
       }
@@ -474,7 +479,7 @@ public abstract class PersistentPartitionedRegionTestBase extends CacheTestCase
       public Object call() throws Exception {
         Cache cache = getCache();
         final PartitionedRegion region = (PartitionedRegion) cache.getRegion(regionName);
-        waitForCriterion(new WaitCriterion() {
+        Wait.waitForCriterion(new WaitCriterion() {
           
           public boolean done() {
             return expectedBuckets.equals(getActualBuckets());
@@ -553,7 +558,7 @@ public abstract class PersistentPartitionedRegionTestBase extends CacheTestCase
                 }
               }
             };
-            waitForCriterion(wc, MAX_WAIT, 500, true);
+            Wait.waitForCriterion(wc, MAX_WAIT, 500, true);
           } finally {
             adminDS.disconnect();
           }
@@ -685,7 +690,7 @@ public abstract class PersistentPartitionedRegionTestBase extends CacheTestCase
         Cache cache = getCache();
         PartitionedRegion region = (PartitionedRegion) cache.getRegion(regionName);
         final PartitionedRegionDataStore dataStore = region.getDataStore();
-        waitForCriterion(new WaitCriterion() {
+        Wait.waitForCriterion(new WaitCriterion() {
     
           public boolean done() {
             Set<Integer> vm2Buckets = dataStore.getAllLocalBucketIds();
@@ -707,7 +712,7 @@ public abstract class PersistentPartitionedRegionTestBase extends CacheTestCase
       public void run() {
         Cache cache = getCache();
         final Region region = cache.getRegion(regionName);
-        waitForCriterion(new WaitCriterion() {
+        Wait.waitForCriterion(new WaitCriterion() {
           
           public boolean done() {
             PartitionRegionInfo info = PartitionRegionHelper.getPartitionRegionInfo(region);
@@ -785,7 +790,7 @@ public abstract class PersistentPartitionedRegionTestBase extends CacheTestCase
     BufferedReader br = new BufferedReader(new InputStreamReader(is));
     String line;
     while((line = br.readLine()) != null) {
-      getLogWriter().fine("OUTPUT:" + line);
+      LogWriterUtils.getLogWriter().fine("OUTPUT:" + line);
       //TODO validate output
     };
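
The waitForCriterion calls above likewise move onto the Wait utility. A sketch of the pattern as a hypothetical helper; the description() method is assumed to be WaitCriterion's second method on this branch, and the bucket lookup is passed in rather than taken from the test base:

    import java.util.Set;
    import java.util.concurrent.Callable;
    import com.gemstone.gemfire.test.dunit.Wait;
    import com.gemstone.gemfire.test.dunit.WaitCriterion;

    static void waitForBuckets(final Set<Integer> expectedBuckets,
                               final Callable<Set<Integer>> actualBuckets,
                               long maxWaitMillis) {
      Wait.waitForCriterion(new WaitCriterion() {
        public boolean done() {
          try {
            return expectedBuckets.equals(actualBuckets.call());
          } catch (Exception e) {
            return false;            // keep polling until the timeout expires
          }
        }
        public String description() {
          return "still waiting for buckets " + expectedBuckets;
        }
      }, maxWaitMillis, 500, true);  // poll every 500 ms, throw on timeout
    }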
     

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PersistentPartitionedRegionWithTransactionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PersistentPartitionedRegionWithTransactionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PersistentPartitionedRegionWithTransactionDUnitTest.java
index e92d6b1..90118d1 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PersistentPartitionedRegionWithTransactionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/PersistentPartitionedRegionWithTransactionDUnitTest.java
@@ -23,6 +23,8 @@ import com.gemstone.gemfire.internal.cache.DiskStoreImpl;
 import com.gemstone.gemfire.internal.cache.TXManagerImpl;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -40,24 +42,19 @@ public class PersistentPartitionedRegionWithTransactionDUnitTest extends Persist
   }
   
   @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-    invokeInEveryVM(new SerializableRunnable() {
-      
+  protected final void postTearDownCacheTestCase() throws Exception {
+    Invoke.invokeInEveryVM(new SerializableRunnable() {
       public void run() {
         TXManagerImpl.ALLOW_PERSISTENT_TRANSACTIONS = false;
         System.setProperty(DiskStoreImpl.RECOVER_VALUES_SYNC_PROPERTY_NAME, "false");
       }
     });
-    
   }
 
-
-
   @Override
   public void setUp() throws Exception {
     super.setUp();
-    invokeInEveryVM(new SerializableRunnable() {
+    Invoke.invokeInEveryVM(new SerializableRunnable() {
       
       public void run() {
         TXManagerImpl.ALLOW_PERSISTENT_TRANSACTIONS = true;
@@ -134,12 +131,12 @@ public class PersistentPartitionedRegionWithTransactionDUnitTest extends Persist
   @Override
   protected void createData(VM vm, final int startKey, final int endKey, final String value,
       final String regionName) {
-    getLogWriter().info("creating runnable to create data for region " + regionName);
+    LogWriterUtils.getLogWriter().info("creating runnable to create data for region " + regionName);
     SerializableRunnable createData = new SerializableRunnable() {
       
       public void run() {
         Cache cache = getCache();
-        getLogWriter().info("getting region " + regionName);
+        LogWriterUtils.getLogWriter().info("getting region " + regionName);
         Region region = cache.getRegion(regionName);
         
         for(int i =startKey; i < endKey; i++) {
@@ -171,7 +168,7 @@ public class PersistentPartitionedRegionWithTransactionDUnitTest extends Persist
       
       public void run() {
         Cache cache = getCache();
-        getLogWriter().info("checking data in " + regionName);
+        LogWriterUtils.getLogWriter().info("checking data in " + regionName);
         Region region = cache.getRegion(regionName);
         
         for(int i =startKey; i < endKey; i++) {
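
The hunk above also shows the tearDown refactoring used throughout this commit: the tearDown2() override, which had to call super.tearDown2() itself, is replaced by the postTearDownCacheTestCase() hook, and the inherited invokeInEveryVM(..) becomes a static call on Invoke. A minimal sketch of the new shape, meant to live inside a subclass of the dunit CacheTestCase on this branch:

    import com.gemstone.gemfire.internal.cache.TXManagerImpl;
    import com.gemstone.gemfire.test.dunit.Invoke;
    import com.gemstone.gemfire.test.dunit.SerializableRunnable;

    @Override
    protected final void postTearDownCacheTestCase() throws Exception {
      // Called by the base class after its own cache tear-down, so no super call here.
      Invoke.invokeInEveryVM(new SerializableRunnable() {
        public void run() {
          // undo the per-VM flag that setUp() flipped on
          TXManagerImpl.ALLOW_PERSISTENT_TRANSACTIONS = false;
        }
      });
    }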

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/ShutdownAllDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/ShutdownAllDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/ShutdownAllDUnitTest.java
index 4918f00..cd68c35 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/ShutdownAllDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/ShutdownAllDUnitTest.java
@@ -55,13 +55,16 @@ import com.gemstone.gemfire.internal.cache.PartitionedRegion;
 import com.gemstone.gemfire.internal.cache.PutAllPartialResultException;
 import com.gemstone.gemfire.internal.cache.control.InternalResourceManager;
 import com.gemstone.gemfire.internal.cache.control.InternalResourceManager.ResourceObserver;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.RMIException;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 /**
  * Tests the basic use cases for PR persistence.
@@ -142,7 +145,7 @@ public class ShutdownAllDUnitTest extends CacheTestCase {
     createData(vm0, 0, numBuckets, "a", "region");
 
     vm0.invoke(addExceptionTag1(expectedExceptions));
-    invokeInEveryVM(new SerializableRunnable("set TestInternalGemFireError") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("set TestInternalGemFireError") {
       public void run() {
         System.setProperty("TestInternalGemFireError", "true");
       }
@@ -151,7 +154,7 @@ public class ShutdownAllDUnitTest extends CacheTestCase {
     
     assertTrue(InternalDistributedSystem.getExistingSystems().isEmpty());
     
-    invokeInEveryVM(new SerializableRunnable("reset TestInternalGemFireError") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("reset TestInternalGemFireError") {
       public void run() {
         System.setProperty("TestInternalGemFireError", "false");
       }
@@ -178,7 +181,7 @@ public class ShutdownAllDUnitTest extends CacheTestCase {
     
     vm0.invoke(addExceptionTag1(expectedExceptions));
     vm1.invoke(addExceptionTag1(expectedExceptions));
-    invokeInEveryVM(new SerializableRunnable("set TestInternalGemFireError") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("set TestInternalGemFireError") {
       public void run() {
         System.setProperty("TestInternalGemFireError", "true");
       }
@@ -187,7 +190,7 @@ public class ShutdownAllDUnitTest extends CacheTestCase {
     
     assertTrue(InternalDistributedSystem.getExistingSystems().isEmpty());
     
-    invokeInEveryVM(new SerializableRunnable("reset TestInternalGemFireError") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("reset TestInternalGemFireError") {
       public void run() {
         System.setProperty("TestInternalGemFireError", "false");
       }
@@ -483,7 +486,7 @@ public class ShutdownAllDUnitTest extends CacheTestCase {
     });
     
     //wait for shutdown to finish
-    pause(10000);
+    Wait.pause(10000);
     
     // restart vm0
     AsyncInvocation a0 = createRegionAsync(vm0, "region", "disk", true, 1);
@@ -546,7 +549,7 @@ public class ShutdownAllDUnitTest extends CacheTestCase {
     AsyncInvocation a0 = createRegionAsync(vm0, "region", "disk", true, 1);
     
     //Wait a bit for the initialization to get stuck
-    pause(20000);
+    Wait.pause(20000);
     assertTrue(a0.isAlive());
     
     //Do another shutdown all, with a member offline and another stuck
@@ -718,7 +721,7 @@ public class ShutdownAllDUnitTest extends CacheTestCase {
           try {
             recoveryDone.await();
           } catch (InterruptedException e) {
-            fail("Interrupted", e);
+            Assert.fail("Interrupted", e);
           }
         }
       }
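
The fail("Interrupted", e) call sites above now name com.gemstone.gemfire.test.dunit.Assert explicitly because JUnit's own Assert only offers fail() and fail(String); the two-argument overload that records the causing throwable lives on the dunit Assert class after this extraction. A short sketch of the intended usage, with the latch name taken from the surrounding test:

    import com.gemstone.gemfire.test.dunit.Assert;

    try {
      recoveryDone.await();            // wait on the recovery latch used by the test
    } catch (InterruptedException e) {
      Assert.fail("Interrupted", e);   // fail the test and keep the cause in the report
    }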

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/StreamingPartitionOperationOneDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/StreamingPartitionOperationOneDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/StreamingPartitionOperationOneDUnitTest.java
index b9e8ebe..f5e0b71 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/StreamingPartitionOperationOneDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/StreamingPartitionOperationOneDUnitTest.java
@@ -47,6 +47,7 @@ import com.gemstone.gemfire.distributed.internal.ReplyProcessor21;
 import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedMember;
 import com.gemstone.gemfire.internal.cache.PartitionedRegion;
 import com.gemstone.gemfire.internal.cache.Token;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -108,7 +109,7 @@ public class StreamingPartitionOperationOneDUnitTest extends CacheTestCase {
       throw e;
     }
     catch (Throwable t) {
-      fail("getPartitionedDataFrom failed", t);
+      Assert.fail("getPartitionedDataFrom failed", t);
     }
     assertTrue(streamOp.dataValidated);
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/fixed/FixedPartitioningDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/fixed/FixedPartitioningDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/fixed/FixedPartitioningDUnitTest.java
index 30063d4..ce42945 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/fixed/FixedPartitioningDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/partitioned/fixed/FixedPartitioningDUnitTest.java
@@ -23,9 +23,11 @@ import com.gemstone.gemfire.cache.DuplicatePrimaryPartitionException;
 import com.gemstone.gemfire.cache.EntryNotFoundException;
 import com.gemstone.gemfire.cache.FixedPartitionAttributes;
 import com.gemstone.gemfire.cache.partition.PartitionNotAvailableException;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.Wait;
 import com.gemstone.gemfire.test.dunit.Host;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.ExpectedException;
 
 /**
  * This Dunit test class have multiple tests to tests different validations of
@@ -53,10 +55,6 @@ public class FixedPartitioningDUnitTest extends FixedPartitioningTestBase {
 
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-  }
-
   /**
    * This test validates that null partition name cannot be added in
    * FixedPartitionAttributes
@@ -77,7 +75,7 @@ public class FixedPartitioningDUnitTest extends FixedPartitioningTestBase {
       if (!((illegal.getCause() instanceof IllegalStateException) && (illegal
           .getCause().getMessage()
           .contains("Fixed partition name cannot be null")))) {
-        fail("Expected IllegalStateException ", illegal);
+        Assert.fail("Expected IllegalStateException ", illegal);
       }
     }
   }
@@ -107,7 +105,7 @@ public class FixedPartitioningDUnitTest extends FixedPartitioningTestBase {
       if (!((illegal.getCause() instanceof IllegalStateException) && (illegal
           .getCause().getMessage()
           .contains("can be added only once in FixedPartitionAttributes")))) {
-        fail("Expected IllegalStateException ", illegal);
+        Assert.fail("Expected IllegalStateException ", illegal);
       }
     }
   }
@@ -134,7 +132,7 @@ public class FixedPartitioningDUnitTest extends FixedPartitioningTestBase {
     catch (Exception illegal) {
       if (!((illegal.getCause() instanceof IllegalStateException) && (illegal
           .getCause().getMessage().contains("can not be defined for accessor")))) {
-        fail("Expected IllegalStateException ", illegal);
+        Assert.fail("Expected IllegalStateException ", illegal);
       }
     }
   }
@@ -147,7 +145,7 @@ public class FixedPartitioningDUnitTest extends FixedPartitioningTestBase {
    */
 
   public void testSamePartitionName_Primary_OnTwoMembers() {
-    ExpectedException ex = addExpectedException("DuplicatePrimaryPartitionException");
+    IgnoredException ex = IgnoredException.addIgnoredException("DuplicatePrimaryPartitionException");
     try {
       member1.invoke(FixedPartitioningTestBase.class, "createCacheOnMember");
       FixedPartitionAttributes fpa1 = FixedPartitionAttributes
@@ -179,7 +177,7 @@ public class FixedPartitioningDUnitTest extends FixedPartitioningTestBase {
       if (!((duplicate.getCause() instanceof DuplicatePrimaryPartitionException) && (duplicate
           .getCause().getMessage()
           .contains("can not be defined as primary on more than one node")))) {
-        fail("Expected DuplicatePrimaryPartitionException ", duplicate);
+        Assert.fail("Expected DuplicatePrimaryPartitionException ", duplicate);
       }
     } finally {
       ex.remove();
@@ -192,7 +190,7 @@ public class FixedPartitioningDUnitTest extends FixedPartitioningTestBase {
    */
 
   public void testSamePartitionName_DifferentNumBuckets() {
-    ExpectedException ex = addExpectedException("IllegalStateException");
+    IgnoredException ex = IgnoredException.addIgnoredException("IllegalStateException");
     try {
       member1.invoke(FixedPartitioningTestBase.class, "createCacheOnMember");
       FixedPartitionAttributes fpa1 = FixedPartitionAttributes
@@ -220,7 +218,7 @@ public class FixedPartitioningDUnitTest extends FixedPartitioningTestBase {
     catch (Exception illegal) {
       if (!((illegal.getCause() instanceof IllegalStateException) && (illegal
           .getCause().getMessage().contains("num-buckets are not same")))) {
-        fail("Expected IllegalStateException ", illegal);
+        Assert.fail("Expected IllegalStateException ", illegal);
       }
     } finally {
       ex.remove();
@@ -235,7 +233,7 @@ public class FixedPartitioningDUnitTest extends FixedPartitioningTestBase {
    */
 
   public void testNumberOfPartitions() {
-    ExpectedException expected = addExpectedException("IllegalStateException");
+    IgnoredException expected = IgnoredException.addIgnoredException("IllegalStateException");
     try {
       member1.invoke(FixedPartitioningTestBase.class, "createCacheOnMember");
       member1.invoke(FixedPartitioningTestBase.class, "createRegionWithPartitionAttributes",
@@ -282,7 +280,7 @@ public class FixedPartitioningDUnitTest extends FixedPartitioningTestBase {
       if (!((ex.getCause() instanceof IllegalStateException) && (ex.getCause()
           .getMessage()
           .contains("should never exceed number of redundant copies")))) {
-        fail("Expected IllegalStateException ", ex);
+        Assert.fail("Expected IllegalStateException ", ex);
       }
     } finally {
       expected.remove();
@@ -295,7 +293,7 @@ public class FixedPartitioningDUnitTest extends FixedPartitioningTestBase {
    */
 
   public void testNumBuckets_totalNumBuckets() {
-    ExpectedException expected = addExpectedException("IllegalStateException");
+    IgnoredException expected = IgnoredException.addIgnoredException("IllegalStateException");
     try {
       member1.invoke(FixedPartitioningTestBase.class, "createCacheOnMember");
       member1.invoke(FixedPartitioningTestBase.class, "createRegionWithPartitionAttributes",
@@ -330,7 +328,7 @@ public class FixedPartitioningDUnitTest extends FixedPartitioningTestBase {
       if (!((ex.getCause() instanceof IllegalStateException) && (ex.getCause()
           .getMessage()
           .contains("for different primary partitions should not be greater than total-num-buckets ")))) {
-        fail("Expected IllegalStateException ", ex);
+        Assert.fail("Expected IllegalStateException ", ex);
       }
     } finally {
       expected.remove();
@@ -372,7 +370,7 @@ public class FixedPartitioningDUnitTest extends FixedPartitioningTestBase {
     }
     catch (Exception ex) {
       if (!((ex.getCause() instanceof PartitionNotAvailableException))) {
-        fail("Expected PartitionNotAvailableException ", ex);
+        Assert.fail("Expected PartitionNotAvailableException ", ex);
       }
     }
   }
@@ -384,7 +382,7 @@ public class FixedPartitioningDUnitTest extends FixedPartitioningTestBase {
    */
   
   public void test_DataStoreWithoutPartition_DataStoreWithPartition() {
-    ExpectedException expected = addExpectedException("IllegalStateException");
+    IgnoredException expected = IgnoredException.addIgnoredException("IllegalStateException");
     try {
       member1.invoke(FixedPartitioningTestBase.class, "createCacheOnMember");
       member1.invoke(FixedPartitioningTestBase.class, "createRegionWithPartitionAttributes",
@@ -402,7 +400,7 @@ public class FixedPartitioningDUnitTest extends FixedPartitioningTestBase {
     }
     catch (Exception ex) {
       if (!((ex.getCause() instanceof IllegalStateException))) {
-        fail("Expected IllegalStateException ", ex);
+        Assert.fail("Expected IllegalStateException ", ex);
       }
     } finally {
       expected.remove();
@@ -416,7 +414,7 @@ public class FixedPartitioningDUnitTest extends FixedPartitioningTestBase {
    */
 
   public void test_DataStoreWithPartition_DataStoreWithoutPartition() {
-    ExpectedException expected = addExpectedException("IllegalStateException");
+    IgnoredException expected = IgnoredException.addIgnoredException("IllegalStateException");
     try {
       member2.invoke(FixedPartitioningTestBase.class, "createCacheOnMember");
       FixedPartitionAttributes fpa1 = FixedPartitionAttributes
@@ -434,7 +432,7 @@ public class FixedPartitioningDUnitTest extends FixedPartitioningTestBase {
     }
     catch (Exception ex) {
       if (!((ex.getCause() instanceof IllegalStateException))) {
-        fail("Expected IllegalStateException ", ex);
+        Assert.fail("Expected IllegalStateException ", ex);
       }
     } finally {
       expected.remove();
@@ -668,7 +666,7 @@ public class FixedPartitioningDUnitTest extends FixedPartitioningTestBase {
     }
     catch (Exception ex) {
       if (!((ex.getCause() instanceof EntryNotFoundException))) {
-        fail("Expected EntryNotFoundException ", ex);
+        Assert.fail("Expected EntryNotFoundException ", ex);
       }
     }
     
@@ -727,7 +725,7 @@ public class FixedPartitioningDUnitTest extends FixedPartitioningTestBase {
     }
     catch (Exception ex) {
       if (!((ex.getCause() instanceof IllegalStateException))) {
-        fail("Expected IllegalStateException ", ex);
+        Assert.fail("Expected IllegalStateException ", ex);
       }
     }
   }
@@ -876,7 +874,7 @@ public class FixedPartitioningDUnitTest extends FixedPartitioningTestBase {
     }
     catch (Exception ex) {
       if (!((ex.getCause() instanceof IllegalStateException))) {
-        fail("Expected IllegalStateException ", ex);
+        Assert.fail("Expected IllegalStateException ", ex);
       }
     }
   }
@@ -931,7 +929,7 @@ public class FixedPartitioningDUnitTest extends FixedPartitioningTestBase {
     }
     catch (Exception ex) {
       if (!((ex.getCause() instanceof IllegalStateException))) {
-        fail("Expected IllegalStateException ", ex);
+        Assert.fail("Expected IllegalStateException ", ex);
       }
     }
   }
@@ -1208,7 +1206,7 @@ public class FixedPartitioningDUnitTest extends FixedPartitioningTestBase {
     createRegionWithPartitionAttributes("Quarter", fpaList, 3, 40, 12,
         new QuarterPartitionResolver(), null, false);
 
-    pause(1000);
+    Wait.pause(1000);
 
     member1.invoke(FixedPartitioningTestBase.class, "checkPrimaryBucketsForQuarter",
         new Object[] { 3, 0 });
@@ -1328,7 +1326,7 @@ public class FixedPartitioningDUnitTest extends FixedPartitioningTestBase {
         new Object[] { 9, 3 });
 
     member4.invoke(FixedPartitioningTestBase.class, "closeCache");
-    pause(1000);
+    Wait.pause(1000);
 
     member1.invoke(FixedPartitioningTestBase.class,
         "checkPrimarySecondaryData_TwoSecondaries", new Object[] { Quarter1,
@@ -1438,7 +1436,7 @@ public class FixedPartitioningDUnitTest extends FixedPartitioningTestBase {
         new Object[] { 6, 3 });
 
     member4.invoke(FixedPartitioningTestBase.class, "closeCache");
-    pause(1000);
+    Wait.pause(1000);
 
     member3.invoke(FixedPartitioningTestBase.class, "checkPrimaryBucketsForQuarterAfterCacheClosed",
         new Object[] { 6, 6 });
@@ -1456,7 +1454,7 @@ public class FixedPartitioningDUnitTest extends FixedPartitioningTestBase {
         new Object[] { "Quarter", fpaList, 1, 40, 12,
             new QuarterPartitionResolver(), null, false });
 
-    pause(1000);
+    Wait.pause(1000);
 
     member1.invoke(FixedPartitioningTestBase.class, "checkPrimarySecondaryData",
         new Object[] { Quarter1, true });
@@ -1543,7 +1541,7 @@ public class FixedPartitioningDUnitTest extends FixedPartitioningTestBase {
 
     member4.invoke(FixedPartitioningTestBase.class, "closeCache");
     member2.invoke(FixedPartitioningTestBase.class, "closeCache");
-    pause(1000);
+    Wait.pause(1000);
 
     member3.invoke(FixedPartitioningTestBase.class, "checkPrimaryBucketsForQuarterAfterCacheClosed",
         new Object[] { 6, 6 });
@@ -1572,7 +1570,7 @@ public class FixedPartitioningDUnitTest extends FixedPartitioningTestBase {
     member2.invoke(FixedPartitioningTestBase.class, "createRegionWithPartitionAttributes",
         new Object[] { "Quarter", fpaList, 1, 40, 12,
             new QuarterPartitionResolver(), null, false });
-    pause(1000);
+    Wait.pause(1000);
 
     member1.invoke(FixedPartitioningTestBase.class, "checkPrimarySecondaryData",
         new Object[] { Quarter1, true });
@@ -1594,7 +1592,7 @@ public class FixedPartitioningDUnitTest extends FixedPartitioningTestBase {
     
     member4.invoke(FixedPartitioningTestBase.class, "doRebalance");
     
-    pause(2000);
+    Wait.pause(2000);
     member1.invoke(FixedPartitioningTestBase.class, "checkPrimaryBucketsForQuarter",
         new Object[] { 6, 3 });
     member2.invoke(FixedPartitioningTestBase.class, "checkPrimaryBucketsForQuarter",
@@ -1674,7 +1672,7 @@ public class FixedPartitioningDUnitTest extends FixedPartitioningTestBase {
 
     member3.invoke(FixedPartitioningTestBase.class, "closeCache");
     
-    pause(1000);  
+    Wait.pause(1000);  
     
     member1.invoke(FixedPartitioningTestBase.class, "checkStartingBucketIDs_Nodedown");
     member2.invoke(FixedPartitioningTestBase.class, "checkStartingBucketIDs_Nodedown");
@@ -1685,7 +1683,7 @@ public class FixedPartitioningDUnitTest extends FixedPartitioningTestBase {
         new Object[] { "Quarter", fpaList, 2, 40, 12,
             new QuarterPartitionResolver(), null, false });
     
-    pause(3000);
+    Wait.pause(3000);
     
     member1.invoke(FixedPartitioningTestBase.class, "checkStartingBucketIDs_Nodeup");
     member2.invoke(FixedPartitioningTestBase.class, "checkStartingBucketIDs_Nodeup");
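
The fixed sleeps in the FixedPartitioning tests are kept as-is, now spelled Wait.pause(milliseconds). As a hedged aside only, where the awaited state can be written as a predicate, the criterion-based wait that this same commit moves onto Wait tends to be more robust than a fixed pause; the predicate below is hypothetical:

    import com.gemstone.gemfire.test.dunit.Wait;
    import com.gemstone.gemfire.test.dunit.WaitCriterion;

    // Unconditional sleep, as used above after closeCache()/rebalance steps:
    Wait.pause(1000);                  // sleeps about one second regardless of state

    // Condition-based alternative (primaryBucketsSettled() is hypothetical):
    Wait.waitForCriterion(new WaitCriterion() {
      public boolean done() { return primaryBucketsSettled(); }
      public String description() { return "primary buckets did not settle"; }
    }, 30 * 1000, 200, true);          // up to 30 s, polling every 200 ms, fail on timeout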


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventQueueStatsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventQueueStatsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventQueueStatsDUnitTest.java
index 9258c49..55f9112 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventQueueStatsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventQueueStatsDUnitTest.java
@@ -21,6 +21,7 @@ import java.util.Map;
 
 import com.gemstone.gemfire.internal.cache.wan.AsyncEventQueueTestBase;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 public class AsyncEventQueueStatsDUnitTest extends AsyncEventQueueTestBase {
 
@@ -56,20 +57,20 @@ public class AsyncEventQueueStatsDUnitTest extends AsyncEventQueueTestBase {
         false, 100, 100, false, false, null, false });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
 
-    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR",
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_RR",
         1000 });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
         new Object[] { "ln", 1000 });// primary sender
-    pause(2000);//give some time for system to become stable
+    Wait.pause(2000);//give some time for system to become stable
     
     vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueStats", new Object[] {
         "ln", 0, 1000, 1000, 1000 });
@@ -111,22 +112,22 @@ public class AsyncEventQueueStatsDUnitTest extends AsyncEventQueueTestBase {
       false, 100, 100, false, false, null, false });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln1,ln2", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln1,ln2", isOffHeap() });
     vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln1,ln2", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln1,ln2", isOffHeap() });
     vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln1,ln2", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln1,ln2", isOffHeap() });
     vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln1,ln2", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln1,ln2", isOffHeap() });
 
-    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR",
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_RR",
         1000 });
     
     vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
         new Object[] { "ln1", 1000 });
     vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
         new Object[] { "ln2", 1000 });
-    pause(2000);//give some time for system to become stable
+    Wait.pause(2000);//give some time for system to become stable
 
     vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueStats", new Object[] {
         "ln1", 0, 1000, 1000, 1000 });
@@ -164,17 +165,17 @@ public class AsyncEventQueueStatsDUnitTest extends AsyncEventQueueTestBase {
       false, 100, 100, false, false, null, false });
     
     vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue", new Object[] {
-        testName + "_RR", "ln", isOffHeap() });
+        getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue", new Object[] {
-        testName + "_RR", "ln", isOffHeap() });
+        getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue", new Object[] {
-        testName + "_RR", "ln", isOffHeap() });
+        getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue", new Object[] {
-        testName + "_RR", "ln", isOffHeap() });
+        getTestMethodName() + "_RR", "ln", isOffHeap() });
     
     AsyncInvocation inv1 = vm5.invokeAsync(AsyncEventQueueTestBase.class, "doPuts",
-        new Object[] { testName + "_RR", 10000 });
-    pause(2000);
+        new Object[] { getTestMethodName() + "_RR", 10000 });
+    Wait.pause(2000);
     AsyncInvocation inv2 = vm4.invokeAsync(AsyncEventQueueTestBase.class, "killAsyncEventQueue", new Object[] { "ln" });
     Boolean isKilled = Boolean.FALSE;
     try {
@@ -190,7 +191,7 @@ public class AsyncEventQueueStatsDUnitTest extends AsyncEventQueueTestBase {
     }
     inv1.join();
     inv2.join();
-    pause(2000);//give some time for system to become stable
+    Wait.pause(2000);//give some time for system to become stable
     vm5.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueStats_Failover", new Object[] {"ln", 10000});
   }
 
@@ -213,33 +214,33 @@ public class AsyncEventQueueStatsDUnitTest extends AsyncEventQueueTestBase {
 
     //create one RR (RR_1) on local site
     vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue", new Object[] {
-        testName + "_RR_1", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_1", "ln", isOffHeap() });
     vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue", new Object[] {
-        testName + "_RR_1", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_1", "ln", isOffHeap() });
     vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue", new Object[] {
-        testName + "_RR_1", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_1", "ln", isOffHeap() });
     vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue", new Object[] {
-        testName + "_RR_1", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_1", "ln", isOffHeap() });
 
     //create another RR (RR_2) on local site
     vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue", new Object[] {
-        testName + "_RR_2", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_2", "ln", isOffHeap() });
     vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue", new Object[] {
-        testName + "_RR_2", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_2", "ln", isOffHeap() });
     vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue", new Object[] {
-        testName + "_RR_2", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_2", "ln", isOffHeap() });
     vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue", new Object[] {
-        testName + "_RR_2", "ln", isOffHeap() });
+        getTestMethodName() + "_RR_2", "ln", isOffHeap() });
     
     //start puts in RR_1 in another thread
-    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR_1", 1000 });
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_RR_1", 1000 });
     //do puts in RR_2 in main thread
-    vm4.invoke(AsyncEventQueueTestBase.class, "doPutsFrom", new Object[] { testName + "_RR_2", 1000, 1500 });
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPutsFrom", new Object[] { getTestMethodName() + "_RR_2", 1000, 1500 });
     
     vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
         new Object[] { "ln", 1500 });
         
-    pause(2000);//give some time for system to become stable
+    Wait.pause(2000);//give some time for system to become stable
     vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueStats", new Object[] {"ln",
       0, 1500, 1500, 1500});
     vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueUnprocessedStats", new Object[] {"ln", 0});
@@ -266,19 +267,19 @@ public class AsyncEventQueueStatsDUnitTest extends AsyncEventQueueTestBase {
         false, 100, 100, true, false, null, false });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     
     vm4
         .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
             new Object[] { "ln" });
     //pause at least for the batchTimeInterval to make sure that the AsyncEventQueue is actually paused
-    pause(2000);
+    Wait.pause(2000);
 
     final Map keyValues = new HashMap();
     final Map updateKeyValues = new HashMap();
@@ -286,7 +287,7 @@ public class AsyncEventQueueStatsDUnitTest extends AsyncEventQueueTestBase {
       keyValues.put(i, i);
     }
     
-    vm4.invoke(AsyncEventQueueTestBase.class, "putGivenKeyValue", new Object[] { testName + "_RR", keyValues });
+    vm4.invoke(AsyncEventQueueTestBase.class, "putGivenKeyValue", new Object[] { getTestMethodName() + "_RR", keyValues });
     vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueSize", new Object[] { "ln", keyValues.size() });
     
     for(int i=0;i<500;i++) {
@@ -295,12 +296,12 @@ public class AsyncEventQueueStatsDUnitTest extends AsyncEventQueueTestBase {
     
     // Put the update events and check the queue size.
     // There should be no conflation with the previous create events.
-    vm4.invoke(AsyncEventQueueTestBase.class, "putGivenKeyValue", new Object[] { testName + "_RR", updateKeyValues });    
+    vm4.invoke(AsyncEventQueueTestBase.class, "putGivenKeyValue", new Object[] { getTestMethodName() + "_RR", updateKeyValues });    
     vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueSize", new Object[] { "ln", keyValues.size() + updateKeyValues.size() });
     
     // Put the update events again and check the queue size.
     // There should be conflation with the previous update events.
-    vm4.invoke(AsyncEventQueueTestBase.class, "putGivenKeyValue", new Object[] { testName + "_RR", updateKeyValues });    
+    vm4.invoke(AsyncEventQueueTestBase.class, "putGivenKeyValue", new Object[] { getTestMethodName() + "_RR", updateKeyValues });    
     vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueSize", new Object[] { "ln", keyValues.size() + updateKeyValues.size() });
     
     vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
@@ -310,7 +311,7 @@ public class AsyncEventQueueStatsDUnitTest extends AsyncEventQueueTestBase {
     vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
         new Object[] { "ln", 1000 });
     
-    pause(2000);// give some time for system to become stable
+    Wait.pause(2000);// give some time for system to become stable
     vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueStats", new Object[] {
         "ln", 0, 2000, 2000, 1000 });
     vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueConflatedStats",

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentAsyncEventQueueDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentAsyncEventQueueDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentAsyncEventQueueDUnitTest.java
index 0e6efa0..9398628 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentAsyncEventQueueDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/concurrent/ConcurrentAsyncEventQueueDUnitTest.java
@@ -19,6 +19,7 @@ package com.gemstone.gemfire.internal.cache.wan.concurrent;
 import com.gemstone.gemfire.cache.asyncqueue.internal.AsyncEventQueueFactoryImpl;
 import com.gemstone.gemfire.cache.wan.GatewaySender.OrderPolicy;
 import com.gemstone.gemfire.internal.cache.wan.AsyncEventQueueTestBase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 
 /**
@@ -105,15 +106,15 @@ public class ConcurrentAsyncEventQueueDUnitTest extends AsyncEventQueueTestBase
         false, 100, 10, true, false, null, false, 3, OrderPolicy.KEY });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
 
-    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR",
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_RR",
         100 });
     
     vm4.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
@@ -163,19 +164,19 @@ public class ConcurrentAsyncEventQueueDUnitTest extends AsyncEventQueueTestBase
         false, 100, 10, true, false, null, false, 3, OrderPolicy.THREAD });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
 
-    AsyncInvocation inv1 = vm4.invokeAsync(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR",
+    AsyncInvocation inv1 = vm4.invokeAsync(AsyncEventQueueTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_RR",
         50 });
-    AsyncInvocation inv2 = vm4.invokeAsync(AsyncEventQueueTestBase.class, "doNextPuts", new Object[] { testName + "_RR",
+    AsyncInvocation inv2 = vm4.invokeAsync(AsyncEventQueueTestBase.class, "doNextPuts", new Object[] { getTestMethodName() + "_RR",
       50, 100 });
-    AsyncInvocation inv3 = vm4.invokeAsync(AsyncEventQueueTestBase.class, "doNextPuts", new Object[] { testName + "_RR",
+    AsyncInvocation inv3 = vm4.invokeAsync(AsyncEventQueueTestBase.class, "doNextPuts", new Object[] { getTestMethodName() + "_RR",
       100, 150 });
     
     try {
@@ -183,7 +184,7 @@ public class ConcurrentAsyncEventQueueDUnitTest extends AsyncEventQueueTestBase
       inv2.join();
       inv3.join();
     } catch (InterruptedException ie) {
-      fail(
+      Assert.fail(
           "Cought interrupted exception while waiting for the task tgo complete.",
           ie);
     }
@@ -235,15 +236,15 @@ public class ConcurrentAsyncEventQueueDUnitTest extends AsyncEventQueueTestBase
         true, 100, 10, true, false, null, false, 3, OrderPolicy.KEY });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm5.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm6.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm7.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
 
-    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_PR",
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR",
         100 });
     
     vm4.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
@@ -301,15 +302,15 @@ public class ConcurrentAsyncEventQueueDUnitTest extends AsyncEventQueueTestBase
             OrderPolicy.PARTITION });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm5.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm6.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm7.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
 
-    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_PR",
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR",
         100 });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
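
For reference, the hunks above move this test onto the extracted DUnit helpers: the static Assert.fail(String, Throwable) replaces the inherited fail, and region names come from getTestMethodName(). A condensed sketch of the async-put shape they produce (the vm4 field and the doPuts target are assumed from the surrounding test class):

    import com.gemstone.gemfire.test.dunit.Assert;
    import com.gemstone.gemfire.test.dunit.AsyncInvocation;

    // Start concurrent puts in vm4 and wait for them; an interrupt or a remote
    // failure is rethrown with its cause attached via the static Assert.fail.
    AsyncInvocation inv1 = vm4.invokeAsync(AsyncEventQueueTestBase.class, "doPuts",
        new Object[] { getTestMethodName() + "_RR", 50 });
    try {
      inv1.join();
    } catch (InterruptedException ie) {
      Assert.fail("Caught interrupted exception while waiting for the task to complete.", ie);
    }
    if (inv1.exceptionOccurred()) {
      Assert.fail("doPuts failed in vm4", inv1.getException());
    }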

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/misc/CommonParallelAsyncEventQueueDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/misc/CommonParallelAsyncEventQueueDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/misc/CommonParallelAsyncEventQueueDUnitTest.java
index 5b9d3bd..0035d16 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/misc/CommonParallelAsyncEventQueueDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/misc/CommonParallelAsyncEventQueueDUnitTest.java
@@ -17,6 +17,8 @@
 package com.gemstone.gemfire.internal.cache.wan.misc;
 
 import com.gemstone.gemfire.internal.cache.wan.AsyncEventQueueTestBase;
+import com.gemstone.gemfire.test.dunit.Assert;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
 
 /**
  * @author skumar
@@ -35,24 +37,24 @@ public class CommonParallelAsyncEventQueueDUnitTest extends AsyncEventQueueTestB
   }
     
   public void testSameSenderWithNonColocatedRegions() throws Exception {
-    addExpectedException("cannot have the same parallel async");
+    IgnoredException.addIgnoredException("cannot have the same parallel async");
     Integer lnPort = (Integer)vm0.invoke(AsyncEventQueueTestBase.class,
         "createFirstLocatorWithDSId", new Object[] { 1 });
     vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
     vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueue", new Object[] { "ln",
       true, 100, 100, false, false, null, false });
     vm4.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR1", "ln", isOffHeap()  });
+        new Object[] { getTestMethodName() + "_PR1", "ln", isOffHeap()  });
     try {
       vm4.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-          new Object[] { testName + "_PR2", "ln", isOffHeap()  });
+          new Object[] { getTestMethodName() + "_PR2", "ln", isOffHeap()  });
       fail("Expected IllegateStateException : cannot have the same parallel gateway sender");
     }
     catch (Exception e) {
       if (!(e.getCause() instanceof IllegalStateException)
           || !(e.getCause().getMessage()
               .contains("cannot have the same parallel async event queue id"))) {
-        fail("Expected IllegalStateException", e);
+        Assert.fail("Expected IllegalStateException", e);
       }
     }
   }
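
The same migration appears here: the instance method addExpectedException becomes the static IgnoredException.addIgnoredException, registered before the operation that is expected to log the error. A minimal sketch of the pattern, where createSecondRegionWithSameQueueId() is a hypothetical stand-in for the failing invoke in the hunk above:

    import com.gemstone.gemfire.test.dunit.Assert;
    import com.gemstone.gemfire.test.dunit.IgnoredException;

    // Register the suppression before the call that is expected to log the error.
    IgnoredException.addIgnoredException("cannot have the same parallel async");
    try {
      createSecondRegionWithSameQueueId();   // hypothetical stand-in for the failing invoke
      fail("Expected IllegalStateException : cannot have the same parallel async event queue id");
    } catch (Exception e) {
      if (!(e.getCause() instanceof IllegalStateException)) {
        Assert.fail("Expected IllegalStateException", e);   // static variant keeps the cause
      }
    }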

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionCacheConfigDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionCacheConfigDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionCacheConfigDUnitTest.java
index 84c9193..653a376 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionCacheConfigDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionCacheConfigDUnitTest.java
@@ -26,7 +26,8 @@ import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.compression.Compressor;
 import com.gemstone.gemfire.compression.SnappyCompressor;
 import com.gemstone.gemfire.internal.cache.LocalRegion;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
@@ -66,11 +67,6 @@ public class CompressionCacheConfigDUnitTest extends CacheTestCase {
     super.setUp();
   }
   
-  @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-  }
-
   /**
    * Asserts that a member is successfully initialized with a compressed region when
    * a compressor is included in the region attributes.
@@ -96,9 +92,9 @@ public class CompressionCacheConfigDUnitTest extends CacheTestCase {
    * @throws Exception
    */
   public void testCreateCacheWithBadCompressor() throws Exception {
-    addExpectedException("Unable to load class BAD_COMPRESSOR");
+    IgnoredException.addIgnoredException("Unable to load class BAD_COMPRESSOR");
     File cacheXml = createCacheXml(BAD_COMPRESSOR);
-    ExpectedException expectedException = DistributedTestCase.addExpectedException("While reading Cache XML file");
+    IgnoredException expectedException = IgnoredException.addIgnoredException("While reading Cache XML file");
     try {
       assertFalse(createCacheOnVM(getVM(0), cacheXml.getCanonicalPath()));
     } finally {
@@ -138,15 +134,15 @@ public class CompressionCacheConfigDUnitTest extends CacheTestCase {
           disconnectFromDS();
           Properties props = new Properties();
           props.setProperty("cache-xml-file",cacheXml);
-          getLogWriter().info("<ExpectedException action=add>ClassNotFoundException</ExpectedException>");
+          LogWriterUtils.getLogWriter().info("<ExpectedException action=add>ClassNotFoundException</ExpectedException>");
           getSystem(props);
           assertNotNull(getCache());
           return Boolean.TRUE;
         } catch(Exception e) {
-          getLogWriter().error("Could not create the cache", e);
+          LogWriterUtils.getLogWriter().error("Could not create the cache", e);
           return Boolean.FALSE;
         } finally {
-          getLogWriter().info("<ExpectedException action=remove>ClassNotFoundException</ExpectedException>");
+          LogWriterUtils.getLogWriter().info("<ExpectedException action=remove>ClassNotFoundException</ExpectedException>");
         }
       }      
     });
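
In this test the handle returned by addIgnoredException is kept so the suppression can be cleared once the bad-compressor scenario has run, and logging goes through LogWriterUtils instead of the inherited getLogWriter. A minimal sketch of that shape; the remove() call on the handle is assumed from the original finally block (which the hunk truncates), and the final info line is illustrative:

    import com.gemstone.gemfire.test.dunit.IgnoredException;
    import com.gemstone.gemfire.test.dunit.LogWriterUtils;

    IgnoredException expectedException =
        IgnoredException.addIgnoredException("While reading Cache XML file");
    try {
      assertFalse(createCacheOnVM(getVM(0), cacheXml.getCanonicalPath()));
    } finally {
      expectedException.remove();  // assumed cleanup; pairs with addIgnoredException
      LogWriterUtils.getLogWriter().info("bad compressor cache.xml scenario finished");
    }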

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionCacheListenerDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionCacheListenerDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionCacheListenerDUnitTest.java
index 3ff3e9b..84a9faf 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionCacheListenerDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionCacheListenerDUnitTest.java
@@ -179,15 +179,18 @@ public class CompressionCacheListenerDUnitTest extends CacheTestCase {
   }
 
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
+    preTearDownCompressionCacheListenerDUnitTest();
+    
     try {
       SnappyCompressor.getDefaultInstance();
       cleanup(getVM(TEST_VM));
     } catch (Throwable t) {
       // Not a supported OS
     }
-
-    super.tearDown2();
+  }
+  
+  protected void preTearDownCompressionCacheListenerDUnitTest() throws Exception {
   }
 
   /**
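
This is the template-method shape the commit introduces for tear-down: the base CacheTestCase now calls preTearDownCacheTestCase(), and this class exposes its own hook for subclasses (the off-heap variant below) instead of letting them override tearDown2. Condensed, the pattern reads:

    @Override
    protected final void preTearDownCacheTestCase() throws Exception {
      preTearDownCompressionCacheListenerDUnitTest();  // subclass hook runs first
      try {
        SnappyCompressor.getDefaultInstance();         // skip cleanup on unsupported platforms
        cleanup(getVM(TEST_VM));
      } catch (Throwable t) {
        // Not a supported OS
      }
    }

    // Subclasses override this instead of tearDown2.
    protected void preTearDownCompressionCacheListenerDUnitTest() throws Exception {
    }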

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionCacheListenerOffHeapDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionCacheListenerOffHeapDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionCacheListenerOffHeapDUnitTest.java
index cf8583a..ec828fd 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionCacheListenerOffHeapDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionCacheListenerOffHeapDUnitTest.java
@@ -21,6 +21,7 @@ import java.util.Properties;
 import com.gemstone.gemfire.compression.SnappyCompressor;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.cache.OffHeapTestUtil;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 
 @SuppressWarnings("serial")
@@ -39,7 +40,7 @@ public class CompressionCacheListenerOffHeapDUnitTest extends
   }
 
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCompressionCacheListenerDUnitTest() throws Exception {
     SerializableRunnable checkOrphans = new SerializableRunnable() {
 
       @Override
@@ -49,12 +50,8 @@ public class CompressionCacheListenerOffHeapDUnitTest extends
         }
       }
     };
-    invokeInEveryVM(checkOrphans);
-    try {
-      checkOrphans.run();
-    } finally {
-      super.tearDown2();
-    }
+    Invoke.invokeInEveryVM(checkOrphans);
+    checkOrphans.run();
   }
 
   @Override
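
The off-heap subclass plugs into the hook above and swaps the inherited invokeInEveryVM for the static Invoke utility, so the orphan check runs in every remote VM and once locally. A sketch; the runnable's body is elided by the hunk and presumably calls OffHeapTestUtil.checkOrphans(), as the imports suggest:

    import com.gemstone.gemfire.test.dunit.Invoke;
    import com.gemstone.gemfire.test.dunit.SerializableRunnable;

    SerializableRunnable checkOrphans = new SerializableRunnable() {
      @Override
      public void run() {
        // assert that no orphaned off-heap memory remains (body elided in the hunk)
      }
    };
    Invoke.invokeInEveryVM(checkOrphans);  // run the check in each remote VM
    checkOrphans.run();                    // and once in the controller VM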

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionRegionConfigDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionRegionConfigDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionRegionConfigDUnitTest.java
index 8061b3c..cd71a2c 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionRegionConfigDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionRegionConfigDUnitTest.java
@@ -27,11 +27,14 @@ import com.gemstone.gemfire.cache.server.CacheServer;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.compression.Compressor;
 import com.gemstone.gemfire.compression.SnappyCompressor;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.test.dunit.standalone.DUnitLauncher;
 
 /**
@@ -73,11 +76,6 @@ public class CompressionRegionConfigDUnitTest extends CacheTestCase {
     super.setUp();
   }
   
-  @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-  }
-
   /**
    * Sanity check using two peers sharing a replicated region.
    * @throws Exception
@@ -236,7 +234,7 @@ public class CompressionRegionConfigDUnitTest extends CacheTestCase {
    * @param key the key to wait on.
    */
   private void waitOnPut(final VM vm, final String key) {
-    DistributedTestCase.waitForCriterion(new DistributedTestCase.WaitCriterion() {
+    Wait.waitForCriterion(new WaitCriterion() {
       @Override
       public boolean done() {
         return (getUsingVM(vm, key) != null);
@@ -371,7 +369,7 @@ public class CompressionRegionConfigDUnitTest extends CacheTestCase {
         try {
           assertNotNull(createServerRegion(name,dataPolicy,compressor));
         } catch(Exception e) {
-          getLogWriter().error("Could not create the compressed region", e);
+          LogWriterUtils.getLogWriter().error("Could not create the compressed region", e);
           return Boolean.FALSE;
         }
         
@@ -394,7 +392,7 @@ public class CompressionRegionConfigDUnitTest extends CacheTestCase {
         try {
           assertNotNull(createRegion(name,dataPolicy,compressor));
         } catch(Exception e) {
-          getLogWriter().error("Could not create the compressed region", e);
+          LogWriterUtils.getLogWriter().error("Could not create the compressed region", e);
           return Boolean.FALSE;
         }
         
@@ -417,7 +415,7 @@ public class CompressionRegionConfigDUnitTest extends CacheTestCase {
         try {
           assertNotNull(createRegion(name,dataPolicy,compressor,diskStoreName));
         } catch(Exception e) {
-          getLogWriter().error("Could not create the compressed region", e);
+          LogWriterUtils.getLogWriter().error("Could not create the compressed region", e);
           return Boolean.FALSE;
         }
         
@@ -441,7 +439,7 @@ public class CompressionRegionConfigDUnitTest extends CacheTestCase {
         try {
           assertNotNull(createClientRegion(name,compressor,shortcut));
         } catch(Exception e) {
-          getLogWriter().error("Could not create the compressed region", e);
+          LogWriterUtils.getLogWriter().error("Could not create the compressed region", e);
           return Boolean.FALSE;
         }
         
@@ -523,7 +521,7 @@ public class CompressionRegionConfigDUnitTest extends CacheTestCase {
     } 
     // Running in hydra
     else {
-      return getDUnitLocatorPort();
+      return DistributedTestUtils.getDUnitLocatorPort();
     }
   }
 }
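
waitOnPut above now uses the extracted Wait/WaitCriterion pair instead of the methods inherited from DistributedTestCase. A minimal sketch with the polling parameters used elsewhere in this commit; getUsingVM is the test's own helper, and the description() override is assumed to be the second method of the WaitCriterion interface:

    import com.gemstone.gemfire.test.dunit.Wait;
    import com.gemstone.gemfire.test.dunit.WaitCriterion;

    Wait.waitForCriterion(new WaitCriterion() {
      @Override
      public boolean done() {
        return getUsingVM(vm, key) != null;      // poll until the put is visible
      }
      @Override
      public String description() {              // assumed second interface method
        return "waiting for key " + key + " to appear in " + vm;
      }
    }, 60 * 1000, 200, true);                    // timeout ms, interval ms, throw on timeout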

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionRegionFactoryDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionRegionFactoryDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionRegionFactoryDUnitTest.java
index 37dbddf..7a0793b 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionRegionFactoryDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionRegionFactoryDUnitTest.java
@@ -59,11 +59,6 @@ public class CompressionRegionFactoryDUnitTest extends CacheTestCase {
     super.setUp();
   }
   
-  @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-  }
-  
   /**
    * Asserts that a region is created when a valid compressor is used.
    * Asserts that the region attributes contain the correct compressor value. 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionRegionOperationsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionRegionOperationsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionRegionOperationsDUnitTest.java
index e8153e2..32164bd 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionRegionOperationsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionRegionOperationsDUnitTest.java
@@ -147,7 +147,18 @@ public class CompressionRegionOperationsDUnitTest extends CacheTestCase {
   }
   
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
+    Error error = null;
+    Exception exception = null;
+    
+    try {
+      preTearDownCompressionRegionOperationsDUnitTest();
+    } catch (Error e) {
+      error = e;
+    } catch (Exception e) {
+      exception = e;
+    }
+    
     try {
       SnappyCompressor.getDefaultInstance();
       cleanup(getVM(TEST_VM));
@@ -155,8 +166,16 @@ public class CompressionRegionOperationsDUnitTest extends CacheTestCase {
       // Not a supported OS
     }
     
-    super.tearDown2();
-  }  
+    if (error != null) {
+      throw error;
+    }
+    if (exception != null) {
+      throw exception;
+    }
+  }
+  
+  protected void preTearDownCompressionRegionOperationsDUnitTest() throws Exception {
+  }
 
   /**
    * Invokes basic get/put operations tests on the test vm.

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionRegionOperationsOffHeapDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionRegionOperationsOffHeapDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionRegionOperationsOffHeapDUnitTest.java
index 8c66f96..d76976f 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionRegionOperationsOffHeapDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/compression/CompressionRegionOperationsOffHeapDUnitTest.java
@@ -22,6 +22,7 @@ import com.gemstone.gemfire.compression.Compressor;
 import com.gemstone.gemfire.compression.SnappyCompressor;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.cache.OffHeapTestUtil;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 
 public class CompressionRegionOperationsOffHeapDUnitTest extends
@@ -30,8 +31,9 @@ public class CompressionRegionOperationsOffHeapDUnitTest extends
   public CompressionRegionOperationsOffHeapDUnitTest(String name) {
     super(name);
   }
+  
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCompressionRegionOperationsDUnitTest() throws Exception {
     SerializableRunnable checkOrphans = new SerializableRunnable() {
 
       @Override
@@ -41,12 +43,8 @@ public class CompressionRegionOperationsOffHeapDUnitTest extends
         }
       }
     };
-    invokeInEveryVM(checkOrphans);
-    try {
-      checkOrphans.run();
-    } finally {
-      super.tearDown2();
-    }
+    Invoke.invokeInEveryVM(checkOrphans);
+    checkOrphans.run();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/ExceptionsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/ExceptionsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/ExceptionsDUnitTest.java
index 8015b6a..492ec61 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/ExceptionsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/ExceptionsDUnitTest.java
@@ -38,6 +38,7 @@ import com.gemstone.gemfire.internal.OSProcess;
 import com.gemstone.gemfire.internal.jta.CacheUtils;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 import com.gemstone.gemfire.util.test.TestUtil;
 
@@ -60,7 +61,7 @@ public class ExceptionsDUnitTest extends DistributedTestCase {
       //
       //    sb.append(lineSep);
     }
-    getLogWriter().fine("***********\n " + sb);
+    LogWriterUtils.getLogWriter().fine("***********\n " + sb);
     return sb.toString();
   }
 
@@ -110,10 +111,10 @@ public class ExceptionsDUnitTest extends DistributedTestCase {
      * value=\"83f0069202c571faf1ae6c42b4ad46030e4e31c17409e19a\"/>";
      */
     int n1 = str.indexOf(search);
-    getLogWriter().fine("Start Index = " + n1);
+    LogWriterUtils.getLogWriter().fine("Start Index = " + n1);
     int n2 = str.indexOf(last_search, n1);
     StringBuffer sbuff = new StringBuffer(str);
-    getLogWriter().fine("END Index = " + n2);
+    LogWriterUtils.getLogWriter().fine("END Index = " + n2);
     String modified_str = sbuff.replace(n1, n2, new_str).toString();
     return modified_str;
   }
@@ -173,7 +174,7 @@ public class ExceptionsDUnitTest extends DistributedTestCase {
       if (ds != null) ds.disconnect();
     }
     catch (Exception e) {
-      getLogWriter().fine("Error in disconnecting from Distributed System");
+      LogWriterUtils.getLogWriter().fine("Error in disconnecting from Distributed System");
     }
   }
 
@@ -184,7 +185,8 @@ public class ExceptionsDUnitTest extends DistributedTestCase {
     vm0.invoke(ExceptionsDUnitTest.class, "init");
   }
 
-  public void tearDown2() throws NamingException, SQLException {
+  @Override
+  protected final void preTearDown() throws Exception {
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     vm0.invoke(ExceptionsDUnitTest.class, "closeCache");
@@ -234,7 +236,7 @@ public class ExceptionsDUnitTest extends DistributedTestCase {
               + "occur");
     }
     catch (Exception e) {
-      getLogWriter().fine("Exception caught in runTest1 due to : " + e);
+      LogWriterUtils.getLogWriter().fine("Exception caught in runTest1 due to : " + e);
       fail("failed in runTest1 due to " + e);
     }
   }
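
In the JTA dunit tests the per-test cleanup that lived in tearDown2 (along with its NamingException/SQLException throws clauses) is folded into a preTearDown override that simply throws Exception; the base DistributedTestCase drives the rest of tear-down afterwards. Condensed:

    @Override
    protected final void preTearDown() throws Exception {
      // cleanup formerly in tearDown2(); the base class completes tear-down after this hook
      Host.getHost(0).getVM(0).invoke(ExceptionsDUnitTest.class, "closeCache");
    }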

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/IdleTimeOutDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/IdleTimeOutDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/IdleTimeOutDUnitTest.java
index c340ae3..96e5a8d 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/IdleTimeOutDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/IdleTimeOutDUnitTest.java
@@ -37,9 +37,12 @@ import com.gemstone.gemfire.cache.CacheFactory;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.OSProcess;
 import com.gemstone.gemfire.internal.jta.CacheUtils;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 import com.gemstone.gemfire.util.test.TestUtil;
 
@@ -62,7 +65,7 @@ public class IdleTimeOutDUnitTest extends DistributedTestCase {
       //
       //    sb.append(lineSep);
     }
-    getLogWriter().info("***********\n " + sb);
+    LogWriterUtils.getLogWriter().info("***********\n " + sb);
     return sb.toString();
   }
 
@@ -112,22 +115,22 @@ public class IdleTimeOutDUnitTest extends DistributedTestCase {
      * value=\"83f0069202c571faf1ae6c42b4ad46030e4e31c17409e19a\"/>";
      */
     int n1 = str.indexOf(search);
-    getLogWriter().info("Start Index = " + n1);
+    LogWriterUtils.getLogWriter().info("Start Index = " + n1);
     int n2 = str.indexOf(last_search, n1);
     StringBuffer sbuff = new StringBuffer(str);
-    getLogWriter().info("END Index = " + n2);
+    LogWriterUtils.getLogWriter().info("END Index = " + n2);
     String modified_str = sbuff.replace(n1, n2, new_str).toString();
     return modified_str;
   }
 
   public static String init(String className) throws Exception {
-    getLogWriter().fine("PATH11 ");
+    LogWriterUtils.getLogWriter().fine("PATH11 ");
     Properties props = new Properties();
     String path = System.getProperty("CACHEXMLFILE");
-    getLogWriter().fine("PATH2 " + path);
+    LogWriterUtils.getLogWriter().fine("PATH2 " + path);
     int pid = OSProcess.getId();
     path = File.createTempFile("dunit-cachejta_", ".xml").getAbsolutePath();
-    getLogWriter().fine("PATH " + path);
+    LogWriterUtils.getLogWriter().fine("PATH " + path);
     /** * Return file as string and then modify the string accordingly ** */
     String file_as_str = readFile(TestUtil.getResourcePath(CacheUtils.class, "cachejta.xml"));
     file_as_str = file_as_str.replaceAll("newDB", "newDB_" + pid);
@@ -169,7 +172,7 @@ public class IdleTimeOutDUnitTest extends DistributedTestCase {
     String sql = "create table "
         + tableName
         + " (id integer NOT NULL, name varchar(50), CONSTRAINT "+tableName+"_key PRIMARY KEY(id))";
-    getLogWriter().info(sql);
+    LogWriterUtils.getLogWriter().info(sql);
     Connection conn = ds.getConnection();
     Statement sm = conn.createStatement();
     sm.execute(sql);
@@ -178,7 +181,7 @@ public class IdleTimeOutDUnitTest extends DistributedTestCase {
     for (int i = 1; i <= 10; i++) {
       sql = "insert into " + tableName + " values (" + i + ",'name" + i + "')";
       sm.addBatch(sql);
-      getLogWriter().info(sql);
+      LogWriterUtils.getLogWriter().info(sql);
     }
     sm.executeBatch();
     conn.close();
@@ -190,18 +193,18 @@ public class IdleTimeOutDUnitTest extends DistributedTestCase {
       Context ctx = cache.getJNDIContext();
       DataSource ds = (DataSource) ctx.lookup("java:/SimpleDataSource");
       Connection conn = ds.getConnection();
-      getLogWriter().info(" trying to drop table: " + tableName);
+      LogWriterUtils.getLogWriter().info(" trying to drop table: " + tableName);
       String sql = "drop table " + tableName;
       Statement sm = conn.createStatement();
       sm.execute(sql);
       conn.close();
     }
     catch (NamingException ne) {
-      getLogWriter().info("destroy table naming exception: " + ne);
+      LogWriterUtils.getLogWriter().info("destroy table naming exception: " + ne);
       throw ne;
     }
     catch (SQLException se) {
-      getLogWriter().info("destroy table sql exception: " + se);
+      LogWriterUtils.getLogWriter().info("destroy table sql exception: " + se);
       throw se;
     }
     finally {
@@ -237,7 +240,7 @@ public class IdleTimeOutDUnitTest extends DistributedTestCase {
       ds.disconnect();
     }
     catch (Exception e) {
-      getLogWriter().info("Error in disconnecting from Distributed System");
+      LogWriterUtils.getLogWriter().info("Error in disconnecting from Distributed System");
     }
   }
 
@@ -250,11 +253,9 @@ public class IdleTimeOutDUnitTest extends DistributedTestCase {
     vm0.invoke(IdleTimeOutDUnitTest.class, "init", o);
   }
 
-  public void tearDown2() throws NamingException, SQLException {
+  @Override
+  protected final void preTearDown() throws Exception {
     VM vm0 = Host.getHost(0).getVM(0);
-    // destroyTable call disabled due to high rate of failure - see internal ticket #52274
-//    vm0.invoke(IdleTimeOutDUnitTest.class, "destroyTable");
-    // if destroyTable is fixed then this closeCache is not necessary and can be removed
     vm0.invoke(IdleTimeOutDUnitTest.class, "closeCache");
   }
 
@@ -264,9 +265,9 @@ public class IdleTimeOutDUnitTest extends DistributedTestCase {
     vm0.invoke(IdleTimeOutDUnitTest.class, "runTest1");
     AsyncInvocation asyncObj = vm0.invokeAsync(IdleTimeOutDUnitTest.class,
         "runTest2");
-    DistributedTestCase.join(asyncObj, 30 * 1000, getLogWriter());
+    ThreadUtils.join(asyncObj, 30 * 1000);
     if(asyncObj.exceptionOccurred()){
-      fail("asyncObj failed", asyncObj.getException());
+      Assert.fail("asyncObj failed", asyncObj.getException());
     }				   
   }
 
@@ -278,27 +279,27 @@ public class IdleTimeOutDUnitTest extends DistributedTestCase {
       ds = (DataSource) ctx.lookup("java:/XAPooledDataSource");
     }
     catch (NamingException e) {
-      getLogWriter().info("Naming Exception caught in lookup: " + e);
+      LogWriterUtils.getLogWriter().info("Naming Exception caught in lookup: " + e);
       fail("failed in naming lookup: " + e);
       return;
     }
     catch (Exception e) {
-      getLogWriter().info("Exception caught during naming lookup: " + e);
+      LogWriterUtils.getLogWriter().info("Exception caught during naming lookup: " + e);
       fail("failed in naming lookup: " + e);
       return;
     }
     try {
       for (int count = 0; count < MAX_CONNECTIONS; count++) {
         ds.getConnection();
-        getLogWriter().info("runTest1 :acquired connection #" + count);
+        LogWriterUtils.getLogWriter().info("runTest1 :acquired connection #" + count);
       }
     }
     catch (SQLException e) {
-      getLogWriter().info("Success SQLException caught in runTest1: " + e);
+      LogWriterUtils.getLogWriter().info("Success SQLException caught in runTest1: " + e);
       fail("runTest1 SQL Exception caught: " + e);
     }
     catch (Exception e) {
-      getLogWriter().info("Exception caught in runTest1: " + e);
+      LogWriterUtils.getLogWriter().info("Exception caught in runTest1: " + e);
       fail("Exception caught in runTest1: " + e);
       e.printStackTrace();
     }
@@ -314,12 +315,12 @@ public class IdleTimeOutDUnitTest extends DistributedTestCase {
       ds = (DataSource) ctx.lookup("java:/XAPooledDataSource");
     }
     catch (NamingException e) {
-      getLogWriter().info("Exception caught during naming lookup: " + e);
+      LogWriterUtils.getLogWriter().info("Exception caught during naming lookup: " + e);
       fail("failed in naming lookup: " + e);
       return;
     }
     catch (Exception e) {
-      getLogWriter().info("Exception caught during naming lookup: " + e);
+      LogWriterUtils.getLogWriter().info("Exception caught during naming lookup: " + e);
       fail("failed in because of unhandled excpetion: " + e);
       return;
     }
@@ -327,16 +328,16 @@ public class IdleTimeOutDUnitTest extends DistributedTestCase {
       for (int count = 0; count < MAX_CONNECTIONS; count++) {
         Connection con = ds.getConnection();
         assertNotNull("Connection object is null", con);
-        getLogWriter().info("runTest2 :acquired connection #" + count);
+        LogWriterUtils.getLogWriter().info("runTest2 :acquired connection #" + count);
       }
     }
     catch (SQLException sqle) {
-      getLogWriter().info("SQLException caught in runTest2: " + sqle);
+      LogWriterUtils.getLogWriter().info("SQLException caught in runTest2: " + sqle);
       fail("failed because of SQL exception : " + sqle);
       sqle.printStackTrace();
     }
     catch (Exception e) {
-      getLogWriter().info("Exception caught in runTest2: " + e);
+      LogWriterUtils.getLogWriter().info("Exception caught in runTest2: " + e);
       fail("failed because of unhandled exception : " + e);
       e.printStackTrace();
     }
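
Joining async invocations follows the same shape throughout: ThreadUtils.join replaces DistributedTestCase.join and no longer takes a LogWriter, and failures are rethrown with the static Assert.fail. A condensed sketch:

    import com.gemstone.gemfire.test.dunit.Assert;
    import com.gemstone.gemfire.test.dunit.AsyncInvocation;
    import com.gemstone.gemfire.test.dunit.ThreadUtils;

    AsyncInvocation asyncObj = vm0.invokeAsync(IdleTimeOutDUnitTest.class, "runTest2");
    ThreadUtils.join(asyncObj, 30 * 1000);   // the LogWriter argument is gone
    if (asyncObj.exceptionOccurred()) {
      Assert.fail("asyncObj failed", asyncObj.getException());
    }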

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/LoginTimeOutDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/LoginTimeOutDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/LoginTimeOutDUnitTest.java
index 6be7241..c5a4e02 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/LoginTimeOutDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/LoginTimeOutDUnitTest.java
@@ -40,11 +40,15 @@ import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
 import com.gemstone.gemfire.internal.OSProcess;
 import com.gemstone.gemfire.internal.jta.CacheUtils;
 import com.gemstone.gemfire.internal.logging.LogService;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.RMIException;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.util.test.TestUtil;
 
 public class LoginTimeOutDUnitTest extends DistributedTestCase {
@@ -248,11 +252,11 @@ public class LoginTimeOutDUnitTest extends DistributedTestCase {
     VM vm0 = host.getVM(0);
     AsyncInvocation test1 = vm0.invokeAsync(LoginTimeOutDUnitTest.class, "runTest1");
     AsyncInvocation test2 = vm0.invokeAsync(LoginTimeOutDUnitTest.class, "runTest2");
-    DistributedTestCase.join(test2, 120 * 1000, getLogWriter());
+    ThreadUtils.join(test2, 120 * 1000);
     if(test2.exceptionOccurred()){
-      fail("asyncObj failed", test2.getException());
+      Assert.fail("asyncObj failed", test2.getException());
     }
-    DistributedTestCase.join(test1, 30000, getLogWriter());
+    ThreadUtils.join(test1, 30000);
   }
 
   public static void runTest1() throws Exception {
@@ -292,7 +296,7 @@ public class LoginTimeOutDUnitTest extends DistributedTestCase {
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 60 * 1000, 200, true);
   }
 
   public static void runTest2() throws Exception {
@@ -306,7 +310,7 @@ public class LoginTimeOutDUnitTest extends DistributedTestCase {
           return null;
         }
       };
-      DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+      Wait.waitForCriterion(ev, 60 * 1000, 200, true);
       
       DataSource ds = null;
       try {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/MaxPoolSizeDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/MaxPoolSizeDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/MaxPoolSizeDUnitTest.java
index 5e31e12..1ab5b2c 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/MaxPoolSizeDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/MaxPoolSizeDUnitTest.java
@@ -37,9 +37,12 @@ import com.gemstone.gemfire.cache.CacheFactory;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.OSProcess;
 import com.gemstone.gemfire.internal.jta.CacheUtils;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 import com.gemstone.gemfire.util.test.TestUtil;
 
@@ -62,7 +65,7 @@ public class MaxPoolSizeDUnitTest extends DistributedTestCase {
       //
       //    sb.append(lineSep);
     }
-    getLogWriter().fine("***********\n " + sb);
+    LogWriterUtils.getLogWriter().fine("***********\n " + sb);
     return sb.toString();
   }
 
@@ -112,20 +115,20 @@ public class MaxPoolSizeDUnitTest extends DistributedTestCase {
      * value=\"83f0069202c571faf1ae6c42b4ad46030e4e31c17409e19a\"/>";
      */
     int n1 = str.indexOf(search);
-    getLogWriter().fine("Start Index = " + n1);
+    LogWriterUtils.getLogWriter().fine("Start Index = " + n1);
     int n2 = str.indexOf(last_search, n1);
     StringBuffer sbuff = new StringBuffer(str);
-    getLogWriter().fine("END Index = " + n2);
+    LogWriterUtils.getLogWriter().fine("END Index = " + n2);
     String modified_str = sbuff.replace(n1, n2, new_str).toString();
     return modified_str;
   }
 
   public static String init(String className) throws Exception {
-    getLogWriter().fine("PATH11 ");
+    LogWriterUtils.getLogWriter().fine("PATH11 ");
     Properties props = new Properties();
     int pid = OSProcess.getId();
     String path = File.createTempFile("dunit-cachejta_", ".xml").getAbsolutePath();
-    getLogWriter().fine("PATH " + path);
+    LogWriterUtils.getLogWriter().fine("PATH " + path);
     /** * Return file as string and then modify the string accordingly ** */
     String file_as_str = readFile(TestUtil.getResourcePath(CacheUtils.class, "cachejta.xml"));
     file_as_str = file_as_str.replaceAll("newDB", "newDB_" + pid);
@@ -167,7 +170,7 @@ public class MaxPoolSizeDUnitTest extends DistributedTestCase {
     String sql = "create table "
         + tableName
         + " (id integer NOT NULL, name varchar(50), CONSTRAINT "+tableName+"_key PRIMARY KEY(id))";
-    getLogWriter().fine(sql);
+    LogWriterUtils.getLogWriter().fine(sql);
     Connection conn = ds.getConnection();
     Statement sm = conn.createStatement();
     sm.execute(sql);
@@ -176,7 +179,7 @@ public class MaxPoolSizeDUnitTest extends DistributedTestCase {
     for (int i = 1; i <= 10; i++) {
       sql = "insert into " + tableName + " values (" + i + ",'name" + i + "')";
       sm.addBatch(sql);
-      getLogWriter().fine(sql);
+      LogWriterUtils.getLogWriter().fine(sql);
     }
     sm.executeBatch();
     conn.close();
@@ -188,19 +191,19 @@ public class MaxPoolSizeDUnitTest extends DistributedTestCase {
       Context ctx = cache.getJNDIContext();
       DataSource ds = (DataSource) ctx.lookup("java:/SimpleDataSource");
       Connection conn = ds.getConnection();
-      getLogWriter().fine(" trying to drop table: " + tableName);
+      LogWriterUtils.getLogWriter().fine(" trying to drop table: " + tableName);
       String sql = "drop table " + tableName;
       Statement sm = conn.createStatement();
       sm.execute(sql);
       conn.close();
     }
     catch (NamingException ne) {
-      getLogWriter().fine("destroy table naming exception: " + ne);
+      LogWriterUtils.getLogWriter().fine("destroy table naming exception: " + ne);
       throw ne;
     }
     catch (SQLException se) {
       if (!se.getMessage().contains("A lock could not be obtained within the time requested")) {
-        getLogWriter().fine("destroy table sql exception: " + se);
+        LogWriterUtils.getLogWriter().fine("destroy table sql exception: " + se);
         throw se;
       } else {
         // disregard - this happens sometimes on unit test runs on slower
@@ -238,7 +241,7 @@ public class MaxPoolSizeDUnitTest extends DistributedTestCase {
       ds.disconnect();
     }
     catch (Exception e) {
-      getLogWriter().fine("Error in disconnecting from Distributed System");
+      LogWriterUtils.getLogWriter().fine("Error in disconnecting from Distributed System");
     }
   }
 
@@ -252,7 +255,8 @@ public class MaxPoolSizeDUnitTest extends DistributedTestCase {
     vm0.invoke(MaxPoolSizeDUnitTest.class, "init", o);
   }
 
-  public void tearDown2() throws NamingException, SQLException {
+  @Override
+  protected final void preTearDown() throws Exception {
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     try {
@@ -267,9 +271,9 @@ public class MaxPoolSizeDUnitTest extends DistributedTestCase {
     VM vm0 = host.getVM(0);
     AsyncInvocation asyncObj = vm0.invokeAsync(MaxPoolSizeDUnitTest.class,
         "runTest1");
-    DistributedTestCase.join(asyncObj, 30 * 1000, getLogWriter());
+    ThreadUtils.join(asyncObj, 30 * 1000);
     if(asyncObj.exceptionOccurred()){
-      fail("asyncObj failed", asyncObj.getException());
+      Assert.fail("asyncObj failed", asyncObj.getException());
     }
   }
 
@@ -282,28 +286,28 @@ public class MaxPoolSizeDUnitTest extends DistributedTestCase {
       ds = (DataSource) ctx.lookup("java:/XAPooledDataSource");
     }
     catch (NamingException e) {
-      getLogWriter().fine("Naming Exception caught in lookup: " + e);
+      LogWriterUtils.getLogWriter().fine("Naming Exception caught in lookup: " + e);
       fail("failed in naming lookup: " + e);
       return;
     }
     catch (Exception e) {
-      getLogWriter().fine("Exception caught during naming lookup: " + e);
+      LogWriterUtils.getLogWriter().fine("Exception caught during naming lookup: " + e);
       fail("failed in naming lookup: " + e);
       return;
     }
     try {
       for (count = 0; count < MAX_CONNECTIONS; count++) {
         ds.getConnection();
-        getLogWriter().fine("Thread 1 acquired connection #" + count);
+        LogWriterUtils.getLogWriter().fine("Thread 1 acquired connection #" + count);
       }
       fail("expected max connect exception");
     }
     catch (SQLException e) {
       if (count < (MAX_CONNECTIONS-1)) {
-        fail("runTest1 SQL Exception", e);
+        Assert.fail("runTest1 SQL Exception", e);
       }
       else {
-        getLogWriter().fine("Success SQLException caught at connection #"
+        LogWriterUtils.getLogWriter().fine("Success SQLException caught at connection #"
             + count);
       }
     }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/TransactionTimeOutDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/TransactionTimeOutDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/TransactionTimeOutDUnitTest.java
index 22b8bff..fead02e 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/TransactionTimeOutDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/TransactionTimeOutDUnitTest.java
@@ -41,9 +41,12 @@ import com.gemstone.gemfire.internal.OSProcess;
 import com.gemstone.gemfire.internal.datasource.GemFireTransactionDataSource;
 import com.gemstone.gemfire.internal.jta.CacheUtils;
 import com.gemstone.gemfire.internal.jta.UserTransactionImpl;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 import com.gemstone.gemfire.util.test.TestUtil;
 
@@ -123,7 +126,8 @@ public class TransactionTimeOutDUnitTest extends DistributedTestCase {
     vm0.invoke(TransactionTimeOutDUnitTest.class, "init");
   }
 
-  public  void tearDown2() throws NamingException, SQLException {
+  @Override
+  protected final void preTearDown() throws Exception {
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     vm0.invoke(TransactionTimeOutDUnitTest.class, "closeCache");
@@ -135,13 +139,13 @@ public class TransactionTimeOutDUnitTest extends DistributedTestCase {
     AsyncInvocation async1 = vm0.invokeAsync(TransactionTimeOutDUnitTest.class, "runTest1");
     AsyncInvocation async2 =vm0.invokeAsync(TransactionTimeOutDUnitTest.class, "runTest2");
     
-    DistributedTestCase.join(async1, 30 * 1000, getLogWriter());
-    DistributedTestCase.join(async2, 30 * 1000, getLogWriter());
+    ThreadUtils.join(async1, 30 * 1000);
+    ThreadUtils.join(async2, 30 * 1000);
     if(async1.exceptionOccurred()){
-      fail("async1 failed", async1.getException());
+      Assert.fail("async1 failed", async1.getException());
     }
     if(async2.exceptionOccurred()){
-      fail("async2 failed", async2.getException());
+      Assert.fail("async2 failed", async2.getException());
     }
   }
 
@@ -213,7 +217,7 @@ public class TransactionTimeOutDUnitTest extends DistributedTestCase {
       return;
     }
     catch (Exception e) {
-      getLogWriter().fine("Exception caught " + e);
+      LogWriterUtils.getLogWriter().fine("Exception caught " + e);
       fail("failed in naming lookup: " + e);
       return;
     }
@@ -239,7 +243,7 @@ public class TransactionTimeOutDUnitTest extends DistributedTestCase {
       return;
     }
     catch (Exception e) {
-      getLogWriter().fine("Exception caught " + e);
+      LogWriterUtils.getLogWriter().fine("Exception caught " + e);
       fail("failed in naming lookup: " + e);
       return;
     }
@@ -492,7 +496,7 @@ public class TransactionTimeOutDUnitTest extends DistributedTestCase {
       //
       //    sb.append(lineSep);
     }
-    getLogWriter().fine("***********\n " + sb);
+    LogWriterUtils.getLogWriter().fine("***********\n " + sb);
     return sb.toString();
   }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/TxnManagerMultiThreadDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/TxnManagerMultiThreadDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/TxnManagerMultiThreadDUnitTest.java
index 83e29af..fab159d 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/TxnManagerMultiThreadDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/TxnManagerMultiThreadDUnitTest.java
@@ -42,9 +42,12 @@ import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.OSProcess;
 import com.gemstone.gemfire.internal.jta.CacheUtils;
 import com.gemstone.gemfire.internal.jta.JTAUtils;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 import com.gemstone.gemfire.util.test.TestUtil;
 
@@ -184,7 +187,7 @@ public class TxnManagerMultiThreadDUnitTest extends DistributedTestCase {
     String jtest = System.getProperty("JTESTS");
     int pid = OSProcess.getId();
     String path = File.createTempFile("dunit-cachejta_", ".xml").getAbsolutePath();
-    getLogWriter().fine("PATH " + path);
+    LogWriterUtils.getLogWriter().fine("PATH " + path);
     /** * Return file as string and then modify the string accordingly ** */
     String file_as_str = readFile(TestUtil.getResourcePath(CacheUtils.class, "cachejta.xml"));
     file_as_str = file_as_str.replaceAll("newDB", "newDB_" + pid);
@@ -210,7 +213,7 @@ public class TxnManagerMultiThreadDUnitTest extends DistributedTestCase {
       }
     }
     catch (Exception e) {
-      getLogWriter().info("", e);
+      LogWriterUtils.getLogWriter().info("", e);
       throw new Exception("" + e);
     }
     return tableName;
@@ -225,7 +228,7 @@ public class TxnManagerMultiThreadDUnitTest extends DistributedTestCase {
     String sql = "create table "
         + tableName
         + " (id integer NOT NULL, name varchar(50), CONSTRAINT "+tableName+"_key PRIMARY KEY(id))";
-    getLogWriter().fine(sql);
+    LogWriterUtils.getLogWriter().fine(sql);
     Connection conn = ds.getConnection();
     Statement sm = conn.createStatement();
     sm.execute(sql);
@@ -234,7 +237,7 @@ public class TxnManagerMultiThreadDUnitTest extends DistributedTestCase {
     for (int i = 1; i <= 10; i++) {
       sql = "insert into " + tableName + " values (" + i + ",'name" + i + "')";
       sm.addBatch(sql);
-      getLogWriter().fine(sql);
+      LogWriterUtils.getLogWriter().fine(sql);
     }
     sm.executeBatch();
     conn.close();
@@ -252,28 +255,28 @@ public class TxnManagerMultiThreadDUnitTest extends DistributedTestCase {
     }
     try {
       String tableName = tblName;
-      getLogWriter().fine("Destroying table: " + tableName);
+      LogWriterUtils.getLogWriter().fine("Destroying table: " + tableName);
       cache = TxnManagerMultiThreadDUnitTest.getCache();
       Context ctx = cache.getJNDIContext();
       DataSource ds = (DataSource) ctx.lookup("java:/SimpleDataSource");
       Connection conn = ds.getConnection();
-      getLogWriter().fine(" trying to drop table: " + tableName);
+      LogWriterUtils.getLogWriter().fine(" trying to drop table: " + tableName);
       String sql = "drop table " + tableName;
       Statement sm = conn.createStatement();
       sm.execute(sql);
       conn.close();
-      getLogWriter().fine("destroyTable is Successful!");
+      LogWriterUtils.getLogWriter().fine("destroyTable is Successful!");
     }
     catch (NamingException ne) {
-      getLogWriter().fine("destroy table naming exception: " + ne);
+      LogWriterUtils.getLogWriter().fine("destroy table naming exception: " + ne);
       throw ne;
     }
     catch (SQLException se) {
-      getLogWriter().fine("destroy table sql exception: " + se);
+      LogWriterUtils.getLogWriter().fine("destroy table sql exception: " + se);
       throw se;
     }
     finally {
-      getLogWriter().fine("Closing cache...");
+      LogWriterUtils.getLogWriter().fine("Closing cache...");
       closeCache();
     }
   }//end of destroyTable
@@ -289,7 +292,7 @@ public class TxnManagerMultiThreadDUnitTest extends DistributedTestCase {
       }
     }
     catch (Exception e) {
-      getLogWriter().warning("exception while creating cache", e);
+      LogWriterUtils.getLogWriter().warning("exception while creating cache", e);
     }
   }//end of startCache
 
@@ -297,18 +300,18 @@ public class TxnManagerMultiThreadDUnitTest extends DistributedTestCase {
     try {
       if (!cache.isClosed()) {
         cache.close();
-        getLogWriter().fine("Cache closed");
+        LogWriterUtils.getLogWriter().fine("Cache closed");
       }
     }
     catch (Exception e) {
-      getLogWriter().warning("exception while closing cache", e);
+      LogWriterUtils.getLogWriter().warning("exception while closing cache", e);
     }
     try {
       CacheUtils.ds.disconnect();
-      getLogWriter().fine("Disconnected from Distribuited System");
+      LogWriterUtils.getLogWriter().fine("Disconnected from Distribuited System");
     }
     catch (Exception e) {
-      getLogWriter().fine("Error in disconnecting from Distributed System");
+      LogWriterUtils.getLogWriter().fine("Error in disconnecting from Distributed System");
     }
   }//end of closeCache
 
@@ -348,11 +351,12 @@ public class TxnManagerMultiThreadDUnitTest extends DistributedTestCase {
       /*int rowsDeleted = */jtaObj.deleteRows(tblName_delRows);
     }
     catch (Exception e) {
-      getLogWriter().warning("Error: while deleting rows from database using JTAUtils", e);
+      LogWriterUtils.getLogWriter().warning("Error: while deleting rows from database using JTAUtils", e);
     }
   }//end of delRows
 
-  public void tearDown2() throws java.lang.Exception {
+  @Override
+  protected final void preTearDown() throws Exception {
     VM vm0 = Host.getHost(0).getVM(0);
     //get tableName to pass to destroyTable
     String tableName = CacheUtils.getTableName();
@@ -405,11 +409,11 @@ public class TxnManagerMultiThreadDUnitTest extends DistributedTestCase {
     //get how many rows actually got committed
     try {
       int rows = jtaObj.getRows(tblName);
-      getLogWriter().fine("Number of rows committed current test method  are: "
+      LogWriterUtils.getLogWriter().fine("Number of rows committed current test method  are: "
           + rows);
     }
     catch (Exception e) {
-      getLogWriter().warning("Error: while getting rows from database using JTAUtils", e);
+      LogWriterUtils.getLogWriter().warning("Error: while getting rows from database using JTAUtils", e);
     }
   }//end of getNumberOfRows
 
@@ -422,9 +426,9 @@ public class TxnManagerMultiThreadDUnitTest extends DistributedTestCase {
     VM vm0 = Host.getHost(0).getVM(0);
     AsyncInvocation asyncObj1 = vm0.invokeAsync(
         TxnManagerMultiThreadDUnitTest.class, "callCommitThreads");
-    DistributedTestCase.join(asyncObj1, 30 * 1000, getLogWriter());
+    ThreadUtils.join(asyncObj1, 30 * 1000);
     if(asyncObj1.exceptionOccurred()){
-      fail("asyncObj1 failed", asyncObj1.getException());
+      Assert.fail("asyncObj1 failed", asyncObj1.getException());
     }
     vm0.invoke(TxnManagerMultiThreadDUnitTest.class, "getNumberOfRows");
   }//end of testAllCommit
@@ -434,15 +438,15 @@ public class TxnManagerMultiThreadDUnitTest extends DistributedTestCase {
    *  
    */
   public static void callCommitThreads() {
-    getLogWriter().fine("This is callCommitThreads method");
+    LogWriterUtils.getLogWriter().fine("This is callCommitThreads method");
     try {
-      new CommitThread("ct1", getLogWriter());
-      new CommitThread("ct2", getLogWriter());
-      new CommitThread("ct3", getLogWriter());
-      new CommitThread("ct4", getLogWriter());
+      new CommitThread("ct1", LogWriterUtils.getLogWriter());
+      new CommitThread("ct2", LogWriterUtils.getLogWriter());
+      new CommitThread("ct3", LogWriterUtils.getLogWriter());
+      new CommitThread("ct4", LogWriterUtils.getLogWriter());
     }
     catch (Exception e) {
-      getLogWriter().warning("Failed in Commit Threads", e);
+      LogWriterUtils.getLogWriter().warning("Failed in Commit Threads", e);
       fail("Failed in Commit Threads" + e);
     }
   }//end of callCommitTheads
@@ -455,24 +459,24 @@ public class TxnManagerMultiThreadDUnitTest extends DistributedTestCase {
     VM vm0 = Host.getHost(0).getVM(0);
     AsyncInvocation asyncObj1 = vm0.invokeAsync(
         TxnManagerMultiThreadDUnitTest.class, "callCommitandRollbackThreads");
-    DistributedTestCase.join(asyncObj1, 30 * 1000, getLogWriter());
+    ThreadUtils.join(asyncObj1, 30 * 1000);
     if(asyncObj1.exceptionOccurred()){
-      fail("asyncObj1 failed", asyncObj1.getException());
+      Assert.fail("asyncObj1 failed", asyncObj1.getException());
     }
     vm0.invoke(TxnManagerMultiThreadDUnitTest.class, "getNumberOfRows");
   }//end of test3Commit2Rollback
 
   public static void callCommitandRollbackThreads() {
-    getLogWriter().fine("This is callCommitandRollbackThreads method");
+    LogWriterUtils.getLogWriter().fine("This is callCommitandRollbackThreads method");
     try {
-      new CommitThread("ct1", getLogWriter());
-      new CommitThread("ct2", getLogWriter());
-      new CommitThread("ct3", getLogWriter());
-      new RollbackThread("rt1", getLogWriter());
-      new RollbackThread("rt2", getLogWriter());
+      new CommitThread("ct1", LogWriterUtils.getLogWriter());
+      new CommitThread("ct2", LogWriterUtils.getLogWriter());
+      new CommitThread("ct3", LogWriterUtils.getLogWriter());
+      new RollbackThread("rt1", LogWriterUtils.getLogWriter());
+      new RollbackThread("rt2", LogWriterUtils.getLogWriter());
     }
     catch (Exception e) {
-      getLogWriter().info("Failed in Commit and Rollback threads", e);
+      LogWriterUtils.getLogWriter().info("Failed in Commit and Rollback threads", e);
       fail("Failed in Commit and Rollback Threads" + e);
     }
   }//end of callCommitandRollbackThreads
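
The hunks above all follow one mechanical pattern: the join/fail/logging statics that this test used to inherit from DistributedTestCase are now called on the extracted utility classes (ThreadUtils, Assert, LogWriterUtils). Below is a minimal sketch of what a migrated dUnit test method looks like after this commit; the class and method names (ExampleDUnitTest, doWork) are hypothetical, but ThreadUtils.join(AsyncInvocation, long), Assert.fail(String, Throwable) and LogWriterUtils.getLogWriter() are used exactly as in the diff.

    import com.gemstone.gemfire.test.dunit.Assert;
    import com.gemstone.gemfire.test.dunit.AsyncInvocation;
    import com.gemstone.gemfire.test.dunit.DistributedTestCase;
    import com.gemstone.gemfire.test.dunit.Host;
    import com.gemstone.gemfire.test.dunit.LogWriterUtils;
    import com.gemstone.gemfire.test.dunit.ThreadUtils;
    import com.gemstone.gemfire.test.dunit.VM;

    // Sketch only -- a hypothetical test class, not part of this commit.
    public class ExampleDUnitTest extends DistributedTestCase {

      public ExampleDUnitTest(String name) {
        super(name);
      }

      public void testAsyncWork() throws Exception {
        VM vm0 = Host.getHost(0).getVM(0);
        AsyncInvocation async = vm0.invokeAsync(ExampleDUnitTest.class, "doWork");
        // was: DistributedTestCase.join(async, 30 * 1000, getLogWriter());
        ThreadUtils.join(async, 30 * 1000);
        if (async.exceptionOccurred()) {
          // was: fail("doWork failed", async.getException());
          Assert.fail("doWork failed", async.getException());
        }
        // was: getLogWriter().fine(...);
        LogWriterUtils.getLogWriter().fine("doWork completed in vm0");
      }

      public static void doWork() {
        // body executed remotely in vm0
      }
    }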

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/TxnTimeOutDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/TxnTimeOutDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/TxnTimeOutDUnitTest.java
index 7e81aae..a9f41ac 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/TxnTimeOutDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/jta/dunit/TxnTimeOutDUnitTest.java
@@ -35,9 +35,12 @@ import com.gemstone.gemfire.cache.CacheFactory;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.OSProcess;
 import com.gemstone.gemfire.internal.jta.CacheUtils;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 import com.gemstone.gemfire.util.test.TestUtil;
 
@@ -69,7 +72,7 @@ public class TxnTimeOutDUnitTest extends DistributedTestCase {
     wr.close();
     props.setProperty("cache-xml-file", path);
 //    props.setProperty("mcast-port", "10321");
-    props.setProperty("log-level", getDUnitLogLevel());
+    props.setProperty("log-level", LogWriterUtils.getDUnitLogLevel());
     try {
 //      ds = DistributedSystem.connect(props);
       ds = (new TxnTimeOutDUnitTest("temp")).getSystem(props);
@@ -119,7 +122,8 @@ public class TxnTimeOutDUnitTest extends DistributedTestCase {
     vm0.invoke(TxnTimeOutDUnitTest.class, "init");
   }
 
-  public void tearDown2() throws NamingException, SQLException {
+  @Override
+  protected final void preTearDown() throws Exception {
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     vm0.invoke(TxnTimeOutDUnitTest.class, "closeCache");
@@ -154,29 +158,29 @@ public class TxnTimeOutDUnitTest extends DistributedTestCase {
         AsyncInvocation asyncObj5 = vm0.invokeAsync(TxnTimeOutDUnitTest.class,
         "runTest3",o4);
 
-        DistributedTestCase.join(asyncObj1, 5 * 60 * 1000, getLogWriter());        
+        ThreadUtils.join(asyncObj1, 5 * 60 * 1000);        
         if(asyncObj1.exceptionOccurred()){
-          fail("asyncObj1 failed", asyncObj1.getException());
+          Assert.fail("asyncObj1 failed", asyncObj1.getException());
         }
         
-        DistributedTestCase.join(asyncObj2, 5 * 60 * 1000, getLogWriter());        
+        ThreadUtils.join(asyncObj2, 5 * 60 * 1000);        
         if(asyncObj2.exceptionOccurred()){
-          fail("asyncObj2 failed", asyncObj2.getException());
+          Assert.fail("asyncObj2 failed", asyncObj2.getException());
         }
         
-        DistributedTestCase.join(asyncObj3, 5 * 60 * 1000, getLogWriter());        
+        ThreadUtils.join(asyncObj3, 5 * 60 * 1000);        
         if(asyncObj3.exceptionOccurred()){
-          fail("asyncObj3 failed", asyncObj3.getException());
+          Assert.fail("asyncObj3 failed", asyncObj3.getException());
         }
         
-        DistributedTestCase.join(asyncObj4, 5 * 60 * 1000, getLogWriter());        
+        ThreadUtils.join(asyncObj4, 5 * 60 * 1000);        
         if(asyncObj4.exceptionOccurred()){
-          fail("asyncObj4 failed", asyncObj4.getException());
+          Assert.fail("asyncObj4 failed", asyncObj4.getException());
         }
         
-        DistributedTestCase.join(asyncObj5, 5 * 60 * 1000, getLogWriter());        
+        ThreadUtils.join(asyncObj5, 5 * 60 * 1000);        
         if(asyncObj5.exceptionOccurred()){
-          fail("asyncObj5 failed", asyncObj5.getException());
+          Assert.fail("asyncObj5 failed", asyncObj5.getException());
         }
         
   
@@ -193,14 +197,14 @@ public class TxnTimeOutDUnitTest extends DistributedTestCase {
         "runTest2");
     AsyncInvocation asyncObj2 =    vm0.invokeAsync(TxnTimeOutDUnitTest.class, "runTest1");
 
-    DistributedTestCase.join(asyncObj1, 5 * 60 * 1000, getLogWriter());
+    ThreadUtils.join(asyncObj1, 5 * 60 * 1000);
     if(asyncObj1.exceptionOccurred()){
-      fail("asyncObj1 failed", asyncObj1.getException());
+      Assert.fail("asyncObj1 failed", asyncObj1.getException());
     }
     
-    DistributedTestCase.join(asyncObj2, 5 * 60 * 1000, getLogWriter());
+    ThreadUtils.join(asyncObj2, 5 * 60 * 1000);
     if(asyncObj2.exceptionOccurred()){
-      fail("asyncObj2 failed", asyncObj2.getException());
+      Assert.fail("asyncObj2 failed", asyncObj2.getException());
     }
     
   }
@@ -227,7 +231,7 @@ public class TxnTimeOutDUnitTest extends DistributedTestCase {
           fail("Exception did not occur although was supposed to occur");
     }
     catch (Exception e) {
-      getLogWriter().fine("Exception caught " + e);
+      LogWriterUtils.getLogWriter().fine("Exception caught " + e);
       fail("failed in naming lookup: " + e);
     }
     finally {
@@ -256,7 +260,7 @@ public class TxnTimeOutDUnitTest extends DistributedTestCase {
           fail("Exception did not occur although was supposed to occur");
     }
     catch (Exception e) {
-      getLogWriter().fine("Exception caught " + e);
+      LogWriterUtils.getLogWriter().fine("Exception caught " + e);
       fail("failed in naming lookup: " + e);
     }
   }
@@ -300,7 +304,7 @@ public class TxnTimeOutDUnitTest extends DistributedTestCase {
       //
       //    sb.append(lineSep);
     }
-    getLogWriter().fine("***********\n " + sb);
+    LogWriterUtils.getLogWriter().fine("***********\n " + sb);
     return sb.toString();
   }
 } 
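
Besides the same join/fail/log substitutions, this file shows two smaller pieces of the refactoring: the dUnit log level is now read through LogWriterUtils.getDUnitLogLevel(), and the old public tearDown2() hook is replaced by overriding preTearDown(). A sketch of the resulting lifecycle, assuming a hypothetical test class and cleanup helper; only the two calls and the override signature are taken from the hunks above, and imports are omitted (they match the ones added in the diff).

    // Sketch only -- hypothetical class; closeCache() stands in for the test's real cleanup.
    public class ExampleTimeOutDUnitTest extends DistributedTestCase {

      public ExampleTimeOutDUnitTest(String name) {
        super(name);
      }

      public static void init() throws Exception {
        Properties props = new Properties();
        // was: props.setProperty("log-level", getDUnitLogLevel());
        props.setProperty("log-level", LogWriterUtils.getDUnitLogLevel());
        DistributedSystem ds = (new ExampleTimeOutDUnitTest("temp")).getSystem(props);
        // ... create the cache from ds, as the original init() does ...
      }

      // was: public void tearDown2() throws NamingException, SQLException
      @Override
      protected final void preTearDown() throws Exception {
        Host.getHost(0).getVM(0).invoke(ExampleTimeOutDUnitTest.class, "closeCache");
      }

      public static void closeCache() {
        // hypothetical cleanup invoked in vm0
      }
    }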

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/logging/DistributedSystemLogFileJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/logging/DistributedSystemLogFileJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/logging/DistributedSystemLogFileJUnitTest.java
index a8b664e..b3f4029 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/logging/DistributedSystemLogFileJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/logging/DistributedSystemLogFileJUnitTest.java
@@ -43,8 +43,8 @@ import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
 import com.gemstone.gemfire.internal.logging.log4j.FastLogger;
 import com.gemstone.gemfire.internal.logging.log4j.LogWriterLogger;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
@@ -113,7 +113,7 @@ public class DistributedSystemLogFileJUnitTest {
     assertEquals("Expected " + LogWriterImpl.levelToString(InternalLogWriter.INFO_LEVEL) + " but was " + LogWriterImpl.levelToString(logWriter.getLogWriterLevel()),
         InternalLogWriter.INFO_LEVEL, logWriter.getLogWriterLevel());
     
-    DistributedTestCase.waitForCriterion(new WaitCriterion() {
+    Wait.waitForCriterion(new WaitCriterion() {
       @Override
       public boolean done() {
         return logFile.exists();
@@ -504,7 +504,7 @@ public class DistributedSystemLogFileJUnitTest {
     assertTrue(logWriter instanceof FastLogger);
     assertTrue(((FastLogger)logWriter).isDelegating());
 
-    DistributedTestCase.waitForCriterion(new WaitCriterion() {
+    Wait.waitForCriterion(new WaitCriterion() {
       @Override
       public boolean done() {
         return logFile.exists();
@@ -720,7 +720,7 @@ public class DistributedSystemLogFileJUnitTest {
     assertTrue(logWriter instanceof FastLogger);
     assertTrue(((FastLogger)logWriter).isDelegating());
 
-    DistributedTestCase.waitForCriterion(new WaitCriterion() {
+    Wait.waitForCriterion(new WaitCriterion() {
       @Override
       public boolean done() {
         return logFile.exists();
@@ -961,7 +961,7 @@ public class DistributedSystemLogFileJUnitTest {
     assertTrue(((FastLogger)logWriter).isDelegating());
 
     
-    DistributedTestCase.waitForCriterion(new WaitCriterion() {
+    Wait.waitForCriterion(new WaitCriterion() {
       @Override
       public boolean done() {
         return securityLogFile.exists() && logFile.exists();
@@ -1076,7 +1076,7 @@ public class DistributedSystemLogFileJUnitTest {
     assertTrue(((FastLogger)securityLogWriter).isDelegating());
     assertTrue(((FastLogger)logWriter).isDelegating());
 
-    DistributedTestCase.waitForCriterion(new WaitCriterion() {
+    Wait.waitForCriterion(new WaitCriterion() {
       @Override
       public boolean done() {
         return securityLogFile.exists() && logFile.exists();
@@ -1248,7 +1248,7 @@ public class DistributedSystemLogFileJUnitTest {
     assertTrue(((FastLogger)securityLogWriter).isDelegating());
     assertTrue(((FastLogger)logWriter).isDelegating());
 
-    DistributedTestCase.waitForCriterion(new WaitCriterion() {
+    Wait.waitForCriterion(new WaitCriterion() {
       @Override
       public boolean done() {
         return logFile.exists();
@@ -1397,7 +1397,7 @@ public class DistributedSystemLogFileJUnitTest {
     assertTrue(((FastLogger)securityLogWriter).isDelegating());
     assertTrue(((FastLogger)logWriter).isDelegating());
 
-    DistributedTestCase.waitForCriterion(new WaitCriterion() {
+    Wait.waitForCriterion(new WaitCriterion() {
       @Override
       public boolean done() {
         return logFile.exists();

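In the logging JUnit tests the only change is where the polling helper lives: WaitCriterion is now a top-level dunit type and the wait loop moves from DistributedTestCase to Wait. A minimal sketch of the call after the change; done() is used exactly as in the hunks above, while description() and the 15-second timeout are illustrative assumptions (the four-argument form matches the Wait.waitForCriterion(ev, sleepMillis * numTries, 200, true) call later in this commit).

    import com.gemstone.gemfire.test.dunit.Wait;
    import com.gemstone.gemfire.test.dunit.WaitCriterion;

    // was: DistributedTestCase.waitForCriterion(...) with the nested DistributedTestCase.WaitCriterion type
    Wait.waitForCriterion(new WaitCriterion() {
      @Override
      public boolean done() {
        return logFile.exists();              // poll until the log file has been created
      }
      @Override
      public String description() {
        return "waiting for log file to exist: " + logFile;  // assumed description() member
      }
    }, 15 * 1000, 200, true);                 // timeout ms, poll interval ms, throw on timeout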
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/logging/LocatorLogFileJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/logging/LocatorLogFileJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/logging/LocatorLogFileJUnitTest.java
index d817f74..ffb09ce 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/logging/LocatorLogFileJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/logging/LocatorLogFileJUnitTest.java
@@ -35,8 +35,8 @@ import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.logging.log4j.LogWriterLogger;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
@@ -103,7 +103,7 @@ public class LocatorLogFileJUnitTest {
         InternalLogWriter.INFO_LEVEL, logWriter.getLogWriterLevel());
     
     assertNotNull(this.locator);
-    DistributedTestCase.waitForCriterion(new WaitCriterion() {
+    Wait.waitForCriterion(new WaitCriterion() {
       @Override
       public boolean done() {
         return logFile.exists();

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/logging/MergeLogFilesJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/logging/MergeLogFilesJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/logging/MergeLogFilesJUnitTest.java
index e076d04..32d38c8 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/logging/MergeLogFilesJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/logging/MergeLogFilesJUnitTest.java
@@ -41,7 +41,7 @@ import org.junit.experimental.categories.Category;
 
 import com.gemstone.gemfire.LogWriter;
 import com.gemstone.gemfire.SystemFailure;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
@@ -79,7 +79,7 @@ public class MergeLogFilesJUnitTest {
 
     for (Iterator iter = workers.iterator(); iter.hasNext(); ) {
       Worker worker = (Worker) iter.next();
-      DistributedTestCase.join(worker, 120 * 1000, null);
+      ThreadUtils.join(worker, 120 * 1000);
     }
 
     if (group.exceptionOccurred()) {

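ThreadUtils.join also has an overload for plain threads, which is why a pure JUnit test such as MergeLogFilesJUnitTest can drop its DistributedTestCase dependency entirely: the old three-argument join took a trailing LogWriter that the new call no longer needs. A tiny sketch under that assumption (the worker thread and its task are hypothetical):

    // Hypothetical background task; only the join call reflects this commit.
    Thread worker = new Thread(new Runnable() {
      public void run() {
        // e.g. write one of the scratch log files being merged
      }
    });
    worker.start();
    // was: DistributedTestCase.join(worker, 120 * 1000, null);
    ThreadUtils.join(worker, 120 * 1000);   // wait up to two minutes for the worker to finish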


[13/62] [abbrv] [partial] incubator-geode git commit: GEODE-773: Extract static methods from DistributedTestCase

Posted by je...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/security/SecurityTestUtil.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/security/SecurityTestUtil.java b/gemfire-core/src/test/java/com/gemstone/gemfire/security/SecurityTestUtil.java
index 2ac470c..6e24398 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/security/SecurityTestUtil.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/security/SecurityTestUtil.java
@@ -78,7 +78,12 @@ import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.logging.InternalLogWriter;
 import com.gemstone.gemfire.internal.logging.PureLogWriter;
 import com.gemstone.gemfire.internal.util.Callable;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Contains utility methods for setting up servers/clients for authentication
@@ -207,7 +212,7 @@ public class SecurityTestUtil extends DistributedTestCase {
 
     Integer locatorPort = new Integer(AvailablePort
         .getRandomAvailablePort(AvailablePort.SOCKET));
-    String addr = DistributedTestCase.getIPLiteral();
+    String addr = NetworkUtils.getIPLiteral();
     if (locatorString == null) {
       locatorString = addr + "[" + locatorPort + ']';
     }
@@ -287,14 +292,14 @@ public class SecurityTestUtil extends DistributedTestCase {
       authProps.setProperty(DistributionConfig.LOCATORS_NAME, locatorString);
       if (locatorPort != null) {
         authProps.setProperty(DistributionConfig.START_LOCATOR_NAME,
-            DistributedTestCase.getIPLiteral() + "[" + locatorPort.toString() + ']');
+            NetworkUtils.getIPLiteral() + "[" + locatorPort.toString() + ']');
       }
     } else {
-      authProps.setProperty("locators", "localhost["+getDUnitLocatorPort()+"]");
+      authProps.setProperty("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
     }
     authProps.setProperty(DistributionConfig.SECURITY_LOG_LEVEL_NAME, "finest");
-    getLogWriter().info("Set the server properties to: " + authProps);
-    getLogWriter().info("Set the java properties to: " + javaProps);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Set the server properties to: " + authProps);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Set the java properties to: " + javaProps);
 
     SecurityTestUtil tmpInstance = new SecurityTestUtil("temp");
     try {
@@ -305,24 +310,24 @@ public class SecurityTestUtil extends DistributedTestCase {
     }
     catch (AuthenticationRequiredException ex) {
       if (expectedResult.intValue() == AUTHREQ_EXCEPTION) {
-        getLogWriter().info("Got expected exception when starting peer: " + ex);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Got expected exception when starting peer: " + ex);
         return new Integer(0);
       }
       else {
-        fail("Got unexpected exception when starting peer", ex);
+        Assert.fail("Got unexpected exception when starting peer", ex);
       }
     }
     catch (AuthenticationFailedException ex) {
       if (expectedResult.intValue() == AUTHFAIL_EXCEPTION) {
-        getLogWriter().info("Got expected exception when starting peer: " + ex);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Got expected exception when starting peer: " + ex);
         return new Integer(0);
       }
       else {
-        fail("Got unexpected exception when starting peer", ex);
+        Assert.fail("Got unexpected exception when starting peer", ex);
       }
     }
     catch (Exception ex) {
-      fail("Got unexpected exception when starting peer", ex);
+      Assert.fail("Got unexpected exception when starting peer", ex);
     }
 
     if (setupDynamicRegionFactory.booleanValue()) {
@@ -348,7 +353,7 @@ public class SecurityTestUtil extends DistributedTestCase {
       server1.start();
     }
     catch (Exception ex) {
-      fail("Got unexpected exception when starting CacheServer", ex);
+      Assert.fail("Got unexpected exception when starting CacheServer", ex);
     }
     return new Integer(server1.getPort());
   }
@@ -422,7 +427,7 @@ public class SecurityTestUtil extends DistributedTestCase {
         //poolFactory.setSubscriptionEnabled(false);
       }
       pool = ClientServerTestCase.configureConnectionPoolWithNameAndFactory(factory,
-          DistributedTestCase.getIPLiteral(), portsI, subscriptionEnabled, 0,
+          NetworkUtils.getIPLiteral(), portsI, subscriptionEnabled, 0,
           numConnections == null ? -1 : numConnections.intValue(), null, null,
           poolFactory);
 
@@ -431,17 +436,17 @@ public class SecurityTestUtil extends DistributedTestCase {
       }
       tmpInstance.openCache();
       try {
-        getLogWriter().info("multi-user mode " + multiUserAuthMode);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("multi-user mode " + multiUserAuthMode);
         proxyCaches[0] = (ProxyCache)((PoolImpl) pool).createAuthenticatedCacheView(authProps);
         if (!multiUserAuthMode) {
           fail("Expected a UnsupportedOperationException but got none in single-user mode");
         }
       } catch (UnsupportedOperationException uoe) {
         if (!multiUserAuthMode) {
-          getLogWriter().info("Got expected UnsupportedOperationException in single-user mode");
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Got expected UnsupportedOperationException in single-user mode");
         }
         else {
-          fail("Got unexpected exception in multi-user mode ", uoe);
+          Assert.fail("Got unexpected exception in multi-user mode ", uoe);
         }
       }
 
@@ -462,33 +467,33 @@ public class SecurityTestUtil extends DistributedTestCase {
     catch (AuthenticationRequiredException ex) {
       if (expectedResult.intValue() == AUTHREQ_EXCEPTION
           || expectedResult.intValue() == NOFORCE_AUTHREQ_EXCEPTION) {
-        getLogWriter().info(
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
             "Got expected exception when starting client: " + ex);
       }
       else {
-        fail("Got unexpected exception when starting client", ex);
+        Assert.fail("Got unexpected exception when starting client", ex);
       }
     }
     catch (AuthenticationFailedException ex) {
       if (expectedResult.intValue() == AUTHFAIL_EXCEPTION) {
-        getLogWriter().info(
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
             "Got expected exception when starting client: " + ex);
       }
       else {
-        fail("Got unexpected exception when starting client", ex);
+        Assert.fail("Got unexpected exception when starting client", ex);
       }
     }
     catch (ServerRefusedConnectionException ex) {
       if (expectedResult.intValue() == CONNREFUSED_EXCEPTION) {
-        getLogWriter().info(
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
             "Got expected exception when starting client: " + ex);
       }
       else {
-        fail("Got unexpected exception when starting client", ex);
+        Assert.fail("Got unexpected exception when starting client", ex);
       }
     }
     catch (Exception ex) {
-      fail("Got unexpected exception when starting client", ex);
+      Assert.fail("Got unexpected exception when starting client", ex);
     }
   }
 
@@ -564,7 +569,7 @@ public class SecurityTestUtil extends DistributedTestCase {
       poolFactory.setMultiuserAuthentication(multiUserAuthMode);
       poolFactory.setSubscriptionEnabled(true);
       pool = ClientServerTestCase.configureConnectionPoolWithNameAndFactory(factory,
-          DistributedTestCase.getIPLiteral(), portsI, true, 1,
+          NetworkUtils.getIPLiteral(), portsI, true, 1,
           numConnections == null ? -1 : numConnections.intValue(), null, null,
           poolFactory);
 
@@ -591,33 +596,33 @@ public class SecurityTestUtil extends DistributedTestCase {
     catch (AuthenticationRequiredException ex) {
       if (expectedResult.intValue() == AUTHREQ_EXCEPTION
           || expectedResult.intValue() == NOFORCE_AUTHREQ_EXCEPTION) {
-        getLogWriter().info(
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
             "Got expected exception when starting client: " + ex);
       }
       else {
-        fail("Got unexpected exception when starting client", ex);
+        Assert.fail("Got unexpected exception when starting client", ex);
       }
     }
     catch (AuthenticationFailedException ex) {
       if (expectedResult.intValue() == AUTHFAIL_EXCEPTION) {
-        getLogWriter().info(
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
             "Got expected exception when starting client: " + ex);
       }
       else {
-        fail("Got unexpected exception when starting client", ex);
+        Assert.fail("Got unexpected exception when starting client", ex);
       }
     }
     catch (ServerRefusedConnectionException ex) {
       if (expectedResult.intValue() == CONNREFUSED_EXCEPTION) {
-        getLogWriter().info(
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
             "Got expected exception when starting client: " + ex);
       }
       else {
-        fail("Got unexpected exception when starting client", ex);
+        Assert.fail("Got unexpected exception when starting client", ex);
       }
     }
     catch (Exception ex) {
-      fail("Got unexpected exception when starting client", ex);
+      Assert.fail("Got unexpected exception when starting client", ex);
     }
   }
 
@@ -647,7 +652,7 @@ public class SecurityTestUtil extends DistributedTestCase {
         server.start();
       }
       catch (Exception ex) {
-        fail("Unexpected exception when restarting cache servers", ex);
+        Assert.fail("Unexpected exception when restarting cache servers", ex);
       }
       assertTrue(server.isRunning());
     }
@@ -664,7 +669,7 @@ public class SecurityTestUtil extends DistributedTestCase {
       }
       authProps.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
       authProps.setProperty(DistributionConfig.LOCATORS_NAME, 
-                            DistributedTestCase.getIPLiteral() + "[" + port + "]");
+                            NetworkUtils.getIPLiteral() + "[" + port + "]");
       authProps.setProperty(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "false");
       clearStaticSSLContext();
       setJavaProps((Properties)javaProps);
@@ -678,7 +683,7 @@ public class SecurityTestUtil extends DistributedTestCase {
           authProps);
     }
     catch (IOException ex) {
-      fail("While starting locator on port " + port.intValue(), ex);
+      Assert.fail("While starting locator on port " + port.intValue(), ex);
     }
   }
 
@@ -690,7 +695,7 @@ public class SecurityTestUtil extends DistributedTestCase {
           .getDistributedSystem().getLogWriter());
     }
     catch (Exception ex) {
-      fail("While stopping locator on port " + port.intValue(), ex);
+      Assert.fail("While stopping locator on port " + port.intValue(), ex);
     }
   }
 
@@ -712,7 +717,7 @@ public class SecurityTestUtil extends DistributedTestCase {
           return ((Boolean)cond.call()).booleanValue();
         }
         catch (Exception e) {
-          fail("Unexpected exception", e);
+          Assert.fail("Unexpected exception", e);
         }
         return false; // NOTREACHED
       }
@@ -720,7 +725,7 @@ public class SecurityTestUtil extends DistributedTestCase {
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, sleepMillis * numTries, 200, true);
+    Wait.waitForCriterion(ev, sleepMillis * numTries, 200, true);
   }
 
   public static Object getLocalValue(Region region, Object key) {
@@ -757,10 +762,10 @@ public class SecurityTestUtil extends DistributedTestCase {
     }
     catch (Exception ex) {
       if (expectedResult.intValue() == OTHER_EXCEPTION) {
-        getLogWriter().info("Got expected exception when doing puts: " + ex);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Got expected exception when doing puts: " + ex);
       }
       else {
-        fail("Got unexpected exception when doing puts", ex);
+        Assert.fail("Got unexpected exception when doing puts", ex);
       }
     }
     for (int index = 0; index < num.intValue(); ++index) {
@@ -777,50 +782,50 @@ public class SecurityTestUtil extends DistributedTestCase {
       }
       catch(NoAvailableServersException ex) {
         if(expectedResult.intValue() == SecurityTestUtil.NO_AVAILABLE_SERVERS) {
-          getLogWriter().info(
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
               "Got expected NoAvailableServers when doing puts: "
               + ex.getCause());
           continue;
         }
         else {
-          fail("Got unexpected exception when doing puts", ex);
+          Assert.fail("Got unexpected exception when doing puts", ex);
         }
       }
       catch (ServerConnectivityException ex) {
         if ((expectedResult.intValue() == NOTAUTHZ_EXCEPTION)
             && (ex.getCause() instanceof NotAuthorizedException)) {
-          getLogWriter().info(
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
               "Got expected NotAuthorizedException when doing puts: "
                   + ex.getCause());
           continue;
         }
         if ((expectedResult.intValue() == AUTHREQ_EXCEPTION)
             && (ex.getCause() instanceof AuthenticationRequiredException)) {
-          getLogWriter().info(
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
               "Got expected AuthenticationRequiredException when doing puts: "
                   + ex.getCause());
           continue;
         }
         if ((expectedResult.intValue() == AUTHFAIL_EXCEPTION)
             && (ex.getCause() instanceof AuthenticationFailedException)) {
-          getLogWriter().info(
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
               "Got expected AuthenticationFailedException when doing puts: "
                   + ex.getCause());
           continue;
         }
         else if (expectedResult.intValue() == OTHER_EXCEPTION) {
-          getLogWriter().info("Got expected exception when doing puts: " + ex);
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Got expected exception when doing puts: " + ex);
         }
         else {
-          fail("Got unexpected exception when doing puts", ex);
+          Assert.fail("Got unexpected exception when doing puts", ex);
         }
       }
       catch (Exception ex) {
         if (expectedResult.intValue() == OTHER_EXCEPTION) {
-          getLogWriter().info("Got expected exception when doing puts: " + ex);
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Got expected exception when doing puts: " + ex);
         }
         else {
-          fail("Got unexpected exception when doing puts", ex);
+          Assert.fail("Got unexpected exception when doing puts", ex);
         }
       }
     }
@@ -840,10 +845,10 @@ public class SecurityTestUtil extends DistributedTestCase {
     }
     catch (Exception ex) {
       if (expectedResult.intValue() == OTHER_EXCEPTION) {
-        getLogWriter().info("Got expected exception when doing getAll: " + ex);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Got expected exception when doing getAll: " + ex);
       }
       else {
-        fail("Got unexpected exception when doing getAll", ex);
+        Assert.fail("Got unexpected exception when doing getAll", ex);
       }
     }
     try {
@@ -869,28 +874,28 @@ public class SecurityTestUtil extends DistributedTestCase {
       }
     } catch (NoAvailableServersException ex) {
       if (expectedResult.intValue() == SecurityTestUtil.NO_AVAILABLE_SERVERS) {
-        getLogWriter().info(
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
             "Got expected NoAvailableServers when doing getAll: "
                 + ex.getCause());
       } else {
-        fail("Got unexpected exception when doing getAll", ex);
+        Assert.fail("Got unexpected exception when doing getAll", ex);
       }
     } catch (ServerConnectivityException ex) {
       if ((expectedResult.intValue() == NOTAUTHZ_EXCEPTION)
           && (ex.getCause() instanceof NotAuthorizedException)) {
-        getLogWriter().info(
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
             "Got expected NotAuthorizedException when doing getAll: "
                 + ex.getCause());
       } else if (expectedResult.intValue() == OTHER_EXCEPTION) {
-        getLogWriter().info("Got expected exception when doing getAll: " + ex);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Got expected exception when doing getAll: " + ex);
       } else {
-        fail("Got unexpected exception when doing getAll", ex);
+        Assert.fail("Got unexpected exception when doing getAll", ex);
       }
     } catch (Exception ex) {
       if (expectedResult.intValue() == OTHER_EXCEPTION) {
-        getLogWriter().info("Got expected exception when doing getAll: " + ex);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Got expected exception when doing getAll: " + ex);
       } else {
-        fail("Got unexpected exception when doing getAll", ex);
+        Assert.fail("Got unexpected exception when doing getAll", ex);
       }
     }
   }
@@ -916,10 +921,10 @@ public class SecurityTestUtil extends DistributedTestCase {
     }
     catch (Exception ex) {
       if (expectedResult.intValue() == OTHER_EXCEPTION) {
-        getLogWriter().info("Got expected exception when doing gets: " + ex);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Got expected exception when doing gets: " + ex);
       }
       else {
-        fail("Got unexpected exception when doing gets", ex);
+        Assert.fail("Got unexpected exception when doing gets", ex);
       }
     }
     for (int index = 0; index < num.intValue(); ++index) {
@@ -937,36 +942,36 @@ public class SecurityTestUtil extends DistributedTestCase {
       }
       catch(NoAvailableServersException ex) {
         if(expectedResult.intValue() == SecurityTestUtil.NO_AVAILABLE_SERVERS) {
-          getLogWriter().info(
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
               "Got expected NoAvailableServers when doing gets: "
               + ex.getCause());
           continue;
         }
         else {
-          fail("Got unexpected exception when doing gets", ex);
+          Assert.fail("Got unexpected exception when doing gets", ex);
         }
       }
       catch (ServerConnectivityException ex) {
         if ((expectedResult.intValue() == NOTAUTHZ_EXCEPTION)
             && (ex.getCause() instanceof NotAuthorizedException)) {
-          getLogWriter().info(
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
               "Got expected NotAuthorizedException when doing gets: "
                   + ex.getCause());
           continue;
         }
         else if (expectedResult.intValue() == OTHER_EXCEPTION) {
-          getLogWriter().info("Got expected exception when doing gets: " + ex);
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Got expected exception when doing gets: " + ex);
         }
         else {
-          fail("Got unexpected exception when doing gets", ex);
+          Assert.fail("Got unexpected exception when doing gets", ex);
         }
       }
       catch (Exception ex) {
         if (expectedResult.intValue() == OTHER_EXCEPTION) {
-          getLogWriter().info("Got expected exception when doing gets: " + ex);
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Got expected exception when doing gets: " + ex);
         }
         else {
-          fail("Got unexpected exception when doing gets", ex);
+          Assert.fail("Got unexpected exception when doing gets", ex);
         }
       }
       assertNotNull(value);
@@ -1017,10 +1022,10 @@ public class SecurityTestUtil extends DistributedTestCase {
       assertNotNull(region);
     } catch (Exception ex) {
       if (expectedResult.intValue() == OTHER_EXCEPTION) {
-        getLogWriter().info(
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
             "Got expected exception when doing region destroy: " + ex);
       } else {
-        fail("Got unexpected exception when doing region destroy", ex);
+        Assert.fail("Got unexpected exception when doing region destroy", ex);
       }
     }
 
@@ -1037,30 +1042,30 @@ public class SecurityTestUtil extends DistributedTestCase {
       assertNull(region);
     } catch (NoAvailableServersException ex) {
       if (expectedResult.intValue() == SecurityTestUtil.NO_AVAILABLE_SERVERS) {
-        getLogWriter().info(
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
             "Got expected NoAvailableServers when doing region destroy: "
                 + ex.getCause());
       } else {
-        fail("Got unexpected exception when doing region destroy", ex);
+        Assert.fail("Got unexpected exception when doing region destroy", ex);
       }
     } catch (ServerConnectivityException ex) {
       if ((expectedResult.intValue() == NOTAUTHZ_EXCEPTION)
           && (ex.getCause() instanceof NotAuthorizedException)) {
-        getLogWriter().info(
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
             "Got expected NotAuthorizedException when doing region destroy: "
                 + ex.getCause());
       } else if (expectedResult.intValue() == OTHER_EXCEPTION) {
-        getLogWriter().info(
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
             "Got expected exception when doing region destroy: " + ex);
       } else {
-        fail("Got unexpected exception when doing region destroy", ex);
+        Assert.fail("Got unexpected exception when doing region destroy", ex);
       }
     } catch (Exception ex) {
       if (expectedResult.intValue() == OTHER_EXCEPTION) {
-        getLogWriter().info(
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
             "Got expected exception when doing region destroy: " + ex);
       } else {
-        fail("Got unexpected exception when doing region destroy", ex);
+        Assert.fail("Got unexpected exception when doing region destroy", ex);
       }
     }
   }
@@ -1081,10 +1086,10 @@ public class SecurityTestUtil extends DistributedTestCase {
     }
     catch (Exception ex) {
       if (expectedResult.intValue() == OTHER_EXCEPTION) {
-        getLogWriter().info("Got expected exception when doing destroys: " + ex);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Got expected exception when doing destroys: " + ex);
       }
       else {
-        fail("Got unexpected exception when doing destroys", ex);
+        Assert.fail("Got unexpected exception when doing destroys", ex);
       }
     }
     for (int index = 0; index < num.intValue(); ++index) {
@@ -1096,36 +1101,36 @@ public class SecurityTestUtil extends DistributedTestCase {
       }
       catch(NoAvailableServersException ex) {
         if(expectedResult.intValue() == SecurityTestUtil.NO_AVAILABLE_SERVERS) {
-          getLogWriter().info(
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
               "Got expected NoAvailableServers when doing destroys: "
               + ex.getCause());
           continue;
         }
         else {
-          fail("Got unexpected exception when doing destroys", ex);
+          Assert.fail("Got unexpected exception when doing destroys", ex);
         }
       }
       catch (ServerConnectivityException ex) {
         if ((expectedResult.intValue() == NOTAUTHZ_EXCEPTION)
             && (ex.getCause() instanceof NotAuthorizedException)) {
-          getLogWriter().info(
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
               "Got expected NotAuthorizedException when doing destroys: "
                   + ex.getCause());
           continue;
         }
         else if (expectedResult.intValue() == OTHER_EXCEPTION) {
-          getLogWriter().info("Got expected exception when doing destroys: " + ex);
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Got expected exception when doing destroys: " + ex);
         }
         else {
-          fail("Got unexpected exception when doing destroys", ex);
+          Assert.fail("Got unexpected exception when doing destroys", ex);
         }
       }
       catch (Exception ex) {
         if (expectedResult.intValue() == OTHER_EXCEPTION) {
-          getLogWriter().info("Got expected exception when doing destroys: " + ex);
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Got expected exception when doing destroys: " + ex);
         }
         else {
-          fail("Got unexpected exception when doing destroys", ex);
+          Assert.fail("Got unexpected exception when doing destroys", ex);
         }
       }
     }
@@ -1147,10 +1152,10 @@ public class SecurityTestUtil extends DistributedTestCase {
     }
     catch (Exception ex) {
       if (expectedResult.intValue() == OTHER_EXCEPTION) {
-        getLogWriter().info("Got expected exception when doing invalidates: " + ex);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Got expected exception when doing invalidates: " + ex);
       }
       else {
-        fail("Got unexpected exception when doing invalidates", ex);
+        Assert.fail("Got unexpected exception when doing invalidates", ex);
       }
     }
     for (int index = 0; index < num.intValue(); ++index) {
@@ -1162,36 +1167,36 @@ public class SecurityTestUtil extends DistributedTestCase {
       }
       catch(NoAvailableServersException ex) {
         if(expectedResult.intValue() == SecurityTestUtil.NO_AVAILABLE_SERVERS) {
-          getLogWriter().info(
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
               "Got expected NoAvailableServers when doing invalidates: "
               + ex.getCause());
           continue;
         }
         else {
-          fail("Got unexpected exception when doing invalidates", ex);
+          Assert.fail("Got unexpected exception when doing invalidates", ex);
         }
       }
       catch (ServerConnectivityException ex) {
         if ((expectedResult.intValue() == NOTAUTHZ_EXCEPTION)
             && (ex.getCause() instanceof NotAuthorizedException)) {
-          getLogWriter().info(
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
               "Got expected NotAuthorizedException when doing invalidates: "
                   + ex.getCause());
           continue;
         }
         else if (expectedResult.intValue() == OTHER_EXCEPTION) {
-          getLogWriter().info("Got expected exception when doing invalidates: " + ex);
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Got expected exception when doing invalidates: " + ex);
         }
         else {
-          fail("Got unexpected exception when doing invalidates", ex);
+          Assert.fail("Got unexpected exception when doing invalidates", ex);
         }
       }
       catch (Exception ex) {
         if (expectedResult.intValue() == OTHER_EXCEPTION) {
-          getLogWriter().info("Got expected exception when doing invalidates: " + ex);
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Got expected exception when doing invalidates: " + ex);
         }
         else {
-          fail("Got unexpected exception when doing invalidates", ex);
+          Assert.fail("Got unexpected exception when doing invalidates", ex);
         }
       }
     }
@@ -1213,10 +1218,10 @@ public class SecurityTestUtil extends DistributedTestCase {
     }
     catch (Exception ex) {
       if (expectedResult.intValue() == OTHER_EXCEPTION) {
-        getLogWriter().info("Got expected exception when doing containsKey: " + ex);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Got expected exception when doing containsKey: " + ex);
       }
       else {
-        fail("Got unexpected exception when doing containsKey", ex);
+        Assert.fail("Got unexpected exception when doing containsKey", ex);
       }
     }
     for (int index = 0; index < num.intValue(); ++index) {
@@ -1229,36 +1234,36 @@ public class SecurityTestUtil extends DistributedTestCase {
       }
       catch(NoAvailableServersException ex) {
         if(expectedResult.intValue() == SecurityTestUtil.NO_AVAILABLE_SERVERS) {
-          getLogWriter().info(
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
               "Got expected NoAvailableServers when doing containsKey: "
               + ex.getCause());
           continue;
         }
         else {
-          fail("Got unexpected exception when doing containsKey", ex);
+          Assert.fail("Got unexpected exception when doing containsKey", ex);
         }
       }
       catch (ServerConnectivityException ex) {
         if ((expectedResult.intValue() == NOTAUTHZ_EXCEPTION)
             && (ex.getCause() instanceof NotAuthorizedException)) {
-          getLogWriter().info(
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
               "Got expected NotAuthorizedException when doing containsKey: "
                   + ex.getCause());
           continue;
         }
         else if (expectedResult.intValue() == OTHER_EXCEPTION) {
-          getLogWriter().info("Got expected exception when doing containsKey: " + ex);
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Got expected exception when doing containsKey: " + ex);
         }
         else {
-          fail("Got unexpected exception when doing containsKey", ex);
+          Assert.fail("Got unexpected exception when doing containsKey", ex);
         }
       }
       catch (Exception ex) {
         if (expectedResult.intValue() == OTHER_EXCEPTION) {
-          getLogWriter().info("Got expected exception when doing containsKey: " + ex);
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Got expected exception when doing containsKey: " + ex);
         }
         else {
-          fail("Got unexpected exception when doing containsKey", ex);
+          Assert.fail("Got unexpected exception when doing containsKey", ex);
         }
       }
       assertEquals(expectedValue, result);
@@ -1277,9 +1282,9 @@ public class SecurityTestUtil extends DistributedTestCase {
       assertNotNull(region);
     } catch (Exception ex) {
       if (expectedResult.intValue() == OTHER_EXCEPTION) {
-        getLogWriter().info("Got expected exception when doing queries: " + ex);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Got expected exception when doing queries: " + ex);
       } else {
-        fail("Got unexpected exception when doing queries", ex);
+        Assert.fail("Got unexpected exception when doing queries", ex);
       }
     }
     String queryStr = "SELECT DISTINCT * FROM " + region.getFullPath();
@@ -1292,39 +1297,39 @@ public class SecurityTestUtil extends DistributedTestCase {
       }
     } catch (NoAvailableServersException ex) {
       if (expectedResult.intValue() == SecurityTestUtil.NO_AVAILABLE_SERVERS) {
-        getLogWriter().info(
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
             "Got expected NoAvailableServers when doing queries: "
                 + ex.getCause());
       } else {
-        fail("Got unexpected exception when doing queries", ex);
+        Assert.fail("Got unexpected exception when doing queries", ex);
       }
     } catch (ServerConnectivityException ex) {
       if ((expectedResult.intValue() == NOTAUTHZ_EXCEPTION)
           && (ex.getCause() instanceof NotAuthorizedException)) {
-        getLogWriter().info(
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
             "Got expected NotAuthorizedException when doing queries: "
                 + ex.getCause());
       } else if (expectedResult.intValue() == OTHER_EXCEPTION) {
-        getLogWriter().info("Got expected exception when doing queries: " + ex);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Got expected exception when doing queries: " + ex);
       } else {
-        fail("Got unexpected exception when doing queries", ex);
+        Assert.fail("Got unexpected exception when doing queries", ex);
       }
     } catch (QueryInvocationTargetException qite) {
       if ((expectedResult.intValue() == NOTAUTHZ_EXCEPTION)
           && (qite.getCause() instanceof NotAuthorizedException)) {
-        getLogWriter().info(
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
             "Got expected NotAuthorizedException when doing queries: "
                 + qite.getCause());
       } else if (expectedResult.intValue() == OTHER_EXCEPTION) {
-        getLogWriter().info("Got expected exception when doing queries: " + qite);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Got expected exception when doing queries: " + qite);
       } else {
-        fail("Got unexpected exception when doing queries", qite);
+        Assert.fail("Got unexpected exception when doing queries", qite);
       }
     } catch (Exception ex) {
       if (expectedResult.intValue() == OTHER_EXCEPTION) {
-        getLogWriter().info("Got expected exception when doing queries: " + ex);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Got expected exception when doing queries: " + ex);
       } else {
-        fail("Got unexpected exception when doing queries", ex);
+        Assert.fail("Got unexpected exception when doing queries", ex);
       }
     }
   }
@@ -1342,10 +1347,10 @@ public class SecurityTestUtil extends DistributedTestCase {
       assertNotNull(region);
     } catch (Exception ex) {
       if (expectedResult.intValue() == OTHER_EXCEPTION) {
-        getLogWriter().info(
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
             "Got expected exception when executing function: " + ex);
       } else {
-        fail("Got unexpected exception when executing function", ex);
+        Assert.fail("Got unexpected exception when executing function", ex);
       }
     }
     try {
@@ -1372,44 +1377,44 @@ public class SecurityTestUtil extends DistributedTestCase {
       }
     } catch (NoAvailableServersException ex) {
       if (expectedResult.intValue() == SecurityTestUtil.NO_AVAILABLE_SERVERS) {
-        getLogWriter().info(
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
             "Got expected NoAvailableServers when executing function: "
                 + ex.getCause());
       } else {
-        fail("Got unexpected exception when executing function", ex);
+        Assert.fail("Got unexpected exception when executing function", ex);
       }
     } catch (ServerConnectivityException ex) {
       if ((expectedResult.intValue() == NOTAUTHZ_EXCEPTION)
           && (ex.getCause() instanceof NotAuthorizedException)) {
-        getLogWriter().info(
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
             "Got expected NotAuthorizedException when executing function: "
                 + ex.getCause());
       } else if (expectedResult.intValue() == OTHER_EXCEPTION) {
-        getLogWriter().info(
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
             "Got expected exception when executing function: " + ex);
       } else {
-        fail("Got unexpected exception when executing function", ex);
+        Assert.fail("Got unexpected exception when executing function", ex);
       }
     } catch (FunctionException ex) {
       if ((expectedResult.intValue() == NOTAUTHZ_EXCEPTION)
           && ((ex.getCause() instanceof NotAuthorizedException) || ((ex
               .getCause() instanceof ServerOperationException) && (((ServerOperationException)ex
               .getCause()).getCause() instanceof NotAuthorizedException)))) {
-        getLogWriter().info(
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
             "Got expected NotAuthorizedException when executing function: "
                 + ex.getCause());
       } else if (expectedResult.intValue() == OTHER_EXCEPTION) {
-        getLogWriter().info(
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
             "Got expected exception when executing function: " + ex);
       } else {
-        fail("Got unexpected exception when executing function", ex);
+        Assert.fail("Got unexpected exception when executing function", ex);
       }
     } catch (Exception ex) {
       if (expectedResult.intValue() == OTHER_EXCEPTION) {
-        getLogWriter().info(
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
             "Got expected exception when executing function: " + ex);
       } else {
-        fail("Got unexpected exception when executing function", ex);
+        Assert.fail("Got unexpected exception when executing function", ex);
       }
     }
   }
@@ -1426,10 +1431,10 @@ public class SecurityTestUtil extends DistributedTestCase {
       assertNotNull(region);
     } catch (Exception ex) {
       if (expectedResult.intValue() == OTHER_EXCEPTION) {
-        getLogWriter().info(
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
             "Got expected exception when executing query: " + ex);
       } else {
-        fail("Got unexpected exception when executing query", ex);
+        Assert.fail("Got unexpected exception when executing query", ex);
       }
     }
     try {
@@ -1448,30 +1453,30 @@ public class SecurityTestUtil extends DistributedTestCase {
       assertEquals(expectedValue.intValue(), result.asList().size());
     } catch (NoAvailableServersException ex) {
       if (expectedResult.intValue() == SecurityTestUtil.NO_AVAILABLE_SERVERS) {
-        getLogWriter().info(
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
             "Got expected NoAvailableServers when executing query: "
                 + ex.getCause());
       } else {
-        fail("Got unexpected exception when executing query", ex);
+        Assert.fail("Got unexpected exception when executing query", ex);
       }
     } catch (ServerConnectivityException ex) {
       if ((expectedResult.intValue() == NOTAUTHZ_EXCEPTION)
           && (ex.getCause() instanceof NotAuthorizedException)) {
-        getLogWriter().info(
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
             "Got expected NotAuthorizedException when executing query: "
                 + ex.getCause());
       } else if (expectedResult.intValue() == OTHER_EXCEPTION) {
-        getLogWriter().info(
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
             "Got expected exception when executing query: " + ex);
       } else {
-        fail("Got unexpected exception when executing query", ex);
+        Assert.fail("Got unexpected exception when executing query", ex);
       }
     } catch (Exception ex) {
       if (expectedResult.intValue() == OTHER_EXCEPTION) {
-        getLogWriter().info(
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
             "Got expected exception when executing query: " + ex);
       } else {
-        fail("Got unexpected exception when executing query", ex);
+        Assert.fail("Got unexpected exception when executing query", ex);
       }
     }
   }
@@ -1493,7 +1498,7 @@ public class SecurityTestUtil extends DistributedTestCase {
           + ", but expected results " + expectedResults.length);
     }
     for (int i = 0; i < numOfUsers; i++) {
-      getLogWriter().info("PUT: MultiUser# " + i);
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("PUT: MultiUser# " + i);
       doPutsP(num, Integer.valueOf(i), expectedResults[i], false);
     }
   }
@@ -1519,7 +1524,7 @@ public class SecurityTestUtil extends DistributedTestCase {
           + ", but expected results " + expectedResults.length);
     }
     for (int i = 0; i < numOfUsers; i++) {
-      getLogWriter().info(
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
           "GET_ALL" + (useTX ? " in TX" : "") + ": MultiUser# " + i);
       doGetAllP(Integer.valueOf(i), expectedResults[i], useTX);
     }
@@ -1532,7 +1537,7 @@ public class SecurityTestUtil extends DistributedTestCase {
           + ", but expected results " + expectedResults.length);
     }
     for (int i = 0; i < numOfUsers; i++) {
-      getLogWriter().info("GET: MultiUser# " + i);
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("GET: MultiUser# " + i);
       doGetsP(num, Integer.valueOf(i), expectedResults[i], false);
     }
   }
@@ -1544,7 +1549,7 @@ public class SecurityTestUtil extends DistributedTestCase {
           + ", but expected results " + expectedResults.length);
     }
     for (int i = numOfUsers-1; i >= 0; i--) {
-      getLogWriter().info("DESTROY: MultiUser# " + i);
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("DESTROY: MultiUser# " + i);
       doRegionDestroysP(Integer.valueOf(i), expectedResults[i]);
     }
   }
@@ -1556,7 +1561,7 @@ public class SecurityTestUtil extends DistributedTestCase {
           + ", but expected results " + expectedResults.length);
     }
     for (int i = 0; i < numOfUsers; i++) {
-      getLogWriter().info("DESTROY: MultiUser# " + i);
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("DESTROY: MultiUser# " + i);
       doDestroysP(num, Integer.valueOf(i), expectedResults[i], false);
     }
   }
@@ -1568,7 +1573,7 @@ public class SecurityTestUtil extends DistributedTestCase {
           + ", but expected results " + expectedResults.length);
     }
     for (int i = 0; i < numOfUsers; i++) {
-      getLogWriter().info("INVALIDATE: MultiUser# " + i);
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("INVALIDATE: MultiUser# " + i);
       doInvalidatesP(num, Integer.valueOf(i), expectedResults[i], false);
     }
   }
@@ -1584,7 +1589,7 @@ public class SecurityTestUtil extends DistributedTestCase {
           + ", but #expected output " + results.length);
     }
     for (int i = 0; i < numOfUsers; i++) {
-      getLogWriter().info("CONTAINS_KEY: MultiUser# " + i);
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("CONTAINS_KEY: MultiUser# " + i);
       doContainsKeysP(num, Integer.valueOf(i), expectedResults[i], false, results[i]);
     }
   }
@@ -1596,7 +1601,7 @@ public class SecurityTestUtil extends DistributedTestCase {
           + ", but #expected results " + expectedResults.length);
     }
     for (int i = 0; i < numOfUsers; i++) {
-      getLogWriter().info("QUERY: MultiUser# " + i);
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("QUERY: MultiUser# " + i);
       doQueriesP(Integer.valueOf(i), expectedResults[i], valueSize);
     }
   }
@@ -1612,16 +1617,16 @@ public class SecurityTestUtil extends DistributedTestCase {
           + ", but #expected output " + results.length);
     }
     for (int i = 0; i < numOfUsers; i++) {
-      getLogWriter().info("FunctionExecute:onRegion MultiUser# " + i);
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("FunctionExecute:onRegion MultiUser# " + i);
       doFunctionExecuteP(Integer.valueOf(i), function, expectedResults[i], results[i], "region");
     }
     for (int i = 0; i < numOfUsers; i++) {
-      getLogWriter().info("FunctionExecute:onServer MultiUser# " + i);
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("FunctionExecute:onServer MultiUser# " + i);
       doFunctionExecuteP(Integer.valueOf(i), function, expectedResults[i], results[i], "server");
     }
     if (!isFailoverCase) {
       for (int i = 0; i < numOfUsers; i++) {
-        getLogWriter().info("FunctionExecute:onServers MultiUser# " + i);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("FunctionExecute:onServers MultiUser# " + i);
         doFunctionExecuteP(Integer.valueOf(i), function, expectedResults[i],
             results[i], "servers");
       }
@@ -1635,7 +1640,7 @@ public class SecurityTestUtil extends DistributedTestCase {
           + ", but #expected results " + expectedResults.length);
     }
     for (int i = 0; i < numOfUsers; i++) {
-      getLogWriter().info("QueryExecute: MultiUser# " + i);
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("QueryExecute: MultiUser# " + i);
       doQueryExecuteP(Integer.valueOf(i), expectedResults[i], result);
     }
   }
@@ -1682,7 +1687,7 @@ public class SecurityTestUtil extends DistributedTestCase {
           fail("Expected " + expectedResult + " but found "
               + e.getClass().getSimpleName() + " in doSimpleGet()");
         } else {
-          getLogWriter().fine(
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().fine(
               "Got expected " + e.getClass().getSimpleName()
                   + " in doSimpleGet()");
         }
@@ -1699,10 +1704,10 @@ public class SecurityTestUtil extends DistributedTestCase {
         }
       } catch (Exception e) {
         if (!e.getClass().getSimpleName().endsWith(expectedResult)) {
-          fail("Expected " + expectedResult + " but found "
+          Assert.fail("Expected " + expectedResult + " but found "
               + e.getClass().getSimpleName() + " in doSimplePut()", e);
         } else {
-          getLogWriter().fine(
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().fine(
               "Got expected " + e.getClass().getSimpleName()
                   + " in doSimplePut()");
         }
@@ -1730,11 +1735,11 @@ public class SecurityTestUtil extends DistributedTestCase {
         }
       }
       catch (IllegalAccessException ex) {
-        getLogWriter()
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
             .warning("Exception while clearing static SSL field.", ex);
       }
       catch (ClassCastException ex) {
-        getLogWriter()
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
             .warning("Exception while clearing static SSL field.", ex);
       }
     }
@@ -1754,7 +1759,7 @@ public class SecurityTestUtil extends DistributedTestCase {
         assertNull(field.get(obj));
       }
       catch (IllegalAccessException ex) {
-        getLogWriter().warning("Exception while clearing SSL fields.", ex);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().warning("Exception while clearing SSL fields.", ex);
       }
     }
   }
@@ -1779,7 +1784,7 @@ public class SecurityTestUtil extends DistributedTestCase {
         }
       }
       catch (IllegalAccessException ex) {
-        getLogWriter().warning("Exception while getting SSL fields.", ex);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().warning("Exception while getting SSL fields.", ex);
       }
     }
     return resultFields;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/Assert.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/Assert.java b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/Assert.java
new file mode 100755
index 0000000..5d927eb
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/Assert.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.test.dunit;
+
+/**
+ * Extends <code>org.junit.Assert</code> with additional assertion and fail
+ * methods. 
+ * 
+ * These methods can be used directly: <code>Assert.assertEquals(...)</code>, 
+ * however, they are intended to be referenced through static import:
+ *
+ * <pre>
+ * import static com.gemstone.gemfire.test.dunit.Assert.*;
+ *    ...
+ *    fail(...);
+ * </pre>
+ *
+ * Extracted from DistributedTestCase.
+ * 
+ * @see java.lang.AssertionError
+ */
+public class Assert extends org.junit.Assert {
+
+  protected Assert() {
+  }
+
+  /**
+   * Fails a test by throwing a new {@code AssertionError} with the specified
+   * detail message and cause.
+   *
+   * <p>Note that the detail message associated with
+   * {@code cause} is <i>not</i> automatically incorporated in
+   * this error's detail message.
+   *
+   * @param  message the detail message, may be {@code null}
+   * @param  cause the cause, may be {@code null}
+   *
+   * @see java.lang.AssertionError
+   */
+  public static void fail(final String message, final Throwable cause) {
+    if (message == null && cause == null) {
+      throw new AssertionError();
+    }    
+    if (message == null) {
+      throw new AssertionError(cause);
+    }
+    if (cause == null) {
+      throw new AssertionError(message);
+    }
+    throw new AssertionError(message, cause);
+  }
+}
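
For readers skimming the diff, a minimal usage sketch of the new two-argument fail() added above (this snippet is not part of the commit; the test class and the doWork() helper are made up for illustration):

    import static com.gemstone.gemfire.test.dunit.Assert.*;

    public class ExampleDUnitTest {
      public void testSomething() {
        try {
          doWork(); // hypothetical helper that may throw
        } catch (Exception e) {
          // keeps the original throwable as the AssertionError's cause
          fail("doWork() threw unexpectedly", e);
        }
      }
      private void doWork() throws Exception {
      }
    }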

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/AsyncInvocation.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/AsyncInvocation.java b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/AsyncInvocation.java
index 6735fe5..544638e 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/AsyncInvocation.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/AsyncInvocation.java
@@ -21,9 +21,6 @@ import java.util.concurrent.TimeoutException;
 import com.gemstone.gemfire.InternalGemFireError;
 import com.gemstone.gemfire.SystemFailure;
 
-// @todo davidw Add the ability to get a return value back from the
-// async method call.  (Use a static ThreadLocal field that is
-// accessible from the Runnable used in VM#invoke)
 /**
  * <P>An <code>AsyncInvocation</code> represents the invocation of a
  * remote invocation that executes asynchronously from its caller.  An
@@ -50,6 +47,9 @@ import com.gemstone.gemfire.SystemFailure;
  * @see VM#invokeAsync(Class, String)
  */
 public class AsyncInvocation<T> extends Thread {
+  //@todo davidw Add the ability to get a return value back from the
+  //async method call.  (Use a static ThreadLocal field that is
+  //accessible from the Runnable used in VM#invoke)
   
   private static final ThreadLocal returnValue = new ThreadLocal();
 
@@ -158,20 +158,22 @@ public class AsyncInvocation<T> extends Thread {
   //////////////////////  Inner Classes  //////////////////////
 
   /**
-   * A <code>ThreadGroup</code> that notices when an exception occurrs
+   * A <code>ThreadGroup</code> that notices when an exception occurs
    * during an <code>AsyncInvocation</code>.
+   * 
+   * TODO: reimplement using Futures
    */
   private static class AsyncInvocationGroup extends ThreadGroup {
     AsyncInvocationGroup() {
       super("Async Invocations");
     }
 
-    public void uncaughtException(Thread t, Throwable e) {
-      if (e instanceof VirtualMachineError) {
-        SystemFailure.setFailure((VirtualMachineError)e); // don't throw
+    public void uncaughtException(Thread thread, Throwable throwable) {
+      if (throwable instanceof VirtualMachineError) {
+        SystemFailure.setFailure((VirtualMachineError)throwable); // don't throw
       }
-      if (t instanceof AsyncInvocation) {
-        ((AsyncInvocation) t).exception = e;
+      if (thread instanceof AsyncInvocation) {
+        ((AsyncInvocation) thread).exception = throwable;
       }
     }
   }
@@ -202,8 +204,7 @@ public class AsyncInvocation<T> extends Thread {
     return this.returnedObj;
   }
   
-  public void run()
-  {
+  public void run() {
     super.run();
     this.returnedObj = (T) returnValue.get();
     returnValue.set(null);
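
As an aside for reviewers, a hedged sketch of how tests elsewhere in this commit drive an AsyncInvocation with the extracted helpers (the target method, arguments and timeout below are placeholders, not code from this change):

    // start the remote call asynchronously, then join and check for failure
    AsyncInvocation async = vm4.invokeAsync(WANTestBase.class, "doPuts",
        new Object[] { getTestMethodName() + "_RR", 1000 });
    ThreadUtils.join(async, 30 * 1000);
    if (async.exceptionOccurred()) {
      Assert.fail("async doPuts failed", async.getException());
    }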

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/DUnitEnv.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/DUnitEnv.java b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/DUnitEnv.java
index eea2d65..d662779 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/DUnitEnv.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/DUnitEnv.java
@@ -35,7 +35,6 @@ import com.gemstone.gemfire.test.dunit.standalone.BounceResult;
  * and run them on a different VM launching system.
  *   
  * @author dsmith
- *
  */
 public abstract class DUnitEnv {
   

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/DebuggerUtils.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/DebuggerUtils.java b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/DebuggerUtils.java
new file mode 100755
index 0000000..534eab2
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/DebuggerUtils.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.test.dunit;
+
+import com.gemstone.gemfire.internal.util.DebuggerSupport;
+
+/**
+ * <code>DebuggerUtils</code> provides static utility methods that facilitate
+ * runtime debugging.
+ * 
+ * These methods can be used directly: <code>DebuggerUtils.attachDebugger(...)</code>, 
+ * however, they are intended to be referenced through static import:
+ *
+ * <pre>
+ * import static com.gemstone.gemfire.test.dunit.DebuggerUtils.*;
+ *    ...
+ *    attachDebugger(...);
+ * </pre>
+ *
+ * Extracted from DistributedTestCase.
+ * 
+ * @see com.gemstone.gemfire.internal.util.DebuggerSupport
+ */
+public class DebuggerUtils {
+
+  protected DebuggerUtils() {
+  }
+  
+  @SuppressWarnings("serial")
+  public static void attachDebugger(final VM vm, final String message) {
+    vm.invoke(new SerializableRunnable(DebuggerSupport.class.getSimpleName()+" waitForJavaDebugger") {
+      public void run() {
+        DebuggerSupport.waitForJavaDebugger(message);
+      } 
+    });
+  }
+
+}
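
A hedged usage sketch for the new helper (the VM index and message are illustrative only): attachDebugger blocks the chosen dUnit VM until a Java debugger is attached, which is useful right before the step under investigation.

    VM vm1 = Host.getHost(0).getVM(1);
    DebuggerUtils.attachDebugger(vm1, "attach debugger before the failing put");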


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HAClearDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HAClearDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HAClearDUnitTest.java
index 2528b2b..58e54ed 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HAClearDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HAClearDUnitTest.java
@@ -39,6 +39,8 @@ import com.gemstone.gemfire.internal.cache.LocalRegion;
 import com.gemstone.gemfire.internal.cache.tier.sockets.ConflationDUnitTest;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 import com.gemstone.gemfire.cache.Region;
 
@@ -106,15 +108,13 @@ public class HAClearDUnitTest extends DistributedTestCase
 
   }
 
-  public void tearDown2() throws Exception
-  {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     client1.invoke(HAClearDUnitTest.class, "closeCache");
     client2.invoke(HAClearDUnitTest.class, "closeCache");
     server1.invoke(HAClearDUnitTest.class, "closeCache");
     server2.invoke(HAClearDUnitTest.class, "closeCache");
     closeCache();
-
   }
 
   /* The test perorms following operations
@@ -527,7 +527,7 @@ public class HAClearDUnitTest extends DistributedTestCase
       {
         Region region = cache.getRegion(Region.SEPARATOR + REGION_NAME);
         assertNotNull(region);
-        getLogWriter().info("Size of the region " + region.size());
+        LogWriterUtils.getLogWriter().info("Size of the region " + region.size());
         assertEquals(size, region.size());
       }
     };
@@ -542,7 +542,7 @@ public class HAClearDUnitTest extends DistributedTestCase
       public void run2() throws CacheException
       {
         Region region = cache.getRegion(Region.SEPARATOR + REGION_NAME);
-        getLogWriter().warning("Found region " + region);
+        LogWriterUtils.getLogWriter().warning("Found region " + region);
         assertNull(region);
       }
     };
@@ -559,14 +559,14 @@ public class HAClearDUnitTest extends DistributedTestCase
     PORT2 = ((Integer)server2.invoke(HAClearDUnitTest.class,
         "createServerCache")).intValue();
     client1.invoke(HAClearDUnitTest.class, "createClientCache", new Object[] {
-        getServerHostName(Host.getHost(0)),
+        NetworkUtils.getServerHostName(Host.getHost(0)),
         new Integer(PORT1), new Integer(PORT2), new Boolean(true),
         new Boolean(true) });
     client2.invoke(HAClearDUnitTest.class, "createClientCache", new Object[] {
-        getServerHostName(Host.getHost(0)),
+        NetworkUtils.getServerHostName(Host.getHost(0)),
         new Integer(PORT1), new Integer(PORT2), new Boolean(true),
         new Boolean(true) });
-    createClientCache(getServerHostName(Host.getHost(0)),
+    createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)),
         new Integer(PORT1), new Integer(PORT2),
         new Boolean(true), new Boolean(true));
   }
@@ -616,7 +616,7 @@ public class HAClearDUnitTest extends DistributedTestCase
       factory.setCacheListener(new CacheListenerAdapter() {
         public void afterRegionClear(RegionEvent event)
         {
-          getLogWriter().info("-------> afterRegionClear received");
+          LogWriterUtils.getLogWriter().info("-------> afterRegionClear received");
           synchronized (HAClearDUnitTest.class) {
             gotClearCallback = true;
             HAClearDUnitTest.class.notifyAll();
@@ -626,7 +626,7 @@ public class HAClearDUnitTest extends DistributedTestCase
         public void afterRegionDestroy(RegionEvent event)
         {
           synchronized (HAClearDUnitTest.class) {
-            getLogWriter().info("-------> afterRegionDestroy received");
+            LogWriterUtils.getLogWriter().info("-------> afterRegionDestroy received");
             gotDestroyRegionCallback = true;
             HAClearDUnitTest.class.notifyAll();
           }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HAConflationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HAConflationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HAConflationDUnitTest.java
index a86b50a..241ac39 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HAConflationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HAConflationDUnitTest.java
@@ -39,8 +39,10 @@ import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.CacheServerImpl;
 import com.gemstone.gemfire.internal.cache.tier.sockets.ConflationDUnitTest;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * This is Targetted conflation Dunit test.
@@ -112,17 +114,15 @@ public class HAConflationDUnitTest extends CacheTestCase
     server1.invoke(ConflationDUnitTest.class, "setIsSlowStart");
     server1.invoke(HAConflationDUnitTest.class, "makeDispatcherSlow");
     client1.invoke(HAConflationDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(host), new Integer(PORT1), new Boolean(true) });
+        new Object[] { NetworkUtils.getServerHostName(host), new Integer(PORT1), new Boolean(true) });
 
   }
 
-  public void tearDown2() throws Exception
-  {
-	super.tearDown2();
+  @Override
+  protected final void postTearDownCacheTestCase() throws Exception {
     client1.invoke(HAConflationDUnitTest.class, "closeCache");
     // close server
     server1.invoke(HAConflationDUnitTest.class, "closeCache");
-
   }
   
   public static void closeCache()
@@ -287,7 +287,7 @@ public class HAConflationDUnitTest extends CacheTestCase
             }
           };
           
-          waitForCriterion(w, 3 * 60 * 1000, interval, true);
+          Wait.waitForCriterion(w, 3 * 60 * 1000, interval, true);
       }
     };
     return checkEvents;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HADuplicateDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HADuplicateDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HADuplicateDUnitTest.java
index b4fd1ac..732e22a 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HADuplicateDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HADuplicateDUnitTest.java
@@ -40,6 +40,8 @@ import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.CacheServerImpl;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -105,15 +107,13 @@ public class HADuplicateDUnitTest extends DistributedTestCase
     client2 = host.getVM(3);
   }
 
-  public void tearDown2() throws Exception
-  {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     client1.invoke(HADuplicateDUnitTest.class, "closeCache");
     // close server
     server1.invoke(HADuplicateDUnitTest.class, "reSetQRMslow");
     server1.invoke(HADuplicateDUnitTest.class, "closeCache");
     server2.invoke(HADuplicateDUnitTest.class, "closeCache");
-
   }
 
   public void _testDuplicate() throws Exception
@@ -160,8 +160,8 @@ public class HADuplicateDUnitTest extends DistributedTestCase
   public void testSample() throws Exception
   {
 
-    addExpectedException("IOException");
-    addExpectedException("Connection reset");
+    IgnoredException.addIgnoredException("IOException");
+    IgnoredException.addIgnoredException("Connection reset");
     createClientServerConfiguration();
     server1.invoke(new CacheSerializableRunnable("putKey") {
 
@@ -225,7 +225,7 @@ public class HADuplicateDUnitTest extends DistributedTestCase
     PORT2 = ((Integer)server2.invoke(HADuplicateDUnitTest.class,
         "createServerCache")).intValue();
     client1.invoke(HADuplicateDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2) });
+        new Object[] { NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2) });
 
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HAEventIdPropagationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HAEventIdPropagationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HAEventIdPropagationDUnitTest.java
index ae3a4dc..af1eb8b 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HAEventIdPropagationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HAEventIdPropagationDUnitTest.java
@@ -46,9 +46,14 @@ import com.gemstone.gemfire.internal.cache.EntryEventImpl;
 import com.gemstone.gemfire.internal.cache.EventID;
 import com.gemstone.gemfire.internal.cache.RegionEventImpl;
 import com.gemstone.gemfire.internal.cache.tier.sockets.ConflationDUnitTest;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  *
@@ -92,9 +97,8 @@ public class HAEventIdPropagationDUnitTest extends DistributedTestCase
   }
 
   /** close the caches* */
-  public void tearDown2() throws Exception
-  {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     client1.invoke(HAEventIdPropagationDUnitTest.class, "closeCache");
     // close server
     server1.invoke(HAEventIdPropagationDUnitTest.class, "closeCache");
@@ -123,7 +127,7 @@ public class HAEventIdPropagationDUnitTest extends DistributedTestCase
     int PORT1 = ((Integer)server1.invoke(HAEventIdPropagationDUnitTest.class,
         "createServerCache")).intValue();
     client1.invoke(HAEventIdPropagationDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(server1.getHost()), new Integer(PORT1) });
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), new Integer(PORT1) });
   }
 
   /** create the server * */
@@ -212,9 +216,9 @@ public class HAEventIdPropagationDUnitTest extends DistributedTestCase
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 10 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 10 * 1000, 200, true);
     synchronized(map) {
-      getLogWriter().info("assertThreadIdToSequenceIdMapisNotNullButEmpty: map size is " + map.size());
+      LogWriterUtils.getLogWriter().info("assertThreadIdToSequenceIdMapisNotNullButEmpty: map size is " + map.size());
       assertTrue(map.size() == 1);
     }
 
@@ -415,7 +419,7 @@ public class HAEventIdPropagationDUnitTest extends DistributedTestCase
     }
     catch (Exception e) {
       e.printStackTrace();
-      fail("test failed due to " + e, e);
+      Assert.fail("test failed due to " + e, e);
     }
   }
 
@@ -683,7 +687,7 @@ public class HAEventIdPropagationDUnitTest extends DistributedTestCase
 
     public void afterCreate(EntryEvent event)
     {
-      getLogWriter().fine(" entered after created with " + event.getKey());
+      LogWriterUtils.getLogWriter().fine(" entered after created with " + event.getKey());
       boolean shouldNotify = false;
       Object key = event.getKey();
       if (key.equals(PUTALL_KEY1)) {
@@ -776,7 +780,7 @@ public class HAEventIdPropagationDUnitTest extends DistributedTestCase
 
     public void afterCreate(EntryEvent event)
     {
-      getLogWriter().fine(" entered after created with " + event.getKey());
+      LogWriterUtils.getLogWriter().fine(" entered after created with " + event.getKey());
       boolean shouldNotify = false;
       Object key = event.getKey();
       if (key.equals(PUTALL_KEY1)) {
@@ -852,14 +856,14 @@ public class HAEventIdPropagationDUnitTest extends DistributedTestCase
 
     public void afterRegionDestroy(RegionEvent event)
     {
-      getLogWriter().info("Before Regionestroy in Server");
+      LogWriterUtils.getLogWriter().info("Before Regionestroy in Server");
       eventId = ((RegionEventImpl)event).getEventId();
       assertNotNull(eventId);
       synchronized (lockObject) {
         receivedOperation = true;
         lockObject.notify();
       }
-      getLogWriter().info("After RegionDestroy in Server");
+      LogWriterUtils.getLogWriter().info("After RegionDestroy in Server");
     }
 
     public void afterRegionClear(RegionEvent event)

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HAExpiryDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HAExpiryDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HAExpiryDUnitTest.java
index 3d3eb0a..92ee889 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HAExpiryDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HAExpiryDUnitTest.java
@@ -36,8 +36,11 @@ import com.gemstone.gemfire.internal.cache.HARegion;
 import com.gemstone.gemfire.internal.cache.RegionQueue;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * This test checks Expiration of events in the regionqueue.
@@ -93,16 +96,14 @@ public class HAExpiryDUnitTest extends DistributedTestCase
 
   }
 
-  public void tearDown2() throws Exception
-  {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     vm0.invoke(HAExpiryDUnitTest.class, "closeCache");
     vm1.invoke(HAExpiryDUnitTest.class, "closeCache");
     vm2.invoke(HAExpiryDUnitTest.class, "closeCache");
     vm3.invoke(HAExpiryDUnitTest.class, "closeCache");
     cache = null;
-    invokeInEveryVM(new SerializableRunnable() { public void run() { cache = null; } });
-
+    Invoke.invokeInEveryVM(new SerializableRunnable() { public void run() { cache = null; } });
   }
 
   public void testExpiryPeriod() throws Exception
@@ -155,7 +156,7 @@ public class HAExpiryDUnitTest extends DistributedTestCase
     vm2.invoke(HAExpiryDUnitTest.class, "checkSizeBeforeExpiration");
     vm3.invoke(HAExpiryDUnitTest.class, "checkSizeBeforeExpiration");
 
-   pause(5000); // wait for some time to make sure that we give sufficient time
+   Wait.pause(5000); // wait for some time to make sure that we give sufficient time
                 // to expiry
    // in spite of giving time the events should not expire, and queue should be
    // same as before expiration
@@ -187,7 +188,7 @@ public class HAExpiryDUnitTest extends DistributedTestCase
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 60 * 1000, 200, true);
     /*
      * if (regionqueue.size() < 1) fail("RegionQueue size canot be less than 1
      * before expiration");
@@ -216,7 +217,7 @@ public class HAExpiryDUnitTest extends DistributedTestCase
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 60 * 1000, 200, true);
     
     /*
      * if (regionqueue.size() > regionQueueSize) fail("RegionQueue size should
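
The hunks above and below replace DistributedTestCase.waitForCriterion with the extracted Wait helper. As a quick orientation, a hedged sketch of the pattern the tests now follow (regionQueue and expectedSize stand in for whatever a given test actually polls):

    WaitCriterion ev = new WaitCriterion() {
      public boolean done() {
        return regionQueue.size() == expectedSize;
      }
      public String description() {
        return "region queue never reached size " + expectedSize;
      }
    };
    // 60s timeout, 200ms poll interval, throw on timeout
    Wait.waitForCriterion(ev, 60 * 1000, 200, true);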

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HAGIIBugDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HAGIIBugDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HAGIIBugDUnitTest.java
index 1c151e7..df754d0 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HAGIIBugDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HAGIIBugDUnitTest.java
@@ -37,10 +37,14 @@ import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.cache.EntryEventImpl;
 import com.gemstone.gemfire.internal.cache.HARegion;
 import com.gemstone.gemfire.internal.cache.RegionQueue;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -112,15 +116,14 @@ public class HAGIIBugDUnitTest extends DistributedTestCase
 
   }
 
-  public void tearDown2() throws Exception
-  {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     vm0.invoke(HAGIIBugDUnitTest.class, "closeCache");
     vm1.invoke(HAGIIBugDUnitTest.class, "closeCache");
     vm2.invoke(HAGIIBugDUnitTest.class, "closeCache");
     vm3.invoke(HAGIIBugDUnitTest.class, "closeCache");
     cache = null;
-    invokeInEveryVM(new SerializableRunnable() { public void run() { cache = null; } });
+    Invoke.invokeInEveryVM(new SerializableRunnable() { public void run() { cache = null; } });
   }
 
   protected void createCache(Properties props) throws Exception
@@ -136,7 +139,7 @@ public class HAGIIBugDUnitTest extends DistributedTestCase
   
   public void testDummy() throws Exception
   {
-    getLogWriter().info("This is Dummy test for the GII");  
+    LogWriterUtils.getLogWriter().info("This is Dummy test for the GII");  
   }
   
   
@@ -158,7 +161,7 @@ public class HAGIIBugDUnitTest extends DistributedTestCase
           factory.setCacheListener(regionListener);
           RegionAttributes attrs = factory.create();
           Region region = cache.createRegion(REGION_NAME, attrs);
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "Name of the region is : " + region.getFullPath());
 
           HARegionQueueAttributes hattr = new HARegionQueueAttributes();
@@ -183,14 +186,14 @@ public class HAGIIBugDUnitTest extends DistributedTestCase
     AsyncInvocation[] async = new AsyncInvocation[4];
     async[0] = vm0.invokeAsync(putFrmVm("vm0_2"));
     t1.start();
-    DistributedTestCase.join(t1, 30 * 1000, getLogWriter());
+    ThreadUtils.join(t1, 30 * 1000);
     if (isTestFailed)
       fail("HARegionQueue can not be created");
 
     for (int count = 0; count < 1; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
       if (async[count].exceptionOccurred()) {
-        fail("Got exception on " + count, async[count].getException());
+        Assert.fail("Got exception on " + count, async[count].getException());
       }
     }
 
@@ -204,7 +207,7 @@ public class HAGIIBugDUnitTest extends DistributedTestCase
 
     validationFlag = true;
     validateResults(validationFlag);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "No. of keys that are missed by HARegion Queue during GII "
             + keys_set_after_gii.size());
 
@@ -218,7 +221,7 @@ public class HAGIIBugDUnitTest extends DistributedTestCase
 //    int k = 0;
     for (int i = 0; i < 1; i++) {
       long totalPuts = ((Long)total_no_puts[i]).longValue() - 3 * NO_OF_PUTS;
-      getLogWriter().info("Total no of puts expectesd " + totalPuts);
+      LogWriterUtils.getLogWriter().info("Total no of puts expectesd " + totalPuts);
       for (int j = 0; j < totalPuts; j++) {
         keys_set_after_gii.add("vm" + i + "_2" + j);
 
@@ -239,7 +242,7 @@ public class HAGIIBugDUnitTest extends DistributedTestCase
   {
     HARegion regionForQueue = (HARegion)cache.getRegion(Region.SEPARATOR
         + HARegionQueue.createRegionName(HAExpiryDUnitTest.regionQueueName));
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Region Queue size : " + regionForQueue.keys().size());
     Iterator itr = regionForQueue.entries(false).iterator();
     while (itr.hasNext()) {
@@ -315,7 +318,7 @@ public class HAGIIBugDUnitTest extends DistributedTestCase
           }
           TOTAL_NO_OF_PUTS = TOTAL_NO_OF_PUTS + NO_OF_PUTS;
         }
-        getLogWriter().info("Total no of puts : " + TOTAL_NO_OF_PUTS);
+        LogWriterUtils.getLogWriter().info("Total no of puts : " + TOTAL_NO_OF_PUTS);
       }
     };
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HAGIIDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HAGIIDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HAGIIDUnitTest.java
index 93cd77e..40d44b4 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HAGIIDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HAGIIDUnitTest.java
@@ -47,10 +47,15 @@ import com.gemstone.gemfire.internal.cache.tier.sockets.CacheClientNotifier;
 import com.gemstone.gemfire.internal.cache.tier.sockets.ClientTombstoneMessage;
 import com.gemstone.gemfire.internal.cache.tier.sockets.ConflationDUnitTest;
 import com.gemstone.gemfire.internal.cache.versions.VersionSource;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Client is connected to S1 which has a slow dispatcher. Puts are made on S1.  Then S2 is started
@@ -146,7 +151,7 @@ public class HAGIIDUnitTest extends DistributedTestCase
     PORT2 =  AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
     //Start the client
     client0.invoke(HAGIIDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(host), new Integer(PORT1),new Integer(PORT2)});
+        new Object[] { NetworkUtils.getServerHostName(host), new Integer(PORT1),new Integer(PORT2)});
   }
   
   public void testGIIRegionQueue()
@@ -159,7 +164,7 @@ public class HAGIIDUnitTest extends DistributedTestCase
 
     client0.invoke(HAGIIDUnitTest.class, "verifyEntries");
     server1.invoke(HAGIIDUnitTest.class, "createServer2Cache" ,new Object[] {new Integer(PORT2)});
-    pause(6000);
+    Wait.pause(6000);
     server0.invoke(HAGIIDUnitTest.class, "stopServer");
     //pause(10000);
     client0.invoke(HAGIIDUnitTest.class, "verifyEntriesAfterGiiViaListener");
@@ -231,7 +236,7 @@ public class HAGIIDUnitTest extends DistributedTestCase
       r.registerInterest("key-3",InterestResultPolicy.KEYS_VALUES);
     }
     catch (Exception ex) {
-      fail("failed while registering keys ", ex);
+      Assert.fail("failed while registering keys ", ex);
     }
   }
   public static void createEntries()
@@ -245,7 +250,7 @@ public class HAGIIDUnitTest extends DistributedTestCase
 
     }
     catch (Exception ex) {
-      fail("failed while createEntries()", ex);
+      Assert.fail("failed while createEntries()", ex);
     }
   }
 
@@ -275,7 +280,7 @@ public class HAGIIDUnitTest extends DistributedTestCase
 
     }
     catch (Exception ex) {
-      fail("failed while r.put()", ex);
+      Assert.fail("failed while r.put()", ex);
     }
   }
   
@@ -326,7 +331,7 @@ public class HAGIIDUnitTest extends DistributedTestCase
           return null;
         }
       };
-      DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+      Wait.waitForCriterion(ev, 60 * 1000, 200, true);
       // assertEquals( "key-2",r.getEntry("key-2").getValue());
       
       // wait until we
@@ -340,11 +345,11 @@ public class HAGIIDUnitTest extends DistributedTestCase
           return null;
         }
       };
-      DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+      Wait.waitForCriterion(ev, 60 * 1000, 200, true);
       // assertEquals( "key-3",r.getEntry("key-3").getValue());
     }
     catch (Exception ex) {
-      fail("failed while verifyEntries()", ex);
+      Assert.fail("failed while verifyEntries()", ex);
     }
   }
 
@@ -359,7 +364,7 @@ public class HAGIIDUnitTest extends DistributedTestCase
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 90 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 90 * 1000, 200, true);
 
     ev = new WaitCriterion() {
       public boolean done() {
@@ -369,7 +374,7 @@ public class HAGIIDUnitTest extends DistributedTestCase
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 60 * 1000, 200, true);
 
     ev = new WaitCriterion() {
       public boolean done() {
@@ -379,7 +384,7 @@ public class HAGIIDUnitTest extends DistributedTestCase
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 60 * 1000, 200, true);
     
     assertEquals(3, HAGIIDUnitTest.checker.getUpdates());
   }
@@ -400,7 +405,7 @@ public class HAGIIDUnitTest extends DistributedTestCase
           return null;
         }
       };
-      DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+      Wait.waitForCriterion(ev, 60 * 1000, 200, true);
 
       // wait until
       // we have a
@@ -413,7 +418,7 @@ public class HAGIIDUnitTest extends DistributedTestCase
           return null;
         }
       };
-      DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+      Wait.waitForCriterion(ev, 60 * 1000, 200, true);
       // assertEquals( "key-2",r.getEntry("key-2").getValue());
 
 
@@ -428,7 +433,7 @@ public class HAGIIDUnitTest extends DistributedTestCase
           return null;
         }
       };
-      DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+      Wait.waitForCriterion(ev, 60 * 1000, 200, true);
       
       /*
        * assertEquals( "value-1",r.getEntry("key-1").getValue()); assertEquals(
@@ -438,7 +443,7 @@ public class HAGIIDUnitTest extends DistributedTestCase
 
     }
     catch (Exception ex) {
-      fail("failed while verifyEntriesAfterGII()", ex);
+      Assert.fail("failed while verifyEntriesAfterGII()", ex);
     }
   }
 
@@ -446,11 +451,11 @@ public class HAGIIDUnitTest extends DistributedTestCase
   {
       System.setProperty("slowStartTimeForTesting", "120000");
   }
-  public void tearDown2() throws Exception
-  {
-    super.tearDown2();
+
+  @Override
+  protected final void preTearDown() throws Exception {
     ConflationDUnitTest.unsetIsSlowStart();
-    invokeInEveryVM(ConflationDUnitTest.class, "unsetIsSlowStart");
+    Invoke.invokeInEveryVM(ConflationDUnitTest.class, "unsetIsSlowStart");
     // close the clients first
     client0.invoke(HAGIIDUnitTest.class, "closeCache");
     // then close the servers

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARQAddOperationJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARQAddOperationJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARQAddOperationJUnitTest.java
index 205ba4a..ad649d3 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARQAddOperationJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARQAddOperationJUnitTest.java
@@ -47,7 +47,7 @@ import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.cache.Conflatable;
 import com.gemstone.gemfire.internal.cache.EventID;
 import com.gemstone.gemfire.internal.logging.LogService;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
@@ -470,8 +470,8 @@ public class HARQAddOperationJUnitTest
     t1.start();
     t2.start();
 
-    DistributedTestCase.join(t1, 180 * 1000, null);
-    DistributedTestCase.join(t2, 180 * 1000, null);
+    ThreadUtils.join(t1, 180 * 1000);
+    ThreadUtils.join(t2, 180 * 1000);
 
     if (testFailed)
       fail("Test failed due to " + message);
@@ -528,8 +528,8 @@ public class HARQAddOperationJUnitTest
     t2.start();
     t1.start();
 
-    DistributedTestCase.join(t1, 180 * 1000, null);
-    DistributedTestCase.join(t2, 180 * 1000, null);
+    ThreadUtils.join(t1, 180 * 1000);
+    ThreadUtils.join(t2, 180 * 1000);
 
     if (testFailed)
       fail("Test failed due to " + message);
@@ -628,7 +628,7 @@ public class HARQAddOperationJUnitTest
     }
 
     for (int k = 0; k < numOfThreads; k++) {
-      DistributedTestCase.join(threads[k], 180 * 1000, null);
+      ThreadUtils.join(threads[k], 180 * 1000);
     }
 
     this.logWriter
@@ -711,7 +711,7 @@ public class HARQAddOperationJUnitTest
     }
 
     for (int k = 0; k < numOfThreads; k++) {
-      DistributedTestCase.join(threads[k], 180 * 1000, null);
+      ThreadUtils.join(threads[k], 180 * 1000);
     }
 
     if (testFailed)
@@ -779,7 +779,7 @@ public class HARQAddOperationJUnitTest
     }
 
     for (int k = 0; k < numOfThreads; k++) {
-      DistributedTestCase.join(threads[k], 180 * 1000, null);
+      ThreadUtils.join(threads[k], 180 * 1000);
     }
 
     if (testFailed)
@@ -853,7 +853,7 @@ public class HARQAddOperationJUnitTest
     }
 
     for (int k = 0; k < numOfThreads; k++) {
-      DistributedTestCase.join(threads[k], 180 * 1000, null);
+      ThreadUtils.join(threads[k], 180 * 1000);
     }
 
     if (testFailed)
@@ -902,7 +902,7 @@ public class HARQAddOperationJUnitTest
     }
 
     for (int k = 0; k < numOfPuts; k++) {
-      DistributedTestCase.join(threads_peek_remove[k], 180 * 1000, null);
+      ThreadUtils.join(threads_peek_remove[k], 180 * 1000);
     }
 
     if (testFailed)
@@ -965,7 +965,7 @@ public class HARQAddOperationJUnitTest
     }
 
     for (int k = 0; k < numOfThreads; k++) {
-      DistributedTestCase.join(threads[k], 180 * 1000, null);
+      ThreadUtils.join(threads[k], 180 * 1000);
     }
 
     if (testFailed)
@@ -1013,7 +1013,7 @@ public class HARQAddOperationJUnitTest
     }
 
     for (int k = 0; k < numOfPuts - 1; k++) {
-      DistributedTestCase.join(threads_peek_remove[k], 180 * 1000, null);
+      ThreadUtils.join(threads_peek_remove[k], 180 * 1000);
     }
 
     if (testFailed)

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARQueueNewImplDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARQueueNewImplDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARQueueNewImplDUnitTest.java
index 84c2f05..3691ba1 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARQueueNewImplDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARQueueNewImplDUnitTest.java
@@ -52,9 +52,13 @@ import com.gemstone.gemfire.internal.cache.tier.sockets.CacheServerTestUtil;
 import com.gemstone.gemfire.internal.cache.tier.sockets.ClientUpdateMessage;
 import com.gemstone.gemfire.internal.cache.tier.sockets.ConflationDUnitTest;
 import com.gemstone.gemfire.internal.cache.tier.sockets.HAEventWrapper;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 import util.TestException;
 
@@ -129,8 +133,8 @@ public class HARQueueNewImplDUnitTest extends DistributedTestCase {
   /**
    * Tears down the test.
    */
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     closeCache();
     clientVM1.invoke(HARQueueNewImplDUnitTest.class, "closeCache");
     clientVM2.invoke(HARQueueNewImplDUnitTest.class, "closeCache");
@@ -258,7 +262,7 @@ public class HARQueueNewImplDUnitTest extends DistributedTestCase {
       r.registerInterest("ALL_KEYS");
     }
     catch (Exception ex) {
-      fail("failed in registerInterestListAll", ex);
+      Assert.fail("failed in registerInterestListAll", ex);
     }
   }
 
@@ -271,7 +275,7 @@ public class HARQueueNewImplDUnitTest extends DistributedTestCase {
       r.registerInterest("k5");
     }
     catch (Exception ex) {
-      fail("failed while registering keys", ex);
+      Assert.fail("failed while registering keys", ex);
     }
   }
 
@@ -288,7 +292,7 @@ public class HARQueueNewImplDUnitTest extends DistributedTestCase {
       r.put("k5", "pv5");
     }
     catch (Exception ex) {
-      fail("failed in putEntries()", ex);
+      Assert.fail("failed in putEntries()", ex);
     }
   }
 
@@ -304,7 +308,7 @@ public class HARQueueNewImplDUnitTest extends DistributedTestCase {
       r.create("k5", "v5");
     }
     catch (Exception ex) {
-      fail("failed in createEntries()", ex);
+      Assert.fail("failed in createEntries()", ex);
     }
   }
 
@@ -317,7 +321,7 @@ public class HARQueueNewImplDUnitTest extends DistributedTestCase {
       }
     }
     catch (Exception ex) {
-      fail("failed in createEntries(Long)", ex);
+      Assert.fail("failed in createEntries(Long)", ex);
     }
   }
 
@@ -332,7 +336,7 @@ public class HARQueueNewImplDUnitTest extends DistributedTestCase {
       }
     }
     catch (Exception ex) {
-      fail("failed in putHeavyEntries(Long)", ex);
+      Assert.fail("failed in putHeavyEntries(Long)", ex);
     }
   }
 
@@ -350,11 +354,11 @@ public class HARQueueNewImplDUnitTest extends DistributedTestCase {
     serverVM1.invoke(ConflationDUnitTest.class, "setIsSlowStart",
         new Object[] { "30000" });
 
-    createClientCache(getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1");
-    final String client1Host = getServerHostName(clientVM1.getHost());
+    createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1");
+    final String client1Host = NetworkUtils.getServerHostName(clientVM1.getHost());
     clientVM1.invoke(HARQueueNewImplDUnitTest.class, "createClientCache",
         new Object[] { client1Host, new Integer(PORT1), new Integer(PORT2), "1" });
-    final String client2Host = getServerHostName(clientVM2.getHost());
+    final String client2Host = NetworkUtils.getServerHostName(clientVM2.getHost());
     clientVM2.invoke(HARQueueNewImplDUnitTest.class, "createClientCache",
         new Object[] { client2Host, new Integer(PORT1), new Integer(PORT2), "1" });
 
@@ -388,11 +392,11 @@ public class HARQueueNewImplDUnitTest extends DistributedTestCase {
     serverVM1.invoke(ConflationDUnitTest.class, "setIsSlowStart",
         new Object[] { "30000" });
 
-    createClientCache(getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1");
-    final String client1Host = getServerHostName(clientVM1.getHost());
+    createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1");
+    final String client1Host = NetworkUtils.getServerHostName(clientVM1.getHost());
     clientVM1.invoke(HARQueueNewImplDUnitTest.class, "createClientCache",
         new Object[] { client1Host, new Integer(PORT1), new Integer(PORT2), "1" });
-    final String client2Host = getServerHostName(clientVM2.getHost());
+    final String client2Host = NetworkUtils.getServerHostName(clientVM2.getHost());
     clientVM2.invoke(HARQueueNewImplDUnitTest.class, "createClientCache",
         new Object[] { client2Host, new Integer(PORT1), new Integer(PORT2), "1" });
 
@@ -427,11 +431,11 @@ public class HARQueueNewImplDUnitTest extends DistributedTestCase {
     serverVM0.invoke(ConflationDUnitTest.class, "setIsSlowStart",
         new Object[] { "30000" });
 
-    createClientCache(getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1");
-    final String client1Host = getServerHostName(clientVM1.getHost());
+    createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1");
+    final String client1Host = NetworkUtils.getServerHostName(clientVM1.getHost());
     clientVM1.invoke(HARQueueNewImplDUnitTest.class, "createClientCache",
         new Object[] { client1Host, new Integer(PORT1), new Integer(PORT2), "1" });
-    final String client2Host = getServerHostName(clientVM2.getHost());
+    final String client2Host = NetworkUtils.getServerHostName(clientVM2.getHost());
     clientVM2.invoke(HARQueueNewImplDUnitTest.class, "createClientCache",
         new Object[] { client2Host, new Integer(PORT1), new Integer(PORT2), "1" });
 
@@ -464,11 +468,11 @@ public class HARQueueNewImplDUnitTest extends DistributedTestCase {
     serverVM0.invoke(ConflationDUnitTest.class, "setIsSlowStart",
         new Object[] { "30000" });
 
-    createClientCache(getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1");
-    final String client1Host = getServerHostName(clientVM1.getHost());
+    createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1");
+    final String client1Host = NetworkUtils.getServerHostName(clientVM1.getHost());
     clientVM1.invoke(HARQueueNewImplDUnitTest.class, "createClientCache",
         new Object[] { client1Host, new Integer(PORT1), new Integer(PORT2), "1" });
-    final String client2Host = getServerHostName(clientVM2.getHost());
+    final String client2Host = NetworkUtils.getServerHostName(clientVM2.getHost());
     clientVM2.invoke(HARQueueNewImplDUnitTest.class, "createClientCache",
         new Object[] { client2Host, new Integer(PORT1), new Integer(PORT2), "1" });
 
@@ -506,11 +510,11 @@ public class HARQueueNewImplDUnitTest extends DistributedTestCase {
     serverVM1.invoke(ConflationDUnitTest.class, "setIsSlowStart",
         new Object[] { "30000" });
 
-    createClientCache(getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1");
-    final String client1Host = getServerHostName(clientVM1.getHost());
+    createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1");
+    final String client1Host = NetworkUtils.getServerHostName(clientVM1.getHost());
     clientVM1.invoke(HARQueueNewImplDUnitTest.class, "createClientCache",
         new Object[] { client1Host, new Integer(PORT1), new Integer(PORT2), "1" });
-    final String client2Host = getServerHostName(clientVM2.getHost());
+    final String client2Host = NetworkUtils.getServerHostName(clientVM2.getHost());
     clientVM2.invoke(HARQueueNewImplDUnitTest.class, "createClientCache",
         new Object[] { client2Host, new Integer(PORT1), new Integer(PORT2), "1" });
 
@@ -561,11 +565,11 @@ public class HARQueueNewImplDUnitTest extends DistributedTestCase {
     serverVM1.invoke(ConflationDUnitTest.class, "setIsSlowStart",
         new Object[] { "40000" });
 
-    createClientCache(getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1");
-    final String client1Host = getServerHostName(clientVM1.getHost());
+    createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1");
+    final String client1Host = NetworkUtils.getServerHostName(clientVM1.getHost());
     clientVM1.invoke(HARQueueNewImplDUnitTest.class, "createClientCache",
         new Object[] { client1Host, new Integer(PORT1), new Integer(PORT2), "1" });
-    final String client2Host = getServerHostName(clientVM2.getHost());
+    final String client2Host = NetworkUtils.getServerHostName(clientVM2.getHost());
     clientVM2.invoke(HARQueueNewImplDUnitTest.class, "createClientCache",
         new Object[] { client2Host, new Integer(PORT1), new Integer(PORT2), "1" });
 
@@ -617,11 +621,11 @@ public class HARQueueNewImplDUnitTest extends DistributedTestCase {
     Integer port3 = (Integer)serverVM0.invoke(HARQueueNewImplDUnitTest.class,
         "createOneMoreBridgeServer", new Object[] { Boolean.TRUE });
 
-    createClientCache(getServerHostName(Host.getHost(0)), new Integer(PORT1), port3, "0");
-    final String client1Host = getServerHostName(clientVM1.getHost());
+    createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT1), port3, "0");
+    final String client1Host = NetworkUtils.getServerHostName(clientVM1.getHost());
     clientVM1.invoke(HARQueueNewImplDUnitTest.class, "createClientCache",
         new Object[] { client1Host, new Integer(PORT1), new Integer(PORT2), "1" });
-    final String client2Host = getServerHostName(clientVM2.getHost());
+    final String client2Host = NetworkUtils.getServerHostName(clientVM2.getHost());
     clientVM2.invoke(HARQueueNewImplDUnitTest.class, "createClientCache",
         new Object[] { client2Host, new Integer(PORT1), new Integer(PORT2), "1" });
 
@@ -648,8 +652,8 @@ public class HARQueueNewImplDUnitTest extends DistributedTestCase {
     Integer port3 = (Integer)serverVM0.invoke(HARQueueNewImplDUnitTest.class,
         "createOneMoreBridgeServer", new Object[] { Boolean.FALSE });
 
-    createClientCache(getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1", Boolean.TRUE);
-    final String client1Host = getServerHostName(clientVM1.getHost());
+    createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1", Boolean.TRUE);
+    final String client1Host = NetworkUtils.getServerHostName(clientVM1.getHost());
     clientVM1.invoke(HARQueueNewImplDUnitTest.class, "createClientCache",
         new Object[] { client1Host, port3, new Integer(PORT2), "1", Boolean.TRUE });
 
@@ -686,11 +690,11 @@ public class HARQueueNewImplDUnitTest extends DistributedTestCase {
     serverVM0.invoke(ConflationDUnitTest.class, "setIsSlowStart",
         new Object[] { "30000" });
 
-    createClientCache(getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1");
-    final String client1Host = getServerHostName(clientVM1.getHost());
+    createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1");
+    final String client1Host = NetworkUtils.getServerHostName(clientVM1.getHost());
     clientVM1.invoke(HARQueueNewImplDUnitTest.class, "createClientCache",
         new Object[] { client1Host, new Integer(PORT1), new Integer(PORT2), "1" });
-    final String client2Host = getServerHostName(clientVM2.getHost());
+    final String client2Host = NetworkUtils.getServerHostName(clientVM2.getHost());
     clientVM2.invoke(HARQueueNewImplDUnitTest.class, "createClientCache",
         new Object[] { client2Host, new Integer(PORT1), new Integer(PORT2), "1" });
 
@@ -736,11 +740,11 @@ public class HARQueueNewImplDUnitTest extends DistributedTestCase {
     serverVM0.invoke(ConflationDUnitTest.class, "setIsSlowStart",
         new Object[] { "30000" });
 
-    createClientCache(getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1");
-    final String client1Host = getServerHostName(clientVM1.getHost());
+    createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1");
+    final String client1Host = NetworkUtils.getServerHostName(clientVM1.getHost());
     clientVM1.invoke(HARQueueNewImplDUnitTest.class, "createClientCache",
         new Object[] { client1Host, new Integer(PORT1), new Integer(PORT2), "1" });
-    final String client2Host = getServerHostName(clientVM2.getHost());
+    final String client2Host = NetworkUtils.getServerHostName(clientVM2.getHost());
     clientVM2.invoke(HARQueueNewImplDUnitTest.class, "createClientCache",
         new Object[] { client2Host, new Integer(PORT1), new Integer(PORT2), "1" });
 
@@ -780,11 +784,11 @@ public class HARQueueNewImplDUnitTest extends DistributedTestCase {
     serverVM0.invoke(ConflationDUnitTest.class, "setIsSlowStart",
         new Object[] { "30000" });
 
-    createClientCache(getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1");
-    final String client1Host = getServerHostName(clientVM1.getHost());
+    createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1");
+    final String client1Host = NetworkUtils.getServerHostName(clientVM1.getHost());
     clientVM1.invoke(HARQueueNewImplDUnitTest.class, "createClientCache",
         new Object[] { client1Host, new Integer(PORT1), new Integer(PORT2), "1" });
-    final String client2Host = getServerHostName(clientVM1.getHost());
+    final String client2Host = NetworkUtils.getServerHostName(clientVM1.getHost());
     clientVM2.invoke(HARQueueNewImplDUnitTest.class, "createClientCache",
         new Object[] { client2Host, new Integer(PORT1), new Integer(PORT2), "1" });
 
@@ -818,11 +822,11 @@ public class HARQueueNewImplDUnitTest extends DistributedTestCase {
    */
   public void testCMRNotReturnedByRootRegionsMethod() throws Exception {
 
-    createClientCache(getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1");
-    final String client1Host = getServerHostName(clientVM1.getHost());
+    createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1");
+    final String client1Host = NetworkUtils.getServerHostName(clientVM1.getHost());
     clientVM1.invoke(HARQueueNewImplDUnitTest.class, "createClientCache",
         new Object[] { client1Host, new Integer(PORT1), new Integer(PORT2), "1" });
-    final String client2Host = getServerHostName(clientVM2.getHost());
+    final String client2Host = NetworkUtils.getServerHostName(clientVM2.getHost());
     clientVM2.invoke(HARQueueNewImplDUnitTest.class, "createClientCache",
         new Object[] { client2Host, new Integer(PORT1), new Integer(PORT2), "1" });
 
@@ -867,11 +871,11 @@ public class HARQueueNewImplDUnitTest extends DistributedTestCase {
     serverVM1.invoke(ConflationDUnitTest.class, "setIsSlowStart",
         new Object[] { "60000" });
 
-    createClientCache(getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1");
-    final String client1Host = getServerHostName(clientVM1.getHost());
+    createClientCache(NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1");
+    final String client1Host = NetworkUtils.getServerHostName(clientVM1.getHost());
     clientVM1.invoke(HARQueueNewImplDUnitTest.class, "createClientCache",
         new Object[] { client1Host, new Integer(PORT1), new Integer(PORT2), "1" });
-    final String client2Host = getServerHostName(clientVM2.getHost());
+    final String client2Host = NetworkUtils.getServerHostName(clientVM2.getHost());
     clientVM2.invoke(HARQueueNewImplDUnitTest.class, "createClientCache",
         new Object[] { client2Host, new Integer(PORT1), new Integer(PORT2), "1" });
 
@@ -1073,7 +1077,7 @@ public class HARQueueNewImplDUnitTest extends DistributedTestCase {
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+    Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
 
     Set entries = msgsRegion.entrySet();
     Iterator iter = entries.iterator();
@@ -1206,7 +1210,7 @@ public class HARQueueNewImplDUnitTest extends DistributedTestCase {
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 120 * 1000, 1000, true);
+    Wait.waitForCriterion(wc, 120 * 1000, 1000, true);
   }
 
   public static void verifyRegionSize(final Integer regionSize, 
@@ -1244,7 +1248,7 @@ public class HARQueueNewImplDUnitTest extends DistributedTestCase {
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 120 * 1000, 1000, true);
+    Wait.waitForCriterion(wc, 120 * 1000, 1000, true);
   }
 
   public static void verifyHaContainerType(Boolean isRegion, Integer port) {
@@ -1308,7 +1312,7 @@ public class HARQueueNewImplDUnitTest extends DistributedTestCase {
             return null;
           }
         };
-        DistributedTestCase.waitForCriterion(ev, waitLimit.longValue(), 200, true);
+        Wait.waitForCriterion(ev, waitLimit.longValue(), 200, true);
       }
       else {
         WaitCriterion ev = new WaitCriterion() {
@@ -1319,7 +1323,7 @@ public class HARQueueNewImplDUnitTest extends DistributedTestCase {
             return null;
           }
         };
-        DistributedTestCase.waitForCriterion(ev, waitLimit.longValue(), 200, true);
+        Wait.waitForCriterion(ev, waitLimit.longValue(), 200, true);
       }
     }
     catch (Exception e) {
@@ -1351,7 +1355,7 @@ public class HARQueueNewImplDUnitTest extends DistributedTestCase {
           return null;
         }
       };
-      DistributedTestCase.waitForCriterion(ev, waitLimit.longValue(), 200, true);
+      Wait.waitForCriterion(ev, waitLimit.longValue(), 200, true);
     }
     catch (Exception e) {
       fail("failed in waitTillMessagesAreDispatched()" + e);

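The hunks above swap the waitForCriterion helper formerly reached through DistributedTestCase for the static one on com.gemstone.gemfire.test.dunit.Wait; the WaitCriterion callbacks themselves are unchanged. A minimal sketch of the new idiom follows. The helper name and the done() body are illustrative (only the description() overrides are visible in these excerpts), while the imports, the Wait call and its timing arguments come straight from the diff.

    import com.gemstone.gemfire.cache.Region;
    import com.gemstone.gemfire.test.dunit.Wait;
    import com.gemstone.gemfire.test.dunit.WaitCriterion;

    // Hypothetical helper name; the polling parameters (60 s limit, 1 s interval,
    // fail on timeout) mirror the Wait.waitForCriterion calls in the hunks above.
    public static void waitForRegionSize(final Region region, final int expectedSize) {
      WaitCriterion wc = new WaitCriterion() {
        public boolean done() {
          return region.size() == expectedSize;          // condition checked on each poll
        }
        public String description() {
          return "region size is " + region.size() + ", expected " + expectedSize;
        }
      };
      Wait.waitForCriterion(wc, 60 * 1000, 1000, true);  // last flag: fail the test on timeout
    }
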
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionDUnitTest.java
index 59780ef..d553d1c 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionDUnitTest.java
@@ -71,9 +71,8 @@ public class HARegionDUnitTest extends DistributedTestCase
   /**
    * close the cache in tearDown
    */
-  public void tearDown2() throws Exception
-  {
-	super.tearDown2();  
+  @Override
+  protected final void preTearDown() throws Exception {
     vm0.invoke(HARegionDUnitTest.class, "closeCache");
     vm1.invoke(HARegionDUnitTest.class, "closeCache");
   }
@@ -311,7 +310,7 @@ public class HARegionDUnitTest extends DistributedTestCase
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      fail("failed while region.put()", ex);
+      com.gemstone.gemfire.test.dunit.Assert.fail("failed while region.put()", ex);
     }
   }
 
@@ -327,7 +326,7 @@ public class HARegionDUnitTest extends DistributedTestCase
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      fail("failed while region.put()", ex);
+      com.gemstone.gemfire.test.dunit.Assert.fail("failed while region.put()", ex);
     }
   }
 
@@ -346,7 +345,7 @@ public class HARegionDUnitTest extends DistributedTestCase
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      fail("failed while region.get()", ex);
+      com.gemstone.gemfire.test.dunit.Assert.fail("failed while region.get()", ex);
     }
   }
 
@@ -365,7 +364,7 @@ public class HARegionDUnitTest extends DistributedTestCase
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      fail("failed while region.get()", ex);
+      com.gemstone.gemfire.test.dunit.Assert.fail("failed while region.get()", ex);
     }
   }
 
@@ -384,7 +383,7 @@ public class HARegionDUnitTest extends DistributedTestCase
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      fail("failed while region.get()", ex);
+      com.gemstone.gemfire.test.dunit.Assert.fail("failed while region.get()", ex);
     }
   }
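
This file also shows the two recurring shape changes in the commit: the JUnit-3 style tearDown2() override becomes a final preTearDown() hook, and the inherited fail(String, Throwable) is replaced by the static com.gemstone.gemfire.test.dunit.Assert.fail. A hedged sketch of the resulting test skeleton; class, method and helper names are placeholders, and the base class is assumed to invoke the hook from its own teardown (which is why the explicit super call disappears in the hunks above).

    import com.gemstone.gemfire.test.dunit.Assert;
    import com.gemstone.gemfire.test.dunit.DistributedTestCase;

    public class ExampleDUnitTest extends DistributedTestCase {

      public ExampleDUnitTest(String name) {
        super(name);
      }

      @Override
      protected final void preTearDown() throws Exception {
        // cleanup that used to live in tearDown2(); no super.tearDown2() call any more,
        // the base class is assumed to drive this hook from its own teardown
        closeCache();
      }

      private static void closeCache() {
        // stand-in for the closeCache() helpers invoked in the real tests
      }

      public void testExample() {
        try {
          // exercise the cache ...
        } catch (Exception ex) {
          // failure message plus original cause, replacing the inherited fail(String, Throwable)
          Assert.fail("failed while exercising the cache", ex);
        }
      }
    }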
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionQueueDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionQueueDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionQueueDUnitTest.java
index f4c50cd..da57b86 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionQueueDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionQueueDUnitTest.java
@@ -45,7 +45,10 @@ import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.HARegion;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  *
@@ -98,9 +101,8 @@ public class HARegionQueueDUnitTest extends DistributedTestCase
   /**
    * close the cache in tearDown
    */
-  public void tearDown2() throws Exception
-  {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     vm0.invoke(HARegionQueueDUnitTest.class, "closeCache");
     vm1.invoke(HARegionQueueDUnitTest.class, "closeCache");
     vm2.invoke(HARegionQueueDUnitTest.class, "closeCache");
@@ -294,7 +296,7 @@ public class HARegionQueueDUnitTest extends DistributedTestCase
             return null;
           }
         };
-        DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+        Wait.waitForCriterion(ev, 60 * 1000, 200, true);
       }
     });
 
@@ -403,7 +405,7 @@ public class HARegionQueueDUnitTest extends DistributedTestCase
           return null;
         }
       };
-      DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+      Wait.waitForCriterion(ev, 60 * 1000, 200, true);
 
       /*
        * if (region.get(new Long(0)) != null) { fail("Expected message to have
@@ -444,7 +446,7 @@ public class HARegionQueueDUnitTest extends DistributedTestCase
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      fail("failed while region.put()", ex);
+      com.gemstone.gemfire.test.dunit.Assert.fail("failed while region.put()", ex);
     }
   }
 
@@ -460,7 +462,7 @@ public class HARegionQueueDUnitTest extends DistributedTestCase
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      fail("failed while region.put()", ex);
+      com.gemstone.gemfire.test.dunit.Assert.fail("failed while region.put()", ex);
     }
   }
 
@@ -554,7 +556,7 @@ public class HARegionQueueDUnitTest extends DistributedTestCase
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      fail("failed while region.put()", ex);
+      com.gemstone.gemfire.test.dunit.Assert.fail("failed while region.put()", ex);
     }
   }
 
@@ -570,7 +572,7 @@ public class HARegionQueueDUnitTest extends DistributedTestCase
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      fail("failed while region.put()", ex);
+      com.gemstone.gemfire.test.dunit.Assert.fail("failed while region.put()", ex);
     }
   }
 
@@ -589,7 +591,7 @@ public class HARegionQueueDUnitTest extends DistributedTestCase
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      fail("failed while region.get()", ex);
+      com.gemstone.gemfire.test.dunit.Assert.fail("failed while region.get()", ex);
     }
   }
 
@@ -608,7 +610,7 @@ public class HARegionQueueDUnitTest extends DistributedTestCase
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      fail("failed while region.get()", ex);
+      com.gemstone.gemfire.test.dunit.Assert.fail("failed while region.get()", ex);
     }
   }
 
@@ -627,7 +629,7 @@ public class HARegionQueueDUnitTest extends DistributedTestCase
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      fail("failed while region.get()", ex);
+      com.gemstone.gemfire.test.dunit.Assert.fail("failed while region.get()", ex);
     }
   }
 
@@ -851,7 +853,7 @@ public class HARegionQueueDUnitTest extends DistributedTestCase
           if (opThreads[i].isInterrupted()) {
             fail("Test failed because  thread encountered exception");
           }
-          DistributedTestCase.join(opThreads[i], 30 * 1000, getLogWriter());
+          ThreadUtils.join(opThreads[i], 30 * 1000);
         }
       }
     };
@@ -965,8 +967,8 @@ public class HARegionQueueDUnitTest extends DistributedTestCase
             return null;
           }
         };
-        DistributedTestCase.waitForCriterion(ev, 30 * 1000, 200, true);
-        DistributedTestCase.join(createQueuesThread, 300 * 1000, getLogWriter());
+        Wait.waitForCriterion(ev, 30 * 1000, 200, true);
+        ThreadUtils.join(createQueuesThread, 300 * 1000);
       }
     };
 
@@ -980,7 +982,7 @@ public class HARegionQueueDUnitTest extends DistributedTestCase
         if (opThreads[0].isInterrupted()) {
           fail("The test has failed as it encountered interrupts in puts & takes");
         }
-        DistributedTestCase.join(opThreads[0], 30 * 1000, getLogWriter());
+        ThreadUtils.join(opThreads[0], 30 * 1000);
       }
     };
     vm0.invoke(joinWithThread);
@@ -1106,7 +1108,7 @@ public class HARegionQueueDUnitTest extends DistributedTestCase
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 60 * 1000, 200, true);
     // assertEquals(0, hrq.getAvalaibleIds().size());
   }
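
HARegionQueueDUnitTest also picks up the thread-join change: DistributedTestCase.join(thread, timeoutMs, logWriter) becomes ThreadUtils.join(thread, timeoutMs), dropping the LogWriter argument. A short sketch of the new form, assuming opThreads is a plain Thread array as in the test above:

    import com.gemstone.gemfire.test.dunit.ThreadUtils;

    for (Thread opThread : opThreads) {
      if (opThread.isInterrupted()) {
        fail("Test failed because thread encountered exception");
      }
      ThreadUtils.join(opThread, 30 * 1000);   // 30 s timeout, no LogWriter argument any more
    }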
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionQueueJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionQueueJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionQueueJUnitTest.java
index f73a5a6..48da630 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionQueueJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HARegionQueueJUnitTest.java
@@ -54,7 +54,7 @@ import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.cache.Conflatable;
 import com.gemstone.gemfire.internal.cache.EventID;
 import com.gemstone.gemfire.internal.cache.RegionQueue;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
@@ -292,7 +292,7 @@ public class HARegionQueueJUnitTest
     // call join on the put-threads so that this thread waits till they complete
     // before doing verfication
     for (i = 0; i < TOTAL_PUT_THREADS; i++) {
-      DistributedTestCase.join(putThreads[i], 30 * 1000, null);
+      ThreadUtils.join(putThreads[i], 30 * 1000);
     }
     assertFalse(encounteredException);
 
@@ -384,7 +384,7 @@ public class HARegionQueueJUnitTest
 
       long startTime = System.currentTimeMillis();
       for (int k = 0; k < threads.length; k++) {
-        DistributedTestCase.join(threads[k], 60 * 1000, null);
+        ThreadUtils.join(threads[k], 60 * 1000);
       }
 
       long totalTime = System.currentTimeMillis() - startTime;
@@ -1252,8 +1252,8 @@ public class HARegionQueueJUnitTest
       };
       thread1.start();
       thread2.start();
-      DistributedTestCase.join(thread1, 30 * 1000, null);
-      DistributedTestCase.join(thread2, 30 * 1000, null);
+      ThreadUtils.join(thread1, 30 * 1000);
+      ThreadUtils.join(thread2, 30 * 1000);
       List list2 = HARegionQueue.createMessageListForTesting();
       Iterator iterator = list1.iterator();
       boolean doOnce = false;
@@ -1363,8 +1363,8 @@ public class HARegionQueueJUnitTest
       };
       thread1.start();
       thread2.start();
-      DistributedTestCase.join(thread1, 30 * 1000, null);
-      DistributedTestCase.join(thread2, 30 * 1000, null);
+      ThreadUtils.join(thread1, 30 * 1000);
+      ThreadUtils.join(thread2, 30 * 1000);
       List list2 = HARegionQueue.createMessageListForTesting();
       Iterator iterator = list1.iterator();
       boolean doOnce = false;
@@ -1483,8 +1483,8 @@ public class HARegionQueueJUnitTest
       };
       thread1.start();
       thread2.start();
-      DistributedTestCase.join(thread1, 30 * 1000, null);
-      DistributedTestCase.join(thread2, 30 * 1000, null);
+      ThreadUtils.join(thread1, 30 * 1000);
+      ThreadUtils.join(thread2, 30 * 1000);
       List list2 = HARegionQueue.createMessageListForTesting();
       Iterator iterator = list1.iterator();
       boolean doOnce = true;
@@ -1649,8 +1649,8 @@ public class HARegionQueueJUnitTest
       };
       thread1.start();
       thread2.start();
-      DistributedTestCase.join(thread1, 30 * 1000, null);
-      DistributedTestCase.join(thread2, 30 * 1000, null);
+      ThreadUtils.join(thread1, 30 * 1000);
+      ThreadUtils.join(thread2, 30 * 1000);
       List list2 = HARegionQueue.createMessageListForTesting();
       Iterator iterator = list1.iterator();
       boolean doOnce = true;
@@ -1769,7 +1769,7 @@ public class HARegionQueueJUnitTest
 
       long startTime = System.currentTimeMillis();
       for (int k = 0; k < 3; k++) {
-        DistributedTestCase.join(threads[k], 180 * 1000, null);
+        ThreadUtils.join(threads[k], 180 * 1000);
       }
 
       long totalTime = System.currentTimeMillis() - startTime;
@@ -1848,7 +1848,7 @@ public class HARegionQueueJUnitTest
 
       long startTime = System.currentTimeMillis();
       for (int k = 0; k < 3; k++) {
-        DistributedTestCase.join(threads[k], 60 * 1000, null);
+        ThreadUtils.join(threads[k], 60 * 1000);
       }
 
       long totalTime = System.currentTimeMillis() - startTime;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HASlowReceiverDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HASlowReceiverDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HASlowReceiverDUnitTest.java
index 90fc817..6b805ad 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HASlowReceiverDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HASlowReceiverDUnitTest.java
@@ -37,9 +37,14 @@ import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.ClientServerObserverAdapter;
 import com.gemstone.gemfire.internal.cache.ClientServerObserverHolder;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheServerTestUtil;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 public class HASlowReceiverDUnitTest extends DistributedTestCase {
   protected static Cache cache = null;
@@ -86,8 +91,7 @@ public class HASlowReceiverDUnitTest extends DistributedTestCase {
   }
 
   @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  protected final void preTearDown() throws Exception {
     clientVM.invoke(HASlowReceiverDUnitTest.class, "closeCache");
 
     // then close the servers
@@ -196,7 +200,7 @@ public class HASlowReceiverDUnitTest extends DistributedTestCase {
       r.registerInterest("ALL_KEYS");
     }
     catch (Exception ex) {
-      fail("failed in registerInterestListAll", ex);
+      Assert.fail("failed in registerInterestListAll", ex);
     }
   }
 
@@ -211,7 +215,7 @@ public class HASlowReceiverDUnitTest extends DistributedTestCase {
       }
     }
     catch (Exception ex) {
-      fail("failed in putEntries()", ex);
+      Assert.fail("failed in putEntries()", ex);
     }
   }
 
@@ -224,7 +228,7 @@ public class HASlowReceiverDUnitTest extends DistributedTestCase {
       }
     }
     catch (Exception ex) {
-      fail("failed in createEntries(Long)", ex);
+      Assert.fail("failed in createEntries(Long)", ex);
     }
   }
 
@@ -239,20 +243,20 @@ public class HASlowReceiverDUnitTest extends DistributedTestCase {
             + ") to become " + redundantServers.intValue();
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 200 * 1000, 1000, true);
+    Wait.waitForCriterion(wc, 200 * 1000, 1000, true);
   }
 
   // Test slow client
   public void testSlowClient() throws Exception {
     setBridgeObeserverForAfterQueueDestroyMessage();
     clientVM.invoke(HASlowReceiverDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(Host.getHost(0)), new Integer(PORT0),
+        new Object[] { NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT0),
             new Integer(PORT1), new Integer(PORT2), new Integer(2) });
     clientVM.invoke(HASlowReceiverDUnitTest.class, "registerInterest");
     // add expected socket exception string
-    final ExpectedException ex1 = addExpectedException(SocketException.class
+    final IgnoredException ex1 = IgnoredException.addIgnoredException(SocketException.class
         .getName());
-    final ExpectedException ex2 = addExpectedException(InterruptedException.class
+    final IgnoredException ex2 = IgnoredException.addIgnoredException(InterruptedException.class
         .getName());
     putEntries();
     Thread.sleep(20000);// wait for put to block and allow server to remove

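HASlowReceiverDUnitTest shows the rename of ExpectedException to IgnoredException; the handle returned by addIgnoredException is still removed once the noisy section is over. A sketch of the pairing, with the exception classes taken from the hunk above and the try/finally shape borrowed from the other files in this commit:

    import java.net.SocketException;
    import com.gemstone.gemfire.test.dunit.IgnoredException;

    // tell the log scanner to ignore exceptions the test deliberately provokes
    final IgnoredException ex1 =
        IgnoredException.addIgnoredException(SocketException.class.getName());
    final IgnoredException ex2 =
        IgnoredException.addIgnoredException(InterruptedException.class.getName());
    try {
      // ... slow-receiver operations that may log these exceptions ...
    } finally {
      ex1.remove();   // stop ignoring once the window has passed
      ex2.remove();
    }
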
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/OperationsPropagationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/OperationsPropagationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/OperationsPropagationDUnitTest.java
index de0979e..154f9cc 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/OperationsPropagationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/OperationsPropagationDUnitTest.java
@@ -33,9 +33,13 @@ import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.CacheServerImpl;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  *
@@ -114,20 +118,18 @@ public class OperationsPropagationDUnitTest extends DistributedTestCase
     PORT2 = ((Integer)server2.invoke(OperationsPropagationDUnitTest.class,
         "createServerCache")).intValue();
     client1.invoke(OperationsPropagationDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(host), new Integer(PORT2) });
+        new Object[] { NetworkUtils.getServerHostName(host), new Integer(PORT2) });
 
   }
 
   /**
    * close the caches of the client and the servers
    */
-  public void tearDown2() throws Exception
-  {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     client1.invoke(OperationsPropagationDUnitTest.class, "closeCache");
     server1.invoke(OperationsPropagationDUnitTest.class, "closeCache");
     server2.invoke(OperationsPropagationDUnitTest.class, "closeCache");
-
   }
 
   /**
@@ -282,7 +284,7 @@ public class OperationsPropagationDUnitTest extends DistributedTestCase
       region.put(DESTROY_KEY, DESTROY_VALUE);
     }
     catch (Exception e) {
-      fail(" Test failed due to " + e, e);
+      Assert.fail(" Test failed due to " + e, e);
     }
 
   }
@@ -304,7 +306,7 @@ public class OperationsPropagationDUnitTest extends DistributedTestCase
       region.putAll(map);
     }
     catch (Exception e) {
-      fail(" Test failed due to " + e, e);
+      Assert.fail(" Test failed due to " + e, e);
     }
 
   }
@@ -326,7 +328,7 @@ public class OperationsPropagationDUnitTest extends DistributedTestCase
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
       
       /*
        * if (!(region.get(UPDATE_KEY).equals(UPDATE_VALUE1))) { fail(" Expected
@@ -343,7 +345,7 @@ public class OperationsPropagationDUnitTest extends DistributedTestCase
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
       
       /*
        * if (!(region.get(INVALIDATE_KEY).equals(INVALIDATE_VALUE))) { fail("
@@ -360,7 +362,7 @@ public class OperationsPropagationDUnitTest extends DistributedTestCase
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
       
       /*
        * if (!(region.get(DESTROY_KEY).equals(DESTROY_VALUE))) { fail(" Expected
@@ -370,7 +372,7 @@ public class OperationsPropagationDUnitTest extends DistributedTestCase
 
     }
     catch (Exception e) {
-      fail(" Test failed due to " + e, e);
+      Assert.fail(" Test failed due to " + e, e);
     }
 
   }
@@ -394,7 +396,7 @@ public class OperationsPropagationDUnitTest extends DistributedTestCase
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
       
       /*if (!(region.get(CREATE_KEY).equals(CREATE_VALUE))) {
        fail("CREATE operation did not propagate to client : Expected value to be "
@@ -410,7 +412,7 @@ public class OperationsPropagationDUnitTest extends DistributedTestCase
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
       
       /*if (!(region.get(UPDATE_KEY).equals(UPDATE_VALUE2))) {
        fail(" UPDATE operation did not propagate to Client : Expected value to be "
@@ -425,7 +427,7 @@ public class OperationsPropagationDUnitTest extends DistributedTestCase
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
       
 
       /*if (region.containsKey(DESTROY_KEY)) {
@@ -442,7 +444,7 @@ public class OperationsPropagationDUnitTest extends DistributedTestCase
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
       
       /*if (!(region.get(INVALIDATE_KEY) == null)) {
        fail(" INVALIDATE operation did not propagate to Client : Expected value to be null but it is "
@@ -458,7 +460,7 @@ public class OperationsPropagationDUnitTest extends DistributedTestCase
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
       
       /*
        * if (!(region.get(PUTALL_KEY).equals(PUTALL_VALUE))) { fail("PUTALL
@@ -475,10 +477,10 @@ public class OperationsPropagationDUnitTest extends DistributedTestCase
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
     }
     catch (Exception e) {
-      fail(" Test failed due to " + e, e);
+      Assert.fail(" Test failed due to " + e, e);
     }
   }
   
@@ -497,6 +499,6 @@ public class OperationsPropagationDUnitTest extends DistributedTestCase
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+    Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
   }
 }
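
The client setup in OperationsPropagationDUnitTest follows the NetworkUtils move seen throughout this commit: the inherited getServerHostName(Host) becomes the static NetworkUtils.getServerHostName(Host). A sketch of the call site, assuming client1 and PORT2 are the test fields shown above:

    import com.gemstone.gemfire.test.dunit.Host;
    import com.gemstone.gemfire.test.dunit.NetworkUtils;

    String serverHost = NetworkUtils.getServerHostName(Host.getHost(0));
    // hand the resolved host name to the client VM, as the invoke(...) above does
    client1.invoke(OperationsPropagationDUnitTest.class, "createClientCache",
        new Object[] { serverHost, new Integer(PORT2) });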

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/PutAllDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/PutAllDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/PutAllDUnitTest.java
index ef82f7a..ef75a38 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/PutAllDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/PutAllDUnitTest.java
@@ -44,6 +44,8 @@ import com.gemstone.gemfire.internal.cache.EntryEventImpl;
 import com.gemstone.gemfire.internal.cache.EventID;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -100,9 +102,8 @@ public class PutAllDUnitTest extends DistributedTestCase
   }
 
   /** close the caches**/
-  public void tearDown2() throws Exception
-  {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     client1.invoke(PutAllDUnitTest.class, "closeCache");
     client2.invoke(PutAllDUnitTest.class, "closeCache");
     // close server
@@ -134,11 +135,11 @@ public class PutAllDUnitTest extends DistributedTestCase
     PORT2 = ((Integer)server2.invoke(PutAllDUnitTest.class,
     "createServerCache")).intValue();
     client1.invoke(PutAllDUnitTest.class, "createClientCache1",
-        new Object[] { getServerHostName(server1.getHost()), new Integer(PORT1) });
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), new Integer(PORT1) });
     client2.invoke(PutAllDUnitTest.class, "createClientCache2",
-        new Object[] { getServerHostName(server1.getHost()), new Integer(PORT2) });
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), new Integer(PORT2) });
     try {
-      createClientCache2(getServerHostName(server1.getHost()), new Integer(PORT2));
+      createClientCache2(NetworkUtils.getServerHostName(server1.getHost()), new Integer(PORT2));
     }
     catch (Exception e) {
      fail(" test failed due to "+e);
@@ -548,7 +549,7 @@ public class PutAllDUnitTest extends DistributedTestCase
 
     public void afterCreate(EntryEvent event)
     {
-      getLogWriter().fine(" entered after created with "+event.getKey());
+      LogWriterUtils.getLogWriter().fine(" entered after created with "+event.getKey());
       boolean shouldNotify = false;
       Object key = event.getKey();
       if (key.equals(PUTALL_KEY1)) {

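Logging in the listener callbacks changes in the same mechanical way: the inherited getLogWriter() becomes the static LogWriterUtils.getLogWriter(), and the returned LogWriter is used as before. A one-line sketch, assuming an EntryEvent named event is in scope as in the afterCreate callback above:

    import com.gemstone.gemfire.test.dunit.LogWriterUtils;

    // same LogWriter API, obtained statically now
    LogWriterUtils.getLogWriter().fine("entered afterCreate with " + event.getKey());
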
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/StatsBugDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/StatsBugDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/StatsBugDUnitTest.java
index 788692f..dd8cf87 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/StatsBugDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/StatsBugDUnitTest.java
@@ -36,7 +36,10 @@ import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 import com.gemstone.gemfire.test.junit.categories.DistributedTest;
 
 /**
@@ -150,9 +153,8 @@ public class StatsBugDUnitTest extends DistributedTestCase
    * @throws Exception
    *           thrown if any problem occurs in closing cache
    */
-  public void tearDown2() throws Exception
-  {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     // close client
     client1.invoke(StatsBugDUnitTest.class, "closeCache");
 
@@ -176,13 +178,13 @@ public class StatsBugDUnitTest extends DistributedTestCase
    */
   public void testBug36109() throws Exception
   {
-    getLogWriter().info("testBug36109 : BEGIN");
+    LogWriterUtils.getLogWriter().info("testBug36109 : BEGIN");
     client1.invoke(StatsBugDUnitTest.class, "createClientCacheForInvalidates", new Object[] {
-        getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2) });
+        NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2) });
     client1.invoke(StatsBugDUnitTest.class, "prepopulateClient");
     primary.invoke(StatsBugDUnitTest.class, "doEntryOperations",
         new Object[] { primaryPrefix });
-    pause(3000);
+    Wait.pause(3000);
     primary.invoke(StatsBugDUnitTest.class, "stopServer");
     try {
       Thread.sleep(5000);
@@ -201,7 +203,7 @@ public class StatsBugDUnitTest extends DistributedTestCase
     }
 
     client1.invoke(StatsBugDUnitTest.class, "verifyNumInvalidates");
-    getLogWriter().info("testBug36109 : END");
+    LogWriterUtils.getLogWriter().info("testBug36109 : END");
   }
 
   /**
@@ -229,7 +231,7 @@ public class StatsBugDUnitTest extends DistributedTestCase
     server.setNotifyBySubscription(false);
     server.setSocketBufferSize(32768);
     server.start();
-    getLogWriter().info("Server started at PORT = " + port);
+    LogWriterUtils.getLogWriter().info("Server started at PORT = " + port);
     return new Integer(port);
   }
 
@@ -254,7 +256,7 @@ public class StatsBugDUnitTest extends DistributedTestCase
     RegionAttributes attrs = factory.create();
     Region region = cache.createRegion(REGION_NAME, attrs);
     region.registerInterest("ALL_KEYS");
-    getLogWriter().info("Client cache created");
+    LogWriterUtils.getLogWriter().info("Client cache created");
   }
 
   /**
@@ -278,7 +280,7 @@ public class StatsBugDUnitTest extends DistributedTestCase
     RegionAttributes attrs = factory.create();
     Region region = cache.createRegion(REGION_NAME, attrs);
     region.registerInterest("ALL_KEYS", false, false);
-    getLogWriter().info("Client cache created");
+    LogWriterUtils.getLogWriter().info("Client cache created");
   }
   
   /**
@@ -289,11 +291,11 @@ public class StatsBugDUnitTest extends DistributedTestCase
   public static void verifyNumInvalidates()
   {
     long invalidatesRecordedByStats = pool.getInvalidateCount();
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "invalidatesRecordedByStats = " + invalidatesRecordedByStats);
 
     int expectedInvalidates = TOTAL_SERVERS * PUTS_PER_SERVER;
-    getLogWriter().info("expectedInvalidates = " + expectedInvalidates);
+    LogWriterUtils.getLogWriter().info("expectedInvalidates = " + expectedInvalidates);
 
     if (invalidatesRecordedByStats != expectedInvalidates) {
       fail("Invalidates received by client(" + invalidatesRecordedByStats


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/AsyncEventQueueTestBase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/AsyncEventQueueTestBase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/AsyncEventQueueTestBase.java
index ce0f422..ff918b8 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/AsyncEventQueueTestBase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/AsyncEventQueueTestBase.java
@@ -75,9 +75,15 @@ import com.gemstone.gemfire.internal.cache.ForceReattemptException;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.PartitionedRegion;
 import com.gemstone.gemfire.internal.cache.RegionQueue;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 public class AsyncEventQueueTestBase extends DistributedTestCase {
 
@@ -132,7 +138,7 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
     // this is done to vary the number of dispatchers for sender
     // during every test method run
     shuffleNumDispatcherThreads();
-    invokeInEveryVM(AsyncEventQueueTestBase.class,
+    Invoke.invokeInEveryVM(AsyncEventQueueTestBase.class,
         "setNumDispatcherThreadsForTheRun",
         new Object[] { dispatcherThreads.get(0) });
   }
@@ -149,7 +155,7 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
     if (Locator.hasLocator()) {
       Locator.getLocator().stop();
     }
-    AsyncEventQueueTestBase test = new AsyncEventQueueTestBase(testName);
+    AsyncEventQueueTestBase test = new AsyncEventQueueTestBase(getTestMethodName());
     int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
     Properties props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
@@ -163,7 +169,7 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
   }
 
   public static Integer createFirstRemoteLocator(int dsId, int remoteLocPort) {
-    AsyncEventQueueTestBase test = new AsyncEventQueueTestBase(testName);
+    AsyncEventQueueTestBase test = new AsyncEventQueueTestBase(getTestMethodName());
     int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
     Properties props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
@@ -180,7 +186,7 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
 
   public static void createReplicatedRegionWithAsyncEventQueue(
       String regionName, String asyncQueueIds, Boolean offHeap) {
-    ExpectedException exp1 = addExpectedException(ForceReattemptException.class
+    IgnoredException exp1 = IgnoredException.addIgnoredException(ForceReattemptException.class
         .getName());
     try {
       AttributesFactory fact = new AttributesFactory();
@@ -223,7 +229,7 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
 
   public static void createReplicatedRegionWithSenderAndAsyncEventQueue(
       String regionName, String senderIds, String asyncChannelId, Boolean offHeap) {
-    ExpectedException exp = addExpectedException(ForceReattemptException.class
+    IgnoredException exp = IgnoredException.addIgnoredException(ForceReattemptException.class
         .getName());
     try {
 
@@ -367,7 +373,7 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
       Integer batchSize, boolean isConflation, boolean isPersistent,
       String diskStoreName, boolean isDiskSynchronous, int nDispatchers) {
 
-    ExpectedException exp = addExpectedException(ForceReattemptException.class
+    IgnoredException exp = IgnoredException.addIgnoredException(ForceReattemptException.class
         .getName());
 
     try {
@@ -443,7 +449,7 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
     else {
       persistentDirectory = new File(diskStoreName);
     }
-    getLogWriter().info("The ds is : " + persistentDirectory.getName());
+    LogWriterUtils.getLogWriter().info("The ds is : " + persistentDirectory.getName());
     persistentDirectory.mkdir();
     DiskStoreFactory dsf = cache.createDiskStoreFactory();
     File[] dirs1 = new File[] { persistentDirectory };
@@ -566,7 +572,7 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
       final Set<RegionQueue> queues = ((AbstractGatewaySender)sender)
           .getQueues();
 
-      waitForCriterion(new WaitCriterion() {
+      Wait.waitForCriterion(new WaitCriterion() {
 
         public String description() {
           return "Waiting for EventQueue size to be " + numQueueEntries;
@@ -590,9 +596,9 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
 
   public static void createPartitionedRegion(String regionName,
       String senderIds, Integer redundantCopies, Integer totalNumBuckets) {
-    ExpectedException exp = addExpectedException(ForceReattemptException.class
+    IgnoredException exp = IgnoredException.addIgnoredException(ForceReattemptException.class
         .getName());
-    ExpectedException exp1 = addExpectedException(PartitionOfflineException.class
+    IgnoredException exp1 = IgnoredException.addIgnoredException(PartitionOfflineException.class
         .getName());
     try {
       AttributesFactory fact = new AttributesFactory();
@@ -621,9 +627,9 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
 
   public static void createPartitionedRegionWithAsyncEventQueue(
       String regionName, String asyncEventQueueId, Boolean offHeap) {
-    ExpectedException exp = addExpectedException(ForceReattemptException.class
+    IgnoredException exp = IgnoredException.addIgnoredException(ForceReattemptException.class
         .getName());
-    ExpectedException exp1 = addExpectedException(PartitionOfflineException.class
+    IgnoredException exp1 = IgnoredException.addIgnoredException(PartitionOfflineException.class
         .getName());
     try {
       AttributesFactory fact = new AttributesFactory();
@@ -646,9 +652,9 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
       String regionName, String asyncEventQueueId, Integer totalNumBuckets,
       String colocatedWith) {
 
-    ExpectedException exp = addExpectedException(ForceReattemptException.class
+    IgnoredException exp = IgnoredException.addIgnoredException(ForceReattemptException.class
         .getName());
-    ExpectedException exp1 = addExpectedException(PartitionOfflineException.class
+    IgnoredException exp1 = IgnoredException.addIgnoredException(PartitionOfflineException.class
         .getName());
     try {
       AttributesFactory fact = new AttributesFactory();
@@ -687,7 +693,7 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
    */
   public static void createPRWithRedundantCopyWithAsyncEventQueue(
       String regionName, String asyncEventQueueId, Boolean offHeap) {
-    ExpectedException exp = addExpectedException(ForceReattemptException.class
+    IgnoredException exp = IgnoredException.addIgnoredException(ForceReattemptException.class
         .getName());
 
     try {
@@ -721,7 +727,7 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
   }
 
   protected static void createCache(Integer locPort) {
-    AsyncEventQueueTestBase test = new AsyncEventQueueTestBase(testName);
+    AsyncEventQueueTestBase test = new AsyncEventQueueTestBase(getTestMethodName());
     Properties props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
     props.setProperty(DistributionConfig.LOCATORS_NAME, "localhost[" + locPort
@@ -731,7 +737,7 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
   }
 
   public static void createCacheWithoutLocator(Integer mCastPort) {
-    AsyncEventQueueTestBase test = new AsyncEventQueueTestBase(testName);
+    AsyncEventQueueTestBase test = new AsyncEventQueueTestBase(getTestMethodName());
     Properties props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME, "" + mCastPort);
     InternalDistributedSystem ds = test.getSystem(props);
@@ -873,7 +879,7 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
         return "Expected sender primary state to be true but is false";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 10000, 1000, true);
+    Wait.waitForCriterion(wc, 10000, 1000, true);
   }
 
   private static GatewaySender getGatewaySenderById(Set<GatewaySender> senders,
@@ -891,7 +897,7 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
       boolean isParallel, Integer maxMemory, Integer batchSize,
       boolean isConflation, boolean isPersistent, GatewayEventFilter filter,
       boolean isManulaStart) {
-    final ExpectedException exln = addExpectedException("Could not connect");
+    final IgnoredException exln = IgnoredException.addIgnoredException("Could not connect");
     try {
       File persistentDirectory = new File(dsName + "_disk_"
           + System.currentTimeMillis() + "_" + VM.getCurrentVMNum());
@@ -966,11 +972,11 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
         return "Expected to wait for " + millisec + " millisec.";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, millisec, 500, false);
+    Wait.waitForCriterion(wc, millisec, 500, false);
   }
 
   public static int createReceiver(int locPort) {
-    AsyncEventQueueTestBase test = new AsyncEventQueueTestBase(testName);
+    AsyncEventQueueTestBase test = new AsyncEventQueueTestBase(getTestMethodName());
     Properties props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
     props.setProperty(DistributionConfig.LOCATORS_NAME, "localhost[" + locPort
@@ -1016,25 +1022,25 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
     try {
       RebalanceResults simulateResults = null;
       if (!heapEviction) {
-        getLogWriter().info("Calling rebalance simulate");
+        LogWriterUtils.getLogWriter().info("Calling rebalance simulate");
         RebalanceOperation simulateOp = factory.simulate();
         simulateResults = simulateOp.getResults();
       }
 
-      getLogWriter().info("Starting rebalancing");
+      LogWriterUtils.getLogWriter().info("Starting rebalancing");
       RebalanceOperation rebalanceOp = factory.start();
       RebalanceResults rebalanceResults = rebalanceOp.getResults();
 
     }
     catch (InterruptedException e) {
-      fail("Interrupted", e);
+      Assert.fail("Interrupted", e);
     }
   }
 
   public static void doPuts(String regionName, int numPuts) {
-    ExpectedException exp1 = addExpectedException(InterruptedException.class
+    IgnoredException exp1 = IgnoredException.addIgnoredException(InterruptedException.class
         .getName());
-    ExpectedException exp2 = addExpectedException(GatewaySenderException.class
+    IgnoredException exp2 = IgnoredException.addIgnoredException(GatewaySenderException.class
         .getName());
     try {
       Region r = cache.getRegion(Region.SEPARATOR + regionName);
@@ -1094,7 +1100,7 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
 
   public static void doNextPuts(String regionName, int start, int numPuts) {
     // waitForSitesToUpdate();
-    ExpectedException exp = addExpectedException(CacheClosedException.class
+    IgnoredException exp = IgnoredException.addIgnoredException(CacheClosedException.class
         .getName());
     try {
       Region r = cache.getRegion(Region.SEPARATOR + regionName);
@@ -1109,9 +1115,9 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
   }
 
   public static void validateRegionSize(String regionName, final int regionSize) {
-    ExpectedException exp = addExpectedException(ForceReattemptException.class
+    IgnoredException exp = IgnoredException.addIgnoredException(ForceReattemptException.class
         .getName());
-    ExpectedException exp1 = addExpectedException(CacheClosedException.class
+    IgnoredException exp1 = IgnoredException.addIgnoredException(CacheClosedException.class
         .getName());
     try {
 
@@ -1131,7 +1137,7 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
               + " present region keyset " + r.keySet();
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 240000, 500, true);
+      Wait.waitForCriterion(wc, 240000, 500, true);
     }
     finally {
       exp.remove();
@@ -1232,7 +1238,7 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
             + " but actual entries: " + eventsMap.size();
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60000, 500, true); // TODO:Yogs
+    Wait.waitForCriterion(wc, 60000, 500, true); // TODO:Yogs
   }
 
   public static void validateAsyncEventForOperationDetail(String asyncQueueId,
@@ -1263,7 +1269,7 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
             + " but actual entries: " + eventsMap.size();
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60000, 500, true); // TODO:Yogs
+    Wait.waitForCriterion(wc, 60000, 500, true); // TODO:Yogs
     Collection values = eventsMap.values();
     Iterator itr = values.iterator();
     while (itr.hasNext()) {
@@ -1302,7 +1308,7 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
             + " but actual entries: " + eventsMap.size();
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60000, 500, true); // TODO:Yogs
+    Wait.waitForCriterion(wc, 60000, 500, true); // TODO:Yogs
 
     Iterator<AsyncEvent> itr = eventsMap.values().iterator();
     while (itr.hasNext()) {
@@ -1350,7 +1356,7 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
               + size;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 60000, 500, true);
+      Wait.waitForCriterion(wc, 60000, 500, true);
 
     }
     else {
@@ -1377,7 +1383,7 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
               + size;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 60000, 500, true);
+      Wait.waitForCriterion(wc, 60000, 500, true);
     }
   }
 
@@ -1400,7 +1406,7 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
     for (int bucketId : bucketIds) {
       List<GatewaySenderEventImpl> eventsForBucket = bucketToEventsMap
           .get(bucketId);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "Events for bucket: " + bucketId + " is " + eventsForBucket);
       assertNotNull(eventsForBucket);
       for (int i = 0; i < batchSize; i++) {
@@ -1422,7 +1428,7 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
 
     final Map eventsMap = ((MyAsyncEventListener)theListener).getEventsMap();
     assertNotNull(eventsMap);
-    getLogWriter().info("The events map size is " + eventsMap.size());
+    LogWriterUtils.getLogWriter().info("The events map size is " + eventsMap.size());
     return eventsMap.size();
   }
 
@@ -1467,10 +1473,10 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
   }
 
   public static Boolean killSender(String senderId) {
-    final ExpectedException exln = addExpectedException("Could not connect");
-    ExpectedException exp = addExpectedException(CacheClosedException.class
+    final IgnoredException exln = IgnoredException.addIgnoredException("Could not connect");
+    IgnoredException exp = IgnoredException.addIgnoredException(CacheClosedException.class
         .getName());
-    ExpectedException exp1 = addExpectedException(ForceReattemptException.class
+    IgnoredException exp1 = IgnoredException.addIgnoredException(ForceReattemptException.class
         .getName());
     try {
       Set<GatewaySender> senders = cache.getGatewaySenders();
@@ -1482,7 +1488,7 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
         }
       }
       if (sender.isPrimary()) {
-        getLogWriter().info("Gateway sender is killed by a test");
+        LogWriterUtils.getLogWriter().info("Gateway sender is killed by a test");
         cache.getDistributedSystem().disconnect();
         return Boolean.TRUE;
       }
@@ -1505,7 +1511,7 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
       }
     }
     if (queue.isPrimary()) {
-      getLogWriter().info("AsyncEventQueue is killed by a test");
+      LogWriterUtils.getLogWriter().info("AsyncEventQueue is killed by a test");
       cache.getDistributedSystem().disconnect();
       return Boolean.TRUE;
     }
@@ -1513,10 +1519,10 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
   }
 
   public static void killSender() {
-    getLogWriter().info("Gateway sender is going to be killed by a test");
+    LogWriterUtils.getLogWriter().info("Gateway sender is going to be killed by a test");
     cache.close();
     cache.getDistributedSystem().disconnect();
-    getLogWriter().info("Gateway sender is killed by a test");
+    LogWriterUtils.getLogWriter().info("Gateway sender is killed by a test");
   }
 
   public static class MyLocatorCallback extends LocatorDiscoveryCallbackAdapter {
@@ -1565,8 +1571,8 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
     }
   }
   
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  @Override
+  protected final void postTearDown() throws Exception {
     cleanupVM();
     vm0.invoke(AsyncEventQueueTestBase.class, "cleanupVM");
     vm1.invoke(AsyncEventQueueTestBase.class, "cleanupVM");
@@ -1589,7 +1595,7 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
       cache = null;
     }
     else {
-      AsyncEventQueueTestBase test = new AsyncEventQueueTestBase(testName);
+      AsyncEventQueueTestBase test = new AsyncEventQueueTestBase(getTestMethodName());
       if (test.isConnectedToDS()) {
         test.getSystem().disconnect();
       }
@@ -1597,7 +1603,7 @@ public class AsyncEventQueueTestBase extends DistributedTestCase {
   }
 
   public static void shutdownLocator() {
-    AsyncEventQueueTestBase test = new AsyncEventQueueTestBase(testName);
+    AsyncEventQueueTestBase test = new AsyncEventQueueTestBase(getTestMethodName());
     test.getSystem().disconnect();
   }
 

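The hunks above (and those that follow for AsyncEventListenerDUnitTest) apply one repeated migration: test code that previously relied on members inherited from DistributedTestCase (pause, getLogWriter, addExpectedException, waitForCriterion) and on the protected testName field now calls the standalone dunit helpers (Wait, LogWriterUtils, IgnoredException) and getTestMethodName(). The following is a minimal sketch, not part of this patch, of what a test written directly against the new API looks like; it assumes the com.gemstone.gemfire.test.dunit classes are on the test classpath, and ExampleQueueDUnitTest / testQueueDrains are illustrative names only.

import com.gemstone.gemfire.internal.cache.wan.AsyncEventQueueTestBase;
import com.gemstone.gemfire.test.dunit.IgnoredException;
import com.gemstone.gemfire.test.dunit.LogWriterUtils;
import com.gemstone.gemfire.test.dunit.Wait;

public class ExampleQueueDUnitTest extends AsyncEventQueueTestBase {

  public ExampleQueueDUnitTest(String name) {
    super(name);
  }

  public void testQueueDrains() {
    // Region name is derived from the running test method instead of the old
    // protected testName field.
    String regionName = getTestMethodName() + "_RR";

    // Ignored exceptions are registered through IgnoredException rather than
    // the removed addExpectedException(..) helper; remove() for cleanup is
    // assumed here to mirror the old ExpectedException behaviour.
    IgnoredException ignored =
        IgnoredException.addIgnoredException("Could not connect");
    try {
      // Logging goes through LogWriterUtils instead of an inherited getLogWriter().
      LogWriterUtils.getLogWriter().info("running against region " + regionName);

      // Fixed sleeps use Wait.pause(..) in place of the inherited pause(..).
      Wait.pause(1000); // at least one batchTimeInterval
    } finally {
      ignored.remove();
    }
  }
}

Moving these helpers into dedicated utility classes makes the dependency explicit at each call site (Wait.pause, LogWriterUtils.getLogWriter, IgnoredException.addIgnoredException) instead of relying on inheritance, which is the pattern every hunk in this change follows.
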
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventListenerDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventListenerDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventListenerDUnitTest.java
index e696248..02ed4ef 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventListenerDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/wan/asyncqueue/AsyncEventListenerDUnitTest.java
@@ -32,6 +32,8 @@ import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.internal.cache.wan.AsyncEventQueueTestBase;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
 
@@ -50,7 +52,7 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
    * is passed.
    */
   public void testCreateAsyncEventQueueWithNullListener() {
-    AsyncEventQueueTestBase test = new AsyncEventQueueTestBase(testName);
+    AsyncEventQueueTestBase test = new AsyncEventQueueTestBase(getTestMethodName());
     Properties props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
     InternalDistributedSystem ds = test.getSystem(props);
@@ -100,13 +102,13 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
         false, 100, 100, false, false, null, false });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
 
     vm4
         .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
@@ -120,9 +122,9 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
     vm7
         .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
             new Object[] { "ln" });
-    pause(1000);// pause at least for the batchTimeInterval
+    Wait.pause(1000);// pause at least for the batchTimeInterval
 
-    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR",
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_RR",
         1000 });
 
     int vm4size = (Integer)vm4.invoke(AsyncEventQueueTestBase.class,
@@ -152,13 +154,13 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
         false, 100, 150, true, false, null, false, 2, OrderPolicy.KEY });
 
 	vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-		new Object[] { testName + "_RR", "ln", isOffHeap() });
+		new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
 	vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-		new Object[] { testName + "_RR", "ln", isOffHeap() });
+		new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
 	vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-	    new Object[] { testName + "_RR", "ln", isOffHeap() });
+	    new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
 	vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-		new Object[] { testName + "_RR", "ln", isOffHeap() });
+		new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
 
 	vm4
 	  .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
@@ -167,9 +169,9 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
 	  .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
 		new Object[] { "ln" });
 
-	pause(1000);// pause at least for the batchTimeInterval
+	Wait.pause(1000);// pause at least for the batchTimeInterval
 
-	vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR",
+	vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_RR",
 		1000 });
 
 	int vm4size = (Integer)vm4.invoke(AsyncEventQueueTestBase.class,
@@ -206,15 +208,15 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
         false, 100, 100, false, false, null, false });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
 
-    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR",
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_RR",
         1000 });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
@@ -250,15 +252,15 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
         false, 100, 100, false, false, null, false, "MyAsyncEventListener_CacheLoader" });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithCacheLoaderAndAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln" });
+        new Object[] { getTestMethodName() + "_RR", "ln" });
     vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithCacheLoaderAndAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln" });
+        new Object[] { getTestMethodName() + "_RR", "ln" });
     vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithCacheLoaderAndAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln" });
+        new Object[] { getTestMethodName() + "_RR", "ln" });
     vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithCacheLoaderAndAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln" });
+        new Object[] { getTestMethodName() + "_RR", "ln" });
 
-    vm4.invoke(AsyncEventQueueTestBase.class, "doGets", new Object[] { testName + "_RR",
+    vm4.invoke(AsyncEventQueueTestBase.class, "doGets", new Object[] { getTestMethodName() + "_RR",
         10 });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventForOperationDetail",
@@ -302,13 +304,13 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
         false, 100, 100, false, false, null, false, 1 });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     
     vm4
         .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
@@ -322,9 +324,9 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
     vm7
         .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
             new Object[] { "ln" });
-    pause(2000);// pause at least for the batchTimeInterval
+    Wait.pause(2000);// pause at least for the batchTimeInterval
 
-    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR",
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_RR",
         100 });
     
     vm4.invoke(AsyncEventQueueTestBase.class, "resumeAsyncEventQueue",
@@ -371,13 +373,13 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
         false, 100, 100, true, false, null, false });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
 
     vm4
         .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
@@ -391,7 +393,7 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
     vm7
         .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
             new Object[] { "ln" });
-    pause(1000);// pause at least for the batchTimeInterval
+    Wait.pause(1000);// pause at least for the batchTimeInterval
 
     final Map keyValues = new HashMap();
     final Map updateKeyValues = new HashMap();
@@ -400,9 +402,9 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
     }
 
     vm4.invoke(AsyncEventQueueTestBase.class, "putGivenKeyValue", new Object[] {
-        testName + "_RR", keyValues });
+        getTestMethodName() + "_RR", keyValues });
 
-    pause(1000);
+    Wait.pause(1000);
     vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueSize", new Object[] {
         "ln", keyValues.size() });
 
@@ -413,7 +415,7 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
     // Put the update events and check the queue size.
     // There should be no conflation with the previous create events.
     vm4.invoke(AsyncEventQueueTestBase.class, "putGivenKeyValue", new Object[] {
-        testName + "_RR", updateKeyValues });
+        getTestMethodName() + "_RR", updateKeyValues });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueSize", new Object[] {
         "ln", keyValues.size() + updateKeyValues.size() });
@@ -421,7 +423,7 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
     // Put the update events again and check the queue size.
     // There should be conflation with the previous update events.
     vm4.invoke(AsyncEventQueueTestBase.class, "putGivenKeyValue", new Object[] {
-        testName + "_RR", updateKeyValues });
+        getTestMethodName() + "_RR", updateKeyValues });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueSize", new Object[] {
         "ln", keyValues.size() + updateKeyValues.size() });
@@ -477,15 +479,15 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
         false, 100, 100, false, false, null, false });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
 
-    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR",
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_RR",
         1000 });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
@@ -526,15 +528,15 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
         false, 100, 100, true, false, null, false });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
 
-    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR",
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_RR",
         1000 });
     vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
         new Object[] { "ln", 1000 });// primary sender
@@ -570,13 +572,13 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
             100, true, null });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
 
     // pause async channel and then do the puts
     vm4
         .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
             new Object[] { "ln" });
-    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR",
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_RR",
         1000 });
 
     // ------------------ KILL VM4 AND REBUILD
@@ -587,7 +589,7 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
     vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueueWithDiskStore",
         new Object[] { "ln", false, 100, 100, true, firstDStore });
     vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     // -----------------------------------------------------------------------------------
 
     vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
@@ -619,16 +621,16 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
         new Object[] { "ln", false, 100, 100, true, null });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm4.invoke(AsyncEventQueueTestBase.class, "addCacheListenerAndCloseCache",
-        new Object[] { testName + "_RR" });
+        new Object[] { getTestMethodName() + "_RR" });
     vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
 
     vm5.invoke(AsyncEventQueueTestBase.class, "doPuts",
-        new Object[] { testName + "_RR", 2000 });
+        new Object[] { getTestMethodName() + "_RR", 2000 });
 
     // -----------------------------------------------------------------------------------
     vm5.invoke(AsyncEventQueueTestBase.class, "waitForSenderToBecomePrimary",
@@ -643,8 +645,8 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
     int vm5size = (Integer)vm5.invoke(AsyncEventQueueTestBase.class,
         "getAsyncEventListenerMapSize", new Object[] { "ln" });
 
-    getLogWriter().info("vm4 size is: " + vm4size);
-    getLogWriter().info("vm5 size is: " + vm5size);
+    LogWriterUtils.getLogWriter().info("vm4 size is: " + vm4size);
+    LogWriterUtils.getLogWriter().info("vm5 size is: " + vm5size);
     // verify that there is no event loss
     assertTrue(
         "Total number of entries in events map on vm4 and vm5 should be at least 2000",
@@ -678,15 +680,15 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
         false, 100, 100, true, false, null, false, 3, OrderPolicy.KEY });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
 
-    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR",
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_RR",
         1000 });
     vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
         new Object[] {"ln", 1000 });// primary sender
@@ -725,19 +727,19 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
         false, 100, 100, true, false, null, false, 3, OrderPolicy.THREAD });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
 
-    vm4.invokeAsync(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR",
+    vm4.invokeAsync(AsyncEventQueueTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_RR",
         500 });
-    vm4.invokeAsync(AsyncEventQueueTestBase.class, "doNextPuts", new Object[] { testName + "_RR",
+    vm4.invokeAsync(AsyncEventQueueTestBase.class, "doNextPuts", new Object[] { getTestMethodName() + "_RR",
       500, 1000 });
-    vm4.invokeAsync(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR",
+    vm4.invokeAsync(AsyncEventQueueTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_RR",
       1000, 1500 });
     
     vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
@@ -773,15 +775,15 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
         false, 100, 100, true, false, null, false, 3, null });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
 
-    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR",
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_RR",
         1000 });
     vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
         new Object[] {"ln", 1000 });// primary sender
@@ -818,18 +820,18 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
         false, 100, 100, false, false, null, false });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm5.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm6.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm7.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
 
-    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_PR",
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR",
         500 });
     vm5.invoke(AsyncEventQueueTestBase.class, "doPutsFrom", new Object[] {
-        testName + "_PR", 500, 1000 });
+        getTestMethodName() + "_PR", 500, 1000 });
     vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
         new Object[] { "ln", 1000 });// primary sender
     vm5.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
@@ -865,13 +867,13 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
         false, 100, 100, true, false, null, false });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm5.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm6.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm7.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
 
     vm4
         .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
@@ -886,7 +888,7 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
         .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
             new Object[] { "ln" });
     
-    pause(2000);
+    Wait.pause(2000);
 
     final Map keyValues = new HashMap();
     final Map updateKeyValues = new HashMap();
@@ -895,7 +897,7 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
     }
 
     vm4.invoke(AsyncEventQueueTestBase.class, "putGivenKeyValue", new Object[] {
-        testName + "_PR", keyValues });
+        getTestMethodName() + "_PR", keyValues });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueSize", new Object[] {
         "ln", keyValues.size() });
@@ -907,7 +909,7 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
     // Put the update events and check the queue size.
     // There should be no conflation with the previous create events.
     vm5.invoke(AsyncEventQueueTestBase.class, "putGivenKeyValue", new Object[] {
-        testName + "_PR", updateKeyValues });
+        getTestMethodName() + "_PR", updateKeyValues });
 
     vm5.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueSize", new Object[] {
         "ln", keyValues.size() + updateKeyValues.size() });
@@ -915,7 +917,7 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
     // Put the update events again and check the queue size.
     // There should be conflation with the previous update events.
     vm5.invoke(AsyncEventQueueTestBase.class, "putGivenKeyValue", new Object[] {
-      testName + "_PR", updateKeyValues });
+      getTestMethodName() + "_PR", updateKeyValues });
 
     vm5.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueSize", new Object[] {
       "ln", keyValues.size() + updateKeyValues.size() });
@@ -966,18 +968,18 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
         false, 100, 100, false, true, null, false });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm5.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm6.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm7.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
 
-    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_PR",
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR",
         500 });
     vm5.invoke(AsyncEventQueueTestBase.class, "doPutsFrom", new Object[] {
-        testName + "_PR", 500, 1000 });
+        getTestMethodName() + "_PR", 500, 1000 });
     vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
         new Object[] { "ln", 1000 });// primary sender
     vm5.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
@@ -1010,14 +1012,14 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
             100, true, null });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
 
     // pause async channel and then do the puts
     vm4
         .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueueAndWaitForDispatcherToPause",
             new Object[] { "ln" });
   
-    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_PR",
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR",
         1000 });
 
     // ------------------ KILL VM4 AND REBUILD
@@ -1028,7 +1030,7 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
     vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueueWithDiskStore",
         new Object[] { "ln", false, 100, 100, true, firstDStore });
     vm4.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     // -----------------------------------------------------------------------------------
 
     vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventListener",
@@ -1056,7 +1058,7 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
 
       vm4.invoke(AsyncEventQueueTestBase.class,
           "createReplicatedRegionWithAsyncEventQueue", new Object[] {
-              testName + "_RR", "ln", isOffHeap() });
+              getTestMethodName() + "_RR", "ln", isOffHeap() });
       fail("Expected GatewaySenderConfigException where parallel async event queue can not be used with replicated region");
     }
     catch (Exception e) {
@@ -1086,15 +1088,15 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
         true, 100, 100, false, false, null, false });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm5.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm6.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm7.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
 
-    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_PR",
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR",
         256 });
     
     vm4.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
@@ -1141,15 +1143,15 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
     	true, 100, 100, false, false, null, false, "MyAsyncEventListener_CacheLoader" });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithCacheLoaderAndAsyncQueue",
-    	new Object[] { testName + "_PR", "ln" });
+    	new Object[] { getTestMethodName() + "_PR", "ln" });
     vm5.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithCacheLoaderAndAsyncQueue",
-    	new Object[] { testName + "_PR", "ln" });
+    	new Object[] { getTestMethodName() + "_PR", "ln" });
     vm6.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithCacheLoaderAndAsyncQueue",
-    	new Object[] { testName + "_PR", "ln" });
+    	new Object[] { getTestMethodName() + "_PR", "ln" });
     vm7.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithCacheLoaderAndAsyncQueue",
-    	new Object[] { testName + "_PR", "ln" });
+    	new Object[] { getTestMethodName() + "_PR", "ln" });
 
-    vm4.invoke(AsyncEventQueueTestBase.class, "doPutAll", new Object[] { testName + "_PR",
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPutAll", new Object[] { getTestMethodName() + "_PR",
     	100, 10 });
     vm4.invoke(AsyncEventQueueTestBase.class, "validateAsyncEventForOperationDetail",
     	new Object[] { "ln", 250, false, true });
@@ -1180,13 +1182,13 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
         true, 100, 100, false, false, null, false });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm5.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm6.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm7.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
 
     vm4
         .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
@@ -1200,9 +1202,9 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
     vm7
         .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
             new Object[] { "ln" });
-    pause(1000);// pause at least for the batchTimeInterval
+    Wait.pause(1000);// pause at least for the batchTimeInterval
 
-    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_PR",
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR",
         1000 });
 
     int vm4size = (Integer)vm4.invoke(AsyncEventQueueTestBase.class,
@@ -1237,13 +1239,13 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
 	  true, 100, 100, false, false, null, false, 2, OrderPolicy.KEY });
 
 	vm4.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-	  new Object[] { testName + "_PR", "ln", isOffHeap() });
+	  new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
 	vm5.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-	  new Object[] { testName + "_PR", "ln", isOffHeap() });
+	  new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
 	vm6.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-	  new Object[] { testName + "_PR", "ln", isOffHeap() });
+	  new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
 	vm7.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-	  new Object[] { testName + "_PR", "ln", isOffHeap() });
+	  new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
 
 	vm4
 	  .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
@@ -1257,9 +1259,9 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
 	vm7
 	  .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
 		new Object[] { "ln" });
-	pause(1000);// pause at least for the batchTimeInterval
+	Wait.pause(1000);// pause at least for the batchTimeInterval
 
-	vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_PR",
+	vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR",
 	  1000 });
 
 	int vm4size = (Integer)vm4.invoke(AsyncEventQueueTestBase.class,
@@ -1290,13 +1292,13 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
         true, 100, 100, true, false, null, false });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm5.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm6.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm7.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
 
     vm4
         .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
@@ -1311,7 +1313,7 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
         .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
             new Object[] { "ln" });
 
-    pause(2000);// pause for the batchTimeInterval to ensure that all the
+    Wait.pause(2000);// pause for the batchTimeInterval to ensure that all the
     // senders are paused
 
     final Map keyValues = new HashMap();
@@ -1321,7 +1323,7 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
     }
 
     vm4.invoke(AsyncEventQueueTestBase.class, "putGivenKeyValue", new Object[] {
-        testName + "_PR", keyValues });
+        getTestMethodName() + "_PR", keyValues });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueSize", new Object[] {
         "ln", keyValues.size() });
@@ -1331,14 +1333,14 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
     }
 
     vm4.invoke(AsyncEventQueueTestBase.class, "putGivenKeyValue", new Object[] {
-        testName + "_PR", updateKeyValues });
+        getTestMethodName() + "_PR", updateKeyValues });
 
  
     vm4.invoke(AsyncEventQueueTestBase.class, "waitForAsyncEventQueueSize", new Object[] {
         "ln", keyValues.size() + updateKeyValues.size() }); // no conflation of creates
 
     vm4.invoke(AsyncEventQueueTestBase.class, "putGivenKeyValue", new Object[] {
-        testName + "_PR", updateKeyValues });
+        getTestMethodName() + "_PR", updateKeyValues });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "waitForAsyncEventQueueSize", new Object[] {
         "ln", keyValues.size() + updateKeyValues.size() }); // conflation of updates
@@ -1395,13 +1397,13 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
         true, 100, 100, true, false, null, false });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "createPRWithRedundantCopyWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm5.invoke(AsyncEventQueueTestBase.class, "createPRWithRedundantCopyWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm6.invoke(AsyncEventQueueTestBase.class, "createPRWithRedundantCopyWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm7.invoke(AsyncEventQueueTestBase.class, "createPRWithRedundantCopyWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
 
     vm4
         .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
@@ -1416,7 +1418,7 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
         .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
             new Object[] { "ln" });
 
-    pause(2000);// pause for the batchTimeInterval to ensure that all the
+    Wait.pause(2000);// pause for the batchTimeInterval to ensure that all the
     // senders are paused
 
     final Map keyValues = new HashMap();
@@ -1426,9 +1428,9 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
     }
 
     vm4.invoke(AsyncEventQueueTestBase.class, "putGivenKeyValue", new Object[] {
-        testName + "_PR", keyValues });
+        getTestMethodName() + "_PR", keyValues });
 
-    pause(2000);
+    Wait.pause(2000);
     vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueSize", new Object[] {
         "ln", keyValues.size() });
 
@@ -1437,13 +1439,13 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
     }
 
     vm4.invoke(AsyncEventQueueTestBase.class, "putGivenKeyValue", new Object[] {
-        testName + "_PR", updateKeyValues });
+        getTestMethodName() + "_PR", updateKeyValues });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "putGivenKeyValue", new Object[] {
-        testName + "_PR", updateKeyValues });
+        getTestMethodName() + "_PR", updateKeyValues });
 
     // pause to ensure that events have been conflated.
-    pause(2000);
+    Wait.pause(2000);
     vm4.invoke(AsyncEventQueueTestBase.class, "checkAsyncEventQueueSize", new Object[] {
         "ln", keyValues.size() + updateKeyValues.size() });
 
@@ -1501,17 +1503,17 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
 
     vm3.invoke(AsyncEventQueueTestBase.class,
         "createPartitionedRegionAccessorWithAsyncEventQueue", new Object[] {
-            testName + "_PR", "ln" });
+            getTestMethodName() + "_PR", "ln" });
     vm4.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm5.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm6.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm7.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
 
-    vm3.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_PR",
+    vm3.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR",
         256 });
     
     vm4.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
@@ -1558,15 +1560,15 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
         true, 100, 100, false, true, null, false });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm5.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm6.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm7.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_PR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_PR", "ln", isOffHeap() });
 
-    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_PR",
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_PR",
         256 });
     
     vm4.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
@@ -1617,15 +1619,15 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
         true, 100, 100, false, false, null, false });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm5.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm6.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
     vm7.invoke(AsyncEventQueueTestBase.class, "createReplicatedRegionWithAsyncEventQueue",
-        new Object[] { testName + "_RR", "ln", isOffHeap() });
+        new Object[] { getTestMethodName() + "_RR", "ln", isOffHeap() });
 
-    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { testName + "_RR",
+    vm4.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] { getTestMethodName() + "_RR",
         1000 });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
@@ -1660,23 +1662,23 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
     vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
     vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
 
-    getLogWriter().info("Created the cache");
+    LogWriterUtils.getLogWriter().info("Created the cache");
 
     vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueueWithListener2",
         new Object[] { "ln", true, 100, 5, false, null });
     vm5.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueueWithListener2",
         new Object[] { "ln", true, 100, 5, false, null });
 
-    getLogWriter().info("Created the AsyncEventQueue");
+    LogWriterUtils.getLogWriter().info("Created the AsyncEventQueue");
 
     vm4.invoke(AsyncEventQueueTestBase.class,
         "createPRWithRedundantCopyWithAsyncEventQueue", new Object[] {
-            testName + "_PR", "ln", isOffHeap() });
+            getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm5.invoke(AsyncEventQueueTestBase.class,
         "createPRWithRedundantCopyWithAsyncEventQueue", new Object[] {
-            testName + "_PR", "ln", isOffHeap() });
+            getTestMethodName() + "_PR", "ln", isOffHeap() });
 
-    getLogWriter().info("Created PR with AsyncEventQueue");
+    LogWriterUtils.getLogWriter().info("Created PR with AsyncEventQueue");
 
     vm4
         .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
@@ -1684,25 +1686,25 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
     vm5
         .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
             new Object[] { "ln" });
-    pause(1000);// pause for the batchTimeInterval to make sure the AsyncQueue
+    Wait.pause(1000);// pause for the batchTimeInterval to make sure the AsyncQueue
                 // is paused
 
-    getLogWriter().info("Paused the AsyncEventQueue");
+    LogWriterUtils.getLogWriter().info("Paused the AsyncEventQueue");
 
     vm4.invoke(AsyncEventQueueTestBase.class, "doPuts",
-        new Object[] { testName + "_PR", 80 });
+        new Object[] { getTestMethodName() + "_PR", 80 });
 
-    getLogWriter().info("Done puts");
+    LogWriterUtils.getLogWriter().info("Done puts");
 
     Set<Integer> primaryBucketsVm5 = (Set<Integer>)vm5.invoke(
         AsyncEventQueueTestBase.class, "getAllPrimaryBucketsOnTheNode",
-        new Object[] { testName + "_PR" });
+        new Object[] { getTestMethodName() + "_PR" });
 
-    getLogWriter().info("Primary buckets on vm5: " + primaryBucketsVm5);
+    LogWriterUtils.getLogWriter().info("Primary buckets on vm5: " + primaryBucketsVm5);
     // ---------------------------- Kill vm5 --------------------------
     vm5.invoke(AsyncEventQueueTestBase.class, "killSender", new Object[] {});
 
-    pause(1000);// give some time for rebalancing to happen
+    Wait.pause(1000);// give some time for rebalancing to happen
     vm4.invoke(AsyncEventQueueTestBase.class, "resumeAsyncEventQueue",
         new Object[] { "ln" });
 
@@ -1726,23 +1728,23 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
     vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
     vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
 
-    getLogWriter().info("Created the cache");
+    LogWriterUtils.getLogWriter().info("Created the cache");
 
     vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueueWithListener2",
         new Object[] { "ln", true, 100, 5, false, null });
     vm5.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueueWithListener2",
         new Object[] { "ln", true, 100, 5, false, null });
 
-    getLogWriter().info("Created the AsyncEventQueue");
+    LogWriterUtils.getLogWriter().info("Created the AsyncEventQueue");
 
     vm4.invoke(AsyncEventQueueTestBase.class,
         "createPRWithRedundantCopyWithAsyncEventQueue", new Object[] {
-            testName + "_PR", "ln", isOffHeap() });
+            getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm5.invoke(AsyncEventQueueTestBase.class,
         "createPRWithRedundantCopyWithAsyncEventQueue", new Object[] {
-            testName + "_PR", "ln", isOffHeap() });
+            getTestMethodName() + "_PR", "ln", isOffHeap() });
 
-    getLogWriter().info("Created PR with AsyncEventQueue");
+    LogWriterUtils.getLogWriter().info("Created PR with AsyncEventQueue");
 
     vm4
         .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
@@ -1750,21 +1752,21 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
     vm5
         .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
             new Object[] { "ln" });
-    pause(1000);// pause for the batchTimeInterval to make sure the AsyncQueue
+    Wait.pause(1000);// pause for the batchTimeInterval to make sure the AsyncQueue
                 // is paused
 
-    getLogWriter().info("Paused the AsyncEventQueue");
+    LogWriterUtils.getLogWriter().info("Paused the AsyncEventQueue");
 
     vm4.invoke(AsyncEventQueueTestBase.class, "doPuts",
-        new Object[] { testName + "_PR", 80 });
+        new Object[] { getTestMethodName() + "_PR", 80 });
 
-    getLogWriter().info("Done puts");
+    LogWriterUtils.getLogWriter().info("Done puts");
 
     Set<Integer> primaryBucketsVm5 = (Set<Integer>)vm5.invoke(
         AsyncEventQueueTestBase.class, "getAllPrimaryBucketsOnTheNode",
-        new Object[] { testName + "_PR" });
+        new Object[] { getTestMethodName() + "_PR" });
 
-    getLogWriter().info("Primary buckets on vm5: " + primaryBucketsVm5);
+    LogWriterUtils.getLogWriter().info("Primary buckets on vm5: " + primaryBucketsVm5);
     // ---------------------------- Kill vm5 --------------------------
     vm5.invoke(AsyncEventQueueTestBase.class, "killSender", new Object[] {});
     // ----------------------------------------------------------------
@@ -1775,14 +1777,14 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
         new Object[] { "ln", true, 100, 5, false, null });
     vm6.invoke(AsyncEventQueueTestBase.class,
         "createPRWithRedundantCopyWithAsyncEventQueue", new Object[] {
-            testName + "_PR", "ln", isOffHeap() });
+            getTestMethodName() + "_PR", "ln", isOffHeap() });
 
     // ------------------------------------------------------------------
 
-    pause(1000);// give some time for rebalancing to happen
+    Wait.pause(1000);// give some time for rebalancing to happen
     Set<Integer> primaryBucketsVm6 = (Set<Integer>)vm6.invoke(
         AsyncEventQueueTestBase.class, "getAllPrimaryBucketsOnTheNode",
-        new Object[] { testName + "_PR" });
+        new Object[] { getTestMethodName() + "_PR" });
 
     vm4.invoke(AsyncEventQueueTestBase.class, "resumeAsyncEventQueue",
         new Object[] { "ln" });
@@ -1810,23 +1812,23 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
     vm4.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
     vm5.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
 
-    getLogWriter().info("Created the cache");
+    LogWriterUtils.getLogWriter().info("Created the cache");
 
     vm4.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueueWithListener2",
         new Object[] { "ln", true, 100, 5, false, null });
     vm5.invoke(AsyncEventQueueTestBase.class, "createAsyncEventQueueWithListener2",
         new Object[] { "ln", true, 100, 5, false, null });
 
-    getLogWriter().info("Created the AsyncEventQueue");
+    LogWriterUtils.getLogWriter().info("Created the AsyncEventQueue");
 
     vm4.invoke(AsyncEventQueueTestBase.class,
         "createPRWithRedundantCopyWithAsyncEventQueue", new Object[] {
-            testName + "_PR", "ln", isOffHeap() });
+            getTestMethodName() + "_PR", "ln", isOffHeap() });
     vm5.invoke(AsyncEventQueueTestBase.class,
         "createPRWithRedundantCopyWithAsyncEventQueue", new Object[] {
-            testName + "_PR", "ln", isOffHeap() });
+            getTestMethodName() + "_PR", "ln", isOffHeap() });
 
-    getLogWriter().info("Created PR with AsyncEventQueue");
+    LogWriterUtils.getLogWriter().info("Created PR with AsyncEventQueue");
 
     vm4
         .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
@@ -1834,15 +1836,15 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
     vm5
         .invoke(AsyncEventQueueTestBase.class, "pauseAsyncEventQueue",
             new Object[] { "ln" });
-    pause(1000);// pause for the batchTimeInterval to make sure the AsyncQueue
+    Wait.pause(1000);// pause for the batchTimeInterval to make sure the AsyncQueue
                 // is paused
 
-    getLogWriter().info("Paused the AsyncEventQueue");
+    LogWriterUtils.getLogWriter().info("Paused the AsyncEventQueue");
 
     vm4.invoke(AsyncEventQueueTestBase.class, "doPuts",
-        new Object[] { testName + "_PR", 80 });
+        new Object[] { getTestMethodName() + "_PR", 80 });
 
-    getLogWriter().info("Done puts");
+    LogWriterUtils.getLogWriter().info("Done puts");
 
     // ---------------------------- start vm6 --------------------------
     vm6.invoke(AsyncEventQueueTestBase.class, "createCache", new Object[] { lnPort });
@@ -1850,15 +1852,15 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
         new Object[] { "ln", true, 100, 5, false, null });
     vm6.invoke(AsyncEventQueueTestBase.class,
         "createPRWithRedundantCopyWithAsyncEventQueue", new Object[] {
-            testName + "_PR", "ln", isOffHeap() });
+            getTestMethodName() + "_PR", "ln", isOffHeap() });
 
     // ------------------------------------------------------------------
     vm4.invoke(AsyncEventQueueTestBase.class, "doRebalance", new Object[] {});
 
     Set<Integer> primaryBucketsVm6 = (Set<Integer>)vm6.invoke(
         AsyncEventQueueTestBase.class, "getAllPrimaryBucketsOnTheNode",
-        new Object[] { testName + "_PR" });
-    getLogWriter().info("Primary buckets on vm6: " + primaryBucketsVm6);
+        new Object[] { getTestMethodName() + "_PR" });
+    LogWriterUtils.getLogWriter().info("Primary buckets on vm6: " + primaryBucketsVm6);
     vm4.invoke(AsyncEventQueueTestBase.class, "resumeAsyncEventQueue",
         new Object[] { "ln" });
     vm5.invoke(AsyncEventQueueTestBase.class, "resumeAsyncEventQueue",
@@ -1893,19 +1895,19 @@ public class AsyncEventListenerDUnitTest extends AsyncEventQueueTestBase {
 
     // create leader (parent) PR on node
     vm3.invoke(AsyncEventQueueTestBase.class, "createPartitionedRegion",
-        new Object[] { testName + "PARENT_PR", null, 0, 100 });
+        new Object[] { getTestMethodName() + "PARENT_PR", null, 0, 100 });
     String parentRegionFullPath = (String)vm3.invoke(
         AsyncEventQueueTestBase.class, "getRegionFullPath",
-        new Object[] { testName + "PARENT_PR" });
+        new Object[] { getTestMethodName() + "PARENT_PR" });
 
     // create colocated (child) PR on node
     vm3.invoke(AsyncEventQueueTestBase.class,
         "createColocatedPartitionedRegionWithAsyncEventQueue", new Object[] {
-            testName + "CHILD_PR", "ln", 100, parentRegionFullPath });
+            getTestMethodName() + "CHILD_PR", "ln", 100, parentRegionFullPath });
 
     // do puts in colocated (child) PR on node
     vm3.invoke(AsyncEventQueueTestBase.class, "doPuts", new Object[] {
-        testName + "CHILD_PR", 1000 });
+        getTestMethodName() + "CHILD_PR", 1000 });
 
     // wait for AsyncEventQueue to get empty on node
     vm3.invoke(AsyncEventQueueTestBase.class, "waitForAsyncQueueToGetEmpty",
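
The hunks above are all instances of the same mechanical substitution: members the test classes used to inherit from DistributedTestCase (the testName field, getLogWriter(), pause()) become explicit calls on the extracted utilities (getTestMethodName(), LogWriterUtils, Wait). A minimal sketch of the new call sites, not part of the patch; the helper class, method and region-name suffix are invented for illustration, and in the real tests the methodName argument is supplied by getTestMethodName().

    import com.gemstone.gemfire.test.dunit.LogWriterUtils;
    import com.gemstone.gemfire.test.dunit.Wait;

    public class MigratedCallsSketch {

      // Illustrative helper: builds a per-test region name and logs it the
      // way the migrated dunit tests above do.
      static String regionNameFor(String methodName) {
        String regionName = methodName + "_PR";
        // LogWriterUtils.getLogWriter() replaces the inherited getLogWriter().
        LogWriterUtils.getLogWriter().info("Using region " + regionName);
        // Wait.pause(ms) replaces the inherited pause(ms); semantics unchanged.
        Wait.pause(1000);
        return regionName;
      }
    }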


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionStatsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionStatsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionStatsDUnitTest.java
index 189d0d2..a31e1e7 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionStatsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionStatsDUnitTest.java
@@ -40,7 +40,9 @@ import com.gemstone.gemfire.internal.cache.control.InternalResourceManager;
 import com.gemstone.gemfire.internal.cache.control.InternalResourceManager.ResourceType;
 import com.gemstone.gemfire.internal.cache.lru.HeapEvictor;
 import com.gemstone.gemfire.internal.cache.lru.HeapLRUCapacityController;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -325,14 +327,14 @@ public class EvictionStatsDUnitTest extends CacheTestCase {
       @Override
       public void run2() throws CacheException {
         PartitionedRegion pr1 = (PartitionedRegion)cache.getRegion("PR1");
-        getLogWriter().info("dddd  local"+pr1.getLocalMaxMemory());
-        getLogWriter().info("dddd  local evi"+((AbstractLRURegionMap)pr1.entries)._getLruList().stats()
+        LogWriterUtils.getLogWriter().info("dddd  local"+pr1.getLocalMaxMemory());
+        LogWriterUtils.getLogWriter().info("dddd  local evi"+((AbstractLRURegionMap)pr1.entries)._getLruList().stats()
         .getEvictions());
-        getLogWriter().info("dddd  local entries"+((AbstractLRURegionMap)pr1.entries)._getLruList().stats()
+        LogWriterUtils.getLogWriter().info("dddd  local entries"+((AbstractLRURegionMap)pr1.entries)._getLruList().stats()
             .getCounter()/(1024*1024));
         HeapMemoryMonitor hmm = ((InternalResourceManager) cache.getResourceManager()).getHeapMonitor();
         long memused=hmm.getBytesUsed()/(1024*1024);
-        getLogWriter().info("dddd  local memused= "+memused);
+        LogWriterUtils.getLogWriter().info("dddd  local memused= "+memused);
         assertTrue(((AbstractLRURegionMap)pr1.entries)._getLruList().stats()
             .getEvictions() >= extraEntries / 2);
         assertEquals(((AbstractLRURegionMap)pr1.entries)._getLruList().stats()
@@ -410,12 +412,12 @@ public class EvictionStatsDUnitTest extends CacheTestCase {
       ds = getSystem(props);
       cache = CacheFactory.create(ds);
       assertNotNull(cache);
-      getLogWriter().info("cache= " + cache);
-      getLogWriter().info("cache closed= " + cache.isClosed());
+      LogWriterUtils.getLogWriter().info("cache= " + cache);
+      LogWriterUtils.getLogWriter().info("cache closed= " + cache.isClosed());
       cache.getResourceManager().setEvictionHeapPercentage(20);
     }
     catch (Exception e) {
-      fail("Failed while creating the cache", e);
+      Assert.fail("Failed while creating the cache", e);
     }
   }
 
@@ -482,7 +484,7 @@ public class EvictionStatsDUnitTest extends CacheTestCase {
 
     region = cache.createRegion(regionName, factory.create());
     assertNotNull(region);
-    getLogWriter().info("Partitioned Region created Successfully :" + region);
+    LogWriterUtils.getLogWriter().info("Partitioned Region created Successfully :" + region);
   }
 
   public static void putData(final String regionName, final int noOfElememts) {
@@ -541,7 +543,7 @@ public class EvictionStatsDUnitTest extends CacheTestCase {
       if (bucketRegion == null) {
         continue;
       }
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "Size of bucket " + bucketRegion.getName() + "of Pr " + prRegionName
               + " = " + bucketRegion.getCounter() / (1000000));
       bucketSize = bucketSize + bucketRegion.getCounter();
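
The EvictionStatsDUnitTest hunk above also qualifies the two-argument fail(String, Throwable) as Assert.fail. JUnit 3 itself only offers fail() and fail(String); the overload that carries a cause came from the test base class and now lives on the extracted dunit Assert. A small sketch of the pattern, with an invented helper and an InterruptedException as the stand-in cause:

    import com.gemstone.gemfire.test.dunit.Assert;

    public class FailWithCauseSketch {

      // Illustrative only: fails the test with the underlying exception
      // attached, the way the migrated createCache() helpers above do.
      static void sleepOrFail(long millis) {
        try {
          Thread.sleep(millis);
        } catch (InterruptedException e) {
          Assert.fail("Interrupted while waiting", e);
        }
      }
    }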

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionTestBase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionTestBase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionTestBase.java
index cbd2f65..6726747 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionTestBase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionTestBase.java
@@ -44,11 +44,14 @@ import com.gemstone.gemfire.internal.cache.control.InternalResourceManager.Resou
 import com.gemstone.gemfire.internal.cache.control.MemoryEvent;
 import com.gemstone.gemfire.internal.cache.control.MemoryThresholds.MemoryState;
 import com.gemstone.gemfire.internal.cache.lru.HeapEvictor;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 public class EvictionTestBase extends CacheTestCase {
 
@@ -85,18 +88,6 @@ public class EvictionTestBase extends CacheTestCase {
     dataStore4 = host.getVM(3);
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-    /*
-     * dataStore1.invoke(EvictionTestBase.class, "destroyObjects", new Object[] {
-     * setEvictionOn, evictionAlgorithm, regionName, totalNoOfBuckets,
-     * evictionAction, evictorInterval });
-     * dataStore2.invoke(EvictionTestBase.class, "createPartitionedRegion", new
-     * Object[] { setEvictionOn, evictionAlgorithm, regionName,
-     * totalNoOfBuckets, evictionAction, evictorInterval });
-     */
-  }
-
   public void prepareScenario1(EvictionAlgorithm evictionAlgorithm,int maxEntries) {
     createCacheInAllVms();
     createPartitionedRegionInAllVMS(true, evictionAlgorithm, "PR1",
@@ -138,7 +129,7 @@ public class EvictionTestBase extends CacheTestCase {
             .getEvictions();
           }
         };
-        DistributedTestCase.waitForCriterion(wc, 60000, 1000, true);
+        Wait.waitForCriterion(wc, 60000, 1000, true);
       }
     });
   }
@@ -290,14 +281,14 @@ public class EvictionTestBase extends CacheTestCase {
       ds = getSystem(props);
       cache = CacheFactory.create(ds);
       assertNotNull(cache);
-      getLogWriter().info("cache= " + cache);
-      getLogWriter().info("cache closed= " + cache.isClosed());
+      LogWriterUtils.getLogWriter().info("cache= " + cache);
+      LogWriterUtils.getLogWriter().info("cache closed= " + cache.isClosed());
       cache.getResourceManager().setEvictionHeapPercentage(85);
-      getLogWriter().info("eviction= "+cache.getResourceManager().getEvictionHeapPercentage());
-      getLogWriter().info("critical= "+cache.getResourceManager().getCriticalHeapPercentage());
+      LogWriterUtils.getLogWriter().info("eviction= "+cache.getResourceManager().getEvictionHeapPercentage());
+      LogWriterUtils.getLogWriter().info("critical= "+cache.getResourceManager().getCriticalHeapPercentage());
     }
     catch (Exception e) {
-      fail("Failed while creating the cache", e);
+      Assert.fail("Failed while creating the cache", e);
     }
   }
 
@@ -375,7 +366,7 @@ public class EvictionTestBase extends CacheTestCase {
 
     region = cache.createRegion(regionName, factory.create());
     assertNotNull(region);
-    getLogWriter().info("Partitioned Region created Successfully :" + region);
+    LogWriterUtils.getLogWriter().info("Partitioned Region created Successfully :" + region);
   }
 
   public static void putData(final String regionName, final int noOfElememts,
@@ -399,7 +390,7 @@ public class EvictionTestBase extends CacheTestCase {
         final Region pr = cache.getRegion("DR1");
         for (int counter = 1; counter <= noOfElememts; counter++) {
           pr.put(new Integer(counter), new byte[sizeOfElement * 1024 * 1024]);
-          getLogWriter().info("Amar put data element no->" + counter);
+          LogWriterUtils.getLogWriter().info("Amar put data element no->" + counter);
         }
       }
     });
@@ -422,7 +413,7 @@ public class EvictionTestBase extends CacheTestCase {
             if (bucketRegion == null) {
               continue;
             }
-            getLogWriter().info(
+            LogWriterUtils.getLogWriter().info(
                 "FINAL bucket= " + bucketRegion.getFullPath() + "size= "
                     + bucketRegion.size());
           }
@@ -437,7 +428,7 @@ public class EvictionTestBase extends CacheTestCase {
     };
     long evictionsInVM1 = (Long)dataStore1.invoke(validate);
     long evictionsInVM2 = (Long)dataStore2.invoke(validate);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "EEE evicitons = " + noOfEvictions + " "
             + (evictionsInVM1 + evictionsInVM2));
     assertEquals(noOfEvictions, (evictionsInVM1 + evictionsInVM2));
@@ -488,7 +479,7 @@ public class EvictionTestBase extends CacheTestCase {
         while(itr.hasNext())
         {
           BucketRegion br=(BucketRegion)itr.next();
-          getLogWriter().info("Print "+ br.size());
+          LogWriterUtils.getLogWriter().info("Print "+ br.size());
         }
       }
     });
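
EvictionTestBase above shows the waiting utilities after the extraction: WaitCriterion is now a top-level dunit interface, and the blocking call is Wait.waitForCriterion rather than a method inherited from DistributedTestCase. A sketch of the same polling idiom against an invented counter; the 60000/1000/true arguments mirror the ones used in the hunk:

    import java.util.concurrent.atomic.AtomicInteger;

    import com.gemstone.gemfire.test.dunit.Wait;
    import com.gemstone.gemfire.test.dunit.WaitCriterion;

    public class WaitSketch {

      // Illustrative only: blocks until the counter reaches the expected
      // value, polling every second for up to a minute and throwing if the
      // criterion is never met (the final "true" argument).
      static void waitForCounter(final AtomicInteger counter, final int expected) {
        WaitCriterion wc = new WaitCriterion() {
          public boolean done() {
            return counter.get() >= expected;
          }
          public String description() {
            return "expected " + expected + " but found " + counter.get();
          }
        };
        Wait.waitForCriterion(wc, 60000, 1000, true);
      }
    }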

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/FixedPRSinglehopDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/FixedPRSinglehopDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/FixedPRSinglehopDUnitTest.java
index 6a96948..86376d9 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/FixedPRSinglehopDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/FixedPRSinglehopDUnitTest.java
@@ -40,10 +40,13 @@ import com.gemstone.gemfire.internal.cache.partitioned.fixed.QuarterPartitionRes
 import com.gemstone.gemfire.internal.cache.partitioned.fixed.SingleHopQuarterPartitionResolver;
 import com.gemstone.gemfire.internal.cache.partitioned.fixed.FixedPartitioningTestBase.Q1_Months;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheServerTestUtil;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 import java.io.DataInput;
 import java.io.DataOutput;
@@ -281,7 +284,7 @@ public class FixedPRSinglehopDUnitTest extends CacheTestCase {
     VM server4 = host.getVM(3);
     Boolean simpleFPR = false;
     final int portLocator = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String hostLocator = getServerHostName(server1.getHost());
+    final String hostLocator = NetworkUtils.getServerHostName(server1.getHost());
     final String locator = hostLocator + "[" + portLocator + "]";
     server3.invoke(FixedPRSinglehopDUnitTest.class,
         "startLocatorInVM", new Object[] { portLocator });
@@ -308,7 +311,7 @@ public class FixedPRSinglehopDUnitTest extends CacheTestCase {
     putIntoPartitionedRegionsThreeQs();
 
     getFromPartitionedRegionsFor3Qs();
-    pause(2000);
+    Wait.pause(2000);
     // TODO: Verify that all the fpa's are in the map
     server1.invoke(FixedPRSinglehopDUnitTest.class, "printView");
     server2.invoke(FixedPRSinglehopDUnitTest.class, "printView");
@@ -331,13 +334,13 @@ public class FixedPRSinglehopDUnitTest extends CacheTestCase {
         FixedPRSinglehopDUnitTest.class, "createServerWithLocator", new Object[] { locator, false,
             fpaList, simpleFPR });    
     
-    pause(2000);
+    Wait.pause(2000);
     putIntoPartitionedRegions();
     // Client should get the new partition
     // TODO: Verify that
 
     getFromPartitionedRegions();
-    pause(2000);
+    Wait.pause(2000);
     server1.invoke(FixedPRSinglehopDUnitTest.class, "printView");
     server2.invoke(FixedPRSinglehopDUnitTest.class, "printView");
     server4.invoke(FixedPRSinglehopDUnitTest.class, "printView");
@@ -367,7 +370,7 @@ public class FixedPRSinglehopDUnitTest extends CacheTestCase {
       server.start();
     }
     catch (IOException e) {
-      fail("Failed to start server ", e);
+      Assert.fail("Failed to start server ", e);
     }
     
     if (!fpaList.isEmpty() || isAccessor) {
@@ -385,7 +388,7 @@ public class FixedPRSinglehopDUnitTest extends CacheTestCase {
       attr.setPartitionAttributes(paf.create());
       region = cache.createRegion(PR_NAME, attr.create());
       assertNotNull(region);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "Partitioned Region " + PR_NAME + " created Successfully :"
               + region.toString());
     }
@@ -412,7 +415,7 @@ public class FixedPRSinglehopDUnitTest extends CacheTestCase {
       server.start();
     }
     catch (IOException e) {
-      fail("Failed to start server ", e);
+      Assert.fail("Failed to start server ", e);
     }
     
     if (!fpaList.isEmpty() || isAccessor) {
@@ -431,7 +434,7 @@ public class FixedPRSinglehopDUnitTest extends CacheTestCase {
       attr.setPartitionAttributes(paf.create());
       region = cache.createRegion(PR_NAME, attr.create());
       assertNotNull(region);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "Partitioned Region " + PR_NAME + " created Successfully :"
               + region.toString());
     }
@@ -485,7 +488,7 @@ public class FixedPRSinglehopDUnitTest extends CacheTestCase {
     attr.setPartitionAttributes(paf.create());
     region = cache.createRegion(PR_NAME, attr.create());
     assertNotNull(region);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region " + PR_NAME + " created Successfully :"
             + region.toString());
 
@@ -602,7 +605,7 @@ public class FixedPRSinglehopDUnitTest extends CacheTestCase {
     RegionAttributes attrs = factory.create();
     region = cache.createRegion(PR_NAME, attrs);
     assertNotNull(region);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Distributed Region " + PR_NAME + " created Successfully :"
             + region.toString());
   }
@@ -859,7 +862,7 @@ public class FixedPRSinglehopDUnitTest extends CacheTestCase {
         return "expected no metadata to be refreshed";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60000, 1000, true);
+    Wait.waitForCriterion(wc, 60000, 1000, true);
     
     assertTrue(regionMetaData.containsKey(region.getFullPath()));
     final ClientPartitionAdvisor prMetaData = regionMetaData
@@ -873,7 +876,7 @@ public class FixedPRSinglehopDUnitTest extends CacheTestCase {
         return "expected no metadata to be refreshed";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60000, 1000, true);
+    Wait.waitForCriterion(wc, 60000, 1000, true);
     for (Entry entry : prMetaData.getBucketServerLocationsMap_TEST_ONLY()
         .entrySet()) {
       assertEquals(currentRedundancy, ((List)entry.getValue()).size());
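
FixedPRSinglehopDUnitTest above picks up NetworkUtils for the host-name lookup that was previously inherited as getServerHostName. A short sketch of the locator connect-string construction seen in the hunk; the helper class and method are invented for illustration:

    import com.gemstone.gemfire.test.dunit.Host;
    import com.gemstone.gemfire.test.dunit.NetworkUtils;

    public class LocatorStringSketch {

      // Illustrative only: resolves the server host name through
      // NetworkUtils and formats it as a "host[port]" locator entry.
      static String locatorFor(Host host, int locatorPort) {
        String hostName = NetworkUtils.getServerHostName(host);
        return hostName + "[" + locatorPort + "]";
      }
    }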

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/GIIDeltaDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/GIIDeltaDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/GIIDeltaDUnitTest.java
index 9f318fb..1e714f1 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/GIIDeltaDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/GIIDeltaDUnitTest.java
@@ -55,12 +55,17 @@ import com.gemstone.gemfire.internal.cache.persistence.DiskStoreID;
 import com.gemstone.gemfire.internal.cache.versions.RegionVersionVector;
 import com.gemstone.gemfire.internal.cache.versions.VersionTag;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
@@ -83,7 +88,7 @@ public class GIIDeltaDUnitTest extends CacheTestCase {
 //  protected static String REGION_NAME = GIIDeltaDUnitTest.class.getSimpleName()+"_Region";
   protected static String REGION_NAME = "_Region";
   final String expectedExceptions = GemFireIOException.class.getName();
-  protected ExpectedException expectedEx;
+  protected IgnoredException expectedEx;
   static Object giiSyncObject = new Object();
   
   /**
@@ -95,7 +100,7 @@ public class GIIDeltaDUnitTest extends CacheTestCase {
 
   public void setUp() throws Exception {
     super.setUp();
-    invokeInEveryVM(GIIDeltaDUnitTest.class,"setRegionName", new Object[]{getUniqueName()});
+    Invoke.invokeInEveryVM(GIIDeltaDUnitTest.class,"setRegionName", new Object[]{getUniqueName()});
     setRegionName(getUniqueName());
   }
   
@@ -103,7 +108,8 @@ public class GIIDeltaDUnitTest extends CacheTestCase {
     REGION_NAME = testName + "_Region";
   }
   
-  public void tearDown2() throws Exception {
+  @Override
+  protected final void preTearDownCacheTestCase() throws Exception {
     P.invoke(GIIDeltaDUnitTest.class, "resetSlowGII");
     R.invoke(GIIDeltaDUnitTest.class, "resetSlowGII");
     P.invoke(InitialImageOperation.class, "resetAllGIITestHooks");
@@ -113,8 +119,10 @@ public class GIIDeltaDUnitTest extends CacheTestCase {
     changeForceFullGII(P, false, false);
     P = null;
     R = null;
-    super.tearDown2();
-    
+  }
+  
+  @Override
+  protected final void postTearDownCacheTestCase() throws Exception {
     // clean up the test hook, which can be moved to CacheTestCase
     DistributedCacheOperation.SLOW_DISTRIBUTION_MS = 0;
     if (expectedEx != null) {
@@ -152,7 +160,7 @@ public class GIIDeltaDUnitTest extends CacheTestCase {
     createDistributedRegion(vm1);
     assignVMsToPandR(vm0, vm1);
     // from now on, use P and R as vmhttps://wiki.gemstone.com/display/gfepersistence/DeltaGII+Spec+for+8.0
-    expectedEx = addExpectedException(expectedExceptions);
+    expectedEx = IgnoredException.addIgnoredException(expectedExceptions);
   }
   
   // these steps are shared by all test cases
@@ -516,7 +524,7 @@ public class GIIDeltaDUnitTest extends CacheTestCase {
     // force tombstone GC to let RVVGC to become P4:R0
     changeTombstoneTimout(R, MAX_WAIT);
     changeTombstoneTimout(P, MAX_WAIT);
-    pause((int)MAX_WAIT);
+    Wait.pause((int)MAX_WAIT);
     forceGC(P, 2);
     waitForToVerifyRVV(P, memberP, 6, null, 4); // P's rvv=p6, gc=4
 
@@ -1244,7 +1252,7 @@ public class GIIDeltaDUnitTest extends CacheTestCase {
       }
     };
 
-    DistributedTestCase.waitForCriterion(ev, 30000, 200, true);
+    Wait.waitForCriterion(ev, 30000, 200, true);
     int count = getDeltaGIICount(P);
     assertEquals(2, count);
 
@@ -1253,7 +1261,7 @@ public class GIIDeltaDUnitTest extends CacheTestCase {
     changeTombstoneTimout(R, MAX_WAIT);
     changeTombstoneTimout(P, MAX_WAIT);
     changeTombstoneTimout(T, MAX_WAIT);
-    pause((int)MAX_WAIT);
+    Wait.pause((int)MAX_WAIT);
     forceGC(P, 2);
     waitForToVerifyRVV(P, memberP, 7, null, 0); // P's rvv=p7, gc=0
 
@@ -1270,7 +1278,7 @@ public class GIIDeltaDUnitTest extends CacheTestCase {
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev2, 30000, 200, true);
+    Wait.waitForCriterion(ev2, 30000, 200, true);
     count = getDeltaGIICount(P);
     assertEquals(0, count);
     verifyTombstoneExist(P, "key2", true, true); // expect key2 is still tombstone during and after GIIs
@@ -1372,7 +1380,7 @@ public class GIIDeltaDUnitTest extends CacheTestCase {
       }
     };
 
-    DistributedTestCase.waitForCriterion(ev, 30000, 200, true);
+    Wait.waitForCriterion(ev, 30000, 200, true);
     int count = getDeltaGIICount(P);
     assertEquals(1, count);
     
@@ -1380,7 +1388,7 @@ public class GIIDeltaDUnitTest extends CacheTestCase {
     // Wait for tombstone is GCed at R, but still exists in P
     changeTombstoneTimout(R, MAX_WAIT);
     changeTombstoneTimout(P, MAX_WAIT);
-    pause((int)MAX_WAIT);
+    Wait.pause((int)MAX_WAIT);
     forceGC(R, 3);
     forceGC(P, 3);
     
@@ -1432,7 +1440,7 @@ public class GIIDeltaDUnitTest extends CacheTestCase {
     forceAddGIICount(P);
     changeTombstoneTimout(R, MAX_WAIT);
     changeTombstoneTimout(P, MAX_WAIT);
-    pause((int)MAX_WAIT);
+    Wait.pause((int)MAX_WAIT);
     forceGC(R, 3);
     waitForToVerifyRVV(P, memberR, 6, null, 0); // P's rvv=r6, gc=0
     waitForToVerifyRVV(P, memberR, 6, null, 0); // P's rvv=r6, gc=0
@@ -2003,7 +2011,7 @@ public class GIIDeltaDUnitTest extends CacheTestCase {
     try {
       future.join(MAX_WAIT);
     } catch (InterruptedException e) {
-      fail("Create region is interrupted", e);
+      Assert.fail("Create region is interrupted", e);
     }
     if(future.isAlive()) {
       fail("Region not created within" + MAX_WAIT);
@@ -2022,10 +2030,10 @@ public class GIIDeltaDUnitTest extends CacheTestCase {
           RegionFactory f = getCache().createRegionFactory(getRegionAttributes());
 //          CCRegion = (LocalRegion)f.create(REGION_NAME);
           LocalRegion lr = (LocalRegion)f.create(REGION_NAME);
-          getLogWriter().info("In createDistributedRegion, using hydra.getLogWriter()");
-          getLogWriter().fine("Unfinished Op limit="+InitialImageOperation.MAXIMUM_UNFINISHED_OPERATIONS);
+          LogWriterUtils.getLogWriter().info("In createDistributedRegion, using hydra.getLogWriter()");
+          LogWriterUtils.getLogWriter().fine("Unfinished Op limit="+InitialImageOperation.MAXIMUM_UNFINISHED_OPERATIONS);
         } catch (CacheException ex) {
-          fail("While creating region", ex);
+          Assert.fail("While creating region", ex);
         }
       }
     };
@@ -2091,9 +2099,9 @@ public class GIIDeltaDUnitTest extends CacheTestCase {
   protected void removeSystemPropertiesInVM(VM vm, final String prop) {
     SerializableRunnable change = new SerializableRunnable() {
       public void run() {
-        getLogWriter().info("Current prop setting: "+prop+"="+System.getProperty(prop));
+        LogWriterUtils.getLogWriter().info("Current prop setting: "+prop+"="+System.getProperty(prop));
         System.getProperties().remove(prop);
-        getLogWriter().info(prop+"="+System.getProperty(prop));
+        LogWriterUtils.getLogWriter().info(prop+"="+System.getProperty(prop));
       }
     };
     vm.invoke(change);
@@ -2126,7 +2134,7 @@ public class GIIDeltaDUnitTest extends CacheTestCase {
     DiskStoreID dsid0 = getMemberID(vm0);
     DiskStoreID dsid1 = getMemberID(vm1);
     int compare = dsid0.compareTo(dsid1);
-    getLogWriter().info("Before assignVMsToPandR, dsid0 is "+dsid0+",dsid1 is "+dsid1+",compare="+compare);
+    LogWriterUtils.getLogWriter().info("Before assignVMsToPandR, dsid0 is "+dsid0+",dsid1 is "+dsid1+",compare="+compare);
     if (compare > 0) {
       P = vm0;
       R = vm1;
@@ -2134,7 +2142,7 @@ public class GIIDeltaDUnitTest extends CacheTestCase {
       P = vm1;
       R = vm0;
     }
-    getLogWriter().info("After assignVMsToPandR, P is "+P.getPid()+"; R is "+R.getPid()+" for region "+REGION_NAME);
+    LogWriterUtils.getLogWriter().info("After assignVMsToPandR, P is "+P.getPid()+"; R is "+R.getPid()+" for region "+REGION_NAME);
   }
   
   private DiskStoreID getMemberID(VM vm) {
@@ -2188,7 +2196,7 @@ public class GIIDeltaDUnitTest extends CacheTestCase {
           for (long i:exceptionList) {
             exceptionListVerified = !rvv.contains(member, i);
             if (!exceptionListVerified) {
-              getLogWriter().finer("DeltaGII:missing exception "+i+":"+rvv);
+              LogWriterUtils.getLogWriter().finer("DeltaGII:missing exception "+i+":"+rvv);
               break;
             }
           }
@@ -2197,7 +2205,7 @@ public class GIIDeltaDUnitTest extends CacheTestCase {
           for (long i = 1; i<=regionversion; i++) {
             if (!rvv.contains(member, i)) {
               exceptionListVerified = false;
-              getLogWriter().finer("DeltaGII:unexpected exception "+i);
+              LogWriterUtils.getLogWriter().finer("DeltaGII:unexpected exception "+i);
               break;
             }
           }
@@ -2213,8 +2221,8 @@ public class GIIDeltaDUnitTest extends CacheTestCase {
             long gcversion = getRegionVersionForMember(rvv, member, true);
             
             boolean exceptionListVerified = verifyExceptionList(member, regionversion, rvv, exceptionList);
-            getLogWriter().info("DeltaGII:expected:"+expectedRegionVersion+":"+expectedGCVersion);
-            getLogWriter().info("DeltaGII:actual:"+regionversion+":"+gcversion+":"+exceptionListVerified+":"+rvv);
+            LogWriterUtils.getLogWriter().info("DeltaGII:expected:"+expectedRegionVersion+":"+expectedGCVersion);
+            LogWriterUtils.getLogWriter().info("DeltaGII:actual:"+regionversion+":"+gcversion+":"+exceptionListVerified+":"+rvv);
 
             boolean match = true;
             if (expectedRegionVersion != -1) {
@@ -2233,7 +2241,7 @@ public class GIIDeltaDUnitTest extends CacheTestCase {
           }
         };
         
-        DistributedTestCase.waitForCriterion(ev, 10 * 1000, 200, true);
+        Wait.waitForCriterion(ev, 10 * 1000, 200, true);
         RegionVersionVector rvv = ((LocalRegion)getCache().getRegion(REGION_NAME)).getVersionVector().getCloneForTransmission();
         long regionversion = getRegionVersionForMember(rvv, member, false);
         long gcversion = getRegionVersionForMember(rvv, member, true);
@@ -2265,7 +2273,7 @@ public class GIIDeltaDUnitTest extends CacheTestCase {
           }
         };
 
-        DistributedTestCase.waitForCriterion(ev, 30000, 200, true);
+        Wait.waitForCriterion(ev, 30000, 200, true);
         if (callback == null || !callback.isRunning) {
           fail("GII tesk hook is not started yet");
         }
@@ -2303,7 +2311,7 @@ public class GIIDeltaDUnitTest extends CacheTestCase {
           }
         };
         
-        DistributedTestCase.waitForCriterion(ev, 10 * 1000, 200, true);
+        Wait.waitForCriterion(ev, 10 * 1000, 200, true);
         String value = (String)((LocalRegion)getCache().getRegion(REGION_NAME)).get(key);
         assertEquals(expect_value, value);
       }
@@ -2367,9 +2375,9 @@ public class GIIDeltaDUnitTest extends CacheTestCase {
         try {
           remote_rvv = DataSerializer.readObject(new DataInputStream(bais));
         } catch (IOException e) {
-          fail("Unexpected exception", e);
+          Assert.fail("Unexpected exception", e);
         } catch (ClassNotFoundException e) {
-          fail("Unexpected exception", e);
+          Assert.fail("Unexpected exception", e);
         }
         RequestImageMessage rim = new RequestImageMessage();
         rim.setSender(R_ID);
@@ -2499,7 +2507,7 @@ public class GIIDeltaDUnitTest extends CacheTestCase {
           }
         };
         
-        DistributedTestCase.waitForCriterion(ev, 10 * 1000, 200, true);
+        Wait.waitForCriterion(ev, 10 * 1000, 200, true);
         assertTrue(doneVerify());
       }
     };
@@ -2521,10 +2529,10 @@ public class GIIDeltaDUnitTest extends CacheTestCase {
     try {
       async.join(30000);
       if (async.exceptionOccurred()) {
-        fail("Test failed", async.getException());
+        Assert.fail("Test failed", async.getException());
       }
     } catch (InterruptedException e1) {
-      fail("Test failed", e1);
+      Assert.fail("Test failed", e1);
     }
   }
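
In GIIDeltaDUnitTest above, ExpectedException becomes IgnoredException: the field type changes, and addExpectedException calls become IgnoredException.addIgnoredException. A minimal sketch of registering such a suppression and keeping the handle for tear-down; the exception class is chosen only for illustration:

    import com.gemstone.gemfire.test.dunit.IgnoredException;

    public class IgnoredExceptionSketch {

      // Illustrative only: suppresses suspect-string checking for the given
      // exception class name and returns the handle, which the diff above
      // stores in a field (expectedEx) so it can be cleaned up later.
      static IgnoredException ignoreInterrupts() {
        return IgnoredException.addIgnoredException(InterruptedException.class.getName());
      }
    }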
   

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/GIIFlowControlDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/GIIFlowControlDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/GIIFlowControlDUnitTest.java
index d478b8b..e2d4af6 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/GIIFlowControlDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/GIIFlowControlDUnitTest.java
@@ -30,10 +30,15 @@ import com.gemstone.gemfire.distributed.internal.DistributionManager;
 import com.gemstone.gemfire.distributed.internal.DistributionMessage;
 import com.gemstone.gemfire.distributed.internal.DistributionMessageObserver;
 import com.gemstone.gemfire.internal.cache.InitialImageOperation.ImageReplyMessage;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * @author dsmith
@@ -54,21 +59,20 @@ public class GIIFlowControlDUnitTest extends CacheTestCase {
   }
 
   @Override
-  public void tearDown2() throws Exception {
-    invokeInEveryVM(new SerializableRunnable("reset chunk size") {
+  protected final void preTearDownCacheTestCase() throws Exception {
+    Invoke.invokeInEveryVM(new SerializableRunnable("reset chunk size") {
       public void run() {
         InitialImageOperation.CHUNK_SIZE_IN_BYTES = origChunkSize;
         InitialImageOperation.CHUNK_PERMITS = origNumChunks;
       }
     });
-    super.tearDown2();
   }
   
   public void testLotsOfChunks() throws Throwable {
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
-    invokeInEveryVM(new SerializableRunnable("reset chunk size") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("reset chunk size") {
       public void run() {
         InitialImageOperation.CHUNK_SIZE_IN_BYTES = 10;
         InitialImageOperation.CHUNK_PERMITS = 2;
@@ -89,7 +93,7 @@ public class GIIFlowControlDUnitTest extends CacheTestCase {
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
-    invokeInEveryVM(new SerializableRunnable("set chunk size") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("set chunk size") {
       public void run() {
         InitialImageOperation.CHUNK_SIZE_IN_BYTES = 10;
         InitialImageOperation.CHUNK_PERMITS = 2;
@@ -118,7 +122,7 @@ public class GIIFlowControlDUnitTest extends CacheTestCase {
     vm1.invoke(new SerializableRunnable("Wait for chunks") {
       
       public void run() {
-        waitForCriterion(new WaitCriterion(){
+        Wait.waitForCriterion(new WaitCriterion(){
 
           public String description() {
             return "Waiting for messages to be at least 2: " + observer.messageCount.get();
@@ -134,7 +138,7 @@ public class GIIFlowControlDUnitTest extends CacheTestCase {
         try {
           Thread.sleep(500);
         } catch (InterruptedException e) {
-          fail("interrupted", e);
+          Assert.fail("interrupted", e);
         }
         assertEquals(2, observer.messageCount.get());
         observer.allowMessages.countDown();
@@ -158,7 +162,7 @@ public class GIIFlowControlDUnitTest extends CacheTestCase {
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
-    invokeInEveryVM(new SerializableRunnable("set chunk size") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("set chunk size") {
       public void run() {
         InitialImageOperation.CHUNK_SIZE_IN_BYTES = 10;
         InitialImageOperation.CHUNK_PERMITS = 2;
@@ -185,7 +189,7 @@ public class GIIFlowControlDUnitTest extends CacheTestCase {
     vm1.invoke(new SerializableRunnable("Wait to flow control messages") {
 
       public void run() {
-        waitForCriterion(new WaitCriterion(){
+        Wait.waitForCriterion(new WaitCriterion(){
 
           public String description() {
             return "Waiting for messages to be at least 2: " + observer.messageCount.get();
@@ -201,7 +205,7 @@ public class GIIFlowControlDUnitTest extends CacheTestCase {
         try {
           Thread.sleep(500);
         } catch (InterruptedException e) {
-          fail("interrupted", e);
+          Assert.fail("interrupted", e);
         }
         assertEquals(2, observer.messageCount.get());
       }
@@ -237,7 +241,7 @@ public class GIIFlowControlDUnitTest extends CacheTestCase {
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
-    invokeInEveryVM(new SerializableRunnable("set chunk size") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("set chunk size") {
       public void run() {
         InitialImageOperation.CHUNK_SIZE_IN_BYTES = 10;
         InitialImageOperation.CHUNK_PERMITS = 2;
@@ -257,7 +261,7 @@ public class GIIFlowControlDUnitTest extends CacheTestCase {
         
       }
     });
-    ExpectedException expectedEx = null;
+    IgnoredException expectedEx = null;
     try {
       createRegion(vm0);
 
@@ -268,7 +272,7 @@ public class GIIFlowControlDUnitTest extends CacheTestCase {
       vm1.invoke(new SerializableRunnable("Wait to flow control messages") {
 
         public void run() {
-          waitForCriterion(new WaitCriterion(){
+          Wait.waitForCriterion(new WaitCriterion(){
 
             public String description() {
               return "Waiting for messages to be at least 2: " + observer.messageCount.get();
@@ -284,7 +288,7 @@ public class GIIFlowControlDUnitTest extends CacheTestCase {
           try {
             Thread.sleep(500);
           } catch (InterruptedException e) {
-            fail("interrupted", e);
+            Assert.fail("interrupted", e);
           }
           assertEquals(2, observer.messageCount.get());
         }
@@ -306,7 +310,7 @@ public class GIIFlowControlDUnitTest extends CacheTestCase {
         throw e;
       }
 
-      expectedEx = addExpectedException(InterruptedException.class.getName(),
+      expectedEx = IgnoredException.addIgnoredException(InterruptedException.class.getName(),
           vm1);
       if(disconnect) {
         disconnect(vm1);
@@ -325,7 +329,7 @@ public class GIIFlowControlDUnitTest extends CacheTestCase {
 
         public void run() {
           final DMStats stats = getSystem().getDMStats();
-          waitForCriterion(new WaitCriterion() {
+          Wait.waitForCriterion(new WaitCriterion() {
             
             public boolean done() {
               return stats.getInitialImageMessagesInFlight() == 0;
@@ -437,7 +441,7 @@ public class GIIFlowControlDUnitTest extends CacheTestCase {
         try {
           allowMessages.await();
         } catch (InterruptedException e) {
-          fail("Interrupted", e);
+          Assert.fail("Interrupted", e);
         }
       }
     }
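
GIIFlowControlDUnitTest above routes its every-VM calls through the new Invoke utility instead of the inherited invokeInEveryVM. A sketch of the same shape, resetting an invented system property in every dunit VM:

    import com.gemstone.gemfire.test.dunit.Invoke;
    import com.gemstone.gemfire.test.dunit.SerializableRunnable;

    public class InvokeSketch {

      // Illustrative only: the property name is made up; the structure
      // mirrors the "reset chunk size" runnables in the hunk above.
      static void resetTuningEverywhere() {
        Invoke.invokeInEveryVM(new SerializableRunnable("reset tuning knob") {
          public void run() {
            System.clearProperty("example.tuning.knob");
          }
        });
      }
    }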

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/GridAdvisorDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/GridAdvisorDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/GridAdvisorDUnitTest.java
index 9a6b3e4..f12333f 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/GridAdvisorDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/GridAdvisorDUnitTest.java
@@ -33,8 +33,11 @@ import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
 import com.gemstone.gemfire.distributed.internal.InternalLocator;
 import com.gemstone.gemfire.internal.AvailablePort.Keeper;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -78,14 +81,14 @@ public class GridAdvisorDUnitTest extends DistributedTestCase {
     final Keeper bsKeeper4 = freeTCPPorts.get(5);
     final int bsPort4 = bsKeeper4.getPort();
 
-    final String host0 = getServerHostName(host); 
+    final String host0 = NetworkUtils.getServerHostName(host); 
     final String locators =   host0 + "[" + port1 + "]" + "," 
                             + host0 + "[" + port2 + "]";
 
     final Properties dsProps = new Properties();
     dsProps.setProperty("locators", locators);
     dsProps.setProperty("mcast-port", "0");
-    dsProps.setProperty("log-level", getDUnitLogLevel());
+    dsProps.setProperty("log-level", LogWriterUtils.getDUnitLogLevel());
     dsProps.setProperty(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "false");
     
     keeper1.release();
@@ -96,7 +99,7 @@ public class GridAdvisorDUnitTest extends DistributedTestCase {
           try {
             Locator.startLocatorAndDS(port1, logFile, null, dsProps, true, true, null);
           } catch (IOException ex) {
-            fail("While starting locator on port " + port1, ex);
+            Assert.fail("While starting locator on port " + port1, ex);
           }
         }
       });
@@ -112,7 +115,7 @@ public class GridAdvisorDUnitTest extends DistributedTestCase {
             Locator.startLocatorAndDS(port2, logFile, null, dsProps, true, true, "locator2HNFC");
 
           } catch (IOException ex) {
-            fail("While starting locator on port " + port2, ex);
+            Assert.fail("While starting locator on port " + port2, ex);
           }
         }
       });
@@ -123,7 +126,7 @@ public class GridAdvisorDUnitTest extends DistributedTestCase {
             Properties props = new Properties();
             props.setProperty("mcast-port", "0");
             props.setProperty("locators", locators);
-            dsProps.setProperty("log-level", getDUnitLogLevel());
+            dsProps.setProperty("log-level", LogWriterUtils.getDUnitLogLevel());
             CacheFactory.create(DistributedSystem.connect(props));
           }
         };
@@ -283,7 +286,7 @@ public class GridAdvisorDUnitTest extends DistributedTestCase {
             DistributionAdvisee advisee = (DistributionAdvisee)bslist.get(i);
             CacheServerAdvisor bsa = (CacheServerAdvisor)advisee.getDistributionAdvisor();
             List others = bsa.fetchBridgeServers();
-            getLogWriter().info("found these bridgeservers in " + advisee + ": " + others);
+            LogWriterUtils.getLogWriter().info("found these bridgeservers in " + advisee + ": " + others);
             assertEquals(3, others.size());
             others = bsa.fetchControllers();
             assertEquals(2, others.size());
@@ -311,7 +314,7 @@ public class GridAdvisorDUnitTest extends DistributedTestCase {
             DistributionAdvisee advisee = (DistributionAdvisee)bslist.get(i);
             CacheServerAdvisor bsa = (CacheServerAdvisor)advisee.getDistributionAdvisor();
             List others = bsa.fetchBridgeServers();
-            getLogWriter().info("found these bridgeservers in " + advisee + ": " + others);
+            LogWriterUtils.getLogWriter().info("found these bridgeservers in " + advisee + ": " + others);
             assertEquals(3, others.size());
             others = bsa.fetchControllers();
             assertEquals(2, others.size());
@@ -587,14 +590,14 @@ public class GridAdvisorDUnitTest extends DistributedTestCase {
     final Keeper bsKeeper4 = freeTCPPorts.get(5);
     final int bsPort4 = bsKeeper4.getPort();
 
-    final String host0 = getServerHostName(host); 
+    final String host0 = NetworkUtils.getServerHostName(host); 
     final String locators =   host0 + "[" + port1 + "]" + "," 
                             + host0 + "[" + port2 + "]";
 
     final Properties dsProps = new Properties();
     dsProps.setProperty("locators", locators);
     dsProps.setProperty("mcast-port", "0");
-    dsProps.setProperty("log-level", getDUnitLogLevel());
+    dsProps.setProperty("log-level", LogWriterUtils.getDUnitLogLevel());
     dsProps.setProperty(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "false");
     
     keeper1.release();
@@ -605,7 +608,7 @@ public class GridAdvisorDUnitTest extends DistributedTestCase {
           try {
             Locator.startLocatorAndDS(port1, logFile, null, dsProps, true, true, null);
           } catch (IOException ex) {
-            fail("While starting locator on port " + port1, ex);
+            Assert.fail("While starting locator on port " + port1, ex);
           }
         }
       });
@@ -621,7 +624,7 @@ public class GridAdvisorDUnitTest extends DistributedTestCase {
             Locator.startLocatorAndDS(port2, logFile, null, dsProps, true, true, "locator2HNFC");
 
           } catch (IOException ex) {
-            fail("While starting locator on port " + port2, ex);
+            Assert.fail("While starting locator on port " + port2, ex);
           }
         }
       });
@@ -632,7 +635,7 @@ public class GridAdvisorDUnitTest extends DistributedTestCase {
         props.setProperty("mcast-port", "0");
         props.setProperty("locators", locators);
         props.setProperty("groups", "bs1Group1, bs1Group2");
-        props.setProperty("log-level", getDUnitLogLevel());
+        props.setProperty("log-level", LogWriterUtils.getDUnitLogLevel());
         CacheFactory.create(DistributedSystem.connect(props));
       }
     });
@@ -642,7 +645,7 @@ public class GridAdvisorDUnitTest extends DistributedTestCase {
         props.setProperty("mcast-port", "0");
         props.setProperty("locators", locators);
         props.setProperty("groups", "bs2Group1, bs2Group2");
-        props.setProperty("log-level", getDUnitLogLevel());
+        props.setProperty("log-level", LogWriterUtils.getDUnitLogLevel());
         CacheFactory.create(DistributedSystem.connect(props));
       }
     });
@@ -797,7 +800,7 @@ public class GridAdvisorDUnitTest extends DistributedTestCase {
             DistributionAdvisee advisee = (DistributionAdvisee)bslist.get(i);
             CacheServerAdvisor bsa = (CacheServerAdvisor)advisee.getDistributionAdvisor();
             List others = bsa.fetchBridgeServers();
-            getLogWriter().info("found these bridgeservers in " + advisee + ": " + others);
+            LogWriterUtils.getLogWriter().info("found these bridgeservers in " + advisee + ": " + others);
             assertEquals(3, others.size());
             others = bsa.fetchControllers();
             assertEquals(2, others.size());
@@ -825,7 +828,7 @@ public class GridAdvisorDUnitTest extends DistributedTestCase {
             DistributionAdvisee advisee = (DistributionAdvisee)bslist.get(i);
             CacheServerAdvisor bsa = (CacheServerAdvisor)advisee.getDistributionAdvisor();
             List others = bsa.fetchBridgeServers();
-            getLogWriter().info("found these bridgeservers in " + advisee + ": " + others);
+            LogWriterUtils.getLogWriter().info("found these bridgeservers in " + advisee + ": " + others);
             assertEquals(3, others.size());
             others = bsa.fetchControllers();
             assertEquals(2, others.size());
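
GridAdvisorDUnitTest above reads the dunit log level through LogWriterUtils.getDUnitLogLevel() when assembling distributed-system properties. A sketch of that property block, with the helper class and method invented:

    import java.util.Properties;

    import com.gemstone.gemfire.test.dunit.LogWriterUtils;

    public class DsPropsSketch {

      // Illustrative only: builds the same style of connection properties as
      // the hunk above, pulling log-level from LogWriterUtils instead of the
      // formerly inherited getDUnitLogLevel().
      static Properties dsProperties(String locators) {
        Properties props = new Properties();
        props.setProperty("mcast-port", "0");
        props.setProperty("locators", locators);
        props.setProperty("log-level", LogWriterUtils.getDUnitLogLevel());
        return props;
      }
    }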

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HABug36773DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HABug36773DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HABug36773DUnitTest.java
index 1396aba..a590aef 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HABug36773DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HABug36773DUnitTest.java
@@ -38,8 +38,11 @@ import com.gemstone.gemfire.cache30.ClientServerTestCase;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.AvailablePort;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -156,7 +159,7 @@ public class HABug36773DUnitTest extends DistributedTestCase
 
   public void testDummyForBug36773()
   {
-    getLogWriter().info(" This is the dummy test for the Bug 36773");
+    LogWriterUtils.getLogWriter().info(" This is the dummy test for the Bug 36773");
     
   }
   
@@ -171,7 +174,7 @@ public class HABug36773DUnitTest extends DistributedTestCase
       {
         Region region = cache.getRegion(Region.SEPARATOR + REGION_NAME);
         assertNotNull(region);
-        getLogWriter().info("Size of the region " + region.size());
+        LogWriterUtils.getLogWriter().info("Size of the region " + region.size());
         assertEquals(size, region.size());
       }
     };
@@ -234,7 +237,7 @@ public class HABug36773DUnitTest extends DistributedTestCase
           Thread.sleep(700);
         }
         catch (InterruptedException ie) {
-          fail("Interrupted while waiting ", ie);
+          Assert.fail("Interrupted while waiting ", ie);
         }
       }
     }
@@ -260,7 +263,7 @@ public class HABug36773DUnitTest extends DistributedTestCase
       assertEquals(r1.getEntry(KEY2).getValue(), "key-2");
     }
     catch (Exception ex) {
-      fail("failed while createEntriesK1andK2()", ex);
+      Assert.fail("failed while createEntriesK1andK2()", ex);
     }
   }
   
@@ -280,7 +283,7 @@ public class HABug36773DUnitTest extends DistributedTestCase
     new HABug36773DUnitTest("temp").createCache(props);
     AttributesFactory factory = new AttributesFactory();
     factory.setScope(Scope.DISTRIBUTED_ACK);
-    ClientServerTestCase.configureConnectionPool(factory, DistributedTestCase.getIPLiteral(), new int[] {PORT1,PORT2}, true, -1, 2, null);
+    ClientServerTestCase.configureConnectionPool(factory, NetworkUtils.getIPLiteral(), new int[] {PORT1,PORT2}, true, -1, 2, null);
     RegionAttributes attrs = factory.create();
     cache.createRegion(REGION_NAME, attrs);
   }
@@ -316,7 +319,7 @@ public class HABug36773DUnitTest extends DistributedTestCase
 
     }
     catch (Exception ex) {
-      fail("failed while registering interest", ex);
+      Assert.fail("failed while registering interest", ex);
     }
   }
 
@@ -354,16 +357,14 @@ public class HABug36773DUnitTest extends DistributedTestCase
 
   }
   
-  public void tearDown2() throws Exception
-  {
+  @Override
+  protected final void preTearDown() throws Exception {
     //close client
     client1.invoke(HABug36773DUnitTest.class, "closeCache");
     client2.invoke(HABug36773DUnitTest.class, "closeCache");
     //close server
     server1.invoke(HABug36773DUnitTest.class, "closeCache");
     server2.invoke(HABug36773DUnitTest.class, "closeCache");
-
   }
-  
 }
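
HABug36773DUnitTest above also shows the tear-down change for DistributedTestCase subclasses: instead of overriding tearDown2() and calling super.tearDown2(), tests override the preTearDown() hook and the base class drives the rest. A sketch of the new shape; the class name and cleanup body are invented, and the String constructor follows the JUnit 3 convention these tests use:

    import com.gemstone.gemfire.test.dunit.DistributedTestCase;

    public class TearDownSketchDUnitTest extends DistributedTestCase {

      public TearDownSketchDUnitTest(String name) {
        super(name);
      }

      // Illustrative only: per-test cleanup goes in this hook; the base
      // class invokes it as part of its own tear-down, so no super call to
      // tearDown2() is needed any more.
      @Override
      protected final void preTearDown() throws Exception {
        // close client and server caches created by the test
      }
    }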
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HAOverflowMemObjectSizerDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HAOverflowMemObjectSizerDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HAOverflowMemObjectSizerDUnitTest.java
index 4539321..371d8f9 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HAOverflowMemObjectSizerDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/HAOverflowMemObjectSizerDUnitTest.java
@@ -40,6 +40,7 @@ import com.gemstone.gemfire.internal.cache.tier.sockets.ClientUpdateMessageImpl;
 import com.gemstone.gemfire.internal.cache.tier.sockets.ConflationDUnitTest;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -93,8 +94,8 @@ public class HAOverflowMemObjectSizerDUnitTest extends DistributedTestCase {
     serverVM = host.getVM(3);
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     serverVM.invoke(ConflationDUnitTest.class, "unsetIsSlowStart");
     client.invoke(HAOverflowMemObjectSizerDUnitTest.class, "closeCache");
     serverVM.invoke(HAOverflowMemObjectSizerDUnitTest.class, "closeCache");
@@ -190,7 +191,7 @@ public class HAOverflowMemObjectSizerDUnitTest extends DistributedTestCase {
     client.invoke(HAOverflowMemObjectSizerDUnitTest.class,
                   "createCacheClient",
                   new Object[] { port1, 
-                  getServerHostName(client.getHost()) });
+                  NetworkUtils.getServerHostName(client.getHost()) });
 
     serverVM.invoke(HAOverflowMemObjectSizerDUnitTest.class, "performPut",
         new Object[] { new Long(0L), new Long(100L) });
@@ -216,7 +217,7 @@ public class HAOverflowMemObjectSizerDUnitTest extends DistributedTestCase {
     client.invoke(HAOverflowMemObjectSizerDUnitTest.class,
                   "createCacheClient", 
                   new Object[] { port2,
-                  getServerHostName(client.getHost()) });
+                  NetworkUtils.getServerHostName(client.getHost()) });
 
     serverVM.invoke(HAOverflowMemObjectSizerDUnitTest.class, "performPut",
         new Object[] { new Long(101L), new Long(200L) });

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/IncrementalBackupDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/IncrementalBackupDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/IncrementalBackupDUnitTest.java
index 0d1f0b4..c8f0933 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/IncrementalBackupDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/IncrementalBackupDUnitTest.java
@@ -50,11 +50,13 @@ import com.gemstone.gemfire.internal.JarDeployer;
 import com.gemstone.gemfire.internal.cache.persistence.BackupManager;
 import com.gemstone.gemfire.internal.util.IOUtils;
 import com.gemstone.gemfire.internal.util.TransformUtils;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Tests for the incremental backup feature.
@@ -88,7 +90,7 @@ public class IncrementalBackupDUnitTest extends CacheTestCase {
   private final SerializableRunnable createRegions = new SerializableRunnable() {
     @Override
     public void run() {
-      Cache cache = getCache(new CacheFactory().set("log-level", getDUnitLogLevel()));
+      Cache cache = getCache(new CacheFactory().set("log-level", LogWriterUtils.getDUnitLogLevel()));
       cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("fooStore");
       cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("barStore");
       getRegionFactory(cache).setDiskStoreName("fooStore").create("fooRegion");
@@ -125,7 +127,7 @@ public class IncrementalBackupDUnitTest extends CacheTestCase {
    * @param message a message to log.
    */
   private void log(String message) {
-    getLogWriter().info("[IncrementalBackupDUnitTest] " + message);
+    LogWriterUtils.getLogWriter().info("[IncrementalBackupDUnitTest] " + message);
   }
 
   /**
@@ -365,7 +367,7 @@ public class IncrementalBackupDUnitTest extends CacheTestCase {
     });
     
     final Set<PersistentID> missingMembers = new HashSet<PersistentID>();
-    DistributedTestCase.waitForCriterion(new WaitCriterion() {
+    Wait.waitForCriterion(new WaitCriterion() {
       @Override
       public boolean done() {
         missingMembers.clear();
@@ -666,12 +668,10 @@ public class IncrementalBackupDUnitTest extends CacheTestCase {
    * Removes backup directories (and all backup data).
    */
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
     FileUtil.delete(getIncremental2Dir());
     FileUtil.delete(getIncrementalDir());
     FileUtil.delete(getBaselineDir());
-    
-    super.tearDown2();
   }
 
   /**
@@ -837,7 +837,7 @@ public class IncrementalBackupDUnitTest extends CacheTestCase {
      * member is back online.
      */
     final Set<PersistentID> missingMembers = new HashSet<PersistentID>();
-    DistributedTestCase.waitForCriterion(new WaitCriterion() {
+    Wait.waitForCriterion(new WaitCriterion() {
       @Override
       public boolean done() {
         missingMembers.clear();
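
The waitForCriterion changes above are the most frequent edit in this commit: the polling helper moves from DistributedTestCase to Wait, with WaitCriterion now imported from the dunit package directly. A sketch of the idiom, assuming only the done()/description() contract visible in these hunks; the missingMembers set stands in for whatever condition a test polls on:

import java.util.Set;

import com.gemstone.gemfire.test.dunit.Wait;
import com.gemstone.gemfire.test.dunit.WaitCriterion;

public class ExampleWaitUsage {

  public static void waitForEmpty(final Set<String> missingMembers) {
    Wait.waitForCriterion(new WaitCriterion() {
      @Override
      public boolean done() {
        return missingMembers.isEmpty(); // condition polled until true
      }
      @Override
      public String description() {
        return "waiting for missing members to come back online";
      }
    }, 30 * 1000, 200, true); // timeout ms, poll interval ms, throw on timeout
  }
}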

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/InterruptClientServerDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/InterruptClientServerDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/InterruptClientServerDUnitTest.java
index 03d9edd..fdf2a5b 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/InterruptClientServerDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/InterruptClientServerDUnitTest.java
@@ -34,6 +34,8 @@ import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.internal.cache.UpdateOperation.UpdateMessage;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -54,8 +56,8 @@ public class InterruptClientServerDUnitTest extends CacheTestCase {
   
   
   @Override
-  public void tearDown2() throws Exception {
-    invokeInEveryVM(new SerializableCallable() {
+  protected final void preTearDownCacheTestCase() throws Exception {
+    Invoke.invokeInEveryVM(new SerializableCallable() {
       
       @Override
       public Object call() throws Exception {
@@ -63,7 +65,6 @@ public class InterruptClientServerDUnitTest extends CacheTestCase {
         return null;
       }
     });
-    super.tearDown2();
   }
   
   public void _testLoop() throws Throwable {
@@ -83,7 +84,7 @@ public class InterruptClientServerDUnitTest extends CacheTestCase {
    * @throws Throwable 
    */
   public void testClientPutWithInterrupt() throws Throwable {
-    addExpectedException("InterruptedException");
+    IgnoredException.addIgnoredException("InterruptedException");
     Host host = Host.getHost(0);
     final VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
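
Two more renames appear in this hunk: addExpectedException(String) becomes IgnoredException.addIgnoredException(String), and the inherited invokeInEveryVM(...) becomes the static Invoke.invokeInEveryVM(...). A combined sketch; the cleanup body inside the callable is illustrative only:

import com.gemstone.gemfire.test.dunit.IgnoredException;
import com.gemstone.gemfire.test.dunit.Invoke;
import com.gemstone.gemfire.test.dunit.SerializableCallable;

public class ExampleIgnoreAndInvoke {

  public static void prepare() {
    // Suppress suspect-string checking for interrupts the test raises on purpose.
    IgnoredException.addIgnoredException("InterruptedException");

    // Run a cleanup step in every dunit VM.
    Invoke.invokeInEveryVM(new SerializableCallable() {
      @Override
      public Object call() throws Exception {
        // illustrative cleanup; the real tests reset a DistributionMessageObserver here
        return null;
      }
    });
  }
}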

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/InterruptsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/InterruptsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/InterruptsDUnitTest.java
index 416dff9..6a0ffba 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/InterruptsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/InterruptsDUnitTest.java
@@ -30,6 +30,7 @@ import com.gemstone.gemfire.distributed.internal.DistributionMessageObserver;
 import com.gemstone.gemfire.internal.cache.UpdateOperation.UpdateMessage;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -50,8 +51,8 @@ public class InterruptsDUnitTest extends CacheTestCase {
   
   
   @Override
-  public void tearDown2() throws Exception {
-    invokeInEveryVM(new SerializableCallable() {
+  protected final void preTearDownCacheTestCase() throws Exception {
+    Invoke.invokeInEveryVM(new SerializableCallable() {
       
       @Override
       public Object call() throws Exception {
@@ -59,7 +60,6 @@ public class InterruptsDUnitTest extends CacheTestCase {
         return null;
       }
     });
-    super.tearDown2();
   }
   
   public void _testLoop() throws Throwable {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/MapClearGIIDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/MapClearGIIDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/MapClearGIIDUnitTest.java
index 3910c3c..3aeceec 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/MapClearGIIDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/MapClearGIIDUnitTest.java
@@ -32,11 +32,15 @@ import com.gemstone.gemfire.cache.RegionEvent;
 import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.cache30.CacheTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * @author ashahid
@@ -138,7 +142,7 @@ public class MapClearGIIDUnitTest extends CacheTestCase {
     region = new MapClearGIIDUnitTest("dumb object to get cache").getCache().createRegion("map", attr);
 
     // region = region.createSubregion("map",attr);
-    getLogWriter().info("Region in VM0 created ");
+    LogWriterUtils.getLogWriter().info("Region in VM0 created ");
   }
 /*
   public static void closeCache() {
@@ -166,7 +170,7 @@ public class MapClearGIIDUnitTest extends CacheTestCase {
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 10 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 10 * 1000, 200, true);
     region.clear();
     assertEquals(0, region.size());
   }
@@ -214,7 +218,7 @@ public class MapClearGIIDUnitTest extends CacheTestCase {
         }
       }
     });
-    getLogWriter().info("Cache created in VM1 successfully");
+    LogWriterUtils.getLogWriter().info("Cache created in VM1 successfully");
     try {
       AsyncInvocation asyncGII = vm0.invokeAsync(MapClearGIIDUnitTest.class, 
           "createRegionInVm0");
@@ -230,28 +234,28 @@ public class MapClearGIIDUnitTest extends CacheTestCase {
                 return null;
               }
             };
-            DistributedTestCase.waitForCriterion(ev, 30 * 1000, 200, true);
+            Wait.waitForCriterion(ev, 30 * 1000, 200, true);
           }
         });
       // now that the gii has received some entries do the clear
       vm1.invoke(MapClearGIIDUnitTest.class, "clearRegionInVm1");
       // wait for GII to complete
-      DistributedTestCase.join(asyncGII, 30 * 1000, getLogWriter());
+      ThreadUtils.join(asyncGII, 30 * 1000);
       if (asyncGII.exceptionOccurred()) {
         Throwable t = asyncGII.getException();
-        fail("createRegionInVM0 failed", t);
+        Assert.fail("createRegionInVM0 failed", t);
       }
       assertTrue(vm0
           .invokeBoolean(MapClearGIIDUnitTest.class, "checkImageStateFlag"));
 
       if (asyncGII.exceptionOccurred()) {
-        fail("asyncGII failed", asyncGII.getException());
+        Assert.fail("asyncGII failed", asyncGII.getException());
       }
 				   
 	  
     }
     catch (Exception e) {
-      fail("Test failed", e);
+      Assert.fail("Test failed", e);
     }
     finally {
       vm0.invoke(new SerializableRunnable("Set fast image processing") {
@@ -267,13 +271,13 @@ public class MapClearGIIDUnitTest extends CacheTestCase {
   public static class CacheObserverImpl extends CacheObserverAdapter {
 
     public void afterRegionClear(RegionEvent event) {
-      getLogWriter().info("**********Received clear event in VM0 . ");
+      LogWriterUtils.getLogWriter().info("**********Received clear event in VM0 . ");
       Region rgn = event.getRegion();
       wasGIIInProgressDuringClear = ((LocalRegion) rgn).getImageState()
         .wasRegionClearedDuringGII();
       InitialImageOperation.slowImageProcessing = 0;
       InitialImageOperation.slowImageSleeps = 0;
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "wasGIIInProgressDuringClear when clear event was received= "
               + wasGIIInProgressDuringClear);
     }
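
MapClearGIIDUnitTest collects the remaining renames: join(thread, timeoutMs, logWriter) becomes ThreadUtils.join(thread, timeoutMs) with the LogWriter argument dropped, and the two-argument fail(message, cause) becomes Assert.fail(message, cause) on the dunit Assert class. A sketch of the async-invocation idiom; the doWork target is a placeholder:

import com.gemstone.gemfire.test.dunit.Assert;
import com.gemstone.gemfire.test.dunit.AsyncInvocation;
import com.gemstone.gemfire.test.dunit.Host;
import com.gemstone.gemfire.test.dunit.ThreadUtils;
import com.gemstone.gemfire.test.dunit.VM;

public class ExampleAsyncJoin {

  public static void runAsyncStep() {
    VM vm0 = Host.getHost(0).getVM(0);
    AsyncInvocation async = vm0.invokeAsync(ExampleAsyncJoin.class, "doWork");
    ThreadUtils.join(async, 30 * 1000); // 30 second limit; no LogWriter argument anymore
    if (async.exceptionOccurred()) {
      Assert.fail("doWork failed in vm0", async.getException());
    }
  }

  public static void doWork() {
    // placeholder for the remote work the test performs
  }
}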

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/MapInterface2JUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/MapInterface2JUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/MapInterface2JUnitTest.java
index 91fea15..0129ec6 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/MapInterface2JUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/MapInterface2JUnitTest.java
@@ -41,7 +41,7 @@ import com.gemstone.gemfire.cache.query.CacheUtils;
 import com.gemstone.gemfire.cache.query.data.Portfolio;
 import com.gemstone.gemfire.cache.util.CacheListenerAdapter;
 import com.gemstone.gemfire.internal.util.StopWatch;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
@@ -221,7 +221,7 @@ public class MapInterface2JUnitTest {
          callbackSync.notify();
           }
      }
-     DistributedTestCase.join(th, 30 * 1000, null);
+     ThreadUtils.join(th, 30 * 1000);
   }
   
   
@@ -274,7 +274,7 @@ public class MapInterface2JUnitTest {
      }catch (Exception cwe) {
        fail("The test experienced exception "+cwe);   
      }    
-     DistributedTestCase.join(th, 30 * 1000, null);
+     ThreadUtils.join(th, 30 * 1000);
   }
   
   

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/NetSearchMessagingDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/NetSearchMessagingDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/NetSearchMessagingDUnitTest.java
index b1375a8..f49a4c6 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/NetSearchMessagingDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/NetSearchMessagingDUnitTest.java
@@ -36,9 +36,12 @@ import com.gemstone.gemfire.distributed.internal.DistributionMessageObserver;
 import com.gemstone.gemfire.internal.cache.SearchLoadAndWriteProcessor.NetSearchRequestMessage;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * @author dsmith
@@ -193,7 +196,7 @@ public class NetSearchMessagingDUnitTest extends CacheTestCase {
           LocalRegion region = (LocalRegion)cache.getRegion("region");
           RegionEntry re = region.getRegionEntry("a");
           Object o = re.getValueInVM(null);
-          getLogWriter().info("key a="+o);;
+          LogWriterUtils.getLogWriter().info("key a="+o);;
           return o == null || o == Token.NOT_AVAILABLE;
         }
       };
@@ -215,7 +218,7 @@ public class NetSearchMessagingDUnitTest extends CacheTestCase {
           for (String key: keys) {
             RegionEntry re = region.getRegionEntry(key);
             Object o = re.getValueInVM(null);
-            getLogWriter().info("key " + key + "=" + o);
+            LogWriterUtils.getLogWriter().info("key " + key + "=" + o);
             assertTrue("expected key " + key + " to not be evicted",
                 (o != null) && (o != Token.NOT_AVAILABLE));
           }
@@ -333,9 +336,9 @@ public class NetSearchMessagingDUnitTest extends CacheTestCase {
       public Object call() {
         Cache cache = getCache();
         Region region = cache.getRegion("region");
-        getLogWriter().info("putting key="+key+"="+value);
+        LogWriterUtils.getLogWriter().info("putting key="+key+"="+value);
         Object result = region.put(key, value);
-        getLogWriter().info("done putting key="+key);
+        LogWriterUtils.getLogWriter().info("done putting key="+key);
         return result;
       }
     });
@@ -353,7 +356,7 @@ public class NetSearchMessagingDUnitTest extends CacheTestCase {
   }
   
   private void waitForReceivedMessages(final VM vm, final long expected) {
-    waitForCriterion(new WaitCriterion() {
+    Wait.waitForCriterion(new WaitCriterion() {
       
       @Override
       public boolean done() {
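
Logging moves the same way everywhere in this patch: the inherited getLogWriter() (and getDUnitLogLevel()) now come from LogWriterUtils. A one-method sketch:

import com.gemstone.gemfire.test.dunit.LogWriterUtils;

public class ExampleLogging {

  public static void logStep(String message) {
    // Before: getLogWriter().info(...), inherited from the test base class.
    // After: the same LogWriter obtained through LogWriterUtils.
    // LogWriterUtils.getDUnitLogLevel() likewise replaces the inherited getDUnitLogLevel().
    LogWriterUtils.getLogWriter().info("[ExampleLogging] " + message);
  }
}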

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapEvictionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapEvictionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapEvictionDUnitTest.java
index 4482533..708b983 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapEvictionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapEvictionDUnitTest.java
@@ -25,9 +25,13 @@ import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.cache.control.InternalResourceManager.ResourceType;
 import com.gemstone.gemfire.internal.cache.lru.HeapEvictor;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Performs eviction dunit tests for off-heap memory.
@@ -39,7 +43,7 @@ public class OffHeapEvictionDUnitTest extends EvictionDUnitTest {
   }  
   
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
     SerializableRunnable checkOrphans = new SerializableRunnable() {
 
       @Override
@@ -49,12 +53,8 @@ public class OffHeapEvictionDUnitTest extends EvictionDUnitTest {
         }
       }
     };
-    invokeInEveryVM(checkOrphans);
-    try {
-      checkOrphans.run();
-    } finally {
-      super.tearDown2();
-    }
+    Invoke.invokeInEveryVM(checkOrphans);
+    checkOrphans.run();
   }
 
   @Override
@@ -74,15 +74,15 @@ public class OffHeapEvictionDUnitTest extends EvictionDUnitTest {
       ds = getSystem(getDistributedSystemProperties());
       cache = CacheFactory.create(ds);
       assertNotNull(cache);
-      getLogWriter().info("cache= " + cache);
-      getLogWriter().info("cache closed= " + cache.isClosed());
+      LogWriterUtils.getLogWriter().info("cache= " + cache);
+      LogWriterUtils.getLogWriter().info("cache closed= " + cache.isClosed());
       cache.getResourceManager().setEvictionOffHeapPercentage(85);
       ((GemFireCacheImpl) cache).getResourceManager().getOffHeapMonitor().stopMonitoring(true);
-      getLogWriter().info("eviction= "+cache.getResourceManager().getEvictionOffHeapPercentage());
-      getLogWriter().info("critical= "+cache.getResourceManager().getCriticalOffHeapPercentage());
+      LogWriterUtils.getLogWriter().info("eviction= "+cache.getResourceManager().getEvictionOffHeapPercentage());
+      LogWriterUtils.getLogWriter().info("critical= "+cache.getResourceManager().getCriticalOffHeapPercentage());
     }
     catch (Exception e) {
-      fail("Failed while creating the cache", e);
+      Assert.fail("Failed while creating the cache", e);
     }
   }
 
@@ -115,7 +115,7 @@ public class OffHeapEvictionDUnitTest extends EvictionDUnitTest {
             .getEvictions();
           }
         };
-        DistributedTestCase.waitForCriterion(wc, 60000, 1000, true);
+        Wait.waitForCriterion(wc, 60000, 1000, true);
       }
     });
   }
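
One behavioral nuance in the OffHeapEvictionDUnitTest hunk: the old tearDown2() wrapped checkOrphans.run() in try/finally so that super.tearDown2() always ran; with preTearDownCacheTestCase() the base class drives the rest of teardown itself, so the override is reduced to the orphan check (the TODO added to OffHeapTestUtil below flags the same concern). A sketch of the resulting hook; the guard inside run() is outside the diff context shown above, so it is omitted here rather than guessed at:

import com.gemstone.gemfire.cache30.CacheTestCase;
import com.gemstone.gemfire.internal.cache.OffHeapTestUtil;
import com.gemstone.gemfire.test.dunit.Invoke;
import com.gemstone.gemfire.test.dunit.SerializableRunnable;

public class ExampleOffHeapDUnitTest extends CacheTestCase {

  public ExampleOffHeapDUnitTest(String name) {
    super(name);
  }

  @Override
  protected final void preTearDownCacheTestCase() throws Exception {
    SerializableRunnable checkOrphans = new SerializableRunnable() {
      @Override
      public void run() {
        // the real tests guard this call; the guard is elided in the hunk above
        OffHeapTestUtil.checkOrphans();
      }
    };
    Invoke.invokeInEveryVM(checkOrphans); // run the check in every remote VM
    checkOrphans.run();                   // and once in the controller VM
  }
}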

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapEvictionStatsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapEvictionStatsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapEvictionStatsDUnitTest.java
index a2d0555..f574e99 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapEvictionStatsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapEvictionStatsDUnitTest.java
@@ -21,6 +21,9 @@ import java.util.Properties;
 import com.gemstone.gemfire.cache.CacheFactory;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.test.dunit.Assert;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 
 /**
@@ -36,7 +39,7 @@ public class OffHeapEvictionStatsDUnitTest extends EvictionStatsDUnitTest {
   }
 
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
     SerializableRunnable checkOrphans = new SerializableRunnable() {
 
       @Override
@@ -46,12 +49,8 @@ public class OffHeapEvictionStatsDUnitTest extends EvictionStatsDUnitTest {
         }
       }
     };
-    invokeInEveryVM(checkOrphans);
-    try {
-      checkOrphans.run();
-    } finally {
-      super.tearDown2();
-    }
+    Invoke.invokeInEveryVM(checkOrphans);
+    checkOrphans.run();
   }
 
   @Override
@@ -71,14 +70,14 @@ public class OffHeapEvictionStatsDUnitTest extends EvictionStatsDUnitTest {
       ds = getSystem(getDistributedSystemProperties());
       cache = CacheFactory.create(ds);
       assertNotNull(cache);
-      getLogWriter().info("cache= " + cache);
-      getLogWriter().info("cache closed= " + cache.isClosed());
+      LogWriterUtils.getLogWriter().info("cache= " + cache);
+      LogWriterUtils.getLogWriter().info("cache closed= " + cache.isClosed());
       cache.getResourceManager().setEvictionOffHeapPercentage(20);
-      getLogWriter().info("eviction= "+cache.getResourceManager().getEvictionOffHeapPercentage());
-      getLogWriter().info("critical= "+cache.getResourceManager().getCriticalOffHeapPercentage());
+      LogWriterUtils.getLogWriter().info("eviction= "+cache.getResourceManager().getEvictionOffHeapPercentage());
+      LogWriterUtils.getLogWriter().info("critical= "+cache.getResourceManager().getCriticalOffHeapPercentage());
     }
     catch (Exception e) {
-      fail("Failed while creating the cache", e);
+      Assert.fail("Failed while creating the cache", e);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapTestUtil.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapTestUtil.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapTestUtil.java
index 948c7f8..cbf3bf6 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapTestUtil.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OffHeapTestUtil.java
@@ -30,7 +30,7 @@ import com.gemstone.gemfire.internal.offheap.SimpleMemoryAllocatorImpl;
 @SuppressWarnings("deprecation")
 public class OffHeapTestUtil {
 
-  public static void checkOrphans() {
+  public static void checkOrphans() { // TODO:KIRK: need to do something special to guarantee proper tearDown
     SimpleMemoryAllocatorImpl allocator = null;
     try {
       allocator = SimpleMemoryAllocatorImpl.getAllocator();

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OplogJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OplogJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OplogJUnitTest.java
index 9daf69f..1d9213e 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OplogJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/OplogJUnitTest.java
@@ -57,8 +57,9 @@ import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.cache.util.CacheWriterAdapter;
 import com.gemstone.gemfire.internal.InternalDataSerializer;
 import com.gemstone.gemfire.internal.cache.Oplog.OPLOG_TYPE;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 import com.jayway.awaitility.Awaitility;
 
@@ -1677,7 +1678,7 @@ public class OplogJUnitTest extends DiskRegionTestingBase
             });
             assertNull(conflated);
             th.start();
-            DistributedTestCase.join(th, 30 * 1000, null);
+            ThreadUtils.join(th, 30 * 1000);
             LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
 
           }
@@ -2093,7 +2094,7 @@ public class OplogJUnitTest extends DiskRegionTestingBase
       finally {
         ((LocalRegion)region).getDiskRegion().releaseWriteLock();
       }
-      DistributedTestCase.join(th, 30 * 1000, null);
+      ThreadUtils.join(th, 30 * 1000);
       region.close();
       region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache,
           diskProps, Scope.LOCAL);
@@ -2710,16 +2711,16 @@ public class OplogJUnitTest extends DiskRegionTestingBase
 
     assertEquals(0, dss.getQueueSize());
     put100Int();
-    DistributedTestCase.waitForCriterion(evFull, 2 * 1000, 200, true);
+    Wait.waitForCriterion(evFull, 2 * 1000, 200, true);
     assertEquals(0, dss.getFlushes());
     region.writeToDisk();
-    DistributedTestCase.waitForCriterion(ev, 2 * 1000, 200, true);
-    DistributedTestCase.waitForCriterion(ev2, 1000, 200, true);
+    Wait.waitForCriterion(ev, 2 * 1000, 200, true);
+    Wait.waitForCriterion(ev2, 1000, 200, true);
     put100Int();
-    DistributedTestCase.waitForCriterion(evFull, 2 * 1000, 200, true);
+    Wait.waitForCriterion(evFull, 2 * 1000, 200, true);
     region.writeToDisk();
-    DistributedTestCase.waitForCriterion(ev, 2 * 1000, 200, true);
-    DistributedTestCase.waitForCriterion(ev3, 1000, 200, true);
+    Wait.waitForCriterion(ev, 2 * 1000, 200, true);
+    Wait.waitForCriterion(ev3, 1000, 200, true);
     closeDown();
   }
 
@@ -3169,7 +3170,7 @@ public class OplogJUnitTest extends DiskRegionTestingBase
         }
       });
       try {
-        DistributedTestCase.join(clearOp, 30 * 1000, null);
+        ThreadUtils.join(clearOp, 30 * 1000);
       }
       catch (Exception e) {
         testFailed = true;
@@ -3236,7 +3237,7 @@ public class OplogJUnitTest extends DiskRegionTestingBase
           });
           clearTh.start();
           try {
-            DistributedTestCase.join(clearTh, 120 * 1000, null);
+            ThreadUtils.join(clearTh, 120 * 1000);
             failure = clearTh.isAlive();
             failureCause = "Clear Thread still running !";
           } catch(Exception e) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/P2PDeltaPropagationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/P2PDeltaPropagationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/P2PDeltaPropagationDUnitTest.java
index 508347d..3f814d4 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/P2PDeltaPropagationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/P2PDeltaPropagationDUnitTest.java
@@ -510,14 +510,12 @@ public class P2PDeltaPropagationDUnitTest extends DistributedTestCase
     }
   }
 
-  public void tearDown2() throws Exception
-  {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     closeCache();
     server1.invoke(P2PDeltaPropagationDUnitTest.class, "closeCache");
     server2.invoke(P2PDeltaPropagationDUnitTest.class, "closeCache");
     server3.invoke(P2PDeltaPropagationDUnitTest.class, "closeCache");
-        
   }
 
   public static void closeCache()

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PRBadToDataDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PRBadToDataDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PRBadToDataDUnitTest.java
index f1e7988..8249d2b 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PRBadToDataDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PRBadToDataDUnitTest.java
@@ -24,6 +24,7 @@ import com.gemstone.gemfire.cache.CacheException;
 import com.gemstone.gemfire.cache.PartitionAttributesFactory;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.cache30.CacheTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
@@ -76,7 +77,7 @@ public class PRBadToDataDUnitTest extends CacheTestCase {
           assertNotNull(pr);
         }
         catch (final CacheException ex) {
-          fail("While creating Partitioned region", ex);
+          Assert.fail("While creating Partitioned region", ex);
         }
       }
     };

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionAPIDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionAPIDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionAPIDUnitTest.java
index bb34cc1..466dfa4 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionAPIDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionAPIDUnitTest.java
@@ -47,6 +47,7 @@ import com.gemstone.gemfire.distributed.internal.ReplyException;
 
 import com.gemstone.gemfire.internal.cache.PartitionedRegion;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -171,7 +172,7 @@ public class PartitionedRegionAPIDUnitTest extends
 			RegionAttributes regionAttribs = attr.create();
 			cache.createRegion("PR1",
 					regionAttribs);
-			getLogWriter().info("Region created in VM1.");
+			LogWriterUtils.getLogWriter().info("Region created in VM1.");
 		}
 	};
 
@@ -191,12 +192,12 @@ public class PartitionedRegionAPIDUnitTest extends
     for (int b = 0; b < numBucks; b++) {
       if (par.getBucketKeys(b).contains(key)) {
         foundIt = true;
-        getLogWriter().severe("Key " + key + " found in bucket " + b);
+        LogWriterUtils.getLogWriter().severe("Key " + key + " found in bucket " + b);
         break;
       }
     }
     if (!foundIt) {
-      getLogWriter().severe("Key " + key + " not found in any bucket");
+      LogWriterUtils.getLogWriter().severe("Key " + key + " not found in any bucket");
     }
     return foundIt;
   }
@@ -347,8 +348,8 @@ public class PartitionedRegionAPIDUnitTest extends
 						"<ExpectedException action=remove>"
 								+ entryNotFoundException
 								+ "</ExpectedException>");
-				getLogWriter().fine("Out of doPutOperations1");
-				getLogWriter().fine("All the puts done successfully for vm0.");
+				LogWriterUtils.getLogWriter().fine("Out of doPutOperations1");
+				LogWriterUtils.getLogWriter().fine("All the puts done successfully for vm0.");
 			}
 		});
 
@@ -506,8 +507,8 @@ public class PartitionedRegionAPIDUnitTest extends
 								+ entryNotFoundException
 								+ "</ExpectedException>");
 
-				getLogWriter().fine("Out of doPutOperations2");
-				getLogWriter().fine("All the puts done successfully for vm1.");
+				LogWriterUtils.getLogWriter().fine("Out of doPutOperations2");
+				LogWriterUtils.getLogWriter().fine("All the puts done successfully for vm1.");
 			}
 		});
 	}
@@ -654,7 +655,7 @@ public class PartitionedRegionAPIDUnitTest extends
 					size = pr.size();
 					assertEquals("Size doesnt return expected value", size, 10);
 //				}
-				getLogWriter().fine(
+				LogWriterUtils.getLogWriter().fine(
 						"All the puts done successfully for vm0.");
                                 
                                 
@@ -795,7 +796,7 @@ public class PartitionedRegionAPIDUnitTest extends
 								"<ExpectedException action=remove>"
 										+ entryNotFoundException
 										+ "</ExpectedException>");
-				getLogWriter()
+				LogWriterUtils.getLogWriter()
 						.fine("All the remove done successfully for vm0.");
 			}
 		});
@@ -1018,7 +1019,7 @@ public class PartitionedRegionAPIDUnitTest extends
 						assertTrue("containsKey() Validation failed for key = "
 								+ i, conKey);
 					}
-					getLogWriter().fine(
+					LogWriterUtils.getLogWriter().fine(
 							"containsKey() Validated entry for key = " + i);
 				}
 
@@ -1036,7 +1037,7 @@ public class PartitionedRegionAPIDUnitTest extends
 								"containsValueForKey() Validation failed for key = "
 										+ i, conKey);
 					}
-					getLogWriter().fine(
+					LogWriterUtils.getLogWriter().fine(
 							"containsValueForKey() Validated entry for key = "
 									+ i);
 				}


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/SearchAndLoadDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/SearchAndLoadDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/SearchAndLoadDUnitTest.java
index ace3e4b..01aa205 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/SearchAndLoadDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/SearchAndLoadDUnitTest.java
@@ -33,9 +33,13 @@ import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.cache.RegionEvent;
 import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.cache.TimeoutException;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -72,7 +76,8 @@ public class SearchAndLoadDUnitTest extends CacheTestCase {
     super(name);
   }
 
-  public void tearDown2() throws Exception {
+  @Override
+  protected final void preTearDownCacheTestCase() throws Exception {
     for (int h = 0; h < Host.getHostCount(); h++) {
       Host host = Host.getHost(h);
       for (int v = 0; v < host.getVMCount(); v++) {
@@ -81,12 +86,9 @@ public class SearchAndLoadDUnitTest extends CacheTestCase {
               cleanup();
             }
           });
-          // already called in every VM in super.tearDown
-//        host.getVM(v).invoke(this.getClass(), "remoteTearDown");
       }
     }
     cleanup();
-    super.tearDown2();
   }
 
   /**
@@ -130,7 +132,7 @@ public class SearchAndLoadDUnitTest extends CacheTestCase {
           region.create(objectName,null);
         }
         catch (CacheException ex) {
-          fail("While creating ACK region", ex);
+          Assert.fail("While creating ACK region", ex);
         }
       }
     });
@@ -148,7 +150,7 @@ public class SearchAndLoadDUnitTest extends CacheTestCase {
 
         }
         catch (CacheException ex) {
-          fail("While creating ACK region", ex);
+          Assert.fail("While creating ACK region", ex);
         }
       }
     });
@@ -166,7 +168,7 @@ public class SearchAndLoadDUnitTest extends CacheTestCase {
 
         }
         catch (CacheException ex) {
-          fail("While creating ACK region", ex);
+          Assert.fail("While creating ACK region", ex);
         }
       }
     });
@@ -181,10 +183,10 @@ public class SearchAndLoadDUnitTest extends CacheTestCase {
 //         System.err.println("Results is " + result.toString() + " Key is " + objectName.toString());
         }
         catch(CacheLoaderException cle) {
-          fail("While Get a value", cle);
+          Assert.fail("While Get a value", cle);
         }
         catch(TimeoutException te) {
-          fail("While Get a value", te);
+          Assert.fail("While Get a value", te);
         }
       }
 
@@ -259,7 +261,7 @@ public class SearchAndLoadDUnitTest extends CacheTestCase {
 
         Region region = createRegion(name,factory.create());
         region.create(objectName, null);
-        addExpectedException(exceptionString);
+        IgnoredException.addIgnoredException(exceptionString);
       }
     });
 
@@ -269,9 +271,9 @@ public class SearchAndLoadDUnitTest extends CacheTestCase {
         public void run2() {
           Region region = getCache().getRegion("root/"+name);
   
-          getLogWriter().info("t1 is invoking get("+objectName+")");
+          LogWriterUtils.getLogWriter().info("t1 is invoking get("+objectName+")");
           try {
-            getLogWriter().info("t1 retrieved value " + region.get(objectName));
+            LogWriterUtils.getLogWriter().info("t1 retrieved value " + region.get(objectName));
             fail("first load should have triggered an exception");
           } catch (RuntimeException e) {
             if (!e.getMessage().contains(exceptionString)) {
@@ -286,7 +288,7 @@ public class SearchAndLoadDUnitTest extends CacheTestCase {
           final Object[] valueHolder = new Object[1];
   
           // wait for vm1 to cause the loader to be invoked
-          getLogWriter().info("t2 is waiting for loader to be invoked by t1");
+          LogWriterUtils.getLogWriter().info("t2 is waiting for loader to be invoked by t1");
           try {
             loaderInvokedLatch.await(30, TimeUnit.SECONDS);
           } catch (InterruptedException e) {
@@ -325,7 +327,7 @@ public class SearchAndLoadDUnitTest extends CacheTestCase {
             fail("get() operation blocked for too long - test needs some work");
           }
           
-          getLogWriter().info("t2 is invoking get("+objectName+")");
+          LogWriterUtils.getLogWriter().info("t2 is invoking get("+objectName+")");
           Object value = valueHolder[0];
           if (value instanceof RuntimeException) {
             if ( ((Exception)value).getMessage().contains(exceptionString) ) {
@@ -334,7 +336,7 @@ public class SearchAndLoadDUnitTest extends CacheTestCase {
               throw (RuntimeException)value;
             }
           } else {
-            getLogWriter().info("t2 retrieved value " + value);
+            LogWriterUtils.getLogWriter().info("t2 retrieved value " + value);
             assertNotNull(value);
           }
         }
@@ -378,10 +380,10 @@ public class SearchAndLoadDUnitTest extends CacheTestCase {
           assertNull(result);
         }
         catch(CacheLoaderException cle) {
-          fail("While getting value for ACK region", cle);
+          Assert.fail("While getting value for ACK region", cle);
         }
         catch(TimeoutException te) {
-          fail("While getting value for ACK region", te);
+          Assert.fail("While getting value for ACK region", te);
         }
 
       }
@@ -392,7 +394,7 @@ public class SearchAndLoadDUnitTest extends CacheTestCase {
 
   public void testNetLoad()
   throws CacheException, InterruptedException {
-    invokeInEveryVM(DistributedTestCase.class,
+    Invoke.invokeInEveryVM(DistributedTestCase.class,
         "disconnectFromDS");
 
     Host host = Host.getHost(0);
@@ -426,7 +428,7 @@ public class SearchAndLoadDUnitTest extends CacheTestCase {
 
         }
         catch (CacheException ex) {
-          fail("While creating ACK region", ex);
+          Assert.fail("While creating ACK region", ex);
         }
       }
     });
@@ -449,7 +451,7 @@ public class SearchAndLoadDUnitTest extends CacheTestCase {
           createRegion(name,factory.create());
         }
         catch (CacheException ex) {
-          fail("While creating ACK region", ex);
+          Assert.fail("While creating ACK region", ex);
         }
       }
     });
@@ -464,7 +466,7 @@ public class SearchAndLoadDUnitTest extends CacheTestCase {
 
           }
           catch(CacheLoaderException cle) {
-            fail("While getting value for ACK region", cle);
+            Assert.fail("While getting value for ACK region", cle);
 
           }
 /*        catch(EntryNotFoundException enfe) {
@@ -472,7 +474,7 @@ public class SearchAndLoadDUnitTest extends CacheTestCase {
 
           }*/
           catch(TimeoutException te) {
-            fail("While getting value for ACK region", te);
+            Assert.fail("While getting value for ACK region", te);
 
           }
         }
@@ -487,7 +489,7 @@ public class SearchAndLoadDUnitTest extends CacheTestCase {
    */
   public void testEmptyNetLoad()
   throws CacheException, InterruptedException {
-    invokeInEveryVM(DistributedTestCase.class,
+    Invoke.invokeInEveryVM(DistributedTestCase.class,
         "disconnectFromDS");
 
     Host host = Host.getHost(0);
@@ -522,7 +524,7 @@ public class SearchAndLoadDUnitTest extends CacheTestCase {
           region.create(objectName,null);
         }
         catch (CacheException ex) {
-          fail("While creating ACK region", ex);
+          Assert.fail("While creating ACK region", ex);
         }
       }
     });
@@ -549,7 +551,7 @@ public class SearchAndLoadDUnitTest extends CacheTestCase {
             createRegion(name,factory.create());
           }
           catch (CacheException ex) {
-            fail("While creating ACK region", ex);
+            Assert.fail("While creating ACK region", ex);
           }
         }
       };
@@ -566,7 +568,7 @@ public class SearchAndLoadDUnitTest extends CacheTestCase {
 
           }
           catch(CacheLoaderException cle) {
-            fail("While getting value for ACK region", cle);
+            Assert.fail("While getting value for ACK region", cle);
 
           }
 /*        catch(EntryNotFoundException enfe) {
@@ -574,7 +576,7 @@ public class SearchAndLoadDUnitTest extends CacheTestCase {
 
           }*/
           catch(TimeoutException te) {
-            fail("While getting value for ACK region", te);
+            Assert.fail("While getting value for ACK region", te);
 
           }
         }
@@ -640,7 +642,7 @@ public class SearchAndLoadDUnitTest extends CacheTestCase {
 
         }
         catch (CacheException ex) {
-          fail("While creating ACK region", ex);
+          Assert.fail("While creating ACK region", ex);
         }
       }
     });
@@ -665,7 +667,7 @@ public class SearchAndLoadDUnitTest extends CacheTestCase {
           createRegion(name,factory.create());
         }
         catch (CacheException ex) {
-          fail("While creating ACK region", ex);
+          Assert.fail("While creating ACK region", ex);
         }
       }
     });
@@ -731,7 +733,7 @@ public class SearchAndLoadDUnitTest extends CacheTestCase {
 
         }
         catch (CacheException ex) {
-          fail("While creating ACK region", ex);
+          Assert.fail("While creating ACK region", ex);
         }
       }
     });
@@ -746,7 +748,7 @@ public class SearchAndLoadDUnitTest extends CacheTestCase {
             createRegion(name,factory.create());
           }
           catch (CacheException ex) {
-            fail("While creating ACK region", ex);
+            Assert.fail("While creating ACK region", ex);
           }
         }
     });
@@ -828,7 +830,7 @@ public class SearchAndLoadDUnitTest extends CacheTestCase {
 
         }
         catch (CacheException ex) {
-          fail("While creating replicated region", ex);
+          Assert.fail("While creating replicated region", ex);
         }
       }
     });
@@ -841,7 +843,7 @@ public class SearchAndLoadDUnitTest extends CacheTestCase {
             createRegion(name,factory.create());
           }
           catch (CacheException ex) {
-            fail("While creating empty region", ex);
+            Assert.fail("While creating empty region", ex);
           }
         }
     });
@@ -910,7 +912,7 @@ public class SearchAndLoadDUnitTest extends CacheTestCase {
           createRegion(name,factory.create());
         }
         catch (CacheException ex) {
-          fail("While creating empty region", ex);
+          Assert.fail("While creating empty region", ex);
         }
       }
     });
@@ -923,7 +925,7 @@ public class SearchAndLoadDUnitTest extends CacheTestCase {
             createRegion(name,factory.create());
           }
           catch (CacheException ex) {
-            fail("While creating empty region", ex);
+            Assert.fail("While creating empty region", ex);
           }
         }
     });
@@ -964,7 +966,7 @@ public class SearchAndLoadDUnitTest extends CacheTestCase {
 
         }
         catch (CacheException ex) {
-          fail("While creating replicated region", ex);
+          Assert.fail("While creating replicated region", ex);
         }
       }
     });

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/SlowRecDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/SlowRecDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/SlowRecDUnitTest.java
index 4d5cac2..899fbb1 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/SlowRecDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/SlowRecDUnitTest.java
@@ -39,10 +39,13 @@ import com.gemstone.gemfire.cache.util.CacheListenerAdapter;
 import com.gemstone.gemfire.distributed.internal.DM;
 import com.gemstone.gemfire.distributed.internal.DMStats;
 import com.gemstone.gemfire.internal.tcp.Connection;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.test.junit.categories.DistributedTest;
 
 /**
@@ -71,12 +74,10 @@ public class SlowRecDUnitTest extends CacheTestCase {
       super.setUp();
     }
   }
-  public void tearDown2() throws Exception {
-    try {
-      super.tearDown2();
-    } finally {
-      disconnectAllFromDS();
-    }
+  
+  @Override
+  protected final void postTearDownCacheTestCase() throws Exception {
+    disconnectAllFromDS();
   }
   
   //////////////////////  Test Methods  //////////////////////
@@ -164,7 +165,7 @@ public class SlowRecDUnitTest extends CacheTestCase {
                 return "waiting for callback";
               }
             };
-            DistributedTestCase.waitForCriterion(ev, 50 * 1000, 200, true);
+            Wait.waitForCriterion(ev, 50 * 1000, 200, true);
             assertEquals(lcb, lastCallback);
           }
           if (lastValue == null) {
@@ -177,7 +178,7 @@ public class SlowRecDUnitTest extends CacheTestCase {
                 return "waiting for key to become null";
               }
             };
-            DistributedTestCase.waitForCriterion(ev, 50 * 1000, 200, true);
+            Wait.waitForCriterion(ev, 50 * 1000, 200, true);
             assertEquals(null, r1.getEntry("key"));
           } else if (CHECK_INVALID.equals(lastValue)) {
             // should be invalid
@@ -195,7 +196,7 @@ public class SlowRecDUnitTest extends CacheTestCase {
                   return "waiting for invalidate";
                 }
               };
-              DistributedTestCase.waitForCriterion(ev, 50 * 1000, 200, true);
+              Wait.waitForCriterion(ev, 50 * 1000, 200, true);
 //              assertNotNull(re);
 //              assertEquals(null, value);
             }
@@ -234,7 +235,7 @@ public class SlowRecDUnitTest extends CacheTestCase {
         return "Waiting for async threads to disappear";
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 10 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 10 * 1000, 200, true);
   }
   
   private void forceQueuing(final Region r) throws CacheException {
@@ -251,7 +252,7 @@ public class SlowRecDUnitTest extends CacheTestCase {
         return "waiting for flushes to start";
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 2 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 2 * 1000, 200, true);
   }
   
   /**
@@ -294,7 +295,7 @@ public class SlowRecDUnitTest extends CacheTestCase {
           dequeuedMsgs = stats.getAsyncDequeuedMsgs();
           curQueuedMsgs = queuedMsgs - dequeuedMsgs;
         }
-        getLogWriter().info("After " + count + " " + " puts slowrec mode kicked in by queuing " + queuedMsgs + " for a total size of " + queueSize);
+        LogWriterUtils.getLogWriter().info("After " + count + " " + " puts slowrec mode kicked in by queuing " + queuedMsgs + " for a total size of " + queueSize);
       } finally {
         forceQueueFlush();
       }
@@ -307,9 +308,9 @@ public class SlowRecDUnitTest extends CacheTestCase {
         }
       };
       final long start = System.currentTimeMillis();
-      DistributedTestCase.waitForCriterion(ev, 30 * 1000, 200, true);
+      Wait.waitForCriterion(ev, 30 * 1000, 200, true);
       final long finish = System.currentTimeMillis();
-      getLogWriter().info("After " + (finish - start) + " ms async msgs where flushed. A total of " + stats.getAsyncDequeuedMsgs() + " were flushed. lastValue=" + lastValue);
+      LogWriterUtils.getLogWriter().info("After " + (finish - start) + " ms async msgs where flushed. A total of " + stats.getAsyncDequeuedMsgs() + " were flushed. lastValue=" + lastValue);
     
       checkLastValueInOtherVm(lastValue, null);
     }
@@ -392,7 +393,7 @@ public class SlowRecDUnitTest extends CacheTestCase {
 //                           + "  dequeuedMsgs=" + dequeuedMsgs
 //                           + " conflatedMsgs=" + conflatedMsgs);
     final long finish = System.currentTimeMillis();
-    getLogWriter().info("After " + (finish - start) + " ms async msgs where flushed. A total of " + (stats.getAsyncDequeuedMsgs()-intialDeQueuedMsgs) + " were flushed. Leaving a queue size of " + stats.getAsyncQueueSize() + ". The lastValue was " + lastValue);
+    LogWriterUtils.getLogWriter().info("After " + (finish - start) + " ms async msgs where flushed. A total of " + (stats.getAsyncDequeuedMsgs()-intialDeQueuedMsgs) + " were flushed. Leaving a queue size of " + stats.getAsyncQueueSize() + ". The lastValue was " + lastValue);
     
     checkLastValueInOtherVm(lastValue, null);
   }
@@ -436,8 +437,8 @@ public class SlowRecDUnitTest extends CacheTestCase {
       // give threads a chance to get queued
       try {Thread.sleep(100);} catch (InterruptedException ignore) {fail("interrupted");}
       forceQueueFlush();
-      DistributedTestCase.join(t, 2 * 1000, getLogWriter());
-      DistributedTestCase.join(t2, 2 * 1000, getLogWriter());
+      ThreadUtils.join(t, 2 * 1000);
+      ThreadUtils.join(t2, 2 * 1000);
       long endQueuedMsgs = stats.getAsyncQueuedMsgs();
       long endConflatedMsgs = stats.getAsyncConflatedMsgs();
       assertEquals(startConflatedMsgs, endConflatedMsgs);
@@ -480,7 +481,7 @@ public class SlowRecDUnitTest extends CacheTestCase {
 
     // now make sure update+destroy does not conflate
     final Object key = "key";      
-    getLogWriter().info("[testConflationSequence] about to force queuing");
+    LogWriterUtils.getLogWriter().info("[testConflationSequence] about to force queuing");
     forceQueuing(r);
 
     int count = 0;
@@ -492,7 +493,7 @@ public class SlowRecDUnitTest extends CacheTestCase {
 //    long dequeuedMsgs = stats.getAsyncDequeuedMsgs();
     int endCount = count+60;
 
-    getLogWriter().info("[testConflationSequence] about to build up queue");
+    LogWriterUtils.getLogWriter().info("[testConflationSequence] about to build up queue");
     long begin = System.currentTimeMillis();
     while (count < endCount) {
       value = "count=" + count;
@@ -515,14 +516,14 @@ public class SlowRecDUnitTest extends CacheTestCase {
     checkLastValueInOtherVm(lastValue, mylcb);
 
     // now make sure create+update+localDestroy does not conflate
-    getLogWriter().info("[testConflationSequence] force queuing create-update-destroy");
+    LogWriterUtils.getLogWriter().info("[testConflationSequence] force queuing create-update-destroy");
     forceQueuing(r);
     initialConflatedMsgs = stats.getAsyncConflatedMsgs();
 //    initialDequeuedMsgs = stats.getAsyncDequeuedMsgs();
 //    dequeuedMsgs = stats.getAsyncDequeuedMsgs();
     endCount = count + 40;
     
-    getLogWriter().info("[testConflationSequence] create-update-destroy");
+    LogWriterUtils.getLogWriter().info("[testConflationSequence] create-update-destroy");
     begin = System.currentTimeMillis();
     while (count < endCount) {
       value = "count=" + count;
@@ -542,7 +543,7 @@ public class SlowRecDUnitTest extends CacheTestCase {
     checkLastValueInOtherVm(lastValue, null);
 
     // now make sure update+invalidate does not conflate
-    getLogWriter().info("[testConflationSequence] force queuing update-invalidate");
+    LogWriterUtils.getLogWriter().info("[testConflationSequence] force queuing update-invalidate");
     forceQueuing(r);
     initialConflatedMsgs = stats.getAsyncConflatedMsgs();
 //    initialDequeuedMsgs = stats.getAsyncDequeuedMsgs();
@@ -553,7 +554,7 @@ public class SlowRecDUnitTest extends CacheTestCase {
 //    dequeuedMsgs = stats.getAsyncDequeuedMsgs();
     endCount = count + 40;
 
-    getLogWriter().info("[testConflationSequence] update-invalidate");
+    LogWriterUtils.getLogWriter().info("[testConflationSequence] update-invalidate");
     begin = System.currentTimeMillis();
     while (count < endCount) {
       value = "count=" + count;
@@ -568,14 +569,14 @@ public class SlowRecDUnitTest extends CacheTestCase {
     }
     assertEquals(initialConflatedMsgs, stats.getAsyncConflatedMsgs());
     forceQueueFlush();
-    getLogWriter().info("[testConflationSequence] assert other vm");
+    LogWriterUtils.getLogWriter().info("[testConflationSequence] assert other vm");
     checkLastValueInOtherVm(lastValue, null);
 
     r.destroy(key);
 
     // now make sure updates to a conflating region are conflated even while
     // updates to a non-conflating are not.
-    getLogWriter().info("[testConflationSequence] conflate & no-conflate regions");
+    LogWriterUtils.getLogWriter().info("[testConflationSequence] conflate & no-conflate regions");
     forceQueuing(r);
     final int initialAsyncSocketWrites = stats.getAsyncSocketWrites();
 //    initialDequeuedMsgs = stats.getAsyncDequeuedMsgs();
@@ -605,7 +606,7 @@ public class SlowRecDUnitTest extends CacheTestCase {
     endCount = count + 80;
 
     begin = System.currentTimeMillis();
-    getLogWriter().info("[testConflationSequence:DEBUG] count=" + count
+    LogWriterUtils.getLogWriter().info("[testConflationSequence:DEBUG] count=" + count
                         + " queuedMsgs=" + stats.getAsyncQueuedMsgs()
                         + " conflatedMsgs=" + stats.getAsyncConflatedMsgs()
                         + " dequeuedMsgs=" + stats.getAsyncDequeuedMsgs()
@@ -636,7 +637,7 @@ public class SlowRecDUnitTest extends CacheTestCase {
     }
 
     forceQueueFlush();
-    getLogWriter().info("[testConflationSequence] assert other vm");
+    LogWriterUtils.getLogWriter().info("[testConflationSequence] assert other vm");
     checkLastValueInOtherVm(lastValue, null);
   }
   /**
@@ -687,7 +688,7 @@ public class SlowRecDUnitTest extends CacheTestCase {
           fail("should have exceeded max-queue-size by now");
         }
       }
-      getLogWriter().info("After " + count + " " + VALUE_SIZE + " byte puts slowrec mode kicked in but the queue filled when its size reached " + queueSize + " with " + queuedMsgs + " msgs");
+      LogWriterUtils.getLogWriter().info("After " + count + " " + VALUE_SIZE + " byte puts slowrec mode kicked in but the queue filled when its size reached " + queueSize + " with " + queuedMsgs + " msgs");
       // make sure we lost a connection to vm0
       WaitCriterion ev = new WaitCriterion() {
         public boolean done() {
@@ -698,7 +699,7 @@ public class SlowRecDUnitTest extends CacheTestCase {
           return "waiting for connection loss";
         }
       };
-      DistributedTestCase.waitForCriterion(ev, 30 * 1000, 200, true);
+      Wait.waitForCriterion(ev, 30 * 1000, 200, true);
     }
     finally {
       forceQueueFlush();
@@ -759,7 +760,7 @@ public class SlowRecDUnitTest extends CacheTestCase {
           fail("should have exceeded async-queue-timeout by now");
         }
       }
-      getLogWriter().info("After " + count + " " + VALUE_SIZE + " byte puts slowrec mode kicked in but the queue filled when its size reached " + queueSize + " with " + queuedMsgs + " msgs");
+      LogWriterUtils.getLogWriter().info("After " + count + " " + VALUE_SIZE + " byte puts slowrec mode kicked in but the queue filled when its size reached " + queueSize + " with " + queuedMsgs + " msgs");
       // make sure we lost a connection to vm0
       WaitCriterion ev = new WaitCriterion() {
         public boolean done() {
@@ -772,7 +773,7 @@ public class SlowRecDUnitTest extends CacheTestCase {
           return "waiting for departure";
         }
       };
-      DistributedTestCase.waitForCriterion(ev, 2 * 1000, 200, true);
+      Wait.waitForCriterion(ev, 2 * 1000, 200, true);
     }
     finally {
       getCache().getLogger().info(removeExpected);
@@ -817,7 +818,7 @@ public class SlowRecDUnitTest extends CacheTestCase {
     public final Object CONTROL_LOCK = new Object();
     
     public void afterCreate(EntryEvent event) {
-      getLogWriter().info(event.getRegion().getName() + " afterCreate " + event.getKey());
+      LogWriterUtils.getLogWriter().info(event.getRegion().getName() + " afterCreate " + event.getKey());
       synchronized(this.CONTROL_LOCK) {
         if (event.getCallbackArgument() != null) {
           this.callbackArguments.add(
@@ -829,7 +830,7 @@ public class SlowRecDUnitTest extends CacheTestCase {
       processEvent(event);
     }
     public void afterUpdate(EntryEvent event) {
-      getLogWriter().info(event.getRegion().getName() + " afterUpdate " + event.getKey());
+      LogWriterUtils.getLogWriter().info(event.getRegion().getName() + " afterUpdate " + event.getKey());
       synchronized(this.CONTROL_LOCK) {
         if (event.getCallbackArgument() != null) {
           this.callbackArguments.add(
@@ -883,14 +884,14 @@ public class SlowRecDUnitTest extends CacheTestCase {
     }
     private void processSleep(EntryEvent event) {
       int sleepMs = ((Integer)event.getNewValue()).intValue();
-      getLogWriter().info("[processSleep] sleeping for " + sleepMs);
+      LogWriterUtils.getLogWriter().info("[processSleep] sleeping for " + sleepMs);
       try {
         Thread.sleep(sleepMs);
       } catch (InterruptedException ignore) {fail("interrupted");}
     }
     private void processWait(EntryEvent event) {
       int sleepMs = ((Integer)event.getNewValue()).intValue();
-      getLogWriter().info("[processWait] waiting for " + sleepMs);
+      LogWriterUtils.getLogWriter().info("[processWait] waiting for " + sleepMs);
       synchronized(this.CONTROL_LOCK) {
         try {
           this.CONTROL_LOCK.wait(sleepMs);
@@ -898,7 +899,7 @@ public class SlowRecDUnitTest extends CacheTestCase {
       }
     }
     private void processDisconnect(EntryEvent event) {
-      getLogWriter().info("[processDisconnect] disconnecting");
+      LogWriterUtils.getLogWriter().info("[processDisconnect] disconnecting");
       disconnectFromDS();
     }
   };
@@ -919,7 +920,7 @@ public class SlowRecDUnitTest extends CacheTestCase {
       throw e;
     }
     catch (Throwable t) {
-      getLogWriter().error("Encountered exception: ", t);
+      LogWriterUtils.getLogWriter().error("Encountered exception: ", t);
       throw t;
     }
     finally {
@@ -988,11 +989,11 @@ public class SlowRecDUnitTest extends CacheTestCase {
     });
     
     // put vm0 cache listener into wait
-    getLogWriter().info("[doTestMultipleRegionConflation] about to put vm0 into wait");
+    LogWriterUtils.getLogWriter().info("[doTestMultipleRegionConflation] about to put vm0 into wait");
     r1.put(KEY_WAIT, new Integer(millisToWait));
 
     // build up queue size
-    getLogWriter().info("[doTestMultipleRegionConflation] building up queue size...");
+    LogWriterUtils.getLogWriter().info("[doTestMultipleRegionConflation] building up queue size...");
     final Object key = "key";
     final int socketBufferSize = getSystem().getConfig().getSocketBufferSize();
     final int VALUE_SIZE = socketBufferSize*3;
@@ -1005,7 +1006,7 @@ public class SlowRecDUnitTest extends CacheTestCase {
       r1.put(key, value);
     }
     
-    getLogWriter().info("[doTestMultipleRegionConflation] After " + 
+    LogWriterUtils.getLogWriter().info("[doTestMultipleRegionConflation] After " + 
       count + " puts of size " + VALUE_SIZE + 
       " slowrec mode kicked in with queue size=" + stats.getAsyncQueueSize());
 
@@ -1063,7 +1064,7 @@ public class SlowRecDUnitTest extends CacheTestCase {
         CALLBACK_DESTROY, CALLBACK_CREATE, CALLBACK_UPDATE }; 
 
     // send notify to vm0
-    getLogWriter().info("[doTestMultipleRegionConflation] wake up vm0");
+    LogWriterUtils.getLogWriter().info("[doTestMultipleRegionConflation] wake up vm0");
     getOtherVm().invoke(new SerializableRunnable("Wake up other vm") {
       public void run() {
         synchronized(doTestMultipleRegionConflation_R1_Listener.CONTROL_LOCK) {
@@ -1073,7 +1074,7 @@ public class SlowRecDUnitTest extends CacheTestCase {
     });
     
     // wait for queue to be flushed
-    getLogWriter().info("[doTestMultipleRegionConflation] wait for vm0");
+    LogWriterUtils.getLogWriter().info("[doTestMultipleRegionConflation] wait for vm0");
     getOtherVm().invoke(new SerializableRunnable("Wait for other vm") {
       public void run() {
         try {
@@ -1092,12 +1093,12 @@ public class SlowRecDUnitTest extends CacheTestCase {
     });
     
     // assert values on both listeners
-    getLogWriter().info("[doTestMultipleRegionConflation] assert callback arguments");
+    LogWriterUtils.getLogWriter().info("[doTestMultipleRegionConflation] assert callback arguments");
     getOtherVm().invoke(new SerializableRunnable("Assert callback arguments") {
       public void run() {
         synchronized(doTestMultipleRegionConflation_R1_Listener.CONTROL_LOCK) {
-          getLogWriter().info("doTestMultipleRegionConflation_R1_Listener.callbackArguments=" + doTestMultipleRegionConflation_R1_Listener.callbackArguments);
-          getLogWriter().info("doTestMultipleRegionConflation_R1_Listener.callbackTypes=" + doTestMultipleRegionConflation_R1_Listener.callbackTypes);
+          LogWriterUtils.getLogWriter().info("doTestMultipleRegionConflation_R1_Listener.callbackArguments=" + doTestMultipleRegionConflation_R1_Listener.callbackArguments);
+          LogWriterUtils.getLogWriter().info("doTestMultipleRegionConflation_R1_Listener.callbackTypes=" + doTestMultipleRegionConflation_R1_Listener.callbackTypes);
           assertEquals(doTestMultipleRegionConflation_R1_Listener.callbackArguments.size(),
                        doTestMultipleRegionConflation_R1_Listener.callbackTypes.size());
           int i = 0;
@@ -1111,8 +1112,8 @@ public class SlowRecDUnitTest extends CacheTestCase {
           }
         }
         synchronized(doTestMultipleRegionConflation_R2_Listener.CONTROL_LOCK) {
-          getLogWriter().info("doTestMultipleRegionConflation_R2_Listener.callbackArguments=" + doTestMultipleRegionConflation_R2_Listener.callbackArguments);
-          getLogWriter().info("doTestMultipleRegionConflation_R2_Listener.callbackTypes=" + doTestMultipleRegionConflation_R2_Listener.callbackTypes);
+          LogWriterUtils.getLogWriter().info("doTestMultipleRegionConflation_R2_Listener.callbackArguments=" + doTestMultipleRegionConflation_R2_Listener.callbackArguments);
+          LogWriterUtils.getLogWriter().info("doTestMultipleRegionConflation_R2_Listener.callbackTypes=" + doTestMultipleRegionConflation_R2_Listener.callbackTypes);
           assertEquals(doTestMultipleRegionConflation_R2_Listener.callbackArguments.size(),
                        doTestMultipleRegionConflation_R2_Listener.callbackTypes.size());
           int i = 0;
@@ -1141,7 +1142,7 @@ public class SlowRecDUnitTest extends CacheTestCase {
       throw e;
     }
     catch (Throwable t) {
-      getLogWriter().error("Encountered exception: ", t);
+      LogWriterUtils.getLogWriter().error("Encountered exception: ", t);
       throw t;
     }
     finally {
@@ -1187,13 +1188,13 @@ public class SlowRecDUnitTest extends CacheTestCase {
     });
 
     // put vm0 cache listener into wait
-    getLogWriter().info("[testDisconnectCleanup] about to put vm0 into wait");
+    LogWriterUtils.getLogWriter().info("[testDisconnectCleanup] about to put vm0 into wait");
     int millisToWait = 1000 * 60 * 5; // 5 minutes
     r.put(KEY_WAIT, new Integer(millisToWait));
     r.put(KEY_DISCONNECT, KEY_DISCONNECT);
 
     // build up queue size
-    getLogWriter().info("[testDisconnectCleanup] building up queue size...");
+    LogWriterUtils.getLogWriter().info("[testDisconnectCleanup] building up queue size...");
     final Object key = "key";
     final int socketBufferSize = getSystem().getConfig().getSocketBufferSize();
     final int VALUE_SIZE = socketBufferSize*3;
@@ -1208,7 +1209,7 @@ public class SlowRecDUnitTest extends CacheTestCase {
       assertFalse(System.currentTimeMillis() >= abortMillis);
     }
     
-    getLogWriter().info("[testDisconnectCleanup] After " + 
+    LogWriterUtils.getLogWriter().info("[testDisconnectCleanup] After " + 
       count + " puts of size " + VALUE_SIZE + 
       " slowrec mode kicked in with queue size=" + stats.getAsyncQueueSize());
 
@@ -1221,11 +1222,11 @@ public class SlowRecDUnitTest extends CacheTestCase {
     assertTrue(stats.getAsyncQueuedMsgs() >= 10);
 
     while (stats.getAsyncQueues() < 1) {
-      pause(100);
+      Wait.pause(100);
       assertFalse(System.currentTimeMillis() >= abortMillis);
     }
     
-    getLogWriter().info("[testDisconnectCleanup] After " + 
+    LogWriterUtils.getLogWriter().info("[testDisconnectCleanup] After " + 
       count + " puts of size " + VALUE_SIZE + " queue size has reached " + 
       stats.getAsyncQueueSize() + " bytes and number of queues is " + 
       stats.getAsyncQueues() + ".");
@@ -1237,7 +1238,7 @@ public class SlowRecDUnitTest extends CacheTestCase {
     assertTrue(dm.getOtherDistributionManagerIds().size() > others.size());
     
     // send notify to vm0
-    getLogWriter().info("[testDisconnectCleanup] wake up vm0");
+    LogWriterUtils.getLogWriter().info("[testDisconnectCleanup] wake up vm0");
     getOtherVm().invoke(new SerializableRunnable("Wake up other vm") {
       public void run() {
         synchronized(doTestDisconnectCleanup_Listener.CONTROL_LOCK) {
@@ -1247,7 +1248,7 @@ public class SlowRecDUnitTest extends CacheTestCase {
     });
     
     // make sure we lost a connection to vm0
-    getLogWriter().info("[testDisconnectCleanup] wait for vm0 to disconnect");
+    LogWriterUtils.getLogWriter().info("[testDisconnectCleanup] wait for vm0 to disconnect");
     WaitCriterion ev = new WaitCriterion() {
       public boolean done() {
         return dm.getOtherDistributionManagerIds().size() <= others.size();
@@ -1256,11 +1257,11 @@ public class SlowRecDUnitTest extends CacheTestCase {
         return "waiting for disconnect";
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 2 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 2 * 1000, 200, true);
     assertEquals(others, dm.getOtherDistributionManagerIds());
     
     // check free memory... perform wait loop with System.gc
-    getLogWriter().info("[testDisconnectCleanup] wait for queue cleanup");
+    LogWriterUtils.getLogWriter().info("[testDisconnectCleanup] wait for queue cleanup");
     ev = new WaitCriterion() {
       public boolean done() {
         if (stats.getAsyncQueues() <= initialQueues) {
@@ -1273,7 +1274,7 @@ public class SlowRecDUnitTest extends CacheTestCase {
         return "waiting for queue cleanup";
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 2 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 2 * 1000, 200, true);
 //    getLogWriter().info("[testDisconnectCleanup] initialQueues=" + 
 //      initialQueues + " asyncQueues=" + stats.getAsyncQueues());
     assertEquals(initialQueues, stats.getAsyncQueues());
@@ -1295,7 +1296,7 @@ public class SlowRecDUnitTest extends CacheTestCase {
       throw e;
     }
     catch (Throwable t) {
-      getLogWriter().error("Encountered exception: ", t);
+      LogWriterUtils.getLogWriter().error("Encountered exception: ", t);
       throw t;
     }
     finally {
@@ -1343,12 +1344,12 @@ public class SlowRecDUnitTest extends CacheTestCase {
     });
 
     // put vm0 cache listener into wait
-    getLogWriter().info("[testPartialMessage] about to put vm0 into wait");
+    LogWriterUtils.getLogWriter().info("[testPartialMessage] about to put vm0 into wait");
     final int millisToWait = 1000 * 60 * 5; // 5 minutes
     r.put(KEY_WAIT, new Integer(millisToWait));
 
     // build up queue size
-    getLogWriter().info("[testPartialMessage] building up queue size...");
+    LogWriterUtils.getLogWriter().info("[testPartialMessage] building up queue size...");
     final Object key = "key";
     final int socketBufferSize = getSystem().getConfig().getSocketBufferSize();
     final int VALUE_SIZE = socketBufferSize*3;
@@ -1364,11 +1365,11 @@ public class SlowRecDUnitTest extends CacheTestCase {
     final int partialId = count;
     assertEquals(0, stats.getAsyncConflatedMsgs());
     
-    getLogWriter().info("[testPartialMessage] After " + 
+    LogWriterUtils.getLogWriter().info("[testPartialMessage] After " + 
       count + " puts of size " + VALUE_SIZE + 
       " slowrec mode kicked in with queue size=" + stats.getAsyncQueueSize());
 
-    pause(2000);
+    Wait.pause(2000);
       
     // conflate 10 times
     while (stats.getAsyncConflatedMsgs() < 10) {
@@ -1393,7 +1394,7 @@ public class SlowRecDUnitTest extends CacheTestCase {
     final int[] expectedArgs = { partialId, conflateId };
 
     // send notify to vm0
-    getLogWriter().info("[testPartialMessage] wake up vm0");
+    LogWriterUtils.getLogWriter().info("[testPartialMessage] wake up vm0");
     getOtherVm().invoke(new SerializableRunnable("Wake up other vm") {
       public void run() {
         synchronized(doTestPartialMessage_Listener.CONTROL_LOCK) {
@@ -1403,7 +1404,7 @@ public class SlowRecDUnitTest extends CacheTestCase {
     });
     
     // wait for queue to be flushed
-    getLogWriter().info("[testPartialMessage] wait for vm0");
+    LogWriterUtils.getLogWriter().info("[testPartialMessage] wait for vm0");
     getOtherVm().invoke(new SerializableRunnable("Wait for other vm") {
       public void run() {
         try {
@@ -1429,11 +1430,11 @@ public class SlowRecDUnitTest extends CacheTestCase {
     });
     
     // assert values on both listeners
-    getLogWriter().info("[testPartialMessage] assert callback arguments");
+    LogWriterUtils.getLogWriter().info("[testPartialMessage] assert callback arguments");
     getOtherVm().invoke(new SerializableRunnable("Assert callback arguments") {
       public void run() {
         synchronized(doTestPartialMessage_Listener.CONTROL_LOCK) {
-          getLogWriter().info("[testPartialMessage] " +
+          LogWriterUtils.getLogWriter().info("[testPartialMessage] " +
               "doTestPartialMessage_Listener.callbackArguments=" + 
               doTestPartialMessage_Listener.callbackArguments);
               

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/TXDistributedDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/TXDistributedDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/TXDistributedDUnitTest.java
index 2c065a1..7a306f0 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/TXDistributedDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/TXDistributedDUnitTest.java
@@ -79,11 +79,17 @@ import com.gemstone.gemfire.internal.cache.TXStateProxyImpl;
 import com.gemstone.gemfire.internal.cache.locks.TXLockBatch;
 import com.gemstone.gemfire.internal.cache.locks.TXLockService;
 import com.gemstone.gemfire.internal.cache.locks.TXLockServiceImpl;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 public class TXDistributedDUnitTest extends CacheTestCase {
   public TXDistributedDUnitTest(String name) {
@@ -107,19 +113,19 @@ public class TXDistributedDUnitTest extends CacheTestCase {
    * Test a remote grantor
    */
   public void testRemoteGrantor() throws Exception {
-    addExpectedException("killing members ds");
+    IgnoredException.addIgnoredException("killing members ds");
     final CacheTransactionManager txMgr = this.getCache().getCacheTransactionManager();
     final String rgnName = getUniqueName();
     Region rgn = getCache().createRegion(rgnName, getRegionAttributes());
     rgn.create("key", null);
 
-    invokeInEveryVM(new SerializableRunnable("testRemoteGrantor: initial configuration") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("testRemoteGrantor: initial configuration") {
         public void run() {
           try {
             Region rgn1 = getCache().createRegion(rgnName, getRegionAttributes());
             rgn1.put("key", "val0");
           } catch (CacheException e) {
-            fail("While creating region", e);
+            Assert.fail("While creating region", e);
           }
         }
       });
@@ -188,11 +194,11 @@ public class TXDistributedDUnitTest extends CacheTestCase {
               assertTrue(!TXLockService.getDTLS().isLockGrantor());
             }
           } catch (CacheException e) {
-            fail("While creating region", e);
+            Assert.fail("While creating region", e);
           }
         }
       };
-    invokeInEveryVM(remoteComm);
+    Invoke.invokeInEveryVM(remoteComm);
     // vm1.invoke(remoteComm);
     // vm2.invoke(remoteComm);
 
@@ -227,7 +233,7 @@ public class TXDistributedDUnitTest extends CacheTestCase {
             rgn2.create("key", null);
             rgn3.create("key", null);
           } catch (CacheException e) {
-            fail("While creating region", e);
+            Assert.fail("While creating region", e);
           }
         }
       };
@@ -461,7 +467,7 @@ public class TXDistributedDUnitTest extends CacheTestCase {
       });
     Region rgn = getCache().createRegion(rgnName, factory.create());
     
-    invokeInEveryVM(new SerializableRunnable("testDACKLoadedMessage: intial configuration") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("testDACKLoadedMessage: intial configuration") {
         public void run() {
           try {
             AttributesFactory factory2 = new AttributesFactory();
@@ -471,7 +477,7 @@ public class TXDistributedDUnitTest extends CacheTestCase {
             factory2.setMirrorType(MirrorType.KEYS);
             getCache().createRegion(rgnName, factory2.create());
           } catch (CacheException e) {
-            fail("While creating region", e);
+            Assert.fail("While creating region", e);
           }
         }
       });
@@ -482,7 +488,7 @@ public class TXDistributedDUnitTest extends CacheTestCase {
     txMgr.commit();
     assertEquals("val1", rgn.getEntry("key1").getValue());
 
-    invokeInEveryVM(new SerializableRunnable("testDACKLoadedMessage: confirm standard case") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("testDACKLoadedMessage: confirm standard case") {
         public void run() {
           Region rgn1 = getCache().getRegion(rgnName);
           assertEquals("val1", rgn1.getEntry("key1").getValue());
@@ -495,7 +501,7 @@ public class TXDistributedDUnitTest extends CacheTestCase {
     txMgr.commit();
     assertEquals("val2", rgn.getEntry("key2").getValue());
     
-    invokeInEveryVM(new SerializableRunnable("testDACKLoadedMessage: confirm standard case") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("testDACKLoadedMessage: confirm standard case") {
         public void run() {
           Region rgn1 = getCache().getRegion(rgnName);
           assertEquals("val2", rgn1.getEntry("key2").getValue());
@@ -508,7 +514,7 @@ public class TXDistributedDUnitTest extends CacheTestCase {
     rgn.get("key4", new Integer(4));
     txMgr.commit();
 
-    invokeInEveryVM(new SerializableRunnable("testDACKLoadedMessage: confirm standard case") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("testDACKLoadedMessage: confirm standard case") {
         public void run() {
           Region rgn1 = getCache().getRegion(rgnName);
           assertEquals("val3", rgn1.getEntry("key3").getValue());
@@ -521,12 +527,12 @@ public class TXDistributedDUnitTest extends CacheTestCase {
   @Override
   public Properties getDistributedSystemProperties() {
     Properties p = super.getDistributedSystemProperties();
-    p.put("log-level", getDUnitLogLevel());
+    p.put("log-level", LogWriterUtils.getDUnitLogLevel());
     return p;
   }
 
   public void testHighAvailabilityFeatures() throws Exception {
-    addExpectedException("DistributedSystemDisconnectedException");
+    IgnoredException.addIgnoredException("DistributedSystemDisconnectedException");
 //    final CacheTransactionManager txMgr = this.getCache().getCacheTransactionManager();
 //    final TXManagerImpl txMgrImpl = (TXManagerImpl) txMgr;
     final String rgnName = getUniqueName();
@@ -534,7 +540,7 @@ public class TXDistributedDUnitTest extends CacheTestCase {
     factory.setScope(Scope.DISTRIBUTED_ACK);
     factory.setEarlyAck(false);
     Region rgn = getCache().createRegion(rgnName, factory.create());
-    invokeInEveryVM(new SerializableRunnable("testHighAvailabilityFeatures: intial region configuration") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("testHighAvailabilityFeatures: intial region configuration") {
         public void run() {
           try {
             AttributesFactory factory2 = new AttributesFactory();
@@ -543,7 +549,7 @@ public class TXDistributedDUnitTest extends CacheTestCase {
             factory2.setDataPolicy(DataPolicy.REPLICATE);
             getCache().createRegion(rgnName, factory2.create());
           } catch (CacheException e) {
-            fail("While creating region", e);
+            Assert.fail("While creating region", e);
           }
         }
       });
@@ -615,7 +621,7 @@ public class TXDistributedDUnitTest extends CacheTestCase {
               factory2.setDataPolicy(DataPolicy.REPLICATE);
               rgn1 = getCache().createRegion(rgnName, factory2.create());
             } catch (CacheException e) {
-              fail("While creating region", e);
+              Assert.fail("While creating region", e);
             }
           }
           Region.Entry re = rgn1.getEntry("key0");
@@ -626,7 +632,7 @@ public class TXDistributedDUnitTest extends CacheTestCase {
           assertEquals("val1_0", re.getValue());
         }
       };
-    invokeInEveryVM(noChangeValidator);
+    Invoke.invokeInEveryVM(noChangeValidator);
 
     // Test that there is no commit after sending to all recipients
     // but prior to sending the "commit process" message
@@ -667,7 +673,7 @@ public class TXDistributedDUnitTest extends CacheTestCase {
         }
       });
     // 3. verify on all VMs, including the origin, that the transaction was not committed
-    invokeInEveryVM(noChangeValidator);
+    Invoke.invokeInEveryVM(noChangeValidator);
 
     // Test commit success upon a single commit process message received.
     originVM.invoke(new SerializableRunnable("Flakey DuringIndividualCommitProcess Transaction") {
@@ -727,7 +733,7 @@ public class TXDistributedDUnitTest extends CacheTestCase {
               factory2.setDataPolicy(DataPolicy.REPLICATE);
               rgn1 = getCache().createRegion(rgnName, factory2.create());
             } catch (CacheException e) {
-              fail("While creating region", e);
+              Assert.fail("While creating region", e);
             }
           }
           long giveUp = System.currentTimeMillis() + 10000;
@@ -748,7 +754,7 @@ public class TXDistributedDUnitTest extends CacheTestCase {
           }
         }
       };
-    invokeInEveryVM(nonSoloChangeValidator1);
+    Invoke.invokeInEveryVM(nonSoloChangeValidator1);
 
     // Verify successful solo region commit after duringIndividualSend
     // (same as afterIndividualSend).
@@ -766,7 +772,7 @@ public class TXDistributedDUnitTest extends CacheTestCase {
             rgn1.put("soloKey0", "soloVal0_0");
             rgn1.put("soloKey1", "soloVal1_0");
           } catch (CacheException e) {
-            fail("While creating region", e);
+            Assert.fail("While creating region", e);
           }
         }
       };
@@ -829,7 +835,7 @@ public class TXDistributedDUnitTest extends CacheTestCase {
               factory2.setDataPolicy(DataPolicy.REPLICATE);
               soloRgn = getCache().createRegion(soloRegionName, factory2.create());
             } catch (CacheException e) {
-              fail("While creating region ", e);
+              Assert.fail("While creating region ", e);
             }
           }
           Region.Entry re = soloRgn.getEntry("soloKey0");
@@ -843,7 +849,7 @@ public class TXDistributedDUnitTest extends CacheTestCase {
     originVM.invoke(soloRegionCommitValidator1);
     soloRegionVM.invoke(soloRegionCommitValidator1);
     // verify no change in nonSolo region, re-establish region in originVM
-    invokeInEveryVM(nonSoloChangeValidator1);
+    Invoke.invokeInEveryVM(nonSoloChangeValidator1);
 
     // Verify no commit for failed send (afterIndividualSend) for solo
     // Region combined with non-solo Region
@@ -893,7 +899,7 @@ public class TXDistributedDUnitTest extends CacheTestCase {
     // Origin and Solo Region VM should be the same as last validation
     originVM.invoke(soloRegionCommitValidator1);
     soloRegionVM.invoke(soloRegionCommitValidator1);
-    invokeInEveryVM(nonSoloChangeValidator1);
+    Invoke.invokeInEveryVM(nonSoloChangeValidator1);
 
     // Verify commit after sending a single
     // (duringIndividualCommitProcess) commit process for solo Region
@@ -959,7 +965,7 @@ public class TXDistributedDUnitTest extends CacheTestCase {
               factory2.setDataPolicy(DataPolicy.REPLICATE);
               soloRgn = getCache().createRegion(soloRegionName, factory2.create());
             } catch (CacheException e) {
-              fail("While creating region ", e);
+              Assert.fail("While creating region ", e);
             }
           }
           Region.Entry re = soloRgn.getEntry("soloKey0");
@@ -985,7 +991,7 @@ public class TXDistributedDUnitTest extends CacheTestCase {
               factory2.setDataPolicy(DataPolicy.REPLICATE);
               rgn1 = getCache().createRegion(rgnName, factory2.create());
             } catch (CacheException e) {
-              fail("While creating region", e);
+              Assert.fail("While creating region", e);
             }
           }
           Region.Entry re = rgn1.getEntry("key0");
@@ -996,7 +1002,7 @@ public class TXDistributedDUnitTest extends CacheTestCase {
           assertEquals("val1_5", re.getValue());
         }
       };
-    invokeInEveryVM(nonSoloChangeValidator2);
+    Invoke.invokeInEveryVM(nonSoloChangeValidator2);
   }
   
   /** 
@@ -1110,7 +1116,7 @@ public class TXDistributedDUnitTest extends CacheTestCase {
           Region rgn1 = getCache().createRegion(rgnName, getRegionAttributes());
           rgn1.create("key", null);
         } catch (CacheException e) {
-          fail("While creating region", e);
+          Assert.fail("While creating region", e);
         }
       }
     };
@@ -1217,7 +1223,7 @@ public class TXDistributedDUnitTest extends CacheTestCase {
           factory.setDataPolicy(DataPolicy.REPLICATE);
           getCache().createRegion(rgnName, factory.create());
         } catch (CacheException e) {
-          fail("While creating region", e);
+          Assert.fail("While creating region", e);
         }
       }
     });
@@ -1409,9 +1415,9 @@ public class TXDistributedDUnitTest extends CacheTestCase {
         }
       }
     };
-    ExpectedException ee = null;
+    IgnoredException ee = null;
     try {
-      ee = addExpectedException(DiskAccessException.class.getName() + "|" +
+      ee = IgnoredException.addIgnoredException(DiskAccessException.class.getName() + "|" +
           CommitIncompleteException.class.getName() + "|" +
           CommitReplyException.class.getName());
       origin.invoke(doTransaction);
@@ -1442,7 +1448,7 @@ public class TXDistributedDUnitTest extends CacheTestCase {
       @Override
       public void run2() {
         final Cache c = getCache();
-        waitForCriterion(new WaitCriterion() {
+        Wait.waitForCriterion(new WaitCriterion() {
           @Override
           public boolean done() {
             return c.getRegion(rgnName1) == null;
@@ -1508,7 +1514,7 @@ public class TXDistributedDUnitTest extends CacheTestCase {
     };
     origin.invoke(assertNoContent);
     } finally {
-      invokeInEveryVM(new SerializableCallable() {
+      Invoke.invokeInEveryVM(new SerializableCallable() {
         @Override
         public Object call() throws Exception {
           TXManagerImpl.ALLOW_PERSISTENT_TRANSACTIONS = false;

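All of the hunks above apply one mechanical substitution: helpers that the tests previously inherited from DistributedTestCase are now invoked as statics on dedicated utility classes (LogWriterUtils, Wait, Invoke, IgnoredException, Assert). A minimal sketch of the new call sites is collected below for reference; the enclosing class and method are illustrative only, while each utility call mirrors a replacement visible in the diffs above.

import com.gemstone.gemfire.test.dunit.Assert;
import com.gemstone.gemfire.test.dunit.IgnoredException;
import com.gemstone.gemfire.test.dunit.Invoke;
import com.gemstone.gemfire.test.dunit.LogWriterUtils;
import com.gemstone.gemfire.test.dunit.SerializableRunnable;
import com.gemstone.gemfire.test.dunit.Wait;
import com.gemstone.gemfire.test.dunit.WaitCriterion;

// Illustrative sketch only; not part of the commit above.
public class StaticUtilityMigrationSketch {

  public void exerciseMigratedCalls() {
    // was: getLogWriter().info(...)
    LogWriterUtils.getLogWriter().info("[sketch] building up queue");

    // was: addExpectedException("killing members ds")
    IgnoredException.addIgnoredException("killing members ds");

    // was: pause(100)
    Wait.pause(100);

    // was: waitForCriterion(ev, 30 * 1000, 200, true)
    WaitCriterion ev = new WaitCriterion() {
      public boolean done() {
        return true; // trivial criterion, for illustration only
      }
      public String description() {
        return "waiting for nothing in particular";
      }
    };
    Wait.waitForCriterion(ev, 30 * 1000, 200, true);

    // was: invokeInEveryVM(new SerializableRunnable(...) { ... })
    Invoke.invokeInEveryVM(new SerializableRunnable("sketch") {
      public void run() {
        LogWriterUtils.getLogWriter().info("[sketch] runs in every VM");
      }
    });

    // was: fail("While creating region", e)  -- the two-argument fail
    try {
      throw new IllegalStateException("simulated failure");
    } catch (IllegalStateException e) {
      Assert.fail("While creating region", e);
    }
  }
}
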
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/TXOrderDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/TXOrderDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/TXOrderDUnitTest.java
index 6f42483..a253f09 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/TXOrderDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/TXOrderDUnitTest.java
@@ -52,6 +52,7 @@ import com.gemstone.gemfire.cache.util.TransactionListenerAdapter;
 import com.gemstone.gemfire.distributed.DistributedMember;
 import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -186,7 +187,7 @@ public class TXOrderDUnitTest extends CacheTestCase {
         af.addCacheListener(cl1);
         CacheLoader cl = new CacheLoader() {
           public Object load(LoaderHelper helper) throws CacheLoaderException {
-            getLogWriter().info("Loading value:"+helper.getKey()+"_value");
+            LogWriterUtils.getLogWriter().info("Loading value:"+helper.getKey()+"_value");
             return helper.getKey()+"_value";
           }
           public void close() {
@@ -205,7 +206,7 @@ public class TXOrderDUnitTest extends CacheTestCase {
         af.setScope(Scope.DISTRIBUTED_ACK);
         CacheListener cl1 = new CacheListenerAdapter() {
           public void afterCreate(EntryEvent e) {
-            getLogWriter().info("op:"+e.getOperation().toString());
+            LogWriterUtils.getLogWriter().info("op:"+e.getOperation().toString());
             assertTrue(!e.getOperation().isLocalLoad());
           }
         };
@@ -399,7 +400,7 @@ public class TXOrderDUnitTest extends CacheTestCase {
     
     SerializableCallable createRegion = new SerializableCallable() {
       public Object call() throws Exception {
-        getCache().createRegionFactory(RegionShortcut.REPLICATE).create(testName);
+        getCache().createRegionFactory(RegionShortcut.REPLICATE).create(getTestMethodName());
         return null;
       }
     };
@@ -409,7 +410,7 @@ public class TXOrderDUnitTest extends CacheTestCase {
     
     vm1.invoke(new SerializableCallable() {
       public Object call() throws Exception {
-        Region r = getCache().getRegion(testName);
+        Region r = getCache().getRegion(getTestMethodName());
         r.put("ikey", "value");
         getCache().getCacheTransactionManager().begin();
         r.put("key1", new byte[20]);
@@ -421,7 +422,7 @@ public class TXOrderDUnitTest extends CacheTestCase {
     
     vm2.invoke(new SerializableCallable() {
       public Object call() throws Exception {
-        Region r = getCache().getRegion(testName);
+        Region r = getCache().getRegion(getTestMethodName());
         Object v = r.get("key1");
         assertNotNull(v);
         assertTrue(v instanceof byte[]);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/TXRestrictionsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/TXRestrictionsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/TXRestrictionsDUnitTest.java
index 14c0bca..d1ec415 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/TXRestrictionsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/TXRestrictionsDUnitTest.java
@@ -37,6 +37,8 @@ import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.internal.OSProcess;
+import com.gemstone.gemfire.test.dunit.Assert;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 
 public class TXRestrictionsDUnitTest extends CacheTestCase {
@@ -72,13 +74,13 @@ public class TXRestrictionsDUnitTest extends CacheTestCase {
     final CacheTransactionManager txMgr = this.getCache().getCacheTransactionManager();
     final String misConfigRegionName = getUniqueName();
     Region misConfigRgn = getCache().createRegion(misConfigRegionName, getDiskRegionAttributes());
-    invokeInEveryVM(new SerializableRunnable("testPersistentRestriction: Illegal Region Configuration") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("testPersistentRestriction: Illegal Region Configuration") {
         public void run() {
           try {
             getCache().createRegion(misConfigRegionName, getDiskRegionAttributes());
             // rgn1.put("misConfigKey", "oldmisConfigVal");
           } catch (CacheException e) {
-            fail("While creating region", e);
+            Assert.fail("While creating region", e);
           }
         }
       });

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/TestCacheCallback.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/TestCacheCallback.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/TestCacheCallback.java
index 118ebca..b28089a 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/TestCacheCallback.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/TestCacheCallback.java
@@ -17,8 +17,8 @@
 package com.gemstone.gemfire.cache30;
 
 import com.gemstone.gemfire.cache.*;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * An abstract superclass of implementation of GemFire cache callbacks
@@ -69,7 +69,7 @@ public abstract class TestCacheCallback implements CacheCallback {
           return "listener was never invoked";
         }
       };
-      DistributedTestCase.waitForCriterion(ev, timeoutMs, interval, true);
+      Wait.waitForCriterion(ev, timeoutMs, interval, true);
     }
     return wasInvoked();
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/DistributedMemberDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/DistributedMemberDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/DistributedMemberDUnitTest.java
index 263ccf9..3dc6d75 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/DistributedMemberDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/DistributedMemberDUnitTest.java
@@ -31,6 +31,7 @@ import com.gemstone.gemfire.distributed.internal.DM;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
 import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedMember;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
@@ -407,7 +408,7 @@ public class DistributedMemberDUnitTest extends DistributedTestCase {
           assertTrue("Expected" + expected + " got " + members, members.containsAll(expected));
           assertEquals(4, members.size());
         } catch (UnknownHostException e) {
-          fail("Unable to get IpAddress", e);
+          Assert.fail("Unable to get IpAddress", e);
         }
       }
     });

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/DistributedSystemDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/DistributedSystemDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/DistributedSystemDUnitTest.java
index 3975133..fcaaa2d 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/DistributedSystemDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/DistributedSystemDUnitTest.java
@@ -45,7 +45,9 @@ import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.internal.SocketCreator;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -256,7 +258,7 @@ public class DistributedSystemDUnitTest extends DistributedTestCase {
   public void testSpecificTcpPort() throws Exception {
     Properties config = new Properties();
     int tcpPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    config.put("locators", "localhost["+getDUnitLocatorPort()+"]");
+    config.put("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
     config.setProperty("tcp-port", String.valueOf(tcpPort));
     system = (InternalDistributedSystem)DistributedSystem.connect(config);
     DistributionManager dm = (DistributionManager)system.getDistributionManager();
@@ -284,10 +286,10 @@ public class DistributedSystemDUnitTest extends DistributedTestCase {
     if (loopback != null) {
       Properties config = new Properties();
       config.put(DistributionConfig.MCAST_PORT_NAME, "0");
-      String locators = InetAddress.getLocalHost().getHostName()+":"+getDUnitLocatorPort();
+      String locators = InetAddress.getLocalHost().getHostName()+":"+DistributedTestUtils.getDUnitLocatorPort();
       config.put(DistributionConfig.LOCATORS_NAME, locators);
       config.setProperty(DistributionConfig.BIND_ADDRESS_NAME, loopback.getHostAddress());
-      getLogWriter().info("attempting to connect with " + loopback +" and locators=" + locators);
+      LogWriterUtils.getLogWriter().info("attempting to connect with " + loopback +" and locators=" + locators);
       try {
         system = (InternalDistributedSystem)DistributedSystem.connect(config);
         system.disconnect();
@@ -303,7 +305,7 @@ public class DistributedSystemDUnitTest extends DistributedTestCase {
   public void testUDPPortRange() throws Exception {
     Properties config = new Properties();
     int unicastPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    config.put("locators", "localhost["+getDUnitLocatorPort()+"]");
+    config.put("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
     // Minimum 3 ports required in range for UDP, FD_SOCK and TcpConduit.
     config.setProperty(DistributionConfig.MEMBERSHIP_PORT_RANGE_NAME, 
         ""+unicastPort+"-"+(unicastPort+2)); 
@@ -317,7 +319,7 @@ public class DistributedSystemDUnitTest extends DistributedTestCase {
 
   public void testMembershipPortRangeWithExactThreeValues() throws Exception {
     Properties config = new Properties();
-    config.setProperty("locators", "localhost["+getDUnitLocatorPort()+"]");
+    config.setProperty("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
     config.setProperty(DistributionConfig.MEMBERSHIP_PORT_RANGE_NAME, ""
         + (DistributionConfig.DEFAULT_MEMBERSHIP_PORT_RANGE[1] - 2) + "-"
         + (DistributionConfig.DEFAULT_MEMBERSHIP_PORT_RANGE[1]));

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/HostedLocatorsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/HostedLocatorsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/HostedLocatorsDUnitTest.java
index af241c1..fe2202f 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/HostedLocatorsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/HostedLocatorsDUnitTest.java
@@ -55,7 +55,8 @@ public class HostedLocatorsDUnitTest extends DistributedTestCase {
     disconnectAllFromDS();
   }
   
-  public void tearDown2() throws Exception {
+  @Override
+  protected final void preTearDown() throws Exception {
     disconnectAllFromDS();
   }
   


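The HostedLocatorsDUnitTest hunk just above also shows the second pattern in this change: per-test cleanup moves out of a tearDown2() override and into the preTearDown() hook (the ResourceManagerWithQueryMonitorDUnitTest diff immediately below uses the analogous preTearDownClientServerTestCase()). A minimal sketch of the new shape, assuming the framework invokes the hook before its own teardown; the class name and constructor are illustrative.

import com.gemstone.gemfire.test.dunit.DistributedTestCase;

// Illustrative sketch only; not part of the commit above.
public class PreTearDownHookSketch extends DistributedTestCase {

  public PreTearDownHookSketch(String name) {
    super(name); // JUnit 3 style constructor, as in the DUnit tests in this commit
  }

  @Override
  protected final void preTearDown() throws Exception {
    // was: public void tearDown2() throws Exception { disconnectAllFromDS(); }
    disconnectAllFromDS();
  }
}
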
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/ResourceManagerWithQueryMonitorDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/ResourceManagerWithQueryMonitorDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/ResourceManagerWithQueryMonitorDUnitTest.java
index 372111c..85ae9aa 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/ResourceManagerWithQueryMonitorDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/ResourceManagerWithQueryMonitorDUnitTest.java
@@ -66,9 +66,13 @@ import com.gemstone.gemfire.internal.cache.control.ResourceListener;
 import com.gemstone.gemfire.internal.cache.control.TestMemoryThresholdListener;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 public class ResourceManagerWithQueryMonitorDUnitTest extends ClientServerTestCase {
@@ -84,16 +88,15 @@ public class ResourceManagerWithQueryMonitorDUnitTest extends ClientServerTestCa
   @Override
   public void setUp() throws Exception {
     super.setUp();
-    invokeInEveryVM(this.setHeapMemoryMonitorTestMode);
-    addExpectedException("above heap critical threshold");
-    addExpectedException("below heap critical threshold");
+    Invoke.invokeInEveryVM(this.setHeapMemoryMonitorTestMode);
+    IgnoredException.addIgnoredException("above heap critical threshold");
+    IgnoredException.addIgnoredException("below heap critical threshold");
   }
   
   @Override
-  public void tearDown2() throws Exception {
-    invokeInEveryVM(resetQueryMonitor);
-    invokeInEveryVM(resetResourceManager);
-    super.tearDown2();
+  protected void preTearDownClientServerTestCase() throws Exception {
+    Invoke.invokeInEveryVM(resetQueryMonitor);
+    Invoke.invokeInEveryVM(resetResourceManager);
   }
 
   private SerializableCallable setHeapMemoryMonitorTestMode = new SerializableCallable() {
@@ -723,7 +726,7 @@ public class ResourceManagerWithQueryMonitorDUnitTest extends ClientServerTestCa
     //unless otherwise configured
     releaseHook(server);
     
-    DistributedTestCase.join(queryExecution, 60000, getLogWriter());
+    ThreadUtils.join(queryExecution, 60000);
     //Make sure no exceptions were thrown during query testing
     try {
       assertEquals(0, queryExecution.getResult());
@@ -1020,7 +1023,7 @@ public class ResourceManagerWithQueryMonitorDUnitTest extends ClientServerTestCa
         getSystem(props);
         
         final ClientCacheFactory ccf = new ClientCacheFactory(props);
-        ccf.addPoolServer(getServerHostName(server.getHost()), port);
+        ccf.addPoolServer(NetworkUtils.getServerHostName(server.getHost()), port);
         ClientCache cache = (ClientCache)getClientCache(ccf);
       }
     });
@@ -1035,7 +1038,7 @@ public class ResourceManagerWithQueryMonitorDUnitTest extends ClientServerTestCa
         getSystem(props);
         
         PoolFactory pf = PoolManager.createFactory();
-        pf.addServer(getServerHostName(server.getHost()), port);
+        pf.addServer(NetworkUtils.getServerHostName(server.getHost()), port);
         pf.create("pool1");
         
         AttributesFactory af = new AttributesFactory();
@@ -1057,7 +1060,7 @@ public class ResourceManagerWithQueryMonitorDUnitTest extends ClientServerTestCa
 
   protected Properties getServerProperties(boolean disableQueryMonitorForMemory, int queryTimeout) {
     Properties p = new Properties();
-    p.setProperty(DistributionConfig.LOCATORS_NAME, "localhost["+getDUnitLocatorPort()+"]");
+    p.setProperty(DistributionConfig.LOCATORS_NAME, "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
     return p;
   }
   

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/SelectStarQueryDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/SelectStarQueryDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/SelectStarQueryDUnitTest.java
index b943aca..90ee7f7 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/SelectStarQueryDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/SelectStarQueryDUnitTest.java
@@ -40,7 +40,10 @@ import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.VMCachedDeserializable;
 import com.gemstone.gemfire.pdx.PdxInstance;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -116,9 +119,9 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
       @Override
       public Object call() throws Exception {
         ClientCacheFactory cf = new ClientCacheFactory();
-        cf.addPoolServer(getServerHostName(server1.getHost()), port1);
-        cf.addPoolServer(getServerHostName(server2.getHost()), port2);
-        cf.addPoolServer(getServerHostName(server3.getHost()), port3);
+        cf.addPoolServer(NetworkUtils.getServerHostName(server1.getHost()), port1);
+        cf.addPoolServer(NetworkUtils.getServerHostName(server2.getHost()), port2);
+        cf.addPoolServer(NetworkUtils.getServerHostName(server3.getHost()), port3);
         ClientCache cache = getClientCache(cf);
         cache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY)
             .create(regName);
@@ -142,14 +145,14 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
     client.invoke(new SerializableCallable("Query") {
       @Override
       public Object call() throws Exception {
-        getLogWriter().info("Querying remotely from client");
+        LogWriterUtils.getLogWriter().info("Querying remotely from client");
         QueryService localQS = null;
         QueryService remoteQS = null;
         try {
           localQS = ((ClientCache) getCache()).getLocalQueryService();
           remoteQS = ((ClientCache) getCache()).getQueryService();
         } catch (Exception e) {
-          fail("Exception getting query service ", e);
+          Assert.fail("Exception getting query service ", e);
         }
         SelectResults[][] sr = new SelectResults[1][2];
         SelectResults res = null;
@@ -161,7 +164,7 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
             sr[0][1] = res;
             CacheUtils.compareResultsOfWithAndWithoutIndex(sr);
           } catch (Exception e) {
-            fail("Error executing query: " + queries[i], e);
+            Assert.fail("Error executing query: " + queries[i], e);
           }
           assertEquals(resultSize[i], res.size());
           if (i == 3) {
@@ -215,14 +218,14 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
         try {
           qs = getCache().getQueryService();
         } catch (Exception e) {
-          fail("Exception getting query service ", e);
+          Assert.fail("Exception getting query service ", e);
         }
         SelectResults res = null;
         for (int i = 0; i < queries.length; i++) {
           try {
             res = (SelectResults) qs.newQuery(queries[i]).execute();
           } catch (Exception e) {
-            fail("Error executing query: " + queries[i], e);
+            Assert.fail("Error executing query: " + queries[i], e);
           }
           assertEquals(resultSize[i], res.size());
           if (i == 3) {
@@ -288,7 +291,7 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
       @Override
       public Object call() throws Exception {
         ClientCacheFactory cf = new ClientCacheFactory();
-        cf.addPoolServer(getServerHostName(server1.getHost()), port1);
+        cf.addPoolServer(NetworkUtils.getServerHostName(server1.getHost()), port1);
         ClientCache cache = getClientCache(cf);
         cache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY)
             .create(regName);
@@ -316,14 +319,14 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
     client.invoke(new SerializableCallable("Query") {
       @Override
       public Object call() throws Exception {
-        getLogWriter().info("Querying remotely from client");
+        LogWriterUtils.getLogWriter().info("Querying remotely from client");
         QueryService localQS = null;
         QueryService remoteQS = null;
         try {
           localQS = ((ClientCache) getCache()).getLocalQueryService();
           remoteQS = ((ClientCache) getCache()).getQueryService();
         } catch (Exception e) {
-          fail("Exception getting query service ", e);
+          Assert.fail("Exception getting query service ", e);
         }
         SelectResults res = null;
         SelectResults[][] sr = new SelectResults[1][2];
@@ -338,7 +341,7 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
             sr[0][1] = res;
             CacheUtils.compareResultsOfWithAndWithoutIndex(sr);
           } catch (Exception e) {
-            fail("Error executing query: " + multipleRegionQueries[i], e);
+            Assert.fail("Error executing query: " + multipleRegionQueries[i], e);
           }
           assertEquals(resultSize2[i], res.size());
           if (i == 4) {
@@ -395,7 +398,7 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
         try {
           qs = getCache().getQueryService();
         } catch (Exception e) {
-          fail("Exception getting query service ", e);
+          Assert.fail("Exception getting query service ", e);
         }
         SelectResults res = null;
         for (int i = 0; i < multipleRegionQueries.length; i++) {
@@ -403,7 +406,7 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
             res = (SelectResults) qs.newQuery(multipleRegionQueries[i])
                 .execute();
           } catch (Exception e) {
-            fail("Error executing query: " + multipleRegionQueries[i], e);
+            Assert.fail("Error executing query: " + multipleRegionQueries[i], e);
           }
           assertEquals(resultSize2[i], res.size());
           if (i == 4) {
@@ -488,7 +491,7 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
       @Override
       public Object call() throws Exception {
         ClientCacheFactory cf = new ClientCacheFactory();
-        cf.addPoolServer(getServerHostName(server1.getHost()), port);
+        cf.addPoolServer(NetworkUtils.getServerHostName(server1.getHost()), port);
         ClientCache cache = getClientCache(cf);
         cache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY)
             .create(regName);
@@ -512,14 +515,14 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
     client.invoke(new SerializableCallable("Query") {
       @Override
       public Object call() throws Exception {
-        getLogWriter().info("Querying remotely from client");
+        LogWriterUtils.getLogWriter().info("Querying remotely from client");
         QueryService localQS = null;
         QueryService remoteQS = null;
         try {
           localQS = ((ClientCache) getCache()).getLocalQueryService();
           remoteQS = ((ClientCache) getCache()).getQueryService();
         } catch (Exception e) {
-          fail("Exception getting query service ", e);
+          Assert.fail("Exception getting query service ", e);
         }
         SelectResults res = null;
         SelectResults[][] sr = new SelectResults[1][2];
@@ -532,7 +535,7 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
             sr[0][1] = res;
             CacheUtils.compareResultsOfWithAndWithoutIndex(sr);
           } catch (Exception e) {
-            fail("Error executing query: " + queries[i], e);
+            Assert.fail("Error executing query: " + queries[i], e);
           }
           assertEquals(resultSize[i], res.size());
           if (i == 3) {
@@ -583,14 +586,14 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
         try {
           qs = getCache().getQueryService();
         } catch (Exception e) {
-          fail("Exception getting query service ", e);
+          Assert.fail("Exception getting query service ", e);
         }
         SelectResults res = null;
         for (int i = 0; i < 6; i++) {
           try {
             res = (SelectResults) qs.newQuery(queries[i]).execute();
           } catch (Exception e) {
-            fail("Error executing query: " + queries[i], e);
+            Assert.fail("Error executing query: " + queries[i], e);
           }
           assertEquals(resultSize[i], res.size());
           if (i == 3) {
@@ -661,9 +664,9 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
       @Override
       public Object call() throws Exception {
         ClientCacheFactory cf = new ClientCacheFactory();
-        cf.addPoolServer(getServerHostName(server1.getHost()), port1);
-        cf.addPoolServer(getServerHostName(server2.getHost()), port2);
-        cf.addPoolServer(getServerHostName(server3.getHost()), port3);
+        cf.addPoolServer(NetworkUtils.getServerHostName(server1.getHost()), port1);
+        cf.addPoolServer(NetworkUtils.getServerHostName(server2.getHost()), port2);
+        cf.addPoolServer(NetworkUtils.getServerHostName(server3.getHost()), port3);
         ClientCache cache = getClientCache(cf);
         cache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY)
             .create(regName);
@@ -687,14 +690,14 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
     client.invoke(new SerializableCallable("Query") {
       @Override
       public Object call() throws Exception {
-        getLogWriter().info("Querying remotely from client");
+        LogWriterUtils.getLogWriter().info("Querying remotely from client");
         QueryService localQS = null;
         QueryService remoteQS = null;
         try {
           localQS = ((ClientCache) getCache()).getLocalQueryService();
           remoteQS = ((ClientCache) getCache()).getQueryService();
         } catch (Exception e) {
-          fail("Exception getting query service ", e);
+          Assert.fail("Exception getting query service ", e);
         }
         SelectResults res = null;
         SelectResults[][] sr = new SelectResults[1][2];
@@ -707,7 +710,7 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
             sr[0][1] = res;
             CacheUtils.compareResultsOfWithAndWithoutIndex(sr);
           } catch (Exception e) {
-            fail("Error executing query: " + queries[i], e);
+            Assert.fail("Error executing query: " + queries[i], e);
           }
           assertEquals(resultSize[i], res.size());
           if (i == 3) {
@@ -758,14 +761,14 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
         try {
           qs = getCache().getQueryService();
         } catch (Exception e) {
-          fail("Exception getting query service ", e);
+          Assert.fail("Exception getting query service ", e);
         }
         SelectResults res = null;
         for (int i = 0; i < 6; i++) {
           try {
             res = (SelectResults) qs.newQuery(queries[i]).execute();
           } catch (Exception e) {
-            fail("Error executing query: " + queries[i], e);
+            Assert.fail("Error executing query: " + queries[i], e);
           }
           assertEquals(resultSize[i], res.size());
           if (i == 3) {
@@ -817,7 +820,7 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
       @Override
       public Object call() throws Exception {
         ClientCacheFactory cf = new ClientCacheFactory();
-        cf.addPoolServer(getServerHostName(server1.getHost()), port1);
+        cf.addPoolServer(NetworkUtils.getServerHostName(server1.getHost()), port1);
         ClientCache cache = getClientCache(cf);
         cache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY)
             .create(regName);
@@ -850,7 +853,7 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
           qs = getCache().getQueryService();
           qs.createIndex("status", "status", "/" + regName);
         } catch (Exception e) {
-          fail("Exception getting query service ", e);
+          Assert.fail("Exception getting query service ", e);
         }
 
         return null;
@@ -861,14 +864,14 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
     client.invoke(new SerializableCallable("Query") {
       @Override
       public Object call() throws Exception {
-        getLogWriter().info("Querying remotely from client");
+        LogWriterUtils.getLogWriter().info("Querying remotely from client");
         QueryService localQS = null;
         QueryService remoteQS = null;
         try {
           localQS = ((ClientCache) getCache()).getLocalQueryService();
           remoteQS = ((ClientCache) getCache()).getQueryService();
         } catch (Exception e) {
-          fail("Exception getting query service ", e);
+          Assert.fail("Exception getting query service ", e);
         }
         SelectResults res = null;
         SelectResults[][] sr = new SelectResults[1][2];
@@ -883,7 +886,7 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
             sr[0][1] = res;
             CacheUtils.compareResultsOfWithAndWithoutIndex(sr);
           } catch (Exception e) {
-            fail("Error executing query: " + multipleRegionQueries[i], e);
+            Assert.fail("Error executing query: " + multipleRegionQueries[i], e);
           }
           assertEquals(resultSize2[i], res.size());
           if (i == 4) {
@@ -926,7 +929,7 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
           qs = getCache().getQueryService();
           qs.createIndex("status", "status", "/" + regName2);
         } catch (Exception e) {
-          fail("Exception getting query service ", e);
+          Assert.fail("Exception getting query service ", e);
         }
 
         return null;
@@ -937,14 +940,14 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
     client.invoke(new SerializableCallable("Query") {
       @Override
       public Object call() throws Exception {
-        getLogWriter().info("Querying remotely from client");
+        LogWriterUtils.getLogWriter().info("Querying remotely from client");
         QueryService localQS = null;
         QueryService remoteQS = null;
         try {
           localQS = ((ClientCache) getCache()).getLocalQueryService();
           remoteQS = ((ClientCache) getCache()).getQueryService();
         } catch (Exception e) {
-          fail("Exception getting query service ", e);
+          Assert.fail("Exception getting query service ", e);
         }
         SelectResults res = null;
         SelectResults[][] sr = new SelectResults[1][2];
@@ -959,7 +962,7 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
             sr[0][1] = res;
             CacheUtils.compareResultsOfWithAndWithoutIndex(sr);
           } catch (Exception e) {
-            fail("Error executing query: " + multipleRegionQueries[i], e);
+            Assert.fail("Error executing query: " + multipleRegionQueries[i], e);
           }
           assertEquals(resultSize2[i], res.size());
           if (i == 4) {
@@ -1019,7 +1022,7 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
       @Override
       public Object call() throws Exception {
         ClientCacheFactory cf = new ClientCacheFactory();
-        cf.addPoolServer(getServerHostName(server1.getHost()), port1);
+        cf.addPoolServer(NetworkUtils.getServerHostName(server1.getHost()), port1);
         ClientCache cache = getClientCache(cf);
         cache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY)
             .create(regName);
@@ -1043,14 +1046,14 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
     client.invoke(new SerializableCallable("Query") {
       @Override
       public Object call() throws Exception {
-        getLogWriter().info("Querying remotely from client");
+        LogWriterUtils.getLogWriter().info("Querying remotely from client");
         QueryService localQS = null;
         QueryService remoteQS = null;
         try {
           localQS = ((ClientCache) getCache()).getLocalQueryService();
           remoteQS = ((ClientCache) getCache()).getQueryService();
         } catch (Exception e) {
-          fail("Exception getting query service ", e);
+          Assert.fail("Exception getting query service ", e);
         }
         SelectResults res = null;
         SelectResults[][] sr = new SelectResults[1][2];
@@ -1063,7 +1066,7 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
             sr[0][1] = res;
             CacheUtils.compareResultsOfWithAndWithoutIndex(sr);
           } catch (Exception e) {
-            fail("Error executing query: " + queries[i], e);
+            Assert.fail("Error executing query: " + queries[i], e);
           }
           assertEquals(resultSize[i], res.size());
           if (i == 3) {
@@ -1118,14 +1121,14 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
         try {
           qs = getCache().getQueryService();
         } catch (Exception e) {
-          fail("Exception getting query service ", e);
+          Assert.fail("Exception getting query service ", e);
         }
         SelectResults res = null;
         for (int i = 0; i < queries.length; i++) {
           try {
             res = (SelectResults) qs.newQuery(queries[i]).execute();
           } catch (Exception e) {
-            fail("Error executing query: " + queries[i], e);
+            Assert.fail("Error executing query: " + queries[i], e);
           }
           assertEquals(resultSize[i], res.size());
           if (i == 3) {
@@ -1174,14 +1177,14 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
         try {
           qs = getCache().getQueryService();
         } catch (Exception e) {
-          fail("Exception getting query service ", e);
+          Assert.fail("Exception getting query service ", e);
         }
         SelectResults res = null;
         for (int i = 0; i < queries.length; i++) {
           try {
             res = (SelectResults) qs.newQuery(queries[i]).execute();
           } catch (Exception e) {
-            fail("Error executing query: " + queries[i], e);
+            Assert.fail("Error executing query: " + queries[i], e);
           }
           assertEquals(resultSize[i], res.size());
           if (i == 3) {
@@ -1240,7 +1243,7 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
       @Override
       public Object call() throws Exception {
         ClientCacheFactory cf = new ClientCacheFactory();
-        cf.addPoolServer(getServerHostName(server1.getHost()), port1);
+        cf.addPoolServer(NetworkUtils.getServerHostName(server1.getHost()), port1);
         ClientCache cache = getClientCache(cf);
         cache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY)
             .create(regName);
@@ -1264,14 +1267,14 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
     client.invoke(new SerializableCallable("Query") {
       @Override
       public Object call() throws Exception {
-        getLogWriter().info("Querying remotely from client");
+        LogWriterUtils.getLogWriter().info("Querying remotely from client");
         QueryService localQS = null;
         QueryService remoteQS = null;
         try {
           localQS = ((ClientCache) getCache()).getLocalQueryService();
           remoteQS = ((ClientCache) getCache()).getQueryService();
         } catch (Exception e) {
-          fail("Exception getting query service ", e);
+          Assert.fail("Exception getting query service ", e);
         }
         SelectResults res = null;
         SelectResults[][] sr = new SelectResults[1][2];
@@ -1284,7 +1287,7 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
             sr[0][1] = res;
             CacheUtils.compareResultsOfWithAndWithoutIndex(sr);
           } catch (Exception e) {
-            fail("Error executing query: " + queries[i], e);
+            Assert.fail("Error executing query: " + queries[i], e);
           }
           assertEquals(resultSize[i], res.size());
           if (i == 3) {
@@ -1341,14 +1344,14 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
         try {
           qs = getCache().getQueryService();
         } catch (Exception e) {
-          fail("Exception getting query service ", e);
+          Assert.fail("Exception getting query service ", e);
         }
         SelectResults res = null;
         for (int i = 0; i < queries.length; i++) {
           try {
             res = (SelectResults) qs.newQuery(queries[i]).execute();
           } catch (Exception e) {
-            fail("Error executing query: " + queries[i], e);
+            Assert.fail("Error executing query: " + queries[i], e);
           }
           assertEquals(resultSize[i], res.size());
           if (i == 3) {
@@ -1397,14 +1400,14 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
         try {
           qs = getCache().getQueryService();
         } catch (Exception e) {
-          fail("Exception getting query service ", e);
+          Assert.fail("Exception getting query service ", e);
         }
         SelectResults res = null;
         for (int i = 0; i < queries.length; i++) {
           try {
             res = (SelectResults) qs.newQuery(queries[i]).execute();
           } catch (Exception e) {
-            fail("Error executing query: " + queries[i], e);
+            Assert.fail("Error executing query: " + queries[i], e);
           }
           assertEquals(resultSize[i], res.size());
           if (i == 3) {
@@ -1467,7 +1470,7 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
       @Override
       public Object call() throws Exception {
         ClientCacheFactory cf = new ClientCacheFactory();
-        cf.addPoolServer(getServerHostName(server1.getHost()), port);
+        cf.addPoolServer(NetworkUtils.getServerHostName(server1.getHost()), port);
         ClientCache cache = getClientCache(cf);
         cache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY)
             .create(regName);
@@ -1483,14 +1486,14 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
     client.invoke(new SerializableCallable("Query") {
       @Override
       public Object call() throws Exception {
-        getLogWriter().info("Querying remotely from client");
+        LogWriterUtils.getLogWriter().info("Querying remotely from client");
         QueryService localQS = null;
         QueryService remoteQS = null;
         try {
           localQS = ((ClientCache) getCache()).getLocalQueryService();
           remoteQS = ((ClientCache) getCache()).getQueryService();
         } catch (Exception e) {
-          fail("Exception getting query service ", e);
+          Assert.fail("Exception getting query service ", e);
         }
         SelectResults res = null;
         SelectResults[][] sr = new SelectResults[1][2];
@@ -1503,7 +1506,7 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
             sr[0][1] = res;
             CacheUtils.compareResultsOfWithAndWithoutIndex(sr);
           } catch (Exception e) {
-            fail("Error executing query: " + queries[i], e);
+            Assert.fail("Error executing query: " + queries[i], e);
           }
           assertEquals(resultSize[i], res.size());
           if (i == 3) {
@@ -1602,10 +1605,10 @@ public class SelectStarQueryDUnitTest extends CacheTestCase {
     public void beforeIterationEvaluation(CompiledValue executer,
         Object currentObject) {
       if (currentObject instanceof VMCachedDeserializable) {
-        getLogWriter().fine("currentObject is serialized object");
+        LogWriterUtils.getLogWriter().fine("currentObject is serialized object");
         isObjectSerialized = true;
       } else {
-        getLogWriter().fine("currentObject is deserialized object");
+        LogWriterUtils.getLogWriter().fine("currentObject is deserialized object");
       }
     }
 

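For orientation, the hunks above all converge on the same pattern: log through LogWriterUtils, fail through the dunit Assert (whose two-argument fail keeps the causing exception), and resolve the server host through NetworkUtils. The following is a minimal sketch of that shape, not code from the commit; it assumes the geode dunit test framework on the classpath, and the class name, host, port and region are placeholders.

    import com.gemstone.gemfire.cache.client.ClientCache;
    import com.gemstone.gemfire.cache.client.ClientCacheFactory;
    import com.gemstone.gemfire.cache.query.QueryService;
    import com.gemstone.gemfire.test.dunit.Assert;
    import com.gemstone.gemfire.test.dunit.Host;
    import com.gemstone.gemfire.test.dunit.LogWriterUtils;
    import com.gemstone.gemfire.test.dunit.NetworkUtils;

    public class ClientQueryPatternSketch {
      // Connects a client pool to the given dunit host/port and returns the
      // remote QueryService, failing with the cause attached if anything breaks.
      static QueryService remoteQueryService(Host host, int port) {
        ClientCacheFactory cf = new ClientCacheFactory();
        cf.addPoolServer(NetworkUtils.getServerHostName(host), port);
        ClientCache cache = cf.create();
        LogWriterUtils.getLogWriter().info("Querying remotely from client");
        QueryService qs = null;
        try {
          qs = cache.getQueryService();
        } catch (Exception e) {
          Assert.fail("Exception getting query service ", e); // two-arg fail keeps the cause
        }
        return qs;
      }
    }
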
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/functional/IndexCreationDeadLockJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/functional/IndexCreationDeadLockJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/functional/IndexCreationDeadLockJUnitTest.java
index 37abacf..57846a8 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/functional/IndexCreationDeadLockJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/functional/IndexCreationDeadLockJUnitTest.java
@@ -46,7 +46,7 @@ import com.gemstone.gemfire.cache.query.IndexType;
 import com.gemstone.gemfire.cache.query.QueryService;
 import com.gemstone.gemfire.cache.query.data.Portfolio;
 import com.gemstone.gemfire.cache.util.CacheWriterAdapter;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
@@ -190,7 +190,7 @@ public class IndexCreationDeadLockJUnitTest
 
     Thread th = new IndexCreationDeadLockJUnitTest.PutThread("put thread");
     th.start();
-    DistributedTestCase.join(th, 60 * 1000, null);
+    ThreadUtils.join(th, 60 * 1000);
   }
 
   /**
@@ -294,7 +294,7 @@ public class IndexCreationDeadLockJUnitTest
         Thread indxCreationThread = new HelperThread("index creator thread");
         indxCreationThread.start();
         try {
-          DistributedTestCase.join(indxCreationThread, 30 * 1000, null);
+          ThreadUtils.join(indxCreationThread, 30 * 1000);
         }
         catch (Exception e) {
           e.printStackTrace();

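The join replacement shown above drops the LogWriter argument: ThreadUtils.join now takes only the thread and a millisecond timeout. A minimal sketch, assuming only that signature; the worker body stands in for the put or index-creation work done by the real test threads.

    import com.gemstone.gemfire.test.dunit.ThreadUtils;

    public class JoinPatternSketch {
      public static void main(String[] args) {
        Thread worker = new Thread(() -> {
          // placeholder for the work done by the real "put thread"
        }, "put thread");
        worker.start();
        // Old form: DistributedTestCase.join(worker, 60 * 1000, null);
        ThreadUtils.join(worker, 60 * 1000); // wait up to 60 seconds for the worker to finish
      }
    }
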
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/functional/IndexMaintenanceAsynchJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/functional/IndexMaintenanceAsynchJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/functional/IndexMaintenanceAsynchJUnitTest.java
index 84612b5..ac3b8bb 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/functional/IndexMaintenanceAsynchJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/functional/IndexMaintenanceAsynchJUnitTest.java
@@ -45,8 +45,8 @@ import com.gemstone.gemfire.cache.query.data.Portfolio;
 import com.gemstone.gemfire.cache.query.internal.QueryObserverAdapter;
 import com.gemstone.gemfire.cache.query.internal.QueryObserverHolder;
 import com.gemstone.gemfire.cache.query.internal.index.IndexProtocol;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
@@ -137,7 +137,7 @@ public class IndexMaintenanceAsynchJUnitTest {
             return "index updates never became 8";
           }
         };
-        DistributedTestCase.waitForCriterion(ev, 5000, 200, true);
+        Wait.waitForCriterion(ev, 5000, 200, true);
 
         //queryString= "SELECT DISTINCT * FROM /portfolios p, p.positions.values pos where pos.secId='IBM'";
         queryString= "SELECT DISTINCT * FROM /portfolios where status = 'active'";

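The waitForCriterion calls move the same way: WaitCriterion is now a top-level dunit type and the polling loop lives in Wait. A small sketch, assuming only the signatures visible in the hunk above; the counter is a stand-in for the asynchronous index-update statistic the real test polls.

    import java.util.concurrent.atomic.AtomicInteger;

    import com.gemstone.gemfire.test.dunit.Wait;
    import com.gemstone.gemfire.test.dunit.WaitCriterion;

    public class WaitPatternSketch {
      public static void main(String[] args) {
        // pretend the asynchronous index updates have already arrived
        final AtomicInteger updates = new AtomicInteger(8);
        WaitCriterion ev = new WaitCriterion() {
          @Override
          public boolean done() {
            return updates.get() == 8;
          }
          @Override
          public String description() {
            return "index updates never became 8";
          }
        };
        // Old form: DistributedTestCase.waitForCriterion(ev, 5000, 200, true);
        Wait.waitForCriterion(ev, 5000, 200, true); // poll every 200 ms for up to 5 s, throw on timeout
      }
    }
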
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/functional/LikePredicateJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/functional/LikePredicateJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/functional/LikePredicateJUnitTest.java
index 3e4a570..0bfad5e 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/functional/LikePredicateJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/functional/LikePredicateJUnitTest.java
@@ -57,7 +57,7 @@ import com.gemstone.gemfire.cache.query.internal.ResultsCollectionWrapper;
 import com.gemstone.gemfire.cache.query.internal.index.IndexManager;
 import com.gemstone.gemfire.cache.query.internal.index.IndexManager.TestHook;
 import com.gemstone.gemfire.cache.query.internal.types.ObjectTypeImpl;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
@@ -2054,7 +2054,7 @@ public class LikePredicateJUnitTest {
     assertEquals(5, rs[0][0].size());
 
     // wait for remove to complete
-    DistributedTestCase.join(LikeQueryIndexTestHook.th, 60 * 1000, null);
+    ThreadUtils.join(LikeQueryIndexTestHook.th, 60 * 1000);
 
     // The index should have been removed by now
     assertEquals(0, cache.getQueryService().getIndexes().size());

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/ExecutionContextJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/ExecutionContextJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/ExecutionContextJUnitTest.java
index 35b5995..41d6d62 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/ExecutionContextJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/ExecutionContextJUnitTest.java
@@ -44,7 +44,7 @@ import com.gemstone.gemfire.cache.query.SelectResults;
 import com.gemstone.gemfire.cache.query.data.Portfolio;
 import com.gemstone.gemfire.cache.query.data.Position;
 import com.gemstone.gemfire.internal.cache.InternalCache;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
@@ -368,7 +368,7 @@ public class ExecutionContextJUnitTest {
     
     for (int i =0; i < th.length ;++i) {
       try {
-        DistributedTestCase.join(th[i], 30 * 1000, null);
+        ThreadUtils.join(th[i], 30 * 1000);
       }catch(Exception e) {
         fail(e.toString());
       }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/AsynchIndexMaintenanceJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/AsynchIndexMaintenanceJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/AsynchIndexMaintenanceJUnitTest.java
index 7ad5f66..eeaa36c 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/AsynchIndexMaintenanceJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/AsynchIndexMaintenanceJUnitTest.java
@@ -28,8 +28,9 @@ import com.gemstone.gemfire.cache.query.CacheUtils;
 import com.gemstone.gemfire.cache.query.IndexType;
 import com.gemstone.gemfire.cache.query.QueryService;
 import com.gemstone.gemfire.cache.query.data.Portfolio;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 import java.util.HashSet;
@@ -113,7 +114,7 @@ public class AsynchIndexMaintenanceJUnitTest {
         return "valueToEntriesMap never became 50";
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 3000, 200, true);
+    Wait.waitForCriterion(ev, 3000, 200, true);
   }
   
   @Test
@@ -140,7 +141,7 @@ public class AsynchIndexMaintenanceJUnitTest {
       }
     };
 
-    DistributedTestCase.waitForCriterion(evSize, 17 * 1000, 200, true);
+    Wait.waitForCriterion(evSize, 17 * 1000, 200, true);
     
     // clear region.
     region.clear();
@@ -153,7 +154,7 @@ public class AsynchIndexMaintenanceJUnitTest {
         return "valueToEntriesMap never became size :" + 0;
       }
     };
-    DistributedTestCase.waitForCriterion(evClear, 17 * 1000, 200, true);
+    Wait.waitForCriterion(evClear, 17 * 1000, 200, true);
     
     // Add to region.
     for( int i=0; i<size ; ++i) {
@@ -161,7 +162,7 @@ public class AsynchIndexMaintenanceJUnitTest {
       idSet.add((i+1) + "");
     }    
     //assertEquals(0, getIndexSize(ri));
-    DistributedTestCase.waitForCriterion(evSize, 17 * 1000, 200, true);
+    Wait.waitForCriterion(evSize, 17 * 1000, 200, true);
   }
   
   @Test
@@ -183,7 +184,7 @@ public class AsynchIndexMaintenanceJUnitTest {
         return "valueToEntries map never became size 3";
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 10 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 10 * 1000, 200, true);
   }
   
   @Test
@@ -247,7 +248,7 @@ public class AsynchIndexMaintenanceJUnitTest {
     }
     try {
       for (int i = 0; i < TOTAL_THREADS; ++i) {
-        DistributedTestCase.join(threads[i], 30 * 1000, null);
+        ThreadUtils.join(threads[i], 30 * 1000);
       }
     }
     catch (Exception e) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexInitOnOverflowRegionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexInitOnOverflowRegionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexInitOnOverflowRegionDUnitTest.java
index 3e69990..5528299 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexInitOnOverflowRegionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexInitOnOverflowRegionDUnitTest.java
@@ -45,9 +45,13 @@ import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.internal.cache.EvictionAttributesImpl;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 /**
  * @author shobhit
@@ -112,7 +116,7 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
           RegionFactory regionFactory = cache.createRegionFactory(attr.create());
           partitionRegion = regionFactory.create(name);
         } catch (IllegalStateException ex) {
-          getLogWriter().warning("Creation caught IllegalStateException", ex);
+          LogWriterUtils.getLogWriter().warning("Creation caught IllegalStateException", ex);
         }
         assertNotNull("Region " + name + " not in cache", cache.getRegion(name));
         assertNotNull("Region ref null", partitionRegion);
@@ -165,7 +169,7 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
         Cache cache = PRQHelp.getCache();
 
         while (!hooked) {
-          pause(100);
+          Wait.pause(100);
         }
         // Create and hence initialize Index
         try {
@@ -180,10 +184,8 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
     });
 
     // If we take more than 30 seconds then its a deadlock.
-    DistributedTestCase.join(asyncInv2, 30 * 1000, PRQHelp.getCache()
-        .getLogger());
-    DistributedTestCase.join(asyncInv1, 30 * 1000, PRQHelp.getCache()
-        .getLogger());
+    ThreadUtils.join(asyncInv2, 30 * 1000);
+    ThreadUtils.join(asyncInv1, 30 * 1000);
   }
 
   /**
@@ -194,8 +196,8 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
     
-    addExpectedException("Unexpected IOException:");
-    addExpectedException("java.net.SocketException");
+    IgnoredException.addIgnoredException("Unexpected IOException:");
+    IgnoredException.addIgnoredException("java.net.SocketException");
 
     name = "PartionedPortfoliosPR";
     // Create Overflow Persistent Partition Region
@@ -232,7 +234,7 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
           RegionFactory regionFactory = cache.createRegionFactory(attr.create());
           partitionRegion = regionFactory.create(name);
         } catch (IllegalStateException ex) {
-          getLogWriter().warning("Creation caught IllegalStateException", ex);
+          LogWriterUtils.getLogWriter().warning("Creation caught IllegalStateException", ex);
         } catch (IOException e) {
           e.printStackTrace();
         }
@@ -254,7 +256,7 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
     
     final int port = vm0.invokeInt(ConcurrentIndexInitOnOverflowRegionDUnitTest.class,
     "getCacheServerPort");
-    final String host0 = getServerHostName(vm0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
 
     // Start changing the value in Region which should turn into a deadlock if
     // the fix is not there
@@ -308,7 +310,7 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
         Cache cache = PRQHelp.getCache();
 
         while (!hooked) {
-          pause(100);
+          Wait.pause(100);
         }
         // Create Indexes
         try {
@@ -323,10 +325,8 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
     });
 
     // If we take more than 30 seconds then its a deadlock.
-    DistributedTestCase.join(asyncInv2, 30 * 1000, PRQHelp.getCache()
-        .getLogger());
-    DistributedTestCase.join(asyncInv1, 30 * 1000, PRQHelp.getCache()
-        .getLogger());
+    ThreadUtils.join(asyncInv2, 30 * 1000);
+    ThreadUtils.join(asyncInv1, 30 * 1000);
     
     vm0.invoke(new CacheSerializableRunnable("Set Test Hook") {
       
@@ -378,7 +378,7 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
         case 6: // processAction in IndexManager
           hooked = true;
           //wait untill some thread unhooks.
-          while (hooked) { pause(20); }
+          while (hooked) { Wait.pause(20); }
           break;
         default:
           break;
@@ -397,7 +397,7 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
         for (int i=0; i<100; i++) {
           if (i == 50) IndexManager.testHook = new LocalTestHook();
           region.put(i, new Portfolio(i));
-          if (i == 50) pause(20);
+          if (i == 50) Wait.pause(20);
         }
       }
     });
@@ -410,7 +410,7 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
         Region region = PRQHelp.getCache().getRegion(regionName);
         
         while(!hooked) {
-          pause(100);
+          Wait.pause(100);
         }
         if (hooked) {
           hooked = false;
@@ -431,8 +431,7 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
     });
 
     // Kill asynch thread
-    DistributedTestCase.join(indexUpdateAsysnch, 20000, PRQHelp.getCache()
-        .getLogger());
+    ThreadUtils.join(indexUpdateAsysnch, 20000);
 
     //Verify region size which must be 50
     vm0.invoke(new CacheSerializableRunnable("Check region size") {
@@ -453,8 +452,8 @@ public class ConcurrentIndexInitOnOverflowRegionDUnitTest extends CacheTestCase
       switch (spot) {
       case 6: // Before Index update and after region entry lock.
         hooked = true;
-        getLogWriter().fine("IndexManagerTestHook is hooked.");
-        pause(10000);
+        LogWriterUtils.getLogWriter().fine("IndexManagerTestHook is hooked.");
+        Wait.pause(10000);
         hooked = false;
         break;
       default:

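The concurrent-index tests above combine three of the new helpers: IgnoredException.addIgnoredException for the expected socket noise, Wait.pause to spin until the IndexManager test hook parks the region operation, and ThreadUtils.join with a bound so a hang is reported as a deadlock. A condensed sketch of that coordination, assuming the same signatures; the hook flag and method name are placeholders.

    import com.gemstone.gemfire.test.dunit.AsyncInvocation;
    import com.gemstone.gemfire.test.dunit.IgnoredException;
    import com.gemstone.gemfire.test.dunit.ThreadUtils;
    import com.gemstone.gemfire.test.dunit.Wait;

    public class OverflowRegionTestSketch {
      private static volatile boolean hooked; // set by the IndexManager test hook in the real test

      // Tolerate the expected exceptions, wait for the hook to fire, then treat a
      // join timeout as a probable deadlock between the updater and the querier.
      static void runGuarded(AsyncInvocation updater, AsyncInvocation querier) {
        IgnoredException.addIgnoredException("java.net.SocketException");
        while (!hooked) {
          Wait.pause(100); // poll until the test hook reports the region op is parked
        }
        // If either invocation takes more than 30 seconds it is treated as a deadlock.
        ThreadUtils.join(querier, 30 * 1000);
        ThreadUtils.join(updater, 30 * 1000);
      }
    }
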
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexOperationsOnOverflowRegionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexOperationsOnOverflowRegionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexOperationsOnOverflowRegionDUnitTest.java
index e999787..81482b4 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexOperationsOnOverflowRegionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexOperationsOnOverflowRegionDUnitTest.java
@@ -45,9 +45,11 @@ import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.internal.cache.EvictionAttributesImpl;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionQueryEvaluator.TestHook;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 /**
  * Test creates a persistent-overflow region and performs updates in region
@@ -115,7 +117,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
           RegionFactory regionFactory = cache.createRegionFactory(attr.create());
           partitionRegion = regionFactory.create(name);
         } catch (IllegalStateException ex) {
-          getLogWriter().warning("Creation caught IllegalStateException", ex);
+          LogWriterUtils.getLogWriter().warning("Creation caught IllegalStateException", ex);
         }
         assertNotNull("Region " + name + " not in cache", cache.getRegion(name));
         assertNotNull("Region ref null", partitionRegion);
@@ -167,7 +169,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
             .newQuery("select * from /" + name + " p where p.ID > -1");
 
         while (!hooked) {
-          pause(100);
+          Wait.pause(100);
         }
         try {
           PRQHelp.getCache().getLogger().fine("Querying the region");
@@ -180,8 +182,8 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
     });
 
     //If we take more than 30 seconds then its a deadlock.
-    DistributedTestCase.join(asyncInv2, 30*1000, PRQHelp.getCache().getLogger());
-    DistributedTestCase.join(asyncInv1, 30*1000, PRQHelp.getCache().getLogger());
+    ThreadUtils.join(asyncInv2, 30*1000);
+    ThreadUtils.join(asyncInv1, 30*1000);
   }
 
   /**
@@ -217,7 +219,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
           RegionFactory regionFactory = cache.createRegionFactory(attr.create());
           partitionRegion = regionFactory.create(name);
         } catch (IllegalStateException ex) {
-          getLogWriter().warning("Creation caught IllegalStateException", ex);
+          LogWriterUtils.getLogWriter().warning("Creation caught IllegalStateException", ex);
         }
         assertNotNull("Region " + name + " not in cache", cache.getRegion(name));
         assertNotNull("Region ref null", partitionRegion);
@@ -269,7 +271,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
             .newQuery("select * from /" + name + " p where p.ID > -1");
 
         while (!hooked) {
-          pause(100);
+          Wait.pause(100);
         }
         try {
           PRQHelp.getCache().getLogger().fine("Querying the region");
@@ -282,8 +284,8 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
     });
 
     //If we take more than 30 seconds then its a deadlock.
-    DistributedTestCase.join(asyncInv2, 30*1000, PRQHelp.getCache().getLogger());
-    DistributedTestCase.join(asyncInv1, 30*1000, PRQHelp.getCache().getLogger());
+    ThreadUtils.join(asyncInv2, 30*1000);
+    ThreadUtils.join(asyncInv1, 30*1000);
   }
 
   /**
@@ -322,7 +324,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
           RegionFactory regionFactory = cache.createRegionFactory(attr.create());
           partitionRegion = regionFactory.create(name);
         } catch (IllegalStateException ex) {
-          getLogWriter().warning("Creation caught IllegalStateException", ex);
+          LogWriterUtils.getLogWriter().warning("Creation caught IllegalStateException", ex);
         }
         assertNotNull("Region " + name + " not in cache", cache.getRegion(name));
         assertNotNull("Region ref null", partitionRegion);
@@ -378,7 +380,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
             .newQuery("select * from /" + name + " p where p.ID > -1");
 
         while (!hooked) {
-          pause(100);
+          Wait.pause(100);
         }
         try {
           PRQHelp.getCache().getLogger().fine("Querying the region");
@@ -391,10 +393,8 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
     });
 
     // If we take more than 30 seconds then its a deadlock.
-    DistributedTestCase.join(asyncInv2, 30 * 1000, PRQHelp.getCache()
-        .getLogger());
-    DistributedTestCase.join(asyncInv1, 30 * 1000, PRQHelp.getCache()
-        .getLogger());
+    ThreadUtils.join(asyncInv2, 30 * 1000);
+    ThreadUtils.join(asyncInv1, 30 * 1000);
   }
 
   /**
@@ -433,7 +433,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
           RegionFactory regionFactory = cache.createRegionFactory(attr.create());
           partitionRegion = regionFactory.create(name);
         } catch (IllegalStateException ex) {
-          getLogWriter().warning("Creation caught IllegalStateException", ex);
+          LogWriterUtils.getLogWriter().warning("Creation caught IllegalStateException", ex);
         }
         assertNotNull("Region " + name + " not in cache", cache.getRegion(name));
         assertNotNull("Region ref null", partitionRegion);
@@ -489,7 +489,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
             .newQuery("select * from /" + name + " p where p.ID > -1");
 
         while (!hooked) {
-          pause(100);
+          Wait.pause(100);
         }
         try {
           PRQHelp.getCache().getLogger().fine("Querying the region");
@@ -502,10 +502,8 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
     });
 
     // If we take more than 30 seconds then its a deadlock.
-    DistributedTestCase.join(asyncInv2, 30 * 1000, PRQHelp.getCache()
-        .getLogger());
-    DistributedTestCase.join(asyncInv1, 30 * 1000, PRQHelp.getCache()
-        .getLogger());
+    ThreadUtils.join(asyncInv2, 30 * 1000);
+    ThreadUtils.join(asyncInv1, 30 * 1000);
   }
 
   /**
@@ -532,7 +530,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
          RegionFactory regionFactory = cache.createRegionFactory(attr.create());
          partitionRegion = regionFactory.create(name);
        } catch (IllegalStateException ex) {
-         getLogWriter().warning("Creation caught IllegalStateException", ex);
+         LogWriterUtils.getLogWriter().warning("Creation caught IllegalStateException", ex);
        }
        assertNotNull("Region " + name + " not in cache", cache.getRegion(name));
        assertNotNull("Region ref null", partitionRegion);
@@ -584,7 +582,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
            .newQuery("select * from /" + name + " p where p.ID > -1");
 
        while (!hooked) {
-         pause(10);
+         Wait.pause(10);
        }
        try {
          PRQHelp.getCache().getLogger().fine("Querying the region");
@@ -597,8 +595,8 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
    });
 
    //If we take more than 30 seconds then its a deadlock.
-   DistributedTestCase.join(asyncInv2, 30*1000, PRQHelp.getCache().getLogger());
-   DistributedTestCase.join(asyncInv1, 30*1000, PRQHelp.getCache().getLogger());
+   ThreadUtils.join(asyncInv2, 30*1000);
+   ThreadUtils.join(asyncInv1, 30*1000);
  }
 
  /**
@@ -625,7 +623,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
          RegionFactory regionFactory = cache.createRegionFactory(attr.create());
          partitionRegion = regionFactory.create(name);
        } catch (IllegalStateException ex) {
-         getLogWriter().warning("Creation caught IllegalStateException", ex);
+         LogWriterUtils.getLogWriter().warning("Creation caught IllegalStateException", ex);
        }
        assertNotNull("Region " + name + " not in cache", cache.getRegion(name));
        assertNotNull("Region ref null", partitionRegion);
@@ -677,7 +675,7 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
            .newQuery("select * from /" + name + " p where p.ID > -1");
 
        while (!hooked) {
-         pause(10);
+         Wait.pause(10);
        }
        try {
          PRQHelp.getCache().getLogger().fine("Querying the region");
@@ -690,8 +688,8 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
    });
 
    //If we take more than 30 seconds then its a deadlock.
-   DistributedTestCase.join(asyncInv2, 30*1000, PRQHelp.getCache().getLogger());
-   DistributedTestCase.join(asyncInv1, 30*1000, PRQHelp.getCache().getLogger());
+   ThreadUtils.join(asyncInv2, 30*1000);
+   ThreadUtils.join(asyncInv1, 30*1000);
  }
 
   public class IndexManagerTestHook implements com.gemstone.gemfire.cache.query.internal.index.IndexManager.TestHook{
@@ -699,8 +697,8 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
       switch (spot) {
       case 5: //Before Index update and after region entry lock.
         hooked  = true;
-        getLogWriter().fine("IndexManagerTestHook is hooked.");
-        pause(10000);
+        LogWriterUtils.getLogWriter().fine("IndexManagerTestHook is hooked.");
+        Wait.pause(10000);
         //hooked = false;
         break;
       default:
@@ -713,8 +711,8 @@ public class ConcurrentIndexOperationsOnOverflowRegionDUnitTest extends
       switch (spot) {
       case 5: //Before Index update and after region entry lock.
         hooked  = true;
-        getLogWriter().fine("IndexManagerTestHook is hooked.");
-        pause(100);
+        LogWriterUtils.getLogWriter().fine("IndexManagerTestHook is hooked.");
+        Wait.pause(100);
        // hooked = false;
         break;
       default:

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest.java
index 5faba97..6cbbef2 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest.java
@@ -41,11 +41,15 @@ import com.gemstone.gemfire.internal.cache.PartitionedRegion;
 import com.gemstone.gemfire.internal.cache.RegionEntry;
 import com.gemstone.gemfire.internal.cache.Token;
 import com.gemstone.gemfire.internal.cache.persistence.query.CloseableIterator;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 import com.gemstone.gemfire.test.dunit.SerializableRunnableIF;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 
 /**
  * This test is similar to {@link ConcurrentIndexUpdateWithoutWLDUnitTest} except
@@ -88,7 +92,7 @@ public class ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest extends
 
   @Override
   public void setUp() throws Exception {
-    invokeInEveryVM(new CacheSerializableRunnable("Set INPLACE_OBJECT_MODIFICATION false") {
+    Invoke.invokeInEveryVM(new CacheSerializableRunnable("Set INPLACE_OBJECT_MODIFICATION false") {
       
       @Override
       public void run2() throws CacheException {
@@ -103,21 +107,18 @@ public class ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest extends
    * Tear down a PartitionedRegionTestCase by cleaning up the existing cache
    * (mainly because we want to destroy any existing PartitionedRegions)
    */
-  public void tearDown2() throws Exception {
-    try {
-      invokeInEveryVM(new CacheSerializableRunnable("Set INPLACE_OBJECT_MODIFICATION false") {
-        
-        @Override
-        public void run2() throws CacheException {
-          //System.setProperty("gemfire.index.INPLACE_OBJECT_MODIFICATION", "false");
-          IndexManager.INPLACE_OBJECT_MODIFICATION_FOR_TEST = false;
-        }
-      });
-      invokeInEveryVM(ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest.class, "destroyRegions");
-      invokeInEveryVM(CacheTestCase.class, "closeCache");
-    } finally {
-      super.tearDown2();
-    }
+  @Override
+  protected final void preTearDown() throws Exception {
+    Invoke.invokeInEveryVM(new CacheSerializableRunnable("Set INPLACE_OBJECT_MODIFICATION false") {
+      
+      @Override
+      public void run2() throws CacheException {
+        //System.setProperty("gemfire.index.INPLACE_OBJECT_MODIFICATION", "false");
+        IndexManager.INPLACE_OBJECT_MODIFICATION_FOR_TEST = false;
+      }
+    });
+    Invoke.invokeInEveryVM(ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest.class, "destroyRegions");
+    Invoke.invokeInEveryVM(CacheTestCase.class, "closeCache");
   }
 
   public static synchronized void destroyRegions() {
@@ -145,12 +146,12 @@ public class ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest extends
     asyncInvs[1] = vm0.invokeAsync(helper.getCacheSerializableRunnableForPRRandomOps(regionName, 0, stepSize));
     
     for (AsyncInvocation inv : asyncInvs) {
-      DistributedTestCase.join(inv, 30*000, helper.getCache().getLogger());
+      ThreadUtils.join(inv, 30*000);
     }
     
     for (AsyncInvocation inv : asyncInvs) {
       if (inv.exceptionOccurred()) {
-        fail("Random region operation failed on VM_"+inv.getId(), inv.getException());
+        Assert.fail("Random region operation failed on VM_"+inv.getId(), inv.getException());
       }
     }
     
@@ -190,11 +191,11 @@ public class ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest extends
     asyncInvs[1] = vm0.invokeAsync(helper.getCacheSerializableRunnableForPRRandomOps(regionName, 0, totalDataSize));
     
     for (AsyncInvocation inv : asyncInvs) {
-      DistributedTestCase.join(inv, 30*000, helper.getCache().getLogger());
+      ThreadUtils.join(inv, 30*000);
     }
     for (AsyncInvocation inv : asyncInvs) {
       if (inv.exceptionOccurred()) {
-        fail("Random region operation failed on VM_"+inv.getId(), inv.getException());
+        Assert.fail("Random region operation failed on VM_"+inv.getId(), inv.getException());
       }
     }
     
@@ -248,12 +249,12 @@ public class ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest extends
     asyncInvs[11] = vm3.invokeAsync(helper.getCacheSerializableRunnableForPRRandomOps(regionName, (3 * (stepSize)), totalDataSize ));
     
     for (AsyncInvocation inv : asyncInvs) {
-      DistributedTestCase.join(inv, 60*000, helper.getCache().getLogger());
+      ThreadUtils.join(inv, 60*000);
     }
     
     for (AsyncInvocation inv : asyncInvs) {
       if (inv.exceptionOccurred()) {
-        fail("Random region operation failed on VM_"+inv.getId(), inv.getException());
+        Assert.fail("Random region operation failed on VM_"+inv.getId(), inv.getException());
       }
     }
     vm0.invoke(getCacheSerializableRunnableForIndexValidation(regionName, indexName));
@@ -311,11 +312,11 @@ public class ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest extends
     asyncInvs[11] = vm3.invokeAsync(helper.getCacheSerializableRunnableForPRRandomOps(regionName, (3 * (stepSize)), totalDataSize ));
     
     for (AsyncInvocation inv : asyncInvs) {
-      DistributedTestCase.join(inv, 60*000, helper.getCache().getLogger());
+      ThreadUtils.join(inv, 60*000);
     }
     for (AsyncInvocation inv : asyncInvs) {
       if (inv.exceptionOccurred()) {
-        fail("Random region operation failed on VM_"+inv.getId(), inv.getException());
+        Assert.fail("Random region operation failed on VM_"+inv.getId(), inv.getException());
       }
     }
     vm0.invoke(getCacheSerializableRunnableForIndexValidation(regionName, rindexName));
@@ -390,7 +391,7 @@ public class ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest extends
           if (index instanceof CompactRangeIndex) {
             // Ignore invalid values.
             if (value != Token.INVALID && value != Token.TOMBSTONE) {
-              getLogWriter().info("Portfolio: "+ ((Portfolio)value));
+              LogWriterUtils.getLogWriter().info("Portfolio: "+ ((Portfolio)value));
               Integer ID = ((Portfolio) value).getID();
 
               assertTrue("Did not find index key for REgionEntry [key: "
@@ -428,7 +429,7 @@ public class ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest extends
                 expectedNullEntries++;
               }
             } else {
-              getLogWriter().info(internalEntry.getKey()+"");
+              LogWriterUtils.getLogWriter().info(internalEntry.getKey()+"");
               expectedUndefinedEntries++;
             }
           }
@@ -440,7 +441,7 @@ public class ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest extends
               Collection<Position> positions = ((Portfolio)value).positions.values();
               for (Position pos : positions) {
                 if (pos != null) {
-                  getLogWriter().info("Portfolio: "+ ((Portfolio)value) + "Position: " + pos);
+                  LogWriterUtils.getLogWriter().info("Portfolio: "+ ((Portfolio)value) + "Position: " + pos);
                   String secId = pos.secId;
                   assertTrue("Did not find index key for REgionEntry [key: "
                       + internalEntry.getKey() + " , value: " + value
@@ -523,21 +524,21 @@ public class ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest extends
             .toArray()) {
           getLogWriter().info(((RegionEntry) obj).getKey() + "");
         }
-*/        getLogWriter().info(
+*/        LogWriterUtils.getLogWriter().info(
             " Expected Size of Index is: " + expectedIndexSize
                 + " Undefined size is: " + expectedUndefinedEntries
                 + " And NULL size is: " + expectedNullEntries);
         assertEquals("No of index keys NOT equals the no shown in statistics for index:" + index.getName(), ((CompactRangeIndex) index).getIndexStorage().size(), stats.getNumberOfKeys());
       } else {
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             " Actual Size of Index is: " + actualSize + " Undefined size is: "
                 + ((RangeIndex) index).undefinedMappedEntries.getNumEntries()
                 + " And NULL size is: "
                 + ((RangeIndex) index).nullMappedEntries.getNumEntries());
         for (Object obj : ((RangeIndex) index).undefinedMappedEntries.map.keySet()) {
-          getLogWriter().info(((RegionEntry) obj).getKey() + "");
+          LogWriterUtils.getLogWriter().info(((RegionEntry) obj).getKey() + "");
         }
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             " Expected Size of Index is: " + expectedIndexSize
                 + " Undefined size is: " + expectedUndefinedEntries
                 + " And NULL size is: " + expectedNullEntries);
@@ -583,7 +584,7 @@ public class ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest extends
             if (index instanceof CompactRangeIndex) {
               // Ignore invalid values.
               if (value != Token.INVALID && value != Token.TOMBSTONE) {
-                getLogWriter().info("Portfolio: "+ ((Portfolio)value));
+                LogWriterUtils.getLogWriter().info("Portfolio: "+ ((Portfolio)value));
                 Integer ID = ((Portfolio) value).getID();
   
                 assertTrue("Did not find index key for REgionEntry [key: "
@@ -631,7 +632,7 @@ public class ConcurrentIndexUpdateWithInplaceObjectModFalseDUnitTest extends
                 Collection<Position> positions = ((Portfolio)value).positions.values();
                 for (Position pos : positions) {
                   if (pos != null) {
-                    getLogWriter().info("Portfolio: "+ ((Portfolio)value) + "Position: " + pos);
+                    LogWriterUtils.getLogWriter().info("Portfolio: "+ ((Portfolio)value) + "Position: " + pos);
                     String secId = pos.secId;
                     assertTrue("Did not find index key for REgionEntry [key: "
                         + internalEntry.getKey() + " , value: " + value

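These last files also show the tearDown2 override giving way to preTearDown, with the per-VM cleanup reached through Invoke and async failures reported through Assert.fail so the remote exception travels with the message. A compressed sketch of that shape, assuming the CacheTestCase fixture and the signatures shown above; the class name is a placeholder and the timeouts are written out in full milliseconds.

    import com.gemstone.gemfire.cache30.CacheTestCase;
    import com.gemstone.gemfire.test.dunit.Assert;
    import com.gemstone.gemfire.test.dunit.AsyncInvocation;
    import com.gemstone.gemfire.test.dunit.Invoke;
    import com.gemstone.gemfire.test.dunit.ThreadUtils;

    public abstract class ConcurrentIndexSketchDUnitTest extends CacheTestCase {

      public ConcurrentIndexSketchDUnitTest(String name) {
        super(name);
      }

      @Override
      protected final void preTearDown() throws Exception {
        // Cleanup now runs from the preTearDown hook instead of overriding tearDown2.
        Invoke.invokeInEveryVM(CacheTestCase.class, "closeCache");
      }

      // Joins the async region operations and surfaces any remote failure with its cause.
      protected void checkAsyncOps(AsyncInvocation[] asyncInvs) {
        for (AsyncInvocation inv : asyncInvs) {
          ThreadUtils.join(inv, 60 * 1000);
        }
        for (AsyncInvocation inv : asyncInvs) {
          if (inv.exceptionOccurred()) {
            Assert.fail("Random region operation failed on VM_" + inv.getId(), inv.getException());
          }
        }
      }
    }
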
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexUpdateWithoutWLDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexUpdateWithoutWLDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexUpdateWithoutWLDUnitTest.java
index fbead52..2ac564a 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexUpdateWithoutWLDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/ConcurrentIndexUpdateWithoutWLDUnitTest.java
@@ -42,11 +42,15 @@ import com.gemstone.gemfire.internal.cache.PartitionedRegion;
 import com.gemstone.gemfire.internal.cache.RegionEntry;
 import com.gemstone.gemfire.internal.cache.Token;
 import com.gemstone.gemfire.internal.cache.persistence.query.CloseableIterator;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 import com.gemstone.gemfire.test.dunit.SerializableRunnableIF;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 
 /**
  * 
@@ -88,13 +92,10 @@ public class ConcurrentIndexUpdateWithoutWLDUnitTest extends
    * Tear down a PartitionedRegionTestCase by cleaning up the existing cache
    * (mainly because we want to destroy any existing PartitionedRegions)
    */
-  public void tearDown2() throws Exception {
-    try {
-      invokeInEveryVM(ConcurrentIndexUpdateWithoutWLDUnitTest.class, "destroyRegions");
-      invokeInEveryVM(CacheTestCase.class, "closeCache");
-    } finally {
-      super.tearDown2();
-    }
+  @Override
+  protected final void preTearDown() throws Exception {
+    Invoke.invokeInEveryVM(ConcurrentIndexUpdateWithoutWLDUnitTest.class, "destroyRegions");
+    Invoke.invokeInEveryVM(CacheTestCase.class, "closeCache");
   }
 
   public static synchronized void destroyRegions() {
@@ -121,12 +122,12 @@ public class ConcurrentIndexUpdateWithoutWLDUnitTest extends
     asyncInvs[1] = vm0.invokeAsync(helper.getCacheSerializableRunnableForPRRandomOps(regionName, 0, stepSize));
     
     for (AsyncInvocation inv : asyncInvs) {
-      DistributedTestCase.join(inv, 30*000, helper.getCache().getLogger());
+      ThreadUtils.join(inv, 30*000);
     }
     
     for (AsyncInvocation inv : asyncInvs) {
       if (inv.exceptionOccurred()) {
-        fail("Random region operation failed on VM_"+inv.getId(), inv.getException());
+        Assert.fail("Random region operation failed on VM_"+inv.getId(), inv.getException());
       }
     }
     
@@ -162,12 +163,12 @@ public class ConcurrentIndexUpdateWithoutWLDUnitTest extends
     asyncInvs[1] = vm0.invokeAsync(helper.getCacheSerializableRunnableForPRRandomOps(regionName, 0, stepSize));
     
     for (AsyncInvocation inv : asyncInvs) {
-      DistributedTestCase.join(inv, 30*000, helper.getCache().getLogger());
+      ThreadUtils.join(inv, 30*000);
     }
     
     for (AsyncInvocation inv : asyncInvs) {
       if (inv.exceptionOccurred()) {
-        fail("Random region operation failed on VM_"+inv.getId(), inv.getException());
+        Assert.fail("Random region operation failed on VM_"+inv.getId(), inv.getException());
       }
     }
     
@@ -206,11 +207,11 @@ public class ConcurrentIndexUpdateWithoutWLDUnitTest extends
     asyncInvs[1] = vm0.invokeAsync(helper.getCacheSerializableRunnableForPRRandomOps(regionName, 0, totalDataSize));
     
     for (AsyncInvocation inv : asyncInvs) {
-      DistributedTestCase.join(inv, 30*000, helper.getCache().getLogger());
+      ThreadUtils.join(inv, 30*000);
     }
     for (AsyncInvocation inv : asyncInvs) {
       if (inv.exceptionOccurred()) {
-        fail("Random region operation failed on VM_"+inv.getId(), inv.getException());
+        Assert.fail("Random region operation failed on VM_"+inv.getId(), inv.getException());
       }
     }
     
@@ -264,12 +265,12 @@ public class ConcurrentIndexUpdateWithoutWLDUnitTest extends
     asyncInvs[11] = vm3.invokeAsync(helper.getCacheSerializableRunnableForPRRandomOps(regionName, (3 * (stepSize)), totalDataSize ));
     
     for (AsyncInvocation inv : asyncInvs) {
-      DistributedTestCase.join(inv, 60*000, helper.getCache().getLogger());
+      ThreadUtils.join(inv, 60*000);
     }
     
     for (AsyncInvocation inv : asyncInvs) {
       if (inv.exceptionOccurred()) {
-        fail("Random region operation failed on VM_"+inv.getId(), inv.getException());
+        Assert.fail("Random region operation failed on VM_"+inv.getId(), inv.getException());
       }
     }
     vm0.invoke(getCacheSerializableRunnableForIndexValidation(regionName, indexName));
@@ -326,11 +327,11 @@ public class ConcurrentIndexUpdateWithoutWLDUnitTest extends
     asyncInvs[11] = vm3.invokeAsync(helper.getCacheSerializableRunnableForPRRandomOps(regionName, (3 * (stepSize)), totalDataSize ));
     
     for (AsyncInvocation inv : asyncInvs) {
-      DistributedTestCase.join(inv, 60*000, helper.getCache().getLogger());
+      ThreadUtils.join(inv, 60*000);
     }
     for (AsyncInvocation inv : asyncInvs) {
       if (inv.exceptionOccurred()) {
-        fail("Random region operation failed on VM_"+inv.getId(), inv.getException());
+        Assert.fail("Random region operation failed on VM_"+inv.getId(), inv.getException());
       }
     }
     vm0.invoke(getCacheSerializableRunnableForIndexValidation(regionName, rindexName));
@@ -398,12 +399,12 @@ public class ConcurrentIndexUpdateWithoutWLDUnitTest extends
     asyncInvs[11] = vm3.invokeAsync(helper.getCacheSerializableRunnableForPRRandomOps(regionName, (3 * (stepSize)), totalDataSize ));
     
     for (AsyncInvocation inv : asyncInvs) {
-      DistributedTestCase.join(inv, 60*000, helper.getCache().getLogger());
+      ThreadUtils.join(inv, 60*000);
     }
     
     for (AsyncInvocation inv : asyncInvs) {
       if (inv.exceptionOccurred()) {
-        fail("Random region operation failed on VM_"+inv.getId(), inv.getException());
+        Assert.fail("Random region operation failed on VM_"+inv.getId(), inv.getException());
       }
     }
     vm0.invoke(getCacheSerializableRunnableForIndexValidation(regionName, indexName));
@@ -477,7 +478,7 @@ public class ConcurrentIndexUpdateWithoutWLDUnitTest extends
           if (index instanceof CompactRangeIndex) {
             // Ignore invalid values.
             if (value != Token.INVALID && value != Token.TOMBSTONE) {
-              getLogWriter().info("Portfolio: "+ ((Portfolio)value));
+              LogWriterUtils.getLogWriter().info("Portfolio: "+ ((Portfolio)value));
               Integer ID = ((Portfolio) value).getID();
 
               assertTrue("Did not find index key for REgionEntry [key: "
@@ -513,7 +514,7 @@ public class ConcurrentIndexUpdateWithoutWLDUnitTest extends
                 expectedNullEntries++;
               }
             } else {
-              getLogWriter().info(internalEntry.getKey()+"");
+              LogWriterUtils.getLogWriter().info(internalEntry.getKey()+"");
               expectedUndefinedEntries++;
             }
           }
@@ -525,7 +526,7 @@ public class ConcurrentIndexUpdateWithoutWLDUnitTest extends
               Collection<Position> positions = ((Portfolio)value).positions.values();
               for (Position pos : positions) {
                 if (pos != null) {
-                  getLogWriter().info("Portfolio: "+ ((Portfolio)value) + "Position: " + pos);
+                  LogWriterUtils.getLogWriter().info("Portfolio: "+ ((Portfolio)value) + "Position: " + pos);
                   String secId = pos.secId;
                   assertTrue("Did not find index key for REgionEntry [key: "
                       + internalEntry.getKey() + " , value: " + value
@@ -599,7 +600,7 @@ public class ConcurrentIndexUpdateWithoutWLDUnitTest extends
 
       IndexStatistics stats = index.getStatistics();
       if (index instanceof CompactRangeIndex) {
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             " Actual Size of Index is: " + actualSize);
 /*        getLogWriter().info(
             " Actual Size of Index is: " + actualSize + " Undefined size is: "
@@ -611,21 +612,21 @@ public class ConcurrentIndexUpdateWithoutWLDUnitTest extends
           getLogWriter().info(((RegionEntry) obj).getKey() + "");
         }
 */
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             " Expected Size of Index is: " + expectedIndexSize
                 + " Undefined size is: " + expectedUndefinedEntries
                 + " And NULL size is: " + expectedNullEntries);
         assertEquals("No of index keys NOT equals the no shown in statistics for index:" + index.getName(), ((CompactRangeIndex) index).getIndexStorage().size(), stats.getNumberOfKeys());
       } else {
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             " Actual Size of Index is: " + actualSize + " Undefined size is: "
                 + ((RangeIndex) index).undefinedMappedEntries.getNumEntries()
                 + " And NULL size is: "
                 + ((RangeIndex) index).nullMappedEntries.getNumEntries());
         for (Object obj : ((RangeIndex) index).undefinedMappedEntries.map.keySet()) {
-          getLogWriter().info(((RegionEntry) obj).getKey() + "");
+          LogWriterUtils.getLogWriter().info(((RegionEntry) obj).getKey() + "");
         }
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             " Expected Size of Index is: " + expectedIndexSize
                 + " Undefined size is: " + expectedUndefinedEntries
                 + " And NULL size is: " + expectedNullEntries);
@@ -670,7 +671,7 @@ public class ConcurrentIndexUpdateWithoutWLDUnitTest extends
             if (index instanceof CompactRangeIndex) {
               // Ignore invalid values.
               if (value != Token.INVALID && value != Token.TOMBSTONE) {
-                getLogWriter().info("Portfolio: "+ ((Portfolio)value));
+                LogWriterUtils.getLogWriter().info("Portfolio: "+ ((Portfolio)value));
                 Integer ID = ((Portfolio) value).getID();
   
                 assertTrue("Did not find index key for REgionEntry [key: "
@@ -718,7 +719,7 @@ public class ConcurrentIndexUpdateWithoutWLDUnitTest extends
                 Collection<Position> positions = ((Portfolio)value).positions.values();
                 for (Position pos : positions) {
                   if (pos != null) {
-                    getLogWriter().info("Portfolio: "+ ((Portfolio)value) + "Position: " + pos);
+                    LogWriterUtils.getLogWriter().info("Portfolio: "+ ((Portfolio)value) + "Position: " + pos);
                     String secId = pos.secId;
                     assertTrue("Did not find index key for REgionEntry [key: "
                         + internalEntry.getKey() + " , value: " + value
@@ -765,7 +766,7 @@ public class ConcurrentIndexUpdateWithoutWLDUnitTest extends
           try {
             iter = ((CompactRangeIndex) index).getIndexStorage().iterator(null);
             while (iter.hasNext()) {
-              getLogWriter().info(
+              LogWriterUtils.getLogWriter().info(
                   "Index Values : " + iter.next().getDeserializedValue());
               actualValueSize++;
             }


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/CopyOnReadIndexDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/CopyOnReadIndexDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/CopyOnReadIndexDUnitTest.java
index a176ee7..d44c7f6 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/CopyOnReadIndexDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/CopyOnReadIndexDUnitTest.java
@@ -42,11 +42,16 @@ import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.PartitionedRegion;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 public class CopyOnReadIndexDUnitTest extends CacheTestCase {
 
@@ -62,7 +67,7 @@ public class CopyOnReadIndexDUnitTest extends CacheTestCase {
   public void setUp() throws Exception {
     super.setUp();
     getSystem();
-    invokeInEveryVM(new SerializableRunnable("getSystem") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
       public void run() {
         getSystem();
       }
@@ -73,7 +78,8 @@ public class CopyOnReadIndexDUnitTest extends CacheTestCase {
     vm2 = host.getVM(2);
   }
   
-  public void tearDown2() throws Exception {
+  @Override
+  protected final void preTearDownCacheTestCase() throws Exception {
     disconnectAllFromDS();
   }
   
@@ -159,13 +165,13 @@ public class CopyOnReadIndexDUnitTest extends CacheTestCase {
           //numPortfoliosPerVM instances of Portfolio created for put operation
           //Due to index, we have deserialized all of the entries this vm currently host
           Index index = getCache().getQueryService().getIndex(region, "idIndex");
-          DistributedTestCase.waitForCriterion(verifyPortfolioCount((int)index.getStatistics().getNumberOfValues() + numPortfoliosPerVM), 5000, 200, true);
+          Wait.waitForCriterion(verifyPortfolioCount((int)index.getStatistics().getNumberOfValues() + numPortfoliosPerVM), 5000, 200, true);
         }
         else {
           //operations we have done on this vm consist of:
           //numPortfoliosPerVM instances of Portfolio created for put operation
           //We do not have an index, so we have not deserialized any values
-          DistributedTestCase.waitForCriterion(verifyPortfolioCount(numPortfoliosPerVM), 5000, 200, true);
+          Wait.waitForCriterion(verifyPortfolioCount(numPortfoliosPerVM), 5000, 200, true);
         }
         return null;
       }
@@ -191,13 +197,13 @@ public class CopyOnReadIndexDUnitTest extends CacheTestCase {
             QueryTestUtils utils = new QueryTestUtils();
             index = utils.createIndex("idIndex", "p.ID", "/portfolios p");
           }
-          DistributedTestCase.waitForCriterion(verifyPortfolioCount((int)index.getStatistics().getNumberOfValues() + numPortfoliosPerVM), 5000, 200, true);
+          Wait.waitForCriterion(verifyPortfolioCount((int)index.getStatistics().getNumberOfValues() + numPortfoliosPerVM), 5000, 200, true);
         }
         else {
           //operations we have done on this vm consist of:
           //numPortfoliosPerVM instances of Portfolio created for put operation
           //We do not have an index, so we have not deserialized any values
-          DistributedTestCase.waitForCriterion(verifyPortfolioCount(numPortfoliosPerVM), 5000, 200, true);
+          Wait.waitForCriterion(verifyPortfolioCount(numPortfoliosPerVM), 5000, 200, true);
         }
         return null;
       }
@@ -228,14 +234,14 @@ public class CopyOnReadIndexDUnitTest extends CacheTestCase {
           //Due to index, we have deserialized all of the entries this vm currently host
           //Since we have deserialized and cached these values, we just need to add the number of results we did a copy of due to copy on read
           Index index = getCache().getQueryService().getIndex(region, "idIndex");
-          DistributedTestCase.waitForCriterion(verifyPortfolioCount((int)index.getStatistics().getNumberOfValues() + numPortfoliosPerVM + numExpectedResults), 5000, 200, true);
+          Wait.waitForCriterion(verifyPortfolioCount((int)index.getStatistics().getNumberOfValues() + numPortfoliosPerVM + numExpectedResults), 5000, 200, true);
         }
         else {
           //operations we have done on this vm consist of:
           //50 instances of Portfolio created for put operation
           //Due to the query we deserialized the number of entries this vm currently hosts
           //We had to deserialized the results from the other data nodes when we iterated through the results as well as our own
-          DistributedTestCase.waitForCriterion(verifyPortfolioCount((int)((PartitionedRegion)region).getLocalSize() + numExpectedResults + numPortfoliosPerVM), 5000, 200, true);
+          Wait.waitForCriterion(verifyPortfolioCount((int)((PartitionedRegion)region).getLocalSize() + numExpectedResults + numPortfoliosPerVM), 5000, 200, true);
         }
         return null;
       }
@@ -247,11 +253,11 @@ public class CopyOnReadIndexDUnitTest extends CacheTestCase {
         if (hasIndex) {
           //After vm0 executed the query, we already had the values deserialized in our cache
           //So it's the same total as before
-          DistributedTestCase.waitForCriterion(verifyPortfolioCount((int)((PartitionedRegion)region).getLocalSize() + numPortfoliosPerVM), 5000, 200, true);
+          Wait.waitForCriterion(verifyPortfolioCount((int)((PartitionedRegion)region).getLocalSize() + numPortfoliosPerVM), 5000, 200, true);
         }
         else {
           //After vm0 executed the query, we had to deserialize the values in our vm
-          DistributedTestCase.waitForCriterion(verifyPortfolioCount((int)((PartitionedRegion)region).getLocalSize() + numPortfoliosPerVM), 5000, 200, true);
+          Wait.waitForCriterion(verifyPortfolioCount((int)((PartitionedRegion)region).getLocalSize() + numPortfoliosPerVM), 5000, 200, true);
         }
         return null;
       }
@@ -281,7 +287,7 @@ public class CopyOnReadIndexDUnitTest extends CacheTestCase {
           //Due to index, we have deserialized all of the entries this vm currently host
           //This is the second query, because we have deserialized and cached these values, we just need to add the number of results a second time
           Index index = getCache().getQueryService().getIndex(region, "idIndex");
-          DistributedTestCase.waitForCriterion(verifyPortfolioCount((int)index.getStatistics().getNumberOfValues() + numExpectedResults + numExpectedResults + numPortfoliosPerVM), 5000, 200, true);
+          Wait.waitForCriterion(verifyPortfolioCount((int)index.getStatistics().getNumberOfValues() + numExpectedResults + numExpectedResults + numPortfoliosPerVM), 5000, 200, true);
         }
         else {
         //operations we have done on this vm consist of:
@@ -289,7 +295,7 @@ public class CopyOnReadIndexDUnitTest extends CacheTestCase {
           //Due to index, we have deserialized all of the entries this vm currently host
           //This is the second query, because we have deserialized and cached these values, we just need to add the number of results a second time
           //Because we have no index, we have to again deserialize all the values that this vm is hosting
-          DistributedTestCase.waitForCriterion(verifyPortfolioCount((int)(((PartitionedRegion)region).getLocalSize() + ((PartitionedRegion)region).getLocalSize() + numExpectedResults + numExpectedResults + numPortfoliosPerVM)), 5000, 200, true);
+          Wait.waitForCriterion(verifyPortfolioCount((int)(((PartitionedRegion)region).getLocalSize() + ((PartitionedRegion)region).getLocalSize() + numExpectedResults + numExpectedResults + numPortfoliosPerVM)), 5000, 200, true);
         }
         return null;
       }
@@ -342,7 +348,7 @@ public class CopyOnReadIndexDUnitTest extends CacheTestCase {
         }
         
         //We should have the same number of portfolio objects that we created for the put
-        DistributedTestCase.waitForCriterion(verifyPortfolioCount(numPortfolios), 5000, 200, true);
+        Wait.waitForCriterion(verifyPortfolioCount(numPortfolios), 5000, 200, true);
         return null;
       }
     });
@@ -351,7 +357,7 @@ public class CopyOnReadIndexDUnitTest extends CacheTestCase {
       public Object call() throws Exception {
         //At this point, we should only have serialized values in this vm
         Region region = getCache().getRegion("/portfolios");
-        DistributedTestCase.waitForCriterion(verifyPortfolioCount(0), 0, 200, true);
+        Wait.waitForCriterion(verifyPortfolioCount(0), 0, 200, true);
         return null;
       }
     });
@@ -361,10 +367,10 @@ public class CopyOnReadIndexDUnitTest extends CacheTestCase {
         //There is an index for vm2, so we should have deserialized values at this point,
         Region region = getCache().getRegion("/portfolios");
         if (hasIndex) {
-          DistributedTestCase.waitForCriterion(verifyPortfolioCount(numPortfolios), 0, 200, true);
+          Wait.waitForCriterion(verifyPortfolioCount(numPortfolios), 0, 200, true);
         }
         else {
-          DistributedTestCase.waitForCriterion(verifyPortfolioCount(0), 0, 200, true);
+          Wait.waitForCriterion(verifyPortfolioCount(0), 0, 200, true);
         }
         return null;
       }
@@ -401,12 +407,12 @@ public class CopyOnReadIndexDUnitTest extends CacheTestCase {
         txManager.commit();
         }
         catch (CommitConflictException conflict) {
-          fail("commit conflict exception", conflict);
+          Assert.fail("commit conflict exception", conflict);
         }
         
         //We have created puts from our previous callable
         //Now we have copied the results from the query 
-        DistributedTestCase.waitForCriterion(verifyPortfolioCount(numExpectedResults + numPortfolios), 0, 200, true);
+        Wait.waitForCriterion(verifyPortfolioCount(numExpectedResults + numPortfolios), 0, 200, true);
         return null;
       }
     });
@@ -434,7 +440,7 @@ public class CopyOnReadIndexDUnitTest extends CacheTestCase {
         }
         //first it must deserialize the portfolios in the replicated region
         //then we do a copy on read of these deserialized objects for the final result set
-        DistributedTestCase.waitForCriterion(verifyPortfolioCount(numExpectedResults + numPortfolios), 0, 200, true);
+        Wait.waitForCriterion(verifyPortfolioCount(numExpectedResults + numPortfolios), 0, 200, true);
 
         results = (SelectResults) query.execute();
         assertEquals(numExpectedResults, results.size());
@@ -452,7 +458,7 @@ public class CopyOnReadIndexDUnitTest extends CacheTestCase {
       
         //we never created index on vm1
         //so in this case, we always have to deserialize the value from the region
-        DistributedTestCase.waitForCriterion(verifyPortfolioCount(numPortfolios * 2 + numExpectedResults * 2), 0, 200, true);
+        Wait.waitForCriterion(verifyPortfolioCount(numPortfolios * 2 + numExpectedResults * 2), 0, 200, true);
         return null;
       }
     });
@@ -479,7 +485,7 @@ public class CopyOnReadIndexDUnitTest extends CacheTestCase {
           }
         }
         //with or without index, the values had to have been deserialized at one point
-        DistributedTestCase.waitForCriterion(verifyPortfolioCount(numPortfolios + numExpectedResults), 0, 200, true);        
+        Wait.waitForCriterion(verifyPortfolioCount(numPortfolios + numExpectedResults), 0, 200, true);        
         results = (SelectResults) query.execute();
         assertEquals(numExpectedResults, results.size());
         for (Object o: results) {
@@ -499,12 +505,12 @@ public class CopyOnReadIndexDUnitTest extends CacheTestCase {
           //we have an index, so the values are already deserialized
           //total is now our original deserialization amount : numPortfolios
           //two query results copied.
-          DistributedTestCase.waitForCriterion(verifyPortfolioCount(numPortfolios + numExpectedResults * 2), 0, 200, true);        
+          Wait.waitForCriterion(verifyPortfolioCount(numPortfolios + numExpectedResults * 2), 0, 200, true);        
         }
         else {
           //we never created index on vm1
           //so in this case, we always have to deserialize the value from the region
-          DistributedTestCase.waitForCriterion(verifyPortfolioCount(numPortfolios * 2 + numExpectedResults * 2), 0, 200, true);        
+          Wait.waitForCriterion(verifyPortfolioCount(numPortfolios * 2 + numExpectedResults * 2), 0, 200, true);        
         }
         return null;
       }
@@ -531,7 +537,7 @@ public class CopyOnReadIndexDUnitTest extends CacheTestCase {
         }
         
         //with or without index, the values we put in the region were already deserialized values
-        DistributedTestCase.waitForCriterion(verifyPortfolioCount(numExpectedResults * 2 + numPortfolios), 0, 200, true);        
+        Wait.waitForCriterion(verifyPortfolioCount(numExpectedResults * 2 + numPortfolios), 0, 200, true);        
         return null;
       }
     });
@@ -607,7 +613,7 @@ public class CopyOnReadIndexDUnitTest extends CacheTestCase {
         getSystem(props);
         
         final ClientCacheFactory ccf = new ClientCacheFactory(props);
-        ccf.addPoolServer(getServerHostName(server.getHost()), port);
+        ccf.addPoolServer(NetworkUtils.getServerHostName(server.getHost()), port);
         ccf.setPoolSubscriptionEnabled(true);
         
         ClientCache cache = (ClientCache)getClientCache(ccf);
@@ -624,7 +630,7 @@ public class CopyOnReadIndexDUnitTest extends CacheTestCase {
 
   protected Properties getServerProperties() {
     Properties p = new Properties();
-    p.setProperty(DistributionConfig.LOCATORS_NAME, "localhost["+getDUnitLocatorPort()+"]");
+    p.setProperty(DistributionConfig.LOCATORS_NAME, "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
     return p;
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/IndexCreationInternalsJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/IndexCreationInternalsJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/IndexCreationInternalsJUnitTest.java
index 3f23810..7df9a5b 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/IndexCreationInternalsJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/IndexCreationInternalsJUnitTest.java
@@ -45,7 +45,7 @@ import com.gemstone.gemfire.cache.query.internal.CompiledRegion;
 import com.gemstone.gemfire.cache.query.internal.QCompiler;
 import com.gemstone.gemfire.cache.query.internal.types.TypeUtils;
 import com.gemstone.gemfire.internal.cache.LocalRegion;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
@@ -199,8 +199,8 @@ public class IndexCreationInternalsJUnitTest {
     th1.start();
     th2.start();
     name = imgr.putCanonicalizedIteratorNameIfAbsent("index_iter1.coll1");
-    DistributedTestCase.join(th1, 30 * 1000, null);
-    DistributedTestCase.join(th2, 30 * 1000, null);
+    ThreadUtils.join(th1, 30 * 1000);
+    ThreadUtils.join(th2, 30 * 1000);
     if( !(name.equals(this.childThreadName1) && name.equals(this.childThreadName2)) ) {
      fail("Canonicalization name generation test failed in concurrent scenario as first name is "+this.childThreadName1 + "and second is "+name + " and third is "+this.childThreadName2);   
     }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/IndexMaintainceJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/IndexMaintainceJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/IndexMaintainceJUnitTest.java
index 9b9b1f7..c1d39b9 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/IndexMaintainceJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/IndexMaintainceJUnitTest.java
@@ -51,7 +51,7 @@ import com.gemstone.gemfire.cache.query.data.Portfolio;
 import com.gemstone.gemfire.cache.query.functional.StructSetOrResultsSet;
 import com.gemstone.gemfire.cache.query.internal.QueryObserverAdapter;
 import com.gemstone.gemfire.cache.query.internal.QueryObserverHolder;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
@@ -322,7 +322,7 @@ public class IndexMaintainceJUnitTest {
             }
           });
           th.start();
-          DistributedTestCase.join(th, 30 * 1000, null);
+          ThreadUtils.join(th, 30 * 1000);
           assertTrue(IndexMaintainceJUnitTest.region.size() == 1);
         }
       });

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/IndexTrackingQueryObserverDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/IndexTrackingQueryObserverDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/IndexTrackingQueryObserverDUnitTest.java
index f12df85..d9b0d5e 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/IndexTrackingQueryObserverDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/IndexTrackingQueryObserverDUnitTest.java
@@ -42,10 +42,14 @@ import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.internal.cache.LocalRegion;
 import com.gemstone.gemfire.internal.cache.PartitionedRegion;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionQueryEvaluator.TestHook;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * @author shobhit
@@ -121,11 +125,11 @@ public class IndexTrackingQueryObserverDUnitTest extends CacheTestCase {
     });
 
     if (async1.exceptionOccurred()) {
-      fail("", async1.getException());
+      Assert.fail("", async1.getException());
     }
 
     if (async1.exceptionOccurred()) {
-      fail("", async1.getException());
+      Assert.fail("", async1.getException());
     }
   }
 
@@ -195,7 +199,7 @@ public class IndexTrackingQueryObserverDUnitTest extends CacheTestCase {
             assertTrue(keyIndex1 instanceof PartitionedIndex);
           }
         } catch (Exception e) {
-          fail("While creating Index on PR", e);
+          Assert.fail("While creating Index on PR", e);
         }
         Region region = getCache().getRegion("portfolio");
         //Inject TestHook in QueryObserver before running query.
@@ -224,7 +228,7 @@ public class IndexTrackingQueryObserverDUnitTest extends CacheTestCase {
         try {
           results = (SelectResults) query.execute();
         } catch (Exception e) {
-          fail("While running query on PR", e);
+          Assert.fail("While running query on PR", e);
         }
 
         // The query should return all elements in region.
@@ -249,7 +253,7 @@ public class IndexTrackingQueryObserverDUnitTest extends CacheTestCase {
         final IndexTrackingTestHook th = (IndexTrackingTestHook) ((IndexTrackingQueryObserver) observer)
             .getTestHook();
 
-        waitForCriterion(new WaitCriterion() {
+        Wait.waitForCriterion(new WaitCriterion() {
 
           public boolean done() {
             if(th.getRegionMap() != null) {
@@ -271,7 +275,7 @@ public class IndexTrackingQueryObserverDUnitTest extends CacheTestCase {
           totalResults += i.intValue();
         }
         
-        getLogWriter().fine("Index Info result size is " + totalResults);
+        LogWriterUtils.getLogWriter().fine("Index Info result size is " + totalResults);
         assertEquals(results, totalResults);
       }
     };

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/InitializeIndexEntryDestroyQueryDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/InitializeIndexEntryDestroyQueryDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/InitializeIndexEntryDestroyQueryDUnitTest.java
index 6b07d48..bfc431e 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/InitializeIndexEntryDestroyQueryDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/InitializeIndexEntryDestroyQueryDUnitTest.java
@@ -34,10 +34,13 @@ import com.gemstone.gemfire.cache.query.internal.Undefined;
 import com.gemstone.gemfire.cache.query.partitioned.PRQueryDUnitHelper;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.cache30.CacheTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 /**
  * Test creates a local region. Creates and removes index in a parallel running thread.
@@ -87,7 +90,7 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
           RegionFactory regionFactory = cache.createRegionFactory(attr.create());
           localRegion = regionFactory.create(name);
         } catch (IllegalStateException ex) {
-          getLogWriter().warning("Creation caught IllegalStateException", ex);
+          LogWriterUtils.getLogWriter().warning("Creation caught IllegalStateException", ex);
         }
         assertNotNull("Region " + name + " not in cache", cache.getRegion(name));
         assertNotNull("Region ref null", localRegion);
@@ -121,11 +124,11 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
           }
           assertNotNull(index);
 
-          pause(100);
+          Wait.pause(100);
 
           PRQHelp.getCache().getQueryService().removeIndex(index);
 
-          pause(100);
+          Wait.pause(100);
         }
       }
     });
@@ -147,7 +150,7 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
           PRQHelp.getCache().getLogger().fine("Going to destroy the value" + p);
           r.destroy(j);
 
-          pause(100);
+          Wait.pause(100);
 
           //Put the value back again.
           PRQHelp.getCache().getLogger().fine("Putting the value back" + p);
@@ -194,14 +197,14 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
       }
     });
     
-    DistributedTestCase.join(asyInvk0, 1000 * 1000, getLogWriter());
+    ThreadUtils.join(asyInvk0, 1000 * 1000);
     if (asyInvk0.exceptionOccurred()) {
-      fail("asyInvk0 failed", asyInvk0.getException());
+      Assert.fail("asyInvk0 failed", asyInvk0.getException());
     }
     
-    DistributedTestCase.join(asyInvk1, 1000 * 1000, getLogWriter());
+    ThreadUtils.join(asyInvk1, 1000 * 1000);
     if (asyInvk1.exceptionOccurred()) {
-      fail("asyInvk1 failed", asyInvk1.getException());
+      Assert.fail("asyInvk1 failed", asyInvk1.getException());
     }
   }
 
@@ -224,7 +227,7 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
           RegionFactory regionFactory = cache.createRegionFactory(attr.create());
           partitionRegion = regionFactory.create(name);
         } catch (IllegalStateException ex) {
-          getLogWriter().warning("Creation caught IllegalStateException", ex);
+          LogWriterUtils.getLogWriter().warning("Creation caught IllegalStateException", ex);
         }
         assertNotNull("Region " + name + " not in cache", cache.getRegion(name));
         assertNotNull("Region ref null", partitionRegion);
@@ -283,7 +286,7 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
           PRQHelp.getCache().getLogger().fine("Going to destroy the value" + p);
           r.destroy(j);
 
-          pause(20);
+          Wait.pause(20);
 
           //Put the value back again.
           PRQHelp.getCache().getLogger().fine("Putting the value back" + p);
@@ -330,14 +333,14 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
       }
     });
 
-    DistributedTestCase.join(asyInvk0, 1000 * 1000, getLogWriter());
+    ThreadUtils.join(asyInvk0, 1000 * 1000);
     if (asyInvk0.exceptionOccurred()) {
-      fail("asyInvk0 failed", asyInvk0.getException());
+      Assert.fail("asyInvk0 failed", asyInvk0.getException());
     }
     
-    DistributedTestCase.join(asyInvk1, 1000 * 1000, getLogWriter());
+    ThreadUtils.join(asyInvk1, 1000 * 1000);
     if (asyInvk1.exceptionOccurred()) {
-      fail("asyInvk1 failed", asyInvk1.getException());
+      Assert.fail("asyInvk1 failed", asyInvk1.getException());
     }
   }
 
@@ -360,7 +363,7 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
           RegionFactory regionFactory = cache.createRegionFactory(attr.create());
           partitionRegion = regionFactory.create(name);
         } catch (IllegalStateException ex) {
-          getLogWriter().warning("Creation caught IllegalStateException", ex);
+          LogWriterUtils.getLogWriter().warning("Creation caught IllegalStateException", ex);
         }
         assertNotNull("Region " + name + " not in cache", cache.getRegion(name));
         assertNotNull("Region ref null", partitionRegion);
@@ -419,7 +422,7 @@ public class InitializeIndexEntryDestroyQueryDUnitTest extends CacheTestCase {
             PRQHelp.getCache().getLogger().fine("Querying the region with " + query);
             results = (SelectResults)query.execute();
           } catch (Exception e) {
-            fail("Query: " + query + " execution failed with exception", e);
+            Assert.fail("Query: " + query + " execution failed with exception", e);
           }
 
           for (Object obj : results) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/MultiIndexCreationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/MultiIndexCreationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/MultiIndexCreationDUnitTest.java
index a81078a..cea00c1 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/MultiIndexCreationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/MultiIndexCreationDUnitTest.java
@@ -31,10 +31,13 @@ import com.gemstone.gemfire.cache.query.internal.QueryObserverHolder;
 import com.gemstone.gemfire.cache.query.internal.index.IndexManager.TestHook;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 public class MultiIndexCreationDUnitTest extends CacheTestCase {
 
@@ -87,8 +90,8 @@ public class MultiIndexCreationDUnitTest extends CacheTestCase {
       public Object call() throws Exception {
         long giveupTime = System.currentTimeMillis() + 60000;
         while (!hooked && System.currentTimeMillis() < giveupTime) {
-          getLogWriter().info("Query Waiting for index hook.");
-          pause(100);
+          LogWriterUtils.getLogWriter().info("Query Waiting for index hook.");
+          Wait.pause(100);
         }
         assertTrue(hooked);
         
@@ -124,12 +127,12 @@ public class MultiIndexCreationDUnitTest extends CacheTestCase {
       }
     });
     
-    DistributedTestCase.join(a1, 6000, this.getLogWriter());
+    ThreadUtils.join(a1, 6000);
     
     if(a1.exceptionOccurred()) {
       fail(a1.getException().getMessage());
     }
-    DistributedTestCase.join(a2, 6000, this.getLogWriter());
+    ThreadUtils.join(a2, 6000);
     if(a2.exceptionOccurred()) {
       fail(a2.getException().getMessage());
     }
@@ -169,11 +172,14 @@ public class MultiIndexCreationDUnitTest extends CacheTestCase {
   }
 
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
     hooked = false;
-    invokeInEveryVM(CacheTestCase.class, "disconnectFromDS");
-    super.tearDown2();
-    invokeInEveryVM(QueryObserverHolder.class, "reset");
+    Invoke.invokeInEveryVM(CacheTestCase.class, "disconnectFromDS");
+  }
+  
+  @Override
+  protected final void postTearDownCacheTestCase() throws Exception {
+    Invoke.invokeInEveryVM(QueryObserverHolder.class, "reset");
   }
 
   private static class MultiIndexCreationTestHook implements TestHook {
@@ -183,10 +189,10 @@ public class MultiIndexCreationDUnitTest extends CacheTestCase {
       long giveupTime = System.currentTimeMillis() + 60000;
       if (spot == 13) {
         hooked = true;
-        getLogWriter().info("MultiIndexCreationTestHook is hooked in create defined indexes.");
+        LogWriterUtils.getLogWriter().info("MultiIndexCreationTestHook is hooked in create defined indexes.");
         while (hooked && System.currentTimeMillis() < giveupTime) {
-          getLogWriter().info("MultiIndexCreationTestHook waiting.");
-          pause(100);
+          LogWriterUtils.getLogWriter().info("MultiIndexCreationTestHook waiting.");
+          Wait.pause(100);
         }
         assertEquals(hooked, false);
       }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/PutAllWithIndexPerfDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/PutAllWithIndexPerfDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/PutAllWithIndexPerfDUnitTest.java
index 040a671..fd50e63 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/PutAllWithIndexPerfDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/PutAllWithIndexPerfDUnitTest.java
@@ -39,7 +39,10 @@ import com.gemstone.gemfire.cache.server.CacheServer;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.internal.AvailablePort;
+import com.gemstone.gemfire.test.dunit.Assert;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 import com.gemstone.gemfire.test.junit.categories.DistributedTest;
 
@@ -65,13 +68,9 @@ public class PutAllWithIndexPerfDUnitTest extends CacheTestCase {
     disconnectAllFromDS();
   }
 
-  public void tearDown2() throws Exception {
-    try {
-      super.tearDown2();
-    }
-    finally {
-      disconnectAllFromDS();
-    }
+  @Override
+  protected final void postTearDownCacheTestCase() throws Exception {
+    disconnectAllFromDS();
   }
 
   public void testPutAllWithIndexes() {
@@ -85,7 +84,7 @@ public class PutAllWithIndexPerfDUnitTest extends CacheTestCase {
     vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
         public void run2() throws CacheException {
           Properties config = new Properties();
-          config.put("locators", "localhost["+getDUnitLocatorPort()+"]");
+          config.put("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
           Cache cache = new CacheFactory(config).create();
           AttributesFactory factory = new AttributesFactory();
           factory.setScope(Scope.LOCAL);
@@ -93,20 +92,20 @@ public class PutAllWithIndexPerfDUnitTest extends CacheTestCase {
           try {
             startBridgeServer(0, false);
           } catch (Exception ex) {
-            fail("While starting CacheServer", ex);
+            Assert.fail("While starting CacheServer", ex);
           }
           //Create Index on empty region
           try {
             cache.getQueryService().createIndex("idIndex", "ID", "/"+name);
           } catch (Exception e) {
-            fail("index creation failed", e);
+            Assert.fail("index creation failed", e);
           }
         }
       });
 
     // Create client region
     final int port = vm0.invokeInt(PutAllWithIndexPerfDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(vm0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
     vm1.invoke(new CacheSerializableRunnable("Create region") {
         public void run2() throws CacheException {
           Properties config = new Properties();
@@ -157,7 +156,7 @@ public class PutAllWithIndexPerfDUnitTest extends CacheTestCase {
           cache.getRegion(name).clear();
           cache.getQueryService().createIndex("idIndex", "p.ID", "/"+name+" p");
         } catch (Exception e) {
-          fail("index creation failed", e);
+          Assert.fail("index creation failed", e);
         }
       }
     });

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicIndexCreationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicIndexCreationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicIndexCreationDUnitTest.java
index a72dec2..30a5d6f 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicIndexCreationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicIndexCreationDUnitTest.java
@@ -25,10 +25,12 @@ import com.gemstone.gemfire.cache.query.data.Portfolio;
 import com.gemstone.gemfire.cache.query.data.PortfolioData;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionDUnitTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -76,7 +78,7 @@ public class PRBasicIndexCreationDUnitTest extends
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
 
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "PRBasicIndexCreationDUnitTest.testPRBasicIndexCreate started ....");
 
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
@@ -86,7 +88,7 @@ public class PRBasicIndexCreationDUnitTest extends
     // Scope.DISTRIBUTED_ACK, redundancy));
 
     // Creating the Datastores Nodes in the VM1.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info("PRBasicIndexCreationDUnitTest : creating all the prs ");
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
         redundancy));
@@ -110,7 +112,7 @@ public class PRBasicIndexCreationDUnitTest extends
         name, "PrIndexOnStatus", "p.status",null, "p"));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForDuplicatePRIndexCreate(
         name, "PrIndexOnStatus", "p.status",null, "p"));
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "PRBasicIndexCreationDUnitTest.testPRBasicIndexCreate is done ");
   }
 
@@ -126,7 +128,7 @@ public class PRBasicIndexCreationDUnitTest extends
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
 
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "PRBasicIndexCreation.testPRMultiIndexCreation Test Started");
 
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
@@ -150,7 +152,7 @@ public class PRBasicIndexCreationDUnitTest extends
     // will throw a RuntimeException.
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name,
         "PrIndexOnID", "p.ID", null, "p"));
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "PRQBasicIndexCreationTest.testPRMultiIndexCreation ENDED");
 
   }
@@ -168,7 +170,7 @@ public class PRBasicIndexCreationDUnitTest extends
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
 
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "PRBasicIndexCreation.testPRMultiIndexCreation Test Started");
 
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
@@ -232,7 +234,7 @@ public class PRBasicIndexCreationDUnitTest extends
     // Check getIndex() on datastore
     vm1.invoke(getIndexCheck);
 
-    getLogWriter().info("PRQBasicIndexCreationTest.testPRMultiIndexCreation ENDED");
+    LogWriterUtils.getLogWriter().info("PRQBasicIndexCreationTest.testPRMultiIndexCreation ENDED");
   }
   
   /**
@@ -248,28 +250,28 @@ public class PRBasicIndexCreationDUnitTest extends
 //    VM vm3 = host.getVM(3);
     // closeAllCache();
     final String fileName = "PRIndexCreation.xml";
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "PRBasicIndexCreation.testCreatePartitionedIndexThroughXML started");
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Starting and initializing partitioned regions and indexes using xml");
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Starting a pr asynchronously using an xml file name : " + fileName);
     AsyncInvocation asyInvk0 = vm0.invokeAsync(PRQHelp
         .getCacheSerializableRunnableForPRCreateThrougXML(name, fileName));
     AsyncInvocation asyInvk1 = vm1.invokeAsync(PRQHelp
         .getCacheSerializableRunnableForPRCreateThrougXML(name, fileName));
-    DistributedTestCase.join(asyInvk1, 30 * 1000, getLogWriter());
+    ThreadUtils.join(asyInvk1, 30 * 1000);
     if (asyInvk1.exceptionOccurred()) {
-      fail("asyInvk1 failed", asyInvk1.getException());
+      Assert.fail("asyInvk1 failed", asyInvk1.getException());
     }
-    DistributedTestCase.join(asyInvk0, 30 * 1000, getLogWriter());
+    ThreadUtils.join(asyInvk0, 30 * 1000);
     if (asyInvk0.exceptionOccurred()) {
-      fail("asyInvk0 failed", asyInvk0.getException());
+      Assert.fail("asyInvk0 failed", asyInvk0.getException());
     }
     // printing all the indexes are created.
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForIndexCreationCheck(name));
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "PRBasicIndexCreation.testCreatePartitionedIndexThroughXML is done  " );
 
   }
@@ -291,7 +293,7 @@ public class PRBasicIndexCreationDUnitTest extends
     // final String fileName = "PRIndexCreation.xml";
     // vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateThrougXML(name,
     // fileName));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRBasicIndexCreationDUnitTest.testCreatePartitionedRegionThroughXMLAndAPI started ");
     // creating all the prs
@@ -339,7 +341,7 @@ public class PRBasicIndexCreationDUnitTest extends
     // final String fileName = "PRIndexCreation.xml";
     // vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateThrougXML(name,
     // fileName));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRBasicIndexCreationDUnitTest.testCreatePartitionedIndexWithNoAliasAfterPuts started ");
     // creating all the prs
@@ -385,7 +387,7 @@ public class PRBasicIndexCreationDUnitTest extends
     // final String fileName = "PRIndexCreation.xml";
     // vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateThrougXML(name,
     // fileName));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRBasicIndexCreationDUnitTest.testCreatePartitionedIndexWithNoAliasBeforePuts started ");
     // creating all the prs
@@ -464,7 +466,7 @@ public class PRBasicIndexCreationDUnitTest extends
     // final String fileName = "PRIndexCreation.xml";
     // vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateThrougXML(name,
     // fileName));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRBasicIndexCreationDUnitTest.testPartitionedIndexUsageWithPRQuery started ");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
@@ -492,7 +494,7 @@ public class PRBasicIndexCreationDUnitTest extends
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck(name));
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck(name));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck(name));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
     .info(
         "PRBasicIndexCreationDUnitTest.testPartitionedIndexUsageWithPRQuery done ");
   }
@@ -512,7 +514,7 @@ public class PRBasicIndexCreationDUnitTest extends
     // fileName));
     
     int redundancy = 1;
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRBasicIndexCreationDUnitTest.testPartitionedIndexCreationDuringPersistentRecovery started ");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPersistentPRCreate(name,
@@ -551,7 +553,7 @@ public class PRBasicIndexCreationDUnitTest extends
     //The indexes may not have been completely created yet, because the buckets
     //may still be recovering from disk.
 //    vm0.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck(name));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
     .info(
         "PRBasicIndexCreationDUnitTest.testPartitionedIndexCreationDuringPersistentRecovery done ");
   }
@@ -569,7 +571,7 @@ public class PRBasicIndexCreationDUnitTest extends
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRBasicIndexCreationDUnitTest.testPartitionedQueryWithIndexOnIdBug37089 started ");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
@@ -593,7 +595,7 @@ public class PRBasicIndexCreationDUnitTest extends
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRQueryAndCompareResults(
         name, localName));
     // validation on index usage with queries over a pr
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
     .info(
         "PRBasicIndexCreationDUnitTest.testPartitionedQueryWithIndexOnIdBug37089 done ");
   }
@@ -611,11 +613,11 @@ public class PRBasicIndexCreationDUnitTest extends
 //    VM vm3 = host.getVM(3);
     // closeAllCache();
     final String fileName = "PRIndexCreation.xml";
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "PRBasicIndexCreation.testCreatePartitionedIndexThroughXML started");
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Starting and initializing partitioned regions and indexes using xml");
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Starting a pr asynchronously using an xml file name : " + fileName);
    // AsyncInvocation asyInvk0 = vm0.invokeAsync(PRQHelp
    //     .getCacheSerializableRunnableForPRCreateThrougXML(name, fileName));
@@ -661,7 +663,7 @@ public class PRBasicIndexCreationDUnitTest extends
         cnt, cntDest));
     
     
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "PRBasicIndexCreation.testCreatePartitionedIndexThroughXML is done  " );
     
 
@@ -755,7 +757,7 @@ public class PRBasicIndexCreationDUnitTest extends
    int totalDataSize = 90;
    final int i = 0;
    
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
    .info(
          "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Querying PR Test with DACK Started*****");
 
@@ -767,7 +769,7 @@ public class PRBasicIndexCreationDUnitTest extends
    VM vm3 = host.getVM(3);
 
    // Creating PR's on the participating VM's
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Creating PR's on VM0, VM1 , VM2 , VM3");
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
@@ -778,17 +780,17 @@ public class PRBasicIndexCreationDUnitTest extends
                                                               redundancy, valueConstraint));
    vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
                                                               redundancy, valueConstraint));
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Successfully Created PR's on VM0, VM1 , VM2 , VM3");
 
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Creating Local region on VM0 to compare result Sets");
    // creating a local region on one of the JVM's
    vm0.invoke(PRQHelp
               .getCacheSerializableRunnableForLocalRegionCreation(localName, valueConstraint));
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Successfully Created Local Region on VM0");
 
@@ -806,7 +808,7 @@ public class PRBasicIndexCreationDUnitTest extends
                                                             (2 * step), (3 * step)));
    vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRPutsKeyValue(name, portfoliosAndPositions,
                                                             (3 * (step)), dataSize));
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Inserted Portfolio data across PR's");
 
@@ -824,14 +826,14 @@ public class PRBasicIndexCreationDUnitTest extends
    vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name,
        "PrIndexOnKeyID", "key.ID","/" + name + ".keys key", null));
    
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Inserted Portfolio data over Local Region on VM0");
 
    // querying the VM for data
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPROrderByQueryAndCompareResults(
                                                                               name, localName));
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : *Querying PR's with DACK Test ENDED*****");
  }
@@ -853,7 +855,7 @@ public class PRBasicIndexCreationDUnitTest extends
    int totalDataSize = 90;
    final int i = 0;
 
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
    .info(
    "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Querying PR Test with DACK Started*****");
 
@@ -865,7 +867,7 @@ public class PRBasicIndexCreationDUnitTest extends
    VM vm3 = host.getVM(3);
 
    // Creating PR's on the participating VM's
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
    .info(
    "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Creating PR's on VM0, VM1 , VM2 , VM3");
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
@@ -876,17 +878,17 @@ public class PRBasicIndexCreationDUnitTest extends
        redundancy, valueConstraint));
    vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
        redundancy, valueConstraint));
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
    .info(
        "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Successfully Created PR's on VM0, VM1 , VM2 , VM3");
 
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
    .info(
        "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Creating Local region on VM0 to compare result Sets");
    // creating a local region on one of the JVM's
    vm0.invoke(PRQHelp
        .getCacheSerializableRunnableForLocalRegionCreation(localName, valueConstraint));
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
    .info(
        "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Successfully Created Local Region on VM0");
 
@@ -904,7 +906,7 @@ public class PRBasicIndexCreationDUnitTest extends
        (2 * step), (3 * step)));
    vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRPutsKeyValue(name, portfoliosAndPositions,
        (3 * (step)), dataSize));
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
    .info(
        "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Inserted Portfolio data across PR's");
 
@@ -922,14 +924,14 @@ public class PRBasicIndexCreationDUnitTest extends
    vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name,
        "PrIndexOnKeyID", "key.ID","/" + name + ".keys key", null));
 
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
    .info(
        "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Inserted Portfolio data over Local Region on VM0");
 
    // querying the VM for data
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPROrderByQueryAndVerifyOrder(
        name, localName));
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
    .info(
        "PRQueryDUnitTest#testPRDAckCreationAndQuerying : *Querying PR's with DACK Test ENDED*****");
  }
@@ -941,7 +943,7 @@ public void testIndexQueryingWithOrderByLimit() throws Exception
    int totalDataSize = 90;
    final int i = 0;
    
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
    .info(
          "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Querying PR Test with DACK Started*****");
 
@@ -953,7 +955,7 @@ public void testIndexQueryingWithOrderByLimit() throws Exception
    VM vm3 = host.getVM(3);
 
    // Creating PR's on the participating VM's
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Creating PR's on VM0, VM1 , VM2 , VM3");
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
@@ -964,17 +966,17 @@ public void testIndexQueryingWithOrderByLimit() throws Exception
                                                               redundancy, valueConstraint));
    vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
                                                               redundancy, valueConstraint));
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Successfully Created PR's on VM0, VM1 , VM2 , VM3");
 
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Creating Local region on VM0 to compare result Sets");
    // creating a local region on one of the JVM's
    vm0.invoke(PRQHelp
               .getCacheSerializableRunnableForLocalRegionCreation(localName, valueConstraint));
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Successfully Created Local Region on VM0");
 
@@ -992,7 +994,7 @@ public void testIndexQueryingWithOrderByLimit() throws Exception
                                                             (2 * step), (3 * step)));
    vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRPutsKeyValue(name, portfoliosAndPositions,
                                                             (3 * (step)), dataSize));
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Inserted Portfolio data across PR's");
 
@@ -1032,14 +1034,14 @@ public void testIndexQueryingWithOrderByLimit() throws Exception
        "rrIndexOnKeyStatus", "key.status", "/" + localName + ".keys key", null));
 
    
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Inserted Portfolio data over Local Region on VM0");
 
    // querying the VM for data
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPROrderByQueryWithLimit(
                                                                               name, localName));
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : *Querying PR's with DACK Test ENDED*****");
  }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicIndexCreationDeadlockDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicIndexCreationDeadlockDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicIndexCreationDeadlockDUnitTest.java
index 07345b4..b09eb31 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicIndexCreationDeadlockDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicIndexCreationDeadlockDUnitTest.java
@@ -30,10 +30,11 @@ import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionDUnitTestCase;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 /**
  * @author rdubey
@@ -142,10 +143,10 @@ public class PRBasicIndexCreationDeadlockDUnitTest extends
           IndexUtilTestHook hook = (IndexUtilTestHook) IndexUtils.testHook;
           while (hook == null) {
             hook = (IndexUtilTestHook) IndexUtils.testHook;
-            pause(20);
+            Wait.pause(20);
           }
           while (!hook.isHooked()) {
-            pause(30);
+            Wait.pause(30);
           }
           //hook.setHooked(false);
           hook_vm1 = true;
@@ -165,7 +166,7 @@ public class PRBasicIndexCreationDeadlockDUnitTest extends
         }
       });
 
-      pause(2000);
+      Wait.pause(2000);
       
       vm0.invoke(new CacheSerializableRunnable("Checking hook in VM0 cache again") {
         
@@ -174,10 +175,10 @@ public class PRBasicIndexCreationDeadlockDUnitTest extends
           IndexUtilTestHook hook = (IndexUtilTestHook) IndexUtils.testHook;
           while (hook == null) {
             hook = (IndexUtilTestHook) IndexUtils.testHook;
-            pause(20);
+            Wait.pause(20);
           }
           while (!hook.isHooked()) {
-            pause(30);
+            Wait.pause(30);
           }
           hook.setHooked(false);
           hook_vm1 = false;
@@ -185,7 +186,7 @@ public class PRBasicIndexCreationDeadlockDUnitTest extends
       });  
 
       for (AsyncInvocation async: asyns) {
-        DistributedTestCase.join(async, 10000, null);
+        ThreadUtils.join(async, 10000);
       }
     } finally {
       
@@ -223,7 +224,7 @@ public class PRBasicIndexCreationDeadlockDUnitTest extends
       switch (spot) {
       case 0:
         hooked = true;
-        while(hooked) {pause(300);}
+        while(hooked) {Wait.pause(300);}
         break;
 
       default:

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicMultiIndexCreationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicMultiIndexCreationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicMultiIndexCreationDUnitTest.java
index 2326709..d020ef6 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicMultiIndexCreationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicMultiIndexCreationDUnitTest.java
@@ -28,6 +28,7 @@ import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionDUnitTestCase;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -73,7 +74,7 @@ public class PRBasicMultiIndexCreationDUnitTest extends
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
 
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "PRBasicIndexCreationDUnitTest.testPRBasicIndexCreate started ....");
 
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
@@ -83,7 +84,7 @@ public class PRBasicMultiIndexCreationDUnitTest extends
     // Scope.DISTRIBUTED_ACK, redundancy));
 
     // Creating the Datastores Nodes in the VM1.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info("PRBasicIndexCreationDUnitTest : creating all the prs ");
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
         redundancy));
@@ -115,7 +116,7 @@ public class PRBasicMultiIndexCreationDUnitTest extends
         name, "PrIndexOnStatus", "p.status",null, "p"));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForDuplicatePRIndexCreate(
         name, "PrIndexOnStatus", "p.status",null, "p"));
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "PRBasicIndexCreationDUnitTest.testPRBasicIndexCreate is done ");
   }
 
@@ -134,7 +135,7 @@ public class PRBasicMultiIndexCreationDUnitTest extends
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
 
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "PRBasicIndexCreation.testPRMultiIndexCreation Test Started");
 
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
@@ -202,7 +203,7 @@ public class PRBasicMultiIndexCreationDUnitTest extends
     vm2.invoke(getIndexCheck);
     vm3.invoke(getIndexCheck);
 
-    getLogWriter().info("PRQBasicIndexCreationTest.testPRMultiIndexCreation ENDED");
+    LogWriterUtils.getLogWriter().info("PRQBasicIndexCreationTest.testPRMultiIndexCreation ENDED");
   }
   
 
@@ -224,7 +225,7 @@ public class PRBasicMultiIndexCreationDUnitTest extends
     // final String fileName = "PRIndexCreation.xml";
     // vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateThrougXML(name,
     // fileName));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRBasicIndexCreationDUnitTest.testCreatePartitionedRegionThroughXMLAndAPI started ");
     // creating all the prs
@@ -278,7 +279,7 @@ public class PRBasicMultiIndexCreationDUnitTest extends
     // final String fileName = "PRIndexCreation.xml";
     // vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateThrougXML(name,
     // fileName));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRBasicIndexCreationDUnitTest.testCreatePartitionedIndexWithNoAliasAfterPuts started ");
     // creating all the prs
@@ -335,7 +336,7 @@ public class PRBasicMultiIndexCreationDUnitTest extends
     // final String fileName = "PRIndexCreation.xml";
     // vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateThrougXML(name,
     // fileName));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRBasicIndexCreationDUnitTest.testCreatePartitionedIndexWithNoAliasBeforePuts started ");
     // creating all the prs
@@ -425,7 +426,7 @@ public class PRBasicMultiIndexCreationDUnitTest extends
     // final String fileName = "PRIndexCreation.xml";
     // vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreateThrougXML(name,
     // fileName));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRBasicIndexCreationDUnitTest.testPartitionedIndexUsageWithPRQuery started ");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
@@ -459,7 +460,7 @@ public class PRBasicMultiIndexCreationDUnitTest extends
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck(name));
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck(name));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck(name));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
     .info(
         "PRBasicIndexCreationDUnitTest.testPartitionedIndexUsageWithPRQuery done ");
   }
@@ -479,7 +480,7 @@ public class PRBasicMultiIndexCreationDUnitTest extends
     // fileName));
     
     int redundancy = 1;
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRBasicIndexCreationDUnitTest.testPartitionedIndexCreationDuringPersistentRecovery started ");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPersistentPRCreate(name,
@@ -523,7 +524,7 @@ public class PRBasicMultiIndexCreationDUnitTest extends
     //The indexes may not have been completely created yet, because the buckets
     //may still be recovering from disk.
 //    vm0.invoke(PRQHelp.getCacheSerializableRunnableForIndexUsageCheck(name));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
     .info(
         "PRBasicIndexCreationDUnitTest.testPartitionedIndexCreationDuringPersistentRecovery done ");
   }
@@ -541,7 +542,7 @@ public class PRBasicMultiIndexCreationDUnitTest extends
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRBasicIndexCreationDUnitTest.testPartitionedQueryWithIndexOnIdBug37089 started ");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
@@ -571,7 +572,7 @@ public class PRBasicMultiIndexCreationDUnitTest extends
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRQueryAndCompareResults(
         name, localName));
     // validation on index usage with queries over a pr
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
     .info(
         "PRBasicIndexCreationDUnitTest.testPartitionedQueryWithIndexOnIdBug37089 done ");
   }
@@ -589,11 +590,11 @@ public class PRBasicMultiIndexCreationDUnitTest extends
 //    VM vm3 = host.getVM(3);
     // closeAllCache();
     final String fileName = "PRIndexCreation.xml";
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "PRBasicIndexCreation.testCreatePartitionedIndexThroughXML started");
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Starting and initializing partitioned regions and indexes using xml");
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Starting a pr asynchronously using an xml file name : " + fileName);
    // AsyncInvocation asyInvk0 = vm0.invokeAsync(PRQHelp
    //     .getCacheSerializableRunnableForPRCreateThrougXML(name, fileName));
@@ -648,7 +649,7 @@ public class PRBasicMultiIndexCreationDUnitTest extends
         cnt, cntDest));
     
     
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "PRBasicIndexCreation.testCreatePartitionedIndexThroughXML is done  " );
     
 
@@ -749,7 +750,7 @@ public class PRBasicMultiIndexCreationDUnitTest extends
    int totalDataSize = 90;
    final int i = 0;
    
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
    .info(
          "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Querying PR Test with DACK Started*****");
 
@@ -761,7 +762,7 @@ public class PRBasicMultiIndexCreationDUnitTest extends
    VM vm3 = host.getVM(3);
 
    // Creating PR's on the participating VM's
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Creating PR's on VM0, VM1 , VM2 , VM3");
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
@@ -772,17 +773,17 @@ public class PRBasicMultiIndexCreationDUnitTest extends
                                                               redundancy, valueConstraint));
    vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
                                                               redundancy, valueConstraint));
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Successfully Created PR's on VM0, VM1 , VM2 , VM3");
 
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Creating Local region on VM0 to compare result Sets");
    // creating a local region on one of the JVM's
    vm0.invoke(PRQHelp
               .getCacheSerializableRunnableForLocalRegionCreation(localName, valueConstraint));
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Successfully Created Local Region on VM0");
 
@@ -800,7 +801,7 @@ public class PRBasicMultiIndexCreationDUnitTest extends
                                                             (2 * step), (3 * step)));
    vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRPutsKeyValue(name, portfoliosAndPositions,
                                                             (3 * (step)), dataSize));
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Inserted Portfolio data across PR's");
 
@@ -821,14 +822,14 @@ public class PRBasicMultiIndexCreationDUnitTest extends
    
    vm1.invoke(PRQHelp.getCacheSerializableRunnableForDefineIndex(name, names, exps));
   
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Inserted Portfolio data over Local Region on VM0");
 
    // querying the VM for data
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPROrderByQueryAndCompareResults(
                                                                               name, localName));
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : *Querying PR's with DACK Test ENDED*****");
  }
@@ -850,7 +851,7 @@ public class PRBasicMultiIndexCreationDUnitTest extends
    int totalDataSize = 90;
    final int i = 0;
 
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
    .info(
    "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Querying PR Test with DACK Started*****");
 
@@ -862,7 +863,7 @@ public class PRBasicMultiIndexCreationDUnitTest extends
    VM vm3 = host.getVM(3);
 
    // Creating PR's on the participating VM's
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
    .info(
    "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Creating PR's on VM0, VM1 , VM2 , VM3");
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
@@ -873,17 +874,17 @@ public class PRBasicMultiIndexCreationDUnitTest extends
        redundancy, valueConstraint));
    vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
        redundancy, valueConstraint));
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
    .info(
        "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Successfully Created PR's on VM0, VM1 , VM2 , VM3");
 
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
    .info(
        "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Creating Local region on VM0 to compare result Sets");
    // creating a local region on one of the JVM's
    vm0.invoke(PRQHelp
        .getCacheSerializableRunnableForLocalRegionCreation(localName, valueConstraint));
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
    .info(
        "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Successfully Created Local Region on VM0");
 
@@ -901,7 +902,7 @@ public class PRBasicMultiIndexCreationDUnitTest extends
        (2 * step), (3 * step)));
    vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRPutsKeyValue(name, portfoliosAndPositions,
        (3 * (step)), dataSize));
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
    .info(
        "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Inserted Portfolio data across PR's");
 
@@ -922,14 +923,14 @@ public class PRBasicMultiIndexCreationDUnitTest extends
    
    vm1.invoke(PRQHelp.getCacheSerializableRunnableForDefineIndex(name, names, exps));
 
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
    .info(
        "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Inserted Portfolio data over Local Region on VM0");
 
    // querying the VM for data
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPROrderByQueryAndVerifyOrder(
        name, localName));
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
    .info(
        "PRQueryDUnitTest#testPRDAckCreationAndQuerying : *Querying PR's with DACK Test ENDED*****");
  }
@@ -941,7 +942,7 @@ public void testIndexQueryingWithOrderByLimit() throws Exception
   int totalDataSize = 90;
   final int i = 0;
    
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
    .info(
          "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Querying PR Test with DACK Started*****");
 
@@ -953,7 +954,7 @@ public void testIndexQueryingWithOrderByLimit() throws Exception
    VM vm3 = host.getVM(3);
 
    // Creating PR's on the participating VM's
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Creating PR's on VM0, VM1 , VM2 , VM3");
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
@@ -964,17 +965,17 @@ public void testIndexQueryingWithOrderByLimit() throws Exception
                                                               redundancy, valueConstraint));
    vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
                                                               redundancy, valueConstraint));
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Successfully Created PR's on VM0, VM1 , VM2 , VM3");
 
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Creating Local region on VM0 to compare result Sets");
    // creating a local region on one of the JVM's
    vm0.invoke(PRQHelp
               .getCacheSerializableRunnableForLocalRegionCreation(localName, valueConstraint));
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Successfully Created Local Region on VM0");
 
@@ -992,7 +993,7 @@ public void testIndexQueryingWithOrderByLimit() throws Exception
                                                             (2 * step), (3 * step)));
    vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRPutsKeyValue(name, portfoliosAndPositions,
                                                             (3 * (step)), totalDataSize));
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Inserted Portfolio data across PR's");
 
@@ -1048,14 +1049,14 @@ public void testIndexQueryingWithOrderByLimit() throws Exception
    
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForDefineIndex(localName, names2, exps2, fromClause2));
  
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Inserted Portfolio data over Local Region on VM0");
 
    // querying the VM for data
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPROrderByQueryWithLimit(
                                                                               name, localName));
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : *Querying PR's with DACK Test ENDED*****");
  }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicQueryDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicQueryDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicQueryDUnitTest.java
index 50cf97e..147ed4d 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicQueryDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicQueryDUnitTest.java
@@ -28,6 +28,7 @@ import com.gemstone.gemfire.cache.query.data.PortfolioData;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionDUnitTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -76,13 +77,13 @@ public class PRBasicQueryDUnitTest extends PartitionedRegionDUnitTestCase
     VM vm0 = host.getVM(0); 
     VM vm1 = host.getVM(1);
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
 
     // Creting PR's on the participating VM's
     // Creating Accessor node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the Accessor node in the PR");
 
@@ -92,22 +93,22 @@ public class PRBasicQueryDUnitTest extends PartitionedRegionDUnitTestCase
 //    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(localName,
 //        Scope.DISTRIBUTED_ACK, redundancy));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForLocalRegionCreation(localName));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the Accessor node in the PR");
 
     // Creating the Datastores Nodes in the VM1.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest:testPRBasicQuerying ----- Creating the Datastore node in the PR");
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
         redundancy));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created the Datastore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
@@ -121,7 +122,7 @@ public class PRBasicQueryDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRDuplicatePuts(name, portfolio,
         cnt, cntDest));
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Inserted Portfolio data across PR's");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName,
@@ -135,7 +136,7 @@ public class PRBasicQueryDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRQueryAndCompareResults(
         name, localName));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR's Test ENDED");
   }
@@ -147,13 +148,13 @@ public class PRBasicQueryDUnitTest extends PartitionedRegionDUnitTestCase
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRCountStarQuery: Querying PR Test with DACK Started");
 
     // Creting PR's on the participating VM's
     // Creating Accessor node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRCountStarQuery: Creating the Accessor node in the PR");
 
@@ -163,12 +164,12 @@ public class PRBasicQueryDUnitTest extends PartitionedRegionDUnitTestCase
 //    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(localName,
 //        Scope.DISTRIBUTED_ACK, redundancy));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForLocalRegionCreation(localName, Portfolio.class));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRCountStarQuery: Successfully created the Accessor node in the PR");
 
     // Creating the Datastores Nodes in the VM1.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest:testPRCountStarQuery ----- Creating the Datastore node in the PR");
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
@@ -178,11 +179,11 @@ public class PRBasicQueryDUnitTest extends PartitionedRegionDUnitTestCase
         redundancy, Portfolio.class));
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForLocalRegionCreation(localName, Portfolio.class));
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRCountStarQuery: Successfully Created the Datastore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRCountStarQuery: Successfully Created PR's across all VM's");
 
@@ -196,7 +197,7 @@ public class PRBasicQueryDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRDuplicatePuts(name, portfolio,
         cnt, cntDest+100));
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRCountStarQuery: Inserted Portfolio data across PR's");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName,
@@ -216,7 +217,7 @@ public class PRBasicQueryDUnitTest extends PartitionedRegionDUnitTestCase
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCountStarQueries(
         name, localName));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRCountStarQuery: Querying PR's Test ENDED");
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicRemoveIndexDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicRemoveIndexDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicRemoveIndexDUnitTest.java
index f0c927e..81ee3bf 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicRemoveIndexDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRBasicRemoveIndexDUnitTest.java
@@ -19,6 +19,7 @@ package com.gemstone.gemfire.cache.query.partitioned;
 import com.gemstone.gemfire.cache.query.data.PortfolioData;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionDUnitTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -68,7 +69,7 @@ public class PRBasicRemoveIndexDUnitTest extends PartitionedRegionDUnitTestCase
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
     
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "PRBasicRemoveIndexDUnitTest.testPRBasicIndexCreate test now starts ....");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
         redundancy));
@@ -96,7 +97,7 @@ public class PRBasicRemoveIndexDUnitTest extends PartitionedRegionDUnitTestCase
     //remove indexes
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForRemoveIndex(name, false));
     
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
     "PRBasicRemoveIndexDUnitTest.testPRBasicRemoveIndex test now  ends sucessfully");
 
   }
@@ -111,7 +112,7 @@ public class PRBasicRemoveIndexDUnitTest extends PartitionedRegionDUnitTestCase
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
     
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "PRBasicRemoveIndexDUnitTest.testPRBasicIndexCreate test now starts ....");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
         redundancy));
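
For readers scanning the hunks above: every change in these query DUnit tests is the same mechanical substitution, replacing logging and pause helpers formerly inherited from DistributedTestCase with calls to the extracted static utilities. A minimal sketch of what the migrated test code looks like, assuming the JUnit 3 style String-name constructor these DUnit tests use; the class name and test body are illustrative only, not part of the commit:

import com.gemstone.gemfire.internal.cache.PartitionedRegionDUnitTestCase;
import com.gemstone.gemfire.test.dunit.LogWriterUtils;
import com.gemstone.gemfire.test.dunit.Wait;

public class ExampleQueryDUnitTest extends PartitionedRegionDUnitTestCase {

  // JUnit 3 style constructor used by these DUnit tests (assumed)
  public ExampleQueryDUnitTest(String name) {
    super(name);
  }

  public void testExample() throws Exception {
    // was: getLogWriter().info(...), inherited from DistributedTestCase
    LogWriterUtils.getLogWriter().info("ExampleQueryDUnitTest.testExample started");

    // was: pause(2000), also inherited; now a static call on the Wait utility
    Wait.pause(2000);

    LogWriterUtils.getLogWriter().info("ExampleQueryDUnitTest.testExample done");
  }
}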


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart2DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart2DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart2DUnitTest.java
index 74aedd1..3d462f4 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart2DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart2DUnitTest.java
@@ -25,6 +25,7 @@ import com.gemstone.gemfire.internal.logging.LogWriterImpl;
 import com.gemstone.gemfire.management.cli.Result;
 import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -96,11 +97,11 @@ public class MiscellaneousCommandsExportLogsPart2DUnitTest extends CliCommandTes
     Result cmdResult = misc.exportLogsPreprocessing("./testExportLogsForLogLevel" + dir, null, null, logLevel, false,
         false, start, end, 1);
 
-    getLogWriter().info("testExportLogsForLogLevel command=" + cmdResult);
+    LogWriterUtils.getLogWriter().info("testExportLogsForLogLevel command=" + cmdResult);
 
     if (cmdResult != null) {
       String cmdStringRsult = commandResultToString((CommandResult) cmdResult);
-      getLogWriter().info("testExportLogsForLogLevel cmdStringRsult=" + cmdStringRsult);
+      LogWriterUtils.getLogWriter().info("testExportLogsForLogLevel cmdStringRsult=" + cmdStringRsult);
       assertEquals(Result.Status.OK, cmdResult.getStatus());
     } else {
       fail("testExportLogsForLogLevel failed as did not get CommandResult");
@@ -128,11 +129,11 @@ public class MiscellaneousCommandsExportLogsPart2DUnitTest extends CliCommandTes
     Result cmdResult = misc.exportLogsPreprocessing("./testExportLogsForLogLevelWithUPTOLOGLEVEL" + dir, null, null,
         logLevel, true, false, start, end, 1);
 
-    getLogWriter().info("testExportLogsForLogLevelWithUPTOLOGLEVEL command=" + cmdResult);
+    LogWriterUtils.getLogWriter().info("testExportLogsForLogLevelWithUPTOLOGLEVEL command=" + cmdResult);
 
     if (cmdResult != null) {
       String cmdStringRsult = commandResultToString((CommandResult) cmdResult);
-      getLogWriter().info("testExportLogsForLogLevelWithUPTOLOGLEVEL cmdStringRsult=" + cmdStringRsult);
+      LogWriterUtils.getLogWriter().info("testExportLogsForLogLevelWithUPTOLOGLEVEL cmdStringRsult=" + cmdStringRsult);
 
       assertEquals(Result.Status.OK, cmdResult.getStatus());
     } else {
@@ -140,9 +141,4 @@ public class MiscellaneousCommandsExportLogsPart2DUnitTest extends CliCommandTes
     }
     FileUtil.delete(new File("testExportLogsForLogLevelWithUPTOLOGLEVEL" + dir));
   }
-
-  @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart3DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart3DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart3DUnitTest.java
index 9bce503..7d67b61 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart3DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart3DUnitTest.java
@@ -26,6 +26,7 @@ import com.gemstone.gemfire.internal.logging.LogWriterImpl;
 import com.gemstone.gemfire.management.cli.Result;
 import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -103,10 +104,10 @@ public class MiscellaneousCommandsExportLogsPart3DUnitTest extends CliCommandTes
     Result cmdResult = misc.exportLogsPreprocessing("./testExportLogsForGroup" + dir, groups, null, logLevel, false,
         false, start, end, 1);
 
-    getLogWriter().info("testExportLogsForGroup command result =" + cmdResult);
+    LogWriterUtils.getLogWriter().info("testExportLogsForGroup command result =" + cmdResult);
     if (cmdResult != null) {
       String cmdStringRsult = commandResultToString((CommandResult) cmdResult);
-      getLogWriter().info("testExportLogsForGroup cmdStringRsult=" + cmdStringRsult);
+      LogWriterUtils.getLogWriter().info("testExportLogsForGroup cmdStringRsult=" + cmdStringRsult);
       assertEquals(Result.Status.OK, cmdResult.getStatus());
     } else {
       fail("testExportLogsForGroup failed as did not get CommandResult");
@@ -136,11 +137,11 @@ public class MiscellaneousCommandsExportLogsPart3DUnitTest extends CliCommandTes
     Result cmdResult = misc.exportLogsPreprocessing("./testExportLogsForMember" + dir, null, vm1MemberId, logLevel,
         false, false, start, end, 1);
 
-    getLogWriter().info("testExportLogsForMember command result =" + cmdResult);
+    LogWriterUtils.getLogWriter().info("testExportLogsForMember command result =" + cmdResult);
 
     if (cmdResult != null) {
       String cmdStringRsult = commandResultToString((CommandResult) cmdResult);
-      getLogWriter().info("testExportLogsForMember cmdStringRsult=" + cmdStringRsult);
+      LogWriterUtils.getLogWriter().info("testExportLogsForMember cmdStringRsult=" + cmdStringRsult);
       assertEquals(Result.Status.OK, cmdResult.getStatus());
     } else {
       fail("testExportLogsForMember failed as did not get CommandResult");

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart4DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart4DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart4DUnitTest.java
index 11c3afd..2d11580 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart4DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart4DUnitTest.java
@@ -25,6 +25,7 @@ import com.gemstone.gemfire.internal.logging.LogWriterImpl;
 import com.gemstone.gemfire.management.cli.Result;
 import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -51,11 +52,6 @@ public class MiscellaneousCommandsExportLogsPart4DUnitTest extends CliCommandTes
     return cache.getDistributedSystem().getDistributedMember().getId();
   }
 
-  @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-  }
-
   void setupForExportLogs() {
     final VM vm1 = Host.getHost(0).getVM(1);
     createDefaultSetup(null);
@@ -99,11 +95,11 @@ public class MiscellaneousCommandsExportLogsPart4DUnitTest extends CliCommandTes
     Result cmdResult = misc.exportLogsPreprocessing("./testExportLogsForTimeRange1" + dir, null, null, logLevel, false,
         false, start, end, 1);
 
-    getLogWriter().info("testExportLogsForTimeRange1 command result =" + cmdResult);
+    LogWriterUtils.getLogWriter().info("testExportLogsForTimeRange1 command result =" + cmdResult);
 
     if (cmdResult != null) {
       String cmdStringRsult = commandResultToString((CommandResult) cmdResult);
-      getLogWriter().info("testExportLogsForTimeRange1 cmdStringRsult=" + cmdStringRsult);
+      LogWriterUtils.getLogWriter().info("testExportLogsForTimeRange1 cmdStringRsult=" + cmdStringRsult);
       assertEquals(Result.Status.OK, cmdResult.getStatus());
     } else {
       fail("testExportLogsForTimeRange1 failed as did not get CommandResult");
@@ -127,11 +123,11 @@ public class MiscellaneousCommandsExportLogsPart4DUnitTest extends CliCommandTes
     Result cmdResult = misc.exportLogsPreprocessing("./testExportLogsForTimeRangeForOnlyStartTime" + dir, null, null,
         logLevel, false, false, s, null, 1);
 
-    getLogWriter().info("testExportLogsForTimeRangeForOnlyStartTime command result =" + cmdResult);
+    LogWriterUtils.getLogWriter().info("testExportLogsForTimeRangeForOnlyStartTime command result =" + cmdResult);
 
     if (cmdResult != null) {
       String cmdStringRsult = commandResultToString((CommandResult) cmdResult);
-      getLogWriter().info("testExportLogsForTimeRangeForOnlyStartTime cmdStringRsult=" + cmdStringRsult);
+      LogWriterUtils.getLogWriter().info("testExportLogsForTimeRangeForOnlyStartTime cmdStringRsult=" + cmdStringRsult);
       assertEquals(Result.Status.OK, cmdResult.getStatus());
     } else {
       fail("testExportLogsForTimeRangeForOnlyStartTime failed as did not get CommandResult");

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/QueueCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/QueueCommandsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/QueueCommandsDUnitTest.java
index 78540ac..e6c1e47 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/QueueCommandsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/QueueCommandsDUnitTest.java
@@ -29,10 +29,13 @@ import com.gemstone.gemfire.management.cli.Result;
 import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
 import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
 import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 import java.io.File;
 import java.io.FileOutputStream;
@@ -256,7 +259,7 @@ public class QueueCommandsDUnitTest extends CliCommandTestBase {
           final InternalLocator locator = (InternalLocator) Locator.startLocatorAndDS(locatorPort, locatorLogFile, null,
               locatorProps);
 
-          DistributedTestCase.WaitCriterion wc = new DistributedTestCase.WaitCriterion() {
+          WaitCriterion wc = new WaitCriterion() {
             @Override
             public boolean done() {
               return locator.isSharedConfigurationRunning();
@@ -267,7 +270,7 @@ public class QueueCommandsDUnitTest extends CliCommandTestBase {
               return "Waiting for shared configuration to be started";
             }
           };
-          DistributedTestCase.waitForCriterion(wc, 5000, 500, true);
+          Wait.waitForCriterion(wc, 5000, 500, true);
         } catch (IOException ioex) {
           fail("Unable to create a locator with a shared configuration");
         }
@@ -334,7 +337,7 @@ public class QueueCommandsDUnitTest extends CliCommandTestBase {
           xmlFromConfig = sharedConfig.getConfiguration(groupName).getCacheXmlContent();
           assertTrue(xmlFromConfig.contains(queueName));
         } catch (Exception e) {
-          fail("Error occurred in cluster configuration service", e);
+          Assert.fail("Error occurred in cluster configuration service", e);
         }
       }
     });
@@ -366,7 +369,7 @@ public class QueueCommandsDUnitTest extends CliCommandTestBase {
   }
 
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCliCommandTestBase() throws Exception {
     for (String path : this.filesToBeDeleted) {
       try {
         final File fileToDelete = new File(path);
@@ -375,11 +378,10 @@ public class QueueCommandsDUnitTest extends CliCommandTestBase {
           executeCommand("undeploy --jar=" + fileToDelete.getName());
         }
       } catch (IOException e) {
-        getLogWriter().error("Unable to delete file", e);
+        LogWriterUtils.getLogWriter().error("Unable to delete file", e);
       }
     }
     this.filesToBeDeleted.clear();
-    super.tearDown2();
   }
 
   private void writeJarBytesToFile(File jarFile, byte[] jarBytes) throws IOException {
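
The WaitCriterion changes in this file show the other half of the extraction: DistributedTestCase.WaitCriterion and DistributedTestCase.waitForCriterion become the standalone WaitCriterion type and Wait.waitForCriterion. The criterion body and timeout values below are copied from the hunk above; the wrapping method, its parameter, and the InternalLocator import are illustrative assumptions:

import com.gemstone.gemfire.distributed.internal.InternalLocator; // package assumed from GemFire internals
import com.gemstone.gemfire.test.dunit.Wait;
import com.gemstone.gemfire.test.dunit.WaitCriterion;

// was: DistributedTestCase.WaitCriterion / DistributedTestCase.waitForCriterion(...)
static void awaitSharedConfiguration(final InternalLocator locator) {
  WaitCriterion wc = new WaitCriterion() {
    @Override
    public boolean done() {
      return locator.isSharedConfigurationRunning();
    }

    @Override
    public String description() {
      return "Waiting for shared configuration to be started";
    }
  };
  // poll every 500 ms for up to 5 seconds, throwing on timeout, as in the hunk above
  Wait.waitForCriterion(wc, 5000, 500, true);
}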

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/SharedConfigurationCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/SharedConfigurationCommandsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/SharedConfigurationCommandsDUnitTest.java
index b937fa9..9dc9506 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/SharedConfigurationCommandsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/SharedConfigurationCommandsDUnitTest.java
@@ -34,11 +34,14 @@ import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
 import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
 import com.gemstone.gemfire.management.internal.configuration.SharedConfigurationDUnitTest;
 import com.gemstone.gemfire.management.internal.configuration.domain.Configuration;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 import org.apache.commons.io.FileUtils;
 
@@ -121,7 +124,7 @@ public class SharedConfigurationCommandsDUnitTest extends CliCommandTestBase {
         try {
           final InternalLocator locator = (InternalLocator) Locator.startLocatorAndDS(locator1Port, locatorLogFile,
               null, locatorProps);
-          DistributedTestCase.WaitCriterion wc = new DistributedTestCase.WaitCriterion() {
+          WaitCriterion wc = new WaitCriterion() {
             @Override
             public boolean done() {
               return locator.isSharedConfigurationRunning();
@@ -132,7 +135,7 @@ public class SharedConfigurationCommandsDUnitTest extends CliCommandTestBase {
               return "Waiting for shared configuration to be started";
             }
           };
-          DistributedTestCase.waitForCriterion(wc, TIMEOUT, INTERVAL, true);
+          Wait.waitForCriterion(wc, TIMEOUT, INTERVAL, true);
         } catch (IOException ioex) {
           fail("Unable to create a locator with a shared configuration");
         }
@@ -211,23 +214,23 @@ public class SharedConfigurationCommandsDUnitTest extends CliCommandTestBase {
     cmdResult = executeCommand(commandStringBuilder.getCommandString());
     String resultString = commandResultToString(cmdResult);
 
-    getLogWriter().info("#SB Result\n");
-    getLogWriter().info(resultString);
+    LogWriterUtils.getLogWriter().info("#SB Result\n");
+    LogWriterUtils.getLogWriter().info(resultString);
     assertEquals(true, cmdResult.getStatus().equals(Status.OK));
 
     commandStringBuilder = new CommandStringBuilder(CliStrings.STATUS_SHARED_CONFIG);
     cmdResult = executeCommand(commandStringBuilder.getCommandString());
     resultString = commandResultToString(cmdResult);
-    getLogWriter().info("#SB Result\n");
-    getLogWriter().info(resultString);
+    LogWriterUtils.getLogWriter().info("#SB Result\n");
+    LogWriterUtils.getLogWriter().info(resultString);
     assertEquals(Status.OK, cmdResult.getStatus());
 
     commandStringBuilder = new CommandStringBuilder(CliStrings.EXPORT_SHARED_CONFIG);
     commandStringBuilder.addOption(CliStrings.EXPORT_SHARED_CONFIG__FILE, sharedConfigZipFileName);
     cmdResult = executeCommand(commandStringBuilder.getCommandString());
     resultString = commandResultToString(cmdResult);
-    getLogWriter().info("#SB Result\n");
-    getLogWriter().info(resultString);
+    LogWriterUtils.getLogWriter().info("#SB Result\n");
+    LogWriterUtils.getLogWriter().info(resultString);
     assertEquals(Status.OK, cmdResult.getStatus());
 
     //Import into a running system should fail
@@ -287,7 +290,7 @@ public class SharedConfigurationCommandsDUnitTest extends CliCommandTestBase {
           final InternalLocator locator = (InternalLocator) Locator.startLocatorAndDS(locator2Port, locatorLogFile,
               null, locatorProps);
 
-          DistributedTestCase.WaitCriterion wc = new DistributedTestCase.WaitCriterion() {
+          WaitCriterion wc = new WaitCriterion() {
             @Override
             public boolean done() {
               return locator.isSharedConfigurationRunning();
@@ -298,7 +301,7 @@ public class SharedConfigurationCommandsDUnitTest extends CliCommandTestBase {
               return "Waiting for shared configuration to be started";
             }
           };
-          DistributedTestCase.waitForCriterion(wc, 5000, 500, true);
+          Wait.waitForCriterion(wc, 5000, 500, true);
 
           SharedConfiguration sc = locator.getSharedConfiguration();
           assertNotNull(sc);
@@ -318,7 +321,7 @@ public class SharedConfigurationCommandsDUnitTest extends CliCommandTestBase {
         } catch (IOException ioex) {
           fail("Unable to create a locator with a shared configuration");
         } catch (Exception e) {
-          fail("Error occurred in cluster configuration service", e);
+          Assert.fail("Error occurred in cluster configuration service", e);
         }
       }
     });
@@ -330,8 +333,7 @@ public class SharedConfigurationCommandsDUnitTest extends CliCommandTestBase {
   }
 
   @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  protected final void postTearDownCacheTestCase() throws Exception {
     for (int i = 0; i < 4; i++) {
       Host.getHost(0).getVM(i).invoke(SharedConfigurationDUnitTest.locatorCleanup);
     }

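The hunks above replace the DistributedTestCase inner-class wait with the standalone dunit utilities. A minimal sketch of the resulting idiom, assuming only the Wait and WaitCriterion classes imported in these files (the final boolean argument is read here as "fail the test on timeout"):

    WaitCriterion wc = new WaitCriterion() {
      @Override
      public boolean done() {
        // condition polled until it holds or the timeout elapses
        return locator.isSharedConfigurationRunning();
      }

      @Override
      public String description() {
        return "Waiting for shared configuration to be started";
      }
    };
    // poll every 500 ms for up to 5000 ms
    Wait.waitForCriterion(wc, 5000, 500, true);
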
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShellCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShellCommandsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShellCommandsDUnitTest.java
index c8695ad..2ff86ba 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShellCommandsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShellCommandsDUnitTest.java
@@ -28,6 +28,8 @@ import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
 import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
 import com.gemstone.gemfire.management.internal.cli.shell.Gfsh;
 import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+
 import org.junit.Before;
 
 import java.io.File;
@@ -65,7 +67,7 @@ public class ShellCommandsDUnitTest extends CliCommandTestBase {
     assertEquals(String.valueOf(jmxManagerPort), System.getProperty("gemfire.jmx-manager-port"));
     assertEquals("0", System.getProperty("gemfire.jmx-manager-http-port"));
 
-    final String pathname = (getClass().getSimpleName() + "_" + getTestName());
+    final String pathname = (getClass().getSimpleName() + "_" + getTestMethodName());
     final File workingDirectory = new File(pathname);
 
     workingDirectory.mkdir();
@@ -104,7 +106,7 @@ public class ShellCommandsDUnitTest extends CliCommandTestBase {
     if (gfshInstance == null) {
       fail("In testEcho command gfshInstance is null");
     }
-    getLogWriter().info("Gsh " + gfshInstance);
+    LogWriterUtils.getLogWriter().info("Gsh " + gfshInstance);
 
     gfshInstance.setEnvProperty("TESTSYS", "SYS_VALUE");
     printAllEnvs(gfshInstance);
@@ -334,9 +336,9 @@ public class ShellCommandsDUnitTest extends CliCommandTestBase {
 
     if (cmdResult != null) {
       assertEquals(Result.Status.OK, cmdResult.getStatus());
-      getLogWriter().info("testClearHistory cmdResult=" + commandResultToString(cmdResult));
+      LogWriterUtils.getLogWriter().info("testClearHistory cmdResult=" + commandResultToString(cmdResult));
       String resultString = commandResultToString(cmdResult);
-      getLogWriter().info("testClearHistory resultString=" + resultString);
+      LogWriterUtils.getLogWriter().info("testClearHistory resultString=" + resultString);
       assertTrue(resultString.contains(CliStrings.HISTORY__MSG__CLEARED_HISTORY));
       assertTrue(gfshInstance.getGfshHistory().size()<= 1);
     } else {
@@ -346,18 +348,18 @@ public class ShellCommandsDUnitTest extends CliCommandTestBase {
 
   private static void printCommandOutput(CommandResult cmdResult) {
     assertNotNull(cmdResult);
-    getLogWriter().info("Command Output : ");
+    LogWriterUtils.getLogWriter().info("Command Output : ");
     StringBuilder sb = new StringBuilder();
     cmdResult.resetToFirstLine();
     while (cmdResult.hasNextLine()) {
       sb.append(cmdResult.nextLine()).append(DataCommandRequest.NEW_LINE);
     }
-    getLogWriter().info(sb.toString());
-    getLogWriter().info("");
+    LogWriterUtils.getLogWriter().info(sb.toString());
+    LogWriterUtils.getLogWriter().info("");
   }
 
   private void printAllEnvs(Gfsh gfsh) {
-    getLogWriter().info("printAllEnvs : " + StringUtils.objectToString(gfsh.getEnv(), false, 0));
+    LogWriterUtils.getLogWriter().info("printAllEnvs : " + StringUtils.objectToString(gfsh.getEnv(), false, 0));
     /*
     getLogWriter().info("Gfsh printAllEnvs : " + HydraUtil.ObjectToString(getDefaultShell().getEnv()));    
     getLogWriter().info("gfsh " + gfsh + " default shell " + getDefaultShell());*/

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShowDeadlockDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShowDeadlockDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShowDeadlockDUnitTest.java
index 7a72865..ba942c6 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShowDeadlockDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShowDeadlockDUnitTest.java
@@ -32,7 +32,10 @@ import com.gemstone.gemfire.management.internal.cli.CliUtil;
 import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
 import com.gemstone.gemfire.management.internal.cli.remote.CommandProcessor;
 import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
@@ -70,8 +73,8 @@ public class ShowDeadlockDUnitTest extends CacheTestCase {
   }
 
   @Override
-  public void tearDown2() throws Exception {
-    invokeInEveryVM(new SerializableRunnable() {
+  protected final void preTearDownCacheTestCase() throws Exception {
+    Invoke.invokeInEveryVM(new SerializableRunnable() {
       private static final long serialVersionUID = 1L;
 
       public void run() {
@@ -110,7 +113,7 @@ public class ShowDeadlockDUnitTest extends CacheTestCase {
 
     String deadLockOutputFromCommand = getResultAsString(result);
 
-    getLogWriter().info("output = " + deadLockOutputFromCommand);
+    LogWriterUtils.getLogWriter().info("output = " + deadLockOutputFromCommand);
     assertEquals(true, result.hasIncomingFiles());
     assertEquals(true, result.getStatus().equals(Status.OK));
     assertEquals(true, deadLockOutputFromCommand.startsWith(CliStrings.SHOW_DEADLOCK__NO__DEADLOCK));
@@ -146,7 +149,7 @@ public class ShowDeadlockDUnitTest extends CacheTestCase {
     Result result = commandProcessor.createCommandStatement(csb.toString(), EMPTY_ENV).process();
 
     String deadLockOutputFromCommand = getResultAsString(result);
-    getLogWriter().info("Deadlock = " + deadLockOutputFromCommand);
+    LogWriterUtils.getLogWriter().info("Deadlock = " + deadLockOutputFromCommand);
     result.saveIncomingFiles(null);
     assertEquals(true, deadLockOutputFromCommand.startsWith(CliStrings.SHOW_DEADLOCK__DEADLOCK__DETECTED));
     assertEquals(true, result.getStatus().equals(Status.OK));
@@ -183,7 +186,7 @@ public class ShowDeadlockDUnitTest extends CacheTestCase {
         try {
           Thread.sleep(1000);
         } catch (InterruptedException e) {
-          fail("interrupted", e);
+          Assert.fail("interrupted", e);
         }
         ResultCollector collector = FunctionService.onMember(system, member).execute(new TestFunction());
         //wait the function to lock the lock on member.

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShowMetricsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShowMetricsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShowMetricsDUnitTest.java
index c327f41..a34b185 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShowMetricsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShowMetricsDUnitTest.java
@@ -36,9 +36,12 @@ import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
 import com.gemstone.gemfire.management.internal.cli.remote.CommandProcessor;
 import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 import javax.management.ObjectName;
 import java.io.File;
@@ -94,11 +97,11 @@ public class ShowMetricsDUnitTest extends CliCommandTestBase {
       @Override
       public Object call() throws Exception {
         WaitCriterion wc = createMBeanWaitCriterion(1, "", null, 0);
-        waitForCriterion(wc, 5000, 500, true);
+        Wait.waitForCriterion(wc, 5000, 500, true);
         CommandProcessor commandProcessor = new CommandProcessor();
         Result result = commandProcessor.createCommandStatement("show metrics", Collections.EMPTY_MAP).process();
         String resultStr = commandResultToString((CommandResult) result);
-        getLogWriter().info(resultStr);
+        LogWriterUtils.getLogWriter().info(resultStr);
         assertEquals(resultStr, true, result.getStatus().equals(Status.OK));
         return resultStr;
       }
@@ -110,8 +113,8 @@ public class ShowMetricsDUnitTest extends CliCommandTestBase {
 
     String managerResult = (String) managerResultObj;
 
-    getLogWriter().info("#SB Manager");
-    getLogWriter().info(managerResult);
+    LogWriterUtils.getLogWriter().info("#SB Manager");
+    LogWriterUtils.getLogWriter().info(managerResult);
   }
 
   public void systemSetUp() {
@@ -141,7 +144,7 @@ public class ShowMetricsDUnitTest extends CliCommandTestBase {
       @Override
       public Object call() throws Exception {
         WaitCriterion wc = createMBeanWaitCriterion(2, regionName, null, 0);
-        waitForCriterion(wc, 5000, 500, true);
+        Wait.waitForCriterion(wc, 5000, 500, true);
         CommandProcessor commandProcessor = new CommandProcessor();
         Result result = commandProcessor.createCommandStatement("show metrics --region=REGION1",
             Collections.EMPTY_MAP).process();
@@ -157,8 +160,8 @@ public class ShowMetricsDUnitTest extends CliCommandTestBase {
 
     String managerResult = (String) managerResultObj;
 
-    getLogWriter().info("#SB Manager");
-    getLogWriter().info(managerResult);
+    LogWriterUtils.getLogWriter().info("#SB Manager");
+    LogWriterUtils.getLogWriter().info(managerResult);
   }
 
   /***
@@ -228,9 +231,9 @@ public class ShowMetricsDUnitTest extends CliCommandTestBase {
       public Object call() throws Exception {
 
         WaitCriterion wc = createMBeanWaitCriterion(3, "", distributedMember, 0);
-        waitForCriterion(wc, 5000, 500, true);
+        Wait.waitForCriterion(wc, 5000, 500, true);
         wc = createMBeanWaitCriterion(5, "", distributedMember, cacheServerPort);
-        waitForCriterion(wc, 10000, 500, true);
+        Wait.waitForCriterion(wc, 10000, 500, true);
 
         final String command = CliStrings.SHOW_METRICS + " --" + CliStrings.SHOW_METRICS__MEMBER + "=" + distributedMember.getId() + " --" + CliStrings.SHOW_METRICS__CACHESERVER__PORT + "=" + cacheServerPort + " --" + CliStrings.SHOW_METRICS__FILE + "=" + exportFileName;
 
@@ -255,8 +258,8 @@ public class ShowMetricsDUnitTest extends CliCommandTestBase {
 
     String managerResult = (String) managerResultObj;
 
-    getLogWriter().info("#SB Manager");
-    getLogWriter().info(managerResult);
+    LogWriterUtils.getLogWriter().info("#SB Manager");
+    LogWriterUtils.getLogWriter().info(managerResult);
     cs.stop();
   }
 
@@ -273,7 +276,7 @@ public class ShowMetricsDUnitTest extends CliCommandTestBase {
       public Object call() throws Exception {
 
         WaitCriterion wc = createMBeanWaitCriterion(4, regionName, distributedMember, 0);
-        waitForCriterion(wc, 5000, 500, true);
+        Wait.waitForCriterion(wc, 5000, 500, true);
         CommandProcessor commandProcessor = new CommandProcessor();
         Result result = commandProcessor.createCommandStatement(
             "show metrics --region=" + regionName + " --member=" + distributedMember.getName() + " --file=" + exportFileName,
@@ -296,8 +299,8 @@ public class ShowMetricsDUnitTest extends CliCommandTestBase {
 
     String managerResult = (String) managerResultObj;
 
-    getLogWriter().info("#SB Manager");
-    getLogWriter().info(managerResult);
+    LogWriterUtils.getLogWriter().info("#SB Manager");
+    LogWriterUtils.getLogWriter().info(managerResult);
   }
 
   public void testShowMetricsRegionFromMemberWithCategories() throws ClassNotFoundException, IOException, InterruptedException {
@@ -313,7 +316,7 @@ public class ShowMetricsDUnitTest extends CliCommandTestBase {
       public Object call() throws Exception {
 
         WaitCriterion wc = createMBeanWaitCriterion(4, regionName, distributedMember, 0);
-        waitForCriterion(wc, 5000, 500, true);
+        Wait.waitForCriterion(wc, 5000, 500, true);
         CommandProcessor commandProcessor = new CommandProcessor();
         Result result = commandProcessor.createCommandStatement(
             "show metrics --region=" + regionName + " --member=" + distributedMember.getName() + " --file=" + exportFileName + " --categories=region,eviction",
@@ -336,7 +339,7 @@ public class ShowMetricsDUnitTest extends CliCommandTestBase {
 
     String managerResult = (String) managerResultObj;
 
-    getLogWriter().info("#SB Manager");
-    getLogWriter().info(managerResult);
+    LogWriterUtils.getLogWriter().info("#SB Manager");
+    LogWriterUtils.getLogWriter().info(managerResult);
   }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShowStackTraceDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShowStackTraceDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShowStackTraceDUnitTest.java
index b0f0495..11bd352 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShowStackTraceDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ShowStackTraceDUnitTest.java
@@ -22,6 +22,7 @@ import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
 import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
 import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -93,9 +94,9 @@ public class ShowStackTraceDUnitTest extends CliCommandTestBase {
     CommandStringBuilder csb = new CommandStringBuilder(CliStrings.EXPORT_STACKTRACE);
     csb.addOption(CliStrings.EXPORT_STACKTRACE__FILE, allStacktracesFile.getCanonicalPath());
     String commandString = csb.toString();
-    getLogWriter().info("CommandString : " + commandString);
+    LogWriterUtils.getLogWriter().info("CommandString : " + commandString);
     CommandResult commandResult = executeCommand(commandString);
-    getLogWriter().info("Output : \n" + commandResultToString(commandResult));
+    LogWriterUtils.getLogWriter().info("Output : \n" + commandResultToString(commandResult));
     assertTrue(commandResult.getStatus().equals(Status.OK));
 
     File mgrStacktraceFile = new File("managerStacktrace.txt");
@@ -105,9 +106,9 @@ public class ShowStackTraceDUnitTest extends CliCommandTestBase {
     csb.addOption(CliStrings.EXPORT_STACKTRACE__FILE, mgrStacktraceFile.getCanonicalPath());
     csb.addOption(CliStrings.EXPORT_STACKTRACE__MEMBER, "Manager");
     commandString = csb.toString();
-    getLogWriter().info("CommandString : " + commandString);
+    LogWriterUtils.getLogWriter().info("CommandString : " + commandString);
     commandResult = executeCommand(commandString);
-    getLogWriter().info("Output : \n" + commandResultToString(commandResult));
+    LogWriterUtils.getLogWriter().info("Output : \n" + commandResultToString(commandResult));
     assertTrue(commandResult.getStatus().equals(Status.OK));
 
     File serverStacktraceFile = new File("serverStacktrace.txt");
@@ -117,9 +118,9 @@ public class ShowStackTraceDUnitTest extends CliCommandTestBase {
     csb.addOption(CliStrings.EXPORT_STACKTRACE__FILE, serverStacktraceFile.getCanonicalPath());
     csb.addOption(CliStrings.EXPORT_STACKTRACE__MEMBER, "Server");
     commandString = csb.toString();
-    getLogWriter().info("CommandString : " + commandString);
+    LogWriterUtils.getLogWriter().info("CommandString : " + commandString);
     commandResult = executeCommand(commandString);
-    getLogWriter().info("Output : \n" + commandResultToString(commandResult));
+    LogWriterUtils.getLogWriter().info("Output : \n" + commandResultToString(commandResult));
     assertTrue(commandResult.getStatus().equals(Status.OK));
 
     File groupStacktraceFile = new File("groupstacktrace.txt");
@@ -129,9 +130,9 @@ public class ShowStackTraceDUnitTest extends CliCommandTestBase {
     csb.addOption(CliStrings.EXPORT_STACKTRACE__FILE, groupStacktraceFile.getCanonicalPath());
     csb.addOption(CliStrings.EXPORT_STACKTRACE__GROUP, "G2");
     commandString = csb.toString();
-    getLogWriter().info("CommandString : " + commandString);
+    LogWriterUtils.getLogWriter().info("CommandString : " + commandString);
     commandResult = executeCommand(commandString);
-    getLogWriter().info("Output : \n" + commandResultToString(commandResult));
+    LogWriterUtils.getLogWriter().info("Output : \n" + commandResultToString(commandResult));
     assertTrue(commandResult.getStatus().equals(Status.OK));
 
     File wrongStackTraceFile = new File("wrongStackTrace.txt");
@@ -141,9 +142,9 @@ public class ShowStackTraceDUnitTest extends CliCommandTestBase {
     csb.addOption(CliStrings.EXPORT_STACKTRACE__FILE, wrongStackTraceFile.getCanonicalPath());
     csb.addOption(CliStrings.EXPORT_STACKTRACE__MEMBER, "WrongMember");
     commandString = csb.toString();
-    getLogWriter().info("CommandString : " + commandString);
+    LogWriterUtils.getLogWriter().info("CommandString : " + commandString);
     commandResult = executeCommand(commandString);
-    getLogWriter().info("Output : \n" + commandResultToString(commandResult));
+    LogWriterUtils.getLogWriter().info("Output : \n" + commandResultToString(commandResult));
     assertFalse(commandResult.getStatus().equals(Status.OK));
   }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/UserCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/UserCommandsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/UserCommandsDUnitTest.java
index dcbba2f..c7a8f5d 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/UserCommandsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/UserCommandsDUnitTest.java
@@ -58,8 +58,7 @@ public class UserCommandsDUnitTest extends CliCommandTestBase {
   }
 
   @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  protected final void postTearDownCacheTestCase() throws Exception {
     if (this.deleteJarDirectory) {
       FileUtil.delete(this.jarDirectory);
     } else {

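The tearDown2() overrides removed above give way to the template hooks on the refactored base classes. A minimal sketch of the new shape, assuming postTearDownCacheTestCase() is invoked by the framework around its own cache teardown, so no super call is needed:

    @Override
    protected final void postTearDownCacheTestCase() throws Exception {
      // per-test cleanup only; the base class drives the rest of the teardown
      if (this.deleteJarDirectory) {
        FileUtil.delete(this.jarDirectory);
      }
    }
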
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/configuration/SharedConfigurationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/configuration/SharedConfigurationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/configuration/SharedConfigurationDUnitTest.java
index 955d9f9..d583e51 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/configuration/SharedConfigurationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/configuration/SharedConfigurationDUnitTest.java
@@ -49,11 +49,12 @@ import com.gemstone.gemfire.management.internal.configuration.domain.XmlEntity.X
 import com.gemstone.gemfire.management.internal.configuration.handlers.ConfigurationRequestHandler;
 import com.gemstone.gemfire.management.internal.configuration.messages.ConfigurationRequest;
 import com.gemstone.gemfire.management.internal.configuration.messages.ConfigurationResponse;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /***
  * Tests the starting up of shared configuration, installation of {@link ConfigurationRequestHandler}
@@ -118,7 +119,7 @@ public class SharedConfigurationDUnitTest extends CacheTestCase {
           final InternalLocator locator = (InternalLocator) Locator.startLocatorAndDS(locator1Port, locatorLogFile, null,
               locatorProps);
 
-          DistributedTestCase.WaitCriterion wc = new DistributedTestCase.WaitCriterion() {
+          WaitCriterion wc = new WaitCriterion() {
             @Override
             public boolean done() {
               return locator.isSharedConfigurationRunning();
@@ -129,7 +130,7 @@ public class SharedConfigurationDUnitTest extends CacheTestCase {
               return "Waiting for shared configuration to be started";
             }
           };
-          DistributedTestCase.waitForCriterion(wc, TIMEOUT, INTERVAL, true);
+          Wait.waitForCriterion(wc, TIMEOUT, INTERVAL, true);
         } catch (IOException ioex) {
           fail("Unable to create a locator with a shared configuration");
         }
@@ -256,7 +257,7 @@ public class SharedConfigurationDUnitTest extends CacheTestCase {
         try {
           final InternalLocator locator = (InternalLocator) Locator.startLocatorAndDS(locator1Port, locatorLogFile, null,
               locatorProps);
-          DistributedTestCase.WaitCriterion wc = new DistributedTestCase.WaitCriterion() {
+          WaitCriterion wc = new WaitCriterion() {
             @Override
             public boolean done() {
               return locator.isSharedConfigurationRunning();
@@ -267,7 +268,7 @@ public class SharedConfigurationDUnitTest extends CacheTestCase {
               return "Waiting for shared configuration to be started";
             }
           };
-          DistributedTestCase.waitForCriterion(wc, TIMEOUT, INTERVAL, true);
+          Wait.waitForCriterion(wc, TIMEOUT, INTERVAL, true);
         } catch (IOException ioex) {
           fail("Unable to create a locator with a shared configuration");
         }
@@ -347,7 +348,7 @@ public class SharedConfigurationDUnitTest extends CacheTestCase {
           final InternalLocator locator = (InternalLocator) Locator.startLocatorAndDS(locator2Port, locatorLogFile, null,
               locatorProps);
 
-          DistributedTestCase.WaitCriterion wc = new DistributedTestCase.WaitCriterion() {
+          WaitCriterion wc = new WaitCriterion() {
             @Override
             public boolean done() {
               return locator.isSharedConfigurationRunning();
@@ -358,7 +359,7 @@ public class SharedConfigurationDUnitTest extends CacheTestCase {
               return "Waiting for shared configuration to be started";
             }
           };
-          DistributedTestCase.waitForCriterion(wc, TIMEOUT, INTERVAL, true);
+          Wait.waitForCriterion(wc, TIMEOUT, INTERVAL, true);
         } catch (IOException ioex) {
           fail("Unable to create a locator with a shared configuration");
         }
@@ -433,10 +434,8 @@ public class SharedConfigurationDUnitTest extends CacheTestCase {
     });
   }    
   
-  
   @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  protected final void postTearDownCacheTestCase() throws Exception {
     for (int i=0; i<4; i++) {
       Host.getHost(0).getVM(i).invoke(SharedConfigurationDUnitTest.locatorCleanup);
     }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestClientIdsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestClientIdsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestClientIdsDUnitTest.java
index 88b2b3d..696de8e 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestClientIdsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestClientIdsDUnitTest.java
@@ -37,12 +37,16 @@ import com.gemstone.gemfire.management.CacheServerMXBean;
 import com.gemstone.gemfire.management.MBeanUtil;
 import com.gemstone.gemfire.management.ManagementTestBase;
 import com.gemstone.gemfire.management.internal.cli.CliUtil;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * This is for testing client IDs
@@ -86,17 +90,14 @@ public class TestClientIdsDUnitTest extends DistributedTestCase {
     client2 = host.getVM(3);
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-
+  @Override
+  protected final void preTearDown() throws Exception {
     helper.closeCache(managingNode);
     helper.closeCache(server);
     helper.closeCache(client);
     helper.closeCache(client2);
 
     disconnectFromDS();
-
-    
   }
 
   private static final long serialVersionUID = 1L;
@@ -106,8 +107,8 @@ public class TestClientIdsDUnitTest extends DistributedTestCase {
     helper.startManagingNode(managingNode);
     int port = (Integer) createServerCache(server);
     DistributedMember serverMember = helper.getMember(server);
-    createClientCache(client, getServerHostName(server.getHost()), port);
-    createClientCache(client2, getServerHostName(server.getHost()), port);
+    createClientCache(client, NetworkUtils.getServerHostName(server.getHost()), port);
+    createClientCache(client2, NetworkUtils.getServerHostName(server.getHost()), port);
     put(client);
     put(client2);
     verifyClientIds(managingNode, serverMember, port);
@@ -229,7 +230,7 @@ public class TestClientIdsDUnitTest extends DistributedTestCase {
                   }
                 } 
               }catch (Exception e) {                 
-                getLogWriter().info("exception occured " + e.getMessage() + CliUtil.stackTraceAsString((Throwable)e));
+                LogWriterUtils.getLogWriter().info("exception occured " + e.getMessage() + CliUtil.stackTraceAsString((Throwable)e));
               }
               return false;
             }
@@ -238,12 +239,12 @@ public class TestClientIdsDUnitTest extends DistributedTestCase {
               return "wait for getNumOfClients bean to complete and get results";
             }
           };
-          waitForCriterion(waitCriteria, 2 * 60 * 1000, 3000, true);          
+          Wait.waitForCriterion(waitCriteria, 2 * 60 * 1000, 3000, true);          
           
           //Now it is sure that bean would be available
           CacheServerMXBean bean = MBeanUtil.getCacheServerMbeanProxy(
               serverMember, serverPort);
-          getLogWriter().info("verifyClientIds = " + bean.getClientIds().length);
+          LogWriterUtils.getLogWriter().info("verifyClientIds = " + bean.getClientIds().length);
           assertEquals(true, bean.getClientIds().length > 0 ? true : false);
         } catch (Exception e) {
           fail("Error while verifying cache server from remote member " + e);
@@ -289,7 +290,7 @@ public class TestClientIdsDUnitTest extends DistributedTestCase {
           }
           r1.clear();
         } catch (Exception ex) {
-          fail("failed while put", ex);
+          Assert.fail("failed while put", ex);
         }
       }
 

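The fail(String, Throwable) calls rewritten above now go through the dunit Assert class, keeping the two-argument overload that carries the original cause. A minimal sketch, assuming com.gemstone.gemfire.test.dunit.Assert as imported in these tests (region and key names are illustrative):

    try {
      r1.put(k1, client_k1);
    } catch (Exception ex) {
      // the overload records ex as the cause of the test failure
      Assert.fail("failed while put", ex);
    }
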
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestFunctionsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestFunctionsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestFunctionsDUnitTest.java
index 1ebbab7..c2f2b60 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestFunctionsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestFunctionsDUnitTest.java
@@ -24,8 +24,11 @@ import com.gemstone.gemfire.internal.cache.functions.TestFunction;
 import com.gemstone.gemfire.management.DistributedSystemMXBean;
 import com.gemstone.gemfire.management.ManagementService;
 import com.gemstone.gemfire.management.ManagementTestBase;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * This is for testing running functions
@@ -46,10 +49,6 @@ public class TestFunctionsDUnitTest extends ManagementTestBase {
 
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-  }
-
   public static Integer getNumOfRunningFunction() {
 
     final WaitCriterion waitCriteria = new WaitCriterion() {
@@ -74,7 +73,7 @@ public class TestFunctionsDUnitTest extends ManagementTestBase {
       }
     };
 
-    waitForCriterion(waitCriteria, 2 * 60 * 1000, 3000, true);
+    Wait.waitForCriterion(waitCriteria, 2 * 60 * 1000, 3000, true);
     final DistributedSystemMXBean bean = getManagementService()
         .getDistributedSystemMXBean();
     assertNotNull(bean);
@@ -98,7 +97,7 @@ public class TestFunctionsDUnitTest extends ManagementTestBase {
     });
     Integer numOfRunningFunctions = (Integer) managingNode.invoke(
         TestFunctionsDUnitTest.class, "getNumOfRunningFunction");
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "TestNumOfFunctions numOfRunningFunctions= " + numOfRunningFunctions);
     assertTrue(numOfRunningFunctions > 0 ? true : false);
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestHeapDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestHeapDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestHeapDUnitTest.java
index 4a8783a..f8abe35 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestHeapDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestHeapDUnitTest.java
@@ -19,7 +19,10 @@ package com.gemstone.gemfire.management.internal.pulse;
 import com.gemstone.gemfire.management.DistributedSystemMXBean;
 import com.gemstone.gemfire.management.ManagementService;
 import com.gemstone.gemfire.management.ManagementTestBase;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * This is for testing heap size from Mbean  
@@ -40,10 +43,6 @@ public class TestHeapDUnitTest extends ManagementTestBase {
 
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-  }
-
   public static long getHeapSizeOfClient() {    
     return (Runtime.getRuntime().totalMemory() -   Runtime.getRuntime().freeMemory());
   }
@@ -68,7 +67,7 @@ public class TestHeapDUnitTest extends ManagementTestBase {
       }
     };
 
-    waitForCriterion(waitCriteria, 2 * 60 * 1000, 3000, true);
+    Wait.waitForCriterion(waitCriteria, 2 * 60 * 1000, 3000, true);
     final DistributedSystemMXBean bean = getManagementService()
         .getDistributedSystemMXBean();
     assertNotNull(bean);
@@ -86,7 +85,7 @@ public class TestHeapDUnitTest extends ManagementTestBase {
     long totalHeapSizeFromMXBean = ((Number) managingNode.invoke(
         TestHeapDUnitTest.class, "getHeapSizeOfDS")).intValue();
 
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "testTotalHeapSize totalHeapSizeFromMXBean = "
             + totalHeapSizeFromMXBean + " totalHeapSizeOnAll = "
             + totalHeapSizeOnAll);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestLocatorsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestLocatorsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestLocatorsDUnitTest.java
index 2e9a239..e9b976a 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestLocatorsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestLocatorsDUnitTest.java
@@ -19,6 +19,9 @@ package com.gemstone.gemfire.management.internal.pulse;
 import com.gemstone.gemfire.management.DistributedSystemMXBean;
 import com.gemstone.gemfire.management.ManagementService;
 import com.gemstone.gemfire.management.ManagementTestBase;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * This is for testing locators from MBean
@@ -39,10 +42,6 @@ public class TestLocatorsDUnitTest extends ManagementTestBase {
 
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-  }
-
   public static int getNumOfLocatorFromMBean() {
 
     final WaitCriterion waitCriteria = new WaitCriterion() {
@@ -63,7 +62,7 @@ public class TestLocatorsDUnitTest extends ManagementTestBase {
         return "wait for getNumOfLocatorFromMBean to complete and get results";
       }
     };
-    waitForCriterion(waitCriteria, 2 * 60 * 1000, 2000, true);    
+    Wait.waitForCriterion(waitCriteria, 2 * 60 * 1000, 2000, true);    
     final DistributedSystemMXBean bean = getManagementService().getDistributedSystemMXBean();
     assertNotNull(bean);    
     return bean.getLocatorCount();
@@ -73,7 +72,7 @@ public class TestLocatorsDUnitTest extends ManagementTestBase {
     initManagement(false);
     int locatorCount = ((Number) managingNode.invoke(
         TestLocatorsDUnitTest.class, "getNumOfLocatorFromMBean")).intValue();
-    getLogWriter().info("TestLocatorsDUnitTest locatorCount =" + locatorCount);
+    LogWriterUtils.getLogWriter().info("TestLocatorsDUnitTest locatorCount =" + locatorCount);
     assertEquals(1, locatorCount);
 
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestSubscriptionsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestSubscriptionsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestSubscriptionsDUnitTest.java
index b415eac..ef491b0 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestSubscriptionsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestSubscriptionsDUnitTest.java
@@ -37,11 +37,16 @@ import com.gemstone.gemfire.management.DistributedRegionMXBean;
 import com.gemstone.gemfire.management.DistributedSystemMXBean;
 import com.gemstone.gemfire.management.ManagementService;
 import com.gemstone.gemfire.management.ManagementTestBase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * This is for testing subscriptions
@@ -78,14 +83,13 @@ public class TestSubscriptionsDUnitTest extends DistributedTestCase {
     client2 = host.getVM(3);
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     helper.closeCache(managingNode);
     helper.closeCache(server);
     helper.closeCache(client);
     helper.closeCache(client2);
     disconnectFromDS();
-    
   }
 
   private static final long serialVersionUID = 1L;
@@ -97,8 +101,8 @@ public class TestSubscriptionsDUnitTest extends DistributedTestCase {
 
     int port = (Integer) createServerCache(server);
     DistributedMember serverMember = helper.getMember(server);
-    createClientCache(client, getServerHostName(server.getHost()), port);
-    createClientCache(client2, getServerHostName(server.getHost()), port);
+    createClientCache(client, NetworkUtils.getServerHostName(server.getHost()), port);
+    createClientCache(client2, NetworkUtils.getServerHostName(server.getHost()), port);
     put(client);
     put(client2);
     registerInterest(client);
@@ -235,11 +239,11 @@ public class TestSubscriptionsDUnitTest extends DistributedTestCase {
               return "TestSubscriptionsDUnitTest wait for getDistributedSystemMXBean to complete and get results";
             }
           };
-          waitForCriterion(waitCriteria, 2 * 60 * 1000, 3000, true);
+          Wait.waitForCriterion(waitCriteria, 2 * 60 * 1000, 3000, true);
           final DistributedSystemMXBean dsBean = ManagementService
               .getExistingManagementService(cache).getDistributedSystemMXBean();
           assertNotNull(dsBean);
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "TestSubscriptionsDUnitTest dsBean.getNumSubscriptions() ="
                   + dsBean.getNumSubscriptions());
           assertTrue(dsBean.getNumSubscriptions() == 2 ? true : false);
@@ -271,7 +275,7 @@ public class TestSubscriptionsDUnitTest extends DistributedTestCase {
           r1.registerInterest(k1);
           r1.registerInterest(k2);
         } catch (Exception ex) {
-          fail("TestSubscriptionsDUnitTest failed while register Interest", ex);
+          Assert.fail("TestSubscriptionsDUnitTest failed while register Interest", ex);
         }
       }
 
@@ -292,7 +296,7 @@ public class TestSubscriptionsDUnitTest extends DistributedTestCase {
           r1.put(k2, client_k2);
           assertEquals(r1.getEntry(k2).getValue(), client_k2);
         } catch (Exception ex) {
-          fail("failed while put", ex);
+          Assert.fail("failed while put", ex);
         }
       }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/ClientsWithVersioningRetryDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/ClientsWithVersioningRetryDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/ClientsWithVersioningRetryDUnitTest.java
index 0704121..40592ff 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/ClientsWithVersioningRetryDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/ClientsWithVersioningRetryDUnitTest.java
@@ -56,10 +56,15 @@ import com.gemstone.gemfire.internal.cache.tier.sockets.ClientProxyMembershipID;
 import com.gemstone.gemfire.internal.cache.tier.sockets.command.Put70;
 import com.gemstone.gemfire.internal.cache.versions.VMVersionTag;
 import com.gemstone.gemfire.internal.cache.versions.VersionTag;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 /**
  * @author dsmith
@@ -67,7 +72,7 @@ import com.gemstone.gemfire.test.dunit.VM;
  */
 public class ClientsWithVersioningRetryDUnitTest extends CacheTestCase {
   // list of expected exceptions to remove in tearDown2()
-  static List<ExpectedException> expectedExceptions = new LinkedList<ExpectedException>();
+  static List<IgnoredException> expectedExceptions = new LinkedList<IgnoredException>();
 
   public ClientsWithVersioningRetryDUnitTest(String name) {
     super(name);
@@ -76,7 +81,7 @@ public class ClientsWithVersioningRetryDUnitTest extends CacheTestCase {
   @Override
   public void setUp() throws Exception {
     super.setUp();
-    invokeInEveryVM(new SerializableRunnable() {
+    Invoke.invokeInEveryVM(new SerializableRunnable() {
       @Override
       public void run() {
         //Disable endpoint shuffling, so that the client will always connect
@@ -87,17 +92,15 @@ public class ClientsWithVersioningRetryDUnitTest extends CacheTestCase {
     });
   }
   
-
   @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-    invokeInEveryVM(new SerializableRunnable() {
-      @Override      public void run() {
+  protected final void postTearDownCacheTestCase() throws Exception {
+    Invoke.invokeInEveryVM(new SerializableRunnable() {
+      @Override      
+      public void run() {
         System.setProperty("gemfire.bridge.disableShufflingOfEndpoints", "false");
       }
-      
     });
-    for (ExpectedException ex: expectedExceptions) {
+    for (IgnoredException ex: expectedExceptions) {
       ex.remove();
     }
   }
@@ -279,7 +282,7 @@ public class ClientsWithVersioningRetryDUnitTest extends CacheTestCase {
     final VM vm2 = host.getVM(2);
     final VM vm3 = host.getVM(3);
     
-    getLogWriter().info("creating region in vm0");
+    LogWriterUtils.getLogWriter().info("creating region in vm0");
     createRegionInPeer(vm0, RegionShortcut.PARTITION_REDUNDANT_PERSISTENT);
     
     vm0.invoke(new SerializableRunnable() {
@@ -291,14 +294,14 @@ public class ClientsWithVersioningRetryDUnitTest extends CacheTestCase {
       }
     });
     
-    getLogWriter().info("creating region in vm1");
+    LogWriterUtils.getLogWriter().info("creating region in vm1");
     createRegionInPeer(vm1, RegionShortcut.PARTITION_REDUNDANT_PERSISTENT);
-    getLogWriter().info("creating region in vm2");
+    LogWriterUtils.getLogWriter().info("creating region in vm2");
     createRegionInPeer(vm2, RegionShortcut.PARTITION_REDUNDANT_PERSISTENT);    
-    getLogWriter().info("creating region in vm3");
+    LogWriterUtils.getLogWriter().info("creating region in vm3");
     createRegionInPeer(vm3, RegionShortcut.PARTITION_PROXY);
     
-    expectedExceptions.add(addExpectedException("RuntimeException", vm2));
+    expectedExceptions.add(IgnoredException.addIgnoredException("RuntimeException", vm2));
     vm2.invoke(new SerializableRunnable("install message listener to ignore update") {
       public void run() {
         //Add a listener to close vm2 when we send a distributed put all operation
@@ -311,7 +314,7 @@ public class ClientsWithVersioningRetryDUnitTest extends CacheTestCase {
               DistributionMessage msg) {
             if(msg instanceof DistributedPutAllOperation.PutAllMessage) {
               DistributionMessageObserver.setInstance(null);
-              pause(5000); // give vm1 time to process the message that we're ignoring
+              Wait.pause(5000); // give vm1 time to process the message that we're ignoring
               disconnectFromDS(vm0);
               // no reply will be sent to vm0 due to this exception, but that's okay
               // because vm0 has been shut down
@@ -494,8 +497,8 @@ public class ClientsWithVersioningRetryDUnitTest extends CacheTestCase {
     SerializableCallable createRegion = new SerializableCallable("create client region in " + vm) {
       public Object call() throws Exception {
         ClientCacheFactory cf = new ClientCacheFactory();
-        cf.addPoolServer(getServerHostName(vm.getHost()), port1);
-        cf.addPoolServer(getServerHostName(vm.getHost()), port2);
+        cf.addPoolServer(NetworkUtils.getServerHostName(vm.getHost()), port1);
+        cf.addPoolServer(NetworkUtils.getServerHostName(vm.getHost()), port2);
         cf.setPoolPRSingleHopEnabled(false);
         cf.setPoolThreadLocalConnections(threadLocalConnections);
         cf.setPoolReadTimeout(10 * 60 * 1000);

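This test collects the handles returned by IgnoredException.addIgnoredException() and removes them in teardown. A minimal sketch of that lifecycle, using only the calls visible in the hunks above:

    // register the pattern to ignore (optionally scoped to one VM) and keep the handle
    IgnoredException ignored = IgnoredException.addIgnoredException("RuntimeException", vm2);
    try {
      // ... operations expected to log the matching exception ...
    } finally {
      // unregister so later tests are not affected
      ignored.remove();
    }
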
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/DistributedSystemIdDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/DistributedSystemIdDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/DistributedSystemIdDUnitTest.java
index 5a355ea..2d20275 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/DistributedSystemIdDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/DistributedSystemIdDUnitTest.java
@@ -26,6 +26,7 @@ import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -69,7 +70,7 @@ public class DistributedSystemIdDUnitTest extends DistributedTestCase {
   }
   
   public void testMismatch() {
-    addExpectedException("Rejected new system node");
+    IgnoredException.addIgnoredException("Rejected new system node");
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/JSONPdxClientServerDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/JSONPdxClientServerDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/JSONPdxClientServerDUnitTest.java
index a8b4e8d..937ff31 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/JSONPdxClientServerDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/JSONPdxClientServerDUnitTest.java
@@ -42,6 +42,7 @@ import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.pdx.internal.json.PdxToJSON;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.VM;
 import com.gemstone.gemfire.util.test.TestUtil;
@@ -59,7 +60,7 @@ public class JSONPdxClientServerDUnitTest extends CacheTestCase {
   }
   
   @Override
-  public void tearDown2() {
+  protected final void preTearDownCacheTestCase() {
     // this test creates client caches in some VMs and so
     // breaks the contract of CacheTestCase to hold caches in
     // that class's "cache" instance variable
@@ -594,7 +595,7 @@ public class JSONPdxClientServerDUnitTest extends CacheTestCase {
     SerializableCallable createRegion = new SerializableCallable() {
       public Object call() throws Exception {
         ClientCacheFactory cf = new ClientCacheFactory();
-        cf.addPoolServer(getServerHostName(vm.getHost()), port);
+        cf.addPoolServer(NetworkUtils.getServerHostName(vm.getHost()), port);
         cf.setPoolThreadLocalConnections(threadLocalConnections);
         ClientCache cache = getClientCache(cf);
         cache.createClientRegionFactory(ClientRegionShortcut.PROXY)
@@ -610,7 +611,7 @@ public class JSONPdxClientServerDUnitTest extends CacheTestCase {
     SerializableCallable createRegion = new SerializableCallable() {
       public Object call() throws Exception {
         ClientCacheFactory cf = new ClientCacheFactory();
-        cf.addPoolServer(getServerHostName(vm.getHost()), port);
+        cf.addPoolServer(NetworkUtils.getServerHostName(vm.getHost()), port);
         cf.setPoolThreadLocalConnections(threadLocalConnections);
         cf.setPdxReadSerialized(isPdxReadSerialized);
         ClientCache cache = getClientCache(cf);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/PdxClientServerDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/PdxClientServerDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/PdxClientServerDUnitTest.java
index 98da0fc..7720890 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/PdxClientServerDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/PdxClientServerDUnitTest.java
@@ -42,6 +42,8 @@ import com.gemstone.gemfire.internal.PdxSerializerObject;
 import com.gemstone.gemfire.internal.Version;
 import com.gemstone.gemfire.pdx.internal.AutoSerializableManager;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
@@ -171,7 +173,7 @@ public class PdxClientServerDUnitTest extends CacheTestCase {
     VM vm2 = host.getVM(2);
 
     System.setProperty("gemfire.auto.serialization.no.hardcoded.excludes", "true");
-    invokeInEveryVM(new SerializableRunnable() {
+    Invoke.invokeInEveryVM(new SerializableRunnable() {
       public void run() {
         System.setProperty("gemfire.auto.serialization.no.hardcoded.excludes", "true");
       }
@@ -239,7 +241,7 @@ public class PdxClientServerDUnitTest extends CacheTestCase {
     });
     } finally { 
       System.setProperty("gemfire.auto.serialization.no.hardcoded.excludes", "false");
-      invokeInEveryVM(new SerializableRunnable() {
+      Invoke.invokeInEveryVM(new SerializableRunnable() {
         public void run() {
           System.setProperty("gemfire.auto.serialization.no.hardcoded.excludes", "false");
         }
@@ -392,11 +394,11 @@ public class PdxClientServerDUnitTest extends CacheTestCase {
         getSystem(props);
         Cache cache = getCache();
         PoolFactory pf = PoolManager.createFactory();
-        pf.addServer(getServerHostName(vm0.getHost()), port1);
+        pf.addServer(NetworkUtils.getServerHostName(vm0.getHost()), port1);
         pf.create("pool1");
         
         pf = PoolManager.createFactory();
-        pf.addServer(getServerHostName(vm0.getHost()), port2);
+        pf.addServer(NetworkUtils.getServerHostName(vm0.getHost()), port2);
         pf.create("pool2");
         
         AttributesFactory af = new AttributesFactory();
@@ -497,7 +499,7 @@ public class PdxClientServerDUnitTest extends CacheTestCase {
         getSystem(props);
         Cache cache = getCache();
         PoolFactory pf = PoolManager.createFactory();
-        pf.addServer(getServerHostName(vm0.getHost()), port);
+        pf.addServer(NetworkUtils.getServerHostName(vm0.getHost()), port);
         pf.create("pool");
         
         AttributesFactory af = new AttributesFactory();
@@ -551,7 +553,7 @@ public class PdxClientServerDUnitTest extends CacheTestCase {
         DataSerializer.writeObject(new SimpleClass(57, (byte) 3), out);
         
         PoolFactory pf = PoolManager.createFactory();
-        pf.addServer(getServerHostName(vm0.getHost()), port);
+        pf.addServer(NetworkUtils.getServerHostName(vm0.getHost()), port);
         try {
           pf.create("pool");
           fail("should have received an exception");
@@ -702,7 +704,7 @@ public class PdxClientServerDUnitTest extends CacheTestCase {
 		  System.setProperty("gemfire.ON_DISCONNECT_CLEAR_PDXTYPEIDS", "true");	
 		}
         ClientCacheFactory cf = new ClientCacheFactory();
-        cf.addPoolServer(getServerHostName(vm.getHost()), port);
+        cf.addPoolServer(NetworkUtils.getServerHostName(vm.getHost()), port);
         cf.setPoolThreadLocalConnections(threadLocalConnections);
         if(autoSerializerPatterns != null && autoSerializerPatterns.length != 0) {
           cf.setPdxSerializer(new ReflectionBasedAutoSerializer(autoSerializerPatterns));

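The client-side hunks above route host lookup through NetworkUtils. A minimal sketch of the resulting pool setup, reusing the calls from the diff (port and threadLocalConnections stand in for the test's own values):

    ClientCacheFactory cf = new ClientCacheFactory();
    cf.addPoolServer(NetworkUtils.getServerHostName(vm.getHost()), port);
    cf.setPoolThreadLocalConnections(threadLocalConnections);
    ClientCache cache = getClientCache(cf);
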
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/PdxDeserializationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/PdxDeserializationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/PdxDeserializationDUnitTest.java
index 3310d95..10b21d0 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/PdxDeserializationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/PdxDeserializationDUnitTest.java
@@ -38,10 +38,13 @@ import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.pdx.PdxReader;
 import com.gemstone.gemfire.pdx.PdxSerializable;
 import com.gemstone.gemfire.pdx.PdxWriter;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * A test to ensure that we do not deserialize PDX objects
@@ -268,7 +271,7 @@ public class PdxDeserializationDUnitTest extends CacheTestCase {
   
   protected void checkClientValue(final Region<Object, Object> region) {
     //Because register interest is asynchronous, we need to wait for the value to arrive.
-    waitForCriterion(new WaitCriterion() {
+    Wait.waitForCriterion(new WaitCriterion() {
       
       public boolean done() {
         return region.get("A") != null;
@@ -310,7 +313,7 @@ public class PdxDeserializationDUnitTest extends CacheTestCase {
       }
       
     } catch (Exception e) {
-      fail("got exception from query", e);
+      Assert.fail("got exception from query", e);
     }
     
 
@@ -352,7 +355,7 @@ public class PdxDeserializationDUnitTest extends CacheTestCase {
     try {
       server.start();
     } catch (IOException e) {
-      fail("got exception", e);
+      Assert.fail("got exception", e);
     }
     return server;
   }
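
The PdxDeserializationDUnitTest hunks show two more recurring conversions: waitForCriterion(...) becomes the static Wait.waitForCriterion(...), and the two-argument fail(String, Throwable) becomes Assert.fail(String, Throwable) from the dunit package. A minimal sketch of the polling idiom, assuming a plain Map stands in for a Region and using the same argument order as the calls in the patch (timeout in ms, polling interval in ms, throw-on-timeout flag):

    import java.util.Map;

    import com.gemstone.gemfire.test.dunit.Wait;
    import com.gemstone.gemfire.test.dunit.WaitCriterion;

    // Sketch of the post-refactor polling idiom; names are illustrative.
    public class WaitSketch {
      static void waitForKey(final Map<Object, Object> region) {
        Wait.waitForCriterion(new WaitCriterion() {
          public boolean done() {
            return region.get("A") != null;   // poll until the value arrives
          }
          public String description() {
            return "value for key A never arrived";
          }
        }, 30 * 1000, 100, true);             // same argument order as the hunks above
      }
    }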

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/PdxTypeExportDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/PdxTypeExportDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/PdxTypeExportDUnitTest.java
index 001aab4..c820e72 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/PdxTypeExportDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/PdxTypeExportDUnitTest.java
@@ -36,6 +36,7 @@ import com.gemstone.gemfire.pdx.internal.EnumInfo;
 import com.gemstone.gemfire.pdx.internal.PdxType;
 import com.gemstone.gemfire.pdx.internal.TypeRegistry;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 
 public class PdxTypeExportDUnitTest extends CacheTestCase {
@@ -115,7 +116,7 @@ public class PdxTypeExportDUnitTest extends CacheTestCase {
       public Object call() throws Exception {
         ClientCacheFactory cf = new ClientCacheFactory()
           .setPdxSerializer(new MyPdxSerializer())
-          .addPoolServer(getServerHostName(host), port);
+          .addPoolServer(NetworkUtils.getServerHostName(host), port);
     
         ClientCache cache = getClientCache(cf);
         Region r = cache.createClientRegionFactory(ClientRegionShortcut.PROXY).create("pdxtest");

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/VersionClassLoader.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/VersionClassLoader.java b/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/VersionClassLoader.java
index be4d9c7..f2e6b5a 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/VersionClassLoader.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/pdx/VersionClassLoader.java
@@ -25,7 +25,7 @@ import java.lang.reflect.Constructor;
 import java.net.URL;
 import java.net.URLClassLoader;
 
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 
 public class VersionClassLoader {
 
@@ -42,16 +42,16 @@ public class VersionClassLoader {
     String alternateVersionClassPath =  System.getProperty("JTESTS") +
     File.separator + ".." + File.separator + ".." + File.separator +
     "classes" + File.separator + "version" + classVersion;
-    DistributedTestCase.getLogWriter().info("Initializing the class loader :" + alternateVersionClassPath);
+    LogWriterUtils.getLogWriter().info("Initializing the class loader :" + alternateVersionClassPath);
     ClassLoader versionCL = null;
     try {
       versionCL = new URLClassLoader(new URL[]{new File(alternateVersionClassPath).toURI().toURL()}, cl);
       Thread.currentThread().setContextClassLoader(versionCL); 
     } catch (Exception e) {
-      DistributedTestCase.getLogWriter().info("error", e);
+      LogWriterUtils.getLogWriter().info("error", e);
       throw new Exception("Failed to initialize the class loader. " + e.getMessage());
     }
-    DistributedTestCase.getLogWriter().info("Setting/adding class loader with " + alternateVersionClassPath);
+    LogWriterUtils.getLogWriter().info("Setting/adding class loader with " + alternateVersionClassPath);
     return versionCL;
   }
 
@@ -90,7 +90,7 @@ public class VersionClassLoader {
         newObj = constructor.newInstance();
       }
     } catch (Exception e) {
-      DistributedTestCase.getLogWriter().info("error", e);
+      LogWriterUtils.getLogWriter().info("error", e);
       throw new Exception("Failed to get the class instance. ClassName: " + className + "  error: ", e);
     }
     return newObj;
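
VersionClassLoader is representative of the logging migration: getLogWriter() moves from DistributedTestCase to LogWriterUtils, so plain helper classes no longer need to reference the test base class at all. A compact sketch with a hypothetical helper name; only the LogWriterUtils calls are the point here.

    import java.io.File;
    import java.net.URL;
    import java.net.URLClassLoader;

    import com.gemstone.gemfire.test.dunit.LogWriterUtils;

    public class LoggingSketch {
      static ClassLoader buildLoader(String classPath, ClassLoader parent) throws Exception {
        LogWriterUtils.getLogWriter().info("Initializing the class loader: " + classPath);
        try {
          return new URLClassLoader(new URL[] { new File(classPath).toURI().toURL() }, parent);
        } catch (Exception e) {
          // The dunit LogWriter also accepts a Throwable, as in the hunk above.
          LogWriterUtils.getLogWriter().info("error", e);
          throw e;
        }
      }
    }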

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/redis/RedisDistDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/redis/RedisDistDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/redis/RedisDistDUnitTest.java
index 28e2940..f787909 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/redis/RedisDistDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/redis/RedisDistDUnitTest.java
@@ -25,7 +25,10 @@ import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.internal.SocketCreator;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -70,7 +73,7 @@ public class RedisDistDUnitTest extends DistributedTestCase {
     client1 = host.getVM(2);
     client2 = host.getVM(3);  
     final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(2);
-    final int locatorPort = getDUnitLocatorPort();
+    final int locatorPort = DistributedTestUtils.getDUnitLocatorPort();
     final SerializableCallable<Object> startRedisAdapter = new SerializableCallable<Object>() {
 
       private static final long serialVersionUID = 1978017907725504294L;
@@ -80,7 +83,7 @@ public class RedisDistDUnitTest extends DistributedTestCase {
         int port = ports[VM.getCurrentVMNum()];
         CacheFactory cF = new CacheFactory();
         String locator = SocketCreator.getLocalHost().getHostName() + "[" + locatorPort + "]";
-        cF.set("log-level", getDUnitLogLevel());
+        cF.set("log-level", LogWriterUtils.getDUnitLogLevel());
         cF.set("redis-bind-address", localHost);
         cF.set("redis-port", ""+port);
         cF.set("mcast-port", "0");
@@ -99,8 +102,7 @@ public class RedisDistDUnitTest extends DistributedTestCase {
   }
 
   @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  protected final void preTearDown() throws Exception {
     disconnectAllFromDS();
   }
 
@@ -139,8 +141,8 @@ public class RedisDistDUnitTest extends DistributedTestCase {
   }
 
   public void testConcCreateDestroy() throws Throwable {
-    addExpectedException("RegionDestroyedException");
-    addExpectedException("IndexInvalidException");
+    IgnoredException.addIgnoredException("RegionDestroyedException");
+    IgnoredException.addIgnoredException("IndexInvalidException");
     final int ops = 40;
     final String hKey = TEST_KEY+"hash";
     final String lKey = TEST_KEY+"list";
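
RedisDistDUnitTest collects several of the remaining conversions in one place: tearDown2() overrides become preTearDown() hooks (the super call disappears, so the base class presumably drives the rest of tear-down), addExpectedException(...) becomes IgnoredException.addIgnoredException(...), and the locator-port and log-level lookups move to DistributedTestUtils and LogWriterUtils. A sketch of the resulting skeleton, with an invented test name and body:

    import java.util.Properties;

    import com.gemstone.gemfire.test.dunit.DistributedTestCase;
    import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
    import com.gemstone.gemfire.test.dunit.IgnoredException;
    import com.gemstone.gemfire.test.dunit.LogWriterUtils;

    public class ExampleRedisStyleDUnitTest extends DistributedTestCase {

      public ExampleRedisStyleDUnitTest(String name) {
        super(name);
      }

      @Override
      protected final void preTearDown() throws Exception {
        // Replaces tearDown2(); no super.tearDown2() call remains in the patch.
        disconnectAllFromDS();
      }

      public void testSomething() {
        // Formerly addExpectedException(...): suppress known log noise for this test.
        IgnoredException.addIgnoredException("RegionDestroyedException");

        // Static lookups that used to be inherited from the test base class.
        int locatorPort = DistributedTestUtils.getDUnitLocatorPort();
        Properties props = new Properties();
        props.setProperty("log-level", LogWriterUtils.getDUnitLogLevel());
        props.setProperty("locators", "localhost[" + locatorPort + "]");
        // ... start a cache with these properties and exercise it ...
      }
    }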

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/rest/internal/web/controllers/RestAPITestBase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/rest/internal/web/controllers/RestAPITestBase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/rest/internal/web/controllers/RestAPITestBase.java
index 40acd62..7cd95fd 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/rest/internal/web/controllers/RestAPITestBase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/rest/internal/web/controllers/RestAPITestBase.java
@@ -28,6 +28,7 @@ import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 public class RestAPITestBase extends DistributedTestCase {
   private static final long serialVersionUID = 1L;
@@ -46,7 +47,7 @@ public class RestAPITestBase extends DistributedTestCase {
   public void setUp() throws Exception {
     super.setUp();
     disconnectAllFromDS();
-    pause(5000);
+    Wait.pause(5000);
     final Host host = Host.getHost(0);
     vm0 = host.getVM(0);
     vm1 = host.getVM(1);
@@ -58,8 +59,7 @@ public class RestAPITestBase extends DistributedTestCase {
    * close the clients and the servers
    */
   @Override
-  public void tearDown2() throws Exception
-  {
+  protected final void preTearDown() throws Exception {
     vm0.invoke(getClass(), "closeCache");
     vm1.invoke(getClass(), "closeCache");
     vm2.invoke(getClass(), "closeCache");
@@ -79,7 +79,7 @@ public class RestAPITestBase extends DistributedTestCase {
   
   protected static String createCache(VM currentVM) {
     
-    RestAPITestBase test = new RestAPITestBase(testName);
+    RestAPITestBase test = new RestAPITestBase(getTestMethodName());
     
     final String hostName = currentVM.getHost().getHostName();
     final int serverPort = AvailablePortHelper.getRandomAvailableTCPPort();
@@ -98,7 +98,7 @@ public class RestAPITestBase extends DistributedTestCase {
   }
   
   public static String createCacheWithGroups (VM vm, final String groups, final String regionName ) {
-    RestAPITestBase test = new RestAPITestBase(testName);
+    RestAPITestBase test = new RestAPITestBase(getTestMethodName());
     
     final String hostName = vm.getHost().getHostName(); 
     final int serverPort = AvailablePortHelper.getRandomAvailableTCPPort();
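
RestAPITestBase shows the last two small conversions in this part: the inherited pause(ms) becomes Wait.pause(ms), and reads of the old testName field go through the static getTestMethodName(). A sketch of a setUp() using both, with a hypothetical class name:

    import com.gemstone.gemfire.test.dunit.DistributedTestCase;
    import com.gemstone.gemfire.test.dunit.Wait;

    public class RestStyleDUnitTest extends DistributedTestCase {

      public RestStyleDUnitTest(String name) {
        super(name);
      }

      public void setUp() throws Exception {
        super.setUp();
        disconnectAllFromDS();
        Wait.pause(5000);                        // was the inherited pause(5000)
        // getTestMethodName() replaces direct reads of the old testName field.
        String uniquePrefix = getTestMethodName() + "_region";
        // ... create per-test regions or caches named with uniquePrefix ...
      }
    }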


[06/62] [abbrv] [partial] incubator-geode git commit: GEODE-773: Extract static methods from DistributedTestCase

Posted by je...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryMonitorDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryMonitorDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryMonitorDUnitTest.java
index 14884bd..febe78e 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryMonitorDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryMonitorDUnitTest.java
@@ -48,11 +48,16 @@ import com.gemstone.gemfire.distributed.internal.DistributionConfigImpl;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 /**
  * Tests for QueryMonitoring service.
@@ -155,7 +160,7 @@ public class QueryMonitorDUnitTest extends CacheTestCase {
     
     r = new SerializableRunnable("getClientSystem") {
       public void run() {
-        Properties props = getAllDistributedSystemProperties(new Properties());
+        Properties props = DistributedTestUtils.getAllDistributedSystemProperties(new Properties());
         props.put(DistributionConfigImpl.LOCATORS_NAME, "");
         getSystem(props);
       }
@@ -167,14 +172,13 @@ public class QueryMonitorDUnitTest extends CacheTestCase {
   }
   
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
     Host host = Host.getHost(0);
     disconnectFromDS();
     // shut down clients before servers
     for (int i=numServers; i<4; i++) {
       host.getVM(i).invoke(CacheTestCase.class, "disconnectFromDS");
     }
-    super.tearDown2();
   }
   
   public void createRegion(VM vm){
@@ -227,7 +231,7 @@ public class QueryMonitorDUnitTest extends CacheTestCase {
         try {
           startBridgeServer(0, false);
         } catch (Exception ex) {
-          fail("While starting CacheServer", ex);
+          Assert.fail("While starting CacheServer", ex);
         }
         Cache cache = getCache();
         GemFireCacheImpl.getInstance().TEST_MAX_QUERY_EXECUTION_TIME = queryMonitorTime;
@@ -260,7 +264,7 @@ public class QueryMonitorDUnitTest extends CacheTestCase {
     for (int i=0; i < server.length; i++){
       port[i] = server[i].invokeInt(QueryMonitorDUnitTest.class, "getCacheServerPort");
     }
-    final String host0 = getServerHostName(server[0].getHost());
+    final String host0 = NetworkUtils.getServerHostName(server[0].getHost());
 
     SerializableRunnable initClient = new CacheSerializableRunnable("Init client") {
       public void run2() throws CacheException {
@@ -618,7 +622,7 @@ public class QueryMonitorDUnitTest extends CacheTestCase {
           }
           
         } catch (Exception ex){
-          fail("Exception creating the query service", ex);
+          Assert.fail("Exception creating the query service", ex);
         }
       }      
     };
@@ -1016,7 +1020,7 @@ public class QueryMonitorDUnitTest extends CacheTestCase {
     cqDUnitTest.createServer(server, 0, true);
     final int port = server.invokeInt(CqQueryDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
 
     // Create client.
     cqDUnitTest.createClient(client, port, host0);
@@ -1133,7 +1137,7 @@ public class QueryMonitorDUnitTest extends CacheTestCase {
             exampleRegion.put(""+i, new Portfolio(i));
           }
         }
-        getLogWriter().info("### Completed updates in server1 in testCacheOpAfterQueryCancel");
+        LogWriterUtils.getLogWriter().info("### Completed updates in server1 in testCacheOpAfterQueryCancel");
       }
     });
 
@@ -1146,7 +1150,7 @@ public class QueryMonitorDUnitTest extends CacheTestCase {
             exampleRegion.put(""+i, new Portfolio(i));
           }
         }
-        getLogWriter().info("### Completed updates in server2 in testCacheOpAfterQueryCancel");
+        LogWriterUtils.getLogWriter().info("### Completed updates in server2 in testCacheOpAfterQueryCancel");
       }
     });
 
@@ -1167,19 +1171,19 @@ public class QueryMonitorDUnitTest extends CacheTestCase {
               Query query = queryService.newQuery(qStr);
               query.execute();
             } catch (QueryExecutionTimeoutException qet) {
-              getLogWriter().info("### Got Expected QueryExecutionTimeout exception. " +
+              LogWriterUtils.getLogWriter().info("### Got Expected QueryExecutionTimeout exception. " +
                   qet.getMessage());
               if (qet.getMessage().contains("cancelled after exceeding max execution")){
-                getLogWriter().info("### Doing a put operation");
+                LogWriterUtils.getLogWriter().info("### Doing a put operation");
                 exampleRegion.put(""+i, new Portfolio(i));
               }
             } catch (Exception e){
               fail("Exception executing query." + e.getMessage());
             }
           }
-          getLogWriter().info("### Completed Executing queries in testCacheOpAfterQueryCancel");
+          LogWriterUtils.getLogWriter().info("### Completed Executing queries in testCacheOpAfterQueryCancel");
         } catch (Exception ex){
-          fail("Exception creating the query service", ex);
+          Assert.fail("Exception creating the query service", ex);
         }
       }      
     };
@@ -1187,23 +1191,23 @@ public class QueryMonitorDUnitTest extends CacheTestCase {
     AsyncInvocation ai3 = server3.invokeAsync(executeQuery);
     AsyncInvocation ai4 = server4.invokeAsync(executeQuery);
     
-    getLogWriter().info("### Waiting for async threads to join in testCacheOpAfterQueryCancel");
+    LogWriterUtils.getLogWriter().info("### Waiting for async threads to join in testCacheOpAfterQueryCancel");
     try {
-      DistributedTestCase.join(ai1, 5 * 60 * 1000, null);
-      DistributedTestCase.join(ai2, 5 * 60 * 1000, null);
-      DistributedTestCase.join(ai3, 5 * 60 * 1000, null);
-      DistributedTestCase.join(ai4, 5 * 60 * 1000, null);
+      ThreadUtils.join(ai1, 5 * 60 * 1000);
+      ThreadUtils.join(ai2, 5 * 60 * 1000);
+      ThreadUtils.join(ai3, 5 * 60 * 1000);
+      ThreadUtils.join(ai4, 5 * 60 * 1000);
     } catch (Exception ex) {
       fail("Async thread join failure");
     }
-    getLogWriter().info("### DONE Waiting for async threads to join in testCacheOpAfterQueryCancel");
+    LogWriterUtils.getLogWriter().info("### DONE Waiting for async threads to join in testCacheOpAfterQueryCancel");
     
     validateQueryMonitorThreadCnt(server1, 0, 1000);
     validateQueryMonitorThreadCnt(server2, 0, 1000);
     validateQueryMonitorThreadCnt(server3, 0, 1000);
     validateQueryMonitorThreadCnt(server4, 0, 1000);
     
-    getLogWriter().info("### DONE validating query monitor threads testCacheOpAfterQueryCancel");
+    LogWriterUtils.getLogWriter().info("### DONE validating query monitor threads testCacheOpAfterQueryCancel");
     
     stopServer(server1);
     stopServer(server2);
@@ -1223,7 +1227,7 @@ public class QueryMonitorDUnitTest extends CacheTestCase {
         while (true) {
           if (qm.getQueryMonitorThreadCount() != threadCount) {
             if (waited <= waitTime) {
-              pause(10);
+              Wait.pause(10);
               waited+=10;
               continue;
             } else {
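
QueryMonitorDUnitTest also migrates the async-thread joins: DistributedTestCase.join(invocation, timeoutMs, logWriter) becomes ThreadUtils.join(invocation, timeoutMs), dropping the LogWriter argument. A minimal sketch of the join-and-check idiom, with a hypothetical helper:

    import com.gemstone.gemfire.test.dunit.Assert;
    import com.gemstone.gemfire.test.dunit.AsyncInvocation;
    import com.gemstone.gemfire.test.dunit.SerializableRunnable;
    import com.gemstone.gemfire.test.dunit.ThreadUtils;
    import com.gemstone.gemfire.test.dunit.VM;

    // Sketch: run work asynchronously in a remote VM, then wait for it.
    public class AsyncJoinSketch {
      static void runAndJoin(VM vm, SerializableRunnable work) {
        AsyncInvocation ai = vm.invokeAsync(work);
        // Was DistributedTestCase.join(ai, 5 * 60 * 1000, getLogWriter());
        // the extracted utility no longer takes a LogWriter.
        ThreadUtils.join(ai, 5 * 60 * 1000);
        if (ai.exceptionOccurred()) {
          Assert.fail("async invocation failed", ai.getException());
        }
      }
    }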

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/snapshot/ClientSnapshotDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/snapshot/ClientSnapshotDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/snapshot/ClientSnapshotDUnitTest.java
index cc053fc..540b37b 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/snapshot/ClientSnapshotDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/snapshot/ClientSnapshotDUnitTest.java
@@ -41,6 +41,8 @@ import com.gemstone.gemfire.cache.util.CqListenerAdapter;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 
 public class ClientSnapshotDUnitTest extends CacheTestCase {
@@ -253,9 +255,9 @@ public class ClientSnapshotDUnitTest extends CacheTestCase {
       @Override
       public Object call() throws Exception {
         ClientCacheFactory cf = new ClientCacheFactory()
-          .set("log-level", getDUnitLogLevel())
+          .set("log-level", LogWriterUtils.getDUnitLogLevel())
           .setPdxSerializer(new MyPdxSerializer())
-          .addPoolServer(getServerHostName(host), port)
+          .addPoolServer(NetworkUtils.getServerHostName(host), port)
           .setPoolSubscriptionEnabled(true)
           .setPoolPRSingleHopEnabled(false);
     

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/PRDeltaPropagationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/PRDeltaPropagationDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/PRDeltaPropagationDUnitTest.java
index f77f845..8a4eeaa 100755
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/PRDeltaPropagationDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/PRDeltaPropagationDUnitTest.java
@@ -56,10 +56,14 @@ import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheClientNotifier;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheClientProxy;
 import com.gemstone.gemfire.internal.cache.tier.sockets.ConflationDUnitTest;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * 
@@ -784,7 +788,7 @@ public class PRDeltaPropagationDUnitTest extends DistributedTestCase {
       assertNotNull(cache);
     }
     catch (Exception e) {
-      fail("Failed while creating the cache", e);
+      Assert.fail("Failed while creating the cache", e);
     }
   }
 
@@ -820,7 +824,7 @@ public class PRDeltaPropagationDUnitTest extends DistributedTestCase {
     assertNotNull(cache);
     deltaPR = cache.createRegion(partitionedRegionName, attr.create());
     assertNotNull(deltaPR);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region " + partitionedRegionName
             + " created Successfully :" + deltaPR);
   }
@@ -842,7 +846,7 @@ public class PRDeltaPropagationDUnitTest extends DistributedTestCase {
       server1.start();
     }
     catch (IOException e) {
-      fail("Failed to start the Server", e);
+      Assert.fail("Failed to start the Server", e);
     }
     assertTrue(server1.isRunning());
     return new Integer(server1.getPort());
@@ -1107,7 +1111,7 @@ public class PRDeltaPropagationDUnitTest extends DistributedTestCase {
         return "Last key NOT received.";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 10*1000, 100, true);
+    Wait.waitForCriterion(wc, 10*1000, 100, true);
   }
 
   public static Boolean verifyQueryUpdateExecuted() {
@@ -1138,14 +1142,13 @@ public class PRDeltaPropagationDUnitTest extends DistributedTestCase {
     ConflationDUnitTest.unsetIsSlowStart();
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();    
+  @Override
+  protected final void preTearDown() throws Exception {
     closeCache();
     client1.invoke(PRDeltaPropagationDUnitTest.class, "closeCache");
     dataStore1.invoke(PRDeltaPropagationDUnitTest.class, "closeCache");
     dataStore2.invoke(PRDeltaPropagationDUnitTest.class, "closeCache");
     dataStore3.invoke(PRDeltaPropagationDUnitTest.class, "closeCache");
-    
   }
 
   public static void closeCache() {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/PutAllCSDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/PutAllCSDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/PutAllCSDUnitTest.java
index cd2a26e..cc5876f 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/PutAllCSDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/internal/cache/PutAllCSDUnitTest.java
@@ -75,12 +75,19 @@ import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.internal.cache.versions.VersionTag;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Tests putAll for c/s. Also tests removeAll
@@ -132,7 +139,7 @@ public class PutAllCSDUnitTest extends ClientServerTestCase {
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 10 * 1000, 1000, true);
+    Wait.waitForCriterion(ev, 10 * 1000, 1000, true);
     assertEquals(expectedSize, region.size());
   }
   
@@ -149,7 +156,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     VM client2 = host.getVM(3);
     final String regionName = getUniqueName();
     final int serverPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String serverHost = getServerHostName(server.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server.getHost());
 
     // set <false, true> means <PR=false, notifyBySubscription=true> to enable registerInterest and CQ
     createBridgeServer(server, regionName, serverPort, false, 0, null);
@@ -170,7 +177,7 @@ public void testOneServer() throws CacheException, InterruptedException {
         
         // registerInterest for ALL_KEYS
         region.registerInterest("ALL_KEYS");
-        getLogWriter().info("client2 registerInterest ALL_KEYS at "+region.getFullPath());
+        LogWriterUtils.getLogWriter().info("client2 registerInterest ALL_KEYS at "+region.getFullPath());
       }
     });
 
@@ -196,7 +203,7 @@ public void testOneServer() throws CacheException, InterruptedException {
         CqAttributes cqa1 = cqf1.create();
         String cqName1 = "EOInfoTracker";
         String queryStr1 = "SELECT ALL * FROM /root/"+regionName+" ii WHERE ii.getTicker() >= '10' and ii.getTicker() < '20'";
-        getLogWriter().info("Query String: "+queryStr1);
+        LogWriterUtils.getLogWriter().info("Query String: "+queryStr1);
         try {
           QueryService cqService = getCache().getQueryService();
           CqQuery EOTracker = cqService.newCq(cqName1, queryStr1, cqa1);
@@ -206,11 +213,11 @@ public void testOneServer() throws CacheException, InterruptedException {
           for (int i=0; i<list1.size(); i++) {
             Struct s = (Struct)list1.get(i);
             TestObject o = (TestObject)s.get("value");
-            getLogWriter().info("InitialResult:"+i+":"+o);
+            LogWriterUtils.getLogWriter().info("InitialResult:"+i+":"+o);
             localregion.put("key-"+i, o);
           }
           if (localregion.size() > 0) {
-            getLogWriter().info("CQ is ready");
+            LogWriterUtils.getLogWriter().info("CQ is ready");
             synchronized(lockObject) {
               lockObject.notify();
             }
@@ -220,19 +227,19 @@ public void testOneServer() throws CacheException, InterruptedException {
           EOTracker.close();
         }
         catch (CqClosedException e) {
-          fail("CQ", e);
+          Assert.fail("CQ", e);
         }
         catch (RegionNotFoundException e) {
-          fail("CQ", e);
+          Assert.fail("CQ", e);
         }
         catch (QueryInvalidException e) {
-          fail("CQ", e);
+          Assert.fail("CQ", e);
         }
         catch (CqExistsException e) {
-          fail("CQ", e);
+          Assert.fail("CQ", e);
         }
         catch (CqException e) {
-          fail("CQ", e);
+          Assert.fail("CQ", e);
         }
       }
     });
@@ -289,9 +296,9 @@ public void testOneServer() throws CacheException, InterruptedException {
             obj = (TestObject)localregion.get("key-"+i);
             if (obj != null) {
               // wait for the key to be destroyed
-              pause(100);
-              if (getLogWriter().fineEnabled()) {
-                getLogWriter().info("Waiting 100ms("+cnt+") for key-" + i + " to be destroyed");
+              Wait.pause(100);
+              if (LogWriterUtils.getLogWriter().fineEnabled()) {
+                LogWriterUtils.getLogWriter().info("Waiting 100ms("+cnt+") for key-" + i + " to be destroyed");
               }
               cnt++;
             } else {
@@ -321,8 +328,8 @@ public void testOneServer() throws CacheException, InterruptedException {
           while (cnt < 100) {
             obj = (TestObject)localregion.get("key-"+i);
             if (obj == null || obj.getPrice() != i*10) {
-              pause(100);
-              getLogWriter().info("Waiting 100ms("+cnt+") for obj.getPrice() == i*10 at entry "+i);
+              Wait.pause(100);
+              LogWriterUtils.getLogWriter().info("Waiting 100ms("+cnt+") for obj.getPrice() == i*10 at entry "+i);
               cnt++;
             } else {
               break;
@@ -336,7 +343,7 @@ public void testOneServer() throws CacheException, InterruptedException {
       }
     });
 
-    DistributedTestCase.join(async1, 30 * 1000, getLogWriter());
+    ThreadUtils.join(async1, 30 * 1000);
     
     // verify stats for client putAll into distributed region
     // 1. verify client staus
@@ -422,7 +429,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     int serverPorts[] = AvailablePortHelper.getRandomAvailableTCPPorts(2);
     final int serverPort1 = serverPorts[0];
     final int serverPort2 = serverPorts[1];
-    final String serverHost = getServerHostName(server1.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server1.getHost());
   
     // set notifyBySubscription=false to test local-invalidates
     createBridgeServer(server1, regionName, serverPort1, false, 0, null);
@@ -437,7 +444,7 @@ public void testOneServer() throws CacheException, InterruptedException {
         
         // registerInterest for ALL_KEYS
         region.registerInterest("ALL_KEYS");
-        getLogWriter().info("client2 registerInterest ALL_KEYS at "+region.getFullPath());
+        LogWriterUtils.getLogWriter().info("client2 registerInterest ALL_KEYS at "+region.getFullPath());
       }
     });
   
@@ -493,7 +500,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     int serverPorts[] = AvailablePortHelper.getRandomAvailableTCPPorts(2);
     final int serverPort1 = serverPorts[0];
     final int serverPort2 = serverPorts[1];
-    final String serverHost = getServerHostName(server1.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server1.getHost());
 
     // set notifyBySubscription=false to test local-invalidates
     createBridgeServer(server1, regionName, serverPort1, false, 0, null);
@@ -576,7 +583,7 @@ public void testOneServer() throws CacheException, InterruptedException {
         Region region = getRootRegion().getSubregion(regionName);
         assertEquals(0, region.size());
         MyWriter mywriter = (MyWriter)region.getAttributes().getCacheWriter();
-        getLogWriter().info("server cachewriter triggered for destroy: "+mywriter.num_destroyed);
+        LogWriterUtils.getLogWriter().info("server cachewriter triggered for destroy: "+mywriter.num_destroyed);
         assertEquals(numberOfEntries, mywriter.num_destroyed);
       }
     });
@@ -588,7 +595,7 @@ public void testOneServer() throws CacheException, InterruptedException {
         Region region = getRootRegion().getSubregion(regionName);
         assertEquals(0, region.size());
         MyWriter mywriter = (MyWriter)region.getAttributes().getCacheWriter();
-        getLogWriter().info("server cachewriter triggered for destroy: "+mywriter.num_destroyed);
+        LogWriterUtils.getLogWriter().info("server cachewriter triggered for destroy: "+mywriter.num_destroyed);
         // beforeDestroys are only triggered at server1 since the removeAll is submitted from client1
         assertEquals(0, mywriter.num_destroyed);
       }
@@ -616,8 +623,8 @@ public void testOneServer() throws CacheException, InterruptedException {
       }
     });
 
-    DistributedTestCase.join(async1, 30 * 1000, getLogWriter());
-    DistributedTestCase.join(async2, 30 * 1000, getLogWriter());
+    ThreadUtils.join(async1, 30 * 1000);
+    ThreadUtils.join(async2, 30 * 1000);
     }
 
     client1.invoke(new CacheSerializableRunnable(title
@@ -715,8 +722,8 @@ public void testOneServer() throws CacheException, InterruptedException {
         }
       });
 
-      DistributedTestCase.join(async1, 30 * 1000, getLogWriter());
-      DistributedTestCase.join(async2, 30 * 1000, getLogWriter());
+      ThreadUtils.join(async1, 30 * 1000);
+      ThreadUtils.join(async2, 30 * 1000);
     }
 
     client1.invoke(new CacheSerializableRunnable(title+"client1 removeAll") {
@@ -863,7 +870,7 @@ public void testOneServer() throws CacheException, InterruptedException {
               return null;
             }
           };
-          DistributedTestCase.waitForCriterion(ev, 10 * 1000, 1000, true);
+          Wait.waitForCriterion(ev, 10 * 1000, 1000, true);
           // local invalidate will set the value to null
           TestObject obj = null;
           obj = (TestObject)region.getEntry("key-" + i).getValue();
@@ -911,7 +918,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     VM client2 = host.getVM(3);
     final String regionName = getUniqueName();
     
-    final String serverHost = getServerHostName(server1.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server1.getHost());
 
     // set notifyBySubscription=false to test local-invalidates
     int serverPort1 = createServerRegion(server1, regionName, CCE);
@@ -984,7 +991,7 @@ public void testOneServer() throws CacheException, InterruptedException {
         Region region = getRootRegion().getSubregion(regionName);
         assertEquals(0, region.size());
         MyWriter mywriter = (MyWriter)region.getAttributes().getCacheWriter();
-        getLogWriter().info("server cachewriter triggered for destroy: "+mywriter.num_destroyed);
+        LogWriterUtils.getLogWriter().info("server cachewriter triggered for destroy: "+mywriter.num_destroyed);
         assertEquals(numberOfEntries, mywriter.num_destroyed);
       }
     });
@@ -996,7 +1003,7 @@ public void testOneServer() throws CacheException, InterruptedException {
         Region region = getRootRegion().getSubregion(regionName);
         assertEquals(0, region.size());
         MyWriter mywriter = (MyWriter)region.getAttributes().getCacheWriter();
-        getLogWriter().info("server cachewriter triggered for destroy: "+mywriter.num_destroyed);
+        LogWriterUtils.getLogWriter().info("server cachewriter triggered for destroy: "+mywriter.num_destroyed);
         // beforeDestroys are only triggered at server1 since the removeAll is submitted from client1
         assertEquals(0, mywriter.num_destroyed);
       }
@@ -1138,7 +1145,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     int serverPorts[] = AvailablePortHelper.getRandomAvailableTCPPorts(2);
     final int serverPort1 = serverPorts[0];
     final int serverPort2 = serverPorts[1];
-    final String serverHost = getServerHostName(server1.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server1.getHost());
 
     // set <true, false> means <PR=true, notifyBySubscription=false> to test local-invalidates
     createBridgeServer(server1, regionName, serverPort1, isPR, redundantCopies, null);
@@ -1179,7 +1186,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     client2.invoke(new CacheSerializableRunnable(title
         + "verify Bridge Server 2") {
       public void run2() throws CacheException {
-        pause(5000);
+        Wait.pause(5000);
         Region region = getRootRegion().getSubregion(regionName);
         Region.Entry re = region.getEntry("case3-1");
         assertNotNull(re);
@@ -1237,7 +1244,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     client2.invoke(new CacheSerializableRunnable(title
         + "verify Bridge Server 2") {
       public void run2() throws CacheException {
-        pause(5000);
+        Wait.pause(5000);
         Region region = getRootRegion().getSubregion(regionName);
         Region.Entry re = region.getEntry("case3-1");
         assertNull(re);
@@ -1287,7 +1294,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     int serverPorts[] = AvailablePortHelper.getRandomAvailableTCPPorts(2);
     final int serverPort1 = serverPorts[0];
     final int serverPort2 = serverPorts[1];
-    final String serverHost = getServerHostName(server1.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server1.getHost());
 
     // set <true, false> means <PR=true, notifyBySubscription=false> to test local-invalidates
     createBridgeServer(server1, regionName, serverPort1, true, 0, "ds1");
@@ -1306,7 +1313,7 @@ public void testOneServer() throws CacheException, InterruptedException {
         region.getAttributesMutator().addCacheListener(new MyListener(false));
         
         region.registerInterest("ALL_KEYS");
-        getLogWriter().info("client2 registerInterest ALL_KEYS at "+region.getFullPath());
+        LogWriterUtils.getLogWriter().info("client2 registerInterest ALL_KEYS at "+region.getFullPath());
       }
     });
     
@@ -1361,7 +1368,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     
     client2.invoke(new CacheSerializableRunnable(title+"verify entries from client2") {
       public void run2() throws CacheException {
-        pause(5000);
+        Wait.pause(5000);
         Region region = getRootRegion().getSubregion(regionName);
         Region.Entry re;
         for (int i=0; i<numberOfEntries; i++) {
@@ -1392,7 +1399,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     int serverPorts[] = AvailablePortHelper.getRandomAvailableTCPPorts(2);
     final int serverPort1 = serverPorts[0];
     final int serverPort2 = serverPorts[1];
-    final String serverHost = getServerHostName(server1.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server1.getHost());
 
     // set <true, false> means <PR=true, notifyBySubscription=false> to test local-invalidates
     createBridgeServer(server1, regionName, serverPort1, true, 1, null);
@@ -1475,7 +1482,7 @@ public void testOneServer() throws CacheException, InterruptedException {
         Region region = getRootRegion().getSubregion(regionName);
         assertEquals(0, region.size());
         MyWriter mywriter = (MyWriter)region.getAttributes().getCacheWriter();
-        getLogWriter().info("server cachewriter triggered for destroy: "+mywriter.num_destroyed);
+        LogWriterUtils.getLogWriter().info("server cachewriter triggered for destroy: "+mywriter.num_destroyed);
         // beforeDestroys are only triggered at primary buckets. server1 and server2 each holds half of buckets
         assertEquals(numberOfEntries/2, mywriter.num_destroyed);
       }
@@ -1488,7 +1495,7 @@ public void testOneServer() throws CacheException, InterruptedException {
         Region region = getRootRegion().getSubregion(regionName);
         assertEquals(0, region.size());
         MyWriter mywriter = (MyWriter)region.getAttributes().getCacheWriter();
-        getLogWriter().info("server cachewriter triggered for destroy: "+mywriter.num_destroyed);
+        LogWriterUtils.getLogWriter().info("server cachewriter triggered for destroy: "+mywriter.num_destroyed);
         // beforeDestroys are only triggered at primary buckets. server1 and server2 each holds half of buckets
         assertEquals(numberOfEntries/2, mywriter.num_destroyed);
       }
@@ -1516,8 +1523,8 @@ public void testOneServer() throws CacheException, InterruptedException {
       }
     });
 
-    DistributedTestCase.join(async1, 30 * 1000, getLogWriter());
-    DistributedTestCase.join(async2, 30 * 1000, getLogWriter());
+    ThreadUtils.join(async1, 30 * 1000);
+    ThreadUtils.join(async2, 30 * 1000);
     }
     
     client1.invoke(new CacheSerializableRunnable(title
@@ -1594,8 +1601,8 @@ public void testOneServer() throws CacheException, InterruptedException {
         }
       });
 
-      DistributedTestCase.join(async1, 30 * 1000, getLogWriter());
-      DistributedTestCase.join(async2, 30 * 1000, getLogWriter());
+      ThreadUtils.join(async1, 30 * 1000);
+      ThreadUtils.join(async2, 30 * 1000);
     }
 
     client1.invoke(new CacheSerializableRunnable(title+"client1 removeAll") {
@@ -1742,7 +1749,7 @@ public void testOneServer() throws CacheException, InterruptedException {
               return null;
             }
           };
-          DistributedTestCase.waitForCriterion(ev, 10 * 1000, 1000, true);
+          Wait.waitForCriterion(ev, 10 * 1000, 1000, true);
           // local invalidate will set the value to null
           TestObject obj = (TestObject)region.getEntry("key-" + i).getValue();
           assertEquals(null, obj);
@@ -1766,7 +1773,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     final String regionName = getUniqueName();
 
     final int serverPort1 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String serverHost = getServerHostName(server1.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server1.getHost());
 
     // set <true, false> means <PR=true, notifyBySubscription=false> to test local-invalidates
     createBridgeServer(server1, regionName, serverPort1, false, 0, null);
@@ -1821,7 +1828,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     int serverPorts[] = AvailablePortHelper.getRandomAvailableTCPPorts(2);
     final int serverPort1 = serverPorts[0];
     final int serverPort2 = serverPorts[1];
-    final String serverHost = getServerHostName(server1.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server1.getHost());
 
     // set <true, false> means <PR=true, notifyBySubscription=false> to test local-invalidates
     createBridgeServer(server1, regionName, serverPort1, false, 0, null);
@@ -1849,7 +1856,7 @@ public void testOneServer() throws CacheException, InterruptedException {
         region.getAttributesMutator().addCacheListener(new MyListener(false));
         
         region.registerInterest("ALL_KEYS");
-        getLogWriter().info("client1 registerInterest ALL_KEYS at "+region.getFullPath());
+        LogWriterUtils.getLogWriter().info("client1 registerInterest ALL_KEYS at "+region.getFullPath());
       }
     });   
 
@@ -1879,27 +1886,27 @@ public void testOneServer() throws CacheException, InterruptedException {
         int c2Size = getRegionSize(client2, regionName);
         int s1Size = getRegionSize(server1, regionName);
         int s2Size = getRegionSize(server2, regionName);
-        getLogWriter().info("region sizes: "+c1Size+","+c2Size+","+s1Size+","+s2Size);
+        LogWriterUtils.getLogWriter().info("region sizes: "+c1Size+","+c2Size+","+s1Size+","+s2Size);
         if (c1Size != 15) {
-          getLogWriter().info("waiting for client1 to get all updates");
+          LogWriterUtils.getLogWriter().info("waiting for client1 to get all updates");
           return false;
         }
         if (c2Size != 15) {
-          getLogWriter().info("waiting for client2 to get all updates");
+          LogWriterUtils.getLogWriter().info("waiting for client2 to get all updates");
           return false;
         }
         if (s1Size != 15) {
-          getLogWriter().info("waiting for server1 to get all updates");
+          LogWriterUtils.getLogWriter().info("waiting for server1 to get all updates");
           return false;
         }
         if (s2Size != 15) {
-          getLogWriter().info("waiting for server2 to get all updates");
+          LogWriterUtils.getLogWriter().info("waiting for server2 to get all updates");
           return false;
         }
         return true;
       }
     };
-    waitForCriterion(waitForSizes, 10000, 1000, true);
+    Wait.waitForCriterion(waitForSizes, 10000, 1000, true);
     }
     int server1Size = getRegionSize(server1, regionName);
     int server2Size = getRegionSize(server1, regionName);
@@ -1943,27 +1950,27 @@ public void testOneServer() throws CacheException, InterruptedException {
           int c2Size = getRegionSize(client2, regionName);
           int s1Size = getRegionSize(server1, regionName);
           int s2Size = getRegionSize(server2, regionName);
-          getLogWriter().info("region sizes: "+c1Size+","+c2Size+","+s1Size+","+s2Size);
+          LogWriterUtils.getLogWriter().info("region sizes: "+c1Size+","+c2Size+","+s1Size+","+s2Size);
           if (c1Size != 15) { // client 1 did not register interest
-            getLogWriter().info("waiting for client1 to get all updates");
+            LogWriterUtils.getLogWriter().info("waiting for client1 to get all updates");
             return false;
           }
           if (c2Size != 15*2) {
-            getLogWriter().info("waiting for client2 to get all updates");
+            LogWriterUtils.getLogWriter().info("waiting for client2 to get all updates");
             return false;
           }
           if (s1Size != 15*2) {
-            getLogWriter().info("waiting for server1 to get all updates");
+            LogWriterUtils.getLogWriter().info("waiting for server1 to get all updates");
             return false;
           }
           if (s2Size != 15*2) {
-            getLogWriter().info("waiting for server2 to get all updates");
+            LogWriterUtils.getLogWriter().info("waiting for server2 to get all updates");
             return false;
           }
           return true;
         }
       };
-      waitForCriterion(waitForSizes, 10000, 1000, true);
+      Wait.waitForCriterion(waitForSizes, 10000, 1000, true);
     }
     
     // now do a removeAll that is not allowed to remove everything
@@ -2000,27 +2007,27 @@ public void testOneServer() throws CacheException, InterruptedException {
           int c2Size = getRegionSize(client2, regionName);
           int s1Size = getRegionSize(server1, regionName);
           int s2Size = getRegionSize(server2, regionName);
-          getLogWriter().info("region sizes: "+c1Size+","+c2Size+","+s1Size+","+s2Size);
+          LogWriterUtils.getLogWriter().info("region sizes: "+c1Size+","+c2Size+","+s1Size+","+s2Size);
           if (c1Size != 15-5) { // client 1 did not register interest
-            getLogWriter().info("waiting for client1 to get all destroys");
+            LogWriterUtils.getLogWriter().info("waiting for client1 to get all destroys");
             return false;
           }
           if (c2Size != (15*2)-5) {
-            getLogWriter().info("waiting for client2 to get all destroys");
+            LogWriterUtils.getLogWriter().info("waiting for client2 to get all destroys");
             return false;
           }
           if (s1Size != (15*2)-5) {
-            getLogWriter().info("waiting for server1 to get all destroys");
+            LogWriterUtils.getLogWriter().info("waiting for server1 to get all destroys");
             return false;
           }
           if (s2Size != (15*2)-5) {
-            getLogWriter().info("waiting for server2 to get all destroys");
+            LogWriterUtils.getLogWriter().info("waiting for server2 to get all destroys");
             return false;
           }
           return true;
         }
       };
-      waitForCriterion(waitForSizes, 10000, 1000, true);
+      Wait.waitForCriterion(waitForSizes, 10000, 1000, true);
     }
 
     // reset cacheWriter's count to allow another 5 keys to be destroyed
@@ -2059,27 +2066,27 @@ public void testOneServer() throws CacheException, InterruptedException {
           int c2Size = getRegionSize(client2, regionName);
           int s1Size = getRegionSize(server1, regionName);
           int s2Size = getRegionSize(server2, regionName);
-          getLogWriter().info("region sizes: "+c1Size+","+c2Size+","+s1Size+","+s2Size);
+          LogWriterUtils.getLogWriter().info("region sizes: "+c1Size+","+c2Size+","+s1Size+","+s2Size);
           if (c1Size != 15-5) { // client 1 did not register interest
-            getLogWriter().info("waiting for client1 to get all destroys");
+            LogWriterUtils.getLogWriter().info("waiting for client1 to get all destroys");
             return false;
           }
           if (c2Size != (15*2)-5-5) {
-            getLogWriter().info("waiting for client2 to get all destroys");
+            LogWriterUtils.getLogWriter().info("waiting for client2 to get all destroys");
             return false;
           }
           if (s1Size != (15*2)-5-5) {
-            getLogWriter().info("waiting for server1 to get all destroys");
+            LogWriterUtils.getLogWriter().info("waiting for server1 to get all destroys");
             return false;
           }
           if (s2Size != (15*2)-5-5) {
-            getLogWriter().info("waiting for server2 to get all destroys");
+            LogWriterUtils.getLogWriter().info("waiting for server2 to get all destroys");
             return false;
           }
           return true;
         }
       };
-      waitForCriterion(waitForSizes, 10000, 1000, true);
+      Wait.waitForCriterion(waitForSizes, 10000, 1000, true);
     }
     server1.invoke(removeExceptionTag1(expectedExceptions));
     server2.invoke(removeExceptionTag1(expectedExceptions));
@@ -2109,7 +2116,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     int serverPorts[] = AvailablePortHelper.getRandomAvailableTCPPorts(2);
     final int serverPort1 = serverPorts[0];
     final int serverPort2 = serverPorts[1];
-    final String serverHost = getServerHostName(server1.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server1.getHost());
 
     // set <true, false> means <PR=true, notifyBySubscription=false> to test local-invalidates
     createBridgeServer(server1, regionName, serverPort1, true, 0, "ds1");
@@ -2146,7 +2153,7 @@ public void testOneServer() throws CacheException, InterruptedException {
         region.getAttributesMutator().addCacheListener(new MyListener(false));
         
         region.registerInterest("ALL_KEYS");
-        getLogWriter().info("client2 registerInterest ALL_KEYS at "+region.getFullPath());
+        LogWriterUtils.getLogWriter().info("client2 registerInterest ALL_KEYS at "+region.getFullPath());
       }
     });   
 
@@ -2173,23 +2180,23 @@ public void testOneServer() throws CacheException, InterruptedException {
 
     // server2 will closeCache after created 10 keys
     
-    DistributedTestCase.join(async1, 30 * 1000, getLogWriter());
+    ThreadUtils.join(async1, 30 * 1000);
     if (async1.exceptionOccurred()) {
-      fail("Aync1 get exceptions:", async1.getException());
+      Assert.fail("Aync1 get exceptions:", async1.getException());
     }
 
     int client1Size = getRegionSize(client1, regionName);
     // client2Size may be more than client1Size
     int client2Size = getRegionSize(client2, regionName);
     int server1Size = getRegionSize(server1, regionName);
-    getLogWriter().info("region sizes: "+client1Size+","+client2Size+","+server1Size);
+    LogWriterUtils.getLogWriter().info("region sizes: "+client1Size+","+client2Size+","+server1Size);
 //    assertEquals(server1Size, client1Size);
 
     // restart server2
     createBridgeServer(server2, regionName, serverPort2, true, 0, "ds1");
     server1Size = getRegionSize(server1, regionName);
     int server2Size = getRegionSize(server2, regionName);
-    getLogWriter().info("region sizes after server2 restarted: "+client1Size+","+client2Size+","+server1Size+":"+server2Size);
+    LogWriterUtils.getLogWriter().info("region sizes after server2 restarted: "+client1Size+","+client2Size+","+server1Size+":"+server2Size);
     assertEquals(client2Size, server1Size);
     assertEquals(client2Size, server2Size);
 
@@ -2215,7 +2222,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     int new_client1Size = getRegionSize(client1, regionName);
     int new_client2Size = getRegionSize(client2, regionName);
 
-    getLogWriter().info("region sizes after re-run the putAll: "+new_client1Size+","+new_client2Size+","+new_server1Size);
+    LogWriterUtils.getLogWriter().info("region sizes after re-run the putAll: "+new_client1Size+","+new_client2Size+","+new_server1Size);
     assertEquals(server1Size+numberOfEntries/2, new_server1Size);
     assertEquals(client1Size+numberOfEntries/2, new_client1Size);
     assertEquals(client2Size+numberOfEntries/2, new_client2Size);
@@ -2224,7 +2231,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     createBridgeServer(server2, regionName, serverPort2, true, 0, "ds1");
     server1Size = getRegionSize(server1, regionName);
     server2Size = getRegionSize(server2, regionName);
-    getLogWriter().info("region sizes after restart server2: "+server1Size+","+server2Size);
+    LogWriterUtils.getLogWriter().info("region sizes after restart server2: "+server1Size+","+server2Size);
     assertEquals(server1Size, server2Size);
 
     // add a cacheWriter for server to stop after created 15 keys
@@ -2252,7 +2259,7 @@ public void testOneServer() throws CacheException, InterruptedException {
 
     new_server1Size = getRegionSize(server1, regionName);
     int new_server2Size = getRegionSize(server2, regionName);
-    getLogWriter().info("region sizes after restart server2: "+new_server1Size+","+new_server2Size);
+    LogWriterUtils.getLogWriter().info("region sizes after restart server2: "+new_server1Size+","+new_server2Size);
     assertEquals(server1Size+15, new_server1Size);
     assertEquals(server2Size+15, new_server2Size);
     server1.invoke(removeExceptionTag1(expectedExceptions));
@@ -2289,7 +2296,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     int serverPorts[] = AvailablePortHelper.getRandomAvailableTCPPorts(2);
     final int serverPort1 = serverPorts[0];
     final int serverPort2 = serverPorts[1];
-    final String serverHost = getServerHostName(server1.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server1.getHost());
 
     // set <true, false> means <PR=true, notifyBySubscription=false> to test local-invalidates
     createBridgeServer(server1, regionName, serverPort1, true, 0, "ds1");
@@ -2308,7 +2315,7 @@ public void testOneServer() throws CacheException, InterruptedException {
         region.getAttributesMutator().addCacheListener(new MyListener(false));
         
         region.registerInterest("ALL_KEYS");
-        getLogWriter().info("client2 registerInterest ALL_KEYS at "+region.getFullPath());
+        LogWriterUtils.getLogWriter().info("client2 registerInterest ALL_KEYS at "+region.getFullPath());
       }
     });
     
@@ -2327,27 +2334,27 @@ public void testOneServer() throws CacheException, InterruptedException {
         int c2Size = getRegionSize(client2, regionName);
         int s1Size = getRegionSize(server1, regionName);
         int s2Size = getRegionSize(server2, regionName);
-        getLogWriter().info("region sizes: "+c1Size+","+c2Size+","+s1Size+","+s2Size);
+        LogWriterUtils.getLogWriter().info("region sizes: "+c1Size+","+c2Size+","+s1Size+","+s2Size);
         if (c1Size != numberOfEntries) {
-          getLogWriter().info("waiting for client1 to get all updates");
+          LogWriterUtils.getLogWriter().info("waiting for client1 to get all updates");
           return false;
         }
         if (c2Size != numberOfEntries) {
-          getLogWriter().info("waiting for client2 to get all updates");
+          LogWriterUtils.getLogWriter().info("waiting for client2 to get all updates");
           return false;
         }
         if (s1Size != numberOfEntries) {
-          getLogWriter().info("waiting for server1 to get all updates");
+          LogWriterUtils.getLogWriter().info("waiting for server1 to get all updates");
           return false;
         }
         if (s2Size != numberOfEntries) {
-          getLogWriter().info("waiting for server2 to get all updates");
+          LogWriterUtils.getLogWriter().info("waiting for server2 to get all updates");
           return false;
         }
         return true;
       }
     };
-    waitForCriterion(waitForSizes, 10000, 1000, true);
+    Wait.waitForCriterion(waitForSizes, 10000, 1000, true);
 
     client1Size = getRegionSize(client1, regionName);
     client2Size = getRegionSize(client2, regionName);
@@ -2390,20 +2397,20 @@ public void testOneServer() throws CacheException, InterruptedException {
 
     // server2 will closeCache after creating 10 keys
     
-    DistributedTestCase.join(async1, 30 * 1000, getLogWriter());
+    ThreadUtils.join(async1, 30 * 1000);
     if (async1.exceptionOccurred()) {
-      fail("Aync1 get exceptions:", async1.getException());
+      Assert.fail("Aync1 get exceptions:", async1.getException());
     }
 
     client1Size = getRegionSize(client1, regionName);
     // client2Size maybe more than client1Size
     client2Size = getRegionSize(client2, regionName);
     server1Size = getRegionSize(server1, regionName);
-    getLogWriter().info("region sizes: "+client1Size+","+client2Size+","+server1Size);
+    LogWriterUtils.getLogWriter().info("region sizes: "+client1Size+","+client2Size+","+server1Size);
 //    assertEquals(server1Size, client1Size);
 
     // restart server2 
-    getLogWriter().info("restarting server 2");
+    LogWriterUtils.getLogWriter().info("restarting server 2");
     createBridgeServer(server2, regionName, serverPort2, true, 0, "ds1");
     
     // Test Case1: Trigger singleHop putAll. Stop server2 in middle. 
@@ -2414,7 +2421,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     client2Size = getRegionSize(client2, regionName);
     server1Size = getRegionSize(server1, regionName);
     server2Size = getRegionSize(server2, regionName);
-    getLogWriter().info("region sizes after server2 restarted: "+client1Size+","+client2Size+","+server1Size);
+    LogWriterUtils.getLogWriter().info("region sizes after server2 restarted: "+client1Size+","+client2Size+","+server1Size);
     assertEquals(150, client1Size);
     assertEquals(client2Size, server1Size);
     assertEquals(client2Size, server2Size);
@@ -2442,7 +2449,7 @@ public void testOneServer() throws CacheException, InterruptedException {
 
     // Test Case 2: based on case 1, but this time, there should be no X keys 
     // created on server2.
-    getLogWriter().info("region sizes after re-run the putAll: "+new_client1Size+","+new_client2Size+","+new_server1Size);
+    LogWriterUtils.getLogWriter().info("region sizes after re-run the putAll: "+new_client1Size+","+new_client2Size+","+new_server1Size);
     assertEquals(server1Size+numberOfEntries/2, new_server1Size);
     assertEquals(client1Size+numberOfEntries/2, new_client1Size);
     assertEquals(client2Size+numberOfEntries/2, new_client2Size);
@@ -2451,7 +2458,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     createBridgeServer(server2, regionName, serverPort2, true, 0, "ds1");
     server1Size = getRegionSize(server1, regionName);
     server2Size = getRegionSize(server2, regionName);
-    getLogWriter().info("region sizes after restart server2: "+server1Size+","+server2Size);
+    LogWriterUtils.getLogWriter().info("region sizes after restart server2: "+server1Size+","+server2Size);
     assertEquals(server1Size, server2Size);
 
     // add a cacheWriter for server to fail putAll after it created cacheWriterAllowedKeyNum keys
@@ -2483,7 +2490,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     int oncemore_client2Size = getRegionSize(client2, regionName);
     int oncemore_server1Size = getRegionSize(server1, regionName);
     int oncemore_server2Size = getRegionSize(server2, regionName);
-    getLogWriter().info("region sizes in once more test: "
+    LogWriterUtils.getLogWriter().info("region sizes in once more test: "
         +oncemore_client1Size+","+oncemore_client2Size+","+oncemore_server1Size+","+oncemore_server2Size);
     int delta_at_server = oncemore_server1Size - server1Size;
     assertEquals(new_client1Size+delta_at_server, oncemore_client1Size);
@@ -2521,7 +2528,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     int serverPorts[] = AvailablePortHelper.getRandomAvailableTCPPorts(2);
     final int serverPort1 = serverPorts[0];
     final int serverPort2 = serverPorts[1];
-    final String serverHost = getServerHostName(server1.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server1.getHost());
 
     // set <true, false> means <PR=true, notifyBySubscription=false> to test local-invalidates
     createBridgeServer(server1, regionName, serverPort1, true, 1, "ds1");
@@ -2540,7 +2547,7 @@ public void testOneServer() throws CacheException, InterruptedException {
         region.getAttributesMutator().addCacheListener(new MyListener(false));
         
         region.registerInterest("ALL_KEYS");
-        getLogWriter().info("client2 registerInterest ALL_KEYS at "+region.getFullPath());
+        LogWriterUtils.getLogWriter().info("client2 registerInterest ALL_KEYS at "+region.getFullPath());
       }
     });
     
@@ -2559,27 +2566,27 @@ public void testOneServer() throws CacheException, InterruptedException {
         int c2Size = getRegionSize(client2, regionName);
         int s1Size = getRegionSize(server1, regionName);
         int s2Size = getRegionSize(server2, regionName);
-        getLogWriter().info("region sizes: "+c1Size+","+c2Size+","+s1Size+","+s2Size);
+        LogWriterUtils.getLogWriter().info("region sizes: "+c1Size+","+c2Size+","+s1Size+","+s2Size);
         if (c1Size != numberOfEntries) {
-          getLogWriter().info("waiting for client1 to get all updates");
+          LogWriterUtils.getLogWriter().info("waiting for client1 to get all updates");
           return false;
         }
         if (c2Size != numberOfEntries) {
-          getLogWriter().info("waiting for client2 to get all updates");
+          LogWriterUtils.getLogWriter().info("waiting for client2 to get all updates");
           return false;
         }
         if (s1Size != numberOfEntries) {
-          getLogWriter().info("waiting for server1 to get all updates");
+          LogWriterUtils.getLogWriter().info("waiting for server1 to get all updates");
           return false;
         }
         if (s2Size != numberOfEntries) {
-          getLogWriter().info("waiting for server2 to get all updates");
+          LogWriterUtils.getLogWriter().info("waiting for server2 to get all updates");
           return false;
         }
         return true;
       }
     };
-    waitForCriterion(waitForSizes, 10000, 1000, true);
+    Wait.waitForCriterion(waitForSizes, 10000, 1000, true);
 
     client1Size = getRegionSize(client1, regionName);
     client2Size = getRegionSize(client2, regionName);
@@ -2616,9 +2623,9 @@ public void testOneServer() throws CacheException, InterruptedException {
 
     // server2 will closeCache after created 10 keys
     
-    DistributedTestCase.join(async1, 30 * 1000, getLogWriter());
+    ThreadUtils.join(async1, 30 * 1000);
     if (async1.exceptionOccurred()) {
-      fail("Aync1 get exceptions:", async1.getException());
+      Assert.fail("Aync1 get exceptions:", async1.getException());
     }
 
     client1Size = getRegionSize(client1, regionName);
@@ -2626,7 +2633,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     client2Size = getRegionSize(client2, regionName);
     server1Size = getRegionSize(server1, regionName);
     // putAll should succeed after retry
-    getLogWriter().info("region sizes: "+client1Size+","+client2Size+","+server1Size);
+    LogWriterUtils.getLogWriter().info("region sizes: "+client1Size+","+client2Size+","+server1Size);
     assertEquals(server1Size, client1Size);
     assertEquals(server1Size, client2Size);
 
@@ -2635,7 +2642,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     
     server1Size = getRegionSize(server1, regionName);
     server2Size = getRegionSize(server2, regionName);
-    getLogWriter().info("region sizes after server2 restarted: "+client1Size+","+client2Size+","+server1Size);
+    LogWriterUtils.getLogWriter().info("region sizes after server2 restarted: "+client1Size+","+client2Size+","+server1Size);
     assertEquals(client2Size, server1Size);
     assertEquals(client2Size, server2Size);
 
@@ -2653,7 +2660,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     int new_client2Size = getRegionSize(client2, regionName);
 
     // putAll should succeed, all the numbers should match
-    getLogWriter().info("region sizes after re-run the putAll: "+new_client1Size+","+new_client2Size+","+new_server1Size);
+    LogWriterUtils.getLogWriter().info("region sizes after re-run the putAll: "+new_client1Size+","+new_client2Size+","+new_server1Size);
     assertEquals(new_server1Size, new_client1Size);
     assertEquals(new_server1Size, new_client2Size);
 
@@ -2678,7 +2685,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     final String regionName = getUniqueName();
     
     final int[] serverPorts = AvailablePortHelper.getRandomAvailableTCPPorts(3);
-    final String serverHost = getServerHostName(server1.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server1.getHost());
     
     final SharedCounter sc_server1 = new SharedCounter("server1");
     final SharedCounter sc_server2 = new SharedCounter("server2");
@@ -2709,7 +2716,7 @@ public void testOneServer() throws CacheException, InterruptedException {
         assertNotNull(getRootRegion().getSubregion(regionName));
       }
       catch (CacheException ex) {
-        fail("While creating Region on Edge", ex);
+        Assert.fail("While creating Region on Edge", ex);
       }
     }
     
@@ -2759,7 +2766,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     int server1Size = getRegionSize(server1, regionName);
     int server2Size = getRegionSize(server2, regionName);
     int server3Size = getRegionSize(server2, regionName);
-    getLogWriter().info("region sizes: "+client1Size+","+server1Size+","+server2Size+","+server3Size);
+    LogWriterUtils.getLogWriter().info("region sizes: "+client1Size+","+server1Size+","+server2Size+","+server3Size);
 
     AsyncInvocation async1 = client1.invokeAsync(new CacheSerializableRunnable(title+"client1 add listener and putAll") {
       public void run2() throws CacheException {
@@ -2771,9 +2778,9 @@ public void testOneServer() throws CacheException, InterruptedException {
 
     // server1 and server2 will closeCache after created 10 keys
    
-    DistributedTestCase.join(async1, 30 * 1000, getLogWriter());
+    ThreadUtils.join(async1, 30 * 1000);
     if (async1.exceptionOccurred()) {
-      fail("Aync1 get exceptions:", async1.getException());
+      Assert.fail("Aync1 get exceptions:", async1.getException());
     }
     
     server3.invoke(new CacheSerializableRunnable(title
@@ -2781,14 +2788,14 @@ public void testOneServer() throws CacheException, InterruptedException {
       public void run2() throws CacheException {
         Region r = getRootRegion().getSubregion(regionName);
         MyListener l = (MyListener)r.getAttributes().getCacheListeners()[0];
-        getLogWriter().info("event counters : "+l.sc);
+        LogWriterUtils.getLogWriter().info("event counters : "+l.sc);
         assertEquals(numberOfEntries, l.sc.num_create_event);
         assertEquals(0, l.sc.num_update_event);
       }
     });
 
 
-    getLogWriter().info("event counters : "+myListener.sc);
+    LogWriterUtils.getLogWriter().info("event counters : "+myListener.sc);
     assertEquals(numberOfEntries, myListener.sc.num_create_event);
     assertEquals(0, myListener.sc.num_update_event);
     
@@ -2808,9 +2815,9 @@ public void testOneServer() throws CacheException, InterruptedException {
    */
   public void test2FailOverDistributedServer() throws CacheException,
       InterruptedException {
-    addExpectedException("Broken pipe");
-    addExpectedException("Connection reset");
-    addExpectedException("Unexpected IOException");
+    IgnoredException.addIgnoredException("Broken pipe");
+    IgnoredException.addIgnoredException("Connection reset");
+    IgnoredException.addIgnoredException("Unexpected IOException");
     final String title = "test2FailOverDistributedServer:";
 //    disconnectAllFromDS();
     
@@ -2824,7 +2831,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     int serverPorts[] = AvailablePortHelper.getRandomAvailableTCPPorts(2);
     final int serverPort1 = serverPorts[0];
     final int serverPort2 = serverPorts[1];
-    final String serverHost = getServerHostName(server1.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server1.getHost());
 
     // set notifyBySubscription=true to test register interest
     createBridgeServer(server1, regionName, serverPort1, false, 0, null);
@@ -2853,7 +2860,7 @@ public void testOneServer() throws CacheException, InterruptedException {
         
         // registerInterest for ALL_KEYS
         region.registerInterest("ALL_KEYS");
-        getLogWriter().info("client1 registerInterest ALL_KEYS at "+region.getFullPath());
+        LogWriterUtils.getLogWriter().info("client1 registerInterest ALL_KEYS at "+region.getFullPath());
       }
     });
 
@@ -2864,7 +2871,7 @@ public void testOneServer() throws CacheException, InterruptedException {
         
         // registerInterest for ALL_KEYS
         region.registerInterest("ALL_KEYS");
-        getLogWriter().info("client2 registerInterest ALL_KEYS at "+region.getFullPath());
+        LogWriterUtils.getLogWriter().info("client2 registerInterest ALL_KEYS at "+region.getFullPath());
       }
     });
 
@@ -2877,7 +2884,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     });
 
 
-    pause(2000);
+    Wait.pause(2000);
     server1.invoke(new CacheSerializableRunnable(title
         + "stop Bridge Server 1") {
       public void run2() throws CacheException {
@@ -2885,7 +2892,7 @@ public void testOneServer() throws CacheException, InterruptedException {
       }
     });
 
-    DistributedTestCase.join(async1, 30 * 1000, getLogWriter());
+    ThreadUtils.join(async1, 30 * 1000);
 
     // verify bridge server 2 for asyn keys
     server2.invoke(new CacheSerializableRunnable(title
@@ -2924,7 +2931,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     int serverPorts[] = AvailablePortHelper.getRandomAvailableTCPPorts(2);
     final int serverPort1 = serverPorts[0];
     final int serverPort2 = serverPorts[1];
-    final String serverHost = getServerHostName(server1.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server1.getHost());
 
     // set notifyBySubscription=true to test register interest
     createBridgeServer(server1, regionName, serverPort1, false, 0, null);
@@ -2955,7 +2962,7 @@ public void testOneServer() throws CacheException, InterruptedException {
         try {
           doPutAll(regionName, "key-", thousandEntries);
         } catch (Exception e) {
-          getLogWriter().info(title + "Expected SocketTimeOut:"+e.getMessage());
+          LogWriterUtils.getLogWriter().info(title + "Expected SocketTimeOut:"+e.getMessage());
           exceptionTriggered = true;
         }
         assertTrue(exceptionTriggered);
@@ -2971,9 +2978,9 @@ public void testOneServer() throws CacheException, InterruptedException {
    * Tests while putAll timeout at endpoint1 and switch to endpoint2
    */
   public void testEndPointSwitch() throws CacheException, InterruptedException {
-    addExpectedException("Broken pipe");
-    addExpectedException("Connection reset");
-    addExpectedException("Unexpected IOException");
+    IgnoredException.addIgnoredException("Broken pipe");
+    IgnoredException.addIgnoredException("Connection reset");
+    IgnoredException.addIgnoredException("Unexpected IOException");
     final String title = "testEndPointSwitch:";
     disconnectAllFromDS();
 
@@ -2986,7 +2993,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     
     final int serverPort1 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
     final int serverPort2 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String serverHost = getServerHostName(server1.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server1.getHost());
 
     // set notifyBySubscription=true to test register interest
     createBridgeServer(server1, regionName, serverPort1, false, 0, null);
@@ -3008,7 +3015,7 @@ public void testOneServer() throws CacheException, InterruptedException {
         Region region = getRootRegion().getSubregion(regionName);
         region.getAttributesMutator().addCacheListener(new MyListener(false));
         region.registerInterest("ALL_KEYS");
-        getLogWriter().info("client2 registerInterest ALL_KEYS at "+region.getFullPath());
+        LogWriterUtils.getLogWriter().info("client2 registerInterest ALL_KEYS at "+region.getFullPath());
       }
     });
     
@@ -3018,7 +3025,7 @@ public void testOneServer() throws CacheException, InterruptedException {
         try {
           doPutAll(regionName, title, testEndPointSwitchNumber);
         } catch (Exception e) {
-          getLogWriter().info(title + "Expected SocketTimeOut"+e.getMessage());
+          LogWriterUtils.getLogWriter().info(title + "Expected SocketTimeOut"+e.getMessage());
         }
       }
     });
@@ -3106,7 +3113,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     
     final int serverPort1 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
     final int serverPort2 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String serverHost = getServerHostName(server1.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server1.getHost());
 
     // set notifyBySubscription=true to test register interest
     createBridgeServer(server1, regionName, serverPort1, false, 0, null);
@@ -3124,7 +3131,7 @@ public void testOneServer() throws CacheException, InterruptedException {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
         region.registerInterest("ALL_KEYS");
-        getLogWriter().info("client1 registerInterest ALL_KEYS at "+region.getFullPath());
+        LogWriterUtils.getLogWriter().info("client1 registerInterest ALL_KEYS at "+region.getFullPath());
       }
     });
 
@@ -3133,7 +3140,7 @@ public void testOneServer() throws CacheException, InterruptedException {
         Region region = getRootRegion().getSubregion(regionName);
         region.getAttributesMutator().addCacheListener(new MyListener(false));
         region.registerInterest("ALL_KEYS");
-        getLogWriter().info("client2 registerInterest ALL_KEYS at "+region.getFullPath());
+        LogWriterUtils.getLogWriter().info("client2 registerInterest ALL_KEYS at "+region.getFullPath());
       }
     });
 
@@ -3153,7 +3160,7 @@ public void testOneServer() throws CacheException, InterruptedException {
       }
     });
 
-    pause(2000);
+    Wait.pause(2000);
     server1.invoke(new CacheSerializableRunnable(title
         + "stop Bridge Server 1") {
       public void run2() throws CacheException {
@@ -3161,8 +3168,8 @@ public void testOneServer() throws CacheException, InterruptedException {
       }
     });
 
-    DistributedTestCase.join(async1, 30 * 1000, getLogWriter());
-    DistributedTestCase.join(async2, 30 * 1000, getLogWriter());
+    ThreadUtils.join(async1, 30 * 1000);
+    ThreadUtils.join(async2, 30 * 1000);
 
     // verify client 2 for asyn keys
     client2.invokeAsync(new CacheSerializableRunnable(title
@@ -3234,18 +3241,18 @@ public void testOneServer() throws CacheException, InterruptedException {
         }
         region.putAll(map, "putAllCallback");
         try {
-          getLogWriter().info("before commit TX1");
+          LogWriterUtils.getLogWriter().info("before commit TX1");
           //tx.commit();
-          getLogWriter().info("TX1 committed");
+          LogWriterUtils.getLogWriter().info("TX1 committed");
         }
         catch (CommitConflictException e) {
-          getLogWriter().info("TX1 rollbacked");
+          LogWriterUtils.getLogWriter().info("TX1 rollbacked");
         }
       }
     });
 
     // we have to pause a while to let TX1 finish earlier
-    pause(500);
+    Wait.pause(500);
     // TX2: server2 do a putAll
     AsyncInvocation async2 = server2.invokeAsync(new CacheSerializableRunnable(
         title + "TX2: async putAll from server2") {
@@ -3261,12 +3268,12 @@ public void testOneServer() throws CacheException, InterruptedException {
         }
         region.putAll(map, "putAllCallback");
         try {
-          getLogWriter().info("before commit TX2");
+          LogWriterUtils.getLogWriter().info("before commit TX2");
           //tx.commit();
-          getLogWriter().info("TX2 committed");
+          LogWriterUtils.getLogWriter().info("TX2 committed");
         }
         catch (CommitConflictException e) {
-          getLogWriter().info("TX2 rollbacked");
+          LogWriterUtils.getLogWriter().info("TX2 rollbacked");
         }
       }
     });
@@ -3289,19 +3296,19 @@ public void testOneServer() throws CacheException, InterruptedException {
         }
         region.putAll(map, "putAllCallback");
         try {
-          getLogWriter().info("before commit TX3");
+          LogWriterUtils.getLogWriter().info("before commit TX3");
           //tx.commit();
-          getLogWriter().info("TX3 committed");
+          LogWriterUtils.getLogWriter().info("TX3 committed");
         }
         catch (CommitConflictException e) {
-          getLogWriter().info("TX3 rollbacked");
+          LogWriterUtils.getLogWriter().info("TX3 rollbacked");
         }
       }
     });
 
-    DistributedTestCase.join(async1, 30 * 1000, getLogWriter());
-    DistributedTestCase.join(async2, 30 * 1000, getLogWriter());
-    DistributedTestCase.join(async3, 30 * 1000, getLogWriter());
+    ThreadUtils.join(async1, 30 * 1000);
+    ThreadUtils.join(async2, 30 * 1000);
+    ThreadUtils.join(async3, 30 * 1000);
 
     // verify server 2 for asyn keys
     server2.invoke(new CacheSerializableRunnable(title
@@ -3324,7 +3331,7 @@ public void testOneServer() throws CacheException, InterruptedException {
             else if (obj.getPrice() == i + numberOfEntries * 2) {
               tx_no = 3;
             }
-            getLogWriter().info("Verifying TX:" + tx_no);
+            LogWriterUtils.getLogWriter().info("Verifying TX:" + tx_no);
           }
           if (tx_no == 1) {
             assertEquals(i, obj.getPrice());
@@ -3363,7 +3370,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     
     final int serverPort1 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
     final int serverPort2 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String serverHost = getServerHostName(server1.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server1.getHost());
 
     // set notifyBySubscription=true to test register interest
     createBridgeServer(server1, regionName, serverPort1, true, 0, null);
@@ -3390,7 +3397,7 @@ public void testOneServer() throws CacheException, InterruptedException {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
         region.registerInterest("ALL_KEYS");
-        getLogWriter().info("client2 registerInterest ALL_KEYS at "+region.getFullPath());
+        LogWriterUtils.getLogWriter().info("client2 registerInterest ALL_KEYS at "+region.getFullPath());
       }
     });
 
@@ -3405,7 +3412,7 @@ public void testOneServer() throws CacheException, InterruptedException {
         for (Object key : entries.keySet()) {
           RegionEntry internalRegionEntry = entries.getEntry(key);
           VersionTag tag = internalRegionEntry.getVersionStamp().asVersionTag();
-          getLogWriter().info("Entry version tag on client for " + key + ": " + tag);
+          LogWriterUtils.getLogWriter().info("Entry version tag on client for " + key + ": " + tag);
           versions.add(tag);
         }
         
@@ -3425,7 +3432,7 @@ public void testOneServer() throws CacheException, InterruptedException {
         for (Object key : entries.keySet()) {
           RegionEntry internalRegionEntry = entries.getEntry(key);
           VersionTag tag = internalRegionEntry.getVersionStamp().asVersionTag();
-          getLogWriter().info("Entry version tag on client for " + key + ": " + tag);
+          LogWriterUtils.getLogWriter().info("Entry version tag on client for " + key + ": " + tag);
           versions.add(tag);
         }
         return versions;
@@ -3433,9 +3440,9 @@ public void testOneServer() throws CacheException, InterruptedException {
     });
 
     assertEquals(numberOfEntries*2, client1Versions.size());
-    getLogWriter().info(Arrays.toString(client1Versions.toArray()));
+    LogWriterUtils.getLogWriter().info(Arrays.toString(client1Versions.toArray()));
     
-    getLogWriter().info(Arrays.toString(client2Versions.toArray()));
+    LogWriterUtils.getLogWriter().info(Arrays.toString(client2Versions.toArray()));
     
     for (VersionTag tag : client1Versions) {
       if (!client2Versions.contains(tag)) {
@@ -3463,7 +3470,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     
     final int serverPort1 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
     final int serverPort2 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String serverHost = getServerHostName(server1.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server1.getHost());
 
     // set notifyBySubscription=true to test register interest
     createBridgeServer(server1, regionName, serverPort1, true, 0, null);
@@ -3493,7 +3500,7 @@ public void testOneServer() throws CacheException, InterruptedException {
         Region region = getRootRegion().getSubregion(regionName);
         region.registerInterest("ALL_KEYS");
         assertEquals(numberOfEntries, region.size());
-        getLogWriter().info("client2 registerInterest ALL_KEYS at "+region.getFullPath());
+        LogWriterUtils.getLogWriter().info("client2 registerInterest ALL_KEYS at "+region.getFullPath());
       }
     });
 
@@ -3509,7 +3516,7 @@ public void testOneServer() throws CacheException, InterruptedException {
         for (Object key : entries.keySet()) {
           RegionEntry internalRegionEntry = entries.getEntry(key);
           VersionTag tag = internalRegionEntry.getVersionStamp().asVersionTag();
-          getLogWriter().info("Entry version tag on client for " + key + ": " + tag);
+          LogWriterUtils.getLogWriter().info("Entry version tag on client for " + key + ": " + tag);
           versions.add(tag);
         }
         
@@ -3529,7 +3536,7 @@ public void testOneServer() throws CacheException, InterruptedException {
         for (Object key : entries.keySet()) {
           RegionEntry internalRegionEntry = entries.getEntry(key);
           VersionTag tag = internalRegionEntry.getVersionStamp().asVersionTag();
-          getLogWriter().info("Entry version tag on client for " + key + ": " + tag);
+          LogWriterUtils.getLogWriter().info("Entry version tag on client for " + key + ": " + tag);
           versions.add(tag);
         }
         return versions;
@@ -3537,9 +3544,9 @@ public void testOneServer() throws CacheException, InterruptedException {
     });
 
     assertEquals(numberOfEntries*2, client1RAVersions.size());
-    getLogWriter().info(Arrays.toString(client1RAVersions.toArray()));
+    LogWriterUtils.getLogWriter().info(Arrays.toString(client1RAVersions.toArray()));
     
-    getLogWriter().info(Arrays.toString(client2RAVersions.toArray()));
+    LogWriterUtils.getLogWriter().info(Arrays.toString(client2RAVersions.toArray()));
     
     for (VersionTag tag : client1RAVersions) {
       if (!client2RAVersions.contains(tag)) {
@@ -3566,7 +3573,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     final int serverPort1 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
     final int serverPort2 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
     final int serverPort3 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String serverHost = getServerHostName(server1.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server1.getHost());
 
     // set notifyBySubscription=true to test register interest
     createBridgeServer(server1, regionName, serverPort1, true, 1, null);
@@ -3585,7 +3592,7 @@ public void testOneServer() throws CacheException, InterruptedException {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
         region.registerInterest("ALL_KEYS");
-        getLogWriter().info("client2 registerInterest ALL_KEYS at "+region.getFullPath());
+        LogWriterUtils.getLogWriter().info("client2 registerInterest ALL_KEYS at "+region.getFullPath());
       }
     });
 
@@ -3612,7 +3619,7 @@ public void testOneServer() throws CacheException, InterruptedException {
           for (Object key : entries.keySet()) {
             RegionEntry internalRegionEntry = entries.getEntry(key);
             VersionTag tag = internalRegionEntry.getVersionStamp().asVersionTag();
-            getLogWriter().info("Entry version tag on server1:" + tag);
+            LogWriterUtils.getLogWriter().info("Entry version tag on server1:" + tag);
             versions.add(key + " " + tag);
           }
         }
@@ -3622,7 +3629,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     });
 
     // Let client be updated with all keys.
-    pause(1000);
+    Wait.pause(1000);
     
     actualVersions = (List<String>) client1.invoke(new SerializableCallable(title+"client2 versions collection") {
       
@@ -3643,10 +3650,10 @@ public void testOneServer() throws CacheException, InterruptedException {
       }
     });
 
-    getLogWriter().info(Arrays.toString(expectedVersions.toArray()));
+    LogWriterUtils.getLogWriter().info(Arrays.toString(expectedVersions.toArray()));
     
     assertEquals(numberOfEntries*2, actualVersions.size());
-    getLogWriter().info(Arrays.toString(actualVersions.toArray()));
+    LogWriterUtils.getLogWriter().info(Arrays.toString(actualVersions.toArray()));
     
     for (String keyTag : expectedVersions) {
       if (!actualVersions.contains(keyTag)) {
@@ -3674,7 +3681,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     final int serverPort1 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
     final int serverPort2 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
     final int serverPort3 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String serverHost = getServerHostName(server1.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server1.getHost());
 
     // set notifyBySubscription=true to test register interest
     createBridgeServer(server1, regionName, serverPort1, true, 1, null);
@@ -3693,7 +3700,7 @@ public void testOneServer() throws CacheException, InterruptedException {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
         region.registerInterest("ALL_KEYS");
-        getLogWriter().info("client2 registerInterest ALL_KEYS at "+region.getFullPath());
+        LogWriterUtils.getLogWriter().info("client2 registerInterest ALL_KEYS at "+region.getFullPath());
       }
     });
 
@@ -3722,7 +3729,7 @@ public void testOneServer() throws CacheException, InterruptedException {
           for (Object key : entries.keySet()) {
             RegionEntry internalRegionEntry = entries.getEntry(key);
             VersionTag tag = internalRegionEntry.getVersionStamp().asVersionTag();
-            getLogWriter().info("Entry version tag on server1:" + tag);
+            LogWriterUtils.getLogWriter().info("Entry version tag on server1:" + tag);
             versions.add(key + " " + tag);
           }
         }
@@ -3732,7 +3739,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     });
 
     // Let client be updated with all keys.
-    pause(1000);
+    Wait.pause(1000);
     
     actualRAVersions = (List<String>) client1.invoke(new SerializableCallable(title+"client2 versions collection") {
       
@@ -3754,10 +3761,10 @@ public void testOneServer() throws CacheException, InterruptedException {
       }
     });
 
-    getLogWriter().info(Arrays.toString(expectedRAVersions.toArray()));
+    LogWriterUtils.getLogWriter().info(Arrays.toString(expectedRAVersions.toArray()));
     
     assertEquals(numberOfEntries*2, actualRAVersions.size());
-    getLogWriter().info(Arrays.toString(actualRAVersions.toArray()));
+    LogWriterUtils.getLogWriter().info(Arrays.toString(actualRAVersions.toArray()));
     
     for (String keyTag : expectedRAVersions) {
       if (!actualRAVersions.contains(keyTag)) {
@@ -3784,7 +3791,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     
     final int serverPort1 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
     final int serverPort2 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String serverHost = getServerHostName(server1.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server1.getHost());
 
     // set notifyBySubscription=true to test register interest
     createBridgeServer(server1, regionName, serverPort1, false, 0, null);
@@ -3818,7 +3825,7 @@ public void testOneServer() throws CacheException, InterruptedException {
         for (Object key : entries.keySet()) {
           RegionEntry internalRegionEntry = entries.getEntry(key);
           VersionTag tag = internalRegionEntry.getVersionStamp().asVersionTag();
-          getLogWriter().info("Entry version tag on client:" + tag);
+          LogWriterUtils.getLogWriter().info("Entry version tag on client:" + tag);
           versions.add(tag);
         }
         
@@ -3846,9 +3853,9 @@ public void testOneServer() throws CacheException, InterruptedException {
     });
 
     assertEquals(numberOfEntries*2, client1Versions.size());
-    getLogWriter().info(Arrays.toString(client1Versions.toArray()));
+    LogWriterUtils.getLogWriter().info(Arrays.toString(client1Versions.toArray()));
     
-    getLogWriter().info(Arrays.toString(client2Versions.toArray()));
+    LogWriterUtils.getLogWriter().info(Arrays.toString(client2Versions.toArray()));
     
     for (VersionTag tag : client2Versions) {
       tag.setMemberID(null);
@@ -3865,7 +3872,7 @@ public void testOneServer() throws CacheException, InterruptedException {
       public void run2() throws CacheException {
         // Create DS
         Properties config = new Properties();
-        config.setProperty("locators", "localhost["+getDUnitLocatorPort()+"]");
+        config.setProperty("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
         getSystem(config);
 
         // Create Region
@@ -3919,9 +3926,9 @@ public void testOneServer() throws CacheException, InterruptedException {
         }
         try {
           int retPort = startBridgeServer(serverPort);
-          getLogWriter().info("Cache Server Started:"+retPort+":"+serverPort);
+          LogWriterUtils.getLogWriter().info("Cache Server Started:"+retPort+":"+serverPort);
         } catch (Exception e) {
-          fail("While starting CacheServer", e);
+          Assert.fail("While starting CacheServer", e);
         }
       }
     });
@@ -3946,7 +3953,7 @@ public void testOneServer() throws CacheException, InterruptedException {
 
         try {
           getCache();
-          addExpectedException("java.net.ConnectException||java.net.SocketException");
+          IgnoredException.addIgnoredException("java.net.ConnectException||java.net.SocketException");
           if (readTimeOut>0) {
                 PoolFactory pf = PoolManager.createFactory();
             for(int i=0; i<serverPorts.length; i++) {
@@ -3966,7 +3973,7 @@ public void testOneServer() throws CacheException, InterruptedException {
           assertNotNull(getRootRegion().getSubregion(regionName));
         }
         catch (CacheException ex) {
-          fail("While creating Region on Edge", ex);
+          Assert.fail("While creating Region on Edge", ex);
         }
       }
     });
@@ -4175,7 +4182,7 @@ public void testOneServer() throws CacheException, InterruptedException {
         localregion.put(key, newValue);
         num_updates ++;
       }
-      getLogWriter().info("CQListener:TestObject:" + key + ":" + newValue);
+      LogWriterUtils.getLogWriter().info("CQListener:TestObject:" + key + ":" + newValue);
     }
 
     public void close() {
@@ -4241,7 +4248,7 @@ public void testOneServer() throws CacheException, InterruptedException {
       if (closeCacheAtItem != -1 && sc.num_create_event >= closeCacheAtItem) {
         closeCacheAsync(vm);
       }
-      getLogWriter().fine(
+      LogWriterUtils.getLogWriter().fine(
           "MyListener:afterCreate " + event.getKey() + ":"
               + event.getNewValue()+":num_create_event="+sc.num_create_event
               + ":eventID="+((EntryEventImpl)event).getEventId());
@@ -4262,7 +4269,7 @@ public void testOneServer() throws CacheException, InterruptedException {
       if (event.getKey().toString().startsWith("testEndPointSwitch")) {
         num_testEndPointSwitch++;
         if (num_testEndPointSwitch == testEndPointSwitchNumber) {
-          getLogWriter().info("testEndPointSwitch received expected events");
+          LogWriterUtils.getLogWriter().info("testEndPointSwitch received expected events");
           synchronized(lockObject3) {
             lockObject3.notify();
           }
@@ -4271,7 +4278,7 @@ public void testOneServer() throws CacheException, InterruptedException {
       if (event.getKey().toString().startsWith("testHADRFailOver")) {
         num_testHADRFailOver++;
         if (num_testHADRFailOver == thousandEntries*2) {
-          getLogWriter().info("testHADRFailOver received expected events");
+          LogWriterUtils.getLogWriter().info("testHADRFailOver received expected events");
           synchronized(lockObject4) {
             lockObject4.notify();
           }
@@ -4281,7 +4288,7 @@ public void testOneServer() throws CacheException, InterruptedException {
 
     public void afterUpdate(EntryEvent event) {
       sc.num_update_event++;
-      getLogWriter().fine(
+      LogWriterUtils.getLogWriter().fine(
           "MyListener:afterUpdate " + event.getKey() + ":"
               + event.getNewValue()+":"+event.getOldValue()
               +":num_update_event="+sc.num_update_event
@@ -4304,7 +4311,7 @@ public void testOneServer() throws CacheException, InterruptedException {
         if (event.getOldValue() !=null) {
           num_oldValueInAfterUpdate++;
           if (num_oldValueInAfterUpdate == numberOfEntries) {
-            getLogWriter().info("received expected OldValue events");
+            LogWriterUtils.getLogWriter().info("received expected OldValue events");
             synchronized(lockObject) {
               lockObject.notify();
             }
@@ -4315,7 +4322,7 @@ public void testOneServer() throws CacheException, InterruptedException {
 
     public void afterInvalidate(EntryEvent event) {
       sc.num_invalidate_event++;
-      getLogWriter()
+      LogWriterUtils.getLogWriter()
           .info("local invalidate is triggered for " + event.getKey()+":num_invalidte_event="+sc.num_invalidate_event);
     }
 
@@ -4324,7 +4331,7 @@ public void testOneServer() throws CacheException, InterruptedException {
       if (event.getOperation().isRemoveAll()) {
         assertEquals("removeAllCallback", event.getCallbackArgument());
       }
-      getLogWriter()
+      LogWriterUtils.getLogWriter()
           .info("local destroy is triggered for " + event.getKey()+":num_invalidte_event="+sc.num_destroy_event);
     }
   }
@@ -4353,7 +4360,7 @@ public void testOneServer() throws CacheException, InterruptedException {
       if (exceptionAtItem != -1 && num_created >= exceptionAtItem) {
         throw new CacheWriterException("Triggered exception as planned, created "+num_created+" keys.");
       }
-      getLogWriter()
+      LogWriterUtils.getLogWriter()
           .info(
               "MyWriter:beforeCreate " + event.getKey() + ":"
                   + event.getNewValue() + "num_created=" + num_created);
@@ -4374,7 +4381,7 @@ public void testOneServer() throws CacheException, InterruptedException {
     }
 
     public void beforeUpdate(EntryEvent event) {
-      getLogWriter()
+      LogWriterUtils.getLogWriter()
           .info(
               "MyWriter:beforeUpdate " + event.getKey() + ":"
                   + event.getNewValue());
@@ -4398,7 +4405,7 @@ public void testOneServer() throws CacheException, InterruptedException {
       if (exceptionAtItem != -1 && num_destroyed >= exceptionAtItem) {
         throw new CacheWriterException("Triggered exception as planned, destroyed "+num_destroyed+" keys.");
       }
-      getLogWriter().info("MyWriter:beforeDestroy " + event.getKey() + ":" + "num_destroyed=" + num_destroyed);
+      LogWriterUtils.getLogWriter().info("MyWriter:beforeDestroy " + event.getKey() + ":" + "num_destroyed=" + num_destroyed);
       if (event.getOperation().isRemoveAll()) {
         assertEquals("removeAllCallback", event.getCallbackArgument());
       }



[57/62] [abbrv] incubator-geode git commit: GEODE-12: Minor updates for publishing module artifacts

Posted by je...@apache.org.
GEODE-12: Minor updates for publishing module artifacts


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/28f5391a
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/28f5391a
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/28f5391a

Branch: refs/heads/feature/GEODE-17
Commit: 28f5391aa7c2a0b2035451843db257de10aa5862
Parents: 8af2858
Author: Jens Deppe <jd...@pivotal.io>
Authored: Tue Feb 9 16:15:20 2016 -0800
Committer: Jens Deppe <jd...@pivotal.io>
Committed: Tue Feb 9 16:17:37 2016 -0800

----------------------------------------------------------------------
 extensions/gemfire-modules-hibernate/build.gradle | 4 ++--
 extensions/gemfire-modules-session/build.gradle   | 8 ++++++--
 extensions/gemfire-modules-tomcat7/build.gradle   | 4 ++--
 extensions/gemfire-modules/build.gradle           | 2 +-
 4 files changed, 11 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/28f5391a/extensions/gemfire-modules-hibernate/build.gradle
----------------------------------------------------------------------
diff --git a/extensions/gemfire-modules-hibernate/build.gradle b/extensions/gemfire-modules-hibernate/build.gradle
index 56e9956..9d6cc5d 100644
--- a/extensions/gemfire-modules-hibernate/build.gradle
+++ b/extensions/gemfire-modules-hibernate/build.gradle
@@ -29,5 +29,5 @@ dependencies {
   testRuntime 'org.hsqldb:hsqldb:' + project.'hsqldb.version'
   testRuntime 'org.javassist:javassist:' + project.'javassist.version'
 
-  provided project(path: ':gemfire-junit')
-}
\ No newline at end of file
+  testCompile project(path: ':gemfire-junit')
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/28f5391a/extensions/gemfire-modules-session/build.gradle
----------------------------------------------------------------------
diff --git a/extensions/gemfire-modules-session/build.gradle b/extensions/gemfire-modules-session/build.gradle
index 9562e0f..45c6445 100644
--- a/extensions/gemfire-modules-session/build.gradle
+++ b/extensions/gemfire-modules-session/build.gradle
@@ -27,7 +27,7 @@ dependencies {
   testCompile(group: 'org.eclipse.jetty', name: 'jetty-http', version: project.'jetty.version', classifier: 'tests')
   testCompile(group: 'org.eclipse.jetty', name: 'jetty-servlet', version: project.'jetty.version', classifier: 'tests')
 
-  provided project(path: ':gemfire-junit')
+  testCompile project(path: ':gemfire-junit')
 }
 
 jar {
@@ -38,6 +38,7 @@ jar {
   manifest {
     attributes 'Main-Class': 'com.gemstone.gemfire.modules.session.installer.Installer'
   }
+  baseName = 'gemfire-modules-session'
 }
 
 task internalJar(type: Jar) {
@@ -45,6 +46,9 @@ task internalJar(type: Jar) {
   include '**/internal/**/*'
   includeEmptyDirs = false
   baseName = 'gemfire-modules-session'
+  classifier = 'internal'
 }
 
-assemble.dependsOn(internalJar)
+artifacts {
+  archives internalJar
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/28f5391a/extensions/gemfire-modules-tomcat7/build.gradle
----------------------------------------------------------------------
diff --git a/extensions/gemfire-modules-tomcat7/build.gradle b/extensions/gemfire-modules-tomcat7/build.gradle
index 9d2219d..edb070f 100644
--- a/extensions/gemfire-modules-tomcat7/build.gradle
+++ b/extensions/gemfire-modules-tomcat7/build.gradle
@@ -49,6 +49,6 @@ dependencies {
     exclude group: 'org.apache.tomcat', module: 'catalina-ha'
     exclude group: 'org.apache.tomcat', module: 'juli'
   }
-  provided project(path: ':gemfire-junit')
+  testCompile project(path: ':gemfire-junit')
   provided project(path: ':gemfire-core', configuration: 'testOutput')
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/28f5391a/extensions/gemfire-modules/build.gradle
----------------------------------------------------------------------
diff --git a/extensions/gemfire-modules/build.gradle b/extensions/gemfire-modules/build.gradle
index cf22f2c..42eb91f 100644
--- a/extensions/gemfire-modules/build.gradle
+++ b/extensions/gemfire-modules/build.gradle
@@ -34,6 +34,6 @@ dependencies {
   testCompile 'org.httpunit:httpunit:' + project.'httpunit.version'
   testRuntime 'org.apache.tomcat:coyote:' + project.'tomcat6.version'
 
-  provided project(path: ':gemfire-junit')
+  testCompile project(path: ':gemfire-junit')
   provided project(path: ':gemfire-core', configuration: 'testOutput')
 }


[62/62] [abbrv] incubator-geode git commit: Merge branch 'develop' into feature/GEODE-17

Posted by je...@apache.org.
Merge branch 'develop' into feature/GEODE-17


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/c39f8a5f
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/c39f8a5f
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/c39f8a5f

Branch: refs/heads/feature/GEODE-17
Commit: c39f8a5f103f0243325e59cd7cdca18c5ea04324
Parents: 3bf38a0 99e4aaf
Author: Jens Deppe <jd...@pivotal.io>
Authored: Thu Feb 11 07:25:44 2016 -0800
Committer: Jens Deppe <jd...@pivotal.io>
Committed: Thu Feb 11 07:25:44 2016 -0800

----------------------------------------------------------------------
 .gitignore                                      |     7 +-
 DISCLAIMER                                      |     6 +
 KEYS                                            |   236 +
 LICENSE                                         |   361 +
 LICENSE.txt                                     |   202 -
 NOTICE                                          |   336 +-
 README.md                                       |     9 +-
 build.gradle                                    |   433 +-
 .../gemfire-modules-assembly/build.gradle       |   247 +
 .../release/conf/cache-client.xml               |    40 +
 .../release/conf/cache-peer.xml                 |    48 +
 .../release/conf/cache-server.xml               |    74 +
 .../release/scripts/cacheserver.bat             |   133 +
 .../release/scripts/cacheserver.sh              |    97 +
 .../release/scripts/gemfire.bat                 |    41 +
 .../release/scripts/gemfire.sh                  |    58 +
 .../release/scripts/setenv.properties           |     6 +
 .../release/session/bin/cacheserver.bat         |    56 +
 .../release/session/bin/cacheserver.sh          |    57 +
 .../release/session/bin/gemfire.bat             |    53 +
 .../release/session/bin/gemfire.sh              |    69 +
 .../release/session/bin/modify_war              |   392 +
 .../release/session/bin/setenv.properties       |     6 +
 .../gemfire-cs-tomcat-7/context-fragment.xml    |    15 +
 .../tcserver/gemfire-cs-tomcat-7/modules.env    |     1 +
 .../gemfire-cs-tomcat-8/context-fragment.xml    |    15 +
 .../tcserver/gemfire-cs-tomcat-8/modules.env    |     1 +
 .../gemfire-cs/configuration-prompts.properties |    17 +
 .../tcserver/gemfire-cs/context-fragment.xml    |    13 +
 .../release/tcserver/gemfire-cs/modules.env     |     1 +
 .../tcserver/gemfire-cs/server-fragment.xml     |    12 +
 .../gemfire-p2p-tomcat-7/context-fragment.xml   |    15 +
 .../tcserver/gemfire-p2p-tomcat-7/modules.env   |     1 +
 .../gemfire-p2p-tomcat-8/context-fragment.xml   |    15 +
 .../tcserver/gemfire-p2p-tomcat-8/modules.env   |     1 +
 .../configuration-prompts.properties            |    19 +
 .../tcserver/gemfire-p2p/context-fragment.xml   |    13 +
 .../release/tcserver/gemfire-p2p/modules.env    |     1 +
 .../tcserver/gemfire-p2p/server-fragment.xml    |    14 +
 .../gemfire-modules-hibernate/build.gradle      |    33 +
 .../gemfire/modules/hibernate/EnumType.java     |    58 +
 .../gemfire/modules/hibernate/GemFireCache.java |   238 +
 .../modules/hibernate/GemFireCacheListener.java |    54 +
 .../modules/hibernate/GemFireCacheProvider.java |   200 +
 .../hibernate/GemFireQueryCacheFactory.java     |    39 +
 .../modules/hibernate/GemFireRegionFactory.java |   237 +
 .../modules/hibernate/internal/Access.java      |   257 +
 .../ClientServerRegionFactoryDelegate.java      |   208 +
 .../hibernate/internal/CollectionAccess.java    |   224 +
 .../hibernate/internal/EntityRegionWriter.java  |    87 +
 .../hibernate/internal/EntityVersion.java       |    28 +
 .../hibernate/internal/EntityVersionImpl.java   |    51 +
 .../hibernate/internal/EntityWrapper.java       |    89 +
 .../hibernate/internal/GemFireBaseRegion.java   |   166 +
 .../internal/GemFireCollectionRegion.java       |    59 +
 .../hibernate/internal/GemFireEntityRegion.java |   187 +
 .../internal/GemFireQueryResultsRegion.java     |   113 +
 .../modules/hibernate/internal/KeyWrapper.java  |    93 +
 .../internal/NonStrictReadWriteAccess.java      |    83 +
 .../hibernate/internal/ReadOnlyAccess.java      |    55 +
 .../hibernate/internal/ReadWriteAccess.java     |    36 +
 .../internal/RegionFactoryDelegate.java         |   153 +
 .../hibernate/internal/TransactionalAccess.java |    25 +
 .../com/gemstone/gemfire/modules/Event.java     |    67 +
 .../gemfire/modules/HibernateJUnitTest.java     |   410 +
 .../com/gemstone/gemfire/modules/Owner.java     |   186 +
 .../com/gemstone/gemfire/modules/Person.java    |    72 +
 .../gemstone/gemfire/modules/SecondVMTest.java  |    93 +
 .../com/gemstone/gemfire/modules/Event.hbm.xml  |    32 +
 .../com/gemstone/gemfire/modules/Person.hbm.xml |    36 +
 .../src/test/resources/log4j.properties         |    16 +
 extensions/gemfire-modules-session/build.gradle |    54 +
 .../session/filter/SessionCachingFilter.java    |   652 +
 .../modules/session/filter/SessionListener.java |    51 +
 .../modules/session/installer/Installer.java    |   296 +
 .../session/installer/JarClassLoader.java       |   123 +
 .../session/installer/args/Argument.java        |   275 +
 .../session/installer/args/ArgumentHandler.java |    38 +
 .../installer/args/ArgumentProcessor.java       |   397 +
 .../session/installer/args/ArgumentValues.java  |   222 +
 .../installer/args/URLArgumentHandler.java      |    77 +
 .../installer/args/UnknownArgumentHandler.java  |    36 +
 .../session/installer/args/UsageException.java  |    89 +
 .../internal/common/AbstractSessionCache.java   |   102 +
 .../session/internal/common/CacheProperty.java  |    65 +
 .../common/ClientServerSessionCache.java        |   186 +
 .../internal/common/PeerToPeerSessionCache.java |   184 +
 .../session/internal/common/SessionCache.java   |    68 +
 .../common/SessionExpirationCacheListener.java  |    53 +
 .../session/internal/filter/Constants.java      |    30 +
 .../internal/filter/DummySessionManager.java    |   132 +
 .../internal/filter/GemfireHttpSession.java     |   526 +
 .../filter/GemfireSessionException.java         |    41 +
 .../internal/filter/GemfireSessionManager.java  |   511 +
 .../internal/filter/ListenerEventType.java      |    75 +
 .../session/internal/filter/SessionManager.java |   110 +
 .../AbstractDeltaSessionAttributes.java         |   107 +
 .../attributes/AbstractSessionAttributes.java   |   188 +
 .../internal/filter/attributes/DeltaEvent.java  |   119 +
 .../DeltaQueuedSessionAttributes.java           |    94 +
 .../attributes/DeltaSessionAttributes.java      |    75 +
 .../attributes/ImmediateSessionAttributes.java  |    68 +
 .../attributes/QueuedSessionAttributes.java     |    65 +
 .../filter/attributes/SessionAttributes.java    |   120 +
 .../filter/util/NamedThreadFactory.java         |    68 +
 .../filter/util/ThreadLocalSession.java         |    39 +
 .../internal/filter/util/TypeAwareMap.java      |    50 +
 .../session/internal/jmx/SessionStatistics.java |    78 +
 .../internal/jmx/SessionStatisticsMXBean.java   |    30 +
 .../internal/filter/AbstractListener.java       |    57 +
 .../session/internal/filter/BasicServlet.java   |    52 +
 .../session/internal/filter/Callback.java       |    30 +
 .../internal/filter/CallbackServlet.java        |    91 +
 .../session/internal/filter/CommonTests.java    |   582 +
 .../HttpSessionAttributeListenerImpl.java       |    46 +
 .../filter/HttpSessionBindingListenerImpl.java  |    42 +
 .../filter/HttpSessionListenerImpl.java         |    41 +
 .../filter/HttpSessionListenerImpl2.java        |    43 +
 .../internal/filter/MyServletTester.java        |    38 +
 .../internal/filter/RendezvousManager.java      |    46 +
 .../ServletRequestAttributeListenerImpl.java    |    45 +
 .../filter/ServletRequestListenerImpl.java      |    36 +
 .../SessionReplicationIntegrationJUnitTest.java |  1558 ++
 .../filter/SessionReplicationJUnitTest.java     |    53 +
 .../SessionReplicationLocalCacheJUnitTest.java  |    54 +
 .../session/junit/ChildFirstClassLoader.java    |    86 +
 .../modules/session/junit/NamedRunner.java      |   120 +
 .../session/junit/PerTestClassLoaderRunner.java |   283 +
 .../junit/SeparateClassloaderTestRunner.java    |    56 +
 .../src/test/resources/log4j.properties         |    12 +
 extensions/gemfire-modules-tomcat7/build.gradle |    54 +
 .../catalina/Tomcat7DeltaSessionManager.java    |   112 +
 .../session/Tomcat7SessionsJUnitTest.java       |    35 +
 .../test/resources/tomcat/conf/tomcat-users.xml |     3 +
 .../src/test/resources/tomcat/logs/.gitkeep     |     0
 .../src/test/resources/tomcat/temp/.gitkeep     |     0
 extensions/gemfire-modules/build.gradle         |    39 +
 .../gatewaydelta/AbstractGatewayDeltaEvent.java |    64 +
 .../modules/gatewaydelta/GatewayDelta.java      |    26 +
 .../gatewaydelta/GatewayDeltaCreateEvent.java   |    88 +
 .../gatewaydelta/GatewayDeltaDestroyEvent.java  |    82 +
 .../modules/gatewaydelta/GatewayDeltaEvent.java |    24 +
 ...tewayDeltaEventApplicationCacheListener.java |    67 +
 .../GatewayDeltaForwarderCacheListener.java     |   197 +
 .../session/bootstrap/AbstractCache.java        |   398 +
 .../session/bootstrap/ClientServerCache.java    |    74 +
 .../session/bootstrap/LifecycleTypeAdapter.java |    59 +
 .../session/bootstrap/PeerToPeerCache.java      |    85 +
 .../AbstractCacheLifecycleListener.java         |    68 +
 .../session/catalina/AbstractSessionCache.java  |   113 +
 .../ClientServerCacheLifecycleListener.java     |    26 +
 .../catalina/ClientServerSessionCache.java      |   252 +
 .../session/catalina/CommitSessionValve.java    |    68 +
 .../modules/session/catalina/DeltaSession.java  |   597 +
 .../session/catalina/DeltaSessionFacade.java    |    49 +
 .../session/catalina/DeltaSessionManager.java   |   992 +
 .../session/catalina/JvmRouteBinderValve.java   |   108 +
 .../session/catalina/LocalStrings.properties    |    16 +
 .../PeerToPeerCacheLifecycleListener.java       |    29 +
 .../catalina/PeerToPeerSessionCache.java        |   215 +
 .../modules/session/catalina/SessionCache.java  |    64 +
 .../session/catalina/SessionManager.java        |    48 +
 .../catalina/Tomcat6DeltaSessionManager.java    |    98 +
 .../callback/LocalSessionCacheLoader.java       |    45 +
 .../callback/LocalSessionCacheWriter.java       |    59 +
 .../SessionExpirationCacheListener.java         |    77 +
 .../internal/DeltaSessionAttributeEvent.java    |    25 +
 .../DeltaSessionAttributeEventBatch.java        |    88 +
 .../DeltaSessionDestroyAttributeEvent.java      |    73 +
 .../internal/DeltaSessionStatistics.java        |    88 +
 .../DeltaSessionUpdateAttributeEvent.java       |    83 +
 .../gemstone/gemfire/modules/util/Banner.java   |    59 +
 .../modules/util/BootstrappingFunction.java     |   188 +
 .../util/ClassLoaderObjectInputStream.java      |    40 +
 .../gemfire/modules/util/ContextMapper.java     |    53 +
 .../modules/util/CreateRegionFunction.java      |   245 +
 .../modules/util/DebugCacheListener.java        |    72 +
 .../gemfire/modules/util/ModuleStatistics.java  |    91 +
 .../modules/util/RegionConfiguration.java       |   308 +
 .../util/RegionConfigurationCacheListener.java  |   114 +
 .../gemfire/modules/util/RegionHelper.java      |   241 +
 .../modules/util/RegionSizeFunction.java        |    56 +
 .../gemfire/modules/util/RegionStatus.java      |    21 +
 .../modules/util/ResourceManagerValidator.java  |   166 +
 .../modules/util/SessionCustomExpiry.java       |    64 +
 .../TouchPartitionedRegionEntriesFunction.java  |   100 +
 .../TouchReplicatedRegionEntriesFunction.java   |    97 +
 .../main/resources/modules-version.properties   |     1 +
 .../gemfire/modules/session/Callback.java       |    30 +
 .../gemfire/modules/session/CommandServlet.java |    91 +
 .../gemfire/modules/session/EmbeddedTomcat.java |   193 +
 .../gemfire/modules/session/QueryCommand.java   |    34 +
 .../modules/session/TestSessionsBase.java       |   493 +
 .../session/Tomcat6SessionsJUnitTest.java       |    35 +
 .../com/gemstone/gemfire/modules/Event.hbm.xml  |    16 +
 .../com/gemstone/gemfire/modules/Person.hbm.xml |    21 +
 .../src/test/resources/log4j.properties         |    16 +
 .../test/resources/tomcat/conf/tomcat-users.xml |     3 +
 .../src/test/resources/tomcat/logs/.gitkeep     |     0
 .../src/test/resources/tomcat/temp/.gitkeep     |     0
 gemfire-assembly/build.gradle                   |   222 +-
 gemfire-assembly/src/main/dist/DISCLAIMER       |     6 +
 gemfire-assembly/src/main/dist/LICENSE          |   429 +
 gemfire-assembly/src/main/dist/NOTICE           |   467 +
 gemfire-assembly/src/main/dist/bin/gfsh         |    14 +
 .../src/main/dist/bin/gfsh-completion.bash      |    15 +
 gemfire-assembly/src/main/dist/bin/gfsh.bat     |    14 +
 .../LocatorLauncherAssemblyJUnitTest.java       |   157 +
 .../management/internal/AgentUtilJUnitTest.java |    25 +-
 .../LauncherLifecycleCommandsDUnitTest.java     |  1007 +
 .../LauncherLifecycleCommandsJUnitTest.java     |   625 +
 .../SharedConfigurationEndToEndDUnitTest.java   |   450 +
 gemfire-common/build.gradle                     |    19 +-
 gemfire-core/build.gradle                       |    33 +-
 gemfire-core/src/jca/ra.xml                     |    17 +-
 .../com/gemstone/gemfire/SystemFailure.java     |    49 +-
 .../com/gemstone/gemfire/admin/AdminConfig.java |     2 +-
 .../gemfire/admin/AdminDistributedSystem.java   |     7 +-
 .../admin/AdminDistributedSystemFactory.java    |     2 +-
 .../gemstone/gemfire/admin/AdminException.java  |     2 +-
 .../gemfire/admin/AdminXmlException.java        |     2 +-
 .../java/com/gemstone/gemfire/admin/Alert.java  |     2 +-
 .../com/gemstone/gemfire/admin/AlertLevel.java  |     2 +-
 .../gemstone/gemfire/admin/AlertListener.java   |     2 +-
 .../gemstone/gemfire/admin/BackupStatus.java    |     2 +-
 .../admin/CacheDoesNotExistException.java       |     2 +-
 .../gemfire/admin/CacheHealthConfig.java        |     2 +-
 .../com/gemstone/gemfire/admin/CacheVm.java     |     2 +-
 .../gemstone/gemfire/admin/CacheVmConfig.java   |     2 +-
 .../gemfire/admin/ConfigurationParameter.java   |     2 +-
 .../gemfire/admin/DistributedSystemConfig.java  |     2 +-
 .../admin/DistributedSystemHealthConfig.java    |     2 +-
 .../gemfire/admin/DistributionLocator.java      |     2 +-
 .../admin/DistributionLocatorConfig.java        |     2 +-
 .../gemstone/gemfire/admin/GemFireHealth.java   |     2 +-
 .../gemfire/admin/GemFireHealthConfig.java      |     2 +-
 .../gemfire/admin/GemFireMemberStatus.java      |     2 +-
 .../gemstone/gemfire/admin/ManagedEntity.java   |     2 +-
 .../gemfire/admin/ManagedEntityConfig.java      |     2 +-
 .../gemfire/admin/MemberHealthConfig.java       |     2 +-
 .../admin/OperationCancelledException.java      |     2 +-
 .../gemfire/admin/RegionNotFoundException.java  |     2 +-
 .../gemfire/admin/RegionSubRegionSnapshot.java  |     2 +-
 .../gemfire/admin/RuntimeAdminException.java    |     2 +-
 .../com/gemstone/gemfire/admin/Statistic.java   |     2 +-
 .../gemfire/admin/StatisticResource.java        |     2 +-
 .../gemstone/gemfire/admin/SystemMember.java    |     2 +-
 .../gemfire/admin/SystemMemberCache.java        |     2 +-
 .../gemfire/admin/SystemMemberCacheEvent.java   |     2 +-
 .../admin/SystemMemberCacheListener.java        |     2 +-
 .../gemfire/admin/SystemMemberCacheServer.java  |     2 +-
 .../gemfire/admin/SystemMemberRegion.java       |     2 +-
 .../gemfire/admin/SystemMemberRegionEvent.java  |     2 +-
 .../gemfire/admin/SystemMemberType.java         |     2 +-
 .../gemfire/admin/SystemMembershipEvent.java    |     2 +-
 .../gemfire/admin/SystemMembershipListener.java |     2 +-
 .../UnmodifiableConfigurationException.java     |     2 +-
 .../internal/AdminDistributedSystemImpl.java    |     3 -
 .../DistributedSystemHealthEvaluator.java       |     2 +-
 .../gemfire/admin/internal/package.html         |     4 +-
 .../com/gemstone/gemfire/admin/jmx/Agent.java   |     2 +-
 .../gemstone/gemfire/admin/jmx/AgentConfig.java |     2 +-
 .../gemfire/admin/jmx/AgentFactory.java         |     2 +-
 .../jmx/internal/RMIRegistryServiceMBean.java   |     2 +-
 .../gemfire/admin/jmx/internal/package.html     |     6 +-
 .../gemfire/cache/AttributesMutator.java        |    11 +-
 .../java/com/gemstone/gemfire/cache/Cache.java  |     3 +-
 .../gemstone/gemfire/cache/CacheFactory.java    |     2 -
 .../gemfire/cache/CacheTransactionManager.java  |     2 +-
 .../com/gemstone/gemfire/cache/DataPolicy.java  |     1 -
 .../gemfire/cache/DiskStoreFactory.java         |     3 -
 .../gemstone/gemfire/cache/RegionFactory.java   |     2 -
 .../internal/ParallelAsyncEventQueueImpl.java   |     8 +-
 .../internal/SerialAsyncEventQueueImpl.java     |     8 +-
 .../gemfire/cache/client/ClientCache.java       |     4 +-
 .../cache/client/ClientCacheFactory.java        |     2 +-
 .../client/doc-files/example-client-cache.xml   |    16 +
 .../internal/AutoConnectionSourceImpl.java      |     2 +-
 .../cache/client/internal/ConnectionImpl.java   |     5 +-
 .../cache/control/RebalanceOperation.java       |     2 +-
 .../gemfire/cache/control/ResourceManager.java  |     2 +-
 .../gemfire/cache/doc-files/example-cache.xml   |    16 +
 .../gemfire/cache/doc-files/example2-cache.xml  |    17 +
 .../gemfire/cache/doc-files/example3-cache.xml  |    16 +
 .../gemfire/cache/execute/ResultSender.java     |     9 +-
 .../gemfire/cache/hdfs/HDFSStoreFactory.java    |     2 +-
 .../org/apache/hadoop/io/SequenceFile.java      |    24 +-
 .../cache/operations/OperationContext.java      |     6 +-
 .../com/gemstone/gemfire/cache/package.html     |    83 +-
 .../cache/partition/PartitionManager.java       |   377 -
 .../gemfire/cache/query/QueryService.java       |     6 +-
 .../query/internal/CompiledComparison.java      |     4 +-
 .../cache/query/internal/CompiledIn.java        |   106 +-
 .../cache/query/internal/CompiledJunction.java  |    94 +-
 .../cache/query/internal/CompiledSelect.java    |    22 +-
 .../query/internal/CompiledSortCriterion.java   |    11 -
 .../cache/query/internal/DefaultQuery.java      |     8 +-
 .../cache/query/internal/DerivedInfo.java       |   306 +
 .../cache/query/internal/GroupJunction.java     |     5 +-
 .../cache/query/internal/QueryMonitor.java      |    22 +-
 .../cache/query/internal/QueryUtils.java        |    61 +-
 .../query/internal/index/AbstractIndex.java     |     4 -
 .../query/internal/index/CompactRangeIndex.java |    10 +-
 .../query/internal/index/IndexManager.java      |     4 +
 .../query/internal/index/MemoryIndexStore.java  |   100 +-
 .../query/internal/index/PartitionedIndex.java  |     2 +-
 .../cache/query/internal/parse/fixantlr.sh      |    15 +
 .../gemfire/cache/query/internal/parse/oql.g    |    17 +
 .../cache/query/internal/types/TypeUtils.java   |    28 +-
 .../gemstone/gemfire/cache/server/package.html  |     2 +-
 .../gemfire/compression/SnappyCompressor.java   |     3 +-
 .../gemfire/distributed/AbstractLauncher.java   |     2 +-
 .../gemfire/distributed/DistributedSystem.java  |   209 +-
 .../gemfire/distributed/LocatorLauncher.java    |    13 +
 .../gemfire/distributed/internal/DMStats.java   |    81 +
 .../distributed/internal/DistributedState.java  |   143 -
 .../internal/DistributionAdvisor.java           |     2 +-
 .../internal/DistributionConfigImpl.java        |     2 +-
 .../internal/DistributionManager.java           |    44 +-
 .../internal/DistributionMessage.java           |     2 +-
 .../distributed/internal/DistributionStats.java |   240 +
 .../internal/ForceDisconnectOperation.java      |    42 -
 .../internal/InternalDistributedSystem.java     |    58 +-
 .../distributed/internal/InternalLocator.java   |    37 +-
 .../internal/LonerDistributionManager.java      |    70 +-
 .../internal/MembershipListener.java            |     3 +-
 .../distributed/internal/ProductUseLog.java     |     2 +-
 .../distributed/internal/ReplyProcessor21.java  |     9 +-
 .../distributed/internal/ServerLocator.java     |     2 +-
 .../internal/SharedConfiguration.java           |     1 +
 .../distributed/internal/StartupMessage.java    |    13 -
 .../internal/StartupMessageData.java            |     4 +-
 .../internal/StartupResponseMessage.java        |    16 -
 .../internal/deadlock/DeadlockDetector.java     |     2 +-
 .../internal/deadlock/UnsafeThreadLocal.java    |     1 -
 .../internal/direct/DirectChannel.java          |   100 +-
 .../internal/direct/DirectChannelListener.java  |    16 +
 .../internal/direct/MissingStubException.java   |    37 -
 .../internal/direct/ShunnedMemberException.java |    34 +
 .../internal/locks/DLockGrantor.java            |    40 +-
 .../DistributedMembershipListener.java          |     3 +-
 .../membership/InternalDistributedMember.java   |    17 +-
 .../internal/membership/MemberAttributes.java   |   131 +-
 .../internal/membership/MemberServices.java     |     1 -
 .../internal/membership/MembershipManager.java  |    47 +-
 .../internal/membership/NetMember.java          |     4 +
 .../internal/membership/NetView.java            |    29 +
 .../internal/membership/gms/GMSMember.java      |    15 +-
 .../internal/membership/gms/ServiceConfig.java  |    21 +-
 .../internal/membership/gms/Services.java       |    12 +-
 .../internal/membership/gms/SuspectMember.java  |     9 +-
 .../membership/gms/auth/GMSAuthenticator.java   |     2 +-
 .../membership/gms/fd/GMSHealthMonitor.java     |   363 +-
 .../gms/interfaces/HealthMonitor.java           |     1 -
 .../membership/gms/interfaces/Manager.java      |     1 -
 .../membership/gms/interfaces/Messenger.java    |    22 +
 .../membership/gms/interfaces/Service.java      |     3 +-
 .../gms/locator/FindCoordinatorRequest.java     |    33 +
 .../gms/locator/FindCoordinatorResponse.java    |     1 -
 .../membership/gms/membership/GMSJoinLeave.java |   471 +-
 .../gms/messages/HeartbeatMessage.java          |     2 +-
 .../gms/messages/HeartbeatRequestMessage.java   |     2 +-
 .../gms/messages/InstallViewMessage.java        |     2 +-
 .../gms/messages/JoinResponseMessage.java       |    10 +-
 .../membership/gms/messages/ViewAckMessage.java |     2 +-
 .../gms/messenger/AddressManager.java           |    21 +-
 .../membership/gms/messenger/GMSPingPonger.java |    22 +-
 .../membership/gms/messenger/JGAddress.java     |    23 +-
 .../gms/messenger/JGroupsMessenger.java         |   476 +-
 .../membership/gms/messenger/Transport.java     |    85 +-
 .../gms/mgr/GMSMembershipManager.java           |   277 +-
 .../internal/tcpserver/TcpClient.java           |    17 +-
 .../internal/tcpserver/TcpServer.java           |    21 +-
 .../gemfire/internal/AbstractConfig.java        |     4 -
 .../gemfire/internal/AvailablePort.java         |   109 +-
 .../internal/InternalDataSerializer.java        |     2 -
 .../internal/LocalStatisticsFactory.java        |     4 +-
 .../gemfire/internal/SocketCreator.java         |    10 +-
 .../internal/admin/StatAlertsManager.java       |    10 +-
 .../gemfire/internal/admin/package.html         |     4 +-
 .../admin/remote/DistributionLocatorId.java     |     1 -
 .../admin/remote/RemoteBridgeServer.java        |     8 +-
 .../admin/remote/RemoteGfManagerAgent.java      |     2 +-
 .../admin/remote/RemoteRegionAttributes.java    |   116 +-
 .../gemfire/internal/admin/remote/package.html  |     4 +-
 .../gemfire/internal/cache/AbstractRegion.java  |     5 +-
 .../internal/cache/AbstractRegionMap.java       |  1175 +-
 .../gemfire/internal/cache/BucketAdvisor.java   |     6 +-
 .../gemfire/internal/cache/BucketRegion.java    |     5 +-
 .../gemfire/internal/cache/CacheObserver.java   |     4 +
 .../internal/cache/CacheObserverAdapter.java    |     4 +
 .../gemfire/internal/cache/DiskStoreImpl.java   |     4 +-
 .../internal/cache/DistTXRollbackMessage.java   |     2 +-
 .../gemfire/internal/cache/DistTXState.java     |     6 +-
 .../cache/DistributedCacheOperation.java        |    11 +-
 .../cache/DistributedClearOperation.java        |     8 +-
 .../cache/DistributedPutAllOperation.java       |     2 +-
 .../internal/cache/DistributedRegion.java       |     2 +-
 .../cache/DistributedRemoveAllOperation.java    |     2 +-
 .../cache/DistributedTombstoneOperation.java    |    15 +-
 .../gemfire/internal/cache/EventTracker.java    |     2 +-
 .../gemfire/internal/cache/FilterProfile.java   |   853 +-
 .../internal/cache/GemFireCacheImpl.java        |   100 +-
 .../internal/cache/IdentityArrayList.java       |     2 +-
 .../internal/cache/InitialImageFlowControl.java |     2 +-
 .../internal/cache/InitialImageOperation.java   |    94 +-
 .../gemfire/internal/cache/LocalRegion.java     |    78 +-
 .../internal/cache/LocalRegionDataView.java     |     2 +-
 .../internal/cache/PRHARedundancyProvider.java  |     4 +-
 .../internal/cache/PartitionedRegion.java       |    87 +-
 .../cache/PartitionedRegionDataStore.java       |    31 +-
 .../cache/PartitionedRegionDataView.java        |     2 +-
 .../internal/cache/PartitionedRegionHelper.java |     2 +-
 .../gemfire/internal/cache/PoolManagerImpl.java |     4 +-
 .../gemfire/internal/cache/QueuedOperation.java |     9 +-
 .../internal/cache/RemoteInvalidateMessage.java |     1 -
 .../internal/cache/RemoteOperationMessage.java  |    16 +-
 .../internal/cache/RemotePutAllMessage.java     |    10 +-
 .../cache/SearchLoadAndWriteProcessor.java      |     2 +-
 .../internal/cache/StateFlushOperation.java     |     6 +-
 .../gemfire/internal/cache/TXCommitMessage.java |     2 +-
 .../internal/cache/TXFarSideCMTracker.java      |     2 +-
 .../gemfire/internal/cache/TXManagerImpl.java   |     2 +-
 .../gemfire/internal/cache/TXState.java         |     2 +-
 .../internal/cache/TombstoneService.java        |    10 +
 .../CompressedCachedDeserializable.java         |     2 +-
 .../internal/cache/control/ResourceAdvisor.java |   138 +-
 .../cache/control/ResourceListener.java         |     2 +-
 .../cache/execute/FunctionServiceStats.java     |     1 -
 .../cache/execute/InternalExecution.java        |     2 -
 .../cache/execute/InternalFunctionService.java  |    14 +-
 .../cache/execute/util/CommitFunction.java      |     3 +-
 .../cache/execute/util/RollbackFunction.java    |     3 +-
 .../internal/cache/ha/HARegionQueue.java        |    22 +-
 .../locks/GFEAbstractQueuedSynchronizer.java    |  1715 --
 .../locks/ReentrantReadWriteWriteShareLock.java |   477 -
 .../internal/cache/lru/NewLIFOClockHand.java    |    78 +-
 .../internal/cache/lru/NewLRUClockHand.java     |    14 +-
 .../cache/partitioned/FetchEntriesMessage.java  |    19 +-
 .../cache/partitioned/FetchKeysMessage.java     |    20 +-
 .../cache/partitioned/IndexCreationMsg.java     |    31 +-
 .../cache/partitioned/PRTombstoneMessage.java   |    19 +-
 .../cache/partitioned/PartitionMessage.java     |     3 +-
 .../partitioned/PartitionedRegionObserver.java  |     5 +
 .../PartitionedRegionObserverAdapter.java       |     4 +
 .../PartitionedRegionRebalanceOp.java           |     2 +-
 .../cache/partitioned/PutAllPRMessage.java      |    10 +-
 .../StreamingPartitionOperation.java            |     2 -
 .../rebalance/PartitionedRegionLoadModel.java   |     2 -
 .../cache/persistence/BackupManager.java        |     2 +-
 .../persistence/PersistenceAdvisorImpl.java     |     6 +-
 .../persistence/PersistentMemberManager.java    |     2 +-
 .../cache/persistence/PersistentMemberView.java |     1 -
 .../region/entry/RegionEntryFactoryBuilder.java |   103 +
 .../cache/tier/InternalClientMembership.java    |     2 +-
 .../cache/tier/sockets/CacheClientProxy.java    |    22 +-
 .../tier/sockets/ClientProxyMembershipID.java   |    15 +-
 .../internal/cache/tier/sockets/HandShake.java  |    16 +-
 .../sockets/command/GatewayReceiverCommand.java |    24 +-
 .../cache/versions/RegionVersionHolder.java     |    83 +-
 .../cache/versions/RegionVersionVector.java     |     2 +-
 .../cache/wan/AbstractGatewaySender.java        |    31 +-
 .../AbstractGatewaySenderEventProcessor.java    |    11 +-
 .../cache/wan/GatewaySenderAdvisor.java         |     4 +-
 .../ParallelGatewaySenderEventProcessor.java    |     8 +-
 .../parallel/ParallelGatewaySenderQueue.java    |    61 +-
 ...urrentSerialGatewaySenderEventProcessor.java |     9 +
 .../cache/xmlcache/CacheXmlGenerator.java       |     4 +-
 .../cache/xmlcache/GeodeEntityResolver.java     |     8 +-
 .../concurrent/CompactConcurrentHashSet2.java   |     6 +-
 .../internal/i18n/ParentLocalizedStrings.java   |    21 +-
 .../gemfire/internal/jta/GlobalTransaction.java |    13 +-
 .../gemfire/internal/logging/LogService.java    |    48 -
 .../gemfire/internal/logging/MergeLogFiles.java |     6 -
 .../internal/logging/log4j/LogWriterLogger.java |     7 -
 .../gemfire/internal/offheap/Chunk.java         |    26 +-
 .../gemfire/internal/offheap/DataAsAddress.java |    19 +-
 .../gemfire/internal/offheap/DataType.java      |    23 +-
 ...DisconnectingOutOfOffHeapMemoryListener.java |    77 +
 .../internal/offheap/MemoryBlockNode.java       |     2 +-
 .../internal/offheap/MemoryInspector.java       |     9 +-
 .../internal/offheap/MemoryInspectorImpl.java   |    99 +
 .../offheap/OffHeapRegionEntryHelper.java       |    89 +-
 .../internal/offheap/OffHeapStorage.java        |   127 +-
 .../internal/offheap/RefCountChangeInfo.java    |    80 +-
 .../internal/offheap/ReferenceCountHelper.java  |    14 +-
 .../offheap/SimpleMemoryAllocatorImpl.java      |   102 +-
 .../internal/offheap/SyncChunkStack.java        |    13 +-
 .../internal/offheap/UnsafeMemoryChunk.java     |    55 -
 .../internal/process/AttachProcessUtils.java    |     5 +
 .../internal/process/FileProcessController.java |    39 +-
 .../internal/process/NativeProcessUtils.java    |     5 +
 .../internal/process/ProcessController.java     |     2 +-
 .../gemfire/internal/process/ProcessUtils.java  |    16 +-
 .../gemfire/internal/process/signal/Signal.java |     6 +-
 .../internal/redis/ByteArrayWrapper.java        |     2 +-
 .../internal/redis/ByteToCommandDecoder.java    |     2 +-
 .../gemstone/gemfire/internal/redis/Coder.java  |     2 +-
 .../redis/RedisCommandParserException.java      |     2 +-
 .../internal/redis/RegionCreationException.java |     2 +-
 .../gemfire/internal/redis/RegionProvider.java  |     1 -
 .../redis/executor/AbstractExecutor.java        |     8 -
 .../redis/executor/hll/HyperLogLogPlus.java     |     1 -
 .../gemfire/internal/shared/NativeCalls.java    |     2 -
 .../gemfire/internal/tcp/Connection.java        |   123 +-
 .../gemfire/internal/tcp/ConnectionTable.java   |    93 +-
 .../internal/tcp/MemberShunnedException.java    |     7 +-
 .../gemfire/internal/tcp/ServerDelegate.java    |     5 +-
 .../com/gemstone/gemfire/internal/tcp/Stub.java |   164 -
 .../gemfire/internal/tcp/TCPConduit.java        |   276 +-
 .../gemfire/internal/util/DebuggerSupport.java  |    17 +-
 .../CustomEntryConcurrentHashMap.java           |    14 +-
 .../gemfire/management/internal/AgentUtil.java  |   111 +-
 .../management/internal/FederatingManager.java  |     7 +-
 .../management/internal/JmxManagerAdvisor.java  |     1 -
 .../management/internal/MBeanProxyFactory.java  |     2 -
 .../management/internal/ManagementAgent.java    |    17 +-
 .../internal/ManagementMembershipListener.java  |     4 +-
 .../gemfire/management/internal/RestAgent.java  |     2 +-
 .../internal/SystemManagementService.java       |     7 +-
 .../beans/GatewaySenderMBeanBridge.java         |     8 +-
 .../internal/beans/MBeanAggregator.java         |     2 +-
 .../internal/beans/stats/StatsKey.java          |     1 -
 .../management/internal/cli/CliUtil.java        |     2 +-
 .../management/internal/cli/Launcher.java       |     4 +-
 .../cli/commands/LauncherLifecycleCommands.java |     9 +-
 .../internal/cli/commands/ShellCommands.java    |     4 +-
 .../internal/cli/result/TableBuilder.java       |     9 +-
 .../internal/cli/result/TableBuilderHelper.java |    18 +-
 .../management/internal/cli/shell/Gfsh.java     |    15 +-
 .../internal/cli/shell/JmxOperationInvoker.java |    10 +-
 .../internal/cli/shell/jline/ANSIBuffer.java    |   433 +
 .../internal/cli/shell/jline/ANSIHandler.java   |     5 +-
 .../cli/shell/jline/CygwinMinttyTerminal.java   |   137 +-
 .../internal/cli/shell/jline/GfshHistory.java   |    24 +-
 .../shell/jline/GfshUnsupportedTerminal.java    |     2 +-
 .../internal/cli/util/CLIConsoleBufferUtil.java |     8 +-
 .../configuration/domain/CacheElement.java      |     1 -
 .../internal/security/JSONAuthorization.java    |   292 +
 .../security/ManagementInterceptor.java         |     5 +
 .../management/internal/web/domain/Link.java    |     2 +-
 .../internal/web/http/ClientHttpRequest.java    |     2 +-
 .../internal/web/http/HttpHeader.java           |     2 +-
 .../main/java/com/gemstone/gemfire/package.html |     6 +-
 .../com/gemstone/gemfire/pdx/PdxInstance.java   |     2 +-
 .../gemfire/redis/GemFireRedisServer.java       |     3 +-
 .../security/GemFireSecurityException.java      |     2 +-
 .../config/GemFireFileConfigurationMonitor.java |   145 -
 .../config/xml/GemFireXmlConfiguration.java     |   344 -
 .../xml/GemFireXmlConfigurationFactory.java     |    59 -
 .../tools/gfsh/app/windowsbindings.properties   |    15 +
 .../src/test/java/com/examples/TestObject.java  |     6 +-
 .../com/examples/ds/PutDataSerializables.java   |     6 +-
 .../test/java/com/gemstone/gemfire/BadTest.java |    42 -
 .../com/gemstone/gemfire/GemFireTestCase.java   |     2 +-
 .../gemfire/SystemFailureJUnitTest.java         |    60 +
 .../com/gemstone/gemfire/TXExpiryJUnitTest.java |    11 +-
 .../com/gemstone/gemfire/UnitTestDoclet.java    |     7 +-
 .../cache/CacheRegionClearStatsDUnitTest.java   |    19 +-
 .../cache/ClientServerTimeSyncDUnitTest.java    |    38 +-
 .../cache/ConnectionPoolAndLoaderDUnitTest.java |    21 +-
 .../client/ClientCacheFactoryJUnitTest.java     |    50 +
 .../ClientServerRegisterInterestsDUnitTest.java |    19 +-
 .../internal/AutoConnectionSourceDUnitTest.java |    60 +-
 .../CacheServerSSLConnectionDUnitTest.java      |    20 +-
 .../internal/LocatorLoadBalancingDUnitTest.java |    65 +-
 .../cache/client/internal/LocatorTestBase.java  |    36 +-
 .../internal/SSLNoClientAuthDUnitTest.java      |    13 +-
 .../pooling/ConnectionManagerJUnitTest.java     |    12 +-
 .../SortedListForAsyncQueueJUnitTest.java       |     4 +-
 .../management/MXMemoryPoolListenerExample.java |     4 -
 .../management/MemoryThresholdsDUnitTest.java   |    93 +-
 .../MemoryThresholdsOffHeapDUnitTest.java       |   101 +-
 .../management/ResourceManagerDUnitTest.java    |    14 +-
 .../mapInterface/PutAllGlobalLockJUnitTest.java |     5 +-
 .../partition/PartitionManagerDUnitTest.java    |   443 -
 .../PartitionRegionHelperDUnitTest.java         |    34 +-
 .../gemfire/cache/query/QueryTestUtils.java     |    15 +-
 .../query/cq/dunit/CqQueryTestListener.java     |    27 +-
 .../query/dunit/CompactRangeIndexDUnitTest.java |    26 +-
 .../cache/query/dunit/CqTimeTestListener.java   |    15 +-
 .../cache/query/dunit/GroupByDUnitImpl.java     |     7 +-
 .../dunit/GroupByPartitionedQueryDUnitTest.java |     7 +-
 .../query/dunit/GroupByQueryDUnitTest.java      |     7 +-
 .../cache/query/dunit/HashIndexDUnitTest.java   |    19 +-
 .../cache/query/dunit/HelperTestCase.java       |    12 +-
 .../dunit/NonDistinctOrderByDUnitImpl.java      |     7 +-
 .../NonDistinctOrderByPartitionedDUnitTest.java |     7 +-
 .../query/dunit/PdxStringQueryDUnitTest.java    |   310 +-
 .../dunit/QueryDataInconsistencyDUnitTest.java  |    66 +-
 .../dunit/QueryIndexUsingXMLDUnitTest.java      |    92 +-
 .../QueryParamsAuthorizationDUnitTest.java      |    10 +-
 .../QueryUsingFunctionContextDUnitTest.java     |    56 +-
 .../query/dunit/QueryUsingPoolDUnitTest.java    |   290 +-
 .../cache/query/dunit/RemoteQueryDUnitTest.java |   182 +-
 ...esourceManagerWithQueryMonitorDUnitTest.java |    36 +-
 .../query/dunit/SelectStarQueryDUnitTest.java   |   128 +-
 .../query/functional/CountStarJUnitTest.java    |     3 -
 ...ctResultsWithDupValuesInRegionJUnitTest.java |     3 -
 .../IndexCreationDeadLockJUnitTest.java         |     7 +-
 .../IndexMaintenanceAsynchJUnitTest.java        |     7 +-
 .../functional/IndexOnEntrySetJUnitTest.java    |   335 +
 .../functional/LikePredicateJUnitTest.java      |     5 +-
 .../query/functional/PdxGroupByTestImpl.java    |     2 +-
 .../query/internal/CompiledInJUnitTest.java     |   460 +
 .../internal/ExecutionContextJUnitTest.java     |     5 +-
 .../index/AsynchIndexMaintenanceJUnitTest.java  |    18 +-
 ...rrentIndexInitOnOverflowRegionDUnitTest.java |    52 +-
 ...ndexOperationsOnOverflowRegionDUnitTest.java |    71 +-
 ...pdateWithInplaceObjectModFalseDUnitTest.java |    79 +-
 ...ConcurrentIndexUpdateWithoutWLDUnitTest.java |    73 +-
 .../index/CopyOnReadIndexDUnitTest.java         |    69 +-
 .../query/internal/index/EquijoinDUnitTest.java |   437 +
 .../index/IndexCreationInternalsJUnitTest.java  |     7 +-
 .../index/IndexMaintainceJUnitTest.java         |     5 +-
 .../IndexTrackingQueryObserverDUnitTest.java    |    25 +-
 ...itializeIndexEntryDestroyQueryDUnitTest.java |    44 +-
 .../index/MemoryIndexStoreJUnitTest.java        |   396 +
 ...exStoreWithInplaceModificationJUnitTest.java |    54 +
 .../index/MultiIndexCreationDUnitTest.java      |    39 +-
 .../PartitionedRegionEquijoinDUnitTest.java     |   130 +
 .../PutAllWithIndexPerfDUnitDisabledTest.java   |   215 -
 .../index/PutAllWithIndexPerfDUnitTest.java     |   216 +
 .../PRBasicIndexCreationDUnitTest.java          |   117 +-
 .../PRBasicIndexCreationDeadlockDUnitTest.java  |    26 +-
 .../PRBasicMultiIndexCreationDUnitTest.java     |    94 +-
 .../partitioned/PRBasicQueryDUnitTest.java      |    38 +-
 .../PRBasicRemoveIndexDUnitTest.java            |    12 +-
 .../PRColocatedEquiJoinDUnitTest.java           |   310 +-
 .../partitioned/PRInvalidQueryDUnitTest.java    |    22 +-
 .../partitioned/PRQueryCacheCloseDUnitTest.java |    82 +-
 .../PRQueryCacheClosedJUnitTest.java            |    13 +-
 .../query/partitioned/PRQueryDUnitHelper.java   |   211 +-
 .../query/partitioned/PRQueryDUnitTest.java     |   165 +-
 .../query/partitioned/PRQueryPerfDUnitTest.java |    15 +-
 .../PRQueryRegionCloseDUnitTest.java            |    46 +-
 .../PRQueryRegionClosedJUnitTest.java           |     6 -
 .../PRQueryRegionDestroyedDUnitTest.java        |    48 +-
 .../PRQueryRegionDestroyedJUnitTest.java        |    12 +-
 .../PRQueryRemoteNodeExceptionDUnitTest.java    |   162 +-
 .../query/transaction/QueryAndJtaJUnitTest.java |     1 -
 .../snapshot/ParallelSnapshotDUnitTest.java     |    12 +-
 .../snapshot/SnapshotByteArrayDUnitTest.java    |    16 +-
 .../cache/snapshot/SnapshotDUnitTest.java       |     5 +-
 .../snapshot/SnapshotPerformanceDUnitTest.java  |    26 +-
 .../gemfire/cache30/Bug34387DUnitTest.java      |    19 +-
 .../gemfire/cache30/Bug34948DUnitTest.java      |    26 +-
 .../gemfire/cache30/Bug35214DUnitTest.java      |    31 +-
 .../gemfire/cache30/Bug38013DUnitTest.java      |    25 +-
 .../gemfire/cache30/Bug38741DUnitTest.java      |    22 +-
 .../cache30/Bug40255JUnitDisabledTest.java      |   139 -
 .../gemfire/cache30/Bug40255JUnitTest.java      |   143 +
 .../cache30/Bug40662JUnitDisabledTest.java      |    92 -
 .../gemfire/cache30/Bug40662JUnitTest.java      |    90 +
 .../gemfire/cache30/CacheCloseDUnitTest.java    |    16 +-
 .../gemfire/cache30/CacheListenerTestCase.java  |    85 +-
 .../gemfire/cache30/CacheLoaderTestCase.java    |    22 +-
 .../gemfire/cache30/CacheLogRollDUnitTest.java  |    16 -
 .../gemfire/cache30/CacheMapTxnDUnitTest.java   |    50 +-
 ...cheRegionsReliablityStatsCheckDUnitTest.java |    10 +-
 .../cache30/CacheSerializableRunnable.java      |    10 +-
 .../cache30/CacheStatisticsDUnitTest.java       |    28 +-
 .../gemstone/gemfire/cache30/CacheTestCase.java |    56 +-
 .../gemfire/cache30/CacheXml30DUnitTest.java    |    39 +-
 .../gemfire/cache30/CacheXml41DUnitTest.java    |    44 +-
 .../gemfire/cache30/CacheXml45DUnitTest.java    |     9 +-
 .../gemfire/cache30/CacheXml51DUnitTest.java    |     5 +-
 .../gemfire/cache30/CacheXml57DUnitTest.java    |    32 +-
 .../gemfire/cache30/CacheXml60DUnitTest.java    |    30 +-
 .../gemfire/cache30/CacheXml61DUnitTest.java    |     5 +-
 .../gemfire/cache30/CacheXml65DUnitTest.java    |    24 +-
 .../gemfire/cache30/CacheXml66DUnitTest.java    |     9 +-
 .../gemfire/cache30/CacheXml80DUnitTest.java    |    20 +-
 .../gemfire/cache30/CacheXml81DUnitTest.java    |     9 +-
 .../gemfire/cache30/CacheXml90DUnitTest.java    |    18 +-
 .../gemfire/cache30/CacheXmlTestCase.java       |    13 +-
 .../cache30/CachedAllEventsDUnitTest.java       |    16 +-
 .../gemfire/cache30/CallbackArgDUnitTest.java   |    29 +-
 .../cache30/CertifiableTestCacheListener.java   |    13 +-
 .../cache30/ClearMultiVmCallBkDUnitTest.java    |    54 +-
 .../gemfire/cache30/ClearMultiVmDUnitTest.java  |    66 +-
 .../cache30/ClientMembershipDUnitTest.java      |   173 +-
 .../ClientRegisterInterestDUnitTest.java        |    62 +-
 .../cache30/ClientServerCCEDUnitTest.java       |    65 +-
 .../gemfire/cache30/ClientServerTestCase.java   |    16 +-
 .../ConcurrentLeaveDuringGIIDUnitTest.java      |    24 +-
 .../gemfire/cache30/DiskRegionDUnitTest.java    |    67 +-
 .../gemfire/cache30/DiskRegionTestImpl.java     |    19 +-
 .../cache30/DistAckMapMethodsDUnitTest.java     |    60 +-
 ...tedAckOverflowRegionCCEOffHeapDUnitTest.java |     9 +-
 ...tributedAckPersistentRegionCCEDUnitTest.java |    14 +-
 ...dAckPersistentRegionCCEOffHeapDUnitTest.java |    14 +-
 .../DistributedAckRegionCCEDUnitTest.java       |    33 +-
 ...DistributedAckRegionCCEOffHeapDUnitTest.java |    14 +-
 ...istributedAckRegionCompressionDUnitTest.java |     3 +-
 .../cache30/DistributedAckRegionDUnitTest.java  |    44 +-
 .../DistributedAckRegionOffHeapDUnitTest.java   |    14 +-
 .../DistributedMulticastRegionDUnitTest.java    |   212 +
 .../DistributedNoAckRegionCCEDUnitTest.java     |    28 +-
 ...stributedNoAckRegionCCEOffHeapDUnitTest.java |    14 +-
 .../DistributedNoAckRegionDUnitTest.java        |    63 +-
 .../DistributedNoAckRegionOffHeapDUnitTest.java |    14 +-
 .../gemfire/cache30/DynamicRegionDUnitTest.java |    41 +-
 .../gemfire/cache30/GlobalLockingDUnitTest.java |    22 +-
 .../cache30/GlobalRegionCCEDUnitTest.java       |    12 +-
 .../GlobalRegionCCEOffHeapDUnitTest.java        |    14 +-
 .../gemfire/cache30/GlobalRegionDUnitTest.java  |    51 +-
 .../cache30/GlobalRegionOffHeapDUnitTest.java   |    16 +-
 .../cache30/LRUEvictionControllerDUnitTest.java |     8 +-
 .../gemfire/cache30/LocalRegionDUnitTest.java   |    20 +-
 .../MemLRUEvictionControllerDUnitTest.java      |     6 +-
 .../gemfire/cache30/MultiVMRegionTestCase.java  |   393 +-
 .../OffHeapLRUEvictionControllerDUnitTest.java  |    14 +-
 .../PRBucketSynchronizationDUnitTest.java       |    34 +-
 .../cache30/PartitionedRegionDUnitTest.java     |    37 +-
 ...tionedRegionMembershipListenerDUnitTest.java |     3 +-
 .../PartitionedRegionOffHeapDUnitTest.java      |    14 +-
 .../cache30/PreloadedRegionTestCase.java        |    19 +-
 .../gemfire/cache30/ProxyDUnitTest.java         |    34 +-
 .../cache30/PutAllCallBkRemoteVMDUnitTest.java  |    63 +-
 .../cache30/PutAllCallBkSingleVMDUnitTest.java  |    64 +-
 .../gemfire/cache30/PutAllMultiVmDUnitTest.java |    43 +-
 .../gemfire/cache30/QueueMsgDUnitTest.java      |    31 +-
 .../cache30/RRSynchronizationDUnitTest.java     |    32 +-
 .../gemfire/cache30/ReconnectDUnitTest.java     |   223 +-
 .../ReconnectedCacheServerDUnitTest.java        |     2 +-
 .../cache30/RegionExpirationDUnitTest.java      |    26 +-
 .../RegionMembershipListenerDUnitTest.java      |    49 +-
 .../RegionReliabilityListenerDUnitTest.java     |    26 +-
 .../cache30/RegionReliabilityTestCase.java      |    77 +-
 .../gemfire/cache30/RegionTestCase.java         |   119 +-
 .../cache30/RemoveAllMultiVmDUnitTest.java      |    40 +-
 .../gemfire/cache30/RequiredRolesDUnitTest.java |    45 +-
 .../cache30/RolePerformanceDUnitTest.java       |    20 +-
 .../gemfire/cache30/SearchAndLoadDUnitTest.java |   261 +-
 .../cache30/SlowRecDUnitDisabledTest.java       |  1446 --
 .../gemfire/cache30/SlowRecDUnitTest.java       |  1467 ++
 .../gemfire/cache30/TXDistributedDUnitTest.java |    83 +-
 .../gemfire/cache30/TXOrderDUnitTest.java       |    53 +-
 .../cache30/TXRestrictionsDUnitTest.java        |    20 +-
 .../gemfire/cache30/TestCacheCallback.java      |     7 +-
 .../codeAnalysis/CompiledClassUtils.java        |     1 -
 .../gemfire/codeAnalysis/decode/cp/Cp.java      |    14 +-
 .../codeAnalysis/decode/cp/CpInvokeDynamic.java |    33 +
 .../codeAnalysis/decode/cp/CpMethodHandle.java  |    33 +
 .../codeAnalysis/decode/cp/CpMethodType.java    |    31 +
 .../AbstractLauncherIntegrationJUnitTest.java   |    71 +
 .../distributed/AbstractLauncherJUnitTest.java  |    40 +-
 .../AbstractLauncherJUnitTestCase.java          |     2 +-
 .../distributed/CommonLauncherTestSuite.java    |    65 -
 .../distributed/DistributedMemberDUnitTest.java |    27 +-
 .../distributed/DistributedSystemDUnitTest.java |    93 +-
 .../DistributedSystemIntegrationJUnitTest.java  |    91 +
 .../distributed/DistributedSystemJUnitTest.java |    78 +
 .../distributed/HostedLocatorsDUnitTest.java    |    12 +-
 .../gemfire/distributed/LocatorDUnitTest.java   |   216 +-
 .../gemfire/distributed/LocatorJUnitTest.java   |     4 +-
 .../LocatorLauncherIntegrationJUnitTest.java    |   248 +
 .../distributed/LocatorLauncherJUnitTest.java   |   155 +-
 .../distributed/LocatorStateJUnitTest.java      |   208 +
 .../gemfire/distributed/RoleDUnitTest.java      |    16 +-
 .../ServerLauncherIntegrationJUnitTest.java     |   312 +
 .../distributed/ServerLauncherJUnitTest.java    |   184 +-
 .../ServerLauncherRemoteJUnitTest.java          |     2 +
 .../distributed/SystemAdminDUnitTest.java       |    16 +-
 .../distributed/internal/Bug40751DUnitTest.java |    16 +-
 .../ConsoleDistributionManagerDUnitTest.java    |    56 +-
 .../internal/DistributionAdvisorDUnitTest.java  |    20 +-
 .../internal/DistributionManagerDUnitTest.java  |    85 +-
 .../InternalDistributedSystemJUnitTest.java     |     2 +-
 .../gemfire/distributed/internal/LDM.java       |     2 +-
 .../internal/ProductUseLogDUnitTest.java        |    17 +-
 .../internal/SharedConfigurationJUnitTest.java  |     2 +-
 .../GemFireDeadlockDetectorDUnitTest.java       |    26 +-
 .../locks/CollaborationJUnitDisabledTest.java   |   562 -
 .../internal/locks/CollaborationJUnitTest.java  |   617 +
 .../locks/DLockReentrantLockJUnitTest.java      |    84 +
 ...entrantReadWriteWriteShareLockJUnitTest.java |   458 -
 .../membership/MembershipJUnitTest.java         |   232 +-
 .../internal/membership/NetViewJUnitTest.java   |    95 +-
 .../membership/gms/GMSMemberJUnitTest.java      |    16 +
 .../membership/gms/MembershipManagerHelper.java |    10 +-
 .../gms/fd/GMSHealthMonitorJUnitTest.java       |   286 +-
 .../locator/GMSLocatorRecoveryJUnitTest.java    |     4 +-
 .../gms/membership/GMSJoinLeaveJUnitTest.java   |   344 +-
 .../messenger/GMSQuorumCheckerJUnitTest.java    |     4 +-
 .../messenger/JGroupsMessengerJUnitTest.java    |   594 +-
 .../gms/mgr/GMSMembershipManagerJUnitTest.java  |   130 +-
 .../StreamingOperationManyDUnitTest.java        |    20 +-
 .../StreamingOperationOneDUnitTest.java         |    23 +-
 ...cpServerBackwardCompatDUnitDisabledTest.java |   250 -
 .../TcpServerBackwardCompatDUnitTest.java       |   257 +
 .../gemfire/disttx/CacheMapDistTXDUnitTest.java |     5 +-
 .../gemfire/disttx/DistTXDebugDUnitTest.java    |   124 +-
 .../disttx/DistTXPersistentDebugDUnitTest.java  |    19 +-
 .../disttx/DistributedTransactionDUnitTest.java |    37 +-
 .../internal/AbstractConfigJUnitTest.java       |   114 +
 .../gemfire/internal/AvailablePortHelper.java   |     2 +-
 .../internal/AvailablePortJUnitTest.java        |     3 +
 ...wardCompatibilitySerializationDUnitTest.java |     2 +-
 .../gemstone/gemfire/internal/ClassBuilder.java |     1 -
 .../ClassNotFoundExceptionDUnitTest.java        |    15 +-
 .../internal/DataSerializableJUnitTest.java     |    37 +-
 .../gemstone/gemfire/internal/FDDUnitTest.java  |   282 -
 .../gemfire/internal/JSSESocketJUnitTest.java   |     5 +-
 .../gemfire/internal/JarDeployerDUnitTest.java  |    17 +-
 .../internal/PdxDeleteFieldDUnitTest.java       |    13 +-
 .../gemfire/internal/PdxRenameDUnitTest.java    |    13 +-
 .../gemfire/internal/SocketCloserJUnitTest.java |    13 +-
 .../internal/cache/AbstractRegionJUnitTest.java |     2 +-
 .../internal/cache/AbstractRegionMapTest.java   |   186 +
 .../gemfire/internal/cache/BackupDUnitTest.java |    73 +-
 .../internal/cache/Bug33359DUnitTest.java       |    29 +-
 .../internal/cache/Bug33726DUnitTest.java       |    11 +-
 .../internal/cache/Bug37241DUnitTest.java       |    22 +-
 .../internal/cache/Bug37244JUnitTest.java       |     3 -
 .../internal/cache/Bug37377DUnitTest.java       |    21 +-
 .../internal/cache/Bug39079DUnitTest.java       |    24 +-
 .../internal/cache/Bug40299DUnitTest.java       |    18 +-
 .../internal/cache/Bug40632DUnitTest.java       |     9 +-
 .../internal/cache/Bug41091DUnitTest.java       |    25 +-
 .../internal/cache/Bug41733DUnitTest.java       |    22 +-
 .../internal/cache/Bug41957DUnitTest.java       |    42 +-
 .../internal/cache/Bug42010StatsDUnitTest.java  |   531 -
 .../internal/cache/Bug42055DUnitTest.java       |     9 +-
 .../internal/cache/Bug45164DUnitTest.java       |    10 +-
 .../internal/cache/Bug45934DUnitTest.java       |     7 +-
 .../internal/cache/Bug47667DUnitTest.java       |    13 +-
 .../internal/cache/CacheAdvisorDUnitTest.java   |    37 +-
 .../internal/cache/ClearDAckDUnitTest.java      |    58 +-
 .../internal/cache/ClearGlobalDUnitTest.java    |    20 +-
 .../cache/ClientServerGetAllDUnitTest.java      |    65 +-
 ...ServerInvalidAndDestroyedEntryDUnitTest.java |   997 +-
 .../ClientServerTransactionCCEDUnitTest.java    |    17 +-
 .../cache/ClientServerTransactionDUnitTest.java |   101 +-
 .../ConcurrentDestroySubRegionDUnitTest.java    |    14 +-
 .../cache/ConcurrentMapOpsDUnitTest.java        |    38 +-
 .../ConcurrentRegionOperationsJUnitTest.java    |    19 +-
 ...rentRollingAndRegionOperationsJUnitTest.java |     5 +-
 .../internal/cache/ConflationJUnitTest.java     |    12 -
 .../cache/ConnectDisconnectDUnitTest.java       |    28 +-
 .../internal/cache/DeltaFaultInDUnitTest.java   |     9 +-
 .../cache/DeltaPropagationDUnitTest.java        |    41 +-
 .../cache/DeltaPropagationStatsDUnitTest.java   |    24 +-
 .../internal/cache/DeltaSizingDUnitTest.java    |    14 +-
 .../gemfire/internal/cache/DiskIdJUnitTest.java |     2 -
 .../cache/DiskRegByteArrayDUnitTest.java        |    22 +-
 .../cache/DiskRegionClearJUnitTest.java         |     5 +-
 .../internal/cache/DiskRegionHelperFactory.java |     3 -
 .../internal/cache/DiskRegionJUnitTest.java     |   216 +-
 .../internal/cache/DiskRegionTestingBase.java   |     3 -
 ...DistrbutedRegionProfileOffHeapDUnitTest.java |    29 +-
 .../cache/DistributedCacheTestCase.java         |    45 +-
 .../internal/cache/EventTrackerDUnitTest.java   |    34 +-
 .../cache/EvictionDUnitDisabledTest.java        |   240 -
 .../internal/cache/EvictionDUnitTest.java       |   247 +
 .../cache/EvictionObjectSizerDUnitTest.java     |    25 +-
 .../internal/cache/EvictionStatsDUnitTest.java  |    27 +-
 .../internal/cache/EvictionTestBase.java        |    48 +-
 .../cache/FixedPRSinglehopDUnitTest.java        |    36 +-
 .../internal/cache/GIIDeltaDUnitTest.java       |    87 +-
 .../internal/cache/GIIFlowControlDUnitTest.java |    47 +-
 .../internal/cache/GridAdvisorDUnitTest.java    |    58 +-
 .../internal/cache/HABug36773DUnitTest.java     |    32 +-
 .../HAOverflowMemObjectSizerDUnitTest.java      |    21 +-
 .../cache/IncrementalBackupDUnitTest.java       |    26 +-
 .../cache/InterruptClientServerDUnitTest.java   |    18 +-
 .../internal/cache/InterruptsDUnitTest.java     |    15 +-
 .../internal/cache/IteratorDUnitTest.java       |     7 +-
 ...victionAlgoMemoryEnabledRegionJUnitTest.java |   119 +
 .../internal/cache/MapClearGIIDUnitTest.java    |    38 +-
 .../internal/cache/MapInterface2JUnitTest.java  |     7 +-
 .../cache/NetSearchMessagingDUnitTest.java      |    24 +-
 .../cache/OffHeapEvictionDUnitTest.java         |    35 +-
 .../cache/OffHeapEvictionStatsDUnitTest.java    |    26 +-
 .../gemfire/internal/cache/OffHeapTestUtil.java |     2 +-
 .../gemfire/internal/cache/OplogJUnitTest.java  |    26 +-
 .../cache/P2PDeltaPropagationDUnitTest.java     |    13 +-
 .../internal/cache/PRBadToDataDUnitTest.java    |    12 +-
 .../cache/PartitionListenerDUnitTest.java       |     9 +-
 .../cache/PartitionedRegionAPIDUnitTest.java    |    30 +-
 .../PartitionedRegionAsSubRegionDUnitTest.java  |     5 +-
 ...gionBucketCreationDistributionDUnitTest.java |   155 +-
 .../PartitionedRegionCacheCloseDUnitTest.java   |    24 +-
 ...rtitionedRegionCacheXMLExampleDUnitTest.java |     5 +-
 .../PartitionedRegionCreationDUnitTest.java     |    90 +-
 .../cache/PartitionedRegionDUnitTestCase.java   |    35 +-
 .../PartitionedRegionDataStoreJUnitTest.java    |    73 +
 ...rtitionedRegionDelayedRecoveryDUnitTest.java |    20 +-
 .../PartitionedRegionDestroyDUnitTest.java      |    32 +-
 .../PartitionedRegionEntryCountDUnitTest.java   |    12 +-
 .../PartitionedRegionEvictionDUnitTest.java     |    25 +-
 .../cache/PartitionedRegionHADUnitTest.java     |    37 +-
 ...onedRegionHAFailureAndRecoveryDUnitTest.java |    69 +-
 .../PartitionedRegionInvalidateDUnitTest.java   |     7 +-
 ...artitionedRegionLocalMaxMemoryDUnitTest.java |    12 +-
 ...nedRegionLocalMaxMemoryOffHeapDUnitTest.java |    14 +-
 .../PartitionedRegionMultipleDUnitTest.java     |    78 +-
 ...rtitionedRegionOffHeapEvictionDUnitTest.java |    14 +-
 .../cache/PartitionedRegionPRIDDUnitTest.java   |    29 +-
 .../cache/PartitionedRegionQueryDUnitTest.java  |    42 +-
 ...artitionedRegionRedundancyZoneDUnitTest.java |    12 +-
 ...tionedRegionSerializableObjectJUnitTest.java |     4 +-
 .../PartitionedRegionSingleHopDUnitTest.java    |   180 +-
 ...RegionSingleHopWithServerGroupDUnitTest.java |   155 +-
 .../cache/PartitionedRegionSizeDUnitTest.java   |    31 +-
 .../cache/PartitionedRegionStatsDUnitTest.java  |    18 +-
 .../cache/PartitionedRegionTestHelper.java      |    17 -
 .../PartitionedRegionTestUtilsDUnitTest.java    |    33 +-
 .../PartitionedRegionWithSameNameDUnitTest.java |    66 +-
 .../internal/cache/PutAllDAckDUnitTest.java     |    39 +-
 .../internal/cache/PutAllGlobalDUnitTest.java   |    78 +-
 .../cache/RemoteTransactionDUnitTest.java       |    54 +-
 .../internal/cache/RemoveAllDAckDUnitTest.java  |    33 +-
 .../internal/cache/RemoveDAckDUnitTest.java     |    27 +-
 .../internal/cache/RemoveGlobalDUnitTest.java   |    43 +-
 .../cache/SimpleDiskRegionJUnitTest.java        |    13 +-
 .../internal/cache/SingleHopStatsDUnitTest.java |   549 +
 .../internal/cache/SizingFlagDUnitTest.java     |    12 +-
 .../internal/cache/SystemFailureDUnitTest.java  |    29 +-
 .../cache/TXReservationMgrJUnitTest.java        |     5 +-
 .../cache/TransactionsWithDeltaDUnitTest.java   |    18 +-
 .../control/RebalanceOperationDUnitTest.java    |    59 +-
 ...egionOverflowAsyncRollingOpLogJUnitTest.java |     7 +-
 ...RegionOverflowSyncRollingOpLogJUnitTest.java |     7 +-
 .../DiskRegionPerfJUnitPerformanceTest.java     |     6 +-
 ...ltiThreadedOplogPerJUnitPerformanceTest.java |     5 +-
 .../cache/execute/Bug51193DUnitTest.java        |    13 +-
 .../ClientServerFunctionExecutionDUnitTest.java |    90 +-
 .../execute/ColocationFailoverDUnitTest.java    |    36 +-
 ...ributedRegionFunctionExecutionDUnitTest.java |    87 +-
 .../FunctionExecution_ExceptionDUnitTest.java   |     9 +-
 .../execute/FunctionServiceStatsDUnitTest.java  |    66 +-
 .../cache/execute/LocalDataSetDUnitTest.java    |    25 +-
 .../execute/LocalDataSetIndexingDUnitTest.java  |     7 +-
 .../LocalFunctionExecutionDUnitTest.java        |    28 +-
 .../MemberFunctionExecutionDUnitTest.java       |    50 +-
 .../MultiRegionFunctionExecutionDUnitTest.java  |    16 +-
 .../OnGroupsFunctionExecutionDUnitTest.java     |    87 +-
 ...ntServerFunctionExecutionNoAckDUnitTest.java |    40 +-
 ...tServerRegionFunctionExecutionDUnitTest.java |    88 +-
 ...egionFunctionExecutionFailoverDUnitTest.java |    79 +-
 ...onFunctionExecutionNoSingleHopDUnitTest.java |    87 +-
 ...onExecutionSelectorNoSingleHopDUnitTest.java |   103 +-
 ...gionFunctionExecutionSingleHopDUnitTest.java |    97 +-
 .../cache/execute/PRClientServerTestBase.java   |    66 +-
 .../cache/execute/PRColocationDUnitTest.java    |   138 +-
 .../execute/PRCustomPartitioningDUnitTest.java  |    21 +-
 .../execute/PRFunctionExecutionDUnitTest.java   |    71 +-
 .../PRFunctionExecutionTimeOutDUnitTest.java    |    12 +-
 ...ctionExecutionWithResultSenderDUnitTest.java |    12 +-
 .../execute/PRPerformanceTestDUnitTest.java     |    19 +-
 .../cache/execute/PRTransactionDUnitTest.java   |    47 +-
 .../execute/SingleHopGetAllPutAllDUnitTest.java |    26 +-
 .../mock/AlterMockRegionExtensionFunction.java  |     2 +-
 .../extension/mock/MockCacheExtension.java      |     4 +-
 .../functions/DistributedRegionFunction.java    |     7 +-
 .../internal/cache/functions/TestFunction.java  |     9 +-
 .../ha/BlockingHARQAddOperationJUnitTest.java   |    10 +-
 .../cache/ha/BlockingHARegionJUnitTest.java     |    56 +-
 .../cache/ha/Bug36853EventsExpiryDUnitTest.java |    32 +-
 .../internal/cache/ha/Bug48571DUnitTest.java    |    20 +-
 .../internal/cache/ha/Bug48879DUnitTest.java    |    10 +-
 .../internal/cache/ha/ConflatableObject.java    |     2 +-
 .../cache/ha/EventIdOptimizationDUnitTest.java  |    28 +-
 .../cache/ha/EventIdOptimizationJUnitTest.java  |     8 +-
 .../internal/cache/ha/FailoverDUnitTest.java    |    32 +-
 .../internal/cache/ha/HABugInPutDUnitTest.java  |    18 +-
 .../internal/cache/ha/HAClearDUnitTest.java     |    29 +-
 .../cache/ha/HAConflationDUnitTest.java         |    19 +-
 .../internal/cache/ha/HADuplicateDUnitTest.java |    21 +-
 .../cache/ha/HAEventIdPropagationDUnitTest.java |    33 +-
 .../internal/cache/ha/HAExpiryDUnitTest.java    |    26 +-
 .../internal/cache/ha/HAGIIBugDUnitTest.java    |    40 +-
 .../internal/cache/ha/HAGIIDUnitTest.java       |    52 +-
 .../cache/ha/HARQAddOperationJUnitTest.java     |    25 +-
 .../cache/ha/HARQueueNewImplDUnitTest.java      |   115 +-
 .../internal/cache/ha/HARegionDUnitTest.java    |    22 +-
 .../internal/cache/ha/HARegionJUnitTest.java    |     8 +-
 .../cache/ha/HARegionQueueDUnitTest.java        |    46 +-
 .../cache/ha/HARegionQueueJUnitTest.java        |    29 +-
 ...HARegionQueueStartStopJUnitDisabledTest.java |   123 -
 .../cache/ha/HASlowReceiverDUnitTest.java       |    28 +-
 .../ha/OperationsPropagationDUnitTest.java      |    47 +-
 .../internal/cache/ha/PutAllDUnitTest.java      |    22 +-
 .../cache/ha/StatsBugDUnitDisabledTest.java     |   368 -
 .../internal/cache/ha/StatsBugDUnitTest.java    |   375 +
 .../cache/locks/TXLockServiceDUnitTest.java     |    78 +-
 .../cache/partitioned/Bug39356DUnitTest.java    |    14 +-
 .../cache/partitioned/Bug43684DUnitTest.java    |    16 +-
 .../cache/partitioned/Bug47388DUnitTest.java    |    16 +-
 .../cache/partitioned/Bug51400DUnitTest.java    |    18 +-
 .../partitioned/ElidedPutAllDUnitTest.java      |    12 +-
 .../partitioned/PartitionResolverDUnitTest.java |    12 +-
 .../PartitionedRegionLoadModelJUnitTest.java    |     6 +-
 .../PartitionedRegionLoaderWriterDUnitTest.java |    14 +-
 ...rtitionedRegionMetaDataCleanupDUnitTest.java |    26 +-
 .../partitioned/PersistPRKRFDUnitTest.java      |    29 +-
 ...tentColocatedPartitionedRegionDUnitTest.java |    56 +-
 .../PersistentPartitionedRegionDUnitTest.java   |    83 +-
 .../PersistentPartitionedRegionTestBase.java    |    38 +-
 ...rtitionedRegionWithTransactionDUnitTest.java |    34 +-
 .../cache/partitioned/ShutdownAllDUnitTest.java |    36 +-
 ...treamingPartitionOperationManyDUnitTest.java |    29 +-
 ...StreamingPartitionOperationOneDUnitTest.java |    37 +-
 .../fixed/FixedPartitioningDUnitTest.java       |    71 +-
 .../fixed/FixedPartitioningTestBase.java        |   188 +-
 ...ngWithColocationAndPersistenceDUnitTest.java |   161 +-
 .../PersistentRVVRecoveryDUnitTest.java         |    39 +-
 .../PersistentRecoveryOrderDUnitTest.java       |   130 +-
 ...rsistentRecoveryOrderOldConfigDUnitTest.java |     7 +-
 .../PersistentReplicatedTestBase.java           |    21 +-
 .../RegionEntryFactoryBuilderJUnitTest.java     |    85 +
 .../internal/cache/tier/Bug40396DUnitTest.java  |    16 +-
 ...mpatibilityHigherVersionClientDUnitTest.java |    16 +-
 .../cache/tier/sockets/Bug36269DUnitTest.java   |    31 +-
 .../cache/tier/sockets/Bug36457DUnitTest.java   |    23 +-
 .../cache/tier/sockets/Bug36805DUnitTest.java   |    21 +-
 .../cache/tier/sockets/Bug36829DUnitTest.java   |    20 +-
 .../cache/tier/sockets/Bug36995DUnitTest.java   |    26 +-
 .../cache/tier/sockets/Bug37210DUnitTest.java   |    31 +-
 .../cache/tier/sockets/Bug37805DUnitTest.java   |    13 +-
 .../CacheServerMaxConnectionsJUnitTest.java     |    12 +-
 .../cache/tier/sockets/CacheServerTestUtil.java |    39 +-
 .../CacheServerTransactionsDUnitTest.java       |   122 +-
 .../tier/sockets/ClearPropagationDUnitTest.java |    37 +-
 .../tier/sockets/ClientConflationDUnitTest.java |    38 +-
 .../sockets/ClientHealthMonitorJUnitTest.java   |    12 +-
 .../sockets/ClientInterestNotifyDUnitTest.java  |    36 +-
 .../tier/sockets/ClientServerMiscDUnitTest.java |   120 +-
 .../cache/tier/sockets/ConflationDUnitTest.java |    72 +-
 .../tier/sockets/ConnectionProxyJUnitTest.java  |    15 +-
 .../DataSerializerPropogationDUnitTest.java     |   125 +-
 .../DestroyEntryPropagationDUnitTest.java       |    53 +-
 .../sockets/DurableClientBug39997DUnitTest.java |    17 +-
 .../DurableClientQueueSizeDUnitTest.java        |    18 +-
 .../DurableClientReconnectAutoDUnitTest.java    |     9 +-
 .../DurableClientReconnectDUnitTest.java        |   100 +-
 .../sockets/DurableClientStatsDUnitTest.java    |    50 +-
 .../sockets/DurableRegistrationDUnitTest.java   |    67 +-
 .../sockets/DurableResponseMatrixDUnitTest.java |    37 +-
 .../sockets/EventIDVerificationDUnitTest.java   |    28 +-
 .../EventIDVerificationInP2PDUnitTest.java      |    23 +-
 .../FilterProfileIntegrationJUnitTest.java      |   110 +
 .../tier/sockets/FilterProfileJUnitTest.java    |   422 +-
 .../ForceInvalidateEvictionDUnitTest.java       |    19 +-
 ...ForceInvalidateOffHeapEvictionDUnitTest.java |    14 +-
 .../cache/tier/sockets/HABug36738DUnitTest.java |    23 +-
 .../cache/tier/sockets/HAInterestBaseTest.java  |  1015 -
 .../tier/sockets/HAInterestPart1DUnitTest.java  |   126 +-
 .../tier/sockets/HAInterestPart2DUnitTest.java  |   150 +-
 .../cache/tier/sockets/HAInterestTestCase.java  |  1021 +
 .../sockets/HAStartupAndFailoverDUnitTest.java  |    68 +-
 .../InstantiatorPropagationDUnitTest.java       |  1775 ++
 .../tier/sockets/InterestListDUnitTest.java     |   322 +-
 .../sockets/InterestListEndpointDUnitTest.java  |    36 +-
 .../sockets/InterestListFailoverDUnitTest.java  |    30 +-
 .../sockets/InterestListRecoveryDUnitTest.java  |    51 +-
 .../sockets/InterestRegrListenerDUnitTest.java  |    58 +-
 .../sockets/InterestResultPolicyDUnitTest.java  |    33 +-
 .../sockets/NewRegionAttributesDUnitTest.java   |    16 +-
 .../sockets/RedundancyLevelPart1DUnitTest.java  |    66 +-
 .../sockets/RedundancyLevelPart2DUnitTest.java  |    60 +-
 .../sockets/RedundancyLevelPart3DUnitTest.java  |    19 +-
 .../tier/sockets/RedundancyLevelTestBase.java   |    62 +-
 .../tier/sockets/RegionCloseDUnitTest.java      |    29 +-
 ...erInterestBeforeRegionCreationDUnitTest.java |    21 +-
 .../sockets/RegisterInterestKeysDUnitTest.java  |    29 +-
 .../sockets/ReliableMessagingDUnitTest.java     |    44 +-
 .../internal/cache/tier/sockets/TestFilter.java |    58 +
 .../sockets/UnregisterInterestDUnitTest.java    |    22 +-
 .../sockets/UpdatePropagationDUnitTest.java     |    60 +-
 .../VerifyEventIDGenerationInP2PDUnitTest.java  |    17 +-
 ...UpdatesFromNonInterestEndPointDUnitTest.java |    28 +-
 .../tier/sockets/command/CommitCommandTest.java |     8 +-
 .../versions/RegionVersionHolder2JUnitTest.java |   178 +
 .../versions/RegionVersionVectorJUnitTest.java  |     5 +-
 .../cache/wan/AsyncEventQueueTestBase.java      |  1671 ++
 .../cache/wan/CompressionConstants.java         |    37 -
 .../cache/wan/CompressionInputStream.java       |   147 -
 .../cache/wan/CompressionOutputStream.java      |   123 -
 .../asyncqueue/AsyncEventListenerDUnitTest.java |  1922 ++
 .../AsyncEventListenerOffHeapDUnitTest.java     |    33 +
 .../AsyncEventQueueStatsDUnitTest.java          |   320 +
 .../ConcurrentAsyncEventQueueDUnitTest.java     |   336 +
 ...ncurrentAsyncEventQueueOffHeapDUnitTest.java |    32 +
 .../CommonParallelAsyncEventQueueDUnitTest.java |    61 +
 ...ParallelAsyncEventQueueOffHeapDUnitTest.java |    32 +
 .../ParallelGatewaySenderQueueJUnitTest.java    |    87 +
 ...ialGatewaySenderEventProcessorJUnitTest.java |    42 +
 .../CompressionCacheConfigDUnitTest.java        |    32 +-
 .../CompressionCacheListenerDUnitTest.java      |    18 +-
 ...ompressionCacheListenerOffHeapDUnitTest.java |    14 +-
 .../CompressionRegionConfigDUnitTest.java       |    33 +-
 .../CompressionRegionFactoryDUnitTest.java      |    14 +-
 .../CompressionRegionOperationsDUnitTest.java   |    34 +-
 ...ressionRegionOperationsOffHeapDUnitTest.java |    15 +-
 .../compression/CompressionStatsDUnitTest.java  |     9 +-
 .../compression/SnappyCompressorJUnitTest.java  |     2 +-
 .../internal/jta/dunit/CommitThread.java        |    28 +-
 .../internal/jta/dunit/ExceptionsDUnitTest.java |    56 +-
 .../jta/dunit/IdleTimeOutDUnitTest.java         |   101 +-
 .../jta/dunit/LoginTimeOutDUnitTest.java        |    25 +-
 .../jta/dunit/MaxPoolSizeDUnitTest.java         |    85 +-
 .../internal/jta/dunit/RollbackThread.java      |    28 +-
 .../jta/dunit/TransactionTimeOutDUnitTest.java  |    46 +-
 .../dunit/TxnManagerMultiThreadDUnitTest.java   |   127 +-
 .../internal/jta/dunit/TxnTimeOutDUnitTest.java |    73 +-
 .../jta/functional/TestXACacheLoader.java       |     3 -
 .../DistributedSystemLogFileJUnitTest.java      |    19 +-
 .../logging/LocatorLogFileJUnitTest.java        |     7 +-
 .../logging/LogWriterPerformanceTest.java       |     6 +
 .../logging/MergeLogFilesJUnitTest.java         |     5 +-
 .../log4j/Log4J2DisabledPerformanceTest.java    |     6 +
 .../logging/log4j/Log4J2PerformanceTest.java    |    16 +-
 .../LogWriterLoggerDisabledPerformanceTest.java |     6 +
 .../log4j/LogWriterLoggerPerformanceTest.java   |    16 +-
 .../offheap/AbstractStoredObjectTestBase.java   |   203 +
 .../offheap/ChunkWithHeapFormJUnitTest.java     |    64 +
 .../offheap/DataAsAddressJUnitTest.java         |   368 +
 .../internal/offheap/DataTypeJUnitTest.java     |   300 +-
 ...tingOutOfOffHeapMemoryListenerJUnitTest.java |   100 +
 .../offheap/GemFireChunkFactoryJUnitTest.java   |   129 +
 .../internal/offheap/GemFireChunkJUnitTest.java |   921 +
 .../offheap/GemFireChunkSliceJUnitTest.java     |    72 +
 .../offheap/LifecycleListenerJUnitTest.java     |   230 +
 .../offheap/MemoryChunkJUnitTestBase.java       |    61 +
 .../internal/offheap/MemoryChunkTestSuite.java  |    32 +
 .../offheap/MemoryInspectorImplJUnitTest.java   |   142 +
 .../offheap/OffHeapHelperJUnitTest.java         |   314 +
 .../internal/offheap/OffHeapRegionBase.java     |     5 +-
 .../OffHeapRegionEntryHelperJUnitTest.java      |   870 +
 .../offheap/OffHeapStorageJUnitTest.java        |   206 +
 .../offheap/OffHeapValidationJUnitTest.java     |    10 +-
 .../offheap/OutOfOffHeapMemoryDUnitTest.java    |   157 +-
 .../offheap/RefCountChangeInfoJUnitTest.java    |   207 +
 ...moryAllocatorFillPatternIntegrationTest.java |   246 +
 ...mpleMemoryAllocatorFillPatternJUnitTest.java |   378 +-
 .../offheap/SimpleMemoryAllocatorJUnitTest.java |    26 +-
 ...moryAllocatorLifecycleListenerJUnitTest.java |   147 -
 .../internal/offheap/StoredObjectTestSuite.java |    33 +
 .../offheap/SyncChunkStackJUnitTest.java        |   289 +
 .../offheap/UnsafeMemoryChunkJUnitTest.java     |    56 +
 ...leProcessControllerIntegrationJUnitTest.java |   155 +
 .../process/LocalProcessLauncherDUnitTest.java  |    11 +-
 .../internal/process/PidFileJUnitTest.java      |     2 +-
 .../statistics/StatisticsDUnitTest.java         |    30 +-
 .../statistics/ValueMonitorJUnitTest.java       |     6 +-
 .../internal/tcp/ConnectionJUnitTest.java       |     3 +-
 .../internal/util/ArrayUtilsJUnitTest.java      |     1 -
 .../gemfire/internal/util/SerializableImpl.java |     2 -
 .../util/SerializableImplWithValue.java         |     2 -
 .../management/CacheManagementDUnitTest.java    |    45 +-
 .../management/ClientHealthStatsDUnitTest.java  |    40 +-
 .../management/CompositeTypeTestDUnitTest.java  |    20 +-
 .../management/DLockManagementDUnitTest.java    |    29 +-
 .../management/DiskManagementDUnitTest.java     |    56 +-
 .../management/DistributedSystemDUnitTest.java  |    55 +-
 .../management/LocatorManagementDUnitTest.java  |    42 +-
 .../gemstone/gemfire/management/MBeanUtil.java  |    29 +-
 .../gemfire/management/ManagementTestBase.java  |    67 +-
 .../MemberMBeanAttributesDUnitTest.java         |    12 +-
 .../management/OffHeapManagementDUnitTest.java  |    15 +-
 .../gemfire/management/QueryDataDUnitTest.java  |    32 +-
 .../management/RegionManagementDUnitTest.java   |    76 +-
 ...ersalMembershipListenerAdapterDUnitTest.java |   166 +-
 .../stats/DistributedSystemStatsDUnitTest.java  |    14 +-
 .../bean/stats/GatewayMBeanBridgeJUnitTest.java |   108 +
 .../bean/stats/GatewaySenderStatsJUnitTest.java |   101 -
 .../internal/cli/CliUtilDUnitTest.java          |    48 +-
 .../internal/cli/GfshParserJUnitTest.java       |    52 +-
 .../management/internal/cli/HeadlessGfsh.java   |   374 +
 .../internal/cli/HeadlessGfshJUnitTest.java     |    86 +
 .../management/internal/cli/ResultHandler.java  |    23 +
 .../internal/cli/TableBuilderJUnitTest.java     |   314 +
 .../cli/commands/CliCommandTestBase.java        |   565 +
 .../cli/commands/ConfigCommandsDUnitTest.java   |   502 +
 ...eateAlterDestroyRegionCommandsDUnitTest.java |  1150 ++
 .../cli/commands/DeployCommandsDUnitTest.java   |   480 +
 .../commands/DiskStoreCommandsDUnitTest.java    |  1157 ++
 .../cli/commands/FunctionCommandsDUnitTest.java |   595 +
 .../commands/GemfireDataCommandsDUnitTest.java  |  2088 ++
 ...WithCacheLoaderDuringCacheMissDUnitTest.java |   374 +
 .../cli/commands/IndexCommandsDUnitTest.java    |   815 +
 ...stAndDescribeDiskStoreCommandsDUnitTest.java |   194 +
 .../ListAndDescribeRegionDUnitTest.java         |   321 +
 .../cli/commands/ListIndexCommandDUnitTest.java |   669 +
 .../cli/commands/MemberCommandsDUnitTest.java   |   288 +
 .../MiscellaneousCommandsDUnitTest.java         |   498 +
 ...laneousCommandsExportLogsPart1DUnitTest.java |   140 +
 ...laneousCommandsExportLogsPart2DUnitTest.java |   144 +
 ...laneousCommandsExportLogsPart3DUnitTest.java |   151 +
 ...laneousCommandsExportLogsPart4DUnitTest.java |   137 +
 .../cli/commands/QueueCommandsDUnitTest.java    |   392 +
 .../SharedConfigurationCommandsDUnitTest.java   |   341 +
 .../cli/commands/ShellCommandsDUnitTest.java    |   367 +
 .../cli/commands/ShowDeadlockDUnitTest.java     |   274 +
 .../cli/commands/ShowMetricsDUnitTest.java      |   345 +
 .../cli/commands/ShowStackTraceDUnitTest.java   |   150 +
 .../cli/commands/UserCommandsDUnitTest.java     |   164 +
 .../cli/shell/GfshHistoryJUnitTest.java         |    89 +
 .../SharedConfigurationDUnitTest.java           |    30 +-
 .../configuration/utils/XmlUtilsJUnitTest.java  |     8 +-
 .../internal/pulse/TestClientIdsDUnitTest.java  |    38 +-
 .../internal/pulse/TestFunctionsDUnitTest.java  |    15 +-
 .../internal/pulse/TestHeapDUnitTest.java       |    14 +-
 .../internal/pulse/TestLocatorsDUnitTest.java   |    11 +-
 .../pulse/TestSubscriptionsDUnitTest.java       |    34 +-
 ...rDistributedSystemMXBeanIntegrationTest.java |    50 +
 ...horizeOperationForMBeansIntegrationTest.java |   325 +
 ...erationForRegionCommandsIntegrationTest.java |   136 +
 .../internal/security/CLISecurityDUnitTest.java |     8 +-
 .../internal/security/CommandTestBase.java      |    19 +-
 .../internal/security/HeadlessGfsh.java         |   337 -
 .../security/HeadlessGfshJUnitTest.java         |    75 -
 .../security/IntegratedSecDUnitTest.java        |    75 +-
 ...tionCodesForDataCommandsIntegrationTest.java |   101 +
 ...tionCodesForDistributedSystemMXBeanTest.java |    76 +
 .../security/RESTAdminAPISecurityDUnitTest.java |     6 +-
 .../internal/security/ResultHandler.java        |     7 -
 .../WanCommandsControllerJUnitTest.java         |     4 +-
 .../ClientsWithVersioningRetryDUnitTest.java    |    46 +-
 .../pdx/DistributedSystemIdDUnitTest.java       |    12 +-
 .../pdx/JSONPdxClientServerDUnitTest.java       |    15 +-
 .../pdx/PDXAsyncEventQueueDUnitTest.java        |     7 +-
 .../gemfire/pdx/PdxClientServerDUnitTest.java   |    25 +-
 .../pdx/PdxDeserializationDUnitTest.java        |    18 +-
 .../gemfire/pdx/PdxSerializableDUnitTest.java   |     9 +-
 .../gemfire/pdx/PdxTypeExportDUnitTest.java     |     8 +-
 .../gemfire/pdx/VersionClassLoader.java         |    24 +-
 .../gemfire/redis/ConcurrentStartTest.java      |    34 +-
 .../gemfire/redis/RedisDistDUnitTest.java       |    25 +-
 .../web/controllers/RestAPITestBase.java        |    17 +-
 .../security/ClientAuthenticationDUnitTest.java |   967 +
 .../ClientAuthenticationPart2DUnitTest.java     |    88 +
 .../security/ClientAuthorizationDUnitTest.java  |   797 +
 .../security/ClientAuthorizationTestBase.java   |  1388 ++
 .../security/ClientMultiUserAuthzDUnitTest.java |   535 +
 .../DeltaClientAuthorizationDUnitTest.java      |   337 +
 .../DeltaClientPostAuthorizationDUnitTest.java  |   542 +
 .../security/P2PAuthenticationDUnitTest.java    |   623 +
 .../gemfire/security/SecurityTestUtil.java      |  1875 ++
 .../com/gemstone/gemfire/test/dunit/Assert.java |    66 +
 .../gemfire/test/dunit/AsyncInvocation.java     |   216 +
 .../gemstone/gemfire/test/dunit/DUnitEnv.java   |    78 +
 .../gemfire/test/dunit/DebuggerUtils.java       |    52 +
 .../gemfire/test/dunit/DistributedTestCase.java |   532 +
 .../test/dunit/DistributedTestUtils.java        |   167 +
 .../com/gemstone/gemfire/test/dunit/Host.java   |   213 +
 .../gemfire/test/dunit/IgnoredException.java    |   200 +
 .../com/gemstone/gemfire/test/dunit/Invoke.java |   160 +
 .../com/gemstone/gemfire/test/dunit/Jitter.java |    87 +
 .../gemfire/test/dunit/LogWriterUtils.java      |   111 +
 .../gemfire/test/dunit/NetworkUtils.java        |    69 +
 .../gemfire/test/dunit/RMIException.java        |   170 +
 .../gemfire/test/dunit/RepeatableRunnable.java  |    31 +
 .../test/dunit/SerializableCallable.java        |    70 +
 .../test/dunit/SerializableCallableIF.java      |    26 +
 .../test/dunit/SerializableRunnable.java        |    91 +
 .../test/dunit/SerializableRunnableIF.java      |    25 +
 .../test/dunit/StoppableWaitCriterion.java      |    35 +
 .../gemfire/test/dunit/ThreadUtils.java         |   155 +
 .../com/gemstone/gemfire/test/dunit/VM.java     |  1357 ++
 .../com/gemstone/gemfire/test/dunit/Wait.java   |   204 +
 .../gemfire/test/dunit/WaitCriterion.java       |    33 +
 .../dunit/rules/DistributedDisconnectRule.java  |   121 +
 .../rules/DistributedExternalResource.java      |    58 +
 .../DistributedRestoreSystemProperties.java     |    74 +
 .../gemfire/test/dunit/rules/RemoteInvoker.java |    39 +
 .../test/dunit/standalone/BounceResult.java     |    36 +
 .../gemfire/test/dunit/standalone/ChildVM.java  |    81 +
 .../test/dunit/standalone/DUnitLauncher.java    |   464 +
 .../test/dunit/standalone/ProcessManager.java   |   259 +
 .../test/dunit/standalone/RemoteDUnitVM.java    |   143 +
 .../test/dunit/standalone/RemoteDUnitVMIF.java  |    36 +
 .../dunit/standalone/StandAloneDUnitEnv.java    |    74 +
 .../test/dunit/tests/BasicDUnitTest.java        |   158 +
 .../tests/GetDefaultDiskStoreNameDUnitTest.java |    67 +
 .../dunit/tests/GetTestMethodNameDUnitTest.java |    54 +
 .../gemfire/test/dunit/tests/VMDUnitTest.java   |   241 +
 .../test/process/ProcessOutputReader.java       |   101 +-
 .../test/process/ProcessStreamReader.java       |    42 +-
 .../gemfire/test/process/ProcessWrapper.java    |    10 +-
 .../gemstone/gemfire/util/JSR166TestCase.java   |    17 +-
 .../gemstone/persistence/logging/Logger.java    |     2 +-
 .../gemfire/GemfireSequenceDisplay.java         |     7 -
 .../src/test/java/dunit/AsyncInvocation.java    |   217 -
 .../src/test/java/dunit/BounceResult.java       |    36 -
 gemfire-core/src/test/java/dunit/DUnitEnv.java  |    78 -
 .../test/java/dunit/DistributedTestCase.java    |  1436 --
 gemfire-core/src/test/java/dunit/Host.java      |   208 -
 .../src/test/java/dunit/RMIException.java       |   170 -
 .../src/test/java/dunit/RemoteDUnitVMIF.java    |    34 -
 .../src/test/java/dunit/RepeatableRunnable.java |    29 -
 .../test/java/dunit/SerializableCallable.java   |    70 -
 .../test/java/dunit/SerializableRunnable.java   |    92 -
 gemfire-core/src/test/java/dunit/VM.java        |  1344 --
 .../src/test/java/dunit/standalone/ChildVM.java |    73 -
 .../java/dunit/standalone/DUnitLauncher.java    |   439 -
 .../java/dunit/standalone/ProcessManager.java   |   249 -
 .../java/dunit/standalone/RemoteDUnitVM.java    |   145 -
 .../dunit/standalone/StandAloneDUnitEnv.java    |    75 -
 .../test/java/dunit/tests/BasicDUnitTest.java   |   132 -
 .../src/test/java/dunit/tests/TestFailure.java  |    50 -
 .../src/test/java/dunit/tests/VMDUnitTest.java  |   237 -
 gemfire-core/src/test/java/hydra/GsRandom.java  |     1 -
 .../src/test/java/hydra/MethExecutor.java       |     3 +-
 .../src/test/java/hydra/log/AnyLogWriter.java   |     4 +-
 .../java/security/AuthzCredentialGenerator.java |   462 +
 .../test/java/security/CredentialGenerator.java |   343 +
 .../security/DummyAuthzCredentialGenerator.java |   145 +
 .../java/security/DummyCredentialGenerator.java |    94 +
 .../security/LdapUserCredentialGenerator.java   |   160 +
 .../java/security/PKCSCredentialGenerator.java  |   112 +
 .../java/security/SSLCredentialGenerator.java   |   117 +
 .../UserPasswordWithExtraPropsAuthInit.java     |    77 +
 .../security/XmlAuthzCredentialGenerator.java   |   264 +
 .../ClientCacheFactoryJUnitTest_single_pool.xml |    17 +
 .../gemfire/cache/query/dunit/IndexCreation.xml |    17 +
 .../functional/index-creation-with-eviction.xml |    17 +
 .../index-creation-without-eviction.xml         |    17 +
 .../functional/index-recovery-overflow.xml      |    17 +
 .../query/internal/index/cachequeryindex.xml    |    17 +
 .../internal/index/cachequeryindexwitherror.xml |    17 +
 .../cache/query/partitioned/PRIndexCreation.xml |    17 +
 .../gemfire/cache30/attributesUnordered.xml     |    17 +
 .../com/gemstone/gemfire/cache30/badFloat.xml   |    17 +
 .../com/gemstone/gemfire/cache30/badInt.xml     |    17 +
 .../gemfire/cache30/badKeyConstraintClass.xml   |    17 +
 .../com/gemstone/gemfire/cache30/badScope.xml   |    17 +
 .../com/gemstone/gemfire/cache30/bug44710.xml   |    17 +
 .../gemfire/cache30/callbackNotDeclarable.xml   |    17 +
 .../gemfire/cache30/callbackWithException.xml   |    17 +
 .../com/gemstone/gemfire/cache30/coLocation.xml |    17 +
 .../gemstone/gemfire/cache30/coLocation3.xml    |    17 +
 .../com/gemstone/gemfire/cache30/ewtest.xml     |    17 +
 .../cache30/examples_3_0/example-cache.xml      |    16 +
 .../cache30/examples_4_0/example-cache.xml      |    16 +
 .../gemfire/cache30/loaderNotLoader.xml         |    17 +
 .../com/gemstone/gemfire/cache30/malformed.xml  |    17 +
 .../gemfire/cache30/namedAttributes.xml         |    17 +
 .../gemfire/cache30/partitionedRegion.xml       |    17 +
 .../gemfire/cache30/partitionedRegion51.xml     |    17 +
 .../gemstone/gemfire/cache30/sameRootRegion.xml |    17 +
 .../gemstone/gemfire/cache30/sameSubregion.xml  |    17 +
 .../gemfire/cache30/unknownNamedAttributes.xml  |    17 +
 .../sanctionedDataSerializables.txt             |   240 +-
 .../codeAnalysis/sanctionedSerializables.txt    |     4 +-
 .../internal/SharedConfigurationJUnitTest.xml   |    17 +
 .../internal/cache/BackupJUnitTest.cache.xml    |    17 +
 .../internal/cache/DiskRegCacheXmlJUnitTest.xml |    16 +
 .../cache/PartitionRegionCacheExample1.xml      |    17 +
 .../cache/PartitionRegionCacheExample2.xml      |    17 +
 .../incorrect_bytes_threshold.xml               |    17 +
 .../faultyDiskXMLsForTesting/incorrect_dir.xml  |    17 +
 .../incorrect_dir_size.xml                      |    17 +
 .../incorrect_max_oplog_size.xml                |    17 +
 .../incorrect_roll_oplogs_value.xml             |    17 +
 .../incorrect_sync_value.xml                    |    17 +
 .../incorrect_time_interval.xml                 |    17 +
 .../mixed_diskstore_diskdir.xml                 |    17 +
 .../mixed_diskstore_diskwriteattrs.xml          |    17 +
 .../tier/sockets/RedundancyLevelJUnitTest.xml   |    16 +
 ...testDTDFallbackWithNonEnglishLocal.cache.xml |    17 +
 .../gemstone/gemfire/internal/jta/cachejta.xml  |    17 +
 .../src/test/resources/jta/cachejta.xml         |    17 +
 .../src/test/resources/lib/authz-dummy.xml      |   126 +
 .../src/test/resources/lib/authz-ldap.xml       |    85 +
 .../resources/lib/authz-multiUser-dummy.xml     |   106 +
 .../test/resources/lib/authz-multiUser-ldap.xml |    83 +
 .../test/resources/lib/keys/gemfire1.keystore   |   Bin 0 -> 1536 bytes
 .../test/resources/lib/keys/gemfire10.keystore  |   Bin 0 -> 1546 bytes
 .../test/resources/lib/keys/gemfire11.keystore  |   Bin 0 -> 1546 bytes
 .../test/resources/lib/keys/gemfire2.keystore   |   Bin 0 -> 1536 bytes
 .../test/resources/lib/keys/gemfire3.keystore   |   Bin 0 -> 1536 bytes
 .../test/resources/lib/keys/gemfire4.keystore   |   Bin 0 -> 1536 bytes
 .../test/resources/lib/keys/gemfire5.keystore   |   Bin 0 -> 1536 bytes
 .../test/resources/lib/keys/gemfire6.keystore   |   Bin 0 -> 1536 bytes
 .../test/resources/lib/keys/gemfire7.keystore   |   Bin 0 -> 1536 bytes
 .../test/resources/lib/keys/gemfire8.keystore   |   Bin 0 -> 1536 bytes
 .../test/resources/lib/keys/gemfire9.keystore   |   Bin 0 -> 1536 bytes
 .../resources/lib/keys/ibm/gemfire1.keystore    |   Bin 0 -> 1426 bytes
 .../resources/lib/keys/ibm/gemfire10.keystore   |   Bin 0 -> 1434 bytes
 .../resources/lib/keys/ibm/gemfire11.keystore   |   Bin 0 -> 1434 bytes
 .../resources/lib/keys/ibm/gemfire2.keystore    |   Bin 0 -> 1434 bytes
 .../resources/lib/keys/ibm/gemfire3.keystore    |   Bin 0 -> 1426 bytes
 .../resources/lib/keys/ibm/gemfire4.keystore    |   Bin 0 -> 1434 bytes
 .../resources/lib/keys/ibm/gemfire5.keystore    |   Bin 0 -> 1434 bytes
 .../resources/lib/keys/ibm/gemfire6.keystore    |   Bin 0 -> 1434 bytes
 .../resources/lib/keys/ibm/gemfire7.keystore    |   Bin 0 -> 1426 bytes
 .../resources/lib/keys/ibm/gemfire8.keystore    |   Bin 0 -> 1434 bytes
 .../resources/lib/keys/ibm/gemfire9.keystore    |   Bin 0 -> 1426 bytes
 .../test/resources/lib/keys/ibm/publickeyfile   |   Bin 0 -> 4535 bytes
 .../src/test/resources/lib/keys/publickeyfile   |   Bin 0 -> 4535 bytes
 .../resources/spring/spring-gemfire-context.xml |    17 +
 .../src/test/resources/ssl/untrusted.keystore   |   Bin 0 -> 1181 bytes
 gemfire-cq/build.gradle                         |    23 +
 .../cache/client/internal/CloseCQOp.java        |    72 +
 .../cache/client/internal/CreateCQOp.java       |   163 +
 .../cache/client/internal/CreateCQWithIROp.java |    92 +
 .../cache/client/internal/GetDurableCQsOp.java  |   135 +
 .../client/internal/ServerCQProxyImpl.java      |   111 +
 .../gemfire/cache/client/internal/StopCQOp.java |    72 +
 .../cache/query/internal/cq/ClientCQImpl.java   |   615 +
 .../internal/cq/CqAttributesMutatorImpl.java    |    68 +
 .../cache/query/internal/cq/CqConflatable.java  |   223 +
 .../cache/query/internal/cq/CqEventImpl.java    |   162 +
 .../cache/query/internal/cq/CqListenerImpl.java |    56 +
 .../cache/query/internal/cq/CqQueryImpl.java    |   383 +
 .../query/internal/cq/CqServiceFactoryImpl.java |    69 +
 .../cache/query/internal/cq/CqServiceImpl.java  |  2087 ++
 .../internal/cq/CqServiceStatisticsImpl.java    |   100 +
 .../query/internal/cq/CqServiceVsdStats.java    |   411 +
 .../query/internal/cq/CqStatisticsImpl.java     |    75 +
 .../cache/query/internal/cq/ServerCQImpl.java   |   655 +
 .../tier/sockets/command/BaseCQCommand.java     |    59 +
 .../cache/tier/sockets/command/CloseCQ.java     |   131 +
 .../cache/tier/sockets/command/ExecuteCQ.java   |   168 +
 .../cache/tier/sockets/command/ExecuteCQ61.java |   220 +
 .../cache/tier/sockets/command/GetCQStats.java  |   100 +
 .../tier/sockets/command/GetDurableCQs.java     |   143 +
 .../cache/tier/sockets/command/MonitorCQ.java   |   100 +
 .../cache/tier/sockets/command/StopCQ.java      |   135 +
 ...cache.query.internal.cq.spi.CqServiceFactory |    15 +
 .../gemfire/cache/query/cq/CQJUnitTest.java     |   150 +
 .../cache/query/cq/dunit/CqDataDUnitTest.java   |  1168 ++
 .../dunit/CqDataOptimizedExecuteDUnitTest.java  |    55 +
 .../cq/dunit/CqDataUsingPoolDUnitTest.java      |  1574 ++
 ...qDataUsingPoolOptimizedExecuteDUnitTest.java |    54 +
 .../cache/query/cq/dunit/CqPerfDUnitTest.java   |  1049 +
 .../cq/dunit/CqPerfUsingPoolDUnitTest.java      |  1009 +
 .../cache/query/cq/dunit/CqQueryDUnitTest.java  |  4009 ++++
 .../dunit/CqQueryOptimizedExecuteDUnitTest.java |   314 +
 .../cq/dunit/CqQueryUsingPoolDUnitTest.java     |  3328 ++++
 ...QueryUsingPoolOptimizedExecuteDUnitTest.java |    50 +
 .../cq/dunit/CqResultSetUsingPoolDUnitTest.java |  1144 ++
 ...ltSetUsingPoolOptimizedExecuteDUnitTest.java |   235 +
 .../cache/query/cq/dunit/CqStateDUnitTest.java  |   138 +
 .../cache/query/cq/dunit/CqStatsDUnitTest.java  |   445 +
 .../dunit/CqStatsOptimizedExecuteDUnitTest.java |    50 +
 .../cq/dunit/CqStatsUsingPoolDUnitTest.java     |   456 +
 ...StatsUsingPoolOptimizedExecuteDUnitTest.java |    50 +
 .../query/cq/dunit/CqTimeTestListener.java      |   266 +
 .../PartitionedRegionCqQueryDUnitTest.java      |  1792 ++
 ...dRegionCqQueryOptimizedExecuteDUnitTest.java |   248 +
 .../query/cq/dunit/PrCqUsingPoolDUnitTest.java  |  2033 ++
 .../PrCqUsingPoolOptimizedExecuteDUnitTest.java |    50 +
 .../cache/query/dunit/PdxQueryCQDUnitTest.java  |   706 +
 .../cache/query/dunit/PdxQueryCQTestBase.java   |   496 +
 .../dunit/QueryIndexUpdateRIDUnitTest.java      |   823 +
 .../query/dunit/QueryMonitorDUnitTest.java      |  1300 ++
 .../cache/snapshot/ClientSnapshotDUnitTest.java |   286 +
 .../AnalyzeCQSerializablesJUnitTest.java        |    79 +
 .../cache/PRDeltaPropagationDUnitTest.java      |  1215 ++
 .../internal/cache/PutAllCSDUnitTest.java       |  4426 +++++
 .../cache/RemoteCQTransactionDUnitTest.java     |  1119 ++
 .../internal/cache/ha/CQListGIIDUnitTest.java   |   821 +
 .../cache/ha/HADispatcherDUnitTest.java         |   696 +
 .../sockets/ClientToServerDeltaDUnitTest.java   |  1040 +
 .../DeltaPropagationWithCQDUnitTest.java        |   344 +
 ...ToRegionRelationCQRegistrationDUnitTest.java |   784 +
 .../sockets/DurableClientCrashDUnitTest.java    |    99 +
 .../sockets/DurableClientNetDownDUnitTest.java  |    79 +
 .../sockets/DurableClientSimpleDUnitTest.java   |  3409 ++++
 .../tier/sockets/DurableClientTestCase.java     |  2100 ++
 .../CacheServerManagementDUnitTest.java         |   575 +
 .../cli/commands/ClientCommandsDUnitTest.java   |  1446 ++
 .../DurableClientCommandsDUnitTest.java         |   436 +
 .../internal/pulse/TestCQDUnitTest.java         |   147 +
 .../internal/pulse/TestClientsDUnitTest.java    |   108 +
 .../internal/pulse/TestServerDUnitTest.java     |    98 +
 .../ClientAuthorizationTwoDUnitTest.java        |   243 +
 .../security/ClientAuthzObjectModDUnitTest.java |   417 +
 .../ClientCQPostAuthorizationDUnitTest.java     |   528 +
 .../ClientPostAuthorizationDUnitTest.java       |   397 +
 .../gemfire/security/MultiuserAPIDUnitTest.java |   392 +
 .../MultiuserDurableCQAuthzDUnitTest.java       |   491 +
 .../gemfire/codeAnalysis/excludedClasses.txt    |     2 +
 .../gemstone/gemfire/codeAnalysis/openBugs.txt  |    21 +
 .../sanctionedDataSerializables.txt             |     4 +
 .../codeAnalysis/sanctionedSerializables.txt    |     1 +
 .../tier/sockets/durablecq-client-cache.xml     |    37 +
 .../tier/sockets/durablecq-server-cache.xml     |    32 +
 .../joptsimple/internal/AbbreviationMap.java    |     1 -
 .../src/main/java/org/json/JSONObject.java      |     3 -
 gemfire-junit/build.gradle                      |    21 +
 .../gemfire/test/junit/ConditionalIgnore.java   |    49 +
 .../gemfire/test/junit/IgnoreCondition.java     |    32 +
 .../gemfire/test/junit/IgnoreUntil.java         |    49 +
 .../com/gemstone/gemfire/test/junit/Repeat.java |    43 +
 .../com/gemstone/gemfire/test/junit/Retry.java  |    38 +
 .../test/junit/categories/ContainerTest.java    |    25 +
 .../test/junit/categories/DistributedTest.java  |    25 +
 .../categories/DistributedTransactionsTest.java |    26 +
 .../test/junit/categories/HydraTest.java        |    24 +
 .../test/junit/categories/IntegrationTest.java  |    25 +
 .../test/junit/categories/PerformanceTest.java  |    25 +
 .../gemfire/test/junit/categories/UITest.java   |    24 +
 .../gemfire/test/junit/categories/UnitTest.java |    25 +
 .../gemfire/test/junit/categories/WanTest.java  |    24 +
 .../test/junit/rules/ConditionalIgnoreRule.java |   123 +
 .../test/junit/rules/ExpectedTimeout.java       |   180 +
 .../test/junit/rules/ExpectedTimeoutRule.java   |   180 +
 .../test/junit/rules/IgnoreUntilRule.java       |   123 +
 .../gemfire/test/junit/rules/RepeatRule.java    |    81 +
 .../gemfire/test/junit/rules/RetryRule.java     |   181 +
 .../rules/SerializableExternalResource.java     |   107 +
 .../test/junit/rules/SerializableRuleChain.java |   119 +
 .../rules/SerializableTemporaryFolder.java      |    70 +
 .../test/junit/rules/SerializableTestName.java  |    54 +
 .../test/junit/rules/SerializableTestRule.java  |    33 +
 .../junit/rules/SerializableTestWatcher.java    |    29 +
 .../test/junit/rules/SerializableTimeout.java   |   119 +
 .../junit/support/DefaultIgnoreCondition.java   |    57 +
 .../IgnoreConditionEvaluationException.java     |    43 +
 .../test/junit/categories/DistributedTest.java  |    25 -
 .../categories/DistributedTransactionsTest.java |    26 -
 .../test/junit/categories/IntegrationTest.java  |    25 -
 .../test/junit/categories/PerformanceTest.java  |    25 -
 .../gemfire/test/junit/categories/UnitTest.java |    25 -
 .../gemfire/test/junit/categories/WanTest.java  |    24 -
 .../test/junit/rules/ExpectedTimeout.java       |   180 -
 .../examples/RepeatingTestCasesExampleTest.java |    94 +
 .../rules/examples/RetryRuleExampleTest.java    |    43 +
 .../rules/tests/ExpectedTimeoutRuleTest.java    |   214 +
 .../junit/rules/tests/IgnoreUntilRuleTest.java  |   121 +
 .../junit/rules/tests/JUnitRuleTestSuite.java   |    33 +
 .../test/junit/rules/tests/RepeatRuleTest.java  |   304 +
 .../tests/RetryRuleGlobalWithErrorTest.java     |   250 +
 .../tests/RetryRuleGlobalWithExceptionTest.java |   254 +
 .../tests/RetryRuleLocalWithErrorTest.java      |   207 +
 .../tests/RetryRuleLocalWithExceptionTest.java  |   213 +
 .../junit/rules/tests/RuleAndClassRuleTest.java |   138 +
 .../test/junit/rules/tests/TestRunner.java      |    37 +
 gemfire-lucene/build.gradle                     |    25 +-
 .../cache/lucene/LuceneQueryFactory.java        |    11 +-
 .../cache/lucene/LuceneQueryProvider.java       |     2 +-
 .../gemfire/cache/lucene/LuceneService.java     |     9 +-
 .../internal/repository/RepositoryManager.java  |     1 -
 .../lucene/internal/xml/LuceneXmlConstants.java |     2 +-
 .../geode.apache.org/lucene/lucene-1.0.xsd      |    57 +
 .../lucene/lucene-1.0.xsd                       |    58 -
 .../LuceneFunctionReadPathDUnitTest.java        |     7 +-
 ...IndexRepositoryImplJUnitPerformanceTest.java |   437 -
 .../IndexRepositoryImplPerformanceTest.java     |   439 +
 ...erIntegrationJUnitTest.createIndex.cache.xml |    23 +-
 ...serIntegrationJUnitTest.parseIndex.cache.xml |    23 +-
 gemfire-pulse/build.gradle                      |   116 +
 .../tools/pulse/internal/PulseAppListener.java  |   703 +
 .../controllers/ExceptionHandlingAdvice.java    |    52 +
 .../internal/controllers/PulseController.java   |   587 +
 .../tools/pulse/internal/data/Cluster.java      |  3808 ++++
 .../tools/pulse/internal/data/DataBrowser.java  |   281 +
 .../pulse/internal/data/IClusterUpdater.java    |    37 +
 .../pulse/internal/data/JMXDataUpdater.java     |  2449 +++
 .../pulse/internal/data/JmxManagerFinder.java   |   171 +
 .../tools/pulse/internal/data/PulseConfig.java  |   126 +
 .../pulse/internal/data/PulseConstants.java     |   416 +
 .../tools/pulse/internal/data/PulseVersion.java |   104 +
 .../tools/pulse/internal/data/Repository.java   |   215 +
 .../gemfire/tools/pulse/internal/json/CDL.java  |   274 +
 .../tools/pulse/internal/json/Cookie.java       |   164 +
 .../tools/pulse/internal/json/CookieList.java   |    85 +
 .../gemfire/tools/pulse/internal/json/HTTP.java |   158 +
 .../tools/pulse/internal/json/HTTPTokener.java  |    72 +
 .../tools/pulse/internal/json/JSONArray.java    |   901 +
 .../pulse/internal/json/JSONException.java      |    47 +
 .../tools/pulse/internal/json/JSONML.java       |   462 +
 .../tools/pulse/internal/json/JSONObject.java   |  1585 ++
 .../tools/pulse/internal/json/JSONString.java   |    37 +
 .../tools/pulse/internal/json/JSONStringer.java |    73 +
 .../tools/pulse/internal/json/JSONTokener.java  |   441 +
 .../tools/pulse/internal/json/JSONWriter.java   |   322 +
 .../gemfire/tools/pulse/internal/json/README    |    68 +
 .../gemfire/tools/pulse/internal/json/XML.java  |   503 +
 .../tools/pulse/internal/json/XMLTokener.java   |   360 +
 .../tools/pulse/internal/log/LogWriter.java     |   266 +
 .../pulse/internal/log/MessageFormatter.java    |   103 +
 .../pulse/internal/log/PulseLogWriter.java      |   306 +
 .../tools/pulse/internal/log/PulseLogger.java   |   144 +
 .../internal/service/ClusterDetailsService.java |   109 +
 .../service/ClusterDiskThroughputService.java   |    79 +
 .../service/ClusterGCPausesService.java         |    71 +
 .../service/ClusterKeyStatisticsService.java    |    79 +
 .../internal/service/ClusterMemberService.java  |   133 +
 .../service/ClusterMembersRGraphService.java    |   372 +
 .../service/ClusterMemoryUsageService.java      |    70 +
 .../internal/service/ClusterRegionService.java  |   241 +
 .../internal/service/ClusterRegionsService.java |   243 +
 .../service/ClusterSelectedRegionService.java   |   278 +
 .../ClusterSelectedRegionsMemberService.java    |   146 +
 .../internal/service/ClusterWANInfoService.java |    82 +
 .../service/MemberAsynchEventQueuesService.java |   107 +
 .../internal/service/MemberClientsService.java  |   123 +
 .../internal/service/MemberDetailsService.java  |   129 +
 .../service/MemberDiskThroughputService.java    |    93 +
 .../internal/service/MemberGCPausesService.java |    85 +
 .../service/MemberGatewayHubService.java        |   163 +
 .../service/MemberHeapUsageService.java         |    85 +
 .../service/MemberKeyStatisticsService.java     |    98 +
 .../internal/service/MemberRegionsService.java  |   140 +
 .../internal/service/MembersListService.java    |    78 +
 .../pulse/internal/service/PulseService.java    |    42 +
 .../internal/service/PulseServiceFactory.java   |    56 +
 .../internal/service/PulseVersionService.java   |    74 +
 .../service/QueryStatisticsService.java         |   152 +
 .../internal/service/SystemAlertsService.java   |   134 +
 .../pulse/internal/util/ConnectionUtil.java     |    47 +
 .../pulse/internal/util/IPAddressUtil.java      |    66 +
 .../tools/pulse/internal/util/StringUtils.java  |    86 +
 .../tools/pulse/internal/util/TimeUtils.java    |   121 +
 .../main/resources/LogMessages_en_US.properties |    97 +
 .../main/resources/LogMessages_fr_FR.properties |    91 +
 .../src/main/resources/default.properties       |    23 +
 .../src/main/resources/gemfire.properties       |    47 +
 .../src/main/resources/pulse-users.properties   |    30 +
 .../src/main/resources/pulse.properties         |    54 +
 .../src/main/resources/pulsesecurity.properties |    26 +
 .../src/main/resources/sqlfire.properties       |    47 +
 gemfire-pulse/src/main/webapp/DataBrowser.html  |   367 +
 gemfire-pulse/src/main/webapp/Login.html        |   142 +
 .../src/main/webapp/META-INF/MANIFEST.MF        |     3 +
 .../src/main/webapp/MemberDetails.html          |   618 +
 .../src/main/webapp/QueryStatistics.html        |   323 +
 .../webapp/WEB-INF/mvc-dispatcher-servlet.xml   |    35 +
 .../src/main/webapp/WEB-INF/spring-security.xml |    83 +
 gemfire-pulse/src/main/webapp/WEB-INF/web.xml   |    62 +
 .../src/main/webapp/clusterDetail.html          |   676 +
 .../src/main/webapp/css/ForceDirected.css       |    46 +
 gemfire-pulse/src/main/webapp/css/Treemap.css   |   134 +
 gemfire-pulse/src/main/webapp/css/base.css      |    74 +
 gemfire-pulse/src/main/webapp/css/common.css    |   240 +
 .../webapp/css/fonts/DroidSans-Bold-webfont.eot |   Bin 0 -> 43462 bytes
 .../webapp/css/fonts/DroidSans-Bold-webfont.svg |   271 +
 .../webapp/css/fonts/DroidSans-Bold-webfont.ttf |   Bin 0 -> 43260 bytes
 .../css/fonts/DroidSans-Bold-webfont.woff       |   Bin 0 -> 27120 bytes
 .../main/webapp/css/fonts/DroidSans-webfont.eot |   Bin 0 -> 44926 bytes
 .../main/webapp/css/fonts/DroidSans-webfont.svg |   271 +
 .../main/webapp/css/fonts/DroidSans-webfont.ttf |   Bin 0 -> 44712 bytes
 .../webapp/css/fonts/DroidSans-webfont.woff     |   Bin 0 -> 27672 bytes
 .../src/main/webapp/css/grid/ui.jqgrid.css      |   850 +
 gemfire-pulse/src/main/webapp/css/ie/ie.css     |    19 +
 gemfire-pulse/src/main/webapp/css/ie/ie7.css    |    21 +
 gemfire-pulse/src/main/webapp/css/ie/ie8.css    |    20 +
 gemfire-pulse/src/main/webapp/css/ie/ie9.css    |    20 +
 gemfire-pulse/src/main/webapp/css/jquery-ui.css |   566 +
 .../src/main/webapp/css/jquery.jscrollpane.css  |   121 +
 .../src/main/webapp/css/jquery.ui.all.css       |    11 +
 .../src/main/webapp/css/jquery.ui.core.css      |    41 +
 .../src/main/webapp/css/jquery.ui.theme.css     |   248 +
 .../src/main/webapp/css/jquery.ztreestyle.css   |    90 +
 .../css/multiselect/jquery.multiselect.css      |   301 +
 .../main/webapp/css/multiselect/prettify.css    |    46 +
 .../src/main/webapp/css/multiselect/style.css   |    35 +
 gemfire-pulse/src/main/webapp/css/popup.css     |    55 +
 gemfire-pulse/src/main/webapp/css/style.css     |  3067 +++
 .../src/main/webapp/css/treeView/Treemap.css    |   134 +
 .../src/main/webapp/images/about-gemfirexd.png  |   Bin 0 -> 4440 bytes
 .../src/main/webapp/images/about-sqlfire.png    |   Bin 0 -> 6277 bytes
 gemfire-pulse/src/main/webapp/images/about.png  |   Bin 0 -> 4421 bytes
 .../src/main/webapp/images/acc-minus.png        |   Bin 0 -> 1049 bytes
 .../src/main/webapp/images/acc-n-minus.png      |   Bin 0 -> 961 bytes
 .../src/main/webapp/images/acc-n-plus.png       |   Bin 0 -> 988 bytes
 .../src/main/webapp/images/acc-plus.png         |   Bin 0 -> 1047 bytes
 .../src/main/webapp/images/activeServer.png     |   Bin 0 -> 2846 bytes
 .../src/main/webapp/images/arrow-down.png       |   Bin 0 -> 986 bytes
 .../src/main/webapp/images/arrow-up.png         |   Bin 0 -> 988 bytes
 .../src/main/webapp/images/bg-image.png         |   Bin 0 -> 948 bytes
 .../src/main/webapp/images/bg-imageLogin.png    |   Bin 0 -> 946 bytes
 .../src/main/webapp/images/blue-msg-icon.png    |   Bin 0 -> 1194 bytes
 .../src/main/webapp/images/border-left-grid.png |   Bin 0 -> 927 bytes
 .../src/main/webapp/images/bread-crumb.png      |   Bin 0 -> 1182 bytes
 .../src/main/webapp/images/bubble_arrow.png     |   Bin 0 -> 1168 bytes
 .../src/main/webapp/images/chart-active.png     |   Bin 0 -> 1096 bytes
 gemfire-pulse/src/main/webapp/images/chart.png  |   Bin 0 -> 1095 bytes
 .../src/main/webapp/images/checkbox.png         |   Bin 0 -> 1630 bytes
 gemfire-pulse/src/main/webapp/images/chkbox.png |   Bin 0 -> 1313 bytes
 .../src/main/webapp/images/copy_icon.png        |   Bin 0 -> 1172 bytes
 .../src/main/webapp/images/correct_icon.png     |   Bin 0 -> 1143 bytes
 .../main/webapp/images/correct_small_icon.png   |   Bin 0 -> 1065 bytes
 .../main/webapp/images/correct_white_icon.png   |   Bin 0 -> 1122 bytes
 gemfire-pulse/src/main/webapp/images/cross.png  |   Bin 0 -> 2954 bytes
 .../main/webapp/images/dataViewWanEnabled.png   |   Bin 0 -> 1204 bytes
 .../src/main/webapp/images/dd_active.png        |   Bin 0 -> 1065 bytes
 .../src/main/webapp/images/dd_arrow.png         |   Bin 0 -> 1058 bytes
 .../webapp/images/error-locators-others.png     |   Bin 0 -> 2052 bytes
 .../src/main/webapp/images/error-locators.png   |   Bin 0 -> 2023 bytes
 .../images/error-manager-locator-others.png     |   Bin 0 -> 2067 bytes
 .../webapp/images/error-manager-locator.png     |   Bin 0 -> 2047 bytes
 .../webapp/images/error-managers-others.png     |   Bin 0 -> 2051 bytes
 .../src/main/webapp/images/error-managers.png   |   Bin 0 -> 2025 bytes
 .../main/webapp/images/error-message-icon.png   |   Bin 0 -> 1193 bytes
 .../src/main/webapp/images/error-msg-icon.png   |   Bin 0 -> 1194 bytes
 .../src/main/webapp/images/error-others.png     |   Bin 0 -> 2066 bytes
 .../src/main/webapp/images/error-otheruser.png  |   Bin 0 -> 2002 bytes
 .../main/webapp/images/error-status-icon.png    |   Bin 0 -> 2024 bytes
 gemfire-pulse/src/main/webapp/images/error.png  |   Bin 0 -> 1110 bytes
 .../src/main/webapp/images/graph-active.png     |   Bin 0 -> 1360 bytes
 gemfire-pulse/src/main/webapp/images/graph.png  |   Bin 0 -> 1374 bytes
 .../images/graph/key-statistics-graph.png       |   Bin 0 -> 1617 bytes
 .../webapp/images/graph/memory-usage-graph.png  |   Bin 0 -> 4366 bytes
 .../src/main/webapp/images/graph/reads.png      |   Bin 0 -> 3423 bytes
 .../images/graph/throughput-writes-graph.png    |   Bin 0 -> 4340 bytes
 .../src/main/webapp/images/graph/topology.png   |   Bin 0 -> 14997 bytes
 .../src/main/webapp/images/graph/treeview.png   |   Bin 0 -> 3386 bytes
 .../src/main/webapp/images/graph/writes.png     |   Bin 0 -> 3527 bytes
 .../src/main/webapp/images/grid-active.png      |   Bin 0 -> 1095 bytes
 gemfire-pulse/src/main/webapp/images/grid.png   |   Bin 0 -> 1094 bytes
 .../webapp/images/header-bg-bottom-border.png   |   Bin 0 -> 924 bytes
 .../src/main/webapp/images/hide_ico.png         |   Bin 0 -> 3281 bytes
 .../src/main/webapp/images/history-icon.png     |   Bin 0 -> 3533 bytes
 .../src/main/webapp/images/history-remove.png   |   Bin 0 -> 1185 bytes
 .../src/main/webapp/images/hor-spiltter-dot.png |   Bin 0 -> 990 bytes
 .../webapp/images/icons members/locators.png    |   Bin 0 -> 3106 bytes
 .../images/icons members/locators_others.png    |   Bin 0 -> 3118 bytes
 .../webapp/images/icons members/managers.png    |   Bin 0 -> 3103 bytes
 .../images/icons members/managers_locators.png  |   Bin 0 -> 3120 bytes
 .../images/icons members/managers_others.png    |   Bin 0 -> 3117 bytes
 .../main/webapp/images/icons members/others.png |   Bin 0 -> 3102 bytes
 .../src/main/webapp/images/info-msg-icon.png    |   Bin 0 -> 1194 bytes
 .../src/main/webapp/images/lastLine.png         |   Bin 0 -> 948 bytes
 gemfire-pulse/src/main/webapp/images/line.png   |   Bin 0 -> 929 bytes
 .../src/main/webapp/images/mask-bg.png          |   Bin 0 -> 940 bytes
 .../webapp/images/membersName_arror-off.png     |   Bin 0 -> 1148 bytes
 .../main/webapp/images/membersName_arror-on.png |   Bin 0 -> 1170 bytes
 gemfire-pulse/src/main/webapp/images/minus.png  |   Bin 0 -> 2959 bytes
 .../webapp/images/normal-locators-others.png    |   Bin 0 -> 2025 bytes
 .../src/main/webapp/images/normal-locators.png  |   Bin 0 -> 1995 bytes
 .../images/normal-manager-locator-others.png    |   Bin 0 -> 2037 bytes
 .../webapp/images/normal-manager-locator.png    |   Bin 0 -> 2029 bytes
 .../webapp/images/normal-managers-others.png    |   Bin 0 -> 2027 bytes
 .../src/main/webapp/images/normal-managers.png  |   Bin 0 -> 1997 bytes
 .../src/main/webapp/images/normal-others.png    |   Bin 0 -> 1988 bytes
 .../src/main/webapp/images/normal-otheruser.png |   Bin 0 -> 1968 bytes
 .../main/webapp/images/normal-status-icon.png   |   Bin 0 -> 1955 bytes
 gemfire-pulse/src/main/webapp/images/normal.png |   Bin 0 -> 1110 bytes
 .../src/main/webapp/images/orange-msg-icon.png  |   Bin 0 -> 1194 bytes
 .../src/main/webapp/images/pivotal-logo.png     |   Bin 0 -> 4302 bytes
 gemfire-pulse/src/main/webapp/images/plus.png   |   Bin 0 -> 1178 bytes
 .../src/main/webapp/images/plusMinusIcon.png    |   Bin 0 -> 1192 bytes
 .../src/main/webapp/images/popup-arrow.png      |   Bin 0 -> 1075 bytes
 .../main/webapp/images/popup-close-button.png   |   Bin 0 -> 1026 bytes
 .../images/pulse-monitoring-gemfirexd-old.png   |   Bin 0 -> 6606 bytes
 .../images/pulse-monitoring-gemfirexd.png       |   Bin 0 -> 4440 bytes
 .../webapp/images/pulse-monitoring-sqlfire.png  |   Bin 0 -> 6467 bytes
 .../src/main/webapp/images/pulse-monitoring.png |   Bin 0 -> 4741 bytes
 .../src/main/webapp/images/radio-off.png        |   Bin 0 -> 1252 bytes
 .../src/main/webapp/images/radio-on.png         |   Bin 0 -> 1306 bytes
 gemfire-pulse/src/main/webapp/images/radio.png  |   Bin 0 -> 2476 bytes
 .../src/main/webapp/images/regionIcons.png      |   Bin 0 -> 1495 bytes
 .../src/main/webapp/images/rightBorder.png      |   Bin 0 -> 927 bytes
 .../src/main/webapp/images/searchIcon.png       |   Bin 0 -> 1592 bytes
 .../src/main/webapp/images/seperator.png        |   Bin 0 -> 929 bytes
 gemfire-pulse/src/main/webapp/images/server.png |   Bin 0 -> 1233 bytes
 .../webapp/images/severe-locators-others.png    |   Bin 0 -> 2026 bytes
 .../src/main/webapp/images/severe-locators.png  |   Bin 0 -> 1980 bytes
 .../images/severe-manager-locator-others.png    |   Bin 0 -> 2032 bytes
 .../webapp/images/severe-manager-locator.png    |   Bin 0 -> 2026 bytes
 .../webapp/images/severe-managers-others.png    |   Bin 0 -> 2026 bytes
 .../src/main/webapp/images/severe-managers.png  |   Bin 0 -> 1985 bytes
 .../src/main/webapp/images/severe-msg-icon.png  |   Bin 0 -> 1194 bytes
 .../src/main/webapp/images/severe-others.png    |   Bin 0 -> 2007 bytes
 .../src/main/webapp/images/severe-otheruser.png |   Bin 0 -> 1959 bytes
 .../main/webapp/images/severe-status-icon.png   |   Bin 0 -> 2218 bytes
 gemfire-pulse/src/main/webapp/images/severe.png |   Bin 0 -> 1110 bytes
 .../src/main/webapp/images/show_ico.png         |   Bin 0 -> 3296 bytes
 gemfire-pulse/src/main/webapp/images/spacer.png |   Bin 0 -> 922 bytes
 .../src/main/webapp/images/sqlfire.png          |   Bin 0 -> 6467 bytes
 .../src/main/webapp/images/status-down.png      |   Bin 0 -> 1125 bytes
 .../src/main/webapp/images/status-up.png        |   Bin 0 -> 1104 bytes
 .../src/main/webapp/images/subServer.png        |   Bin 0 -> 2201 bytes
 .../src/main/webapp/images/tab-bottom-bg.png    |   Bin 0 -> 929 bytes
 .../src/main/webapp/images/treeView-img.png     |   Bin 0 -> 962 bytes
 .../main/webapp/images/ui-anim_basic_16x16.gif  |   Bin 0 -> 1459 bytes
 .../src/main/webapp/images/ver-spiltter-dot.png |   Bin 0 -> 979 bytes
 .../webapp/images/warning-locators-others.png   |   Bin 0 -> 2048 bytes
 .../src/main/webapp/images/warning-locators.png |   Bin 0 -> 2032 bytes
 .../images/warning-manager-locator-others.png   |   Bin 0 -> 2071 bytes
 .../webapp/images/warning-manager-locator.png   |   Bin 0 -> 2052 bytes
 .../webapp/images/warning-managers-others.png   |   Bin 0 -> 2023 bytes
 .../src/main/webapp/images/warning-managers.png |   Bin 0 -> 2030 bytes
 .../src/main/webapp/images/warning-msg-icon.png |   Bin 0 -> 1194 bytes
 .../src/main/webapp/images/warning-others.png   |   Bin 0 -> 2027 bytes
 .../main/webapp/images/warning-otheruser.png    |   Bin 0 -> 2010 bytes
 .../main/webapp/images/warning-status-icon.png  |   Bin 0 -> 1714 bytes
 .../src/main/webapp/images/warning.png          |   Bin 0 -> 1107 bytes
 .../src/main/webapp/images/yellow-msg-icon.png  |   Bin 0 -> 1194 bytes
 gemfire-pulse/src/main/webapp/index.html        |    67 +
 .../main/webapp/properties/default.properties   |    21 +
 .../webapp/properties/default_en.properties     |    21 +
 .../main/webapp/properties/gemfire.properties   |    45 +
 .../webapp/properties/gemfire_en.properties     |    45 +
 .../main/webapp/properties/gemfirexd.properties |    45 +
 .../webapp/properties/gemfirexd_en.properties   |    45 +
 .../src/main/webapp/properties/index.properties |    18 +
 .../main/webapp/properties/index_fr.properties  |    19 +
 .../main/webapp/properties/sqlfire.properties   |    45 +
 gemfire-pulse/src/main/webapp/regionDetail.html |   567 +
 .../src/main/webapp/scripts/lib/common.js       |   536 +
 .../src/main/webapp/scripts/lib/excanvas.js     |  1416 ++
 .../main/webapp/scripts/lib/grid.locale-en.js   |   169 +
 .../src/main/webapp/scripts/lib/html5.js        |     3 +
 .../src/main/webapp/scripts/lib/jit.js          | 17208 +++++++++++++++++
 .../src/main/webapp/scripts/lib/jquery-1.7.2.js |  9404 +++++++++
 .../webapp/scripts/lib/jquery.generateFile.js   |    77 +
 .../scripts/lib/jquery.i18n.properties.js       |   336 +
 .../webapp/scripts/lib/jquery.jqGrid.src.js     | 12182 ++++++++++++
 .../webapp/scripts/lib/jquery.jscrollpane.js    |  1340 ++
 .../webapp/scripts/lib/jquery.mousewheel.js     |    84 +
 .../webapp/scripts/lib/jquery.placeholder.js    |   106 +
 .../main/webapp/scripts/lib/jquery.sparkline.js |  3001 +++
 .../main/webapp/scripts/lib/jquery.tablednd.js  |   383 +
 .../main/webapp/scripts/lib/jquery.timeago.js   |   193 +
 .../webapp/scripts/lib/jquery.ztree.core-3.5.js |  1650 ++
 .../scripts/lib/jquery.ztree.excheck-3.5.js     |   624 +
 .../src/main/webapp/scripts/lib/tooltip.js      |   357 +
 .../webapp/scripts/multiselect/jquery-ui.js     | 14988 ++++++++++++++
 .../scripts/multiselect/jquery.multiselect.js   |   816 +
 .../main/webapp/scripts/multiselect/prettify.js |  1522 ++
 .../webapp/scripts/pulsescript/MemberDetails.js |  1045 +
 .../scripts/pulsescript/PulseCallbacks.js       |  1735 ++
 .../scripts/pulsescript/PulseFunctions.js       |   227 +
 .../webapp/scripts/pulsescript/clusterDetail.js |  2360 +++
 .../scripts/pulsescript/clusterRGraphMembers.js |  1515 ++
 .../main/webapp/scripts/pulsescript/common.js   |  1626 ++
 .../scripts/pulsescript/pages/DataBrowser.js    |   662 +
 .../pulsescript/pages/DataBrowserQuery.js       |   964 +
 .../pages/DataBrowserQueryHistory.js            |    95 +
 .../webapp/scripts/pulsescript/pages/Login.js   |   170 +
 .../webapp/scripts/pulsescript/pages/index.js   |    26 +
 .../scripts/pulsescript/queryStatistics.js      |   315 +
 .../webapp/scripts/pulsescript/regionView.js    |   757 +
 .../pulse/testbed/GemFireDistributedSystem.java |   323 +
 .../tools/pulse/testbed/GemfireTopology.java    |    24 +
 .../tools/pulse/testbed/PropFileHelper.java     |   115 +
 .../pulse/testbed/PropMockDataUpdater.java      |   515 +
 .../gemfire/tools/pulse/testbed/TestBed.java    |    84 +
 .../tools/pulse/testbed/driver/PulseUITest.java |   284 +
 .../pulse/testbed/driver/TomcatHelper.java      |    77 +
 .../tools/pulse/tests/AggregateStatement.java   |   217 +
 .../pulse/tests/AggregateStatementMBean.java    |   168 +
 .../pulse/tests/DataBrowserResultLoader.java    |    84 +
 .../pulse/tests/GemFireXDAggregateTable.java    |    46 +
 .../tests/GemFireXDAggregateTableMBean.java     |    28 +
 .../tools/pulse/tests/GemFireXDCluster.java     |    95 +
 .../pulse/tests/GemFireXDClusterMBean.java      |    32 +
 .../tools/pulse/tests/GemFireXDMember.java      |    80 +
 .../tools/pulse/tests/GemFireXDMemberMBean.java |    31 +
 .../gemfire/tools/pulse/tests/JMXBaseBean.java  |    67 +
 .../tools/pulse/tests/JMXProperties.java        |    47 +
 .../gemfire/tools/pulse/tests/Member.java       |   193 +
 .../gemfire/tools/pulse/tests/MemberMBean.java  |    86 +
 .../tools/pulse/tests/PulseAutomatedTest.java   |   785 +
 .../tools/pulse/tests/PulseBaseTest.java        |   686 +
 .../gemfire/tools/pulse/tests/PulseTest.java    |  1056 +
 .../tools/pulse/tests/PulseTestData.java        |   106 +
 .../tools/pulse/tests/PulseTestLocators.java    |   225 +
 .../gemfire/tools/pulse/tests/Region.java       |   192 +
 .../gemfire/tools/pulse/tests/RegionMBean.java  |    59 +
 .../tools/pulse/tests/RegionOnMember.java       |    96 +
 .../tools/pulse/tests/RegionOnMemberMBean.java  |    50 +
 .../gemfire/tools/pulse/tests/Server.java       |   253 +
 .../gemfire/tools/pulse/tests/ServerObject.java |   264 +
 .../tools/pulse/tests/ServerObjectMBean.java    |    79 +
 .../gemfire/tools/pulse/tests/TomcatHelper.java |    97 +
 .../pulse/tests/junit/BaseServiceTest.java      |   250 +
 .../junit/ClusterSelectedRegionServiceTest.java |   350 +
 ...ClusterSelectedRegionsMemberServiceTest.java |   362 +
 .../junit/MemberGatewayHubServiceTest.java      |   423 +
 .../src/test/resources/NoDataFound1.txt         |     1 +
 .../src/test/resources/NoDataFound2.txt         |    35 +
 .../src/test/resources/NoDataFound3.txt         |     6 +
 gemfire-pulse/src/test/resources/message.txt    |     1 +
 .../src/test/resources/test.properties          |   326 +
 gemfire-pulse/src/test/resources/test1.txt      |     5 +
 gemfire-pulse/src/test/resources/test2.txt      |     7 +
 gemfire-pulse/src/test/resources/test3.txt      |     5 +
 gemfire-pulse/src/test/resources/test4.txt      |     4 +
 gemfire-pulse/src/test/resources/test5.txt      |     7 +
 gemfire-pulse/src/test/resources/test6.txt      |    11 +
 gemfire-pulse/src/test/resources/test7.txt      |    13 +
 .../resources/testNullObjectsAtRootLevel1.txt   |    25 +
 .../resources/testNullObjectsAtRootLevel2.txt   |    30 +
 .../src/test/resources/testQueryResult.txt      |   198 +
 .../src/test/resources/testQueryResult1000.txt  |  1023 +
 .../testQueryResultArrayAndArrayList.txt        |     8 +
 .../test/resources/testQueryResultArrayList.txt |     6 +
 .../resources/testQueryResultArrayOfList.txt    |    15 +
 .../resources/testQueryResultClusterSmall.txt   |    23 +
 .../testQueryResultClusterWithStruct.txt        |    10 +
 .../test/resources/testQueryResultHashMap.txt   |     8 +
 .../resources/testQueryResultHashMapSmall.txt   |    12 +
 .../src/test/resources/testQueryResultSmall.txt |    12 +
 .../resources/testQueryResultWithStruct.txt     |  1744 ++
 .../testQueryResultWithStructSmall.txt          |    15 +
 gemfire-pulse/src/test/resources/test_pp.txt    |     7 +
 .../src/test/resources/testbed.properties       |   157 +
 gemfire-rebalancer/build.gradle                 |    33 +-
 .../gemfire/cache/util/AutoBalancer.java        |     4 +-
 gemfire-site/.gitignore                         |     1 +
 gemfire-site/build.gradle                       |    37 -
 gemfire-site/src/jbake.zip                      |   Bin 207030 -> 0 bytes
 gemfire-site/src/jbake/assets/favicon.ico       |   Bin 1150 -> 0 bytes
 .../src/jbake/assets/images/bg-billboard.png    |   Bin 25538 -> 0 bytes
 .../jbake/assets/images/bg-crystals-home.png    |   Bin 41684 -> 0 bytes
 .../assets/images/bg-crystals-secondary.png     |   Bin 26046 -> 0 bytes
 .../src/jbake/assets/images/egg-logo1.png       |   Bin 8626 -> 0 bytes
 .../jbake/assets/images/events/apachecon.png    |   Bin 4528 -> 0 bytes
 .../src/jbake/assets/images/events/oscon.png    |   Bin 26024 -> 0 bytes
 .../src/jbake/assets/images/geode-banner.png    |   Bin 7916 -> 0 bytes
 .../assets/images/logo-apache-geode-white.png   |   Bin 2336 -> 0 bytes
 .../jbake/assets/images/logo-apache-geode.png   |   Bin 3200 -> 0 bytes
 .../jbake/assets/images/logo-geode-white.png    |   Bin 1620 -> 0 bytes
 .../src/jbake/assets/images/logo-geode.png      |   Bin 3345 -> 0 bytes
 .../src/jbake/assets/javascripts/master.js      |   121 -
 .../src/jbake/assets/javascripts/scale.fix.js   |    20 -
 .../jbake/assets/stylesheets/pygment_trac.css   |    60 -
 .../src/jbake/assets/stylesheets/styles.css     |   319 -
 gemfire-site/src/jbake/content/404.md           |     9 -
 gemfire-site/src/jbake/content/README.md        |    36 -
 gemfire-site/src/jbake/content/about/index.md   |    31 -
 .../src/jbake/content/community/index.md        |    82 -
 .../src/jbake/content/contribute/index.md       |    47 -
 gemfire-site/src/jbake/content/docs/index.md    |    23 -
 .../src/jbake/content/download/index.md         |    13 -
 .../src/jbake/content/getting-started/index.md  |    88 -
 gemfire-site/src/jbake/content/index.md         |    76 -
 gemfire-site/src/jbake/jbake.properties         |     6 -
 gemfire-site/src/jbake/templates/page.groovy    |    80 -
 gemfire-site/website/.gitignore                 |     1 +
 gemfire-site/website/README.md                  |    54 +
 gemfire-site/website/Rules                      |    69 +
 gemfire-site/website/build.sh                   |    18 +
 .../website/content/bootstrap/bootstrap.min.css |     9 +
 .../website/content/community/index.html        |   291 +
 .../website/content/css/bootflat-extensions.css |   356 +
 .../website/content/css/bootflat-square.css     |    69 +
 gemfire-site/website/content/css/bootflat.css   |  1559 ++
 .../website/content/css/font-awesome.min.css    |   405 +
 gemfire-site/website/content/css/geode-site.css |  1617 ++
 gemfire-site/website/content/favicon.ico        |   Bin 0 -> 20805 bytes
 .../website/content/font/FontAwesome.otf        |   Bin 0 -> 61896 bytes
 .../content/font/fontawesome-webfont-eot.eot    |   Bin 0 -> 37405 bytes
 .../content/font/fontawesome-webfont-svg.svg    |   399 +
 .../content/font/fontawesome-webfont-ttf.ttf    |   Bin 0 -> 79076 bytes
 .../content/font/fontawesome-webfont-woff.woff  |   Bin 0 -> 43572 bytes
 .../website/content/img/apache_geode_logo.png   |   Bin 0 -> 23616 bytes
 .../content/img/apache_geode_logo_white.png     |   Bin 0 -> 22695 bytes
 .../img/apache_geode_logo_white_small.png       |   Bin 0 -> 52948 bytes
 .../website/content/img/check_flat/default.png  |   Bin 0 -> 25851 bytes
 gemfire-site/website/content/img/egg-logo.png   |   Bin 0 -> 9938 bytes
 gemfire-site/website/content/img/github.png     |   Bin 0 -> 8936 bytes
 gemfire-site/website/content/index.html         |   142 +
 .../website/content/js/bootstrap.min.js         |     8 +
 gemfire-site/website/content/js/head.js         |   708 +
 gemfire-site/website/content/js/html5shiv.js    |     8 +
 .../website/content/js/jquery-1.10.1.min.js     |     6 +
 .../website/content/js/jquery.icheck.js         |   397 +
 gemfire-site/website/content/js/respond.min.js  |     6 +
 .../website/content/js/usergrid-site.js         |    66 +
 .../website/content/releases/index.html         |   129 +
 gemfire-site/website/layouts/community.html     |     1 +
 gemfire-site/website/layouts/default.html       |    44 +
 gemfire-site/website/layouts/docs.html          |     1 +
 gemfire-site/website/layouts/footer.html        |    96 +
 gemfire-site/website/layouts/header.html        |   248 +
 gemfire-site/website/lib/default.rb             |    60 +
 gemfire-site/website/lib/helpers_.rb            |    16 +
 gemfire-site/website/lib/pandoc.template        |     4 +
 gemfire-site/website/nanoc.yaml                 |    94 +
 gemfire-site/website/run.sh                     |    18 +
 gemfire-site/website/utilities/map-markers.rb   |    75 +
 gemfire-site/website/utilities/markers.txt      |   440 +
 .../website/utilities/snapshot-apigee.rb        |    88 +
 .../src/it/resources/test-regions.xml           |    17 +
 .../src/it/resources/test-retrieve-regions.xml  |    17 +
 gemfire-wan/build.gradle                        |    23 +
 .../client/internal/GatewaySenderBatchOp.java   |   313 +
 .../cache/client/internal/SenderProxy.java      |    43 +
 .../internal/locator/wan/LocatorDiscovery.java  |   227 +
 .../internal/locator/wan/LocatorHelper.java     |   143 +
 .../locator/wan/LocatorJoinMessage.java         |   105 +
 .../wan/LocatorMembershipListenerImpl.java      |   230 +
 .../locator/wan/RemoteLocatorJoinRequest.java   |    87 +
 .../locator/wan/RemoteLocatorJoinResponse.java  |    89 +
 .../locator/wan/RemoteLocatorPingRequest.java   |    56 +
 .../locator/wan/RemoteLocatorPingResponse.java  |    55 +
 .../locator/wan/RemoteLocatorRequest.java       |    66 +
 .../locator/wan/RemoteLocatorResponse.java      |    74 +
 .../internal/locator/wan/WANFactoryImpl.java    |    74 +
 .../locator/wan/WanLocatorDiscovererImpl.java   |   138 +
 .../cache/wan/AbstractRemoteGatewaySender.java  |   169 +
 .../cache/wan/GatewayReceiverFactoryImpl.java   |   147 +
 .../internal/cache/wan/GatewayReceiverImpl.java |   253 +
 .../wan/GatewaySenderEventRemoteDispatcher.java |   766 +
 .../cache/wan/GatewaySenderFactoryImpl.java     |   389 +
 .../wan/parallel/ParallelGatewaySenderImpl.java |   267 +
 ...rentParallelGatewaySenderEventProcessor.java |    67 +
 ...moteParallelGatewaySenderEventProcessor.java |   122 +
 ...urrentSerialGatewaySenderEventProcessor.java |    45 +
 ...RemoteSerialGatewaySenderEventProcessor.java |    50 +
 .../wan/serial/SerialGatewaySenderImpl.java     |   260 +
 ...ternal.locator.wan.LocatorMembershipListener |    15 +
 ...ne.gemfire.internal.cache.wan.spi.WANFactory |    15 +
 .../cache/CacheXml70GatewayDUnitTest.java       |   243 +
 .../cache/CacheXml80GatewayDUnitTest.java       |    77 +
 .../AnalyzeWANSerializablesJUnitTest.java       |    91 +
 .../internal/cache/UpdateVersionDUnitTest.java  |   965 +
 .../gemfire/internal/cache/wan/WANTestBase.java |  5187 +++++
 ...oncurrentParallelGatewaySenderDUnitTest.java |   863 +
 ...ntParallelGatewaySenderOffHeapDUnitTest.java |    32 +
 ...allelGatewaySenderOperation_1_DUnitTest.java |   851 +
 ...allelGatewaySenderOperation_2_DUnitTest.java |   541 +
 ...tSerialGatewaySenderOperationsDUnitTest.java |   111 +
 ...GatewaySenderOperationsOffHeapDUnitTest.java |    32 +
 .../ConcurrentWANPropogation_1_DUnitTest.java   |   610 +
 .../ConcurrentWANPropogation_2_DUnitTest.java   |   487 +
 .../cache/wan/disttx/DistTXWANDUnitTest.java    |   211 +
 .../CommonParallelGatewaySenderDUnitTest.java   |   485 +
 ...onParallelGatewaySenderOffHeapDUnitTest.java |    32 +
 ...wWANConcurrencyCheckForDestroyDUnitTest.java |   528 +
 .../cache/wan/misc/PDXNewWanDUnitTest.java      |   789 +
 ...dRegion_ParallelWANPersistenceDUnitTest.java |   753 +
 ...dRegion_ParallelWANPropogationDUnitTest.java |  1134 ++
 .../SenderWithTransportFilterDUnitTest.java     |   241 +
 ...downAllPersistentGatewaySenderDUnitTest.java |   209 +
 .../wan/misc/WANConfigurationJUnitTest.java     |   609 +
 .../wan/misc/WANLocatorServerDUnitTest.java     |   195 +
 .../cache/wan/misc/WANSSLDUnitTest.java         |   153 +
 .../wan/misc/WanAutoDiscoveryDUnitTest.java     |   559 +
 .../cache/wan/misc/WanValidationsDUnitTest.java |  1680 ++
 ...tewaySenderOperation_2_OffHeapDUnitTest.java |    32 +
 ...tewaySenderOperation_2_OffHeapDUnitTest.java |    32 +
 ...GatewaySenderOperationsOffHeapDUnitTest.java |    34 +
 ...ewaySenderQueueOverflowOffHeapDUnitTest.java |    34 +
 .../ParallelWANConflationOffHeapDUnitTest.java  |    34 +
 ...nceEnabledGatewaySenderOffHeapDUnitTest.java |    34 +
 ...ropogationConcurrentOpsOffHeapDUnitTest.java |    34 +
 .../ParallelWANPropogationOffHeapDUnitTest.java |    34 +
 ...erialGatewaySenderQueueOffHeapDUnitTest.java |    34 +
 ...nceEnabledGatewaySenderOffHeapDUnitTest.java |    34 +
 .../SerialWANPropogationOffHeapDUnitTest.java   |    34 +
 ...ation_PartitionedRegionOffHeapDUnitTest.java |    34 +
 ...allelGatewaySenderOperation_2_DUnitTest.java |    38 +
 ...arallelGatewaySenderOperationsDUnitTest.java |   639 +
 ...llelGatewaySenderQueueOverflowDUnitTest.java |   534 +
 .../ParallelWANConflationDUnitTest.java         |   497 +
 ...ersistenceEnabledGatewaySenderDUnitTest.java |  1824 ++
 ...llelWANPropagationClientServerDUnitTest.java |   114 +
 ...lelWANPropagationConcurrentOpsDUnitTest.java |   291 +
 .../ParallelWANPropagationDUnitTest.java        |  1451 ++
 ...ParallelWANPropagationLoopBackDUnitTest.java |   426 +
 .../wan/parallel/ParallelWANStatsDUnitTest.java |   532 +
 ...tewaySenderDistributedDeadlockDUnitTest.java |   408 +
 ...rialGatewaySenderEventListenerDUnitTest.java |   391 +
 .../SerialGatewaySenderOperationsDUnitTest.java |   656 +
 .../SerialGatewaySenderQueueDUnitTest.java      |   339 +
 ...ersistenceEnabledGatewaySenderDUnitTest.java |   604 +
 .../SerialWANPropagationLoopBackDUnitTest.java  |   539 +
 .../serial/SerialWANPropogationDUnitTest.java   |  1605 ++
 ...NPropogation_PartitionedRegionDUnitTest.java |   440 +
 .../SerialWANPropogationsFeatureDUnitTest.java  |   373 +
 .../wan/serial/SerialWANStatsDUnitTest.java     |   597 +
 .../wan/wancommand/WANCommandTestBase.java      |   513 +
 ...anCommandCreateGatewayReceiverDUnitTest.java |   699 +
 .../WanCommandCreateGatewaySenderDUnitTest.java |   764 +
 ...WanCommandGatewayReceiverStartDUnitTest.java |   328 +
 .../WanCommandGatewayReceiverStopDUnitTest.java |   333 +
 .../WanCommandGatewaySenderStartDUnitTest.java  |   418 +
 .../WanCommandGatewaySenderStopDUnitTest.java   |   369 +
 .../wan/wancommand/WanCommandListDUnitTest.java |   404 +
 .../WanCommandPauseResumeDUnitTest.java         |   717 +
 .../wancommand/WanCommandStatusDUnitTest.java   |   583 +
 .../management/WANManagementDUnitTest.java      |   521 +
 .../ClusterConfigurationDUnitTest.java          |  1057 +
 .../pulse/TestRemoteClusterDUnitTest.java       |   272 +
 .../gemfire/codeAnalysis/excludedClasses.txt    |     2 +
 .../gemstone/gemfire/codeAnalysis/openBugs.txt  |    21 +
 .../sanctionedDataSerializables.txt             |    28 +
 .../codeAnalysis/sanctionedSerializables.txt    |     0
 gemfire-web-api/build.gradle                    |    21 +-
 .../RestAPIsAndInterOpISDUnitTest.java          |    18 +-
 gemfire-web/build.gradle                        |    24 +-
 .../web/util/ConvertUtilsJUnitTest.java         |     2 +-
 gradle.properties                               |    19 +
 gradle/code-analysis.gradle                     |   113 +
 gradle/dependency-versions.properties           |    51 +-
 gradle/ide.gradle                               |    56 +
 gradle/java.gradle                              |   138 +
 gradle/publish.gradle                           |    83 +
 gradle/rat.gradle                               |   217 +
 gradle/test.gradle                              |   241 +
 gradle/utilities.gradle                         |    41 +
 settings.gradle                                 |    25 +-
 2094 files changed, 320422 insertions(+), 35517 deletions(-)
----------------------------------------------------------------------



[12/62] [abbrv] [partial] incubator-geode git commit: GEODE-773: Extract static methods from DistributedTestCase

Posted by je...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/DistributedTestCase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/DistributedTestCase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/DistributedTestCase.java
index 2bf013d..65224e8 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/DistributedTestCase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/DistributedTestCase.java
@@ -16,26 +16,16 @@
  */
 package com.gemstone.gemfire.test.dunit;
 
-import java.io.File;
-import java.io.PrintWriter;
-import java.io.Serializable;
-import java.io.StringWriter;
-import java.net.UnknownHostException;
 import java.text.DecimalFormat;
-import java.util.HashMap;
 import java.util.Iterator;
 import java.util.LinkedHashSet;
 import java.util.Map;
 import java.util.Properties;
-import java.util.Random;
-import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.Set;
 
-import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.junit.experimental.categories.Category;
 
-import com.gemstone.gemfire.InternalGemFireError;
-import com.gemstone.gemfire.LogWriter;
 import com.gemstone.gemfire.SystemFailure;
 import com.gemstone.gemfire.admin.internal.AdminDistributedSystemImpl;
 import com.gemstone.gemfire.cache.Cache;
@@ -49,37 +39,22 @@ import com.gemstone.gemfire.cache30.MultiVMRegionTestCase;
 import com.gemstone.gemfire.cache30.RegionTestCase;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
-import com.gemstone.gemfire.distributed.internal.DistributionConfigImpl;
 import com.gemstone.gemfire.distributed.internal.DistributionMessageObserver;
 import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
 import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem.CreationStackGenerator;
-import com.gemstone.gemfire.distributed.internal.membership.gms.MembershipManagerHelper;
-import com.gemstone.gemfire.internal.InternalDataSerializer;
-import com.gemstone.gemfire.internal.InternalInstantiator;
-import com.gemstone.gemfire.internal.OSProcess;
 import com.gemstone.gemfire.internal.SocketCreator;
 import com.gemstone.gemfire.internal.admin.ClientStatsManager;
 import com.gemstone.gemfire.internal.cache.DiskStoreObserver;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.HARegion;
 import com.gemstone.gemfire.internal.cache.InitialImageOperation;
-import com.gemstone.gemfire.internal.cache.LocalRegion;
 import com.gemstone.gemfire.internal.cache.PartitionedRegion;
 import com.gemstone.gemfire.internal.cache.tier.InternalClientMembership;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheServerTestUtil;
 import com.gemstone.gemfire.internal.cache.tier.sockets.ClientProxyMembershipID;
-import com.gemstone.gemfire.internal.cache.tier.sockets.DataSerializerPropogationDUnitTest;
 import com.gemstone.gemfire.internal.cache.xmlcache.CacheCreation;
-import com.gemstone.gemfire.internal.logging.InternalLogWriter;
-import com.gemstone.gemfire.internal.logging.LocalLogWriter;
 import com.gemstone.gemfire.internal.logging.LogService;
-import com.gemstone.gemfire.internal.logging.LogWriterFactory;
-import com.gemstone.gemfire.internal.logging.LogWriterImpl;
-import com.gemstone.gemfire.internal.logging.ManagerLogWriter;
-import com.gemstone.gemfire.internal.logging.log4j.LogWriterLogger;
-import com.gemstone.gemfire.internal.util.Callable;
 import com.gemstone.gemfire.management.internal.cli.LogWrapper;
-import com.jayway.awaitility.Awaitility;
 import com.gemstone.gemfire.test.dunit.standalone.DUnitLauncher;
 import com.gemstone.gemfire.test.junit.categories.DistributedTest;
 
@@ -100,49 +75,17 @@ import junit.framework.TestCase;
 @Category(DistributedTest.class)
 @SuppressWarnings("serial")
 public abstract class DistributedTestCase extends TestCase implements java.io.Serializable {
-  private static final Logger logger = LogService.getLogger();
-  private static final LogWriterLogger oldLogger = LogWriterLogger.create(logger);
-  private static final LinkedHashSet<String> testHistory = new LinkedHashSet<String>();
-
-  private static void setUpCreationStackGenerator() {
-    // the following is moved from InternalDistributedSystem to fix #51058
-    InternalDistributedSystem.TEST_CREATION_STACK_GENERATOR.set(
-    new CreationStackGenerator() {
-      @Override
-      public Throwable generateCreationStack(final DistributionConfig config) {
-        final StringBuilder sb = new StringBuilder();
-        final String[] validAttributeNames = config.getAttributeNames();
-        for (int i = 0; i < validAttributeNames.length; i++) {
-          final String attName = validAttributeNames[i];
-          final Object actualAtt = config.getAttributeObject(attName);
-          String actualAttStr = actualAtt.toString();
-          sb.append("  ");
-          sb.append(attName);
-          sb.append("=\"");
-          if (actualAtt.getClass().isArray()) {
-            actualAttStr = InternalDistributedSystem.arrayToString(actualAtt);
-          }
-          sb.append(actualAttStr);
-          sb.append("\"");
-          sb.append("\n");
-        }
-        return new Throwable("Creating distributed system with the following configuration:\n" + sb.toString());
-      }
-    });
-  }
   
-  private static void tearDownCreationStackGenerator() {
-    InternalDistributedSystem.TEST_CREATION_STACK_GENERATOR.set(InternalDistributedSystem.DEFAULT_CREATION_STACK_GENERATOR);
-  }
+  private static final Logger logger = LogService.getLogger();
   
+  private static final Set<String> testHistory = new LinkedHashSet<String>();
+
   /** This VM's connection to the distributed system */
   public static InternalDistributedSystem system;
   private static Class lastSystemCreatedInTest;
   private static Properties lastSystemProperties;
-  public static volatile String testName;
+  private static volatile String testMethodName;
   
-  private static ConcurrentLinkedQueue<ExpectedException> expectedExceptions = new ConcurrentLinkedQueue<ExpectedException>();
-
   /** For formatting timing info */
   private static final DecimalFormat format = new DecimalFormat("###.###");
 
@@ -150,315 +93,25 @@ public abstract class DistributedTestCase extends TestCase implements java.io.Se
 
   public static final boolean logPerTest = Boolean.getBoolean("dunitLogPerTest");
 
-  ///////////////////////  Utility Methods  ///////////////////////
-  
-  public void attachDebugger(VM vm, final String msg) {
-    vm.invoke(new SerializableRunnable("Attach Debugger") {
-      public void run() {
-        com.gemstone.gemfire.internal.util.DebuggerSupport.
-        waitForJavaDebugger(getSystem().getLogWriter().convertToLogWriterI18n(), msg);
-      } 
-    });
-  }
-
-
-  /**
-   * Invokes a <code>SerializableRunnable</code> in every VM that
-   * DUnit knows about.
-   *
-   * @see VM#invoke(SerializableRunnableIF)
-   */
-  public static void invokeInEveryVM(SerializableRunnable work) {
-    for (int h = 0; h < Host.getHostCount(); h++) {
-      Host host = Host.getHost(h);
-
-      for (int v = 0; v < host.getVMCount(); v++) {
-        VM vm = host.getVM(v);
-        vm.invoke(work);
-      }
-    }
-  }
-
-  public static void invokeInLocator(SerializableRunnable work) {
-    Host.getLocator().invoke(work);
-  }
-  
-  /**
-   * Invokes a <code>SerializableCallable</code> in every VM that
-   * DUnit knows about.
-   *
-   * @return a Map of results, where the key is the VM and the value is the result
-   * @see VM#invoke(SerializableCallableIF)
-   */
-  protected static Map invokeInEveryVM(SerializableCallable work) {
-    HashMap ret = new HashMap();
-    for (int h = 0; h < Host.getHostCount(); h++) {
-      Host host = Host.getHost(h);
-      for (int v = 0; v < host.getVMCount(); v++) {
-        VM vm = host.getVM(v);
-        ret.put(vm, vm.invoke(work));
-      }
-    }
-    return ret;
-  }
-
-  /**
-   * Invokes a method in every remote VM that DUnit knows about.
-   *
-   * @see VM#invoke(Class, String)
-   */
-  protected static void invokeInEveryVM(Class c, String method) {
-    for (int h = 0; h < Host.getHostCount(); h++) {
-      Host host = Host.getHost(h);
-
-      for (int v = 0; v < host.getVMCount(); v++) {
-        VM vm = host.getVM(v);
-        vm.invoke(c, method);
-      }
-    }
-  }
-
-  /**
-   * Invokes a method in every remote VM that DUnit knows about.
-   *
-   * @see VM#invoke(Class, String)
-   */
-  protected static void invokeInEveryVM(Class c, String method, Object[] methodArgs) {
-    for (int h = 0; h < Host.getHostCount(); h++) {
-      Host host = Host.getHost(h);
-
-      for (int v = 0; v < host.getVMCount(); v++) {
-        VM vm = host.getVM(v);
-        vm.invoke(c, method, methodArgs);
-      }
-    }
-  }
-  
-  /**
-   * The number of milliseconds to try repeating validation code in the
-   * event that AssertionFailedError is thrown.  For ACK scopes, no
-   * repeat should be necessary.
-   */
-  protected long getRepeatTimeoutMs() {
-    return 0;
-  }
-  
-  protected void invokeRepeatingIfNecessary(VM vm, RepeatableRunnable task) {
-    vm.invokeRepeatingIfNecessary(task, getRepeatTimeoutMs());
-  }
-  
-  /**
-   * Invokes a <code>SerializableRunnable</code> in every VM that
-   * DUnit knows about.  If work.run() throws an assertion failure, 
-   * its execution is repeated, until no assertion failure occurs or
-   * repeatTimeout milliseconds have passed.
-   *
-   * @see VM#invoke(SerializableRunnableIF)
-   */
-  protected void invokeInEveryVMRepeatingIfNecessary(RepeatableRunnable work) {
-    for (int h = 0; h < Host.getHostCount(); h++) {
-      Host host = Host.getHost(h);
-
-      for (int v = 0; v < host.getVMCount(); v++) {
-        VM vm = host.getVM(v);
-        vm.invokeRepeatingIfNecessary(work, getRepeatTimeoutMs());
-      }
-    }
-  }
-
-  /** Return the total number of VMs on all hosts */
-  protected static int getVMCount() {
-    int count = 0;
-    for (int h = 0; h < Host.getHostCount(); h++) {
-      Host host = Host.getHost(h);
-      count += host.getVMCount();
-    }
-    return count;
-  }
-
-
-  /** print a stack dump for this vm
-      @author bruce
-      @since 5.0
-   */
-  public static void dumpStack() {
-    com.gemstone.gemfire.internal.OSProcess.printStacks(0, false);
-  }
-  
-  /** print a stack dump for the given vm
-      @author bruce
-      @since 5.0
-   */
-  public static void dumpStack(VM vm) {
-    vm.invoke(com.gemstone.gemfire.test.dunit.DistributedTestCase.class, "dumpStack");
-  }
-  
-  /** print stack dumps for all vms on the given host
-      @author bruce
-      @since 5.0
-   */
-  public static void dumpStack(Host host) {
-    for (int v=0; v < host.getVMCount(); v++) {
-      host.getVM(v).invoke(com.gemstone.gemfire.test.dunit.DistributedTestCase.class, "dumpStack");
-    }
-  }
-  
-  /** print stack dumps for all vms
-      @author bruce
-      @since 5.0
-   */
-  public static void dumpAllStacks() {
-    for (int h=0; h < Host.getHostCount(); h++) {
-      dumpStack(Host.getHost(h));
-    }
-  }
-
-
-  public static String noteTiming(long operations, String operationUnit,
-                                  long beginTime, long endTime,
-                                  String timeUnit)
-  {
-    long delta = endTime - beginTime;
-    StringBuffer sb = new StringBuffer();
-    sb.append("  Performed ");
-    sb.append(operations);
-    sb.append(" ");
-    sb.append(operationUnit);
-    sb.append(" in ");
-    sb.append(delta);
-    sb.append(" ");
-    sb.append(timeUnit);
-    sb.append("\n");
-
-    double ratio = ((double) operations) / ((double) delta);
-    sb.append("    ");
-    sb.append(format.format(ratio));
-    sb.append(" ");
-    sb.append(operationUnit);
-    sb.append(" per ");
-    sb.append(timeUnit);
-    sb.append("\n");
-
-    ratio = ((double) delta) / ((double) operations);
-    sb.append("    ");
-    sb.append(format.format(ratio));
-    sb.append(" ");
-    sb.append(timeUnit);
-    sb.append(" per ");
-    sb.append(operationUnit);
-    sb.append("\n");
-
-    return sb.toString();
-  }
-
-  /**
-   * Creates a new LogWriter and adds it to the config properties. The config
-   * can then be used to connect to DistributedSystem, thus providing early
-   * access to the LogWriter before connecting. This call does not connect
-   * to the DistributedSystem. It simply creates and returns the LogWriter
-   * that will eventually be used by the DistributedSystem that connects using
-   * config.
-   * 
-   * @param config the DistributedSystem config properties to add LogWriter to
-   * @return early access to the DistributedSystem LogWriter
-   */
-  protected static LogWriter createLogWriter(Properties config) { // TODO:LOG:CONVERT: this is being used for ExpectedExceptions
-    Properties nonDefault = config;
-    if (nonDefault == null) {
-      nonDefault = new Properties();
-    }
-    addHydraProperties(nonDefault);
-    
-    DistributionConfig dc = new DistributionConfigImpl(nonDefault);
-    LogWriter logger = LogWriterFactory.createLogWriterLogger(
-        false/*isLoner*/, false/*isSecurityLog*/, dc, 
-        false);        
-    
-    // if config was non-null, then these will be added to it...
-    nonDefault.put(DistributionConfig.LOG_WRITER_NAME, logger);
-    
-    return logger;
-  }
-  
-  /**
-   * Fetches the GemFireDescription for this test and adds its 
-   * DistributedSystem properties to the provided props parameter.
-   * 
-   * @param config the properties to add hydra's test properties to
-   */
-  protected static void addHydraProperties(Properties config) {
-    Properties p = DUnitEnv.get().getDistributedSystemProperties();
-    for (Iterator iter = p.entrySet().iterator();
-        iter.hasNext(); ) {
-      Map.Entry entry = (Map.Entry) iter.next();
-      String key = (String) entry.getKey();
-      String value = (String) entry.getValue();
-      if (config.getProperty(key) == null) {
-        config.setProperty(key, value);
-      }
-    }
-  }
-  
-  ////////////////////////  Constructors  ////////////////////////
-
   /**
    * Creates a new <code>DistributedTestCase</code> test with the
    * given name.
    */
-  public DistributedTestCase(String name) {
+  public DistributedTestCase(final String name) {
     super(name);
     DUnitLauncher.launchIfNeeded();
   }
 
-  ///////////////////////  Instance Methods  ///////////////////////
-
-
-  protected Class getTestClass() {
-    Class clazz = getClass();
-    while (clazz.getDeclaringClass() != null) {
-      clazz = clazz.getDeclaringClass();
-    }
-    return clazz;
-  }
-  
+  //---------------------------------------------------------------------------
+  // methods for tests
+  //---------------------------------------------------------------------------
   
-  /**
-   * This finds the log level configured for the test run.  It should be used
-   * when creating a new distributed system if you want to specify a log level.
-   * @return the dunit log-level setting
-   */
-  public static String getDUnitLogLevel() {
-    Properties p = DUnitEnv.get().getDistributedSystemProperties();
-    String result = p.getProperty(DistributionConfig.LOG_LEVEL_NAME);
-    if (result == null) {
-      result = ManagerLogWriter.levelToString(DistributionConfig.DEFAULT_LOG_LEVEL);
-    }
-    return result;
-  }
-
-  public final static Properties getAllDistributedSystemProperties(Properties props) {
-    Properties p = DUnitEnv.get().getDistributedSystemProperties();
-    
-    // our tests do not expect auto-reconnect to be on by default
-    if (!p.contains(DistributionConfig.DISABLE_AUTO_RECONNECT_NAME)) {
-      p.put(DistributionConfig.DISABLE_AUTO_RECONNECT_NAME, "true");
-    }
-
-    for (Iterator iter = props.entrySet().iterator();
-    iter.hasNext(); ) {
-      Map.Entry entry = (Map.Entry) iter.next();
-      String key = (String) entry.getKey();
-      Object value = entry.getValue();
-      p.put(key, value);
-    }
-    return p;
-  }
-
-  public void setSystem(Properties props, DistributedSystem ds) {
+  public final void setSystem(final Properties props, final DistributedSystem ds) { // TODO: override getDistributedSystemProperties and then delete
     system = (InternalDistributedSystem)ds;
     lastSystemProperties = props;
-    lastSystemCreatedInTest = getTestClass();
+    lastSystemCreatedInTest = getClass(); // used to be getDeclaringClass()
   }
+  
   /**
    * Returns this VM's connection to the distributed system.  If
    * necessary, the connection will be lazily created using the given
@@ -470,17 +123,17 @@ public abstract class DistributedTestCase extends TestCase implements java.io.Se
    * see hydra.DistributedConnectionMgr#connect
    * @since 3.0
    */
-  public /*final*/ InternalDistributedSystem getSystem(Properties props) {
+  public /*final*/ InternalDistributedSystem getSystem(final Properties props) { // TODO: make final
     // Setting the default disk store name is now done in setUp
     if (system == null) {
       system = InternalDistributedSystem.getAnyInstance();
     }
     if (system == null || !system.isConnected()) {
       // Figure out our distributed system properties
-      Properties p = getAllDistributedSystemProperties(props);
-      lastSystemCreatedInTest = getTestClass();
+      Properties p = DistributedTestUtils.getAllDistributedSystemProperties(props);
+      lastSystemCreatedInTest = getClass(); // used to be getDeclaringClass()
       if (logPerTest) {
-        String testMethod = getTestName();
+        String testMethod = getTestMethodName();
         String testName = lastSystemCreatedInTest.getName() + '-' + testMethod;
         String oldLogFile = p.getProperty(DistributionConfig.LOG_FILE_NAME);
         p.put(DistributionConfig.LOG_FILE_NAME, 
@@ -493,11 +146,11 @@ public abstract class DistributedTestCase extends TestCase implements java.io.Se
       lastSystemProperties = p;
     } else {
       boolean needNewSystem = false;
-      if(!getTestClass().equals(lastSystemCreatedInTest)) {
-        Properties newProps = getAllDistributedSystemProperties(props);
+      if(!getClass().equals(lastSystemCreatedInTest)) { // used to be getDeclaringClass()
+        Properties newProps = DistributedTestUtils.getAllDistributedSystemProperties(props);
         needNewSystem = !newProps.equals(lastSystemProperties);
         if(needNewSystem) {
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "Test class has changed and the new DS properties are not an exact match. "
                   + "Forcing DS disconnect. Old props = "
                   + lastSystemProperties + "new props=" + newProps);
@@ -511,7 +164,7 @@ public abstract class DistributedTestCase extends TestCase implements java.io.Se
           String value = (String) entry.getValue();
           if (!value.equals(activeProps.getProperty(key))) {
             needNewSystem = true;
-            getLogWriter().info("Forcing DS disconnect. For property " + key
+            LogWriterUtils.getLogWriter().info("Forcing DS disconnect. For property " + key
                                 + " old value = " + activeProps.getProperty(key)
                                 + " new value = " + value);
             break;
@@ -521,7 +174,7 @@ public abstract class DistributedTestCase extends TestCase implements java.io.Se
       if(needNewSystem) {
         // the current system does not meet our needs to disconnect and
         // call recursively to get a new system.
-        getLogWriter().info("Disconnecting from current DS in order to make a new one");
+        LogWriterUtils.getLogWriter().info("Disconnecting from current DS in order to make a new one");
         disconnectFromDS();
         getSystem(props);
       }
@@ -529,56 +182,6 @@ public abstract class DistributedTestCase extends TestCase implements java.io.Se
     return system;
   }
 
-
-  /**
-   * Crash the cache in the given VM in such a way that it immediately stops communicating with
-   * peers.  This forces the VM's membership manager to throw a ForcedDisconnectException by
-   * forcibly terminating the JGroups protocol stack with a fake EXIT event.<p>
-   * 
-   * NOTE: if you use this method be sure that you clean up the VM before the end of your
-   * test with disconnectFromDS() or disconnectAllFromDS().
-   */
-  public boolean crashDistributedSystem(VM vm) {
-    return (Boolean)vm.invoke(new SerializableCallable("crash distributed system") {
-      public Object call() throws Exception {
-        DistributedSystem msys = InternalDistributedSystem.getAnyInstance();
-        crashDistributedSystem(msys);
-        return true;
-      }
-    });
-  }
-  
-  /**
-   * Crash the cache in the given VM in such a way that it immediately stops communicating with
-   * peers.  This forces the VM's membership manager to throw a ForcedDisconnectException by
-   * forcibly terminating the JGroups protocol stack with a fake EXIT event.<p>
-   * 
-   * NOTE: if you use this method be sure that you clean up the VM before the end of your
-   * test with disconnectFromDS() or disconnectAllFromDS().
-   */
-  public void crashDistributedSystem(final DistributedSystem msys) {
-    MembershipManagerHelper.crashDistributedSystem(msys);
-    MembershipManagerHelper.inhibitForcedDisconnectLogging(false);
-    WaitCriterion wc = new WaitCriterion() {
-      public boolean done() {
-        return !msys.isConnected();
-      }
-      public String description() {
-        return "waiting for distributed system to finish disconnecting: " + msys;
-      }
-    };
-//    try {
-      waitForCriterion(wc, 10000, 1000, true);
-//    } finally {
-//      dumpMyThreads(getLogWriter());
-//    }
-  }
-
-  private String getDefaultDiskStoreName() {
-    String vmid = System.getProperty("vmid");
-    return "DiskStore-"  + vmid + "-"+ getTestClass().getCanonicalName() + "." + getTestName();
-  }
-
   /**
    * Returns this VM's connection to the distributed system.  If
    * necessary, the connection will be lazily created using the
@@ -590,7 +193,7 @@ public abstract class DistributedTestCase extends TestCase implements java.io.Se
    * @since 3.0
    */
   public final InternalDistributedSystem getSystem() {
-    return getSystem(this.getDistributedSystemProperties());
+    return getSystem(getDistributedSystemProperties());
   }
 
   /**
@@ -600,7 +203,7 @@ public abstract class DistributedTestCase extends TestCase implements java.io.Se
    * @since 6.5
    */
   public final InternalDistributedSystem getLonerSystem() {
-    Properties props = this.getDistributedSystemProperties();
+    Properties props = getDistributedSystemProperties();
     props.put(DistributionConfig.MCAST_PORT_NAME, "0");
     props.put(DistributionConfig.LOCATORS_NAME, "");
     return getSystem(props);
@@ -612,7 +215,7 @@ public abstract class DistributedTestCase extends TestCase implements java.io.Se
    * Added specifically to test scenario of defect #47181.
    */
   public final InternalDistributedSystem getLonerSystemWithEnforceUniqueHost() {
-    Properties props = this.getDistributedSystemProperties();
+    Properties props = getDistributedSystemProperties();
     props.put(DistributionConfig.MCAST_PORT_NAME, "0");
     props.put(DistributionConfig.LOCATORS_NAME, "");
     props.put(DistributionConfig.ENFORCE_UNIQUE_HOST_NAME, "true");
@@ -641,227 +244,16 @@ public abstract class DistributedTestCase extends TestCase implements java.io.Se
     return new Properties();
   }
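// A minimal sketch (not part of this diff) showing how a subclass might override
// getDistributedSystemProperties(), as the TODO on setSystem() above suggests.
// The class name and the chosen log-level value are hypothetical, and the base
// method is assumed to be public, matching the default implementation shown above.

import java.util.Properties;

import com.gemstone.gemfire.distributed.internal.DistributionConfig;
import com.gemstone.gemfire.test.dunit.DistributedTestCase;

public class ExampleDistributedTest extends DistributedTestCase {

  public ExampleDistributedTest(final String name) {
    super(name);
  }

  @Override
  public Properties getDistributedSystemProperties() {
    Properties props = new Properties();
    // getSystem() merges these with the DUnit defaults when it lazily connects.
    props.setProperty(DistributionConfig.LOG_LEVEL_NAME, "info");
    return props;
  }
}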
 
-  /**
-   * Sets up the test (noop).
-   */
-  @Override
-  public void setUp() throws Exception {
-    logTestHistory();
-    setUpCreationStackGenerator();
-    testName = getName();
-    System.setProperty(HoplogConfig.ALLOW_LOCAL_HDFS_PROP, "true");
-    
-    if (testName != null) {
-      GemFireCacheImpl.setDefaultDiskStoreName(getDefaultDiskStoreName());
-      String baseDefaultDiskStoreName = getTestClass().getCanonicalName() + "." + getTestName();
-      for (int h = 0; h < Host.getHostCount(); h++) {
-        Host host = Host.getHost(h);
-        for (int v = 0; v < host.getVMCount(); v++) {
-          VM vm = host.getVM(v);
-          String vmDefaultDiskStoreName = "DiskStore-" + h + "-" + v + "-" + baseDefaultDiskStoreName;
-          vm.invoke(DistributedTestCase.class, "perVMSetUp", new Object[] {testName, vmDefaultDiskStoreName});
-        }
-      }
-    }
-    System.out.println("\n\n[setup] START TEST " + getClass().getSimpleName()+"."+testName+"\n\n");
-  }
-
-  /**
-   * Write a message to the log about what tests have ran previously. This
-   * makes it easier to figure out if a previous test may have caused problems
-   */
-  private void logTestHistory() {
-    String classname = getClass().getSimpleName();
-    testHistory.add(classname);
-    System.out.println("Previously run tests: " + testHistory);
-  }
-
-  public static void perVMSetUp(String name, String defaultDiskStoreName) {
-    setTestName(name);
-    GemFireCacheImpl.setDefaultDiskStoreName(defaultDiskStoreName);
-    System.setProperty(HoplogConfig.ALLOW_LOCAL_HDFS_PROP, "true");    
-  }
-  public static void setTestName(String name) {
-    testName = name;
-  }
-  
-  public static String getTestName() {
-    return testName;
-  }
-
-  /**
-   * For logPerTest to work, we have to disconnect from the DS, but all
-   * subclasses do not call super.tearDown(). To prevent this scenario
-   * this method has been declared final. Subclasses must now override
-   * {@link #tearDown2()} instead.
-   * @throws Exception
-   */
-  @Override
-  public final void tearDown() throws Exception {
-    tearDownCreationStackGenerator();
-    tearDown2();
-    realTearDown();
-    tearDownAfter();
-  }
-
-  /**
-   * Tears down the test. This method is called by the final {@link #tearDown()} method and should be overridden to
-   * perform actual test cleanup and release resources used by the test.  The tasks executed by this method are
-   * performed before the DUnit test framework using Hydra cleans up the client VMs.
-   * <p/>
-   * @throws Exception if the tear down process and test cleanup fails.
-   * @see #tearDown
-   * @see #tearDownAfter()
-   */
-  // TODO rename this method to tearDownBefore and change the access modifier to protected!
-  public void tearDown2() throws Exception {
-  }
-
-  protected void realTearDown() throws Exception {
-    if (logPerTest) {
-      disconnectFromDS();
-      invokeInEveryVM(DistributedTestCase.class, "disconnectFromDS");
-    }
-    cleanupAllVms();
-  }
-  
-  /**
-   * Tears down the test.  Performs additional tear down tasks after the DUnit tests framework using Hydra cleans up
-   * the client VMs.  This method is called by the final {@link #tearDown()} method and should be overridden to perform
-   * post tear down activities.
-   * <p/>
-   * @throws Exception if the test tear down process fails.
-   * @see #tearDown()
-   * @see #tearDown2()
-   */
-  protected void tearDownAfter() throws Exception {
-  }
-
-  public static void cleanupAllVms()
-  {
-    cleanupThisVM();
-    invokeInEveryVM(DistributedTestCase.class, "cleanupThisVM");
-    invokeInLocator(new SerializableRunnable() {
-      public void run() {
-        DistributionMessageObserver.setInstance(null);
-        unregisterInstantiatorsInThisVM();
-      }
-    });
-    DUnitLauncher.closeAndCheckForSuspects();
-  }
-
-
-  private static void cleanupThisVM() {
-    closeCache();
-    
-    SocketCreator.resolve_dns = true;
-    SocketCreator.resetHostNameCache();
-    CacheCreation.clearThreadLocals();
-    System.getProperties().remove("gemfire.log-level");
-    System.getProperties().remove("jgroups.resolve_dns");
-    InitialImageOperation.slowImageProcessing = 0;
-    DistributionMessageObserver.setInstance(null);
-    QueryTestUtils.setCache(null);
-    CacheServerTestUtil.clearCacheReference();
-    RegionTestCase.preSnapshotRegion = null;
-    GlobalLockingDUnitTest.region_testBug32356 = null;
-    LogWrapper.close();
-    ClientProxyMembershipID.system = null;
-    MultiVMRegionTestCase.CCRegion = null;
-    InternalClientMembership.unregisterAllListeners();
-    ClientStatsManager.cleanupForTests();
-    ClientServerTestCase.AUTO_LOAD_BALANCE = false;
-    unregisterInstantiatorsInThisVM();
-    DistributionMessageObserver.setInstance(null);
-    QueryObserverHolder.reset();
-    DiskStoreObserver.setInstance(null);
-    
-    if (InternalDistributedSystem.systemAttemptingReconnect != null) {
-      InternalDistributedSystem.systemAttemptingReconnect.stopReconnecting();
-    }
-    ExpectedException ex;
-    while((ex = expectedExceptions.poll()) != null) {
-      ex.remove();
-    }
-  }
-
-  private static void closeCache() {
-    GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-    if(cache != null && !cache.isClosed()) {
-      destroyRegions(cache);
-      cache.close();
-    }
-  }
-  
-  protected static final void destroyRegions(Cache cache)
-      throws InternalGemFireError, Error, VirtualMachineError {
-    if (cache != null && !cache.isClosed()) {
-      //try to destroy the root regions first so that
-      //we clean up any persistent files.
-      for (Iterator itr = cache.rootRegions().iterator(); itr.hasNext();) {
-        Region root = (Region)itr.next();
-        //for colocated regions you can't locally destroy a partitioned
-        //region.
-        if(root.isDestroyed() || root instanceof HARegion || root instanceof PartitionedRegion) {
-          continue;
-        }
-        try {
-          root.localDestroyRegion("teardown");
-        }
-        catch (VirtualMachineError e) {
-          SystemFailure.initiateFailure(e);
-          throw e;
-        }
-        catch (Throwable t) {
-          getLogWriter().error(t);
-        }
-      }
-    }
-  }
-  
-  
-  public static void unregisterAllDataSerializersFromAllVms()
-  {
-    unregisterDataSerializerInThisVM();
-    invokeInEveryVM(new SerializableRunnable() {
-      public void run() {
-        unregisterDataSerializerInThisVM();
-      }
-    });
-    invokeInLocator(new SerializableRunnable() {
-      public void run() {
-        unregisterDataSerializerInThisVM();
-      }
-    });
-  }
-
-  public static void unregisterInstantiatorsInThisVM() {
-    // unregister all the instantiators
-    InternalInstantiator.reinitialize();
-    assertEquals(0, InternalInstantiator.getInstantiators().length);
-  }
-  
-  public static void unregisterDataSerializerInThisVM()
-  {
-    DataSerializerPropogationDUnitTest.successfullyLoadedTestDataSerializer = false;
-    // unregister all the Dataserializers
-    InternalDataSerializer.reinitialize();
-    // ensure that all are unregistered
-    assertEquals(0, InternalDataSerializer.getSerializers().length);
-  }
-
-
-  protected static void disconnectAllFromDS() {
+  public static void disconnectAllFromDS() {
     disconnectFromDS();
-    invokeInEveryVM(DistributedTestCase.class,
-                    "disconnectFromDS");
+    Invoke.invokeInEveryVM(()->disconnectFromDS());
   }
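// A minimal sketch (not part of this diff) of the lambda-based Invoke helpers that
// this refactoring moves tests toward, mirroring disconnectAllFromDS() above and the
// cleanupAllVms() hunk further below. The wrapper class name is hypothetical, and
// Invoke/DistributedTestUtils are assumed to live in com.gemstone.gemfire.test.dunit.

import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
import com.gemstone.gemfire.test.dunit.Invoke;

public class CleanupExample {
  // Run the same serializable lambda in every remote DUnit VM and in the locator VM.
  public static void unregisterInstantiatorsEverywhere() {
    Invoke.invokeInEveryVM(() -> DistributedTestUtils.unregisterInstantiatorsInThisVM());
    Invoke.invokeInLocator(() -> DistributedTestUtils.unregisterInstantiatorsInThisVM());
  }
}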
 
   /**
    * Disconnects this VM from the distributed system
    */
   public static void disconnectFromDS() {
-    testName = null;
+    setTestMethodName(null);
     GemFireCacheImpl.testCacheXml = null;
     if (system != null) {
       system.disconnect();
@@ -875,560 +267,266 @@ public abstract class DistributedTestCase extends TestCase implements java.io.Se
       }
       try {
         ds.disconnect();
-      }
-      catch (Exception e) {
+      } catch (Exception e) {
         // ignore
       }
     }
     
-    {
-      AdminDistributedSystemImpl ads = 
-          AdminDistributedSystemImpl.getConnectedInstance();
-      if (ads != null) {// && ads.isConnected()) {
-        ads.disconnect();
-      }
+    AdminDistributedSystemImpl ads = AdminDistributedSystemImpl.getConnectedInstance();
+    if (ads != null) {// && ads.isConnected()) {
+      ads.disconnect();
     }
   }
 
-  /**
-   * Strip the package off and gives just the class name.
-   * Needed because of Windows file name limits.
-   */
-  private String getShortClassName() {
-    String result = this.getClass().getName();
-    int idx = result.lastIndexOf('.');
-    if (idx != -1) {
-      result = result.substring(idx+1);
-    }
-    return result;
-  }
+  //---------------------------------------------------------------------------
+  // name methods
+  //---------------------------------------------------------------------------
   
-  /** get the host name to use for a server cache in client/server dunit
-   * testing
-   * @param host
-   * @return the host name
-   */
-  public static String getServerHostName(Host host) {
-    return System.getProperty("gemfire.server-bind-address") != null?
-        System.getProperty("gemfire.server-bind-address")
-        : host.getHostName();
+  public static String getTestMethodName() {
+    return testMethodName;
   }
 
-  /** get the IP literal name for the current host, use this instead of  
-   * "localhost" to avoid IPv6 name resolution bugs in the JDK/machine config.
-   * @return an ip literal, this method honors java.net.preferIPvAddresses
-   */
-  public static String getIPLiteral() {
-    try {
-      return SocketCreator.getLocalHost().getHostAddress();
-    } catch (UnknownHostException e) {
-      throw new Error("problem determining host IP address", e);
-    }
+  public static void setTestMethodName(final String testMethodName) { // TODO: delete
+    DistributedTestCase.testMethodName = testMethodName;
   }
- 
- 
-  /**
-   * Get the port that the standard dunit locator is listening on.
-   * @return
-   */
-  public static int getDUnitLocatorPort() {
-    return DUnitEnv.get().getLocatorPort();
-  }
-    
   
   /**
    * Returns a unique name for this test method.  It is based on the
    * name of the class as well as the name of the method.
    */
   public String getUniqueName() {
-    return getShortClassName() + "_" + this.getName();
-  }
-
-  /**
-   * Returns a <code>LogWriter</code> for logging information
-   * @deprecated Use a static logger from the log4j2 LogService.getLogger instead.
-   */
-  @Deprecated
-  public static InternalLogWriter getLogWriter() {
-    return oldLogger;
+    return getClass().getSimpleName() + "_" + getName();
   }
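// A minimal sketch (not part of this diff): getUniqueName() combines the simple
// class name with the current test method name, which tests can use for per-test
// region or disk-store names. The names below are hypothetical.

String regionName = getUniqueName(); // e.g. "ExampleDUnitTest_testPut"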
 
+  //---------------------------------------------------------------------------
+  // setup methods
+  //---------------------------------------------------------------------------
+  
   /**
-   * Helper method that causes this test to fail because of the given
-   * exception.
+   * Sets up the DistributedTestCase.
+   * <p>
+   * Do not override this method. Override {@link #preSetUp()} with work that
+   * needs to occur before the DistributedTestCase setup or override
+   * {@link #postSetUp()} with work that needs to occur after it.
    */
-  public static void fail(String message, Throwable ex) {
-    StringWriter sw = new StringWriter();
-    PrintWriter pw = new PrintWriter(sw, true);
-    pw.print(message);
-    pw.print(": ");
-    ex.printStackTrace(pw);
-    fail(sw.toString());
-  }
-
-  // utility methods
-
-  /** pause for a default interval */
-  protected void pause() {
-    pause(250);
+  @Override
+  public void setUp() throws Exception {
+    preSetUp();
+    setUpDistributedTestCase();
+    postSetUp();
   }
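// A minimal sketch (not part of this diff) of the lifecycle hooks described in the
// javadoc above: subclasses override preSetUp()/postSetUp() rather than setUp()
// itself. The class name and the work done in postSetUp() are hypothetical.

import com.gemstone.gemfire.test.dunit.DistributedTestCase;

public class ExampleDUnitTest extends DistributedTestCase {

  public ExampleDUnitTest(final String name) {
    super(name);
  }

  @Override
  protected void postSetUp() throws Exception {
    // Runs after setUpDistributedTestCase() has prepared this VM and the remote VMs.
    getSystem(); // lazily connect this VM to the distributed system
  }
}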
-
+  
   /**
-   * Use of this function indicates a place in the tests tree where t
-   * he use of Thread.sleep() is
-   * highly questionable.
+   * Sets up DistributedTest in controller and remote VMs. This includes
+   * defining the test name, setting the default disk store name, logging the 
+   * test history, and capturing a creation stack for detecting the source of
+   * incompatible DistributedSystem connections.
    * <p>
-   * Some places in the system, especially those that test expirations and other
-   * timeouts, have a very good reason to call {@link Thread#sleep(long)}.  The
-   * <em>other</em> places are marked by the use of this method.
-   * 
-   * @param ms
+   * Do not override this method.
    */
-  static public final void staticPause(int ms) {
-//    getLogWriter().info("FIXME: Pausing for " + ms + " ms..."/*, new Exception()*/);
-    final long target = System.currentTimeMillis() + ms;
-    try {
-      for (;;) {
-        long msLeft = target - System.currentTimeMillis();
-        if (msLeft <= 0) {
-          break;
-        }
-        Thread.sleep(msLeft);
+  private final void setUpDistributedTestCase() {
+    final String className = getClass().getCanonicalName();
+    final String methodName = getName();
+    
+    logTestHistory();
+    
+    setUpVM(methodName, getDefaultDiskStoreName(0, -1, className, methodName));
+    
+    for (int hostIndex = 0; hostIndex < Host.getHostCount(); hostIndex++) {
+      Host host = Host.getHost(hostIndex);
+      for (int vmIndex = 0; vmIndex < host.getVMCount(); vmIndex++) {
+        final String vmDefaultDiskStoreName = getDefaultDiskStoreName(hostIndex, vmIndex, className, methodName);
+        host.getVM(vmIndex).invoke(()->setUpVM(methodName, vmDefaultDiskStoreName));
       }
     }
-    catch (InterruptedException e) {
-      fail("interrupted", e);
-    }
     
+    logTestStart();
   }
-  
+
   /**
-   * Blocks until the clock used for expiration moves forward.
-   * @return the last time stamp observed
+   * <code>preSetUp()</code> is invoked before {@link #setUpDistributedTestCase()}.
+   * <p>
+   * Override this as needed. Default implementation is empty.
    */
-  public static final long waitForExpiryClockToChange(LocalRegion lr) {
-    return waitForExpiryClockToChange(lr, lr.cacheTimeMillis());
+  protected void preSetUp() throws Exception {
   }
+  
   /**
-   * Blocks until the clock used for expiration moves forward.
-   * @param baseTime the timestamp that the clock must exceed
-   * @return the last time stamp observed
+   * <code>postSetUp()</code> is invoked after {@link #setUpDistributedTestCase()}.
+   * <p>
+   * Override this as needed. Default implementation is empty.
    */
-  public static final long waitForExpiryClockToChange(LocalRegion lr, final long baseTime) {
-    long nowTime;
-    do {
-      Thread.yield();
-      nowTime = lr.cacheTimeMillis();
-    } while ((nowTime - baseTime) <= 0L);
-    return nowTime;
+  protected void postSetUp() throws Exception {
   }
   
-  /** pause for specified ms interval
-   * Make sure system clock has advanced by the specified number of millis before
-   * returning.
-   */
-  public static final void pause(int ms) {
-    LogWriter log = getLogWriter();
-    if (ms >= 1000 || log.fineEnabled()) { // check for fine but log at info
-      getLogWriter().info("Pausing for " + ms + " ms..."/*, new Exception()*/);
-    }
-    final long target = System.currentTimeMillis() + ms;
-    try {
-      for (;;) {
-        long msLeft = target - System.currentTimeMillis();
-        if (msLeft <= 0) {
-          break;
-        }
-        Thread.sleep(msLeft);
-      }
-    }
-    catch (InterruptedException e) {
-      fail("interrupted", e);
-    }
+  private static String getDefaultDiskStoreName(final int hostIndex, final int vmIndex, final String className, final String methodName) {
+    return "DiskStore-" + String.valueOf(hostIndex) + "-" + String.valueOf(vmIndex) + "-" + className + "." + methodName; // used to be getDeclaringClass()
   }
   
-  public interface WaitCriterion {
-    public boolean done();
-    public String description();
-  }
-  
-  public interface WaitCriterion2 extends WaitCriterion {
-    /**
-     * If this method returns true then quit waiting even if we are not done.
-     * This allows a wait to fail early.
-     */
-    public boolean stopWaiting();
+  private static void setUpVM(final String methodName, final String defaultDiskStoreName) {
+    setTestMethodName(methodName);
+    GemFireCacheImpl.setDefaultDiskStoreName(defaultDiskStoreName);
+    System.setProperty(HoplogConfig.ALLOW_LOCAL_HDFS_PROP, "true");    
+    setUpCreationStackGenerator();
   }
 
-  /**
-   * If true, we randomize the amount of time we wait before polling a
-   * {@link WaitCriterion}.
-   */
-  static private final boolean USE_JITTER = true;
-  static private final Random jitter = new Random();
-  
-  /**
-   * Return a jittered interval up to a maximum of <code>ms</code>
-   * milliseconds, inclusive.
-   * 
-   * The result is bounded by 50 ms as a minimum and 5000 ms as a maximum.
-   * 
-   * @param ms total amount of time to wait
-   * @return randomized interval we should wait
-   */
-  private static int jitterInterval(long ms) {
-    final int minLegal = 50;
-    final int maxLegal = 5000;
-    if (ms <= minLegal) {
-      return (int)ms; // Don't ever jitter anything below this.
-    }
-
-    int maxReturn = maxLegal;
-    if (ms < maxLegal) {
-      maxReturn = (int)ms;
-    }
-
-    return minLegal + jitter.nextInt(maxReturn - minLegal + 1);
+  private void logTestStart() {
+    System.out.println("\n\n[setup] START TEST " + getClass().getSimpleName()+"."+testMethodName+"\n\n");
   }
   
-  /**
-   * Wait until given criterion is met
-   * @param ev criterion to wait on
-   * @param ms total time to wait, in milliseconds
-   * @param interval pause interval between waits
-   * @param throwOnTimeout if false, don't generate an error
-   * @deprecated Use {@link Awaitility} instead.
-   */
-  @Deprecated
-  static public void waitForCriterion(WaitCriterion ev, long ms, 
-      long interval, boolean throwOnTimeout) {
-    long waitThisTime;
-    if (USE_JITTER) {
-      waitThisTime = jitterInterval(interval);
-    }
-    else {
-      waitThisTime = interval;
-    }
-    final long tilt = System.currentTimeMillis() + ms;
-    for (;;) {
-//      getLogWriter().info("Testing to see if event has occurred: " + ev.description());
-      if (ev.done()) {
-        return; // success
-      }
-      if (ev instanceof WaitCriterion2) {
-        WaitCriterion2 ev2 = (WaitCriterion2)ev;
-        if (ev2.stopWaiting()) {
-          if (throwOnTimeout) {
-            fail("stopWaiting returned true: " + ev.description());
+  private static void setUpCreationStackGenerator() {
+    // the following is moved from InternalDistributedSystem to fix #51058
+    InternalDistributedSystem.TEST_CREATION_STACK_GENERATOR.set(
+    new CreationStackGenerator() {
+      @Override
+      public Throwable generateCreationStack(final DistributionConfig config) {
+        final StringBuilder sb = new StringBuilder();
+        final String[] validAttributeNames = config.getAttributeNames();
+        for (int i = 0; i < validAttributeNames.length; i++) {
+          final String attName = validAttributeNames[i];
+          final Object actualAtt = config.getAttributeObject(attName);
+          String actualAttStr = actualAtt.toString();
+          sb.append("  ");
+          sb.append(attName);
+          sb.append("=\"");
+          if (actualAtt.getClass().isArray()) {
+            actualAttStr = InternalDistributedSystem.arrayToString(actualAtt);
           }
-          return;
-        }
-      }
-
-      // Calculate time left
-      long timeLeft = tilt - System.currentTimeMillis();
-      if (timeLeft <= 0) {
-        if (!throwOnTimeout) {
-          return; // not an error, but we're done
+          sb.append(actualAttStr);
+          sb.append("\"");
+          sb.append("\n");
         }
-        fail("Event never occurred after " + ms + " ms: " + ev.description());
-      }
-      
-      if (waitThisTime > timeLeft) {
-        waitThisTime = timeLeft;
-      }
-      
-      // Wait a little bit
-      Thread.yield();
-      try {
-//        getLogWriter().info("waiting " + waitThisTime + "ms for " + ev.description());
-        Thread.sleep(waitThisTime);
-      } catch (InterruptedException e) {
-        fail("interrupted");
+        return new Throwable("Creating distributed system with the following configuration:\n" + sb.toString());
       }
-    }
+    });
   }
-
+  
   /**
-   * Wait on a mutex.  This is done in a loop in order to address the
-   * "spurious wakeup" "feature" in Java.
-   * @param ev condition to test
-   * @param mutex object to lock and wait on
-   * @param ms total amount of time to wait
-   * @param interval interval to pause for the wait
-   * @param throwOnTimeout if false, no error is thrown.
+   * Write a message to the log about which tests have run previously. This
+   * makes it easier to figure out whether a previous test may have caused problems.
    */
-  static public void waitMutex(WaitCriterion ev, Object mutex, long ms, 
-      long interval, boolean throwOnTimeout) {
-    final long tilt = System.currentTimeMillis() + ms;
-    long waitThisTime;
-    if (USE_JITTER) {
-      waitThisTime = jitterInterval(interval);
-    }
-    else {
-      waitThisTime = interval;
-    }
-    synchronized (mutex) {
-      for (;;) {
-        if (ev.done()) {
-          break;
-        }
-        
-        long timeLeft = tilt - System.currentTimeMillis();
-        if (timeLeft <= 0) {
-          if (!throwOnTimeout) {
-            return; // not an error, but we're done
-          }
-          fail("Event never occurred after " + ms + " ms: " + ev.description());
-        }
-        
-        if (waitThisTime > timeLeft) {
-          waitThisTime = timeLeft;
-        }
-        
-        try {
-          mutex.wait(waitThisTime);
-        } catch (InterruptedException e) {
-          fail("interrupted");
-        }
-      } // for
-    } // synchronized
+  private void logTestHistory() {
+    String classname = getClass().getSimpleName();
+    testHistory.add(classname);
+    System.out.println("Previously run tests: " + testHistory);
   }
 
+  //---------------------------------------------------------------------------
+  // teardown methods
+  //---------------------------------------------------------------------------
+
   /**
-   * Wait for a thread to join
-   * @param t thread to wait on
-   * @param ms maximum time to wait
-   * @throws AssertionError if the thread does not terminate
+   * Tears down the DistributedTestCase.
+   * <p>
+   * Do not override this method. Override {@link #preTearDown()} with work that
+   * needs to occur before the DistributedTestCase teardown or override
+   * {@link #postTearDown()} with work that needs to occur after it.
    */
-  static public void join(Thread t, long ms, LogWriter logger) {
-    final long tilt = System.currentTimeMillis() + ms;
-    final long incrementalWait;
-    if (USE_JITTER) {
-      incrementalWait = jitterInterval(ms);
-    }
-    else {
-      incrementalWait = ms; // wait entire time, no looping.
-    }
-    final long start = System.currentTimeMillis();
-    for (;;) {
-      // I really do *not* understand why this check is necessary
-      // but it is, at least with JDK 1.6.  According to the source code
-      // and the javadocs, one would think that join() would exit immediately
-      // if the thread is dead.  However, I can tell you from experimentation
-      // that this is not the case. :-(  djp 2008-12-08
-      if (!t.isAlive()) {
-        break;
-      }
-      try {
-        t.join(incrementalWait);
-      } catch (InterruptedException e) {
-        fail("interrupted");
-      }
-      if (System.currentTimeMillis() >= tilt) {
-        break;
-      }
-    } // for
-    if (logger == null) {
-      logger = new LocalLogWriter(LogWriterImpl.INFO_LEVEL, System.out);
-    }
-    if (t.isAlive()) {
-      logger.info("HUNG THREAD");
-      dumpStackTrace(t, t.getStackTrace(), logger);
-      dumpMyThreads(logger);
-      t.interrupt(); // We're in trouble!
-      fail("Thread did not terminate after " + ms + " ms: " + t);
-//      getLogWriter().warning("Thread did not terminate" 
-//          /* , new Exception()*/
-//          );
-    }
-    long elapsedMs = (System.currentTimeMillis() - start);
-    if (elapsedMs > 0) {
-      String msg = "Thread " + t + " took " 
-        + elapsedMs
-        + " ms to exit.";
-      logger.info(msg);
-    }
+  @Override
+  public final void tearDown() throws Exception {
+    preTearDown();
+    tearDownDistributedTestCase();
+    postTearDown();
   }
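// A minimal sketch (not part of this diff), continuing the hypothetical
// ExampleDUnitTest shown after setUp() above: the matching teardown hooks override
// preTearDown()/postTearDown() rather than tearDown() itself.

  @Override
  protected void preTearDown() throws Exception {
    // Runs before tearDownDistributedTestCase() cleans up this VM and the remote VMs.
    disconnectAllFromDS();
  }

  @Override
  protected void postTearDown() throws Exception {
    // Runs after the framework's own teardown; last chance for per-test cleanup.
  }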
 
-  public static void dumpStackTrace(Thread t, StackTraceElement[] stack, LogWriter logger) {
-    StringBuilder msg = new StringBuilder();
-    msg.append("Thread=<")
-      .append(t)
-      .append("> stackDump:\n");
-    for (int i=0; i < stack.length; i++) {
-      msg.append("\t")
-        .append(stack[i])
-        .append("\n");
+  private final void tearDownDistributedTestCase() throws Exception {
+    Invoke.invokeInEveryVM(()->tearDownCreationStackGenerator());
+    if (logPerTest) {
+      disconnectFromDS();
+      Invoke.invokeInEveryVM(()->disconnectFromDS());
     }
-    logger.info(msg.toString());
+    cleanupAllVms();
   }
+  
   /**
-   * Dump all thread stacks
+   * <code>preTearDown()</code> is invoked before {@link #tearDownDistributedTestCase()}.
+   * <p>
+   * Override this as needed. Default implementation is empty.
    */
-  public static void dumpMyThreads(LogWriter logger) {
-    OSProcess.printStacks(0, false);
+  protected void preTearDown() throws Exception {
   }
   
   /**
-   * A class that represents an currently logged expected exception, which
-   * should be removed
-   * 
-   * @author Mitch Thomas
-   * @since 5.7bugfix
+   * <code>postTearDown()</code> is invoked after {@link #tearDownDistributedTestCase()}.
+   * <p>
+   * Override this as needed. Default implementation is empty.
    */
-  public static class ExpectedException implements Serializable {
-    private static final long serialVersionUID = 1L;
-
-    final String ex;
-
-    final transient VM v;
-
-    public ExpectedException(String exception) {
-      this.ex = exception;
-      this.v = null;
-    }
-
-    ExpectedException(String exception, VM vm) {
-      this.ex = exception;
-      this.v = vm;
-    }
-
-    public String getRemoveString() {
-      return "<ExpectedException action=remove>" + ex + "</ExpectedException>";
-    }
-
-    public String getAddString() {
-      return "<ExpectedException action=add>" + ex + "</ExpectedException>";
-    }
+  protected void postTearDown() throws Exception {
+  }
+  
+  public static void cleanupAllVms() { // TODO: make private
+    tearDownVM();
+    Invoke.invokeInEveryVM(()->tearDownVM());
+    Invoke.invokeInLocator(()->{
+      DistributionMessageObserver.setInstance(null);
+      DistributedTestUtils.unregisterInstantiatorsInThisVM();
+    });
+    DUnitLauncher.closeAndCheckForSuspects();
+  }
 
-    public void remove() {
-      SerializableRunnable removeRunnable = new SerializableRunnable(
-          "removeExpectedExceptions") {
-        public void run() {
-          final String remove = getRemoveString();
-          final InternalDistributedSystem sys = InternalDistributedSystem
-              .getConnectedInstance();
-          if (sys != null) {
-            sys.getLogWriter().info(remove);
-          }
-          try {
-            getLogWriter().info(remove);
-          } catch (Exception noHydraLogger) {
-          }
+  private static void tearDownVM() {
+    closeCache();
 
-          logger.info(remove);
-        }
-      };
+    // keep alphabetized to detect duplicate lines
+    CacheCreation.clearThreadLocals();
+    CacheServerTestUtil.clearCacheReference();
+    ClientProxyMembershipID.system = null;
+    ClientServerTestCase.AUTO_LOAD_BALANCE = false;
+    ClientStatsManager.cleanupForTests();
+    DiskStoreObserver.setInstance(null);
+    DistributedTestUtils.unregisterInstantiatorsInThisVM();
+    DistributionMessageObserver.setInstance(null);
+    GlobalLockingDUnitTest.region_testBug32356 = null;
+    InitialImageOperation.slowImageProcessing = 0;
+    InternalClientMembership.unregisterAllListeners();
+    LogWrapper.close();
+    MultiVMRegionTestCase.CCRegion = null;
+    QueryObserverHolder.reset();
+    QueryTestUtils.setCache(null);
+    RegionTestCase.preSnapshotRegion = null;
+    SocketCreator.resetHostNameCache();
+    SocketCreator.resolve_dns = true;
 
-      if (this.v != null) {
-        v.invoke(removeRunnable);
-      }
-      else {
-        invokeInEveryVM(removeRunnable);
-      }
-      String s = getRemoveString();
-      LogManager.getLogger(LogService.BASE_LOGGER_NAME).info(s);
-      // log it locally
-      final InternalDistributedSystem sys = InternalDistributedSystem
-          .getConnectedInstance();
-      if (sys != null) { // avoid creating a system
-        sys.getLogWriter().info(s);
-      }
-      getLogWriter().info(s);
+    // clear system properties -- keep alphabetized
+    System.clearProperty("gemfire.log-level");
+    System.clearProperty(HoplogConfig.ALLOW_LOCAL_HDFS_PROP);    
+    System.clearProperty("jgroups.resolve_dns");
+    
+    if (InternalDistributedSystem.systemAttemptingReconnect != null) {
+      InternalDistributedSystem.systemAttemptingReconnect.stopReconnecting();
     }
+    
+    IgnoredException.removeAllExpectedExceptions();
   }
 
-  /**
-   * Log in all VMs, in both the test logger and the GemFire logger the
-   * expected exception string to prevent grep logs from complaining. The
-   * expected string is used by the GrepLogs utility and so can contain
-   * regular expression characters.
-   * 
-   * If you do not remove the expected exception, it will be removed at the
-   * end of your test case automatically.
-   * 
-   * @since 5.7bugfix
-   * @param exception
-   *          the exception string to expect
-   * @return an ExpectedException instance for removal
-   */
-  public static ExpectedException addExpectedException(final String exception) {
-    return addExpectedException(exception, null);
-  }
-
-  /**
-   * Log in all VMs, in both the test logger and the GemFire logger the
-   * expected exception string to prevent grep logs from complaining. The
-   * expected string is used by the GrepLogs utility and so can contain
-   * regular expression characters.
-   * 
-   * @since 5.7bugfix
-   * @param exception
-   *          the exception string to expect
-   * @param v
-   *          the VM on which to log the expected exception or null for all VMs
-   * @return an ExpectedException instance for removal purposes
-   */
-  public static ExpectedException addExpectedException(final String exception,
-      VM v) {
-    final ExpectedException ret;
-    if (v != null) {
-      ret = new ExpectedException(exception, v);
-    }
-    else {
-      ret = new ExpectedException(exception);
+  private static void closeCache() {
+    GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
+    if (cache != null && !cache.isClosed()) {
+      destroyRegions(cache);
+      cache.close();
     }
-    // define the add and remove expected exceptions
-    final String add = ret.getAddString();
-    SerializableRunnable addRunnable = new SerializableRunnable(
-        "addExpectedExceptions") {
-      public void run() {
-        final InternalDistributedSystem sys = InternalDistributedSystem
-            .getConnectedInstance();
-        if (sys != null) {
-          sys.getLogWriter().info(add);
+  }
+  
+  protected static final void destroyRegions(final Cache cache) { // TODO: make private
+    if (cache != null && !cache.isClosed()) {
+      // try to destroy the root regions first so that we clean up any persistent files.
+      for (Iterator itr = cache.rootRegions().iterator(); itr.hasNext();) {
+        Region root = (Region)itr.next();
+        String regionFullPath = root == null ? null : root.getFullPath();
+        // for colocated regions you can't locally destroy a partitioned region.
+        if(root.isDestroyed() || root instanceof HARegion || root instanceof PartitionedRegion) {
+          continue;
         }
         try {
-          getLogWriter().info(add);
-        } catch (Exception noHydraLogger) {
+          root.localDestroyRegion("teardown");
+        } catch (Throwable t) {
+          logger.error("Failure during tearDown destroyRegions for " + regionFullPath, t);
         }
- 
-        logger.info(add);
-      }
-    };
-    if (v != null) {
-      v.invoke(addRunnable);
-    }
-    else {
-      invokeInEveryVM(addRunnable);
-    }
-    
-    LogManager.getLogger(LogService.BASE_LOGGER_NAME).info(add);
-    // Log it locally too
-    final InternalDistributedSystem sys = InternalDistributedSystem
-        .getConnectedInstance();
-    if (sys != null) { // avoid creating a cache
-      sys.getLogWriter().info(add);
-    }
-    getLogWriter().info(add);
-    expectedExceptions.add(ret);
-    return ret;
-  }
-
-  /** 
-   * delete locator state files.  Use this after getting a random port
-   * to ensure that an old locator state file isn't picked up by the
-   * new locator you're starting.
-   * @param ports
-   */
-  public void deleteLocatorStateFile(int... ports) {
-    for (int i=0; i<ports.length; i++) {
-      File stateFile = new File("locator"+ports[i]+"view.dat");
-      if (stateFile.exists()) {
-        stateFile.delete();
       }
     }
   }
   
+  private static void tearDownCreationStackGenerator() {
+    InternalDistributedSystem.TEST_CREATION_STACK_GENERATOR.set(InternalDistributedSystem.DEFAULT_CREATION_STACK_GENERATOR);
+  }
 }
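
For illustration, a minimal sketch of how a test can hook into the tearDown lifecycle above. The subclass, property name, and helper method are hypothetical; preTearDown() runs before tearDownDistributedTestCase() and postTearDown() runs after it:

import com.gemstone.gemfire.test.dunit.DistributedTestCase;
import com.gemstone.gemfire.test.dunit.Invoke;

// Hypothetical test class: names below are illustrative, not part of this commit.
public class ExampleRegionDUnitTest extends DistributedTestCase {

  public ExampleRegionDUnitTest(final String name) {
    super(name);
  }

  @Override
  protected final void preTearDown() throws Exception {
    // Runs before tearDownDistributedTestCase(): release test-specific resources
    // in every VM while the distributed system is still available.
    Invoke.invokeInEveryVM(() -> closeExampleResources());
  }

  @Override
  protected final void postTearDown() throws Exception {
    // Runs after tearDownDistributedTestCase(): last-chance local cleanup
    // (the property name is hypothetical).
    System.clearProperty("example.test.flag");
  }

  private static void closeExampleResources() {
    // test-specific cleanup would go here
  }
}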

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/DistributedTestUtils.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/DistributedTestUtils.java b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/DistributedTestUtils.java
new file mode 100755
index 0000000..a040a32
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/DistributedTestUtils.java
@@ -0,0 +1,167 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.test.dunit;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.File;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Properties;
+
+import com.gemstone.gemfire.distributed.DistributedSystem;
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
+import com.gemstone.gemfire.distributed.internal.membership.gms.MembershipManagerHelper;
+import com.gemstone.gemfire.internal.InternalDataSerializer;
+import com.gemstone.gemfire.internal.InternalInstantiator;
+
+/**
+ * <code>DistributedTestUtils</code> provides static utility methods that 
+ * affect the runtime environment or artifacts generated by a DistributedTest.
+ * 
+ * These methods can be used directly: <code>DistributedTestUtils.crashDistributedSystem(...)</code>;
+ * however, they are intended to be referenced through static import:
+ *
+ * <pre>
+ * import static com.gemstone.gemfire.test.dunit.DistributedTestUtils.*;
+ *    ...
+ *    crashDistributedSystem(...);
+ * </pre>
+ *
+ * Extracted from DistributedTestCase.
+ */
+public class DistributedTestUtils {
+
+  protected DistributedTestUtils() {
+  }
+
+  /**
+   * Fetches the GemFireDescription for this test and adds its 
+   * DistributedSystem properties to the provided properties parameter.
+   * 
+   * @param properties the properties to which hydra's test properties are added
+   */
+  public static void addHydraProperties(final Properties properties) {
+    Properties dsProperties = DUnitEnv.get().getDistributedSystemProperties();
+    for (Iterator<Map.Entry<Object, Object>> iter = dsProperties.entrySet().iterator(); iter.hasNext();) {
+      Map.Entry<Object, Object> entry = iter.next();
+      String key = (String) entry.getKey();
+      String value = (String) entry.getValue();
+      if (properties.getProperty(key) == null) {
+        properties.setProperty(key, value);
+      }
+    }
+  }
+
+  /**
+   * Crash the cache of the given distributed system in such a way that it immediately stops communicating with
+   * peers.  This forces the system's membership manager to throw a ForcedDisconnectException by
+   * forcibly terminating the JGroups protocol stack with a fake EXIT event.<p>
+   * 
+   * NOTE: if you use this method be sure that you clean up the VM before the end of your
+   * test with disconnectFromDS() or disconnectAllFromDS().
+   */
+  public static void crashDistributedSystem(final DistributedSystem system) {
+    MembershipManagerHelper.crashDistributedSystem(system);
+    MembershipManagerHelper.inhibitForcedDisconnectLogging(false);
+    WaitCriterion wc = new WaitCriterion() {
+      public boolean done() {
+        return !system.isConnected();
+      }
+      public String description() {
+        return "Waiting for distributed system to finish disconnecting: " + system;
+      }
+    };
+    Wait.waitForCriterion(wc, 10000, 1000, true);
+  }
+
+  /**
+   * Crash the cache in the given VM in such a way that it immediately stops communicating with
+   * peers.  This forces the VM's membership manager to throw a ForcedDisconnectException by
+   * forcibly terminating the JGroups protocol stack with a fake EXIT event.<p>
+   * 
+   * NOTE: if you use this method be sure that you clean up the VM before the end of your
+   * test with disconnectFromDS() or disconnectAllFromDS().
+   */
+  public static boolean crashDistributedSystem(final VM vm) {
+    return vm.invoke(()->{
+        DistributedSystem system = InternalDistributedSystem.getAnyInstance();
+        crashDistributedSystem(system);
+        return true;
+      }
+    );
+  }
+  
+  /** 
+   * Delete locator state files.  Use this after getting a random port
+   * to ensure that an old locator state file isn't picked up by the
+   * new locator you're starting.
+   */
+  public static void deleteLocatorStateFile(final int... ports) {
+    for (int index = 0; index < ports.length; index++) {
+      File stateFile = new File("locator"+ports[index]+"view.dat");
+      if (stateFile.exists()) {
+        stateFile.delete();
+      }
+    }
+  }
+  
+  public final static Properties getAllDistributedSystemProperties(final Properties properties) {
+    Properties dsProperties = DUnitEnv.get().getDistributedSystemProperties();
+    
+    // our tests do not expect auto-reconnect to be on by default
+    if (!dsProperties.containsKey(DistributionConfig.DISABLE_AUTO_RECONNECT_NAME)) {
+      dsProperties.put(DistributionConfig.DISABLE_AUTO_RECONNECT_NAME, "true");
+    }
+  
+    for (Iterator<Map.Entry<Object,Object>> iterator = properties.entrySet().iterator(); iterator.hasNext();) {
+      Map.Entry<Object,Object> entry = iterator.next();
+      String key = (String) entry.getKey();
+      Object value = entry.getValue();
+      dsProperties.put(key, value);
+    }
+    return dsProperties;
+  }
+  
+  /**
+   * Get the port that the standard dunit locator is listening on.
+   */
+  public static int getDUnitLocatorPort() {
+    return DUnitEnv.get().getLocatorPort();
+  }
+
+  public static void unregisterAllDataSerializersFromAllVms() {
+    DistributedTestUtils.unregisterDataSerializerInThisVM();
+    Invoke.invokeInEveryVM(()->unregisterDataSerializerInThisVM());
+    Invoke.invokeInLocator(()->unregisterDataSerializerInThisVM());
+  }
+
+  public static void unregisterDataSerializerInThisVM() {
+    // TODO:KIRK: delete DataSerializerPropogationDUnitTest.successfullyLoadedTestDataSerializer = false;
+    // unregister all the Dataserializers
+    InternalDataSerializer.reinitialize();
+    // ensure that all are unregistered
+    assertEquals(0, InternalDataSerializer.getSerializers().length);
+  }
+
+  public static void unregisterInstantiatorsInThisVM() {
+    // unregister all the instantiators
+    InternalInstantiator.reinitialize();
+    assertEquals(0, InternalInstantiator.getInstantiators().length);
+  }
+}
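
A hedged usage sketch for DistributedTestUtils. The test class and method are hypothetical; the static utility calls are the ones added above:

import static com.gemstone.gemfire.test.dunit.DistributedTestUtils.crashDistributedSystem;
import static com.gemstone.gemfire.test.dunit.DistributedTestUtils.deleteLocatorStateFile;

import com.gemstone.gemfire.internal.AvailablePortHelper;
import com.gemstone.gemfire.test.dunit.DistributedTestCase;
import com.gemstone.gemfire.test.dunit.Host;
import com.gemstone.gemfire.test.dunit.VM;

// Hypothetical test: illustrates the intended static-import style.
public class ExampleCrashDUnitTest extends DistributedTestCase {

  public ExampleCrashDUnitTest(final String name) {
    super(name);
  }

  public void testForcedDisconnectCleanup() throws Exception {
    // Per the javadoc above: get a random port, then clear any stale locator state file.
    int locatorPort = AvailablePortHelper.getRandomAvailableTCPPort();
    deleteLocatorStateFile(locatorPort);

    // Crash the distributed system in vm0, forcing a ForcedDisconnectException there.
    VM vm0 = Host.getHost(0).getVM(0);
    crashDistributedSystem(vm0);

    // Clean the VM up before the end of the test, as the javadoc requires.
    vm0.invoke(() -> disconnectFromDS());
  }
}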

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/Host.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/Host.java b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/Host.java
index 4ec6165..95d6f0d 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/Host.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/Host.java
@@ -16,6 +16,7 @@
  */
 package com.gemstone.gemfire.test.dunit;
 
+import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
@@ -33,9 +34,9 @@ import com.gemstone.gemfire.test.dunit.standalone.RemoteDUnitVMIF;
  * started on other hosts via additional Hydra configuration.</P>
  *
  * @author David Whitlock
- *
  */
-public abstract class Host implements java.io.Serializable {
+@SuppressWarnings("serial")
+public abstract class Host implements Serializable {
 
   /** The available hosts */
   protected static List hosts = new ArrayList();

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/IgnoredException.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/IgnoredException.java b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/IgnoredException.java
new file mode 100755
index 0000000..d0cead6
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/IgnoredException.java
@@ -0,0 +1,200 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.test.dunit;
+
+import java.io.Serializable;
+import java.util.concurrent.ConcurrentLinkedQueue;
+
+import org.apache.logging.log4j.Logger;
+
+import com.gemstone.gemfire.distributed.DistributedSystem;
+import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
+import com.gemstone.gemfire.internal.logging.LogService;
+
+/**
+ * <code>IgnoredException</code> provides static utility methods that
+ * will log messages to add or remove <code>IgnoredException</code>s.
+ * Each <code>IgnoredException</code> allows you to specify a suspect string
+ * that will be ignored by the <code>GrepLogs</code> utility which is run 
+ * after each <code>DistributedTest</code> test method.
+ * 
+ * These methods can be used directly: 
+ * <code>IgnoredException.addIgnoredException(...)</code>;
+ * however, they are intended to be referenced through static import:
+ *
+ * <pre>
+ * import static com.gemstone.gemfire.test.dunit.IgnoredException.*;
+ *    ...
+ *    addIgnoredException(...);
+ * </pre>
+ *
+ * A test should use <code>addIgnoredException(...)</code> before executing
+ * the code that will potentially log the suspect string. The test should
+ * then <code>remove()</code> the <code>IgnoredException</code> immediately
+ * after. Note that <code>DistributedTestCase.tearDown()</code> will 
+ * automatically remove all current <code>IgnoredException</code>s by
+ * invoking <code>removeAllExpectedExceptions()</code>.
+ *  
+ * A suspect string is typically an Exception class and/or message string.
+ *
+ * The <code>GrepLogs</code> utility is part of Hydra which is not included
+ * in Apache Geode. The Hydra class which consumes logs and reports suspect
+ * strings is <code>batterytest.greplogs.GrepLogs</code>.
+ * 
+ * Extracted from DistributedTestCase.
+ * 
+ * @author Mitch Thomas
+ * @since 5.7bugfix
+ */
+@SuppressWarnings("serial")
+public class IgnoredException implements Serializable {
+  
+  private static final Logger logger = LogService.getLogger();
+
+  private final String suspectString;
+
+  private final transient VM vm;
+  
+  private static ConcurrentLinkedQueue<IgnoredException> ignoredExceptions = new ConcurrentLinkedQueue<IgnoredException>();
+
+  public IgnoredException(final String suspectString) {
+    this.suspectString = suspectString;
+    this.vm = null;
+  }
+
+  IgnoredException(final String suspectString, final VM vm) {
+    this.suspectString = suspectString;
+    this.vm = vm;
+  }
+
+  String suspectString() {
+    return this.suspectString;
+  }
+  
+  VM vm() {
+    return this.vm;
+  }
+  
+  public String getRemoveMessage() {
+    return "<ExpectedException action=remove>" + this.suspectString + "</ExpectedException>";
+  }
+
+  public String getAddMessage() {
+    return "<ExpectedException action=add>" + this.suspectString + "</ExpectedException>";
+  }
+
+  public void remove() {
+    final String removeMessage = getRemoveMessage();
+    
+    @SuppressWarnings("serial")
+    SerializableRunnable removeRunnable = new SerializableRunnable(IgnoredException.class.getSimpleName()+" remove") {
+      public void run() {
+        // TODO: delete use of system.getLogWriter
+        DistributedSystem system = InternalDistributedSystem.getConnectedInstance();
+        if (system != null) {
+          system.getLogWriter().info(removeMessage);
+        }
+        
+        // TODO: delete use of LogWriterUtils
+        try {
+          LogWriterUtils.getLogWriter().info(removeMessage);
+        } catch (Exception noHydraLogger) {
+        }
+
+        logger.info(removeMessage);
+      }
+    };
+
+    removeRunnable.run();
+    
+    if (this.vm != null) {
+      vm.invoke(removeRunnable);
+    } else {
+      Invoke.invokeInEveryVM(removeRunnable);
+    }
+  }
+
+  public static void removeAllExpectedExceptions() {
+    IgnoredException ignoredException;
+    while ((ignoredException = ignoredExceptions.poll()) != null) {
+      ignoredException.remove();
+    }
+  }
+
+  /**
+   * Log in all VMs, in both the test logger and the GemFire logger the
+   * ignored exception string to prevent grep logs from complaining. The
+   * suspect string is used by the GrepLogs utility and so can contain
+   * regular expression characters.
+   * 
+   * @since 5.7bugfix
+   * @param suspectString the exception string to expect
+   * @param vm the VM on which to log the expected exception or null for all VMs
+   * @return an IgnoredException instance for removal purposes
+   */
+  public static IgnoredException addIgnoredException(final String suspectString, final VM vm) {
+    final IgnoredException ignoredException = new IgnoredException(suspectString, vm);
+    final String addMessage = ignoredException.getAddMessage();
+    
+    @SuppressWarnings("serial")
+    SerializableRunnable addRunnable = new SerializableRunnable(IgnoredException.class.getSimpleName()+" addIgnoredException") {
+      public void run() {
+        // TODO: delete use of system.getLogWriter
+        DistributedSystem system = InternalDistributedSystem.getConnectedInstance();
+        if (system != null) {
+          system.getLogWriter().info(addMessage);
+        }
+        
+        // TODO: delete use of LogWriterUtils
+        try {
+          LogWriterUtils.getLogWriter().info(addMessage);
+        } catch (Exception noHydraLogger) {
+        }
+  
+        logger.info(addMessage);
+      }
+    };
+    
+    addRunnable.run();
+    
+    if (vm != null) {
+      vm.invoke(addRunnable);
+    } else {
+      Invoke.invokeInEveryVM(addRunnable);
+    }
+    
+    ignoredExceptions.add(ignoredException);
+    return ignoredException;
+  }
+
+  /**
+   * Log in all VMs, in both the test logger and the GemFire logger the
+   * ignored exception string to prevent grep logs from complaining. The
+   * suspect string is used by the GrepLogs utility and so can contain
+   * regular expression characters.
+   * 
+   * If you do not remove the ignored exception, it will be removed at the
+   * end of your test case automatically.
+   * 
+   * @since 5.7bugfix
+   * @param suspectString the exception string to expect
+   * @return an IgnoredException instance for removal
+   */
+  public static IgnoredException addIgnoredException(final String suspectString) {
+    return addIgnoredException(suspectString, null);
+  }
+}
\ No newline at end of file
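
A short, hedged example of the add/remove pattern described in the javadoc. The test class, method, and suspect string are illustrative; any IgnoredException left in place is removed automatically in tearDown via removeAllExpectedExceptions():

import static com.gemstone.gemfire.test.dunit.IgnoredException.addIgnoredException;

import com.gemstone.gemfire.test.dunit.DistributedTestCase;
import com.gemstone.gemfire.test.dunit.IgnoredException;

// Hypothetical test: names are illustrative only.
public class ExampleIgnoredExceptionDUnitTest extends DistributedTestCase {

  public ExampleIgnoredExceptionDUnitTest(final String name) {
    super(name);
  }

  public void testOperationThatLogsSuspectString() {
    IgnoredException ignored = addIgnoredException("ServerConnectivityException");
    try {
      // exercise the code path that is expected to log the suspect string
    } finally {
      // Optional here, since tearDown() removes any leftover IgnoredExceptions.
      ignored.remove();
    }
  }
}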

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/Invoke.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/Invoke.java b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/Invoke.java
new file mode 100755
index 0000000..5a4ca15
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/Invoke.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.test.dunit;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * <code>Invoke</code> provides static utility methods that allow a
+ * <code>DistributedTest</code> to invoke a <code>SerializableRunnable</code>
+ * or <code>SerializableCallable</code> in a remote test <code>VM</code>.
+ * 
+ * These methods can be used directly: <code>Invoke.invokeInEveryVM(...)</code>;
+ * however, they are intended to be referenced through static import:
+ *
+ * <pre>
+ * import static com.gemstone.gemfire.test.dunit.Invoke.*;
+ *    ...
+ *    invokeInEveryVM(...);
+ * </pre>
+ *
+ * Extracted from DistributedTestCase.
+ */
+public class Invoke {
+
+  protected Invoke() {
+  }
+  
+  /**
+   * Invokes a <code>SerializableRunnable</code> in every VM that
+   * DUnit knows about.
+   * <p>
+   * Note: this does NOT include the controller VM or locator VM.
+   *
+   * @see VM#invoke(SerializableRunnableIF)
+   */
+  public static void invokeInEveryVM(final SerializableRunnableIF runnable) {
+    for (int hostIndex = 0; hostIndex < Host.getHostCount(); hostIndex++) {
+      Host host = Host.getHost(hostIndex);
+  
+      for (int vmIndex = 0; vmIndex < host.getVMCount(); vmIndex++) {
+        VM vm = host.getVM(vmIndex);
+        vm.invoke(runnable);
+      }
+    }
+  }
+
+  /**
+   * Invokes a method in every remote VM that DUnit knows about.
+   *
+   * @see VM#invoke(Class, String)
+   * @deprecated Please use {@link #invokeInEveryVM(SerializableRunnableIF)} or another non-deprecated method in <code>Invoke</code> instead.
+   */
+  @Deprecated
+  public static void invokeInEveryVM(final Class<?> targetClass, final String targetMethod) {
+    for (int hostIndex = 0; hostIndex < Host.getHostCount(); hostIndex++) {
+      Host host = Host.getHost(hostIndex);
+  
+      for (int vmIndex = 0; vmIndex < host.getVMCount(); vmIndex++) {
+        VM vm = host.getVM(vmIndex);
+        vm.invoke(targetClass, targetMethod);
+      }
+    }
+  }
+
+  /**
+   * Invokes a method in every remote VM that DUnit knows about.
+   *
+   * @see VM#invoke(Class, String)
+   * @deprecated Please use {@link #invokeInEveryVM(SerializableRunnableIF)} or another non-deprecated method in <code>Invoke</code> instead.
+   */
+  public static void invokeInEveryVM(final Class<?> targetClass, final String targetMethod, final Object[] methodArgs) {
+    for (int hostIndex = 0; hostIndex < Host.getHostCount(); hostIndex++) {
+      Host host = Host.getHost(hostIndex);
+  
+      for (int vmIndex = 0; vmIndex < host.getVMCount(); vmIndex++) {
+        VM vm = host.getVM(vmIndex);
+        vm.invoke(targetClass, targetMethod, methodArgs);
+      }
+    }
+  }
+
+  /**
+   * Invokes a <code>SerializableCallable</code> in every VM that
+   * DUnit knows about.
+   *
+   * @return a Map of results, where the key is the VM and the value is the result for that VM
+   * @see VM#invoke(SerializableCallableIF)
+   */
+  public static <T> Map<VM, T> invokeInEveryVM(final SerializableCallableIF<T> callable) {
+    Map<VM, T> ret = new HashMap<VM, T>();
+    for (int h = 0; h < Host.getHostCount(); h++) {
+      Host host = Host.getHost(h);
+      for (int v = 0; v < host.getVMCount(); v++) {
+        VM vm = host.getVM(v);
+        ret.put(vm, vm.invoke(callable));
+      }
+    }
+    return ret;
+  }
+
+  public static void invokeInLocator(final SerializableRunnableIF runnable) {
+    Host.getLocator().invoke(runnable);
+  }
+
+  /**
+   * @deprecated Please use {@link com.jayway.awaitility.Awaitility} with {@link #invokeInEveryVM(SerializableCallableIF)} instead.
+   */
+  public static void invokeRepeatingIfNecessary(final VM vm, final RepeatableRunnable runnable) {
+    vm.invokeRepeatingIfNecessary(runnable, 0);
+  }
+
+  /**
+   * @deprecated Please use {@link com.jayway.awaitility.Awaitility} with {@link #invokeInEveryVM(SerializableCallableIF)} instead.
+   */
+  public static void invokeRepeatingIfNecessary(final VM vm, final RepeatableRunnable runnable, final long repeatTimeoutMs) {
+    vm.invokeRepeatingIfNecessary(runnable, repeatTimeoutMs);
+  }
+
+  /**
+   * @deprecated Please use {@link com.jayway.awaitility.Awaitility} with {@link #invokeInEveryVM(SerializableCallableIF)} instead.
+   */
+  public static void invokeInEveryVMRepeatingIfNecessary(final RepeatableRunnable runnable) {
+    Invoke.invokeInEveryVMRepeatingIfNecessary(runnable, 0);
+  }
+
+  /**
+   * Invokes a <code>SerializableRunnable</code> in every VM that
+   * DUnit knows about.  If <code>run()</code> throws an assertion failure, 
+   * its execution is repeated, until no assertion failure occurs or
+   * <code>repeatTimeoutMs</code> milliseconds have passed.
+   * 
+   * @see VM#invoke(RepeatableRunnable)
+   * @deprecated Please use {@link com.jayway.awaitility.Awaitility} with {@link #invokeInEveryVM(SerializableCallableIF)} instead.
+   */
+  public static void invokeInEveryVMRepeatingIfNecessary(final RepeatableRunnable runnable, final long repeatTimeoutMs) {
+    for (int h = 0; h < Host.getHostCount(); h++) {
+      Host host = Host.getHost(h);
+  
+      for (int v = 0; v < host.getVMCount(); v++) {
+        VM vm = host.getVM(v);
+        vm.invokeRepeatingIfNecessary(runnable, repeatTimeoutMs);
+      }
+    }
+  }
+}
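
A small, hedged sketch of the lambda-based usage this class is meant to encourage. The helper class is hypothetical; the calls mirror the refactored cleanupAllVms() shown earlier:

import static com.gemstone.gemfire.test.dunit.Invoke.invokeInEveryVM;
import static com.gemstone.gemfire.test.dunit.Invoke.invokeInLocator;

import com.gemstone.gemfire.distributed.internal.DistributionMessageObserver;
import com.gemstone.gemfire.test.dunit.DistributedTestUtils;

// Hypothetical helper: not part of this commit.
public class ExampleInvokeUsage {

  static void resetSharedStateEverywhere() {
    // Lambda (SerializableRunnableIF) form, preferred over the deprecated
    // invokeInEveryVM(Class, String) reflection form.
    invokeInEveryVM(() -> DistributedTestUtils.unregisterInstantiatorsInThisVM());

    // The locator VM is not covered by invokeInEveryVM and is handled separately.
    invokeInLocator(() -> DistributionMessageObserver.setInstance(null));
  }
}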

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/Jitter.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/Jitter.java b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/Jitter.java
new file mode 100755
index 0000000..08fe71a
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/Jitter.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.test.dunit;
+
+import java.util.Random;
+
+/**
+ * Extracted from DistributedTestCase
+ */
+class Jitter {
+
+  /**
+   * If true, we randomize the amount of time we wait before polling a
+   * {@link WaitCriterion}.
+   */
+  private static final boolean USE_JITTER = true;
+  
+  private static final Random jitter = new Random();
+
+  protected Jitter() {
+  }
+
+  /**
+   * Returns an adjusted interval from <code>minimum()</code> to 
+   * <code>intervalMillis</code> milliseconds. If jittering is disabled then 
+   * the value returned will be equal to <code>intervalMillis</code>.
+   * 
+   * @param intervalMillis the maximum interval to wait, in milliseconds
+   * @return adjusted milliseconds to use as the interval for WaitCriterion polling
+   */
+  static long jitterInterval(long intervalMillis) {
+    if (USE_JITTER) {
+      return adjustIntervalIfJitterIsEnabled(intervalMillis);
+    } else {
+      return intervalMillis;
+    }
+  }
+  
+  static int minimum() {
+    return 10;
+  }
+  
+  static int maximum() {
+    return 5000;
+  }
+  
+  /**
+   * Returns a jittered interval up to a maximum of
+   * <code>intervalMillis</code> milliseconds, inclusive.
+   * 
+   * Values at or below <code>minimum()</code> (10 ms) are returned unchanged;
+   * otherwise the result falls between 10 ms and <code>maximum()</code> (5000 ms).
+   * For example, an intervalMillis of 200 yields a value between 10 and 200
+   * inclusive, while 8000 yields a value between 10 and 5000.
+   * 
+   * @param intervalMillis total amount of time to wait
+   * @return randomized interval we should wait
+   */
+  private static int adjustIntervalIfJitterIsEnabled(final long intervalMillis) {
+    final int minLegal = minimum();
+    final int maxLegal = maximum();
+    
+    if (intervalMillis <= minLegal) {
+      return (int)intervalMillis; // Don't ever jitter anything below this.
+    }
+
+    int maxValue = maxLegal;
+    if (intervalMillis < maxLegal) {
+      maxValue = (int)intervalMillis;
+    }
+
+    return minLegal + jitter.nextInt(maxValue - minLegal + 1);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/LogWriterUtils.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/LogWriterUtils.java b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/LogWriterUtils.java
new file mode 100755
index 0000000..9556af2
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/LogWriterUtils.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.test.dunit;
+
+import java.util.Properties;
+
+import org.apache.logging.log4j.Logger;
+
+import com.gemstone.gemfire.LogWriter;
+import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.distributed.internal.DistributionConfigImpl;
+import com.gemstone.gemfire.internal.logging.InternalLogWriter;
+import com.gemstone.gemfire.internal.logging.LogService;
+import com.gemstone.gemfire.internal.logging.LogWriterFactory;
+import com.gemstone.gemfire.internal.logging.ManagerLogWriter;
+import com.gemstone.gemfire.internal.logging.log4j.LogWriterLogger;
+
+/**
+ * <code>LogWriterUtils</code> provides static utility methods to access a
+ * <code>LogWriter</code> within a test. 
+ * 
+ * These methods can be used directly: <code>LogWriterUtils.getLogWriter(...)</code>;
+ * however, they are intended to be referenced through static import:
+ *
+ * <pre>
+ * import static com.gemstone.gemfire.test.dunit.LogWriterUtils.*;
+ *    ...
+ *    LogWriter logWriter = getLogWriter(...);
+ * </pre>
+ *
+ * Extracted from DistributedTestCase.
+ * 
+ * @deprecated Please use a <code>Logger</code> from  {@link LogService#getLogger()} instead.
+ */
+@Deprecated
+public class LogWriterUtils {
+
+  private static final Logger logger = LogService.getLogger();
+  private static final LogWriterLogger oldLogger = LogWriterLogger.create(logger);
+  
+  protected LogWriterUtils() {
+  }
+  
+  /**
+   * Returns a <code>LogWriter</code> for logging information
+   * 
+   * @deprecated Please use a <code>Logger</code> from  {@link LogService#getLogger()} instead.
+   */
+  public static InternalLogWriter getLogWriter() {
+    return LogWriterUtils.oldLogger;
+  }
+
+  /**
+   * Creates a new LogWriter and adds it to the config properties. The config
+   * can then be used to connect to DistributedSystem, thus providing early
+   * access to the LogWriter before connecting. This call does not connect
+   * to the DistributedSystem. It simply creates and returns the LogWriter
+   * that will eventually be used by the DistributedSystem that connects using
+   * config.
+   * 
+   * @param properties the DistributedSystem config properties to add LogWriter to
+   * @return early access to the DistributedSystem LogWriter
+   * @deprecated Please use a <code>Logger</code> from  {@link LogService#getLogger()} instead.
+   */
+  public static LogWriter createLogWriter(final Properties properties) {
+    Properties nonDefault = properties;
+    if (nonDefault == null) {
+      nonDefault = new Properties();
+    }
+    DistributedTestUtils.addHydraProperties(nonDefault);
+    
+    DistributionConfig dc = new DistributionConfigImpl(nonDefault);
+    LogWriter logger = LogWriterFactory.createLogWriterLogger(
+        false/*isLoner*/, false/*isSecurityLog*/, dc, 
+        false);        
+    
+    // if config was non-null, then these will be added to it...
+    nonDefault.put(DistributionConfig.LOG_WRITER_NAME, logger);
+    
+    return logger;
+  }
+
+  /**
+   * This finds the log level configured for the test run.  It should be used
+   * when creating a new distributed system if you want to specify a log level.
+   * 
+   * @return the dunit log-level setting
+   */
+  public static String getDUnitLogLevel() {
+    Properties dsProperties = DUnitEnv.get().getDistributedSystemProperties();
+    String result = dsProperties.getProperty(DistributionConfig.LOG_LEVEL_NAME);
+    if (result == null) {
+      result = ManagerLogWriter.levelToString(DistributionConfig.DEFAULT_LOG_LEVEL);
+    }
+    return result;
+  }
+}
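
Since LogWriterUtils is deprecated on arrival, a hedged sketch of the preferred replacement. The class and message are illustrative:

import org.apache.logging.log4j.Logger;

import com.gemstone.gemfire.internal.logging.LogService;

// Hypothetical class: shows the log4j2 Logger that replaces LogWriterUtils.getLogWriter().
public class ExampleLoggingMigration {

  private static final Logger logger = LogService.getLogger();

  void logRegionCreated(final String regionName) {
    // Deprecated style: LogWriterUtils.getLogWriter().info("created region " + regionName);
    logger.info("created region {}", regionName);
  }
}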



[04/62] [abbrv] [partial] incubator-geode git commit: GEODE-773: Extract static methods from DistributedTestCase

Posted by je...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ClientCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ClientCommandsDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ClientCommandsDUnitTest.java
index d1d9559..c77ab2b 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ClientCommandsDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ClientCommandsDUnitTest.java
@@ -63,11 +63,14 @@ import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
 import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
 import com.gemstone.gemfire.management.internal.cli.result.CompositeResultData;
 import com.gemstone.gemfire.management.internal.cli.result.CompositeResultData.SectionResultData;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.management.internal.cli.result.TabularResultData;
 
 
@@ -140,7 +143,7 @@ public void waitForListClientMbean(){
             return "waitForListClientMbean Probing ...";
           }
         };
-        DistributedTestCase.waitForCriterion(waitForMaangerMBean, 2 * 60 * 1000, 2000, true);
+        Wait.waitForCriterion(waitForMaangerMBean, 2 * 60 * 1000, 2000, true);
       }
     }); 
     
@@ -189,7 +192,7 @@ public void waitForListClientMbean2(){
           return "waitForListClientMbean2 Probing ...";
         }
       };
-      DistributedTestCase.waitForCriterion(waitForMaangerMBean, 2 * 60 * 1000, 2000, true);
+      Wait.waitForCriterion(waitForMaangerMBean, 2 * 60 * 1000, 2000, true);
     }
   }); 
   
@@ -249,7 +252,7 @@ public void waitForListClientMbean2(){
             return "waitForMbean Probing for ";
           }
         };
-        DistributedTestCase.waitForCriterion(waitForMaangerMBean, 2 * 60 * 1000, 2000, true);
+        Wait.waitForCriterion(waitForMaangerMBean, 2 * 60 * 1000, 2000, true);
       }
     }); 
     
@@ -301,7 +304,7 @@ public void waitForListClientMbean2(){
             return "waitForListClientMbean3 Probing ...";
           }
         };
-        DistributedTestCase.waitForCriterion(waitForMaangerMBean, 2 * 60 * 1000, 2000, true);
+        Wait.waitForCriterion(waitForMaangerMBean, 2 * 60 * 1000, 2000, true);
       }
     }); 
     
@@ -981,7 +984,7 @@ public void verifyClientStats(CommandResult commandResultForClient, String serve
           getSystem(props);
           
           final ClientCacheFactory ccf = new ClientCacheFactory(props);
-          ccf.addPoolServer(getServerHostName(server.getHost()), port);
+          ccf.addPoolServer(NetworkUtils.getServerHostName(server.getHost()), port);
           ccf.setPoolSubscriptionEnabled(true);
           ccf.setPoolPingInterval(1);
           ccf.setPoolStatisticInterval(1);
@@ -1001,7 +1004,7 @@ public void verifyClientStats(CommandResult commandResultForClient, String serve
         }else{
           String poolName = "new_pool_" + System.currentTimeMillis();
           try{                      
-            PoolImpl p = (PoolImpl) PoolManager.createFactory().addServer(getServerHostName(server.getHost()), port)
+            PoolImpl p = (PoolImpl) PoolManager.createFactory().addServer(NetworkUtils.getServerHostName(server.getHost()), port)
               .setThreadLocalConnections(true)
               .setMinConnections(1)
               .setSubscriptionEnabled(true)
@@ -1054,7 +1057,7 @@ public void verifyClientStats(CommandResult commandResultForClient, String serve
 
   protected Properties getServerProperties() {
     Properties p = new Properties();
-    p.setProperty(DistributionConfig.LOCATORS_NAME, "localhost["+getDUnitLocatorPort()+"]");
+    p.setProperty(DistributionConfig.LOCATORS_NAME, "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
     return p;
   }
   
@@ -1098,7 +1101,7 @@ public void waitForNonSubCliMBean(){
             return "waitForNonSubScribedClientMBean Probing for ";
           }
         };
-        DistributedTestCase.waitForCriterion(waitForMaangerMBean, 5* 60 * 1000, 2000, true);
+        Wait.waitForCriterion(waitForMaangerMBean, 5* 60 * 1000, 2000, true);
       }
     }); 
     
@@ -1146,7 +1149,7 @@ public void waitForMixedClients(){
             return "waitForMixedClients Probing for ";
           }
         };
-        DistributedTestCase.waitForCriterion(waitForMaangerMBean, 5* 60 * 1000, 2000, true);
+        Wait.waitForCriterion(waitForMaangerMBean, 5* 60 * 1000, 2000, true);
       }
     }); 
     
@@ -1394,7 +1397,7 @@ private void setUpNonSubscribedClient() throws Exception {
           getSystem(props);
           
           final ClientCacheFactory ccf = new ClientCacheFactory(props);
-          ccf.addPoolServer(getServerHostName(server.getHost()), port);
+          ccf.addPoolServer(NetworkUtils.getServerHostName(server.getHost()), port);
           ccf.setPoolSubscriptionEnabled(false);
           ccf.setPoolPingInterval(1);
           ccf.setPoolStatisticInterval(1);
@@ -1414,7 +1417,7 @@ private void setUpNonSubscribedClient() throws Exception {
         }else{
           String poolName = "new_pool_" + System.currentTimeMillis();
           try{                      
-            PoolImpl p = (PoolImpl) PoolManager.createFactory().addServer(getServerHostName(server.getHost()), port)
+            PoolImpl p = (PoolImpl) PoolManager.createFactory().addServer(NetworkUtils.getServerHostName(server.getHost()), port)
               .setThreadLocalConnections(true)
               .setMinConnections(1)
               .setSubscriptionEnabled(false)
@@ -1433,8 +1436,8 @@ private void setUpNonSubscribedClient() throws Exception {
     });
   }
   
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  @Override
+  protected final void postTearDownCacheTestCase() throws Exception {
     Host.getHost(0).getVM(0).invoke(CacheServerTestUtil.class, "closeCache");
     Host.getHost(0).getVM(1).invoke(CacheServerTestUtil.class, "closeCache");
     Host.getHost(0).getVM(2).invoke(CacheServerTestUtil.class, "closeCache");
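
These test diffs repeat one mechanical pattern: helpers inherited from DistributedTestCase become calls on the new static utility classes (Wait, NetworkUtils, DistributedTestUtils, LogWriterUtils). A hedged sketch of the Wait migration follows; the criterion and helper method are illustrative:

import com.gemstone.gemfire.test.dunit.Wait;
import com.gemstone.gemfire.test.dunit.WaitCriterion;

// Hypothetical helper: illustrates the Wait.waitForCriterion call used throughout these tests.
public class ExampleWaitMigration {

  static void waitForClientMBean() {
    WaitCriterion criterion = new WaitCriterion() {
      public boolean done() {
        return isClientMBeanRegistered();  // placeholder for the real MBean check
      }
      public String description() {
        return "waiting for the client MBean to be registered";
      }
    };
    // Before: DistributedTestCase.waitForCriterion(criterion, 2 * 60 * 1000, 2000, true);
    Wait.waitForCriterion(criterion, 2 * 60 * 1000, 2000, true);
  }

  private static boolean isClientMBeanRegistered() {
    return true;  // placeholder
  }
}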

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/DurableClientCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/DurableClientCommandsDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/DurableClientCommandsDUnitTest.java
index 89c7d5e..d8e65d9 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/DurableClientCommandsDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/DurableClientCommandsDUnitTest.java
@@ -44,7 +44,10 @@ import com.gemstone.gemfire.management.cli.Result.Status;
 import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
 import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
 import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -247,7 +250,7 @@ public class DurableClientCommandsDUnitTest extends CliCommandTestBase {
   }
   
   private void writeToLog(String text, String resultAsString) {
-    getLogWriter().info(getUniqueName() + ": " + text + "\n" + resultAsString);
+    LogWriterUtils.getLogWriter().info(getUniqueName() + ": " + text + "\n" + resultAsString);
   }
   
   private void setupSystem() throws Exception {
@@ -357,7 +360,7 @@ public class DurableClientCommandsDUnitTest extends CliCommandTestBase {
         getSystem(props);
         
         final ClientCacheFactory ccf = new ClientCacheFactory(props);
-        ccf.addPoolServer(getServerHostName(server.getHost()), port);
+        ccf.addPoolServer(NetworkUtils.getServerHostName(server.getHost()), port);
         ccf.setPoolSubscriptionEnabled(true);
         
         ClientCache cache = (ClientCache)getClientCache(ccf);
@@ -420,12 +423,12 @@ public class DurableClientCommandsDUnitTest extends CliCommandTestBase {
 
   protected Properties getServerProperties() {
     Properties p = new Properties();
-    p.setProperty(DistributionConfig.LOCATORS_NAME, "localhost["+getDUnitLocatorPort()+"]");
+    p.setProperty(DistributionConfig.LOCATORS_NAME, "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
     return p;
   }
   
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  @Override
+  protected final void postTearDownCacheTestCase() throws Exception {
     Host.getHost(0).getVM(0).invoke(CacheServerTestUtil.class, "closeCache");
     Host.getHost(0).getVM(1).invoke(CacheServerTestUtil.class, "closeCache");
     Host.getHost(0).getVM(2).invoke(CacheServerTestUtil.class, "closeCache");

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestCQDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestCQDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestCQDUnitTest.java
index 131cbcd..4ba48df 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestCQDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestCQDUnitTest.java
@@ -21,7 +21,11 @@ import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.management.DistributedSystemMXBean;
 import com.gemstone.gemfire.management.ManagementService;
 import com.gemstone.gemfire.management.ManagementTestBase;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * This is for testing continuous query.
@@ -47,10 +51,6 @@ public class TestCQDUnitTest extends ManagementTestBase {
 
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-  }
-
   public static long getNumOfCQ() {
     
     final WaitCriterion waitCriteria = new WaitCriterion() {
@@ -71,7 +71,7 @@ public class TestCQDUnitTest extends ManagementTestBase {
         return "wait for getNumOfCQ to complete and get results";
       }
     };
-    waitForCriterion(waitCriteria, 2 * 60 * 1000, 3000, true);    
+    Wait.waitForCriterion(waitCriteria, 2 * 60 * 1000, 3000, true);    
     final DistributedSystemMXBean bean = getManagementService().getDistributedSystemMXBean();
     assertNotNull(bean);
     return bean.getActiveCQCount();
@@ -79,12 +79,12 @@ public class TestCQDUnitTest extends ManagementTestBase {
 
   public void testNumOfCQ() throws Exception {
     initManagement(false);
-    getLogWriter().info("started testNumOfCQ");
+    LogWriterUtils.getLogWriter().info("started testNumOfCQ");
 
     VM server = managedNodeList.get(1);
     VM client = managedNodeList.get(2);    
     
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
 
     int serverPort = AvailablePortHelper.getRandomAvailableTCPPort();
     cqDUnitTest.createServer(server, serverPort);
@@ -128,7 +128,7 @@ public class TestCQDUnitTest extends ManagementTestBase {
     long numOfCQ = ((Number) managingNode.invoke(TestCQDUnitTest.class,
         "getNumOfCQ")).intValue();
 
-    getLogWriter().info("testNumOfCQ numOfCQ= " + numOfCQ);
+    LogWriterUtils.getLogWriter().info("testNumOfCQ numOfCQ= " + numOfCQ);
 
     cqDUnitTest.closeClient(client);
     cqDUnitTest.closeServer(server);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestClientsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestClientsDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestClientsDUnitTest.java
index 814e07e..1659731 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestClientsDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestClientsDUnitTest.java
@@ -21,7 +21,11 @@ import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.management.DistributedSystemMXBean;
 import com.gemstone.gemfire.management.ManagementService;
 import com.gemstone.gemfire.management.ManagementTestBase;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * This is for testing Number of clients and can be extended for relevant test
@@ -44,10 +48,6 @@ public class TestClientsDUnitTest extends ManagementTestBase {
 
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-  }
-
   public static Integer getNumOfClients() {
     final WaitCriterion waitCriteria = new WaitCriterion() {
       @Override
@@ -69,7 +69,7 @@ public class TestClientsDUnitTest extends ManagementTestBase {
       }
     };
 
-    waitForCriterion(waitCriteria, 2 * 60 * 1000, 3000, true);
+    Wait.waitForCriterion(waitCriteria, 2 * 60 * 1000, 3000, true);
     final DistributedSystemMXBean bean = getManagementService()
         .getDistributedSystemMXBean();
     assertNotNull(bean);
@@ -87,11 +87,11 @@ public class TestClientsDUnitTest extends ManagementTestBase {
     cqDUnitTest.createServer(server, serverPort);
     final int port = server.invokeInt(CqQueryDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     cqDUnitTest.createClient(client, port, host0);
     Integer numOfClients = (Integer) managingNode.invoke(
         TestClientsDUnitTest.class, "getNumOfClients");
-    getLogWriter().info("testNumOfClients numOfClients = " + numOfClients);
+    LogWriterUtils.getLogWriter().info("testNumOfClients numOfClients = " + numOfClients);
     cqDUnitTest.closeClient(client);
     cqDUnitTest.closeServer(server);
     assertEquals(1, numOfClients.intValue());

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestServerDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestServerDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestServerDUnitTest.java
index 6475fa5..8fa29a7 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestServerDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/management/internal/pulse/TestServerDUnitTest.java
@@ -21,7 +21,10 @@ import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.management.DistributedSystemMXBean;
 import com.gemstone.gemfire.management.ManagementService;
 import com.gemstone.gemfire.management.ManagementTestBase;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * This is for testing server count details from MBean
@@ -42,10 +45,6 @@ public class TestServerDUnitTest extends ManagementTestBase {
 
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-  }
-
   public static int getNumOfServersFromMBean() {
    
    final WaitCriterion waitCriteria = new WaitCriterion() {
@@ -66,7 +65,7 @@ public class TestServerDUnitTest extends ManagementTestBase {
       }
     };
 
-    waitForCriterion(waitCriteria, 2 * 60 * 1000, 3000, true);    
+    Wait.waitForCriterion(waitCriteria, 2 * 60 * 1000, 3000, true);    
     final DistributedSystemMXBean bean = getManagementService().getDistributedSystemMXBean();
     assertNotNull(bean);
     return bean.listCacheServers().length;
@@ -83,7 +82,7 @@ public class TestServerDUnitTest extends ManagementTestBase {
     cqDUnitTest.createServer(server, serverPort);    
     int serverCount = ((Number) managingNode.invoke(TestServerDUnitTest.class,
         "getNumOfServersFromMBean")).intValue();
-    getLogWriter().info("TestServerDUnitTest serverCount =" + serverCount);
+    LogWriterUtils.getLogWriter().info("TestServerDUnitTest serverCount =" + serverCount);
     cqDUnitTest.closeServer(server);
     assertEquals(1, serverCount);
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/security/ClientAuthorizationTwoDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/security/ClientAuthorizationTwoDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/security/ClientAuthorizationTwoDUnitTest.java
index 7725ef6..86e38f3 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/security/ClientAuthorizationTwoDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/security/ClientAuthorizationTwoDUnitTest.java
@@ -18,6 +18,7 @@ package com.gemstone.gemfire.security;
 
 import com.gemstone.gemfire.cache.operations.OperationContext.OperationCode;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
 
 /**
  * Tests for authorization from client to server. This tests for authorization
@@ -64,11 +65,11 @@ public class ClientAuthorizationTwoDUnitTest extends
   // Region: Tests
 
   public void testAllOpsWithFailover2() {
-    addExpectedException("Read timed out");
-    addExpectedException("Connection reset");
-    addExpectedException("SocketTimeoutException");
-    addExpectedException("ServerConnectivityException");
-    addExpectedException("Socket Closed");
+    IgnoredException.addIgnoredException("Read timed out");
+    IgnoredException.addIgnoredException("Connection reset");
+    IgnoredException.addIgnoredException("SocketTimeoutException");
+    IgnoredException.addIgnoredException("ServerConnectivityException");
+    IgnoredException.addIgnoredException("Socket Closed");
 
     OperationWithAction[] allOps = {
         // Register interest in all keys using list
@@ -230,9 +231,7 @@ public class ClientAuthorizationTwoDUnitTest extends
   // End Region: Tests
 
   @Override
-  public void tearDown2() throws Exception {
-
-    super.tearDown2();
+  protected final void preTearDown() throws Exception {
     // close the clients first
     client1.invoke(SecurityTestUtil.class, "closeCache");
     client2.invoke(SecurityTestUtil.class, "closeCache");
@@ -241,5 +240,4 @@ public class ClientAuthorizationTwoDUnitTest extends
     server1.invoke(SecurityTestUtil.class, "closeCache");
     server2.invoke(SecurityTestUtil.class, "closeCache");
   }
-
 }
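
The addExpectedException(String) calls above become static IgnoredException.addIgnoredException(String) calls. A minimal sketch of the usage, assuming the returned handle can be removed again once the noisy phase of the test is over (an assumption; this particular hunk only shows the add side):

    // tell the dunit log scanner to ignore a benign failover message while this block runs
    IgnoredException ignored = IgnoredException.addIgnoredException("Socket Closed");
    try {
      // ... drive the failover scenario that is expected to log this exception ...
    } finally {
      ignored.remove();   // assumption: explicit removal of the registered string
    }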

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/security/ClientAuthzObjectModDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/security/ClientAuthzObjectModDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/security/ClientAuthzObjectModDUnitTest.java
index 58f26bb..e26db30 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/security/ClientAuthzObjectModDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/security/ClientAuthzObjectModDUnitTest.java
@@ -34,6 +34,7 @@ import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.security.ObjectWithAuthz;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 
 /**
@@ -139,8 +140,8 @@ public class ClientAuthzObjectModDUnitTest extends ClientAuthorizationTestBase {
     server2.invoke(registerInstantiator);
   }
   
-  public void tearDown2() throws Exception  {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     DistributedTestCase.cleanupAllVms();
   }
 
@@ -338,15 +339,15 @@ public class ClientAuthzObjectModDUnitTest extends ClientAuthorizationTestBase {
     String authInit = gen.getAuthInit();
     String authenticator = gen.getAuthenticator();
 
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "testPutsGetsObjectModWithFailover: Using authinit: " + authInit);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "testPutsGetsObjectModWithFailover: Using authenticator: "
             + authenticator);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "testPutsGetsObjectModWithFailover: Using pre-operation accessor: "
             + preAccessor);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "testPutsGetsObjectModWithFailover: Using post-operation accessor: "
             + postAccessor);
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/security/ClientCQPostAuthorizationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/security/ClientCQPostAuthorizationDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/security/ClientCQPostAuthorizationDUnitTest.java
index 2854c24..e17acc0 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/security/ClientCQPostAuthorizationDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/security/ClientCQPostAuthorizationDUnitTest.java
@@ -42,9 +42,12 @@ import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.logging.InternalLogWriter;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 /**
  * This is for multiuser-authentication
  * 
@@ -71,7 +74,7 @@ public class ClientCQPostAuthorizationDUnitTest extends
 
     super.setUp();
     getSystem();
-    invokeInEveryVM(new SerializableRunnable("getSystem") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
       public void run() {
         getSystem();
       }
@@ -92,8 +95,8 @@ public class ClientCQPostAuthorizationDUnitTest extends
     SecurityTestUtil.registerExpectedExceptions(clientExpectedExceptions);
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     client1.invoke(SecurityTestUtil.class, "closeCache");
     client2.invoke(SecurityTestUtil.class, "closeCache");
     server1.invoke(SecurityTestUtil.class, "closeCache");
@@ -301,7 +304,7 @@ public class ClientCQPostAuthorizationDUnitTest extends
           + SecurityTestUtil.proxyCaches[i].getRegion(regionName).getFullPath();
       // Create CQ Attributes.
       CqAttributesFactory cqf = new CqAttributesFactory();
-      CqListener[] cqListeners = {new CqQueryTestListener(getLogWriter())};
+      CqListener[] cqListeners = {new CqQueryTestListener(LogWriterUtils.getLogWriter())};
       ((CqQueryTestListener)cqListeners[0]).cqName = cqName;
 
       cqf.initCqListeners(cqListeners);
@@ -315,7 +318,7 @@ public class ClientCQPostAuthorizationDUnitTest extends
         AssertionError err = new AssertionError("Failed to create CQ " + cqName
             + " . ");
         err.initCause(ex);
-        getLogWriter().info("CqService is :" + cqService, err);
+        LogWriterUtils.getLogWriter().info("CqService is :" + cqService, err);
         throw err;
       }
     }
@@ -343,16 +346,16 @@ public class ClientCQPostAuthorizationDUnitTest extends
         try {
           cq1 = cqService.getCq(cqName);
           if (cq1 == null) {
-            getLogWriter().info(
+            LogWriterUtils.getLogWriter().info(
                 "Failed to get CqQuery object for CQ name: " + cqName);
             fail("Failed to get CQ " + cqName);
           } else {
-            getLogWriter().info("Obtained CQ, CQ name: " + cq1.getName());
+            LogWriterUtils.getLogWriter().info("Obtained CQ, CQ name: " + cq1.getName());
             assertTrue("newCq() state mismatch", cq1.getState().isStopped());
           }
         } catch (Exception ex) {
-          getLogWriter().info("CqService is :" + cqService);
-          getLogWriter().error(ex);
+          LogWriterUtils.getLogWriter().info("CqService is :" + cqService);
+          LogWriterUtils.getLogWriter().error(ex);
           AssertionError err = new AssertionError("Failed to execute CQ "
               + cqName);
           err.initCause(ex);
@@ -366,9 +369,9 @@ public class ClientCQPostAuthorizationDUnitTest extends
             cqResults = cq1.executeWithInitialResults();
           } catch (CqException ce) {
             if (ce.getCause() instanceof NotAuthorizedException && !postAuthzAllowed[i]) {
-              getLogWriter().info("Got expected exception for CQ " + cqName);
+              LogWriterUtils.getLogWriter().info("Got expected exception for CQ " + cqName);
             } else {
-              getLogWriter().info("CqService is: " + cqService);
+              LogWriterUtils.getLogWriter().info("CqService is: " + cqService);
               ce.printStackTrace();
               AssertionError err = new AssertionError("Failed to execute CQ "
                   + cqName);
@@ -376,14 +379,14 @@ public class ClientCQPostAuthorizationDUnitTest extends
               throw err;
             }
           } catch (Exception ex) {
-            getLogWriter().info("CqService is: " + cqService);
+            LogWriterUtils.getLogWriter().info("CqService is: " + cqService);
             ex.printStackTrace();
             AssertionError err = new AssertionError("Failed to execute CQ "
                 + cqName);
             err.initCause(ex);
             throw err;
           }
-          getLogWriter().info("initial result size = " + cqResults.size());
+          LogWriterUtils.getLogWriter().info("initial result size = " + cqResults.size());
           assertTrue("executeWithInitialResults() state mismatch", cq1
               .getState().isRunning());
           if (expectedResultsSize >= 0) {
@@ -395,9 +398,9 @@ public class ClientCQPostAuthorizationDUnitTest extends
             cq1.execute();
           } catch (CqException ce) {
             if (ce.getCause() instanceof NotAuthorizedException && !postAuthzAllowed[i]) {
-              getLogWriter().info("Got expected exception for CQ " + cqName);
+              LogWriterUtils.getLogWriter().info("Got expected exception for CQ " + cqName);
             } else {
-              getLogWriter().info("CqService is: " + cqService);
+              LogWriterUtils.getLogWriter().info("CqService is: " + cqService);
               ce.printStackTrace();
               AssertionError err = new AssertionError("Failed to execute CQ "
                   + cqName);
@@ -409,7 +412,7 @@ public class ClientCQPostAuthorizationDUnitTest extends
                 + cqName);
             err.initCause(ex);
             if (expectedErr == null) {
-              getLogWriter().info("CqService is: " + cqService, err);
+              LogWriterUtils.getLogWriter().info("CqService is: " + cqService, err);
             }
             throw err;
           }
@@ -492,7 +495,7 @@ public class ClientCQPostAuthorizationDUnitTest extends
             + " CQs to be registered on this server.";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60 * 1000, 100, false);
+    Wait.waitForCriterion(wc, 60 * 1000, 100, false);
   }
 
   public static void checkCQListeners(Integer numOfUsers,

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/security/ClientPostAuthorizationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/security/ClientPostAuthorizationDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/security/ClientPostAuthorizationDUnitTest.java
index bd22dfb..78a45b1 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/security/ClientPostAuthorizationDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/security/ClientPostAuthorizationDUnitTest.java
@@ -27,6 +27,7 @@ import security.CredentialGenerator;
 import com.gemstone.gemfire.cache.operations.OperationContext.OperationCode;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 
 /**
  * Tests for authorization from client to server. This tests for authorization
@@ -126,10 +127,10 @@ public class ClientPostAuthorizationDUnitTest extends
       String accessor = gen.getAuthorizationCallback();
       TestAuthzCredentialGenerator tgen = new TestAuthzCredentialGenerator(gen);
 
-      getLogWriter().info("testAllPostOps: Using authinit: " + authInit);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info("testAllPostOps: Using authinit: " + authInit);
+      LogWriterUtils.getLogWriter().info(
           "testAllPostOps: Using authenticator: " + authenticator);
-      getLogWriter().info("testAllPostOps: Using accessor: " + accessor);
+      LogWriterUtils.getLogWriter().info("testAllPostOps: Using accessor: " + accessor);
 
       // Start servers with all required properties
       Properties serverProps = buildProperties(authenticator, accessor, true,
@@ -315,7 +316,7 @@ public class ClientPostAuthorizationDUnitTest extends
         OperationWithAction.OPBLOCK_NO_FAILOVER };
 
       AuthzCredentialGenerator gen = getXmlAuthzGenerator();
-      getLogWriter().info("Executing opblocks with credential generator " + gen);
+      LogWriterUtils.getLogWriter().info("Executing opblocks with credential generator " + gen);
       CredentialGenerator cGen = gen.getCredentialGenerator();
       Properties extraAuthProps = cGen.getSystemProperties();
       Properties javaProps = cGen.getJavaProperties();
@@ -325,11 +326,11 @@ public class ClientPostAuthorizationDUnitTest extends
       String accessor = gen.getAuthorizationCallback();
       TestAuthzCredentialGenerator tgen = new TestAuthzCredentialGenerator(gen);
 
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testAllOpsNotifications: Using authinit: " + authInit);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testAllOpsNotifications: Using authenticator: " + authenticator);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testAllOpsNotifications: Using accessor: " + accessor);
 
       // Start servers with all required properties
@@ -383,9 +384,8 @@ public class ClientPostAuthorizationDUnitTest extends
 
   // End Region: Tests
 
-  public void tearDown2() throws Exception {
-
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     // close the clients first
     client1.invoke(SecurityTestUtil.class, "closeCache");
     client2.invoke(SecurityTestUtil.class, "closeCache");
@@ -394,5 +394,4 @@ public class ClientPostAuthorizationDUnitTest extends
     server1.invoke(SecurityTestUtil.class, "closeCache");
     server2.invoke(SecurityTestUtil.class, "closeCache");
   }
-
 }
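
Logging in these tests moves from the inherited getLogWriter() accessor to the static LogWriterUtils helper. A minimal sketch of the replacement call (the message text is illustrative only):

    // com.gemstone.gemfire.test.dunit.LogWriterUtils supplies the shared dunit LogWriter
    LogWriterUtils.getLogWriter().info("starting servers for the post-authorization scenario");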

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/security/MultiuserAPIDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/security/MultiuserAPIDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/security/MultiuserAPIDUnitTest.java
index 26f2e31..e3931b0 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/security/MultiuserAPIDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/security/MultiuserAPIDUnitTest.java
@@ -36,7 +36,9 @@ import com.gemstone.gemfire.cache.query.Query;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.PoolManagerImpl;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 import security.DummyCredentialGenerator;
@@ -136,11 +138,11 @@ public class MultiuserAPIDUnitTest extends ClientAuthorizationTestBase {
     String authenticator = gen.getAuthenticator();
     String authInit = gen.getAuthInit();
 
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "testValidCredentials: Using scheme: " + gen.classCode());
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "testValidCredentials: Using authenticator: " + authenticator);
-    getLogWriter().info("testValidCredentials: Using authinit: " + authInit);
+    LogWriterUtils.getLogWriter().info("testValidCredentials: Using authinit: " + authInit);
 
     // Start the servers
     Integer locPort1 = SecurityTestUtil.getLocatorPort();
@@ -156,12 +158,12 @@ public class MultiuserAPIDUnitTest extends ClientAuthorizationTestBase {
     // Start the clients with valid credentials
     Properties credentials1 = gen.getValidCredentials(1);
     Properties javaProps1 = gen.getJavaProperties();
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "testValidCredentials: For first client credentials: " + credentials1
             + " : " + javaProps1);
     Properties credentials2 = gen.getValidCredentials(2);
     Properties javaProps2 = gen.getJavaProperties();
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "testValidCredentials: For second client credentials: " + credentials2
             + " : " + javaProps2);
     client1.invoke(MultiuserAPIDUnitTest.class, "createCacheClient",
@@ -198,7 +200,7 @@ public class MultiuserAPIDUnitTest extends ClientAuthorizationTestBase {
         Log.getLogWriter().info(op + ": Got expected exception: " + uoe);
         success = true;
       } catch (Exception e) {
-        fail("Got unexpected exception while doing " + op, e);
+        Assert.fail("Got unexpected exception while doing " + op, e);
       }
       if (!success) {
         fail("Did not get exception while doing " + op);
@@ -369,7 +371,7 @@ public class MultiuserAPIDUnitTest extends ClientAuthorizationTestBase {
           Log.getLogWriter().info(op + ": Got expected exception: " + uoe);
           success = true;
         } catch (Exception e) {
-          fail("Got unexpected exception while doing " + op, e);
+          Assert.fail("Got unexpected exception while doing " + op, e);
         }
         if (!success) {
           fail("Did not get exception while doing " + op);
@@ -378,8 +380,8 @@ public class MultiuserAPIDUnitTest extends ClientAuthorizationTestBase {
     }
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     // close the clients first
     client1.invoke(SecurityTestUtil.class, "closeCache");
     client2.invoke(SecurityTestUtil.class, "closeCache");
@@ -387,5 +389,4 @@ public class MultiuserAPIDUnitTest extends ClientAuthorizationTestBase {
     server1.invoke(SecurityTestUtil.class, "closeCache");
     server2.invoke(SecurityTestUtil.class, "closeCache");
   }
-
 }
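
The two-argument fail(String, Throwable), formerly inherited from the test base class, now comes from the static dunit Assert helper so the causing exception stays attached to the failure. A minimal sketch (doOperation() is a hypothetical stand-in for the call under test):

    try {
      doOperation();
    } catch (Exception e) {
      // fails the test and chains e as the cause, unlike JUnit's one-argument fail(String)
      Assert.fail("Got unexpected exception while doing the operation", e);
    }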

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/security/MultiuserDurableCQAuthzDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/security/MultiuserDurableCQAuthzDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/security/MultiuserDurableCQAuthzDUnitTest.java
index 675f8c6..23f496d 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/security/MultiuserDurableCQAuthzDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/security/MultiuserDurableCQAuthzDUnitTest.java
@@ -39,6 +39,8 @@ import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.logging.InternalLogWriter;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 
 /**
@@ -62,7 +64,7 @@ public class MultiuserDurableCQAuthzDUnitTest extends
   public void setUp() throws Exception {
     super.setUp();
     getSystem();
-    invokeInEveryVM(new SerializableRunnable("getSystem") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
       public void run() {
         getSystem();
       }
@@ -318,7 +320,7 @@ public class MultiuserDurableCQAuthzDUnitTest extends
           + SecurityTestUtil.proxyCaches[i].getRegion(regionName).getFullPath();
       // Create CQ Attributes.
       CqAttributesFactory cqf = new CqAttributesFactory();
-      CqListener[] cqListeners = {new CqQueryTestListener(getLogWriter())};
+      CqListener[] cqListeners = {new CqQueryTestListener(LogWriterUtils.getLogWriter())};
       ((CqQueryTestListener)cqListeners[0]).cqName = cqName;
 
       cqf.initCqListeners(cqListeners);
@@ -332,7 +334,7 @@ public class MultiuserDurableCQAuthzDUnitTest extends
         AssertionError err = new AssertionError("Failed to create CQ " + cqName
             + " . ");
         err.initCause(ex);
-        getLogWriter().info("CqService is :" + cqService, err);
+        LogWriterUtils.getLogWriter().info("CqService is :" + cqService, err);
         throw err;
       }
     }
@@ -360,16 +362,16 @@ public class MultiuserDurableCQAuthzDUnitTest extends
         try {
           cq1 = cqService.getCq(cqName);
           if (cq1 == null) {
-            getLogWriter().info(
+            LogWriterUtils.getLogWriter().info(
                 "Failed to get CqQuery object for CQ name: " + cqName);
             fail("Failed to get CQ " + cqName);
           } else {
-            getLogWriter().info("Obtained CQ, CQ name: " + cq1.getName());
+            LogWriterUtils.getLogWriter().info("Obtained CQ, CQ name: " + cq1.getName());
             assertTrue("newCq() state mismatch", cq1.getState().isStopped());
           }
         } catch (Exception ex) {
-          getLogWriter().info("CqService is :" + cqService);
-          getLogWriter().error(ex);
+          LogWriterUtils.getLogWriter().info("CqService is :" + cqService);
+          LogWriterUtils.getLogWriter().error(ex);
           AssertionError err = new AssertionError("Failed to execute CQ "
               + cqName);
           err.initCause(ex);
@@ -382,14 +384,14 @@ public class MultiuserDurableCQAuthzDUnitTest extends
           try {
             cqResults = cq1.executeWithInitialResults();
           } catch (Exception ex) {
-            getLogWriter().info("CqService is: " + cqService);
+            LogWriterUtils.getLogWriter().info("CqService is: " + cqService);
             ex.printStackTrace();
             AssertionError err = new AssertionError("Failed to execute CQ "
                 + cqName);
             err.initCause(ex);
             throw err;
           }
-          getLogWriter().info("initial result size = " + cqResults.size());
+          LogWriterUtils.getLogWriter().info("initial result size = " + cqResults.size());
           assertTrue("executeWithInitialResults() state mismatch", cq1
               .getState().isRunning());
           if (expectedResultsSize >= 0) {
@@ -404,7 +406,7 @@ public class MultiuserDurableCQAuthzDUnitTest extends
                 + cqName);
             err.initCause(ex);
             if (expectedErr == null) {
-              getLogWriter().info("CqService is: " + cqService, err);
+              LogWriterUtils.getLogWriter().info("CqService is: " + cqService, err);
             }
             throw err;
           }
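
The invokeInEveryVM(...) helper used in setUp() above moves to the static Invoke utility. A minimal sketch of the setUp pattern shown in this diff, which connects every dunit VM to the distributed system:

    Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
      public void run() {
        getSystem();   // executes in each remote dunit VM
      }
    });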

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-wan/src/test/java/com/gemstone/gemfire/internal/cache/UpdateVersionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-wan/src/test/java/com/gemstone/gemfire/internal/cache/UpdateVersionDUnitTest.java b/gemfire-wan/src/test/java/com/gemstone/gemfire/internal/cache/UpdateVersionDUnitTest.java
index 70ecac5..2d1e2cd 100644
--- a/gemfire-wan/src/test/java/com/gemstone/gemfire/internal/cache/UpdateVersionDUnitTest.java
+++ b/gemfire-wan/src/test/java/com/gemstone/gemfire/internal/cache/UpdateVersionDUnitTest.java
@@ -54,10 +54,15 @@ import com.gemstone.gemfire.internal.cache.versions.VersionStamp;
 import com.gemstone.gemfire.internal.cache.versions.VersionTag;
 import com.gemstone.gemfire.internal.cache.wan.InternalGatewaySenderFactory;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * @author Shobhit Agarwal
@@ -67,7 +72,7 @@ public class UpdateVersionDUnitTest extends DistributedTestCase {
 
   protected static final String regionName = "testRegion";
   protected static Cache cache;
-  private static Set<ExpectedException>expectedExceptions = new HashSet<ExpectedException>();
+  private static Set<IgnoredException>expectedExceptions = new HashSet<IgnoredException>();
 
   
   
@@ -75,10 +80,10 @@ public class UpdateVersionDUnitTest extends DistributedTestCase {
     super(name);
   }
   
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     closeCache();
-    invokeInEveryVM(new SerializableRunnable() { public void run() {
+    Invoke.invokeInEveryVM(new SerializableRunnable() { public void run() {
       closeCache();
      } });
   }
@@ -184,7 +189,7 @@ public class UpdateVersionDUnitTest extends DistributedTestCase {
               throw new RuntimeException("unexpected exception", e);
             }
             if (entry != null) {
-              getLogWriter().info("found entry " + entry);
+              LogWriterUtils.getLogWriter().info("found entry " + entry);
             }
             return (entry != null);
           }
@@ -193,7 +198,7 @@ public class UpdateVersionDUnitTest extends DistributedTestCase {
             return "Expected "+key+" to be received on remote WAN site";
           }
         };
-        DistributedTestCase.waitForCriterion(wc, 30000, 500, true);
+        Wait.waitForCriterion(wc, 30000, 500, true);
 
         wc = new WaitCriterion() {
           public boolean done() {
@@ -206,7 +211,7 @@ public class UpdateVersionDUnitTest extends DistributedTestCase {
             return "waiting for timestamp to be updated";
           }
         };
-        DistributedTestCase.waitForCriterion(wc, 30000, 500, true);
+        Wait.waitForCriterion(wc, 30000, 500, true);
 
         Entry entry = region.getEntry(key);
         assertTrue("entry class is wrong: " + entry, entry instanceof EntrySnapshot);
@@ -318,7 +323,7 @@ public class UpdateVersionDUnitTest extends DistributedTestCase {
             return "Expected key-1 to be received on remote WAN site";
           }
         };
-        DistributedTestCase.waitForCriterion(wc, 30000, 500, true);
+        Wait.waitForCriterion(wc, 30000, 500, true);
 
         wc = new WaitCriterion() {
           public boolean done() {
@@ -331,7 +336,7 @@ public class UpdateVersionDUnitTest extends DistributedTestCase {
             return "waiting for timestamp to be updated";
           }
         };
-        DistributedTestCase.waitForCriterion(wc, 30000, 500, true);
+        Wait.waitForCriterion(wc, 30000, 500, true);
 
         Entry entry = region.getEntry(key);
         assertTrue(entry instanceof NonTXEntry);
@@ -448,7 +453,7 @@ public class UpdateVersionDUnitTest extends DistributedTestCase {
               throw new RuntimeException("unexpected exception", e);
             }
             if (entry != null) {
-              getLogWriter().info("found entry " + entry);
+              LogWriterUtils.getLogWriter().info("found entry " + entry);
             }
             return (entry != null);
           }
@@ -457,7 +462,7 @@ public class UpdateVersionDUnitTest extends DistributedTestCase {
             return "Expected key-1 to be received on remote WAN site";
           }
         };
-        DistributedTestCase.waitForCriterion(wc, 30000, 500, true);
+        Wait.waitForCriterion(wc, 30000, 500, true);
 
         wc = new WaitCriterion() {
           public boolean done() {
@@ -470,7 +475,7 @@ public class UpdateVersionDUnitTest extends DistributedTestCase {
             return "waiting for timestamp to be updated";
           }
         };
-        DistributedTestCase.waitForCriterion(wc, 30000, 500, true);
+        Wait.waitForCriterion(wc, 30000, 500, true);
 
         Entry entry = region.getEntry(key);
         assertTrue(entry instanceof EntrySnapshot);
@@ -587,7 +592,7 @@ public class UpdateVersionDUnitTest extends DistributedTestCase {
               throw new RuntimeException("unexpected exception", e);
             }
             if (entry != null) {
-              getLogWriter().info("found entry " + entry);
+              LogWriterUtils.getLogWriter().info("found entry " + entry);
             }
             return (entry != null);
           }
@@ -596,7 +601,7 @@ public class UpdateVersionDUnitTest extends DistributedTestCase {
             return "Expected key-1 to be received on remote WAN site";
           }
         };
-        DistributedTestCase.waitForCriterion(wc, 30000, 500, true);
+        Wait.waitForCriterion(wc, 30000, 500, true);
 
         wc = new WaitCriterion() {
           public boolean done() {
@@ -609,7 +614,7 @@ public class UpdateVersionDUnitTest extends DistributedTestCase {
             return "waiting for timestamp to be updated";
           }
         };
-        DistributedTestCase.waitForCriterion(wc, 30000, 500, true);
+        Wait.waitForCriterion(wc, 30000, 500, true);
 
         Entry entry = region.getEntry(key);
         assertTrue(entry instanceof EntrySnapshot);
@@ -646,27 +651,27 @@ public class UpdateVersionDUnitTest extends DistributedTestCase {
    */
 
   private static void createCache(Integer locPort) {
-    UpdateVersionDUnitTest test = new UpdateVersionDUnitTest(testName);
+    UpdateVersionDUnitTest test = new UpdateVersionDUnitTest(getTestMethodName());
     Properties props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
     props.setProperty(DistributionConfig.LOCATORS_NAME, "localhost[" + locPort + "]");
-    props.setProperty(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+    props.setProperty(DistributionConfig.LOG_LEVEL_NAME, LogWriterUtils.getDUnitLogLevel());
     props.setProperty(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "false");
     props.setProperty(DistributionConfig.USE_CLUSTER_CONFIGURATION_NAME, "false");
     InternalDistributedSystem ds = test.getSystem(props);
     cache = CacheFactory.create(ds); 
-    ExpectedException ex = new ExpectedException("could not get remote locator information for remote site");
-    cache.getLogger().info(ex.getAddString());
+    IgnoredException ex = new IgnoredException("could not get remote locator information for remote site");
+    cache.getLogger().info(ex.getAddMessage());
     expectedExceptions.add(ex);
-    ex = new ExpectedException("Pool ln1 is not available");
-    cache.getLogger().info(ex.getAddString());
+    ex = new IgnoredException("Pool ln1 is not available");
+    cache.getLogger().info(ex.getAddMessage());
     expectedExceptions.add(ex);
   }
   
   private static void closeCache() {
     if (cache != null && !cache.isClosed()) {
-      for (ExpectedException expectedException: expectedExceptions) {
-        cache.getLogger().info(expectedException.getRemoveString());
+      for (IgnoredException expectedException: expectedExceptions) {
+        cache.getLogger().info(expectedException.getRemoveMessage());
       }
       expectedExceptions.clear();
       cache.getDistributedSystem().disconnect();
@@ -781,11 +786,11 @@ public class UpdateVersionDUnitTest extends DistributedTestCase {
         return "Expected sender isRunning state to be true but is false";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 300000, 500, true);
+    Wait.waitForCriterion(wc, 300000, 500, true);
   }
 
   public static Integer createFirstRemoteLocator(int dsId, int remoteLocPort) {
-    UpdateVersionDUnitTest test = new UpdateVersionDUnitTest(testName);
+    UpdateVersionDUnitTest test = new UpdateVersionDUnitTest(getTestMethodName());
     int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
     Properties props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME,"0");
@@ -853,7 +858,7 @@ public class UpdateVersionDUnitTest extends DistributedTestCase {
   }
 
   public static int createReceiver(int locPort) {
-    UpdateVersionDUnitTest test = new UpdateVersionDUnitTest(testName);
+    UpdateVersionDUnitTest test = new UpdateVersionDUnitTest(getTestMethodName());
     Properties props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
     props.setProperty(DistributionConfig.LOCATORS_NAME, "localhost[" + locPort
@@ -945,7 +950,7 @@ public class UpdateVersionDUnitTest extends DistributedTestCase {
   }
 
   public static Integer createFirstLocatorWithDSId(int dsId) {
-    UpdateVersionDUnitTest test = new UpdateVersionDUnitTest(testName);
+    UpdateVersionDUnitTest test = new UpdateVersionDUnitTest(getTestMethodName());
     int port = AvailablePortHelper.getRandomAvailablePortForDUnitSite();
     Properties props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME,"0");
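
This file also shows the static testName field being replaced by getTestMethodName(), and a second style of IgnoredException usage: writing the add/remove markers directly to the cache logger instead of registering them with the framework. A minimal sketch of that pairing, using the method names from the hunks above and the test's static cache field:

    IgnoredException ex = new IgnoredException("could not get remote locator information for remote site");
    cache.getLogger().info(ex.getAddMessage());      // begin ignoring the suspect string
    // ... test body that may log the benign message ...
    cache.getLogger().info(ex.getRemoveMessage());   // stop ignoring it again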


[48/62] [abbrv] [partial] incubator-geode git commit: GEODE-773: Extract static methods from DistributedTestCase

Posted by je...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryUsingFunctionContextDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryUsingFunctionContextDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryUsingFunctionContextDUnitTest.java
index 8384c18..fcc96dc 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryUsingFunctionContextDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryUsingFunctionContextDUnitTest.java
@@ -55,7 +55,12 @@ import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.internal.cache.LocalDataSet;
 import com.gemstone.gemfire.internal.cache.PartitionedRegion;
 import com.gemstone.gemfire.internal.cache.execute.PRClientServerTestBase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -130,12 +135,15 @@ public class QueryUsingFunctionContextDUnitTest extends CacheTestCase {
   }
 
   @Override
-  public void tearDown2() throws Exception {
-    invokeInEveryVM(CacheTestCase.class, "disconnectFromDS");
-    super.tearDown2();
-    invokeInEveryVM(QueryObserverHolder.class, "reset");
+  protected final void preTearDownCacheTestCase() throws Exception {
+    Invoke.invokeInEveryVM(CacheTestCase.class, "disconnectFromDS");
+  }
+  
+  @Override
+  protected final void postTearDownCacheTestCase() throws Exception {
+    Invoke.invokeInEveryVM(QueryObserverHolder.class, "reset");
     cache = null;
-    invokeInEveryVM(new SerializableRunnable() { public void run() { cache = null; } });
+    Invoke.invokeInEveryVM(new SerializableRunnable() { public void run() { cache = null; } });
   }
 
   @Override
@@ -156,7 +164,7 @@ public class QueryUsingFunctionContextDUnitTest extends CacheTestCase {
    * Test on Replicated Region.
    */
   public void testQueriesWithFilterKeysOnReplicatedRegion() {
-    addExpectedException("IllegalArgumentException");
+    IgnoredException.addIgnoredException("IllegalArgumentException");
 
     Object[][] r = new Object[queriesForRR.length][2];
 
@@ -226,7 +234,7 @@ public class QueryUsingFunctionContextDUnitTest extends CacheTestCase {
 
   public void testInvalidQueries() {
     
-    addExpectedException("Syntax error");
+    IgnoredException.addIgnoredException("Syntax error");
     client.invoke(new CacheSerializableRunnable("Test query on client and server") {
       
       @Override
@@ -361,7 +369,7 @@ public class QueryUsingFunctionContextDUnitTest extends CacheTestCase {
    *
    */
   public void testQueriesWithFilterKeysOnPRWithBucketDestroy() {
-    addExpectedException("QueryInvocationTargetException");
+    IgnoredException.addIgnoredException("QueryInvocationTargetException");
     Object[][] r = new Object[queries.length][2];
     Set filter =  new HashSet();
 
@@ -433,11 +441,11 @@ public class QueryUsingFunctionContextDUnitTest extends CacheTestCase {
   *
   */
  public void testQueriesWithFilterKeysOnPRWithRebalancing() {
-   addExpectedException("QueryInvocationTargetException");
-   addExpectedException("java.net.SocketException");
-   addExpectedException("ServerConnectivityException");
-   addExpectedException("FunctionException");
-   addExpectedException("IOException");
+   IgnoredException.addIgnoredException("QueryInvocationTargetException");
+   IgnoredException.addIgnoredException("java.net.SocketException");
+   IgnoredException.addIgnoredException("ServerConnectivityException");
+   IgnoredException.addIgnoredException("FunctionException");
+   IgnoredException.addIgnoredException("IOException");
 
    // Close cache on server1
    server1.invoke(new CacheSerializableRunnable("Set QueryObserver in cache on server1") {
@@ -512,7 +520,7 @@ public class QueryUsingFunctionContextDUnitTest extends CacheTestCase {
 
  
  public void testNonColocatedRegionQueries() {
-   addExpectedException("UnsupportedOperationException");
+   IgnoredException.addIgnoredException("UnsupportedOperationException");
    client.invoke(new CacheSerializableRunnable("Test query on non-colocated regions on server") {
      @Override
      public void run2() throws CacheException {
@@ -527,7 +535,7 @@ public class QueryUsingFunctionContextDUnitTest extends CacheTestCase {
           fail("Function call did not fail for query with function context");
          } catch (FunctionException e) {
           if (!(e.getCause() instanceof UnsupportedOperationException)) {
-            fail("Should have received an UnsupportedOperationException but received", e);
+            Assert.fail("Should have received an UnsupportedOperationException but received", e);
           }
          }  
         }   
@@ -725,7 +733,7 @@ public class QueryUsingFunctionContextDUnitTest extends CacheTestCase {
 
     //Create client cache without regions
     client.invoke(QueryUsingFunctionContextDUnitTest.class, "createCacheClientWithoutRegion",
-        new Object[] { getServerHostName(server1.getHost()), port1, port2,
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()), port1, port2,
             port3 });
 
     //Create proxy regions on client.
@@ -1016,7 +1024,7 @@ public class QueryUsingFunctionContextDUnitTest extends CacheTestCase {
         Region region = cache.getRegion(regionName);
         for (int j = from; j < to; j++)
           region.put(new Integer(j), portfolio[j]);
-        getLogWriter()
+        LogWriterUtils.getLogWriter()
             .info(
                 "PRQueryDUnitHelper#getCacheSerializableRunnableForPRPuts: Inserted Portfolio data on Region "
                     + regionName);
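
For CacheTestCase subclasses the single tearDown2() override splits into two hooks, one before and one after the cache tear-down itself. A minimal sketch of the shape introduced in the hunk above:

    @Override
    protected final void preTearDownCacheTestCase() throws Exception {
      // runs before CacheTestCase closes the cache
      Invoke.invokeInEveryVM(CacheTestCase.class, "disconnectFromDS");
    }

    @Override
    protected final void postTearDownCacheTestCase() throws Exception {
      // runs after the cache has been torn down
      Invoke.invokeInEveryVM(QueryObserverHolder.class, "reset");
    }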

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryUsingPoolDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryUsingPoolDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryUsingPoolDUnitTest.java
index 35f71fe..d1e42d3 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryUsingPoolDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryUsingPoolDUnitTest.java
@@ -49,9 +49,15 @@ import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheClientNotifier;
+import com.gemstone.gemfire.test.dunit.Assert;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 /**
  * Tests remote (client/server) query execution.
@@ -94,20 +100,15 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
   public void setUp() throws Exception {
     super.setUp();
     disconnectAllFromDS();
-    addExpectedException("Connection reset");
-    addExpectedException("Socket input is shutdown");
+    IgnoredException.addIgnoredException("Connection reset");
+    IgnoredException.addIgnoredException("Socket input is shutdown");
   }
 
-  public void tearDown2() throws Exception {
-    try {
-      super.tearDown2();
-    }
-    finally {
-      disconnectAllFromDS();
-    }
+  @Override
+  protected final void postTearDownCacheTestCase() throws Exception {
+    disconnectAllFromDS();
   }
 
-
   public void createPool(VM vm, String poolName, String server, int port, boolean subscriptionEnabled) {
     createPool(vm, poolName, new String[]{server}, new int[]{port}, subscriptionEnabled);  
   }
@@ -126,7 +127,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
         PoolFactory cpf = PoolManager.createFactory();
         cpf.setSubscriptionEnabled(subscriptionEnabled);
         for (int i=0; i < servers.length; i++){
-          getLogWriter().info("### Adding to Pool. ### Server : " + servers[i] + " Port : " + ports[i]);
+          LogWriterUtils.getLogWriter().info("### Adding to Pool. ### Server : " + servers[i] + " Port : " + ports[i]);
           cpf.addServer(servers[i], ports[i]);
         }
 
@@ -144,7 +145,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
         if (count == compiledQueryCount){
           break;
         } else {
-          pause(1 * 100);
+          Wait.pause(1 * 100);
         }
       }
       assertEquals(compiledQueryCount, count);
@@ -169,16 +170,16 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
     vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
       public void run2() throws CacheException {
         Properties config = new Properties();
-        config.setProperty("locators", "localhost["+getDUnitLocatorPort()+"]");
+        config.setProperty("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
         system = (InternalDistributedSystem) DistributedSystem.connect(config);
         AttributesFactory factory = new AttributesFactory();
         factory.setScope(Scope.LOCAL);
         createRegion(name, rootRegionName, factory.create());
-        pause(1000);
+        Wait.pause(1000);
         try {
           startBridgeServer(0, false);
         } catch (Exception ex) {
-          fail("While starting CacheServer", ex);
+          Assert.fail("While starting CacheServer", ex);
         }
       }
     });
@@ -195,7 +196,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
 
 
     final int port = vm0.invokeInt(QueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(vm0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
     final String regionName = "/" + rootRegionName + "/" + name;
 
     // Create client pool.
@@ -213,7 +214,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
         try {
           qService = (PoolManager.find(poolName)).getQueryService();
         } catch (Exception e) {
-          fail("Failed to get QueryService.", e);
+          Assert.fail("Failed to get QueryService.", e);
         }          
 
         queryString = "import com.gemstone.gemfire.admin.QueryUsingPoolDUnitTest.TestObject; select distinct * from " + regionName;
@@ -222,7 +223,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
           Query query = qService.newQuery(queryString);
           results = (SelectResults)query.execute();
         } catch (Exception e) {
-          fail("Failed executing " + queryString, e);
+          Assert.fail("Failed executing " + queryString, e);
         }          
 
         assertEquals(numberOfEntries, results.size());
@@ -233,7 +234,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
           Query query = qService.newQuery(queryString);
           results = (SelectResults)query.execute();
         } catch (Exception e) {
-          fail("Failed executing " + queryString, e);
+          Assert.fail("Failed executing " + queryString, e);
         }
         assertEquals(numberOfEntries, results.size());
         assertTrue(!results.getCollectionType().allowsDuplicates());
@@ -243,7 +244,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
           Query query = qService.newQuery(queryString);
           results = (SelectResults)query.execute();
         } catch (Exception e) {
-          fail("Failed executing " + queryString, e);
+          Assert.fail("Failed executing " + queryString, e);
         }
         assertEquals(0, results.size());
         assertTrue(!results.getCollectionType().allowsDuplicates());
@@ -253,7 +254,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
           Query query = qService.newQuery(queryString);
           results = (SelectResults)query.execute();
         } catch (Exception e) {
-          fail("Failed executing " + queryString, e);
+          Assert.fail("Failed executing " + queryString, e);
         }
         assertEquals(numberOfEntries/2, results.size());
         assertTrue(!results.getCollectionType().allowsDuplicates());
@@ -263,7 +264,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
           Query query = qService.newQuery(queryString);
           results = (SelectResults)query.execute();
         } catch (Exception e) {
-          fail("Failed executing " + queryString, e);
+          Assert.fail("Failed executing " + queryString, e);
         }
         assertEquals(1, results.size());
         assertTrue(!results.getCollectionType().allowsDuplicates());
@@ -273,7 +274,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
           Query query = qService.newQuery(queryString);
           results = (SelectResults)query.execute();
         } catch (Exception e) {
-          fail("Failed executing " + queryString, e);
+          Assert.fail("Failed executing " + queryString, e);
         }
         assertEquals(1, results.size());
         assertTrue(!results.getCollectionType().allowsDuplicates());
@@ -306,16 +307,16 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
     vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
       public void run2() throws CacheException {
         Properties config = new Properties();
-        config.setProperty("locators", "localhost["+getDUnitLocatorPort()+"]");
+        config.setProperty("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
         system = (InternalDistributedSystem) DistributedSystem.connect(config);
         AttributesFactory factory = new AttributesFactory();
         factory.setScope(Scope.LOCAL);
         createRegion(name, factory.create());
-        pause(1000);
+        Wait.pause(1000);
         try {
           startBridgeServer(0, false);
         } catch (Exception ex) {
-          fail("While starting CacheServer", ex);
+          Assert.fail("While starting CacheServer", ex);
         }
       }
     });
@@ -331,7 +332,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
     });
 
     final int port = vm0.invokeInt(QueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(vm0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
 
     final String regionName = "/" + rootRegionName + "/" + name;
 
@@ -350,7 +351,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
         try {
           qService = (PoolManager.find(poolName)).getQueryService();
         } catch (Exception e) {
-          fail("Failed to get QueryService.", e);
+          Assert.fail("Failed to get QueryService.", e);
         }          
 
         queryString = "import com.gemstone.gemfire.admin.QueryUsingPoolDUnitTest.TestObject; select distinct ticker, price from " + regionName;
@@ -358,7 +359,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
           Query query = qService.newQuery(queryString);
           results = (SelectResults)query.execute();
         } catch (Exception e) {
-          fail("Failed executing " + queryString, e);
+          Assert.fail("Failed executing " + queryString, e);
         }
         assertEquals(numberOfEntries, results.size());
         assertTrue(!results.getCollectionType().allowsDuplicates() && results.getCollectionType().getElementType().isStructType());
@@ -368,7 +369,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
           Query query = qService.newQuery(queryString);
           results = (SelectResults)query.execute();
         } catch (Exception e) {
-          fail("Failed executing " + queryString, e);
+          Assert.fail("Failed executing " + queryString, e);
         }
         assertEquals(numberOfEntries, results.size());
         assertTrue(!results.getCollectionType().allowsDuplicates() && results.getCollectionType().getElementType().isStructType());
@@ -378,7 +379,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
           Query query = qService.newQuery(queryString);
           results = (SelectResults)query.execute();
         } catch (Exception e) {
-          fail("Failed executing " + queryString, e);
+          Assert.fail("Failed executing " + queryString, e);
         }
         assertEquals(0, results.size());
         assertTrue(!results.getCollectionType().allowsDuplicates() && results.getCollectionType().getElementType().isStructType());
@@ -388,7 +389,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
           Query query = qService.newQuery(queryString);
           results = (SelectResults)query.execute();
         } catch (Exception e) {
-          fail("Failed executing " + queryString, e);
+          Assert.fail("Failed executing " + queryString, e);
         }
         assertEquals(numberOfEntries/2, results.size());
         assertTrue(!results.getCollectionType().allowsDuplicates() && results.getCollectionType().getElementType().isStructType());
@@ -398,7 +399,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
           Query query = qService.newQuery(queryString);
           results = (SelectResults)query.execute();
         } catch (Exception e) {
-          fail("Failed executing " + queryString, e);
+          Assert.fail("Failed executing " + queryString, e);
         }
         assertEquals(1, results.size());
         assertTrue(!results.getCollectionType().allowsDuplicates() && results.getCollectionType().getElementType().isStructType());
@@ -408,7 +409,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
           Query query = qService.newQuery(queryString);
           results = (SelectResults)query.execute();
         } catch (Exception e) {
-          fail("Failed executing " + queryString, e);
+          Assert.fail("Failed executing " + queryString, e);
         }
         assertEquals(1, results.size());
         assertTrue(!results.getCollectionType().allowsDuplicates() && results.getCollectionType().getElementType().isStructType());
@@ -438,16 +439,16 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
     vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
       public void run2() throws CacheException {
         Properties config = new Properties();
-        config.setProperty("locators", "localhost["+getDUnitLocatorPort()+"]");
+        config.setProperty("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
         system = (InternalDistributedSystem) DistributedSystem.connect(config);
         AttributesFactory factory = new AttributesFactory();
         factory.setScope(Scope.LOCAL);
         createRegion(name, factory.create());
-        pause(1000);
+        Wait.pause(1000);
         try {
           startBridgeServer(0, false);
         } catch (Exception ex) {
-          fail("While starting CacheServer", ex);
+          Assert.fail("While starting CacheServer", ex);
         }
       }
     });
@@ -571,7 +572,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
 
     // Create client region
     final int port = vm0.invokeInt(QueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(vm0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
     vm1.invoke(new CacheSerializableRunnable("Create region") {
       public void run2() throws CacheException {
         Properties config = new Properties();
@@ -600,9 +601,9 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
         try {
           results = region.query(queryString);
         } catch (Exception e) {
-          fail("Failed executing " + queryString, e);
+          Assert.fail("Failed executing " + queryString, e);
         }
-        getLogWriter().fine("size: " + results.size());
+        LogWriterUtils.getLogWriter().fine("size: " + results.size());
         //assertEquals(numberOfEntries, results.size());
         assertTrue(!results.getCollectionType().allowsDuplicates() && results.getCollectionType().getElementType().isStructType());
       }
@@ -634,16 +635,16 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
     vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
       public void run2() throws CacheException {
         Properties config = new Properties();
-        config.setProperty("locators", "localhost["+getDUnitLocatorPort()+"]");
+        config.setProperty("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
         system = (InternalDistributedSystem) DistributedSystem.connect(config);
         AttributesFactory factory = new AttributesFactory();
         factory.setScope(Scope.LOCAL);
         createRegion(name, factory.create());
-        pause(1000);
+        Wait.pause(1000);
         try {
           startBridgeServer(0, false);
         } catch (Exception ex) {
-          fail("While starting CacheServer", ex);
+          Assert.fail("While starting CacheServer", ex);
         }
       }
     });
@@ -660,7 +661,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
 
     // Create client region
     final int port = vm0.invokeInt(QueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(vm0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
 
     final String regionName = "/" + rootRegionName + "/" + name;
 
@@ -681,7 +682,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
         try {
           qService = (PoolManager.find(poolName)).getQueryService();
         } catch (Exception e) {
-          fail("Failed to get QueryService.", e);
+          Assert.fail("Failed to get QueryService.", e);
         }          
 
         // value query
@@ -690,7 +691,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
           Query query = qService.newQuery(queryString);
           results = (SelectResults)query.execute();
         } catch (Exception e) {
-          fail("Failed executing " + queryString, e);
+          Assert.fail("Failed executing " + queryString, e);
         }
         assertEquals(1, results.size());
         assertTrue(!results.getCollectionType().allowsDuplicates());
@@ -702,7 +703,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
           Query query = qService.newQuery(queryString);
           results = (SelectResults)query.execute();
         } catch (Exception e) {
-          fail("Failed executing " + queryString, e);
+          Assert.fail("Failed executing " + queryString, e);
         }
         assertEquals(1, results.size());
         assertTrue(!results.getCollectionType().allowsDuplicates());
@@ -714,7 +715,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
           Query query = qService.newQuery(queryString);
           results = (SelectResults)query.execute();
         } catch (Exception e) {
-          fail("Failed executing " + queryString, e);
+          Assert.fail("Failed executing " + queryString, e);
         }
         assertEquals(numberOfEntries, results.size());
         // All order-by query results are stored in a ResultsCollectionWrapper
@@ -738,7 +739,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
           Query query = qService.newQuery(queryString);
           results = (SelectResults)query.execute();
         } catch (Exception e) {
-          fail("Failed executing " + queryString, e);
+          Assert.fail("Failed executing " + queryString, e);
         }
         assertEquals(numberOfEntries, results.size());
         // All order-by query results are stored in a ResultsCollectionWrapper
@@ -762,7 +763,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
           Query query = qService.newQuery(queryString);
           results = (SelectResults)query.execute();
         } catch (Exception e) {
-          fail("Failed executing " + queryString, e);
+          Assert.fail("Failed executing " + queryString, e);
         }
         assertEquals(1, results.size());
         Object result = results.iterator().next();
@@ -776,7 +777,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
           Query query = qService.newQuery(queryString);
           results = (SelectResults)query.execute();
         } catch (Exception e) {
-          fail("Failed executing " + queryString, e);
+          Assert.fail("Failed executing " + queryString, e);
         }
         assertEquals(1, results.size());
         assertEquals("key-1", results.asList().get(0));
@@ -841,7 +842,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
 
     // Create client region
     final int port = vm0.invokeInt(QueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(vm0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
 
     final String regionName = "/" + this.rootRegionName + "/" + this.regionName;
 
@@ -861,16 +862,16 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
         try {
           qService = (PoolManager.find(poolName)).getQueryService();
         } catch (Exception e) {
-          fail("Failed to get QueryService.", e);
+          Assert.fail("Failed to get QueryService.", e);
         }          
 
         for (int i=0; i < queryString.length; i++){
           try {
-            getLogWriter().info("### Executing Query :" + queryString[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query :" + queryString[i]);
             Query query = qService.newQuery(queryString[i]);
             results = (SelectResults)query.execute(params[i]);
           } catch (Exception e) {
-            fail("Failed executing " + queryString[i], e);
+            Assert.fail("Failed executing " + queryString[i], e);
           }
           try {
             assertEquals(expectedResults[i], results.size());
@@ -894,16 +895,16 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
         try {
           qService = (PoolManager.find(poolName)).getQueryService();
         } catch (Exception e) {
-          fail("Failed to get QueryService.", e);
+          Assert.fail("Failed to get QueryService.", e);
         }          
         for (int x=0; x < useMaintainedCompiledQueries; x++){
           for (int i=0; i < queryString.length; i++){
             try {
-              getLogWriter().info("### Executing Query :" + queryString[i]);
+              LogWriterUtils.getLogWriter().info("### Executing Query :" + queryString[i]);
               Query query = qService.newQuery(queryString[i]);
               results = (SelectResults)query.execute(params[i]);
             } catch (Exception e) {
-              fail("Failed executing " + queryString[i], e);
+              Assert.fail("Failed executing " + queryString[i], e);
             }
             try {
               assertEquals(expectedResults[i], results.size());
@@ -1000,7 +1001,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
     final int port0 = vm0.invokeInt(QueryUsingPoolDUnitTest.class, "getCacheServerPort");
     final int port1 = vm1.invokeInt(QueryUsingPoolDUnitTest.class, "getCacheServerPort");
 
-    final String host0 = getServerHostName(vm0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
 
     final String regionName = "/" + this.rootRegionName + "/" + this.regionName;
 
@@ -1020,16 +1021,16 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
         try {
           qService = (PoolManager.find(poolName)).getQueryService();
         } catch (Exception e) {
-          fail("Failed to get QueryService.", e);
+          Assert.fail("Failed to get QueryService.", e);
         }          
         for (int j=0; j < queryString.length; j++){
           for (int i=0; i < queryString.length; i++){
             try {
-              getLogWriter().info("### Executing Query :" + queryString[i]);
+              LogWriterUtils.getLogWriter().info("### Executing Query :" + queryString[i]);
               Query query = qService.newQuery(queryString[i]);
               results = (SelectResults)query.execute(params[i]);
             } catch (Exception e) {
-              fail("Failed executing " + queryString[i], e);
+              Assert.fail("Failed executing " + queryString[i], e);
             }
             try {
               assertEquals(expectedResults[i], results.size());
@@ -1151,7 +1152,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
     });
 
     final int port = vm0.invokeInt(QueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(vm0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
 
     final String regionName = "/" + this.rootRegionName + "/" + this.regionName;
 
@@ -1242,7 +1243,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
     });
 
     final int port = vm0.invokeInt(QueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(vm0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
 
     final String regionName = "/" + this.rootRegionName + "/" + this.regionName;
 
@@ -1281,7 +1282,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
     vm2.invokeAsync(new CacheSerializableRunnable("Execute queries") {
       public void run2() throws CacheException {
         for (int i=0; i < 10; i++) {
-          pause(200);
+          Wait.pause(200);
           executeCompiledQueries(poolName, params);
         }
       }
@@ -1291,7 +1292,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
     validateCompiledQuery(vm0, queryString.length);
 
     // Let the compiled queries to be idle (not used).
-    pause(2 * 1000);    
+    Wait.pause(2 * 1000);    
     
     // Validate maintained compiled queries.
     this.validateCompiledQuery(vm0, 0);
@@ -1362,7 +1363,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
     });
 
     final int port = vm0.invokeInt(QueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(vm0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
 
     final String regionName = "/" + this.rootRegionName + "/" + this.regionName;
 
@@ -1403,7 +1404,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
     vm2.invokeAsync(new CacheSerializableRunnable("Execute queries") {
       public void run2() throws CacheException {
         for (int i=0; i < 10; i++) {
-          pause(10);
+          Wait.pause(10);
           executeCompiledQueries(poolName, params);
         }
       }
@@ -1508,7 +1509,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
     // Create client region
     final int port0 = vm0.invokeInt(QueryUsingPoolDUnitTest.class, "getCacheServerPort");
 
-    final String host0 = getServerHostName(vm0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
 
     final String regionName = "/" + this.rootRegionName + "/" + this.regionName;
 
@@ -1527,25 +1528,25 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
         try {
           qService = (PoolManager.find(poolName)).getQueryService();
         } catch (Exception e) {
-          fail("Failed to get QueryService.", e);
+          Assert.fail("Failed to get QueryService.", e);
         }      
         // Use minimal query, so that there will be multiple 
         // clients using the same compiled query at server.
         for (int j=0; j < 5; j++){
           for (int i=0; i < 2; i++){
             try {
-              getLogWriter().info("### Executing Query :" + queryString[i]);
+              LogWriterUtils.getLogWriter().info("### Executing Query :" + queryString[i]);
               Query query = qService.newQuery(queryString[i]);
               rs[0][0] = (SelectResults)query.execute(params[i]);
               Query query2 = qService.newQuery(querys[i]);
               rs[0][1] = (SelectResults)query2.execute();
               // Compare results.
             } catch (Exception e) {
-              fail("Failed executing " + queryString[i], e);
+              Assert.fail("Failed executing " + queryString[i], e);
             }
-            getLogWriter().info("### Comparing results for Query :" + ((i+1) * (j+1)) + " : " + queryString[i]);
+            LogWriterUtils.getLogWriter().info("### Comparing results for Query :" + ((i+1) * (j+1)) + " : " + queryString[i]);
             compareQueryResultsWithoutAndWithIndexes(rs, 1);
-            getLogWriter().info("### Done Comparing results for Query :" + ((i+1) * (j+1)) + " : " + queryString[i]); 
+            LogWriterUtils.getLogWriter().info("### Done Comparing results for Query :" + ((i+1) * (j+1)) + " : " + queryString[i]); 
           }
         }
       }
@@ -1560,9 +1561,9 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
       public void run2() throws CacheException {
         long compiledQueryUsedCount = -1;
         while (true) {
-          getLogWriter().info("### CompiledQueryUsedCount :" + compiledQueryUsedCount);
+          LogWriterUtils.getLogWriter().info("### CompiledQueryUsedCount :" + compiledQueryUsedCount);
           if (compiledQueryUsedCount == CacheClientNotifier.getInstance().getStats().getCompiledQueryUsedCount()) {
-            getLogWriter().info("### previous and current CompiledQueryUsedCounts are same :" + compiledQueryUsedCount);
+            LogWriterUtils.getLogWriter().info("### previous and current CompiledQueryUsedCounts are same :" + compiledQueryUsedCount);
             break;
           } else {
             compiledQueryUsedCount = CacheClientNotifier.getInstance().getStats().getCompiledQueryUsedCount();
@@ -1589,12 +1590,12 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
         try {
           qService = (PoolManager.find(poolName)).getQueryService();
         } catch (Exception e) {
-          fail("Failed to get QueryService.", e);
+          Assert.fail("Failed to get QueryService.", e);
         }          
         for (int j=0; j < queryString.length; j++){
           for (int i=0; i < queryString.length; i++){
             try {
-              getLogWriter().info("### Executing Query :" + queryString[i]);
+              LogWriterUtils.getLogWriter().info("### Executing Query :" + queryString[i]);
               Query query = qService.newQuery(queryString[i]);
               rs[0][0] = (SelectResults)query.execute(params[i]);
               Query query2 = qService.newQuery(querys[i]);
@@ -1602,7 +1603,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
               // Compare results.
               compareQueryResultsWithoutAndWithIndexes(rs, 1);
             } catch (Exception e) {
-              fail("Failed executing " + queryString[i], e);
+              Assert.fail("Failed executing " + queryString[i], e);
             }
           }
         }
@@ -1618,9 +1619,9 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
       public void run2() throws CacheException {
         long compiledQueryUsedCount = -1;
         while (true) {
-          getLogWriter().info("### previous CompiledQueryUsedCount :" + compiledQueryUsedCount);
+          LogWriterUtils.getLogWriter().info("### previous CompiledQueryUsedCount :" + compiledQueryUsedCount);
           if (compiledQueryUsedCount == CacheClientNotifier.getInstance().getStats().getCompiledQueryUsedCount()) {
-            getLogWriter().info("### previous and current CompiledQueryUsedCounts are same :" + compiledQueryUsedCount);
+            LogWriterUtils.getLogWriter().info("### previous and current CompiledQueryUsedCounts are same :" + compiledQueryUsedCount);
             break;
           } else {
             compiledQueryUsedCount = CacheClientNotifier.getInstance().getStats().getCompiledQueryUsedCount();
@@ -1664,17 +1665,17 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
     vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
       public void run2() throws CacheException {
         Properties config = new Properties();
-        config.setProperty("locators", "localhost["+getDUnitLocatorPort()+"]");
+        config.setProperty("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
         system = (InternalDistributedSystem) DistributedSystem.connect(config);
         AttributesFactory factory = new AttributesFactory();
         factory.setScope(Scope.LOCAL);
         createRegion(name+"1", factory.create());
         createRegion(name+"2", factory.create());
-        pause(1000);
+        Wait.pause(1000);
         try {
           startBridgeServer(0, false);
         } catch (Exception ex) {
-          fail("While starting CacheServer", ex);
+          Assert.fail("While starting CacheServer", ex);
         }
       }
     });
@@ -1695,7 +1696,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
 
     // Create client region
     final int port = vm0.invokeInt(QueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(vm0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
     final String regionName1 = "/" + rootRegionName + "/" + name+"1";
     final String regionName2 = "/" + rootRegionName + "/" + name+"2";
 
@@ -1713,7 +1714,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
         try {
           qService = (PoolManager.find(poolName)).getQueryService();
         } catch (Exception e) {
-          fail("Failed to get QueryService.", e);
+          Assert.fail("Failed to get QueryService.", e);
         }          
 
         queryString =
@@ -1722,7 +1723,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
           Query query = qService.newQuery(queryString);
           results = (SelectResults)query.execute();
         } catch (Exception e) {
-          fail("Failed executing " + queryString, e);
+          Assert.fail("Failed executing " + queryString, e);
         }
         assertEquals(numberOfEntries, results.size());
         assertTrue(!results.getCollectionType().allowsDuplicates() && results.getCollectionType().getElementType().isStructType());
@@ -1733,7 +1734,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
           Query query = qService.newQuery(queryString);
           results = (SelectResults)query.execute();
         } catch (Exception e) {
-          fail("Failed executing " + queryString, e);
+          Assert.fail("Failed executing " + queryString, e);
         }
         assertEquals(1, results.size());
         assertTrue(!results.getCollectionType().allowsDuplicates() && results.getCollectionType().getElementType().isStructType());
@@ -1767,16 +1768,16 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
     vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
       public void run2() throws CacheException {
         Properties config = new Properties();
-        config.setProperty("locators", "localhost["+getDUnitLocatorPort()+"]");
+        config.setProperty("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
         system = (InternalDistributedSystem) DistributedSystem.connect(config);
         AttributesFactory factory = new AttributesFactory();
         factory.setScope(Scope.LOCAL);
         createRegion(name, factory.create());
-        pause(1000);
+        Wait.pause(1000);
         try {
           startBridgeServer(0, false);
         } catch (Exception ex) {
-          fail("While starting CacheServer", ex);
+          Assert.fail("While starting CacheServer", ex);
         }
       }
     });
@@ -1792,7 +1793,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
     });
 
     final int port = vm0.invokeInt(QueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(vm0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
 
     final String regionName = "/" + rootRegionName + "/" + name;
 
@@ -1813,7 +1814,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
         try {
           qService = (PoolManager.find(poolName1)).getQueryService();
         } catch (Exception e) {
-          fail("Failed to get QueryService.", e);
+          Assert.fail("Failed to get QueryService.", e);
         }          
 
         queryString = "SELECT DISTINCT itr.value FROM " + regionName + ".entries itr where itr.key = 'key-1'";
@@ -1821,7 +1822,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
           Query query = qService.newQuery(queryString);
           results = (SelectResults)query.execute();
         } catch (Exception e) {
-          fail("Failed executing " + queryString, e);
+          Assert.fail("Failed executing " + queryString, e);
         }
         assertEquals(1, results.size());
         assertTrue(!results.getCollectionType().allowsDuplicates() && !results.getCollectionType().getElementType().isStructType());
@@ -1832,7 +1833,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
           Query query = qService.newQuery(queryString);
           results = (SelectResults)query.execute();
         } catch (Exception e) {
-          fail("Failed executing " + queryString, e);
+          Assert.fail("Failed executing " + queryString, e);
         }
         assertEquals(1, results.size());
         assertTrue(!results.getCollectionType().allowsDuplicates() && !results.getCollectionType().getElementType().isStructType());
@@ -1850,7 +1851,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
         try {
           qService = (PoolManager.find(poolName2)).getQueryService();
         } catch (Exception e) {
-          fail("Failed to get QueryService.", e);
+          Assert.fail("Failed to get QueryService.", e);
         }          
 
         queryString = "SELECT DISTINCT itr.value FROM " + regionName + ".entries itr where itr.key = 'key-1'";
@@ -1858,7 +1859,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
           Query query = qService.newQuery(queryString);
           results = (SelectResults)query.execute();
         } catch (Exception e) {
-          fail("Failed executing " + queryString, e);
+          Assert.fail("Failed executing " + queryString, e);
         }
         assertEquals(1, results.size());
         assertTrue(!results.getCollectionType().allowsDuplicates() && !results.getCollectionType().getElementType().isStructType());
@@ -1869,7 +1870,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
           Query query = qService.newQuery(queryString);
           results = (SelectResults)query.execute();
         } catch (Exception e) {
-          fail("Failed executing " + queryString, e);
+          Assert.fail("Failed executing " + queryString, e);
         }
         assertEquals(1, results.size());
         assertTrue(!results.getCollectionType().allowsDuplicates() && !results.getCollectionType().getElementType().isStructType());
@@ -1905,14 +1906,14 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
     vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
       public void run2() throws CacheException {
         Properties config = new Properties();
-        config.setProperty("locators", "localhost["+getDUnitLocatorPort()+"]");
+        config.setProperty("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
         system = (InternalDistributedSystem) DistributedSystem.connect(config);
 
-        pause(1000);
+        Wait.pause(1000);
         try {
           startBridgeServer(0, false);
         } catch (Exception ex) {
-          fail("While starting CacheServer", ex);
+          Assert.fail("While starting CacheServer", ex);
         }
       }
     });
@@ -1930,7 +1931,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
           }
 
         });
-        pause(1000);
+        Wait.pause(1000);
         for (int i=0; i<numberOfEntries; i++) {
           region1.put("key-"+i, new TestObject(i, "ibm"));
           region2.put("key-"+i, new TestObject(i, "ibm"));
@@ -1940,7 +1941,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
     });
 
     final int port = vm0.invokeInt(QueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(vm0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
 
     final String regionName1 = "/" + rootRegionName + "/" + name;
     final String regionName2 = "/" + rootRegionName + "/" + name + "_2";
@@ -1959,7 +1960,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
         try {
           qService = (PoolManager.find(poolName)).getQueryService();
         } catch (Exception e) {
-          fail("Failed to get QueryService.", e);
+          Assert.fail("Failed to get QueryService.", e);
         }
 
         try {
@@ -2001,16 +2002,16 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
     vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
       public void run2() throws CacheException {
         Properties config = new Properties();
-        config.setProperty("locators", "localhost["+getDUnitLocatorPort()+"]");
+        config.setProperty("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
         system = (InternalDistributedSystem) DistributedSystem.connect(config);
         AttributesFactory factory = new AttributesFactory();
         factory.setScope(Scope.LOCAL);
         createRegion(name, factory.create());
-        pause(1000);
+        Wait.pause(1000);
         try {
           startBridgeServer(0, false);
         } catch (Exception ex) {
-          fail("While starting CacheServer", ex);
+          Assert.fail("While starting CacheServer", ex);
         }
       }
     });
@@ -2027,7 +2028,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
           QueryService qService = region.getCache().getQueryService();
           qService.createIndex("idIndex",IndexType.FUNCTIONAL, "id", region.getFullPath());
         } catch (Exception e) {
-          fail("Failed to create index.", e);
+          Assert.fail("Failed to create index.", e);
         }          
 
       }
@@ -2035,7 +2036,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
 
     // Create client region
     final int port = vm0.invokeInt(QueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(vm0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
 
     final String regionName = "/" + rootRegionName + "/" + name;
 
@@ -2058,7 +2059,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
         try {
           qService = (PoolManager.find(poolName)).getQueryService();
         } catch (Exception e) {
-          fail("Failed to get QueryService.", e);
+          Assert.fail("Failed to get QueryService.", e);
         }          
 
         // order by value query
@@ -2073,7 +2074,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
             Query query = qService.newQuery(queryString);
             results = (SelectResults)query.execute();
           } catch (Exception e) {
-            fail("Failed executing " + queryString, e);
+            Assert.fail("Failed executing " + queryString, e);
           }
           assertEquals(numberOfEntries, results.size());
           // All order-by query results are stored in a ResultsCollectionWrapper
@@ -2114,7 +2115,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
             Query query = qService.newQuery(queryString);
             results = (SelectResults)query.execute();
           } catch (Exception e) {
-            fail("Failed executing " + queryString, e);
+            Assert.fail("Failed executing " + queryString, e);
           }
           assertEquals(numberOfEntries, results.size());
           // All order-by query results are stored in a ResultsCollectionWrapper
@@ -2159,14 +2160,14 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
     vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
       public void run2() throws CacheException {
         Properties config = new Properties();
-        config.setProperty("locators", "localhost["+getDUnitLocatorPort()+"]");
+        config.setProperty("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
         system = (InternalDistributedSystem) DistributedSystem.connect(config);
 
-        pause(1000);
+        Wait.pause(1000);
         try {
           startBridgeServer(0, false);
         } catch (Exception ex) {
-          fail("While starting CacheServer", ex);
+          Assert.fail("While starting CacheServer", ex);
         }
       }
     });
@@ -2186,7 +2187,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
     });
 
     final int port = vm0.invokeInt(QueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(vm0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
 
     final String regionName1 = "/" + rootRegionName + "/" + name;
 //    final String regionName2 = "/" + rootRegionName + "/" + name + "_2";
@@ -2210,7 +2211,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
         try {
           qService = (PoolManager.find(poolName)).getQueryService();
         } catch (Exception e) {
-          fail("Failed to get QueryService.", e);
+          Assert.fail("Failed to get QueryService.", e);
         }
 
         // Testing Remote Query with params.
@@ -2220,7 +2221,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
         } catch(UnsupportedOperationException e) {
           // Expected behavior.
         } catch (Exception e){
-          fail("Failed with UnExpected Exception.", e);
+          Assert.fail("Failed with UnExpected Exception.", e);
         }
 
         // Test with Index.
@@ -2229,7 +2230,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
         } catch(UnsupportedOperationException e) {
           // Expected behavior.
         } catch (Exception e){
-          fail("Failed with UnExpected Exception.", e);
+          Assert.fail("Failed with UnExpected Exception.", e);
         }
 
         try {
@@ -2238,7 +2239,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
         } catch(UnsupportedOperationException e) {
           // Expected behavior.
         } catch (Exception e){
-          fail("Failed with UnExpected Exception.", e);
+          Assert.fail("Failed with UnExpected Exception.", e);
         }
 
         try {
@@ -2304,16 +2305,16 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
       type1 = ((SelectResults)r[j][0]).getCollectionType().getElementType();
       type2 = ((SelectResults)r[j][1]).getCollectionType().getElementType();
       if ((type1.getClass().getName()).equals(type2.getClass().getName())) {
-        getLogWriter().info("Both SelectResults are of the same Type i.e.--> "
+        LogWriterUtils.getLogWriter().info("Both SelectResults are of the same Type i.e.--> "
             + ((SelectResults)r[j][0]).getCollectionType().getElementType());
       }
       else {
-        getLogWriter().info("Classes are : " + type1.getClass().getName() + " "
+        LogWriterUtils.getLogWriter().info("Classes are : " + type1.getClass().getName() + " "
             + type2.getClass().getName());
         fail("FAILED:Select result Type is different in both the cases");
       }
       if (((SelectResults)r[j][0]).size() == ((SelectResults)r[j][1]).size()) {
-        getLogWriter().info("Both SelectResults are of Same Size i.e.  Size= "
+        LogWriterUtils.getLogWriter().info("Both SelectResults are of Same Size i.e.  Size= "
             + ((SelectResults)r[j][1]).size());
       }
       else {
@@ -2324,7 +2325,7 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
       set2 = (((SelectResults)r[j][1]).asSet());
       set1 = (((SelectResults)r[j][0]).asSet());
       
-      getLogWriter().info(" SIZE1 = " + set1.size() + " SIZE2 = " + set2.size());
+      LogWriterUtils.getLogWriter().info(" SIZE1 = " + set1.size() + " SIZE2 = " + set2.size());
       
 //      boolean pass = true;
       itert1 = set1.iterator();
@@ -2334,10 +2335,10 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
 
         boolean exactMatch = false;
         while (itert2.hasNext()) {
-          getLogWriter().info("### Comparing results..");
+          LogWriterUtils.getLogWriter().info("### Comparing results..");
           Object p2 = itert2.next();
           if (p1 instanceof Struct) {
-            getLogWriter().info("ITS a Set");
+            LogWriterUtils.getLogWriter().info("ITS a Set");
             Object[] values1 = ((Struct)p1).getFieldValues();
             Object[] values2 = ((Struct)p2).getFieldValues();
             assertEquals(values1.length, values2.length);
@@ -2350,41 +2351,41 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
             exactMatch = elementEqual;
           }
           else {
-            getLogWriter().info("Not a Set p2:" + p2 + " p1: " + p1);
+            LogWriterUtils.getLogWriter().info("Not a Set p2:" + p2 + " p1: " + p1);
             if (p2 instanceof TestObject) {
-              getLogWriter().info("An instance of TestObject");
+              LogWriterUtils.getLogWriter().info("An instance of TestObject");
               exactMatch = p2.equals(p1);
             } else {
-              getLogWriter().info("Not an instance of TestObject" + p2.getClass().getCanonicalName());
+              LogWriterUtils.getLogWriter().info("Not an instance of TestObject" + p2.getClass().getCanonicalName());
               exactMatch = p2.equals(p1);
             }
           }
           if (exactMatch) {
-            getLogWriter().info("Exact MATCH");
+            LogWriterUtils.getLogWriter().info("Exact MATCH");
             break;
           }
         }
         if (!exactMatch) {
-          getLogWriter().info("NOT A MATCH");
+          LogWriterUtils.getLogWriter().info("NOT A MATCH");
           fail("Atleast one element in the pair of SelectResults supposedly identical, is not equal ");
         }
       }
-      getLogWriter().info("### Done Comparing results..");
+      LogWriterUtils.getLogWriter().info("### Done Comparing results..");
     }
   }
 
   protected void configAndStartBridgeServer() {
     Properties config = new Properties();
-    config.setProperty("locators", "localhost["+getDUnitLocatorPort()+"]");
+    config.setProperty("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
     system = (InternalDistributedSystem) DistributedSystem.connect(config);
     AttributesFactory factory = new AttributesFactory();
     factory.setScope(Scope.LOCAL);
     createRegion(this.regionName, this.rootRegionName, factory.create());
-    pause(1000);
+    Wait.pause(1000);
     try {
       startBridgeServer(0, false);
     } catch (Exception ex) {
-      fail("While starting CacheServer", ex);
+      Assert.fail("While starting CacheServer", ex);
     }
   }
 
@@ -2397,16 +2398,16 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
     try {
       qService = (PoolManager.find(poolName)).getQueryService();
     } catch (Exception e) {
-      fail("Failed to get QueryService.", e);
+      Assert.fail("Failed to get QueryService.", e);
     }          
 
     for (int i=0; i < queryString.length; i++){
       try {
-        getLogWriter().info("### Executing Query :" + queryString[i]);
+        LogWriterUtils.getLogWriter().info("### Executing Query :" + queryString[i]);
         Query query = qService.newQuery(queryString[i]);
         results = (SelectResults)query.execute(params[i]);
       } catch (Exception e) {
-        fail("Failed executing " + queryString[i], e);
+        Assert.fail("Failed executing " + queryString[i], e);
       }
     }        
   }
@@ -2441,17 +2442,17 @@ public class QueryUsingPoolDUnitTest extends CacheTestCase {
     SerializableRunnable closeCache =
       new CacheSerializableRunnable("Close Client") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Close Client. ###");
+        LogWriterUtils.getLogWriter().info("### Close Client. ###");
         try {
           closeCache();
         } catch (Exception ex) {
-          getLogWriter().info("### Failed to get close client. ###");
+          LogWriterUtils.getLogWriter().info("### Failed to get close client. ###");
         }
       }
     };
     
     client.invoke(closeCache);
-    pause(2 * 1000);
+    Wait.pause(2 * 1000);
   }
   
   private static int getCacheServerPort() {

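The hunks above all make the same mechanical substitution: helpers that these tests previously called as bare inherited statics (fail, getLogWriter, pause, getServerHostName, getDUnitLocatorPort) are now invoked on the utility classes in com.gemstone.gemfire.test.dunit (Assert, LogWriterUtils, Wait, NetworkUtils, DistributedTestUtils). A minimal sketch of the new calling pattern follows; the enclosing class, method, and VM parameter are hypothetical scaffolding added for illustration only, while the utility calls themselves are the ones visible in the diff.

import java.util.Properties;

import com.gemstone.gemfire.test.dunit.Assert;
import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
import com.gemstone.gemfire.test.dunit.LogWriterUtils;
import com.gemstone.gemfire.test.dunit.NetworkUtils;
import com.gemstone.gemfire.test.dunit.VM;
import com.gemstone.gemfire.test.dunit.Wait;

// Hypothetical illustration class; not part of the commit.
public class DUnitUtilityCallSketch {

  void connectAndLog(VM serverVM) {
    Properties config = new Properties();
    // Locator port lookup now lives on DistributedTestUtils.
    config.setProperty("locators",
        "localhost[" + DistributedTestUtils.getDUnitLocatorPort() + "]");

    // Server host name lookup now lives on NetworkUtils.
    String host0 = NetworkUtils.getServerHostName(serverVM.getHost());

    // Test logging is now obtained through LogWriterUtils.
    LogWriterUtils.getLogWriter().info("### Connecting to " + host0);

    // Fixed sleeps are now on Wait.
    Wait.pause(1000);

    try {
      // ... start a bridge server / run a query here ...
    } catch (Exception e) {
      // Failures with a cause are now reported through the dunit Assert class.
      Assert.fail("While starting CacheServer", e);
    }
  }
}

The same substitutions repeat in the RemoteQueryDUnitTest diff below, so the sketch above applies to both files.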
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/RemoteQueryDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/RemoteQueryDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/RemoteQueryDUnitTest.java
index dfbe69e..fc79893 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/RemoteQueryDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/RemoteQueryDUnitTest.java
@@ -42,9 +42,14 @@ import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.cache30.ClientServerTestCase;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
+import com.gemstone.gemfire.test.dunit.Assert;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 import cacheRunner.Portfolio;
 import cacheRunner.Position;
@@ -74,13 +79,8 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
   }
 
   @Override
-  public void tearDown2() throws Exception {
-    try {
-      super.tearDown2();
-    }
-    finally {
-      disconnectAllFromDS();
-    }
+  protected final void postTearDownCacheTestCase() throws Exception {
+    disconnectAllFromDS();
   }
 
   /**
@@ -98,16 +98,16 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
     vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
         public void run2() throws CacheException {
           Properties config = new Properties();
-          config.setProperty("locators", "localhost["+getDUnitLocatorPort()+"]");
+          config.setProperty("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
           system = (InternalDistributedSystem) DistributedSystem.connect(config);
           AttributesFactory factory = new AttributesFactory();
           factory.setScope(Scope.LOCAL);
           createRegion(name, factory.create());
-          pause(1000);
+          Wait.pause(1000);
           try {
             startBridgeServer(0, false);
           } catch (Exception ex) {
-            fail("While starting CacheServer", ex);
+            Assert.fail("While starting CacheServer", ex);
           }
         }
       });
@@ -124,7 +124,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
 
     // Create client region
     final int port = vm0.invokeInt(RemoteQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(vm0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
     vm1.invoke(new CacheSerializableRunnable("Create region") {
         public void run2() throws CacheException {
           Properties config = new Properties();
@@ -149,7 +149,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
           try {
             results = region.query(queryString);
           } catch (Exception e) {
-            fail("Failed executing " + queryString, e);
+            Assert.fail("Failed executing " + queryString, e);
           }
           assertEquals(numberOfEntries, results.size());
           assertTrue(results.getClass() == ResultsBag.class);
@@ -159,7 +159,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
           try {
             results = region.query(queryString);
           } catch (Exception e) {
-            fail("Failed executing " + queryString, e);
+            Assert.fail("Failed executing " + queryString, e);
           }
           assertEquals(0, results.size());
           assertTrue(results.getClass() == ResultsBag.class);
@@ -169,7 +169,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
           try {
             results = region.query(queryString);
           } catch (Exception e) {
-            fail("Failed executing " + queryString, e);
+            Assert.fail("Failed executing " + queryString, e);
           }
           assertEquals(numberOfEntries/2, results.size());
           assertTrue(results.getClass() == ResultsBag.class);
@@ -179,7 +179,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
           try {
             results = region.query(queryString);
           } catch (Exception e) {
-            fail("Failed executing " + queryString, e);
+            Assert.fail("Failed executing " + queryString, e);
           }
           assertEquals(1, results.size());
           assertTrue(results.getClass() == ResultsBag.class);
@@ -189,7 +189,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
           try {
             results = region.query(queryString);
           } catch (Exception e) {
-            fail("Failed executing " + queryString, e);
+            Assert.fail("Failed executing " + queryString, e);
           }
           assertEquals(1, results.size());
           assertTrue(results.getClass() == ResultsBag.class);
@@ -241,16 +241,16 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
     vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
         public void run2() throws CacheException {
           Properties config = new Properties();
-          config.setProperty("locators", "localhost["+getDUnitLocatorPort()+"]");
+          config.setProperty("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
           system = (InternalDistributedSystem) DistributedSystem.connect(config);
           AttributesFactory factory = new AttributesFactory();
           factory.setScope(Scope.LOCAL);
           createRegion(name, factory.create());
-          pause(1000);
+          Wait.pause(1000);
           try {
             startBridgeServer(0, false);
           } catch (Exception ex) {
-            fail("While starting CacheServer", ex);
+            Assert.fail("While starting CacheServer", ex);
           }
         }
       });
@@ -267,7 +267,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
 
     // Create client region
     final int port = vm0.invokeInt(RemoteQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(vm0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
     vm1.invoke(new CacheSerializableRunnable("Create region") {
         public void run2() throws CacheException {
           Properties config = new Properties();
@@ -293,7 +293,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
           try {
             results = region.query(queryString);
           } catch (Exception e) {
-            fail("Failed executing " + queryString, e);
+            Assert.fail("Failed executing " + queryString, e);
           }
           assertEquals(numberOfEntries, results.size());
           assertTrue(!results.getCollectionType().allowsDuplicates());
@@ -302,7 +302,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
           try {
             results = region.query(queryString);
           } catch (Exception e) {
-            fail("Failed executing " + queryString, e);
+            Assert.fail("Failed executing " + queryString, e);
           }
           assertEquals(numberOfEntries, results.size());
           assertTrue(!results.getCollectionType().allowsDuplicates());
@@ -311,7 +311,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
           try {
             results = region.query(queryString);
           } catch (Exception e) {
-            fail("Failed executing " + queryString, e);
+            Assert.fail("Failed executing " + queryString, e);
           }
           assertEquals(0, results.size());
           assertTrue(!results.getCollectionType().allowsDuplicates());
@@ -320,7 +320,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
           try {
             results = region.query(queryString);
           } catch (Exception e) {
-            fail("Failed executing " + queryString, e);
+            Assert.fail("Failed executing " + queryString, e);
           }
           assertEquals(numberOfEntries/2, results.size());
           assertTrue(!results.getCollectionType().allowsDuplicates());
@@ -329,7 +329,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
           try {
             results = region.query(queryString);
           } catch (Exception e) {
-            fail("Failed executing " + queryString, e);
+            Assert.fail("Failed executing " + queryString, e);
           }
           assertEquals(1, results.size());
           assertTrue(!results.getCollectionType().allowsDuplicates());
@@ -338,7 +338,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
           try {
             results = region.query(queryString);
           } catch (Exception e) {
-            fail("Failed executing " + queryString, e);
+            Assert.fail("Failed executing " + queryString, e);
           }
           assertEquals(1, results.size());
           assertTrue(!results.getCollectionType().allowsDuplicates());
@@ -369,16 +369,16 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
     vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
         public void run2() throws CacheException {
           Properties config = new Properties();
-          config.setProperty("locators", "localhost["+getDUnitLocatorPort()+"]");
+          config.setProperty("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
           system = (InternalDistributedSystem) DistributedSystem.connect(config);
           AttributesFactory factory = new AttributesFactory();
           factory.setScope(Scope.LOCAL);
           createRegion(name, factory.create());
-          pause(1000);
+          Wait.pause(1000);
           try {
             startBridgeServer(0, false);
           } catch (Exception ex) {
-            fail("While starting CacheServer", ex);
+            Assert.fail("While starting CacheServer", ex);
           }
         }
       });
@@ -395,7 +395,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
 
     // Create client region
     final int port = vm0.invokeInt(RemoteQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(vm0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
     vm1.invoke(new CacheSerializableRunnable("Create region") {
         public void run2() throws CacheException {
           Properties config = new Properties();
@@ -420,7 +420,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
           try {
             results = region.query(queryString);
           } catch (Exception e) {
-            fail("Failed executing " + queryString, e);
+            Assert.fail("Failed executing " + queryString, e);
           }
           assertEquals(numberOfEntries, results.size());
           assertTrue(!results.getCollectionType().allowsDuplicates() && results.getCollectionType().getElementType().isStructType());
@@ -429,7 +429,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
           try {
             results = region.query(queryString);
           } catch (Exception e) {
-            fail("Failed executing " + queryString, e);
+            Assert.fail("Failed executing " + queryString, e);
           }
           assertEquals(numberOfEntries, results.size());
           assertTrue(!results.getCollectionType().allowsDuplicates() && results.getCollectionType().getElementType().isStructType());
@@ -438,7 +438,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
           try {
             results = region.query(queryString);
           } catch (Exception e) {
-            fail("Failed executing " + queryString, e);
+            Assert.fail("Failed executing " + queryString, e);
           }
           assertEquals(0, results.size());
           assertTrue(!results.getCollectionType().allowsDuplicates() && results.getCollectionType().getElementType().isStructType());
@@ -447,7 +447,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
           try {
             results = region.query(queryString);
           } catch (Exception e) {
-            fail("Failed executing " + queryString, e);
+            Assert.fail("Failed executing " + queryString, e);
           }
           assertEquals(numberOfEntries/2, results.size());
           assertTrue(!results.getCollectionType().allowsDuplicates() && results.getCollectionType().getElementType().isStructType());
@@ -456,7 +456,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
           try {
             results = region.query(queryString);
           } catch (Exception e) {
-            fail("Failed executing " + queryString, e);
+            Assert.fail("Failed executing " + queryString, e);
           }
           assertEquals(1, results.size());
           assertTrue(!results.getCollectionType().allowsDuplicates() && results.getCollectionType().getElementType().isStructType());
@@ -465,7 +465,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
           try {
             results = region.query(queryString);
           } catch (Exception e) {
-            fail("Failed executing " + queryString, e);
+            Assert.fail("Failed executing " + queryString, e);
           }
           assertEquals(1, results.size());
           assertTrue(!results.getCollectionType().allowsDuplicates() && results.getCollectionType().getElementType().isStructType());
@@ -495,16 +495,16 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
     vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
         public void run2() throws CacheException {
           Properties config = new Properties();
-          config.setProperty("locators", "localhost["+getDUnitLocatorPort()+"]");
+          config.setProperty("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
           system = (InternalDistributedSystem) DistributedSystem.connect(config);
           AttributesFactory factory = new AttributesFactory();
           factory.setScope(Scope.LOCAL);
           createRegion(name, factory.create());
-          pause(1000);
+          Wait.pause(1000);
           try {
             startBridgeServer(0, false);
           } catch (Exception ex) {
-            fail("While starting CacheServer", ex);
+            Assert.fail("While starting CacheServer", ex);
           }
         }
       });
@@ -628,7 +628,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
 
     // Create client region
     final int port = vm0.invokeInt(RemoteQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(vm0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
     vm1.invoke(new CacheSerializableRunnable("Create region") {
         public void run2() throws CacheException {
           Properties config = new Properties();
@@ -657,9 +657,9 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
           try {
             results = region.query(queryString);
           } catch (Exception e) {
-            fail("Failed executing " + queryString, e);
+            Assert.fail("Failed executing " + queryString, e);
           }
-          getLogWriter().fine("size: " + results.size());
+          LogWriterUtils.getLogWriter().fine("size: " + results.size());
           //assertEquals(numberOfEntries, results.size());
           assertTrue(!results.getCollectionType().allowsDuplicates() && results.getCollectionType().getElementType().isStructType());
         }
@@ -689,16 +689,16 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
     vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
         public void run2() throws CacheException {
           Properties config = new Properties();
-          config.setProperty("locators", "localhost["+getDUnitLocatorPort()+"]");
+          config.setProperty("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
           system = (InternalDistributedSystem) DistributedSystem.connect(config);
           AttributesFactory factory = new AttributesFactory();
           factory.setScope(Scope.LOCAL);
           createRegion(name, factory.create());
-          pause(1000);
+          Wait.pause(1000);
           try {
             startBridgeServer(0, false);
           } catch (Exception ex) {
-            fail("While starting CacheServer", ex);
+            Assert.fail("While starting CacheServer", ex);
           }
         }
       });
@@ -715,7 +715,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
 
     // Create client region
     final int port = vm0.invokeInt(RemoteQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(vm0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
     vm1.invoke(new CacheSerializableRunnable("Create region") {
         public void run2() throws CacheException {
           Properties config = new Properties();
@@ -743,7 +743,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
           try {
             results = region.query(queryString);
           } catch (Exception e) {
-            fail("Failed executing " + queryString, e);
+            Assert.fail("Failed executing " + queryString, e);
           }
           assertEquals(1, results.size());
           assertTrue(!results.getCollectionType().allowsDuplicates());
@@ -754,7 +754,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
           try {
             results = region.query(queryString);
           } catch (Exception e) {
-            fail("Failed executing " + queryString, e);
+            Assert.fail("Failed executing " + queryString, e);
           }
           assertEquals(1, results.size());
           assertTrue(!results.getCollectionType().allowsDuplicates());
@@ -765,7 +765,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
           try {
             results = region.query(queryString);
           } catch (Exception e) {
-            fail("Failed executing " + queryString, e);
+            Assert.fail("Failed executing " + queryString, e);
         }
           assertEquals(numberOfEntries, results.size());
           // All order-by query results are stored in a ResultsCollectionWrapper
@@ -788,7 +788,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
           try {
             results = region.query(queryString);
           } catch (Exception e) {
-            fail("Failed executing " + queryString, e);
+            Assert.fail("Failed executing " + queryString, e);
           }
           assertEquals(numberOfEntries, results.size());
           // All order-by query results are stored in a ResultsCollectionWrapper
@@ -811,7 +811,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
           try {
             results = region.query(queryString);
           } catch (Exception e) {
-            fail("Failed executing " + queryString, e);
+            Assert.fail("Failed executing " + queryString, e);
           }
           assertEquals(1, results.size());
           Object result = results.iterator().next();
@@ -824,7 +824,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
           try {
             results = region.query(queryString);
           } catch (Exception e) {
-            fail("Failed executing " + queryString, e);
+            Assert.fail("Failed executing " + queryString, e);
           }
           assertEquals(1, results.size());
           assertEquals("key-1", results.asList().get(0));
@@ -854,17 +854,17 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
     vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
         public void run2() throws CacheException {
           Properties config = new Properties();
-          config.setProperty("locators", "localhost["+getDUnitLocatorPort()+"]");
+          config.setProperty("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
           system = (InternalDistributedSystem) DistributedSystem.connect(config);
           AttributesFactory factory = new AttributesFactory();
           factory.setScope(Scope.LOCAL);
           createRegion(name+"1", factory.create());
           createRegion(name+"2", factory.create());
-          pause(1000);
+          Wait.pause(1000);
           try {
             startBridgeServer(0, false);
           } catch (Exception ex) {
-            fail("While starting CacheServer", ex);
+            Assert.fail("While starting CacheServer", ex);
           }
         }
       });
@@ -885,7 +885,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
 
     // Create client region
     final int port = vm0.invokeInt(RemoteQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(vm0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
     vm1.invoke(new CacheSerializableRunnable("Create region") {
         public void run2() throws CacheException {
           Properties config = new Properties();
@@ -913,7 +913,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
           try {
             results = region1.query(queryString);
           } catch (Exception e) {
-            fail("Failed executing " + queryString, e);
+            Assert.fail("Failed executing " + queryString, e);
           }
           assertEquals(numberOfEntries, results.size());
           assertTrue(!results.getCollectionType().allowsDuplicates() && results.getCollectionType().getElementType().isStructType());
@@ -923,7 +923,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
           try {
             results = region1.query(queryString);
           } catch (Exception e) {
-            fail("Failed executing " + queryString, e);
+            Assert.fail("Failed executing " + queryString, e);
           }
           assertEquals(1, results.size());
           assertTrue(!results.getCollectionType().allowsDuplicates() && results.getCollectionType().getElementType().isStructType());
@@ -955,7 +955,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
     vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
         public void run2() throws CacheException {
           Properties config = new Properties();
-          config.setProperty("locators", "localhost["+getDUnitLocatorPort()+"]");
+          config.setProperty("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
           system = (InternalDistributedSystem) DistributedSystem.connect(config);
           AttributesFactory factory = new AttributesFactory();
           factory.setScope(Scope.LOCAL);
@@ -963,7 +963,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
           try {
             startBridgeServer(0, false);
           } catch (Exception ex) {
-            fail("While starting CacheServer", ex);
+            Assert.fail("While starting CacheServer", ex);
           }
         }
       });
@@ -979,7 +979,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
       });
 
     final int port = vm0.invokeInt(RemoteQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(vm0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
 
     // Create client region in VM1
     vm1.invoke(new CacheSerializableRunnable("Create region") {
@@ -1022,7 +1022,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
           try {
             results = region.query(queryString);
           } catch (Exception e) {
-            fail("Failed executing " + queryString, e);
+            Assert.fail("Failed executing " + queryString, e);
           }
           assertEquals(1, results.size());
           assertTrue(!results.getCollectionType().allowsDuplicates() && !results.getCollectionType().getElementType().isStructType());
@@ -1032,7 +1032,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
           try {
             results = region.query(queryString);
           } catch (Exception e) {
-            fail("Failed executing " + queryString, e);
+            Assert.fail("Failed executing " + queryString, e);
           }
           assertEquals(1, results.size());
           assertTrue(!results.getCollectionType().allowsDuplicates() && !results.getCollectionType().getElementType().isStructType());
@@ -1051,7 +1051,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
           try {
             results = region.query(queryString);
           } catch (Exception e) {
-            fail("Failed executing " + queryString, e);
+            Assert.fail("Failed executing " + queryString, e);
           }
           assertEquals(1, results.size());
           assertTrue(!results.getCollectionType().allowsDuplicates() && !results.getCollectionType().getElementType().isStructType());
@@ -1061,7 +1061,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
           try {
             results = region.query(queryString);
           } catch (Exception e) {
-            fail("Failed executing " + queryString, e);
+            Assert.fail("Failed executing " + queryString, e);
           }
           assertEquals(1, results.size());
           assertTrue(!results.getCollectionType().allowsDuplicates() && !results.getCollectionType().getElementType().isStructType());
@@ -1114,7 +1114,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
      vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
          public void run2() throws CacheException {
            Properties config = new Properties();
-           config.setProperty("locators", "localhost["+getDUnitLocatorPort()+"]");
+           config.setProperty("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
            system = (InternalDistributedSystem) DistributedSystem.connect(config);
            AttributesFactory factory = new AttributesFactory();
            factory.setScope(Scope.LOCAL);
@@ -1122,7 +1122,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
            try {
              startBridgeServer(0, false);
            } catch (Exception ex) {
-             fail("While starting CacheServer", ex);
+             Assert.fail("While starting CacheServer", ex);
            }
          }
        });
@@ -1138,7 +1138,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
        });
 
      final int port = vm0.invokeInt(RemoteQueryDUnitTest.class, "getCacheServerPort");
-     final String host0 = getServerHostName(vm0.getHost());
+     final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
 
      // Create client region in VM1
      vm1.invoke(new CacheSerializableRunnable("Create region") {
@@ -1167,7 +1167,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
              try {
                results = region.query(queryStrings[i]);
              } catch (Exception e) {
-               fail("Failed executing " + queryStrings[i], e);
+               Assert.fail("Failed executing " + queryStrings[i], e);
              }
              assertEquals(9, results.size());
              String msg = "results expected to be instance of ResultsBag,"
@@ -1218,7 +1218,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
       vm0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
           public void run2() throws CacheException {
             Properties config = new Properties();
-            config.setProperty("locators", "localhost["+getDUnitLocatorPort()+"]");
+            config.setProperty("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
             system = (InternalDistributedSystem) DistributedSystem.connect(config);
             AttributesFactory factory = new AttributesFactory();
             factory.setScope(Scope.LOCAL);
@@ -1233,7 +1233,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
             try {
               startBridgeServer(0, false);
             } catch (Exception ex) {
-              fail("While starting CacheServer", ex);
+              Assert.fail("While starting CacheServer", ex);
             }
           }
         });
@@ -1249,7 +1249,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
         });
 
       final int port = vm0.invokeInt(RemoteQueryDUnitTest.class, "getCacheServerPort");
-      final String host0 = getServerHostName(vm0.getHost());
+      final String host0 = NetworkUtils.getServerHostName(vm0.getHost());
 
       // Create client region in VM1
       vm1.invoke(new CacheSerializableRunnable("Create region") {
@@ -1279,7 +1279,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
               } catch (QueryInvocationTargetException qte) {
                 //Ok test passed
               } catch(Exception e) {
-                fail("Failed executing query " + queryStrings+ " due  to unexpected Excecption", e);
+                Assert.fail("Failed executing query " + queryStrings+ " due  to unexpected Excecption", e);
             }
           }
         });
@@ -1318,7 +1318,7 @@ public class RemoteQueryDUnitTest extends CacheTestCase {
               } catch (QueryInvocationTargetException qte) {
                 //Ok test passed
               } catch(Exception e) {
-                fail("Failed executing query " + queryString+ " due  to unexpected Excecption", e);
+                Assert.fail("Failed executing query " + queryString+ " due  to unexpected Excecption", e);
             }
           }
         });


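The hunks above and below apply one mechanical migration: helpers that DUnit tests
previously inherited from DistributedTestCase are now invoked as static methods on
dedicated utility classes in com.gemstone.gemfire.test.dunit. A minimal sketch of the
new call sites is collected below for readers skimming the series. The class, the
method, and the "SocketException" string are hypothetical, and the Host/VM lookup is
assumed to be the usual DUnit idiom; each utility call itself (addIgnoredException,
getDUnitLocatorPort, getServerHostName, getLogWriter, pause, and the two-argument
Assert.fail) mirrors a usage that appears verbatim in the surrounding diffs.

    import java.util.Properties;

    import com.gemstone.gemfire.test.dunit.Assert;
    import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
    import com.gemstone.gemfire.test.dunit.Host;
    import com.gemstone.gemfire.test.dunit.IgnoredException;
    import com.gemstone.gemfire.test.dunit.LogWriterUtils;
    import com.gemstone.gemfire.test.dunit.NetworkUtils;
    import com.gemstone.gemfire.test.dunit.VM;
    import com.gemstone.gemfire.test.dunit.Wait;

    // Illustrative sketch only -- not part of the commit. Each call below shows the
    // static utility that replaces a formerly inherited DistributedTestCase helper.
    public class DUnitHelperMigrationSketch {

      static void exerciseMigratedHelpers() {
        VM vm0 = Host.getHost(0).getVM(0);   // assumed standard DUnit VM lookup

        // was: addExpectedException("...")
        IgnoredException.addIgnoredException("SocketException");

        // was: getDUnitLocatorPort()
        Properties config = new Properties();
        config.setProperty("locators",
            "localhost[" + DistributedTestUtils.getDUnitLocatorPort() + "]");

        // was: getServerHostName(host)
        String host0 = NetworkUtils.getServerHostName(vm0.getHost());

        // was: getLogWriter()
        LogWriterUtils.getLogWriter().info("connecting to " + host0 + " with " + config);

        // was: pause(1000)
        Wait.pause(1000);

        try {
          // ... exercise the cache under test here ...
        } catch (Exception e) {
          // was: fail(message, throwable)
          Assert.fail("While exercising the cache", e);
        }
      }
    }
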
[49/62] [abbrv] [partial] incubator-geode git commit: GEODE-773: Extract static methods from DistributedTestCase

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/PdxStringQueryDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/PdxStringQueryDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/PdxStringQueryDUnitTest.java
index 52a3763..42459c9 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/PdxStringQueryDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/PdxStringQueryDUnitTest.java
@@ -61,8 +61,13 @@ import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.persistence.query.CloseableIterator;
 import com.gemstone.gemfire.pdx.internal.PdxString;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
@@ -129,7 +134,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
         try {
           localQueryService = getCache().getQueryService();
         } catch (Exception e) {
-          fail("Failed to get QueryService.", e);
+          Assert.fail("Failed to get QueryService.", e);
         } 
         Index index = null;
         // create an index on statusIndexed is created
@@ -165,7 +170,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
     final int port1 = server1.invokeInt(PdxStringQueryDUnitTest.class, "getCacheServerPort");
     final int port2 = server2.invokeInt(PdxStringQueryDUnitTest.class, "getCacheServerPort");
 
-    final String host0 = getServerHostName(server0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server0.getHost());
 
     // Create client pool.
     final String poolName = "testClientServerQueryPool"; 
@@ -178,7 +183,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
         factory.setScope(Scope.LOCAL);
         ClientServerTestCase.configureConnectionPool(factory, host0, port1,-1, true, -1, -1, null);
         Region region = createRegion(regionName, rootRegionName,  factory.create());
-        getLogWriter().info("Put PortfolioPdx");
+        LogWriterUtils.getLogWriter().info("Put PortfolioPdx");
         for (int i=0; i<numberOfEntries; i++) {
           region.put("key-"+i, new PortfolioPdx(i));
          }
@@ -200,43 +205,43 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
           remoteQueryService = (PoolManager.find(poolName)).getQueryService();
           localQueryService = getCache().getQueryService();
         } catch (Exception e) {
-          fail("Failed to get QueryService.", e);
+          Assert.fail("Failed to get QueryService.", e);
         }
         
         for (int i=0; i < queryString.length; i++){
           try {
-            getLogWriter().info("### Executing Query on remote server:" + queryString[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query on remote server:" + queryString[i]);
             Query query = remoteQueryService.newQuery(queryString[i]);
             rs[0][0] = (SelectResults)query.execute();
             resWithoutIndexRemote[i] = rs[0][0];
-            getLogWriter().info("RR remote indexType: no index  size of resultset: "+ rs[0][0].size() + " for query: " + queryString[i]);;
+            LogWriterUtils.getLogWriter().info("RR remote indexType: no index  size of resultset: "+ rs[0][0].size() + " for query: " + queryString[i]);;
             checkForPdxString(rs[0][0].asList(), queryString[i]);
             
-            getLogWriter().info("### Executing Query locally on client:" + queryString[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query locally on client:" + queryString[i]);
             query = localQueryService.newQuery(queryString[i]);
             rs[0][1] = (SelectResults)query.execute();
             resWithoutIndexLocal[i] = rs[0][1];
-            getLogWriter().info("RR  client local indexType:no index size of resultset: "+ rs[0][1].size() + " for query: " + queryString[i]);;
+            LogWriterUtils.getLogWriter().info("RR  client local indexType:no index size of resultset: "+ rs[0][1].size() + " for query: " + queryString[i]);;
             checkForPdxString(rs[0][1].asList(), queryString[i]);
           } catch (Exception e) {
-            fail("Failed executing " + queryString[i], e);
+            Assert.fail("Failed executing " + queryString[i], e);
           }
           try{
             // to compare remote query results with and without index
-            getLogWriter().info("### Executing Query on remote server for region2:" + queryString2[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query on remote server for region2:" + queryString2[i]);
             Query query = remoteQueryService.newQuery(queryString2[i]);
             resWithIndexRemote[i] = (SelectResults)query.execute();
-            getLogWriter().info("RR  remote region2 size of resultset: "+ resWithIndexRemote[i].size() + " for query: " + queryString2[i]);;
+            LogWriterUtils.getLogWriter().info("RR  remote region2 size of resultset: "+ resWithIndexRemote[i].size() + " for query: " + queryString2[i]);;
             checkForPdxString(resWithIndexRemote[i].asList(), queryString2[i]);
 
            // to compare local query results with and without index
-            getLogWriter().info("### Executing Query on local for region2:" + queryString2[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query on local for region2:" + queryString2[i]);
             query = localQueryService.newQuery(queryString2[i]);
             resWithIndexLocal[i] = (SelectResults)query.execute();
-            getLogWriter().info("RR  local region2 size of resultset: "+ resWithIndexLocal[i].size() + " for query: " + queryString2[i]);;
+            LogWriterUtils.getLogWriter().info("RR  local region2 size of resultset: "+ resWithIndexLocal[i].size() + " for query: " + queryString2[i]);;
             checkForPdxString(resWithIndexLocal[i].asList(), queryString2[i]);
           } catch (Exception e) {
-            fail("Failed executing " + queryString2[i], e);
+            Assert.fail("Failed executing " + queryString2[i], e);
           }
 
             if(i < orderByQueryIndex){
@@ -290,7 +295,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
 
-        getLogWriter().info("Put Objects locally on server");
+        LogWriterUtils.getLogWriter().info("Put Objects locally on server");
         for (int i=numberOfEntries; i<numberOfEntries*2; i++) {
           region.put("key-"+i, new Portfolio(i));
          }
@@ -299,22 +304,22 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
         // Query server1 locally to check if PdxString is not being returned
         for (int i = 0; i < queryString.length; i++) {
           try {
-            getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
             SelectResults rs = (SelectResults) localQueryService.newQuery(queryString[i]).execute();
-            getLogWriter().info("RR server local indexType: no  size of resultset: " + rs.size() + " for query: " + queryString[i]);
+            LogWriterUtils.getLogWriter().info("RR server local indexType: no  size of resultset: " + rs.size() + " for query: " + queryString[i]);
             // The results should not be PdxString
             checkForPdxString(rs.asList(), queryString[i]);
 
           } catch (Exception e) {
-            fail("Failed executing " + queryString[i], e);
+            Assert.fail("Failed executing " + queryString[i], e);
           }
           try{
             SelectResults rs = (SelectResults) localQueryService.newQuery(queryString2[i]).execute();
-            getLogWriter().info("RR server local indexType: no size of resultset: " + rs.size() + " for query: " + queryString2[i]);
+            LogWriterUtils.getLogWriter().info("RR server local indexType: no size of resultset: " + rs.size() + " for query: " + queryString2[i]);
             // The results should not be PdxString
             checkForPdxString(rs.asList(), queryString2[i]);
           }catch (Exception e) {
-            fail("Failed executing " + queryString2[i], e);
+            Assert.fail("Failed executing " + queryString2[i], e);
           }
         }
        }
@@ -330,13 +335,13 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
         // Query server1 locally to check if PdxString is not being returned
         for (int i = 0; i < queryString.length; i++) {
           try {
-            getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
             SelectResults rs = (SelectResults) localQueryService.newQuery(queryString[i]).execute();
-            getLogWriter().info("isPR: false server local readSerializedTrue: indexType: false size of resultset: " + rs.size() + " for query: " + queryString[i]);
+            LogWriterUtils.getLogWriter().info("isPR: false server local readSerializedTrue: indexType: false size of resultset: " + rs.size() + " for query: " + queryString[i]);
             // The results should not be PdxString
             checkForPdxString(rs.asList(), queryString[i]);
           } catch (Exception e) {
-            fail("Failed executing " + queryString[i], e);
+            Assert.fail("Failed executing " + queryString[i], e);
           }
         }
       }
@@ -352,13 +357,13 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
         // Query server1 remotely to check if PdxString is not being returned
         for (int i = 0; i < queryString.length; i++) {
           try {
-            getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
             SelectResults rs = (SelectResults) remoteQueryService.newQuery(queryString[i]).execute();
-            getLogWriter().info("RR server remote readSerializedTrue: indexType: false size of resultset: " + rs.size() + " for query: " + queryString[i]);
+            LogWriterUtils.getLogWriter().info("RR server remote readSerializedTrue: indexType: false size of resultset: " + rs.size() + " for query: " + queryString[i]);
             // The results should not be PdxString
             checkForPdxString(rs.asList(), queryString[i]);
           } catch (Exception e) {
-            fail("Failed executing " + queryString[i], e);
+            Assert.fail("Failed executing " + queryString[i], e);
           }
         }
       }
@@ -387,7 +392,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
         try {
           localQueryService = getCache().getQueryService();
         } catch (Exception e) {
-          fail("Failed to get QueryService.", e);
+          Assert.fail("Failed to get QueryService.", e);
         } 
         // Verify the type of index created  
         Index index = null;
@@ -423,7 +428,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
     final int port1 = server1.invokeInt(PdxStringQueryDUnitTest.class, "getCacheServerPort");
     final int port2 = server2.invokeInt(PdxStringQueryDUnitTest.class, "getCacheServerPort");
 
-    final String host0 = getServerHostName(server0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server0.getHost());
 
     // Create client pool.
     final String poolName = "testClientServerQueryPool"; 
@@ -436,7 +441,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
         factory.setScope(Scope.LOCAL);
         ClientServerTestCase.configureConnectionPool(factory, host0, port1,-1, true, -1, -1, null);
         Region region = createRegion(regionName, rootRegionName,  factory.create());
-        getLogWriter().info("Put PortfolioPdx");
+        LogWriterUtils.getLogWriter().info("Put PortfolioPdx");
         for (int i=0; i<numberOfEntries; i++) {
           region.put("key-"+i, new PortfolioPdx(i));
          }
@@ -472,21 +477,21 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
           remoteQueryService = (PoolManager.find(poolName)).getQueryService();
           localQueryService = getCache().getQueryService();
         } catch (Exception e) {
-          fail("Failed to get QueryService.", e);
+          Assert.fail("Failed to get QueryService.", e);
         }
         
         for (int i=0; i < queryString.length; i++){
           try {
-            getLogWriter().info("### Executing Query on remote server:" + queryString[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query on remote server:" + queryString[i]);
             Query query = remoteQueryService.newQuery(queryString[i]);
             rs[0][0] = (SelectResults)query.execute();
-            getLogWriter().info("RR remote indexType: CompactRange size of resultset: "+ rs[0][0].size() + " for query: " + queryString[i]);;
+            LogWriterUtils.getLogWriter().info("RR remote indexType: CompactRange size of resultset: "+ rs[0][0].size() + " for query: " + queryString[i]);;
             checkForPdxString(rs[0][0].asList(), queryString[i]);
              
-            getLogWriter().info("### Executing Query locally on client:" + queryString[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query locally on client:" + queryString[i]);
             query = localQueryService.newQuery(queryString[i]);
             rs[0][1] = (SelectResults)query.execute();
-            getLogWriter().info("RR  client local indexType: CompactRange size of resultset: "+ rs[0][1].size() + " for query: " + queryString[i]);;
+            LogWriterUtils.getLogWriter().info("RR  client local indexType: CompactRange size of resultset: "+ rs[0][1].size() + " for query: " + queryString[i]);;
             checkForPdxString(rs[0][1].asList(), queryString[i]);
              
             if(i < orderByQueryIndex){
@@ -500,7 +505,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
               compareResultsOrder(rs, false);
             }
           } catch (Exception e) {
-            fail("Failed executing " + queryString[i], e);
+            Assert.fail("Failed executing " + queryString[i], e);
           }
         }
         
@@ -514,7 +519,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
 
-        getLogWriter().info("Put Objects locally on server");
+        LogWriterUtils.getLogWriter().info("Put Objects locally on server");
         for (int i=numberOfEntries; i<numberOfEntries*2; i++) {
           region.put("key-"+i, new Portfolio(i));
          }
@@ -523,13 +528,13 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
         // Query server1 locally to check if PdxString is not being returned
         for (int i = 0; i < queryString.length; i++) {
           try {
-            getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
             SelectResults rs = (SelectResults) localQueryService.newQuery(queryString[i]).execute();
-            getLogWriter().info("RR server local indexType:Range  size of resultset: " + rs.size() + " for query: " + queryString[i]);
+            LogWriterUtils.getLogWriter().info("RR server local indexType:Range  size of resultset: " + rs.size() + " for query: " + queryString[i]);
             // The results should not be PdxString
             checkForPdxString(rs.asList(), queryString[i]);
           } catch (Exception e) {
-            fail("Failed executing " + queryString[i], e);
+            Assert.fail("Failed executing " + queryString[i], e);
           }
         }
        }
@@ -544,13 +549,13 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
         // Query server1 locally to check if PdxString is not being returned
         for (int i = 0; i < queryString.length; i++) {
           try {
-            getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
             SelectResults rs = (SelectResults) localQueryService.newQuery(queryString[i]).execute();
-            getLogWriter().info("RR server local readSerializedTrue: indexType: CompactRange size of resultset: " + rs.size() + " for query: " + queryString[i]);
+            LogWriterUtils.getLogWriter().info("RR server local readSerializedTrue: indexType: CompactRange size of resultset: " + rs.size() + " for query: " + queryString[i]);
             // The results should not be PdxString
             checkForPdxString(rs.asList(), queryString[i]);
           } catch (Exception e) {
-            fail("Failed executing " + queryString[i], e);
+            Assert.fail("Failed executing " + queryString[i], e);
           }
         }
       }
@@ -566,13 +571,13 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
         // Query server1 remotely to check if PdxString is not being returned
         for (int i = 0; i < queryString.length; i++) {
           try {
-            getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
             SelectResults rs = (SelectResults) remoteQueryService.newQuery(queryString[i]).execute();
-            getLogWriter().info("RR server remote readSerializedTrue: indexType:CompactRange size of resultset: " + rs.size() + " for query: " + queryString[i]);
+            LogWriterUtils.getLogWriter().info("RR server remote readSerializedTrue: indexType:CompactRange size of resultset: " + rs.size() + " for query: " + queryString[i]);
             // The results should not be PdxString
             checkForPdxString(rs.asList(), queryString[i]);
           } catch (Exception e) {
-            fail("Failed executing " + queryString[i], e);
+            Assert.fail("Failed executing " + queryString[i], e);
           }
         }
       }
@@ -600,7 +605,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
         try {
           localQueryService = getCache().getQueryService();
         } catch (Exception e) {
-          fail("Failed to get QueryService.", e);
+          Assert.fail("Failed to get QueryService.", e);
         } 
         // Verify the type of index created  
         Index index = null;
@@ -635,7 +640,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
     final int port0 = server0.invokeInt(PdxStringQueryDUnitTest.class, "getCacheServerPort");
     final int port1 = server1.invokeInt(PdxStringQueryDUnitTest.class, "getCacheServerPort");
     final int port2 = server2.invokeInt(PdxStringQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server0.getHost());
 
     // Create client pool.
     final String poolName = "testClientServerQueryPool"; 
@@ -648,7 +653,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
         factory.setScope(Scope.LOCAL);
         ClientServerTestCase.configureConnectionPool(factory, host0, port1,-1, true, -1, -1, null);
         Region region = createRegion(regionName, rootRegionName,  factory.create());
-        getLogWriter().info("Put PortfolioPdx");
+        LogWriterUtils.getLogWriter().info("Put PortfolioPdx");
         for (int i=0; i<numberOfEntries; i++) {
           region.put("key-"+i, new PortfolioPdx(i));
          }
@@ -680,21 +685,21 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
           remoteQueryService = (PoolManager.find(poolName)).getQueryService();
           localQueryService = getCache().getQueryService();
         } catch (Exception e) {
-          fail("Failed to get QueryService.", e);
+          Assert.fail("Failed to get QueryService.", e);
         }
         
         for (int i=0; i < queryString.length; i++){
           try {
-            getLogWriter().info("### Executing Query on remote server:" + queryString[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query on remote server:" + queryString[i]);
             Query query = remoteQueryService.newQuery(queryString[i]);
             rs[0][0] = (SelectResults)query.execute();
-            getLogWriter().info("RR remote indexType: Range size of resultset: "+ rs[0][0].size() + " for query: " + queryString[i]);;
+            LogWriterUtils.getLogWriter().info("RR remote indexType: Range size of resultset: "+ rs[0][0].size() + " for query: " + queryString[i]);;
             checkForPdxString(rs[0][0].asList(), queryString[i]);
            
-            getLogWriter().info("### Executing Query locally on client:" + queryString[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query locally on client:" + queryString[i]);
             query = localQueryService.newQuery(queryString[i]);
             rs[0][1] = (SelectResults)query.execute();
-            getLogWriter().info("RR  client local indexType: Range size of resultset: "+ rs[0][1].size() + " for query: " + queryString[i]);;
+            LogWriterUtils.getLogWriter().info("RR  client local indexType: Range size of resultset: "+ rs[0][1].size() + " for query: " + queryString[i]);;
             checkForPdxString(rs[0][1].asList(), queryString[i]);
             
             if(i < orderByQueryIndex){
@@ -708,7 +713,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
               compareResultsOrder(rs, false);
             }
           } catch (Exception e) {
-            fail("Failed executing " + queryString[i], e);
+            Assert.fail("Failed executing " + queryString[i], e);
           }
         }
         }
@@ -721,7 +726,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
 
-        getLogWriter().info("Put Objects locally on server");
+        LogWriterUtils.getLogWriter().info("Put Objects locally on server");
         for (int i=numberOfEntries; i<numberOfEntries*2; i++) {
           region.put("key-"+i, new Portfolio(i));
          }
@@ -730,13 +735,13 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
         // Query server1 locally to check if PdxString is not being returned
         for (int i = 0; i < queryString.length; i++) {
           try {
-            getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
             SelectResults rs = (SelectResults) localQueryService.newQuery(queryString[i]).execute();
-            getLogWriter().info("RR server local indexType:Range  size of resultset: " + rs.size() + " for query: " + queryString[i]);
+            LogWriterUtils.getLogWriter().info("RR server local indexType:Range  size of resultset: " + rs.size() + " for query: " + queryString[i]);
             // The results should not be PdxString
             checkForPdxString(rs.asList(), queryString[i]);
           } catch (Exception e) {
-            fail("Failed executing " + queryString[i], e);
+            Assert.fail("Failed executing " + queryString[i], e);
           }
         }
        }
@@ -750,13 +755,13 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
        // Query server1 locally to check if PdxString is not being returned
         for (int i = 0; i < queryString.length; i++) {
           try {
-            getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
             SelectResults rs = (SelectResults) localQueryService.newQuery(queryString[i]).execute();
-            getLogWriter().info("RR  server local readSerializedTrue: indexType: Range size of resultset: " + rs.size() + " for query: " + queryString[i]);
+            LogWriterUtils.getLogWriter().info("RR  server local readSerializedTrue: indexType: Range size of resultset: " + rs.size() + " for query: " + queryString[i]);
             // The results should not be PdxString
             checkForPdxString(rs.asList(), queryString[i]);
           } catch (Exception e) {
-            fail("Failed executing " + queryString[i], e);
+            Assert.fail("Failed executing " + queryString[i], e);
           }
         }
       }
@@ -770,13 +775,13 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
         // Query server1 remotely to check if PdxString is not being returned
         for (int i = 0; i < queryString.length; i++) {
           try {
-            getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
             SelectResults rs = (SelectResults) remoteQueryService.newQuery(queryString[i]).execute();
-            getLogWriter().info("RR server remote readSerializedTrue: indexType: Range size of resultset: " + rs.size() + " for query: " + queryString[i]);
+            LogWriterUtils.getLogWriter().info("RR server remote readSerializedTrue: indexType: Range size of resultset: " + rs.size() + " for query: " + queryString[i]);
             // The results should not be PdxString
             checkForPdxString(rs.asList(), queryString[i]);
           } catch (Exception e) {
-            fail("Failed executing " + queryString[i], e);
+            Assert.fail("Failed executing " + queryString[i], e);
           }
         }
       }
@@ -805,7 +810,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
         try {
           localQueryService = getCache().getQueryService();
         } catch (Exception e) {
-          fail("Failed to get QueryService.", e);
+          Assert.fail("Failed to get QueryService.", e);
         } 
        
         Index index = null;
@@ -849,7 +854,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
     final int port1 = server1.invokeInt(PdxStringQueryDUnitTest.class, "getCacheServerPort");
     final int port2 = server2.invokeInt(PdxStringQueryDUnitTest.class, "getCacheServerPort");
 
-    final String host0 = getServerHostName(server0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server0.getHost());
 
     // Create client pool.
     final String poolName = "testClientServerQueryPool"; 
@@ -862,7 +867,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
         factory.setScope(Scope.LOCAL);
         ClientServerTestCase.configureConnectionPool(factory, host0, port1,-1, true, -1, -1, null);
         Region region = createRegion(regionName, rootRegionName,  factory.create());
-        getLogWriter().info("Put PortfolioPdx");
+        LogWriterUtils.getLogWriter().info("Put PortfolioPdx");
         for (int i=0; i<numberOfEntries; i++) {
           region.put("key-"+i, new PortfolioPdx(i));
          }
@@ -884,50 +889,50 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
           remoteQueryService = (PoolManager.find(poolName)).getQueryService();
           localQueryService = getCache().getQueryService();
         } catch (Exception e) {
-          fail("Failed to get QueryService.", e);
+          Assert.fail("Failed to get QueryService.", e);
         }
         
         for (int i=0; i < queryString.length; i++){
           try {
-            getLogWriter().info("### Executing Query on remote server:" + queryString[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query on remote server:" + queryString[i]);
             Query query = remoteQueryService.newQuery(queryString[i]);
             rs[0][0] = (SelectResults)query.execute();
             resWithoutIndexRemote[i] = rs[0][0];
-            getLogWriter().info("RR remote no index size of resultset: "+ rs[0][0].size() + " for query: " + queryString[i]);;
+            LogWriterUtils.getLogWriter().info("RR remote no index size of resultset: "+ rs[0][0].size() + " for query: " + queryString[i]);;
             checkForPdxString(rs[0][0].asList(), queryString[i]);
 
-            getLogWriter().info("### Executing Query locally on client:" + queryString[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query locally on client:" + queryString[i]);
             query = localQueryService.newQuery(queryString[i]);
             rs[0][1] = (SelectResults)query.execute();
             resWithoutIndexLocal[i] = rs[0][1];
-            getLogWriter().info("isPR: " + isPr+ "  client local indexType:no index size of resultset: "+ rs[0][1].size() + " for query: " + queryString[i]);;
+            LogWriterUtils.getLogWriter().info("isPR: " + isPr+ "  client local indexType:no index size of resultset: "+ rs[0][1].size() + " for query: " + queryString[i]);;
             checkForPdxString(rs[0][1].asList(), queryString[i]);
           } catch (Exception e) {
-            fail("Failed executing " + queryString[i], e);
+            Assert.fail("Failed executing " + queryString[i], e);
           }
           try{  
             // to compare remote query results with and without index
-            getLogWriter().info("### Executing Query on remote server for region2:" + queryString2[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query on remote server for region2:" + queryString2[i]);
             Query query = remoteQueryService.newQuery(queryString2[i]);
             resWithIndexRemote[i] = (SelectResults)query.execute();
-            getLogWriter().info("isPR: " + isPr+ "  remote region2 size of resultset: "+ resWithIndexRemote[i].size() + " for query: " + queryString2[i]);;
+            LogWriterUtils.getLogWriter().info("isPR: " + isPr+ "  remote region2 size of resultset: "+ resWithIndexRemote[i].size() + " for query: " + queryString2[i]);;
             checkForPdxString(resWithIndexRemote[i].asList(), queryString2[i]);
 
            // to compare local query results with and without index
-            getLogWriter().info("### Executing Query on local for region2:" + queryString2[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query on local for region2:" + queryString2[i]);
             query = localQueryService.newQuery(queryString2[i]);
             resWithIndexLocal[i] = (SelectResults)query.execute();
-            getLogWriter().info("isPR: " + isPr+ "  local region2 size of resultset: "+ resWithIndexLocal[i].size() + " for query: " + queryString2[i]);;
+            LogWriterUtils.getLogWriter().info("isPR: " + isPr+ "  local region2 size of resultset: "+ resWithIndexLocal[i].size() + " for query: " + queryString2[i]);;
             checkForPdxString(resWithIndexLocal[i].asList(), queryString2[i]);
           } catch (Exception e) {
-            fail("Failed executing " + queryString2[i], e);
+            Assert.fail("Failed executing " + queryString2[i], e);
           }
 
             if(i < orderByQueryIndex){
               // Compare local and remote query results.
               if (!compareResultsOfWithAndWithoutIndex(rs)){
-                getLogWriter().info("result0="+rs[0][0].asList());
-                getLogWriter().info("result1="+rs[0][1].asList());
+                LogWriterUtils.getLogWriter().info("result0="+rs[0][0].asList());
+                LogWriterUtils.getLogWriter().info("result1="+rs[0][1].asList());
                fail("Local and Remote Query Results are not matching for query :" + queryString[i]);  
               }
             }
@@ -975,7 +980,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
 
-        getLogWriter().info("Put Objects locally on server");
+        LogWriterUtils.getLogWriter().info("Put Objects locally on server");
         for (int i=numberOfEntries; i<numberOfEntries*2; i++) {
           region.put("key-"+i, new Portfolio(i));
          }
@@ -984,21 +989,21 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
         // Query server1 locally to check if PdxString is not being returned
         for (int i = 0; i < queryString.length; i++) {
           try {
-            getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
             SelectResults rs = (SelectResults) localQueryService.newQuery(queryString[i]).execute();
-            getLogWriter().info("PR server local indexType:no  size of resultset: " + rs.size() + " for query: " + queryString[i]);
+            LogWriterUtils.getLogWriter().info("PR server local indexType:no  size of resultset: " + rs.size() + " for query: " + queryString[i]);
             // The results should not be PdxString
             checkForPdxString(rs.asList(), queryString[i]);
           } catch (Exception e) {
-            fail("Failed executing " + queryString[i], e);
+            Assert.fail("Failed executing " + queryString[i], e);
           }
           try{
             SelectResults rs = (SelectResults) localQueryService.newQuery(queryString2[i]).execute();
-            getLogWriter().info("PR server local indexType: no size of resultset: " + rs.size() + " for query: " + queryString2[i]);
+            LogWriterUtils.getLogWriter().info("PR server local indexType: no size of resultset: " + rs.size() + " for query: " + queryString2[i]);
             // The results should not be PdxString
             checkForPdxString(rs.asList(), queryString2[i]);
           }catch (Exception e) {
-            fail("Failed executing " + queryString2[i], e);
+            Assert.fail("Failed executing " + queryString2[i], e);
           }
 
         }
@@ -1014,13 +1019,13 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
        // Query server1 locally to check if PdxString is not being returned
         for (int i = 0; i < queryString.length; i++) {
           try {
-            getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
             SelectResults rs = (SelectResults) localQueryService.newQuery(queryString[i]).execute();
-            getLogWriter().info("isPR: " + isPr+ " server local readSerializedTrue: indexType: no index size of resultset: " + rs.size() + " for query: " + queryString[i]);
+            LogWriterUtils.getLogWriter().info("isPR: " + isPr+ " server local readSerializedTrue: indexType: no index size of resultset: " + rs.size() + " for query: " + queryString[i]);
             // The results should not be PdxString
             checkForPdxString(rs.asList(), queryString[i]);
           } catch (Exception e) {
-            fail("Failed executing " + queryString[i], e);
+            Assert.fail("Failed executing " + queryString[i], e);
           }
         }
       }
@@ -1035,13 +1040,13 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
         // Query server1 remotely to check if PdxString is not being returned
         for (int i = 0; i < queryString.length; i++) {
           try {
-            getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
             SelectResults rs = (SelectResults) remoteQueryService.newQuery(queryString[i]).execute();
-            getLogWriter().info("RR server remote readSerializedTrue: indexType:no index size of resultset: " + rs.size() + " for query: " + queryString[i]);
+            LogWriterUtils.getLogWriter().info("RR server remote readSerializedTrue: indexType:no index size of resultset: " + rs.size() + " for query: " + queryString[i]);
             // The results should not be PdxString
             checkForPdxString(rs.asList(), queryString[i]);
           } catch (Exception e) {
-            fail("Failed executing " + queryString[i], e);
+            Assert.fail("Failed executing " + queryString[i], e);
           }
         }
       }
@@ -1070,7 +1075,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
         try {
           localQueryService = getCache().getQueryService();
         } catch (Exception e) {
-          fail("Failed to get QueryService.", e);
+          Assert.fail("Failed to get QueryService.", e);
         } 
         // Verify the type of index created  
         Index index = null;
@@ -1113,7 +1118,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
     final int port1 = server1.invokeInt(PdxStringQueryDUnitTest.class, "getCacheServerPort");
     final int port2 = server2.invokeInt(PdxStringQueryDUnitTest.class, "getCacheServerPort");
 
-    final String host0 = getServerHostName(server0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server0.getHost());
 
     // Create client pool.
     final String poolName = "testClientServerQueryPool"; 
@@ -1126,7 +1131,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
         factory.setScope(Scope.LOCAL);
         ClientServerTestCase.configureConnectionPool(factory, host0, port1,-1, true, -1, -1, null);
         Region region = createRegion(regionName, rootRegionName,  factory.create());
-        getLogWriter().info("Put PortfolioPdx");
+        LogWriterUtils.getLogWriter().info("Put PortfolioPdx");
         for (int i=0; i<numberOfEntries; i++) {
           region.put("key-"+i, new PortfolioPdx(i));
          }
@@ -1169,21 +1174,21 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
           remoteQueryService = (PoolManager.find(poolName)).getQueryService();
           localQueryService = getCache().getQueryService();
         } catch (Exception e) {
-          fail("Failed to get QueryService.", e);
+          Assert.fail("Failed to get QueryService.", e);
         }
         
         for (int i=0; i < queryString.length; i++){
           try {
-            getLogWriter().info("### Executing Query on remote server:" + queryString[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query on remote server:" + queryString[i]);
             Query query = remoteQueryService.newQuery(queryString[i]);
             rs[0][0] = (SelectResults)query.execute();
-            getLogWriter().info("RR remote indexType:CompactRange size of resultset: "+ rs[0][0].size() + " for query: " + queryString[i]);;
+            LogWriterUtils.getLogWriter().info("RR remote indexType:CompactRange size of resultset: "+ rs[0][0].size() + " for query: " + queryString[i]);;
             checkForPdxString(rs[0][0].asList(), queryString[i]);
            
-            getLogWriter().info("### Executing Query locally on client:" + queryString[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query locally on client:" + queryString[i]);
             query = localQueryService.newQuery(queryString[i]);
             rs[0][1] = (SelectResults)query.execute();
-            getLogWriter().info("isPR: " + isPr+ "  client local indexType:CompactRange size of resultset: "+ rs[0][1].size() + " for query: " + queryString[i]);;
+            LogWriterUtils.getLogWriter().info("isPR: " + isPr+ "  client local indexType:CompactRange size of resultset: "+ rs[0][1].size() + " for query: " + queryString[i]);;
             checkForPdxString(rs[0][1].asList(), queryString[i]);
  
             if(i < orderByQueryIndex){
@@ -1197,7 +1202,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
               compareResultsOrder(rs, isPr);
             }
           } catch (Exception e) {
-            fail("Failed executing " + queryString[i], e);
+            Assert.fail("Failed executing " + queryString[i], e);
           }
         }
       }       
@@ -1209,7 +1214,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
 
-        getLogWriter().info("Put Objects locally on server");
+        LogWriterUtils.getLogWriter().info("Put Objects locally on server");
         for (int i=numberOfEntries; i<numberOfEntries*2; i++) {
           region.put("key-"+i, new Portfolio(i));
          }
@@ -1218,13 +1223,13 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
         // Query server1 locally to check if PdxString is not being returned
         for (int i = 0; i < queryString.length; i++) {
           try {
-            getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
             SelectResults rs = (SelectResults) localQueryService.newQuery(queryString[i]).execute();
-            getLogWriter().info("RR server local indexType:Range  size of resultset: " + rs.size() + " for query: " + queryString[i]);
+            LogWriterUtils.getLogWriter().info("RR server local indexType:Range  size of resultset: " + rs.size() + " for query: " + queryString[i]);
             // The results should not be PdxString
             checkForPdxString(rs.asList(), queryString[i]);
           } catch (Exception e) {
-            fail("Failed executing " + queryString[i], e);
+            Assert.fail("Failed executing " + queryString[i], e);
           }
         }
        }
@@ -1240,13 +1245,13 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
         // Query server1 locally to check if PdxString is not being returned
         for (int i = 0; i < queryString.length; i++) {
           try {
-            getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
             SelectResults rs = (SelectResults) localQueryService.newQuery(queryString[i]).execute();
-            getLogWriter().info("isPR: " + isPr+ " server local readSerializedTrue: indexType:CompactRange size of resultset: " + rs.size() + " for query: " + queryString[i]);
+            LogWriterUtils.getLogWriter().info("isPR: " + isPr+ " server local readSerializedTrue: indexType:CompactRange size of resultset: " + rs.size() + " for query: " + queryString[i]);
             // The results should not be PdxString
             checkForPdxString(rs.asList(), queryString[i]);
           } catch (Exception e) {
-            fail("Failed executing " + queryString[i], e);
+            Assert.fail("Failed executing " + queryString[i], e);
           }
         }
       }
@@ -1262,13 +1267,13 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
         // Query server1 remotely to check if PdxString is not being returned
         for (int i = 0; i < queryString.length; i++) {
           try {
-            getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
             SelectResults rs = (SelectResults) remoteQueryService.newQuery(queryString[i]).execute();
-            getLogWriter().info("RR server remote readSerializedTrue: indexType: indexType:CompactRange size of resultset: " + rs.size() + " for query: " + queryString[i]);
+            LogWriterUtils.getLogWriter().info("RR server remote readSerializedTrue: indexType: indexType:CompactRange size of resultset: " + rs.size() + " for query: " + queryString[i]);
             // The results should not be PdxString
             checkForPdxString(rs.asList(), queryString[i]);
           } catch (Exception e) {
-            fail("Failed executing " + queryString[i], e);
+            Assert.fail("Failed executing " + queryString[i], e);
           }
         }
       }
@@ -1297,7 +1302,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
         try {
           localQueryService = getCache().getQueryService();
         } catch (Exception e) {
-          fail("Failed to get QueryService.", e);
+          Assert.fail("Failed to get QueryService.", e);
         } 
         // Verify the type of index created  
         Index index = null;
@@ -1340,7 +1345,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
     final int port1 = server1.invokeInt(PdxStringQueryDUnitTest.class, "getCacheServerPort");
     final int port2 = server2.invokeInt(PdxStringQueryDUnitTest.class, "getCacheServerPort");
 
-    final String host0 = getServerHostName(server0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server0.getHost());
 
     // Create client pool.
     final String poolName = "testClientServerQueryPool"; 
@@ -1353,7 +1358,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
         factory.setScope(Scope.LOCAL);
         ClientServerTestCase.configureConnectionPool(factory, host0, port1,-1, true, -1, -1, null);
         Region region = createRegion(regionName, rootRegionName,  factory.create());
-        getLogWriter().info("Put PortfolioPdx");
+        LogWriterUtils.getLogWriter().info("Put PortfolioPdx");
         for (int i=0; i<numberOfEntries; i++) {
           region.put("key-"+i, new PortfolioPdx(i));
          }
@@ -1393,28 +1398,28 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
           remoteQueryService = (PoolManager.find(poolName)).getQueryService();
           localQueryService = getCache().getQueryService();
         } catch (Exception e) {
-          fail("Failed to get QueryService.", e);
+          Assert.fail("Failed to get QueryService.", e);
         }
         
         for (int i=0; i < queryString.length; i++){
           try {
-            getLogWriter().info("### Executing Query on remote server:" + queryString[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query on remote server:" + queryString[i]);
             Query query = remoteQueryService.newQuery(queryString[i]);
             rs[0][0] = (SelectResults)query.execute();
-            getLogWriter().info("RR remote indexType: Range size of resultset: "+ rs[0][0].size() + " for query: " + queryString[i]);;
+            LogWriterUtils.getLogWriter().info("RR remote indexType: Range size of resultset: "+ rs[0][0].size() + " for query: " + queryString[i]);;
             checkForPdxString(rs[0][0].asList(), queryString[i]);
                       
-            getLogWriter().info("### Executing Query locally on client:" + queryString[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query locally on client:" + queryString[i]);
             query = localQueryService.newQuery(queryString[i]);
             rs[0][1] = (SelectResults)query.execute();
-            getLogWriter().info("isPR: " + isPr+ "  client local indexType: Range size of resultset: "+ rs[0][1].size() + " for query: " + queryString[i]);;
+            LogWriterUtils.getLogWriter().info("isPR: " + isPr+ "  client local indexType: Range size of resultset: "+ rs[0][1].size() + " for query: " + queryString[i]);;
             checkForPdxString(rs[0][1].asList(), queryString[i]);
                    
             if(i < orderByQueryIndex){
               // Compare local and remote query results.
               if (!compareResultsOfWithAndWithoutIndex(rs)){
-                getLogWriter().info("result0="+rs[0][0].asList());
-                getLogWriter().info("result1="+rs[0][1].asList());
+                LogWriterUtils.getLogWriter().info("result0="+rs[0][0].asList());
+                LogWriterUtils.getLogWriter().info("result1="+rs[0][1].asList());
                fail("Local and Remote Query Results are not matching for query :" + queryString[i]);  
               }
             }
@@ -1423,7 +1428,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
               compareResultsOrder(rs, isPr);
             }
           } catch (Exception e) {
-            fail("Failed executing " + queryString[i], e);
+            Assert.fail("Failed executing " + queryString[i], e);
           }
         }
        }
@@ -1436,7 +1441,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
 
-        getLogWriter().info("Put Objects locally on server");
+        LogWriterUtils.getLogWriter().info("Put Objects locally on server");
         for (int i=numberOfEntries; i<numberOfEntries*2; i++) {
           region.put("key-"+i, new Portfolio(i));
          }
@@ -1445,13 +1450,13 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
         // Query server1 locally to check if PdxString is not being returned
         for (int i = 0; i < queryString.length; i++) {
           try {
-            getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
             SelectResults rs = (SelectResults) localQueryService.newQuery(queryString[i]).execute();
-            getLogWriter().info("RR server local indexType:Range  size of resultset: " + rs.size() + " for query: " + queryString[i]);
+            LogWriterUtils.getLogWriter().info("RR server local indexType:Range  size of resultset: " + rs.size() + " for query: " + queryString[i]);
             // The results should not be PdxString
             checkForPdxString(rs.asList(), queryString[i]);
           } catch (Exception e) {
-            fail("Failed executing " + queryString[i], e);
+            Assert.fail("Failed executing " + queryString[i], e);
           }
         }
        }
@@ -1467,13 +1472,13 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
         // Query server1 locally to check if PdxString is not being returned
         for (int i = 0; i < queryString.length; i++) {
           try {
-            getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
             SelectResults rs = (SelectResults) localQueryService.newQuery(queryString[i]).execute();
-            getLogWriter().info("isPR: " + isPr+ " server local readSerializedTrue: indexType: Range size of resultset: " + rs.size() + " for query: " + queryString[i]);
+            LogWriterUtils.getLogWriter().info("isPR: " + isPr+ " server local readSerializedTrue: indexType: Range size of resultset: " + rs.size() + " for query: " + queryString[i]);
             // The results should not be PdxString
             checkForPdxString(rs.asList(), queryString[i]);
           } catch (Exception e) {
-            fail("Failed executing " + queryString[i], e);
+            Assert.fail("Failed executing " + queryString[i], e);
           }
         }
       }
@@ -1489,13 +1494,13 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
         // Query server1 remotely to check if PdxString is not being returned
         for (int i = 0; i < queryString.length; i++) {
           try {
-            getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
+            LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
             SelectResults rs = (SelectResults) remoteQueryService.newQuery(queryString[i]).execute();
-            getLogWriter().info("RR server remote readSerializedTrue: indexType: Range size of resultset: " + rs.size() + " for query: " + queryString[i]);
+            LogWriterUtils.getLogWriter().info("RR server remote readSerializedTrue: indexType: Range size of resultset: " + rs.size() + " for query: " + queryString[i]);
             // The results should not be PdxString
             checkForPdxString(rs.asList(), queryString[i]);
           } catch (Exception e) {
-            fail("Failed executing " + queryString[i], e);
+            Assert.fail("Failed executing " + queryString[i], e);
           }
         }
       }
@@ -1524,7 +1529,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
         try {
           localQueryService = getCache().getQueryService();
         } catch (Exception e) {
-          fail("Failed to get QueryService.", e);
+          Assert.fail("Failed to get QueryService.", e);
         } 
         // Verify the type of index created  
         Index index = null;
@@ -1567,7 +1572,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
     final int port1 = server1.invokeInt(PdxStringQueryDUnitTest.class, "getCacheServerPort");
     final int port2 = server2.invokeInt(PdxStringQueryDUnitTest.class, "getCacheServerPort");
 
-    final String host0 = getServerHostName(server0.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server0.getHost());
 
     // Create client pool.
     final String poolName = "testClientServerQueryPool"; 
@@ -1581,7 +1586,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
         ClientServerTestCase.configureConnectionPool(factory, host0, port1,-1, true, -1, -1, null);
         Region region = createRegion(regionName, rootRegionName,  factory.create());
       
-        getLogWriter().info("Put PortfolioPdx");
+        LogWriterUtils.getLogWriter().info("Put PortfolioPdx");
         // Put some PortfolioPdx objects with null Status and secIds 
         for (int i=0; i<numberOfEntries*2; i++) {
           PortfolioPdx portfolioPdx = new PortfolioPdx(i);
@@ -1633,7 +1638,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
           remoteQueryService = (PoolManager.find(poolName)).getQueryService();
           localQueryService = getCache().getQueryService();
         } catch (Exception e) {
-          fail("Failed to get QueryService.", e);
+          Assert.fail("Failed to get QueryService.", e);
         }
         
         // Querying the fields with null values
@@ -1644,7 +1649,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
           try {
             Query query = remoteQueryService.newQuery(qs[i]);
             SelectResults res = (SelectResults)query.execute();
-            getLogWriter().info("PR NULL Pdxstring test size of resultset: "+ res.size() + " for query: " + qs[i]);;
+            LogWriterUtils.getLogWriter().info("PR NULL Pdxstring test size of resultset: "+ res.size() + " for query: " + qs[i]);;
             if(i == 0){
               for(Object o : res){
                 if(o != null){
@@ -1655,7 +1660,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
               checkForPdxString(res.asList(), qs[i]);
             }
           } catch (Exception e) {
-            fail("Failed executing " + qs[i], e);
+            Assert.fail("Failed executing " + qs[i], e);
           }
         }
       }
@@ -1834,8 +1839,8 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
       @Override
       public Object call() throws Exception {
         ClientCacheFactory cf = new ClientCacheFactory();
-        cf.addPoolServer(getServerHostName(vm0.getHost()), port1);
-        cf.addPoolServer(getServerHostName(vm1.getHost()), port2);
+        cf.addPoolServer(NetworkUtils.getServerHostName(vm0.getHost()), port1);
+        cf.addPoolServer(NetworkUtils.getServerHostName(vm1.getHost()), port2);
         ClientCache cache = getClientCache(cf);
         Region region = cache.createClientRegionFactory(ClientRegionShortcut.PROXY)
            .create(regionName);
@@ -1873,7 +1878,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
       }
     });
     
-    invokeInEveryVM(DistributedTestCase.class, "disconnectFromDS");
+    Invoke.invokeInEveryVM(DistributedTestCase.class, "disconnectFromDS");
   }
    
   protected void configAndStartBridgeServer(boolean isPr, boolean isAccessor, boolean asyncIndex) {
@@ -1897,7 +1902,7 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
     try {
       startBridgeServer(0, false);
     } catch (Exception ex) {
-      fail("While starting CacheServer", ex);
+      Assert.fail("While starting CacheServer", ex);
     }
   }
   /**
@@ -1930,12 +1935,12 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
     SerializableRunnable closeCache =
       new CacheSerializableRunnable("Close Client") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Close Client. ###");
+        LogWriterUtils.getLogWriter().info("### Close Client. ###");
         try {
           closeCache();
           disconnectFromDS();
         } catch (Exception ex) {
-          getLogWriter().info("### Failed to get close client. ###");
+          LogWriterUtils.getLogWriter().info("### Failed to get close client. ###");
         }
       }
     };
@@ -1962,13 +1967,13 @@ public class PdxStringQueryDUnitTest extends CacheTestCase{
       public void run2() throws CacheException {
         // Create Cache.
         getLonerSystem();
-        addExpectedException("Connection refused");
+        IgnoredException.addIgnoredException("Connection refused");
         getCache();        
         PoolFactory cpf = PoolManager.createFactory();
         cpf.setSubscriptionEnabled(subscriptionEnabled);
         cpf.setSubscriptionRedundancy(redundancy);
         for (int i=0; i < servers.length; i++){
-          getLogWriter().info("### Adding to Pool. ### Server : " + servers[i] + " Port : " + ports[i]);
+          LogWriterUtils.getLogWriter().info("### Adding to Pool. ### Server : " + servers[i] + " Port : " + ports[i]);
           cpf.addServer(servers[i], ports[i]);
         }
         cpf.create(poolName);

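The PdxStringQueryDUnitTest hunks above are mechanical: helpers formerly inherited from DistributedTestCase are now reached through the static utility classes this change introduces (LogWriterUtils, Assert, NetworkUtils, IgnoredException, Invoke). A minimal sketch of the converted call-site pattern, using only the utility methods visible in the diff; the class name and local variables are hypothetical:

import com.gemstone.gemfire.test.dunit.Assert;
import com.gemstone.gemfire.test.dunit.Host;
import com.gemstone.gemfire.test.dunit.IgnoredException;
import com.gemstone.gemfire.test.dunit.LogWriterUtils;
import com.gemstone.gemfire.test.dunit.NetworkUtils;

public class UtilityCallSiteSketch {
  void convertedCallSites(Host host, Exception cause) {
    // was: getLogWriter().info(...), inherited from DistributedTestCase
    LogWriterUtils.getLogWriter().info("Put PortfolioPdx");

    // was: getServerHostName(host)
    String hostName = NetworkUtils.getServerHostName(host);
    LogWriterUtils.getLogWriter().info("server host: " + hostName);

    // was: addExpectedException("Connection refused")
    IgnoredException.addIgnoredException("Connection refused");

    // was: fail("Failed to get QueryService.", cause)
    Assert.fail("Failed to get QueryService.", cause);
  }
}
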
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryDataInconsistencyDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryDataInconsistencyDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryDataInconsistencyDUnitTest.java
index 5833883..aeb4343 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryDataInconsistencyDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryDataInconsistencyDUnitTest.java
@@ -40,11 +40,15 @@ import com.gemstone.gemfire.cache.query.partitioned.PRQueryDUnitHelper;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.internal.cache.execute.PRClientServerTestBase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 /**
  * This tests the data inconsistency during update on an index and querying the
@@ -91,10 +95,13 @@ public class QueryDataInconsistencyDUnitTest extends CacheTestCase {
   }
 
   @Override
-  public void tearDown2() throws Exception {
-    invokeInEveryVM(CacheTestCase.class, "disconnectFromDS");
-    super.tearDown2();
-    invokeInEveryVM(QueryObserverHolder.class, "reset");
+  protected final void preTearDownCacheTestCase() throws Exception {
+    Invoke.invokeInEveryVM(CacheTestCase.class, "disconnectFromDS");
+  }
+  
+  @Override
+  protected final void postTearDownCacheTestCase() throws Exception {
+    Invoke.invokeInEveryVM(QueryObserverHolder.class, "reset");
   }
 
   @Override
@@ -149,7 +156,7 @@ public class QueryDataInconsistencyDUnitTest extends CacheTestCase {
       @Override
       public void run2() throws CacheException {
         QueryService qs = CacheFactory.getAnyInstance().getQueryService();
-        while (!hooked){pause(100);}
+        while (!hooked){Wait.pause(100);}
         Object rs = null;
         try {
           rs = qs.newQuery("<trace> select * from /"+repRegionName+" where ID = 1").execute();          
@@ -175,7 +182,7 @@ public class QueryDataInconsistencyDUnitTest extends CacheTestCase {
       @Override
       public void run2() throws CacheException {
         QueryService qs = CacheFactory.getAnyInstance().getQueryService();
-        while (!hooked){pause(100);}
+        while (!hooked){Wait.pause(100);}
         Object rs = null;
         try {
           rs = qs.newQuery("<trace> select * from /"+repRegionName+" where ID = 1").execute();          
@@ -197,7 +204,7 @@ public class QueryDataInconsistencyDUnitTest extends CacheTestCase {
         hooked = false;//Let client put go further.
       }
     });
-    DistributedTestCase.join(putThread, 200, this.getLogWriter());
+    ThreadUtils.join(putThread, 200);
   }
 
   public void testRangeIndex() {
@@ -252,7 +259,7 @@ public class QueryDataInconsistencyDUnitTest extends CacheTestCase {
       public void run2() throws CacheException {
         QueryService qs = CacheFactory.getAnyInstance().getQueryService();
         Position pos1 = null;
-        while (!hooked){pause(100);}
+        while (!hooked){Wait.pause(100);}
         try {
           Object rs = qs.newQuery("<trace> select pos from /"+repRegionName+" p, p.positions.values pos where pos.secId = 'APPL' AND p.ID = 1").execute();
           CacheFactory.getAnyInstance().getLogger().fine("Shobhit: "+rs);
@@ -264,13 +271,13 @@ public class QueryDataInconsistencyDUnitTest extends CacheTestCase {
           }          
         } catch (Exception e) {
           e.printStackTrace();
-          fail("Query execution failed on server.", e);
+          Assert.fail("Query execution failed on server.", e);
           IndexManager.testHook = null;
         } finally {
           hooked = false;//Let client put go further.
         }
         while (!hooked) {
-          pause(100);
+          Wait.pause(100);
         }
         try {
           Object rs = qs.newQuery("<trace> select pos from /"+repRegionName+" p, p.positions.values pos where pos.secId = 'APPL' AND p.ID = 1").execute();
@@ -292,7 +299,7 @@ public class QueryDataInconsistencyDUnitTest extends CacheTestCase {
         }
       }
     });
-    DistributedTestCase.join(putThread, 200, this.getLogWriter());
+    ThreadUtils.join(putThread, 200);
   }
   
   public void testRangeIndexWithIndexAndQueryFromCluaseMisMatch() {
@@ -344,7 +351,7 @@ public class QueryDataInconsistencyDUnitTest extends CacheTestCase {
       public void run2() throws CacheException {
         QueryService qs = CacheFactory.getAnyInstance().getQueryService();
         Position pos1 = null;
-        while (!hooked){pause(100);}
+        while (!hooked){Wait.pause(100);}
         try {
           Object rs = qs.newQuery("<trace> select pos from /"+repRegionName+" p, p.positions.values pos where pos.secId = 'APPL' AND p.ID = 1").execute();
           CacheFactory.getAnyInstance().getLogger().fine("Shobhit: "+rs);
@@ -356,13 +363,13 @@ public class QueryDataInconsistencyDUnitTest extends CacheTestCase {
           }          
         } catch (Exception e) {
           e.printStackTrace();
-          fail("Query execution failed on server.", e);
+          Assert.fail("Query execution failed on server.", e);
           IndexManager.testHook = null;
         } finally {
           hooked = false;//Let client put go further.
         }
         while (!hooked) {
-          pause(100);
+          Wait.pause(100);
         }
         try {
           Object rs = qs.newQuery("select pos from /"+repRegionName+" p, p.positions.values pos where pos.secId = 'APPL' AND p.ID = 1").execute();
@@ -383,7 +390,7 @@ public class QueryDataInconsistencyDUnitTest extends CacheTestCase {
         }
       }
     });
-    DistributedTestCase.join(putThread, 200, this.getLogWriter());
+    ThreadUtils.join(putThread, 200);
   }
 
   public void testRangeIndexWithIndexAndQueryFromCluaseMisMatch2() {
@@ -435,7 +442,7 @@ public class QueryDataInconsistencyDUnitTest extends CacheTestCase {
       public void run2() throws CacheException {
         QueryService qs = CacheFactory.getAnyInstance().getQueryService();
         Position pos1 = null;
-        while (!hooked){pause(100);}
+        while (!hooked){Wait.pause(100);}
         try {
           Object rs = qs.newQuery("<trace> select pos from /"+repRegionName+" p, p.collectionHolderMap.values coll, p.positions.values pos where pos.secId = 'APPL' AND p.ID = 1").execute();
           CacheFactory.getAnyInstance().getLogger().fine("Shobhit: "+rs);
@@ -447,13 +454,13 @@ public class QueryDataInconsistencyDUnitTest extends CacheTestCase {
           }          
         } catch (Exception e) {
           e.printStackTrace();
-          fail("Query execution failed on server.", e);
+          Assert.fail("Query execution failed on server.", e);
           IndexManager.testHook = null;
         } finally {
           hooked = false;//Let client put go further.
         }
         while (!hooked) {
-          pause(100);
+          Wait.pause(100);
         }
         try {
           Object rs = qs.newQuery("select pos from /"+repRegionName+" p, p.collectionHolderMap.values coll, p.positions.values pos where pos.secId = 'APPL' AND p.ID = 1").execute();
@@ -474,7 +481,7 @@ public class QueryDataInconsistencyDUnitTest extends CacheTestCase {
         }
       }
     });
-    DistributedTestCase.join(putThread, 200, this.getLogWriter());
+    ThreadUtils.join(putThread, 200);
   }
   
   public static void createProxyRegions() {
@@ -537,7 +544,7 @@ public class QueryDataInconsistencyDUnitTest extends CacheTestCase {
         Region region = cache.getRegion(repRegionName);
         for (int j = from; j < to; j++)
           region.put(new Integer(j), portfolio[j]);
-          getLogWriter()
+          LogWriterUtils.getLogWriter()
             .info(
                 "PRQueryDUnitHelper#getCacheSerializableRunnableForPRPuts: Inserted Portfolio data on Region "
                     + regionName);
@@ -551,17 +558,17 @@ public class QueryDataInconsistencyDUnitTest extends CacheTestCase {
       switch (spot) {
       case 9: //Before Index update and after region entry lock.
         hooked  = true;
-        getLogWriter().info("QueryDataInconsistency.IndexManagerTestHook is hooked in Update Index Entry.");
+        LogWriterUtils.getLogWriter().info("QueryDataInconsistency.IndexManagerTestHook is hooked in Update Index Entry.");
         while(hooked) {
-          pause(100);
+          Wait.pause(100);
         }
         assertEquals(hooked, false);
         break;
       case 10: //Before Region update and after Index Remove call.
         hooked  = true;
-        getLogWriter().info("QueryDataInconsistency.IndexManagerTestHook is hooked in Remove Index Entry.");
+        LogWriterUtils.getLogWriter().info("QueryDataInconsistency.IndexManagerTestHook is hooked in Remove Index Entry.");
         while(hooked) {
-          pause(100);
+          Wait.pause(100);
         }
         assertEquals(hooked, false);
         break;

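Beyond the same call-site substitutions, the QueryDataInconsistencyDUnitTest hunk replaces the single tearDown2() override with the new pre/post tear-down hooks on CacheTestCase, and swaps DistributedTestCase.join(thread, timeout, logWriter) for ThreadUtils.join(thread, timeout). A sketch of the hook style, assuming CacheTestCase still uses the JUnit 3 name-taking constructor; the class name and hook bodies are illustrative:

import com.gemstone.gemfire.cache30.CacheTestCase;
import com.gemstone.gemfire.test.dunit.Invoke;

public class TearDownHookSketch extends CacheTestCase {

  public TearDownHookSketch(String name) {
    super(name);
  }

  @Override
  protected final void preTearDownCacheTestCase() throws Exception {
    // runs before CacheTestCase closes its caches; was the first half of tearDown2()
    Invoke.invokeInEveryVM(CacheTestCase.class, "disconnectFromDS");
  }

  @Override
  protected final void postTearDownCacheTestCase() throws Exception {
    // runs after CacheTestCase closes its caches; was the second half of tearDown2(),
    // e.g. the per-VM QueryObserverHolder reset seen in the diff above
  }
}
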
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryIndexUsingXMLDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryIndexUsingXMLDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryIndexUsingXMLDUnitTest.java
index 1ebe01d..5e9df71 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryIndexUsingXMLDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryIndexUsingXMLDUnitTest.java
@@ -61,12 +61,17 @@ import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.LocalRegion;
 import com.gemstone.gemfire.internal.cache.PartitionedRegion;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.util.test.TestUtil;
 
 public class QueryIndexUsingXMLDUnitTest extends CacheTestCase {
@@ -130,17 +135,17 @@ public class QueryIndexUsingXMLDUnitTest extends CacheTestCase {
   public void setUp() throws Exception {
     super.setUp();
     //Workaround for #52008
-    addExpectedException("Failed to create index");
+    IgnoredException.addIgnoredException("Failed to create index");
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  @Override
+  protected final void postTearDownCacheTestCase() throws Exception {
     // Get the disk store name.
     GemFireCacheImpl cache = (GemFireCacheImpl)getCache();
     String diskStoreName = cache.getDefaultDiskStoreName();
     
     //reset TestHook
-    invokeInEveryVM(resetTestHook());
+    Invoke.invokeInEveryVM(resetTestHook());
     // close the cache.
     closeCache();
     disconnectFromDS();
@@ -161,20 +166,20 @@ public class QueryIndexUsingXMLDUnitTest extends CacheTestCase {
     VM vm1 = host.getVM(1);
     final String fileName = "IndexCreation.xml";
     
-    getLogWriter().info(
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
         "Creating index using an xml file name : " + fileName);
     
     AsyncInvocation asyInvk0 = vm0.invokeAsync(createIndexThrougXML("vm0testCreateIndexThroughXML", name, fileName));
     
     AsyncInvocation asyInvk1 = vm1.invokeAsync(createIndexThrougXML("vm1testCreateIndexThroughXML", name, fileName));
     
-    DistributedTestCase.join(asyInvk1, 30 * 1000, getLogWriter());
+    ThreadUtils.join(asyInvk1, 30 * 1000);
     if (asyInvk1.exceptionOccurred()) {
-      fail("asyInvk1 failed", asyInvk1.getException());
+      Assert.fail("asyInvk1 failed", asyInvk1.getException());
     }
-    DistributedTestCase.join(asyInvk0, 30 * 1000, getLogWriter());
+    ThreadUtils.join(asyInvk0, 30 * 1000);
     if (asyInvk0.exceptionOccurred()) {
-      fail("asyInvk0 failed", asyInvk0.getException());
+      Assert.fail("asyInvk0 failed", asyInvk0.getException());
     }
 
     // Check index for PR
@@ -228,7 +233,7 @@ public class QueryIndexUsingXMLDUnitTest extends CacheTestCase {
     VM vm1 = host.getVM(1);
     final String fileName = "IndexCreation.xml";
     
-    getLogWriter().info(
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
         "Creating index using an xml file name : " + fileName);
     
     vm0.invoke(createIndexThrougXML("vm0testCreateIndexWhileDoingGII", name, fileName));
@@ -283,7 +288,7 @@ public class QueryIndexUsingXMLDUnitTest extends CacheTestCase {
     VM vm1 = host.getVM(1);
     final String fileName = "IndexCreation.xml";
     
-    getLogWriter().info(
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
         "Creating index using an xml file name : " + fileName);
     
     vm0.invoke(createIndexThrougXML("vm0testRRegionCreateIndexWhileDoingGII", repRegName, fileName));
@@ -331,7 +336,7 @@ public class QueryIndexUsingXMLDUnitTest extends CacheTestCase {
     VM vm1 = host.getVM(1);
     final String fileName = "IndexCreation.xml";
     
-    getLogWriter().info(
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
         "Creating index using an xml file name : " + fileName);
     
     vm0.invoke(createIndexThrougXML("vm0testPersistentPRRegion", persistentRegName, fileName));
@@ -396,7 +401,7 @@ public class QueryIndexUsingXMLDUnitTest extends CacheTestCase {
     VM vm1 = host.getVM(1);
     final String fileName = "IndexCreation.xml";
     
-    getLogWriter().info("### in testCreateIndexWhileDoingGIIWithEmptyPRRegion.");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("### in testCreateIndexWhileDoingGIIWithEmptyPRRegion.");
     
 
     vm0.invoke(createIndexThrougXML("vm0testGIIWithEmptyPRRegion", name, fileName));
@@ -433,14 +438,14 @@ public class QueryIndexUsingXMLDUnitTest extends CacheTestCase {
     VM vm1 = host.getVM(1);
     final String fileName = "IndexCreation.xml";
     
-    getLogWriter().info(
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
         "Creating index using an xml file name : " + fileName);
     
     AsyncInvocation asyInvk0 = vm0.invokeAsync(createIndexThrougXML("vm0testAsyncIndexWhileDoingGII", name, fileName));
     
-    DistributedTestCase.join(asyInvk0, 30 * 1000, getLogWriter());
+    ThreadUtils.join(asyInvk0, 30 * 1000);
     if (asyInvk0.exceptionOccurred()) {
-      fail("asyInvk0 failed", asyInvk0.getException());
+      Assert.fail("asyInvk0 failed", asyInvk0.getException());
     }
     
     // LoadRegion
@@ -451,16 +456,16 @@ public class QueryIndexUsingXMLDUnitTest extends CacheTestCase {
     
     vm0.invoke(prIndexCreationCheck(name, statusIndex, 50));
 
-    DistributedTestCase.join(asyInvk1, 30 * 1000, getLogWriter());
+    ThreadUtils.join(asyInvk1, 30 * 1000);
     if (asyInvk1.exceptionOccurred()) {
-      fail("asyInvk1 failed", asyInvk1.getException());
+      Assert.fail("asyInvk1 failed", asyInvk1.getException());
     }
     
     vm1.invoke(prIndexCreationCheck(name, statusIndex, 50));
 
-    DistributedTestCase.join(asyInvk0, 30 * 1000, getLogWriter());
+    ThreadUtils.join(asyInvk0, 30 * 1000);
     if (asyInvk0.exceptionOccurred()) {
-      fail("asyInvk0 failed", asyInvk0.getException());
+      Assert.fail("asyInvk0 failed", asyInvk0.getException());
     }
     
     vm1.invoke(resetTestHook());
@@ -479,7 +484,7 @@ public class QueryIndexUsingXMLDUnitTest extends CacheTestCase {
     VM vm1 = host.getVM(1);
     final String fileName = "IndexCreation.xml";
     
-    getLogWriter().info(
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
         "Creating index using an xml file name : " + fileName);
     
     vm0.invoke(createIndexThrougXML("vm0testIndexCompareQResults", name, fileName));
@@ -542,13 +547,13 @@ public class QueryIndexUsingXMLDUnitTest extends CacheTestCase {
     VM vm1 = host.getVM(1);
     final String fileName = "IndexCreation.xml";
     
-    getLogWriter().info(
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
         "Creating index using an xml file name : " + fileName);
     
     AsyncInvocation asyInvk0 = vm0.invokeAsync(createIndexThrougXML("vm0testCreateAsyncIndexGIIAndQuery", name, fileName));
-    DistributedTestCase.join(asyInvk0, 30 * 1000, getLogWriter());
+    ThreadUtils.join(asyInvk0, 30 * 1000);
     if (asyInvk0.exceptionOccurred()) {
-      fail("asyInvk0 failed", asyInvk0.getException());
+      Assert.fail("asyInvk0 failed", asyInvk0.getException());
     }
     
     // LoadRegion
@@ -558,13 +563,13 @@ public class QueryIndexUsingXMLDUnitTest extends CacheTestCase {
     AsyncInvocation asyInvk1 = vm1.invokeAsync(createIndexThrougXML("vm1testCreateAsyncIndexGIIAndQuery", name, fileName));
  
     
-    DistributedTestCase.join(asyInvk1, 30 * 1000, getLogWriter());  
+    ThreadUtils.join(asyInvk1, 30 * 1000);  
     if (asyInvk1.exceptionOccurred()) {
-      fail("asyInvk1 failed", asyInvk1.getException());
+      Assert.fail("asyInvk1 failed", asyInvk1.getException());
     }
-    DistributedTestCase.join(asyInvk0, 30 * 1000, getLogWriter());
+    ThreadUtils.join(asyInvk0, 30 * 1000);
     if (asyInvk0.exceptionOccurred()) {
-      fail("asyInvk0 failed", asyInvk0.getException());
+      Assert.fail("asyInvk0 failed", asyInvk0.getException());
     }
  
     vm0.invoke(prIndexCreationCheck(name, statusIndex, 50));
@@ -592,7 +597,7 @@ public class QueryIndexUsingXMLDUnitTest extends CacheTestCase {
     VM vm1 = host.getVM(1);
     final String fileName = "IndexCreation.xml";
     
-    getLogWriter().info(
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
         "Creating index using an xml file name : " + fileName);
     
     vm0.invoke(createIndexThrougXML("vm0testAsyncIndexAndCompareQResults", name, fileName));
@@ -611,9 +616,9 @@ public class QueryIndexUsingXMLDUnitTest extends CacheTestCase {
     vm1.invoke(setTestHook());    
     vm1.invoke(createIndexThrougXML("vm1testAsyncIndexAndCompareQResults", name, fileName));
     
-    DistributedTestCase.join(asyInvk0, 30 * 1000, getLogWriter());
+    ThreadUtils.join(asyInvk0, 30 * 1000);
     if (asyInvk0.exceptionOccurred()) {
-      fail("asyInvk0 failed", asyInvk0.getException());
+      Assert.fail("asyInvk0 failed", asyInvk0.getException());
     }
      
     vm1.invoke(prIndexCreationCheck(persistentRegName, "secIndex", 50));
@@ -635,7 +640,7 @@ public class QueryIndexUsingXMLDUnitTest extends CacheTestCase {
     VM vm1 = host.getVM(1);
     final String fileName = "IndexCreation.xml";
     
-    getLogWriter().info(
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
         "Creating index using an xml file name : " + fileName);
     //create index using xml
     vm0.invoke(createIndexThrougXML("vm0testIndexCreationForReplicatedPersistentOverFlowRegionOnRestart", persistentOverFlowRegName, fileName));
@@ -781,7 +786,7 @@ public class QueryIndexUsingXMLDUnitTest extends CacheTestCase {
         return "Number of Indexed Bucket is less than the expected number. "+ bucketCount + ", " + index.getNumberOfIndexedBuckets();
       }
     };
-    DistributedTestCase.waitForCriterion(ev, MAX_TIME, 200, true);
+    Wait.waitForCriterion(ev, MAX_TIME, 200, true);
     return true;
   }
   
@@ -882,7 +887,7 @@ public class QueryIndexUsingXMLDUnitTest extends CacheTestCase {
             }
 
             // compare.
-            getLogWriter().info("Execute query : \n queryStr with index: " + s[0]  + " \n queryStr without index: " + s[1]);
+            com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Execute query : \n queryStr with index: " + s[0]  + " \n queryStr without index: " + s[1]);
             ssORrs.CompareQueryResultsWithoutAndWithIndexes(sr, 1, s);
           }
         }
@@ -942,7 +947,7 @@ public class QueryIndexUsingXMLDUnitTest extends CacheTestCase {
     new Exception("TEST DEBUG###" + diskStoreId).printStackTrace();
     if (system == null || !system.isConnected()) {
       // Figure out our distributed system properties
-      Properties p = getAllDistributedSystemProperties(getDistributedSystemProperties());
+      Properties p = DistributedTestUtils.getAllDistributedSystemProperties(getDistributedSystemProperties());
       system = (InternalDistributedSystem)DistributedSystem.connect(p);
     } 
     return system;
@@ -956,13 +961,13 @@ public class QueryIndexUsingXMLDUnitTest extends CacheTestCase {
         System.setProperty("gemfire.DISABLE_DISCONNECT_DS_ON_CACHE_CLOSE", "true");
         cache = CacheFactory.create(system); 
       } catch (CacheExistsException e) {
-        fail("the cache already exists", e);
+        Assert.fail("the cache already exists", e);
 
       } catch (RuntimeException ex) {
         throw ex;
 
       } catch (Exception ex) {
-        fail("Checked exception while initializing cache??", ex);
+        Assert.fail("Checked exception while initializing cache??", ex);
       } finally {
         System.clearProperty("gemfire.DISABLE_DISCONNECT_DS_ON_CACHE_CLOSE");
       }      

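The QueryIndexUsingXMLDUnitTest hunks repeat one join-and-check idiom: each AsyncInvocation is joined through ThreadUtils and any remote failure is rethrown via Assert.fail. A condensed sketch of that idiom; the helper name and the SerializableRunnable task are stand-ins:

import com.gemstone.gemfire.test.dunit.Assert;
import com.gemstone.gemfire.test.dunit.AsyncInvocation;
import com.gemstone.gemfire.test.dunit.SerializableRunnable;
import com.gemstone.gemfire.test.dunit.ThreadUtils;
import com.gemstone.gemfire.test.dunit.VM;

public class AsyncJoinSketch {
  static void runInBothVms(VM vm0, VM vm1, SerializableRunnable task) {
    AsyncInvocation asyInvk0 = vm0.invokeAsync(task);
    AsyncInvocation asyInvk1 = vm1.invokeAsync(task);

    // was: DistributedTestCase.join(asyInvk1, 30 * 1000, getLogWriter());
    ThreadUtils.join(asyInvk1, 30 * 1000);
    if (asyInvk1.exceptionOccurred()) {
      // was: fail("asyInvk1 failed", asyInvk1.getException());
      Assert.fail("asyInvk1 failed", asyInvk1.getException());
    }

    ThreadUtils.join(asyInvk0, 30 * 1000);
    if (asyInvk0.exceptionOccurred()) {
      Assert.fail("asyInvk0 failed", asyInvk0.getException());
    }
  }
}
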
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryParamsAuthorizationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryParamsAuthorizationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryParamsAuthorizationDUnitTest.java
index 8030f09..5984576 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryParamsAuthorizationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/dunit/QueryParamsAuthorizationDUnitTest.java
@@ -32,6 +32,7 @@ import com.gemstone.gemfire.cache.server.CacheServer;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -83,7 +84,7 @@ public class QueryParamsAuthorizationDUnitTest extends CacheTestCase {
       @Override
       public Object call() throws Exception {
         ClientCacheFactory ccf = new ClientCacheFactory()
-            .addPoolServer(getServerHostName(server1.getHost()), port)
+            .addPoolServer(NetworkUtils.getServerHostName(server1.getHost()), port)
             .set("security-client-auth-init",
                 "templates.security.UserPasswordAuthInit.create")
             .set("security-username", "root").set("security-password", "root");


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/ClientServerFunctionExecutionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/ClientServerFunctionExecutionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/ClientServerFunctionExecutionDUnitTest.java
index 1e78da2..3890498 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/ClientServerFunctionExecutionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/ClientServerFunctionExecutionDUnitTest.java
@@ -33,8 +33,13 @@ import com.gemstone.gemfire.cache.execute.ResultCollector;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.cache.functions.TestFunction;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheServerTestUtil;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 import java.io.Serializable;
 import java.util.ArrayList;
@@ -59,7 +64,7 @@ public class ClientServerFunctionExecutionDUnitTest extends PRClientServerTestBa
 
   public void setUp() throws Exception {
     super.setUp();
-    addExpectedException("java.net.ConnectException");
+    IgnoredException.addIgnoredException("java.net.ConnectException");
   }
 
   
@@ -97,7 +102,7 @@ public class ClientServerFunctionExecutionDUnitTest extends PRClientServerTestBa
     registerFunctionAtServer(function);
     isByName = new Boolean(true);   
     toRegister = new Boolean(true);
-    getLogWriter().info("ClientServerFFunctionExecutionDUnitTest#testServerSingleKeyExecution_byName : Starting test");
+    LogWriterUtils.getLogWriter().info("ClientServerFFunctionExecutionDUnitTest#testServerSingleKeyExecution_byName : Starting test");
     client.invoke(ClientServerFunctionExecutionDUnitTest.class,
         "serverExecution", new Object[] { isByName, function, toRegister});
     client.invoke(ClientServerFunctionExecutionDUnitTest.class,
@@ -114,7 +119,7 @@ public class ClientServerFunctionExecutionDUnitTest extends PRClientServerTestBa
     registerFunctionAtServer(function);
     isByName = new Boolean(true);   
     toRegister = new Boolean(true);
-    getLogWriter().info("ClientServerFFunctionExecutionDUnitTest#testServerSingleKeyExecution_byName : Starting test");
+    LogWriterUtils.getLogWriter().info("ClientServerFFunctionExecutionDUnitTest#testServerSingleKeyExecution_byName : Starting test");
     client.invoke(ClientServerFunctionExecutionDUnitTest.class,
         "serverExecution_SendException", new Object[] { isByName, function, toRegister});
     client.invoke(ClientServerFunctionExecutionDUnitTest.class,
@@ -133,7 +138,7 @@ public class ClientServerFunctionExecutionDUnitTest extends PRClientServerTestBa
     registerFunctionAtServer(function);
     isByName = new Boolean(true);   
     toRegister = new Boolean(true);
-    getLogWriter().info("ClientServerFFunctionExecutionDUnitTest#testServerSingleKeyExecution_byName : Starting test");
+    LogWriterUtils.getLogWriter().info("ClientServerFFunctionExecutionDUnitTest#testServerSingleKeyExecution_byName : Starting test");
     client.invoke(ClientServerFunctionExecutionDUnitTest.class,
         "serverExecution_NoLastResult", new Object[] { isByName, function, toRegister});
     client.invoke(ClientServerFunctionExecutionDUnitTest.class,
@@ -149,7 +154,7 @@ public class ClientServerFunctionExecutionDUnitTest extends PRClientServerTestBa
     registerFunctionAtServer(function);
     isByName = new Boolean(true);  
     toRegister = new Boolean(false);
-    getLogWriter().info("ClientServerFFunctionExecutionDUnitTest#testServerSingleKeyExecution_byName : Starting test");
+    LogWriterUtils.getLogWriter().info("ClientServerFFunctionExecutionDUnitTest#testServerSingleKeyExecution_byName : Starting test");
     client.invoke(ClientServerFunctionExecutionDUnitTest.class,
         "serverExecution", new Object[] { isByName, function, toRegister});
     client.invoke(ClientServerFunctionExecutionDUnitTest.class,
@@ -160,7 +165,7 @@ public class ClientServerFunctionExecutionDUnitTest extends PRClientServerTestBa
    */   
   public void testServerExecution_byInlineFunction() {
     createScenario();
-    getLogWriter().info("ClientServerFunctionExecutionDUnitTest#testServerSingleKeyExecution_byName : Starting test");
+    LogWriterUtils.getLogWriter().info("ClientServerFunctionExecutionDUnitTest#testServerSingleKeyExecution_byName : Starting test");
     client.invoke(ClientServerFunctionExecutionDUnitTest.class,
         "serverExecution_Inline");
     client.invoke(ClientServerFunctionExecutionDUnitTest.class,
@@ -173,7 +178,7 @@ public class ClientServerFunctionExecutionDUnitTest extends PRClientServerTestBa
    */   
   public void testServerExecution_byInlineFunction_InvalidAttrbiutes() {
     createScenario();
-    getLogWriter().info("ClientServerFunctionExecutionDUnitTest#testServerSingleKeyExecution_byName : Starting test");
+    LogWriterUtils.getLogWriter().info("ClientServerFunctionExecutionDUnitTest#testServerSingleKeyExecution_byName : Starting test");
     client.invoke(ClientServerFunctionExecutionDUnitTest.class,
         "serverExecution_Inline_InvalidAttributes");
   }
@@ -183,7 +188,7 @@ public class ClientServerFunctionExecutionDUnitTest extends PRClientServerTestBa
    */
   public void testBug40714() {
     createScenario();
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "ClientServerFunctionExecutionDUnitTest#testBug40714 : Starting test");
 
@@ -253,7 +258,7 @@ public class ClientServerFunctionExecutionDUnitTest extends PRClientServerTestBa
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      getLogWriter().info("Exception : ", ex);
+      LogWriterUtils.getLogWriter().info("Exception : ", ex);
       fail("Test failed after the execute operation.");
     }
   }
@@ -268,7 +273,7 @@ public class ClientServerFunctionExecutionDUnitTest extends PRClientServerTestBa
     registerFunctionAtServer(function);
     isByName = new Boolean(true);
     toRegister = new Boolean(true);    
-    getLogWriter().info("ClientServerFFunctionExecutionDUnitTest#testServerSingleKeyExecution_byName : Starting test");
+    LogWriterUtils.getLogWriter().info("ClientServerFFunctionExecutionDUnitTest#testServerSingleKeyExecution_byName : Starting test");
     client.invoke(ClientServerFunctionExecutionDUnitTest.class,
         "serverExecution", new Object[] { isByName, function, toRegister});
     client.invoke(ClientServerFunctionExecutionDUnitTest.class,
@@ -282,7 +287,7 @@ public class ClientServerFunctionExecutionDUnitTest extends PRClientServerTestBa
     registerFunctionAtServer(function);
     isByName = new Boolean(true);
     toRegister = new Boolean(false);
-    getLogWriter().info("ClientServerFFunctionExecutionDUnitTest#testServerSingleKeyExecution_byName : Starting test");
+    LogWriterUtils.getLogWriter().info("ClientServerFFunctionExecutionDUnitTest#testServerSingleKeyExecution_byName : Starting test");
     client.invoke(ClientServerFunctionExecutionDUnitTest.class,
         "serverExecution", new Object[] { isByName, function, toRegister});
     client.invoke(ClientServerFunctionExecutionDUnitTest.class,
@@ -300,7 +305,7 @@ public class ClientServerFunctionExecutionDUnitTest extends PRClientServerTestBa
     //The test code appears to trigger this because the first
     //call to the function disconnects from the DS but does not call
     //last result;
-    addExpectedException("did not send last result");
+    IgnoredException.addIgnoredException("did not send last result");
     createScenario();
     
     server1.invoke(
@@ -317,7 +322,7 @@ public class ClientServerFunctionExecutionDUnitTest extends PRClientServerTestBa
     
     client.invoke(ClientServerFunctionExecutionDUnitTest.class,
         "createProxyRegion",
-        new Object[] { getServerHostName(server1.getHost()) });
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()) });
     
     function = new TestFunction(true, TestFunction.TEST_FUNCTION_HA_SERVER);
     registerFunctionAtServer(function);
@@ -337,8 +342,8 @@ public class ClientServerFunctionExecutionDUnitTest extends PRClientServerTestBa
     //The test code appears to trigger this because the first
     //call to the function disconnects from the DS but does not call
     //last result;
-    addExpectedException("Socket Closed");
-    addExpectedException("did not send last result");
+    IgnoredException.addIgnoredException("Socket Closed");
+    IgnoredException.addIgnoredException("did not send last result");
     createScenario();
     
     server1.invoke(
@@ -355,7 +360,7 @@ public class ClientServerFunctionExecutionDUnitTest extends PRClientServerTestBa
     
     client.invoke(ClientServerFunctionExecutionDUnitTest.class,
         "createProxyRegion",
-        new Object[] { getServerHostName(server1.getHost()) });
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()) });
     
     function = new TestFunction(true, TestFunction.TEST_FUNCTION_HA_SERVER);
     registerFunctionAtServer(function);
@@ -379,7 +384,7 @@ public class ClientServerFunctionExecutionDUnitTest extends PRClientServerTestBa
     //The test code appears to trigger this because the first
     //call to the function disconnects from the DS but does not call
     //last result;
-    addExpectedException("did not send last result");
+    IgnoredException.addIgnoredException("did not send last result");
     createScenario();
     server1.invoke(
         ClientServerFunctionExecutionDUnitTest.class,
@@ -395,7 +400,7 @@ public class ClientServerFunctionExecutionDUnitTest extends PRClientServerTestBa
     
     client.invoke(ClientServerFunctionExecutionDUnitTest.class,
         "createProxyRegion",
-        new Object[] { getServerHostName(server1.getHost()) });
+        new Object[] { NetworkUtils.getServerHostName(server1.getHost()) });
     
     function = new TestFunction(true, TestFunction.TEST_FUNCTION_NONHA_SERVER);
     registerFunctionAtServer(function);
@@ -426,7 +431,7 @@ public class ClientServerFunctionExecutionDUnitTest extends PRClientServerTestBa
   
   
   private void createScenario() {
-    getLogWriter().info("ClientServerFFunctionExecutionDUnitTest#createScenario : creating scenario");
+    LogWriterUtils.getLogWriter().info("ClientServerFFunctionExecutionDUnitTest#createScenario : creating scenario");
     createClientServerScenarionWithoutRegion();    
   }
    
@@ -445,7 +450,7 @@ public class ClientServerFunctionExecutionDUnitTest extends PRClientServerTestBa
       
     }catch (Exception ex) {
       ex.printStackTrace();
-      getLogWriter().info("Exception : " , ex);
+      LogWriterUtils.getLogWriter().info("Exception : " , ex);
       fail("Test failed after the execute operation");
     }
     
@@ -465,7 +470,7 @@ public class ClientServerFunctionExecutionDUnitTest extends PRClientServerTestBa
       
     }catch (Exception ex) {
       ex.printStackTrace();
-      getLogWriter().info("Exception : " , ex);
+      LogWriterUtils.getLogWriter().info("Exception : " , ex);
       fail("Test failed after the execute operations");
     }
   }
@@ -502,7 +507,7 @@ public class ClientServerFunctionExecutionDUnitTest extends PRClientServerTestBa
       
     }catch (Exception ex) {
       ex.printStackTrace();
-      getLogWriter().info("Exception : " , ex);
+      LogWriterUtils.getLogWriter().info("Exception : " , ex);
       fail("Test failed after the execute operation");
     }
     
@@ -529,7 +534,7 @@ public class ClientServerFunctionExecutionDUnitTest extends PRClientServerTestBa
       
     }catch (Exception ex) {
       ex.printStackTrace();
-      getLogWriter().info("Exception : " , ex);
+      LogWriterUtils.getLogWriter().info("Exception : " , ex);
       fail("Test failed after the execute operations");
     }
   }
@@ -574,7 +579,7 @@ public class ClientServerFunctionExecutionDUnitTest extends PRClientServerTestBa
       String excuse;
       public boolean done() {
         int sz = pool.getConnectedServerCount();
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "Checking for the Live Servers : Expected  : " + expectedLiveServers
                 + " Available :" + sz);
         if (sz == expectedLiveServers.intValue()) {
@@ -587,7 +592,7 @@ public class ClientServerFunctionExecutionDUnitTest extends PRClientServerTestBa
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 3 * 60 * 1000, 1000, true);
+    Wait.waitForCriterion(wc, 3 * 60 * 1000, 1000, true);
   }
   
   public static Object serverExecutionHAOneServerDown(Boolean isByName, Function function,
@@ -608,7 +613,7 @@ public class ClientServerFunctionExecutionDUnitTest extends PRClientServerTestBa
       assertEquals(retryRegionName, ((List)rs.getResult()).get(0));
     } catch (Exception ex) {
       ex.printStackTrace();
-      getLogWriter().info("Exception : ", ex);
+      LogWriterUtils.getLogWriter().info("Exception : ", ex);
       fail("Test failed after the execute operation");
     }
     return rs.getResult();
@@ -631,7 +636,7 @@ public class ClientServerFunctionExecutionDUnitTest extends PRClientServerTestBa
     } catch (Exception ex) {
       if (!(ex instanceof ServerConnectivityException)) {
         ex.printStackTrace();
-        getLogWriter().info("Exception : ", ex);
+        LogWriterUtils.getLogWriter().info("Exception : ", ex);
         fail("Test failed after the execute operation");
       }
     }
@@ -655,7 +660,7 @@ public class ClientServerFunctionExecutionDUnitTest extends PRClientServerTestBa
     } catch (Exception ex) {
       if (!(ex instanceof ServerConnectivityException)) {
         ex.printStackTrace();
-        getLogWriter().info("Exception : ", ex);
+        LogWriterUtils.getLogWriter().info("Exception : ", ex);
         fail("Test failed after the execute operation");
       }
     }
@@ -678,7 +683,7 @@ public class ClientServerFunctionExecutionDUnitTest extends PRClientServerTestBa
       assertTrue(((Integer)list.get(1)) == 5);
     } catch (Exception ex) {
       ex.printStackTrace();
-      fail("This is not expected Exception", ex);
+      Assert.fail("This is not expected Exception", ex);
     }
   }
 
@@ -729,7 +734,7 @@ public class ClientServerFunctionExecutionDUnitTest extends PRClientServerTestBa
       
     }catch (Exception ex) {
       ex.printStackTrace();
-      getLogWriter().info("Exception : " , ex);
+      LogWriterUtils.getLogWriter().info("Exception : " , ex);
       fail("Test failed after the execute operation nn TRUE");
     }
   }
@@ -764,7 +769,7 @@ public static void serverExecution_Inline_InvalidAttributes() {
       fail("Should have failed with Invalid attributes.");
       
     }catch (Exception ex) {
-      getLogWriter().info("Exception : " , ex);
+      LogWriterUtils.getLogWriter().info("Exception : " , ex);
       assertTrue(ex.getMessage().contains(
           "For Functions with isHA true, hasResult must also be true."));
     }
@@ -793,7 +798,7 @@ public static void serverExecution_Inline_InvalidAttributes() {
       
     }catch (Exception ex) {
       ex.printStackTrace();
-      getLogWriter().info("Exception : " , ex);
+      LogWriterUtils.getLogWriter().info("Exception : " , ex);
       fail("Test failed after the execute operation");
     }
     
@@ -818,7 +823,7 @@ public static void serverExecution_Inline_InvalidAttributes() {
           
     }catch (Exception ex) {
       ex.printStackTrace();
-      getLogWriter().info("Exception : " , ex);
+      LogWriterUtils.getLogWriter().info("Exception : " , ex);
       fail("Test failed after the execute operation");
     }
     
@@ -846,7 +851,7 @@ public static void allServerExecution_SendException(Boolean isByName, Function f
       
     }catch (Exception ex) {
       ex.printStackTrace();
-      getLogWriter().info("Exception : " , ex);
+      LogWriterUtils.getLogWriter().info("Exception : " , ex);
       fail("Test failed after the execute operation");
     }
     
@@ -872,7 +877,7 @@ public static void allServerExecution_SendException(Boolean isByName, Function f
       
     }catch (Exception ex) {
       ex.printStackTrace();
-      getLogWriter().info("Exception : " , ex);
+      LogWriterUtils.getLogWriter().info("Exception : " , ex);
       fail("Test failed after the execute operation");
     }
     
@@ -927,7 +932,7 @@ public static void allServerExecution_SendException(Boolean isByName, Function f
            
     }catch (Exception ex) {
       ex.printStackTrace();
-      getLogWriter().info("Exception : " , ex);
+      LogWriterUtils.getLogWriter().info("Exception : " , ex);
       fail("Test failed after the execute operation asdfasdfa   ");
     }
   }
@@ -935,17 +940,13 @@ public static void allServerExecution_SendException(Boolean isByName, Function f
   private static ResultCollector execute(Execution member,
       Serializable args, Function function, Boolean isByName) throws Exception {
     if (isByName.booleanValue()) {// by name
-      getLogWriter().info("The function name to execute : " + function.getId());
+      LogWriterUtils.getLogWriter().info("The function name to execute : " + function.getId());
       Execution me = member.withArgs(args);   
-      getLogWriter().info("The args passed  : " + args);
+      LogWriterUtils.getLogWriter().info("The args passed  : " + args);
       return me.execute(function.getId()); 
     }
     else { // By Instance
       return member.withArgs(args).execute(function);
     }
   }
-  
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-  }
 }

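The ClientServerFunctionExecutionDUnitTest hunks show the same substitutions plus the WaitCriterion polling idiom, now routed through Wait.waitForCriterion; the redundant no-op tearDown2() override is simply deleted. A small sketch of the polling idiom, with an AtomicInteger standing in for the pool.getConnectedServerCount() call in the test above:

import java.util.concurrent.atomic.AtomicInteger;

import com.gemstone.gemfire.test.dunit.Wait;
import com.gemstone.gemfire.test.dunit.WaitCriterion;

public class WaitCriterionSketch {
  static void awaitCount(final AtomicInteger liveServers, final int expected) {
    WaitCriterion wc = new WaitCriterion() {
      String excuse;
      public boolean done() {
        // poll the observed value until it matches the expectation
        int sz = liveServers.get();
        if (sz == expected) {
          return true;
        }
        excuse = "Expected " + expected + " live servers but found " + sz;
        return false;
      }
      public String description() {
        return excuse;
      }
    };
    // was: DistributedTestCase.waitForCriterion(wc, 3 * 60 * 1000, 1000, true);
    Wait.waitForCriterion(wc, 3 * 60 * 1000, 1000, true);
  }
}
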
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/ColocationFailoverDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/ColocationFailoverDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/ColocationFailoverDUnitTest.java
index 81b9984..2ae358d 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/ColocationFailoverDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/ColocationFailoverDUnitTest.java
@@ -39,10 +39,15 @@ import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.cache.PartitionedRegion;
 import com.gemstone.gemfire.internal.cache.partitioned.RegionAdvisor;
 import com.gemstone.gemfire.internal.logging.InternalLogWriter;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 public class ColocationFailoverDUnitTest extends DistributedTestCase {
 
@@ -100,7 +105,7 @@ public class ColocationFailoverDUnitTest extends DistributedTestCase {
     putInPRs();
     verifyColocationInAllVms();
     dataStore1.invoke(ColocationFailoverDUnitTest.class, "closeCache");
-    pause(5000); //wait for volunteering primary
+    Wait.pause(5000); //wait for volunteering primary
     verifyColocationAfterFailover();
   }
   
@@ -206,12 +211,12 @@ public class ColocationFailoverDUnitTest extends DistributedTestCase {
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+    Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
   }
 
   
   protected static void dump() {
-    final InternalLogWriter logger = getLogWriter();
+    final InternalLogWriter logger = LogWriterUtils.getLogWriter();
     ((PartitionedRegion)customerPR).dumpAllBuckets(false);
     ((PartitionedRegion)orderPR).dumpAllBuckets(false);
     ((PartitionedRegion)shipmentPR).dumpAllBuckets(false);
@@ -348,7 +353,7 @@ public class ColocationFailoverDUnitTest extends DistributedTestCase {
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 2 * 60 * 1000, 1000, true);
+    Wait.waitForCriterion(wc, 2 * 60 * 1000, 1000, true);
   }
 
   public static void createCacheInAllVms() {
@@ -374,7 +379,7 @@ public class ColocationFailoverDUnitTest extends DistributedTestCase {
       assertNotNull(cache);
     }
     catch (Exception e) {
-      fail("Failed while creating the cache", e);
+      Assert.fail("Failed while creating the cache", e);
     }
   }
 
@@ -426,7 +431,7 @@ public class ColocationFailoverDUnitTest extends DistributedTestCase {
     if (partitionedRegionName.equals(customerPR_Name)) {
       customerPR = cache.createRegion(partitionedRegionName, attr.create());
       assertNotNull(customerPR);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "Partitioned Region " + partitionedRegionName
               + " created Successfully :" + customerPR);
 
@@ -434,7 +439,7 @@ public class ColocationFailoverDUnitTest extends DistributedTestCase {
     if (partitionedRegionName.equals(orderPR_Name)) {
       orderPR = cache.createRegion(partitionedRegionName, attr.create());
       assertNotNull(orderPR);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "Partitioned Region " + partitionedRegionName
               + " created Successfully :" + orderPR);
 
@@ -443,7 +448,7 @@ public class ColocationFailoverDUnitTest extends DistributedTestCase {
     if (partitionedRegionName.equals(shipmentPR_Name)) {
       shipmentPR = cache.createRegion(partitionedRegionName, attr.create());
       assertNotNull(shipmentPR);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "Partitioned Region " + partitionedRegionName
               + " created Successfully :" + shipmentPR);
 
@@ -466,14 +471,14 @@ public class ColocationFailoverDUnitTest extends DistributedTestCase {
     }
   }
 
-  public void tearDown2() throws Exception {
+  @Override
+  protected final void preTearDown() throws Exception {
     closeCache();
-    invokeInEveryVM(new SerializableRunnable() {
+    Invoke.invokeInEveryVM(new SerializableRunnable() {
       public void run() {
         closeCache();
       }
     });
-    super.tearDown2();
   }
 }
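
The hunks above show the core of the refactoring for this file: the pause and waitForCriterion helpers inherited from DistributedTestCase become static calls on the new Wait utility, and the tearDown2 override becomes a preTearDown hook driven through Invoke.invokeInEveryVM. A minimal sketch of the waiting idiom, assuming WaitCriterion exposes the done()/description() pair that the surrounding usages suggest; the polled condition is a hypothetical placeholder:

    import java.util.concurrent.atomic.AtomicBoolean;

    import com.gemstone.gemfire.test.dunit.Wait;
    import com.gemstone.gemfire.test.dunit.WaitCriterion;

    public class WaitCriterionSketch {
      // Polls every second for up to 60 seconds; the final 'true' asks
      // Wait.waitForCriterion to fail the test if the criterion never holds.
      static void awaitPrimaryVolunteering(final AtomicBoolean primaryElected) {
        WaitCriterion wc = new WaitCriterion() {
          public boolean done() {
            return primaryElected.get();          // hypothetical condition
          }
          public String description() {
            return "waiting for a new primary to volunteer";
          }
        };
        Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
      }
    }

A polled criterion like this replaces a fixed Wait.pause(...) only where there is a concrete condition to check; the hunks above keep pause() for the primary-volunteering delay, where there is not.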
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/DistributedRegionFunctionExecutionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/DistributedRegionFunctionExecutionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/DistributedRegionFunctionExecutionDUnitTest.java
index 286a5bd..66d8ade 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/DistributedRegionFunctionExecutionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/DistributedRegionFunctionExecutionDUnitTest.java
@@ -52,10 +52,16 @@ import com.gemstone.gemfire.internal.cache.functions.DistribuedRegionFunctionFun
 import com.gemstone.gemfire.internal.cache.functions.DistributedRegionFunction;
 import com.gemstone.gemfire.internal.cache.functions.TestFunction;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheServerTestUtil;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 public class DistributedRegionFunctionExecutionDUnitTest extends
     DistributedTestCase {
@@ -93,7 +99,7 @@ public class DistributedRegionFunctionExecutionDUnitTest extends
   }
   
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDown() throws Exception {
     // this test creates a cache that is incompatible with CacheTestCase,
     // so we need to close it and null out the cache variable
     disconnectAllFromDS();
@@ -314,7 +320,7 @@ public class DistributedRegionFunctionExecutionDUnitTest extends
         "registerFunction", new Object[] { new Boolean(true), new Integer(5) });
 
     // add expected exception to avoid suspect strings
-    final ExpectedException ex = addExpectedException("I have been thrown");
+    final IgnoredException ex = IgnoredException.addIgnoredException("I have been thrown");
     replicate1.invoke(DistributedRegionFunctionExecutionDUnitTest.class,
         "executeFunctionFunctionInvocationTargetException");
     ex.remove();
@@ -361,7 +367,7 @@ public class DistributedRegionFunctionExecutionDUnitTest extends
                 new Integer(0) });
 
     // add expected exception to avoid suspect strings
-    final ExpectedException ex = addExpectedException("I have been thrown");
+    final IgnoredException ex = IgnoredException.addIgnoredException("I have been thrown");
     replicate1.invoke(DistributedRegionFunctionExecutionDUnitTest.class,
         "executeFunctionFunctionInvocationTargetExceptionWithoutHA");
     ex.remove();
@@ -401,7 +407,7 @@ public class DistributedRegionFunctionExecutionDUnitTest extends
         "registerFunction", new Object[] { new Boolean(true), new Integer(5) });
 
     // add expected exception to avoid suspect strings
-    final ExpectedException ex = addExpectedException("I have been thrown");
+    final IgnoredException ex = IgnoredException.addIgnoredException("I have been thrown");
     executeFunctionFunctionInvocationTargetException();
     ex.remove();
   }
@@ -443,7 +449,7 @@ public class DistributedRegionFunctionExecutionDUnitTest extends
             new Integer(0) });
 
     // add expected exception to avoid suspect strings
-    final ExpectedException ex = addExpectedException("I have been thrown");
+    final IgnoredException ex = IgnoredException.addIgnoredException("I have been thrown");
     executeFunctionFunctionInvocationTargetExceptionWithoutHA();
     ex.remove();
   }
@@ -531,9 +537,9 @@ public class DistributedRegionFunctionExecutionDUnitTest extends
     replicate1.invoke(DistributedRegionFunctionExecutionDUnitTest.class,
         "disconnect");
 
-    DistributedTestCase.join(async[0], 50 * 1000, getLogWriter());
+    ThreadUtils.join(async[0], 50 * 1000);
     if (async[0].getException() != null) {
-      fail("UnExpected Exception Occured : ", async[0].getException());
+      Assert.fail("UnExpected Exception Occured : ", async[0].getException());
     }
     List l = (List)async[0].getReturnValue();
     assertEquals(5001, l.size());
@@ -634,9 +640,9 @@ public class DistributedRegionFunctionExecutionDUnitTest extends
     createClientAndPopulateClientRegion(DataPolicy.EMPTY, port1, port2);
     // add ExpectedException's to servers since client can connect to any
     // one of those
-    final ExpectedException expectedEx = addExpectedException(
+    final IgnoredException expectedEx = IgnoredException.addIgnoredException(
         "did not send last result", empty1);
-    final ExpectedException expectedEx2 = addExpectedException(
+    final IgnoredException expectedEx2 = IgnoredException.addIgnoredException(
         "did not send last result", empty2);
     try {
       executeFunction_NoLastResult();
@@ -687,9 +693,9 @@ public class DistributedRegionFunctionExecutionDUnitTest extends
         "startServerHA");
     emptyServer1.invoke(DistributedRegionFunctionExecutionDUnitTest.class,
         "closeCacheHA");
-    DistributedTestCase.join(async[0], 4 * 60 * 1000, getLogWriter());
+    ThreadUtils.join(async[0], 4 * 60 * 1000);
     if (async[0].getException() != null) {
-      fail("UnExpected Exception Occured : ", async[0].getException());
+      Assert.fail("UnExpected Exception Occured : ", async[0].getException());
     }
     List l = (List)async[0].getReturnValue();
     assertEquals(5001, l.size());
@@ -730,7 +736,7 @@ public class DistributedRegionFunctionExecutionDUnitTest extends
 
     createClientAndPopulateClientRegion(DataPolicy.EMPTY, port1, port2);
     // add expected exception
-    final ExpectedException ex = addExpectedException(
+    final IgnoredException ex = IgnoredException.addIgnoredException(
         "DataPolicy.NORMAL is not supported");
     try {
       executeFunction();
@@ -846,7 +852,7 @@ public class DistributedRegionFunctionExecutionDUnitTest extends
     createClientAndPopulateClientRegion(DataPolicy.EMPTY, port1, port2);
 
     // add expected exception to avoid suspect strings
-    final ExpectedException ex = addExpectedException("I have been thrown");
+    final IgnoredException ex = IgnoredException.addIgnoredException("I have been thrown");
     executeFunctionFunctionInvocationTargetException_ClientServer();
     ex.remove();
   }
@@ -897,7 +903,7 @@ public class DistributedRegionFunctionExecutionDUnitTest extends
     createClientAndPopulateClientRegion(DataPolicy.EMPTY, port1, port2);
 
     // add expected exception to avoid suspect strings
-    final ExpectedException ex = addExpectedException("I have been thrown");
+    final IgnoredException ex = IgnoredException.addIgnoredException("I have been thrown");
     executeFunctionFunctionInvocationTargetException_ClientServer_WithoutHA();
     ex.remove();
   }
@@ -939,7 +945,7 @@ public class DistributedRegionFunctionExecutionDUnitTest extends
         "registerFunction", new Object[] { new Boolean(true), new Integer(5) });
 
     // add expected exception to avoid suspect strings
-    final ExpectedException ex = addExpectedException("I have been thrown");
+    final IgnoredException ex = IgnoredException.addIgnoredException("I have been thrown");
     executeFunctionFunctionInvocationTargetException_ClientServer();
     ex.remove();
   }
@@ -989,7 +995,7 @@ public class DistributedRegionFunctionExecutionDUnitTest extends
                 new Integer(0) });
 
     // add expected exception to avoid suspect strings
-    final ExpectedException ex = addExpectedException("I have been thrown");
+    final IgnoredException ex = IgnoredException.addIgnoredException("I have been thrown");
     executeFunctionFunctionInvocationTargetException_ClientServer_WithoutHA();
     ex.remove();
   }
@@ -1079,7 +1085,7 @@ public class DistributedRegionFunctionExecutionDUnitTest extends
   }
   
   public void testFunctionWithNoResultThrowsException(){
-    addExpectedException("RuntimeException");
+    IgnoredException.addIgnoredException("RuntimeException");
     replicate1.invoke(DistributedRegionFunctionExecutionDUnitTest.class,
         "createCacheInVm");
     replicate2.invoke(DistributedRegionFunctionExecutionDUnitTest.class,
@@ -1097,7 +1103,7 @@ public class DistributedRegionFunctionExecutionDUnitTest extends
     createClientAndPopulateClientRegion(DataPolicy.EMPTY, port1, port2);
 
     executeFunctionWithNoResultThrowException();
-    pause(10000);
+    Wait.pause(10000);
   }
   
   public static void executeFunction_NoResult() {
@@ -1132,9 +1138,9 @@ public class DistributedRegionFunctionExecutionDUnitTest extends
       ds.disconnect();
     }
     catch (Exception e) {
-      getLogWriter().info("Exception Occured : " + e.getMessage());
+      LogWriterUtils.getLogWriter().info("Exception Occured : " + e.getMessage());
       e.printStackTrace();
-      fail("Test failed", e);
+      Assert.fail("Test failed", e);
     }
   }
   
@@ -1314,7 +1320,7 @@ public class DistributedRegionFunctionExecutionDUnitTest extends
     }
     catch (Exception e) {
       e.printStackTrace();
-      fail("This is not expected Exception", e);
+      Assert.fail("This is not expected Exception", e);
     }
   }
 
@@ -1344,7 +1350,7 @@ public class DistributedRegionFunctionExecutionDUnitTest extends
     }
     catch (Exception e) {
       e.printStackTrace();
-      fail("This is not expected Exception", e);
+      Assert.fail("This is not expected Exception", e);
     }
   }
 
@@ -1412,7 +1418,7 @@ public class DistributedRegionFunctionExecutionDUnitTest extends
     factory.setDataPolicy(policy);
     assertNotNull(cache);
     region = cache.createRegion(REGION_NAME, factory.create());
-    getLogWriter().info("Client Region Created :" + region);
+    LogWriterUtils.getLogWriter().info("Client Region Created :" + region);
     assertNotNull(region);
     for (int i = 1; i <= 200; i++) {
       region.put("execKey-" + i, new Integer(i));
@@ -1438,7 +1444,7 @@ public class DistributedRegionFunctionExecutionDUnitTest extends
     factory.setPoolName(p.getName());
     assertNotNull(cache);
     region = cache.createRegion(REGION_NAME, factory.create());
-    getLogWriter().info("Client Region Created :" + region);
+    LogWriterUtils.getLogWriter().info("Client Region Created :" + region);
     assertNotNull(region);
   }
 
@@ -1448,7 +1454,7 @@ public class DistributedRegionFunctionExecutionDUnitTest extends
     factory.setDataPolicy(policy);
     assertNotNull(cache);
     region = cache.createRegion(REGION_NAME, factory.create());
-    getLogWriter().info("Region Created :" + region);
+    LogWriterUtils.getLogWriter().info("Region Created :" + region);
     assertNotNull(region);
 
     CacheServer server = cache.addCacheServer();
@@ -1459,7 +1465,7 @@ public class DistributedRegionFunctionExecutionDUnitTest extends
       server.start();
     }
     catch (IOException e) {
-      fail("Failed to start the Server", e);
+      Assert.fail("Failed to start the Server", e);
     }
     assertTrue(server.isRunning());
     return new Integer(server.getPort());
@@ -1471,7 +1477,7 @@ public class DistributedRegionFunctionExecutionDUnitTest extends
     factory.setDataPolicy(policy);
     assertNotNull(cache);
     region = cache.createRegion(REGION_NAME, factory.create());
-    getLogWriter().info("Region Created :" + region);
+    LogWriterUtils.getLogWriter().info("Region Created :" + region);
     assertNotNull(region);
   }
   
@@ -1491,12 +1497,12 @@ public class DistributedRegionFunctionExecutionDUnitTest extends
       ds.disconnect();
       ds = getSystem(props);
       cache = CacheFactory.create(ds);
-      getLogWriter().info("Created Cache on peer");
+      LogWriterUtils.getLogWriter().info("Created Cache on peer");
       assertNotNull(cache);
       FunctionService.registerFunction(function);
     }
     catch (Exception e) {
-      fail(
+      Assert.fail(
           "DistributedRegionFunctionExecutionDUnitTest#createCache() Failed while creating the cache",
           e);
     }
@@ -1515,7 +1521,7 @@ public class DistributedRegionFunctionExecutionDUnitTest extends
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 3000, 200, false);
+    Wait.waitForCriterion(wc, 3000, 200, false);
     long endTime = System.currentTimeMillis();
     region.getCache().getLogger().fine(
         "Time wait for Cache Close = " + (endTime - startTime));
@@ -1534,13 +1540,13 @@ public class DistributedRegionFunctionExecutionDUnitTest extends
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 2000, 500, false);
+    Wait.waitForCriterion(wc, 2000, 500, false);
     Collection bridgeServers = cache.getCacheServers();
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Start Server Bridge Servers list : " + bridgeServers.size());
     Iterator bridgeIterator = bridgeServers.iterator();
     CacheServer bridgeServer = (CacheServer)bridgeIterator.next();
-    getLogWriter().info("start Server Bridge Server" + bridgeServer);
+    LogWriterUtils.getLogWriter().info("start Server Bridge Server" + bridgeServer);
     try {
       bridgeServer.start();
     }
@@ -1561,7 +1567,7 @@ public class DistributedRegionFunctionExecutionDUnitTest extends
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 1000, 200, false);
+    Wait.waitForCriterion(wc, 1000, 200, false);
     try {
       Iterator iter = cache.getCacheServers().iterator();
       if (iter.hasNext()) {
@@ -1586,7 +1592,7 @@ public class DistributedRegionFunctionExecutionDUnitTest extends
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 1000, 200, false);
+    Wait.waitForCriterion(wc, 1000, 200, false);
     if (cache != null && !cache.isClosed()) {
       try {
         Iterator iter = cache.getCacheServers().iterator();
@@ -1616,7 +1622,7 @@ public class DistributedRegionFunctionExecutionDUnitTest extends
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 2000, 200, false);
+    Wait.waitForCriterion(wc, 2000, 200, false);
     long endTime = System.currentTimeMillis();
     region.getCache().getLogger().fine(
         "Time wait for Disconnecting = " + (endTime - startTime));

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/FunctionServiceStatsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/FunctionServiceStatsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/FunctionServiceStatsDUnitTest.java
index edcbfda..9edb736 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/FunctionServiceStatsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/FunctionServiceStatsDUnitTest.java
@@ -52,11 +52,13 @@ import com.gemstone.gemfire.internal.cache.PartitionedRegion;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionTestHelper;
 import com.gemstone.gemfire.internal.cache.functions.TestFunction;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheServerTestUtil;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.VM;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /*
  * This is DUnite Test to test the Function Execution stats under various
@@ -261,7 +263,7 @@ public class FunctionServiceStatsDUnitTest extends PRClientServerTestBase{
 
         }
         catch (Exception e) {
-          getLogWriter().info("Exception : " + e.getMessage());
+          LogWriterUtils.getLogWriter().info("Exception : " + e.getMessage());
           e.printStackTrace();
           fail("Test failed after the put operation");
         }
@@ -285,9 +287,9 @@ public class FunctionServiceStatsDUnitTest extends PRClientServerTestBase{
             .getFunctionExecutionsCompleted());
         assertTrue(functionServiceStats.getResultsReceived() >= resultReceived_Aggregate);
 
-        getLogWriter().info("Calling FunctionStats for  TEST_FUNCTION2 :");
+        LogWriterUtils.getLogWriter().info("Calling FunctionStats for  TEST_FUNCTION2 :");
         FunctionStats functionStats = FunctionStats.getFunctionStats(TestFunction.TEST_FUNCTION2, iDS);
-        getLogWriter().info("Called FunctionStats for  TEST_FUNCTION2 :");
+        LogWriterUtils.getLogWriter().info("Called FunctionStats for  TEST_FUNCTION2 :");
         assertEquals(noOfExecutionCalls_TESTFUNCTION2, functionStats
             .getFunctionExecutionCalls());
         assertEquals(noOfExecutionsCompleted_TESTFUNCTION2, functionStats
@@ -378,14 +380,14 @@ public class FunctionServiceStatsDUnitTest extends PRClientServerTestBase{
           ds.disconnect();
           ds = getSystem(props);
           cache = CacheFactory.create(ds);
-          getLogWriter().info("Created Cache on Server");
+          LogWriterUtils.getLogWriter().info("Created Cache on Server");
           assertNotNull(cache);
           AttributesFactory factory = new AttributesFactory();
           factory.setScope(Scope.DISTRIBUTED_ACK);
           factory.setDataPolicy(DataPolicy.REPLICATE);
           assertNotNull(cache);
           Region region = cache.createRegion(regionName, factory.create());
-          getLogWriter().info("Region Created :" + region);
+          LogWriterUtils.getLogWriter().info("Region Created :" + region);
           assertNotNull(region);
           for (int i = 1; i <= 200; i++) {
             region.put("execKey-" + i, new Integer(i));
@@ -398,13 +400,13 @@ public class FunctionServiceStatsDUnitTest extends PRClientServerTestBase{
             server.start();
           }
           catch (IOException e) {
-            fail("Failed to start the Server", e);
+            Assert.fail("Failed to start the Server", e);
           }
           assertTrue(server.isRunning());
           return new Integer(server.getPort());
         }
         catch (Exception e) {
-          fail(
+          Assert.fail(
               "FunctionServiceStatsDUnitTest#createCache() Failed while creating the cache",
               e);
           throw e;
@@ -427,7 +429,7 @@ public class FunctionServiceStatsDUnitTest extends PRClientServerTestBase{
           ds.disconnect();
           ds = getSystem(props);
           cache = CacheFactory.create(ds);
-          getLogWriter().info("Created Cache on Client");
+          LogWriterUtils.getLogWriter().info("Created Cache on Client");
           assertNotNull(cache);
 
 
@@ -452,7 +454,7 @@ public class FunctionServiceStatsDUnitTest extends PRClientServerTestBase{
           factory.setPoolName(p.getName());
           assertNotNull(cache);
           Region region = cache.createRegion(regionName, factory.create());
-          getLogWriter().info("Client Region Created :" + region);
+          LogWriterUtils.getLogWriter().info("Client Region Created :" + region);
           assertNotNull(region);
           for (int i = 1; i <= 200; i++) {
             region.put("execKey-" + i, new Integer(i));
@@ -460,7 +462,7 @@ public class FunctionServiceStatsDUnitTest extends PRClientServerTestBase{
           return Boolean.TRUE;
         }
         catch (Exception e) {
-          fail(
+          Assert.fail(
               "FunctionServiceStatsDUnitTest#createCache() Failed while creating the cache",
               e);
           throw e;
@@ -517,12 +519,12 @@ public class FunctionServiceStatsDUnitTest extends PRClientServerTestBase{
         }
         catch (FunctionException e) {
           e.printStackTrace();
-          fail("test failed due to", e);
+          Assert.fail("test failed due to", e);
           throw e;
         }
         catch (Exception e) {
           e.printStackTrace();
-          fail("test failed due to", e);
+          Assert.fail("test failed due to", e);
           throw e;
         }
       
@@ -603,7 +605,7 @@ public class FunctionServiceStatsDUnitTest extends PRClientServerTestBase{
         }
         catch (Exception ex) {
           ex.printStackTrace();
-          getLogWriter().info("Exception : ", ex);
+          LogWriterUtils.getLogWriter().info("Exception : ", ex);
           fail("Test failed after the execute operation nn TRUE");
         }
         function = new TestFunction(true, TestFunction.TEST_FUNCTION5);
@@ -625,7 +627,7 @@ public class FunctionServiceStatsDUnitTest extends PRClientServerTestBase{
         }
         catch (Exception ex) {
           ex.printStackTrace();
-          getLogWriter().info("Exception : ", ex);
+          LogWriterUtils.getLogWriter().info("Exception : ", ex);
           fail("Test failed after the execute operationssssss");
         }
         return Boolean.TRUE;
@@ -973,7 +975,7 @@ public class FunctionServiceStatsDUnitTest extends PRClientServerTestBase{
         factory.setScope(Scope.DISTRIBUTED_ACK);
         factory.setDataPolicy(DataPolicy.EMPTY);
         Region region = getCache().createRegion(rName, factory.create());
-        getLogWriter().info("Region Created :" + region);
+        LogWriterUtils.getLogWriter().info("Region Created :" + region);
         assertNotNull(region);
         FunctionService.registerFunction(new TestFunction(true, TestFunction.TEST_FUNCTION2));
         for (int i = 1; i <= 200; i++) {
@@ -991,7 +993,7 @@ public class FunctionServiceStatsDUnitTest extends PRClientServerTestBase{
         factory.setScope(Scope.DISTRIBUTED_ACK);
         factory.setDataPolicy(DataPolicy.REPLICATE);
         Region region = getCache().createRegion(rName, factory.create());
-        getLogWriter().info("Region Created :" + region);
+        LogWriterUtils.getLogWriter().info("Region Created :" + region);
         assertNotNull(region);
         FunctionService.registerFunction(new TestFunction(true, TestFunction.TEST_FUNCTION2));
         for (int i = 1; i <= 200; i++) {
@@ -1026,12 +1028,12 @@ public class FunctionServiceStatsDUnitTest extends PRClientServerTestBase{
         }
         catch (FunctionException e) {
           e.printStackTrace();
-          fail("test failed due to", e);
+          Assert.fail("test failed due to", e);
           return Boolean.FALSE;
         }
         catch (Exception e) {
           e.printStackTrace();
-          fail("test failed due to", e);
+          Assert.fail("test failed due to", e);
           return Boolean.FALSE;
         }
         
@@ -1088,7 +1090,7 @@ public class FunctionServiceStatsDUnitTest extends PRClientServerTestBase{
           assertNotNull(ds);
         }
         catch (Exception e) {
-          fail("Failed while creating the Distribued System", e);
+          Assert.fail("Failed while creating the Distribued System", e);
         }
         return Boolean.TRUE;
       }
@@ -1145,9 +1147,9 @@ public class FunctionServiceStatsDUnitTest extends PRClientServerTestBase{
           
         }
         catch (Exception e) {
-          getLogWriter().info("Exception Occured : " + e.getMessage());
+          LogWriterUtils.getLogWriter().info("Exception Occured : " + e.getMessage());
           e.printStackTrace();
-          fail("Test failed", e);
+          Assert.fail("Test failed", e);
         }
         return Boolean.TRUE;
       }
@@ -1294,7 +1296,7 @@ public class FunctionServiceStatsDUnitTest extends PRClientServerTestBase{
               return excuse;
             }
           };
-          DistributedTestCase.waitForCriterion(wc, 20000, 1000, false);
+          Wait.waitForCriterion(wc, 20000, 1000, false);
           rc.getResult();
         }
         catch (Exception expected) {
@@ -1359,11 +1361,4 @@ public class FunctionServiceStatsDUnitTest extends PRClientServerTestBase{
     ArrayList commonAttributes = createCommonServerAttributes("TestPartitionedRegion", null, 0, 13, null);
     createClientServerScenarion(commonAttributes, 20, 20, 20);
   }
-  
-  // this tear down is geeting used in client server mode. this is making use of
-  // cache object for client server
-  //For P2P, i have added separate closeDistributedSystem method 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-  }
 }
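
A detail worth noting in this file's hunks: only the two-argument fail(message, cause) calls are rewritten to the dunit Assert class, while single-argument fail(message) stays as the inherited JUnit assertion. A short sketch of the resulting split, with the operation under test left as a hypothetical placeholder:

    import com.gemstone.gemfire.test.dunit.Assert;

    public class FailOverloadSketch extends junit.framework.TestCase {
      public void testExample() {
        try {
          riskyOperation();                        // hypothetical operation
        } catch (Exception e) {
          // the message-plus-cause overload now lives on the dunit Assert class
          Assert.fail("operation failed unexpectedly", e);
        }
        // a message-only failure is still the JUnit assertion the test inherits
        if (!invariantHolds()) {
          fail("invariant violated");
        }
      }

      private void riskyOperation() throws Exception { }
      private boolean invariantHolds() { return true; }
    }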

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/LocalDataSetDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/LocalDataSetDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/LocalDataSetDUnitTest.java
index 5dfb429..6d582ed 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/LocalDataSetDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/LocalDataSetDUnitTest.java
@@ -44,8 +44,10 @@ import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.cache.PartitionedRegion;
 import com.gemstone.gemfire.internal.cache.functions.LocalDataSetFunction;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
@@ -141,7 +143,7 @@ public class LocalDataSetDUnitTest extends CacheTestCase {
       Region localRegion = PartitionRegionHelper.getLocalDataForContext((RegionFunctionContext)context);
       Iterator it = localRegion.keySet().iterator();
       while (it.hasNext()) {
-        getLogWriter().info("LocalKeys:"+it.next());
+        LogWriterUtils.getLogWriter().info("LocalKeys:"+it.next());
       }
       context.getResultSender().lastResult(Boolean.TRUE);
     }
@@ -204,7 +206,7 @@ public class LocalDataSetDUnitTest extends CacheTestCase {
     }
     catch (Exception e) {
       e.printStackTrace();
-      fail("Test failed due to ", e);
+      Assert.fail("Test failed due to ", e);
     }
   }
   private void registerFunctions() {
@@ -249,7 +251,7 @@ public class LocalDataSetDUnitTest extends CacheTestCase {
       assertNotNull(cache);
     }
     catch (Exception e) {
-      fail("Failed while creating the cache", e);
+      Assert.fail("Failed while creating the cache", e);
     }
   }
 
@@ -302,7 +304,7 @@ public class LocalDataSetDUnitTest extends CacheTestCase {
     if (partitionedRegionName.equals("CustomerPR")) {
       customerPR = cache.createRegion(partitionedRegionName, attr.create());
       assertNotNull(customerPR);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "Partitioned Region " + partitionedRegionName
               + " created Successfully :" + customerPR);
 
@@ -310,7 +312,7 @@ public class LocalDataSetDUnitTest extends CacheTestCase {
     if (partitionedRegionName.equals("OrderPR")) {
       orderPR = cache.createRegion(partitionedRegionName, attr.create());
       assertNotNull(orderPR);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "Partitioned Region " + partitionedRegionName
               + " created Successfully :" + orderPR);
 
@@ -319,7 +321,7 @@ public class LocalDataSetDUnitTest extends CacheTestCase {
     if (partitionedRegionName.equals("ShipmentPR")) {
       shipmentPR = cache.createRegion(partitionedRegionName, attr.create());
       assertNotNull(shipmentPR);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "Partitioned Region " + partitionedRegionName
               + " created Successfully :" + shipmentPR);
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/LocalDataSetIndexingDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/LocalDataSetIndexingDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/LocalDataSetIndexingDUnitTest.java
index 8b0d4ba..1d933e3 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/LocalDataSetIndexingDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/LocalDataSetIndexingDUnitTest.java
@@ -124,7 +124,7 @@ public class LocalDataSetIndexingDUnitTest extends CacheTestCase {
               "/pr2 e2");
         }
         catch (Exception e) {
-          fail("Test failed due to Exception in index creation ", e);
+          com.gemstone.gemfire.test.dunit.Assert.fail("Test failed due to Exception in index creation ", e);
         }
       }
     };

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/LocalFunctionExecutionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/LocalFunctionExecutionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/LocalFunctionExecutionDUnitTest.java
index f58d349..33c8f3e 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/LocalFunctionExecutionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/LocalFunctionExecutionDUnitTest.java
@@ -34,8 +34,11 @@ import com.gemstone.gemfire.cache.execute.ResultCollector;
 import com.gemstone.gemfire.distributed.DistributedMember;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.cache.functions.TestFunction;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -96,7 +99,7 @@ public class LocalFunctionExecutionDUnitTest extends DistributedTestCase{
       assertNotNull(cache);
     }
     catch (Exception e) {
-      fail("Failed while creating the cache", e);
+      Assert.fail("Failed while creating the cache", e);
     }
   }
   
@@ -113,7 +116,7 @@ public class LocalFunctionExecutionDUnitTest extends DistributedTestCase{
     
     region = cache.createRegion(partitionedRegionName, attr.create());
     assertNotNull(region);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region " + partitionedRegionName
             + " created Successfully :" + region);
   }
@@ -126,7 +129,7 @@ public class LocalFunctionExecutionDUnitTest extends DistributedTestCase{
     assertNotNull(cache);
     region = cache.createRegion(distributedRegionName, attr.create());
     assertNotNull(region);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Distributed Region " + distributedRegionName
             + " created Successfully :" + region);
   }
@@ -144,7 +147,7 @@ public class LocalFunctionExecutionDUnitTest extends DistributedTestCase{
       FunctionService.registerFunction(function1);
       ResultCollector rc = FunctionService.onRegion(region).withArgs(Boolean.TRUE).execute(function1.getId());
       rc.getResult();
-      fail("Exception should occur",new Exception("Test Failed"));
+      Assert.fail("Exception should occur",new Exception("Test Failed"));
     }
     catch (Exception e) {
       assertTrue(e.getMessage().contains("I have been thrown from TestFunction"));
@@ -158,7 +161,7 @@ public class LocalFunctionExecutionDUnitTest extends DistributedTestCase{
       DistributedMember localmember = system.getDistributedMember();
       ResultCollector rc = FunctionService.onMember(system, localmember).withArgs(Boolean.TRUE).execute(function1.getId());
       rc.getResult();
-      fail("Exception should occur",new Exception("Test Failed"));
+      Assert.fail("Exception should occur",new Exception("Test Failed"));
     }
     catch (Exception e) {
       assertTrue(e.getMessage().contains("I have been thrown from TestFunction"));
@@ -166,13 +169,13 @@ public class LocalFunctionExecutionDUnitTest extends DistributedTestCase{
     }    
   }
   
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     if(cache != null) {
       cache.close();
     }
     cache = null;
-    invokeInEveryVM(new SerializableRunnable() { public void run() {
+    Invoke.invokeInEveryVM(new SerializableRunnable() { public void run() {
       if(cache != null) {
         cache.close();
       }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/MemberFunctionExecutionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/MemberFunctionExecutionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/MemberFunctionExecutionDUnitTest.java
index 9508726..1f3ed17 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/MemberFunctionExecutionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/MemberFunctionExecutionDUnitTest.java
@@ -48,7 +48,9 @@ import com.gemstone.gemfire.i18n.LogWriterI18n;
 import com.gemstone.gemfire.internal.ClassBuilder;
 import com.gemstone.gemfire.internal.cache.functions.TestFunction;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
@@ -166,7 +168,7 @@ public class MemberFunctionExecutionDUnitTest extends CacheTestCase {
       resultCollector.getResult();
       fail("Should have received FunctionException due to class not found");
     } catch (FunctionException expected) {
-      getLogWriter().warning("received wrong exception cause", expected.getCause());
+      LogWriterUtils.getLogWriter().warning("received wrong exception cause", expected.getCause());
       assertTrue((expected.getCause() instanceof ClassNotFoundException));
     }
   }
@@ -340,7 +342,7 @@ public class MemberFunctionExecutionDUnitTest extends CacheTestCase {
           fail("Should have seen an exception");
         } catch (Exception e) {
           if(!(e.getCause() instanceof FunctionInvocationTargetException)) {
-            fail("failed", e);
+            Assert.fail("failed", e);
           }
         }
         
@@ -385,7 +387,7 @@ public class MemberFunctionExecutionDUnitTest extends CacheTestCase {
         }
       });
       List li = (ArrayList)rc.getResult();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "MemberFunctionExecutionDUnitTest#excuteOnMembers: Result : " + li);
       assertEquals(li.size(), 1);
       for (Object obj : li) {
@@ -394,9 +396,9 @@ public class MemberFunctionExecutionDUnitTest extends CacheTestCase {
       ds.disconnect();
     }
     catch (Exception e) {
-      getLogWriter().info("Exception Occured : "+ e.getMessage());
+      LogWriterUtils.getLogWriter().info("Exception Occured : "+ e.getMessage());
       e.printStackTrace();
-      fail("Test failed",e);
+      Assert.fail("Test failed",e);
     }
   }
   /*
@@ -442,7 +444,7 @@ public class MemberFunctionExecutionDUnitTest extends CacheTestCase {
     try {
       ResultCollector rc = executor.execute(function.getId());
       List li = (ArrayList)rc.getResult();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "MemberFunctionExecutionDUnitTest#excuteOnMembers: Result : " + li);
       assertEquals(li.size(), noOfMembers.intValue());
       for (Object obj : li) {
@@ -450,9 +452,9 @@ public class MemberFunctionExecutionDUnitTest extends CacheTestCase {
       }
     }
     catch (Exception e) {
-      getLogWriter().info("Exception Occured : "+ e.getMessage());
+      LogWriterUtils.getLogWriter().info("Exception Occured : "+ e.getMessage());
       e.printStackTrace();
-      fail("Test failed",e);
+      Assert.fail("Test failed",e);
     }
   }
   
@@ -482,7 +484,7 @@ public class MemberFunctionExecutionDUnitTest extends CacheTestCase {
     try {
       ResultCollector rc = memberExcution.withArgs(Boolean.TRUE).execute(function);
       List li = (ArrayList)rc.getResult();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "MemberFunctionExecutionDUnitTest#excuteOnMembers: Result : " + li);
       assertEquals(noOfMembers.intValue(), li.size());
       for (Object obj : li) {
@@ -490,9 +492,9 @@ public class MemberFunctionExecutionDUnitTest extends CacheTestCase {
       }
     }
     catch (Exception e) {
-      getLogWriter().info("Exception Occured : "+ e.getMessage());
+      LogWriterUtils.getLogWriter().info("Exception Occured : "+ e.getMessage());
       e.printStackTrace();
-      fail("Test failed",e);
+      Assert.fail("Test failed",e);
     }
   }
   
@@ -584,7 +586,7 @@ public class MemberFunctionExecutionDUnitTest extends CacheTestCase {
         }
       });
       List li = (ArrayList)rc.getResult();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "MemberFunctionExecutionDUnitTest#excuteOnMembers: Result : " + li);
       assertEquals(li.size(), noOfMembers.intValue());
       for (Object obj : li) {
@@ -592,9 +594,9 @@ public class MemberFunctionExecutionDUnitTest extends CacheTestCase {
       }
     }
     catch (Exception e) {
-      getLogWriter().info("Exception Occured : "+ e.getMessage());
+      LogWriterUtils.getLogWriter().info("Exception Occured : "+ e.getMessage());
       e.printStackTrace();
-      fail("Test failed",e);
+      Assert.fail("Test failed",e);
     }
   }
   
@@ -612,7 +614,7 @@ public class MemberFunctionExecutionDUnitTest extends CacheTestCase {
       fail("Test Failed");
     }
     catch (Exception expected) {
-      getLogWriter().info("Exception Occured : "+ expected.getMessage());
+      LogWriterUtils.getLogWriter().info("Exception Occured : "+ expected.getMessage());
 //      boolean check = expected.getMessage().equals("Cannot return any result, as Function.hasResult() is false");
       assertTrue(expected.getMessage().equals(LocalizedStrings.ExecuteFunction_CANNOT_0_RESULTS_HASRESULT_FALSE
           .toLocalizedString("return any")));
@@ -658,20 +660,23 @@ public class MemberFunctionExecutionDUnitTest extends CacheTestCase {
       FunctionService.registerFunction(new TestFunction(true,TestFunction.TEST_FUNCTION_NO_LASTRESULT));
     }
     catch (Exception e) {
-      fail("Failed while creating the Distribued System", e);
+      Assert.fail("Failed while creating the Distribued System", e);
     }
     return ds;
   }
   
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
     List<VM> members = new ArrayList<VM>(4);
     members.add(member1); members.add(member2); members.add(member3); members.add(member4);
     for (VM member: members) {
       member.invoke(MemberFunctionExecutionDUnitTest.class, "registerExpectedExceptions",
           new Object[] { Boolean.FALSE });
     }
-    super.tearDown2();
+  }
+  
+  @Override
+  protected final void postTearDownCacheTestCase() throws Exception {
     disconnectAllFromDS();
   }
 }
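
Here the tearDown2 override is split into two hooks, preTearDownCacheTestCase and postTearDownCacheTestCase, so the subclass no longer has to remember the super.tearDown2() call. The sketch below assumes the base class invokes these hooks in the order their names suggest, before and after its own cache cleanup; the per-VM work inside is a placeholder:

    import com.gemstone.gemfire.cache30.CacheTestCase;
    import com.gemstone.gemfire.test.dunit.Invoke;
    import com.gemstone.gemfire.test.dunit.SerializableRunnable;

    public class TearDownHookSketch extends CacheTestCase {

      public TearDownHookSketch(String name) {
        super(name);
      }

      @Override
      protected final void preTearDownCacheTestCase() throws Exception {
        // assumed to run before the framework closes the cache: undo any
        // per-test registrations in each VM
        Invoke.invokeInEveryVM(new SerializableRunnable() {
          public void run() {
            // hypothetical per-VM cleanup
          }
        });
      }

      @Override
      protected final void postTearDownCacheTestCase() throws Exception {
        // assumed to run after the framework's own cleanup
        disconnectAllFromDS();
      }
    }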

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/MultiRegionFunctionExecutionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/MultiRegionFunctionExecutionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/MultiRegionFunctionExecutionDUnitTest.java
index b9f0dc2..899e2bf 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/MultiRegionFunctionExecutionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/MultiRegionFunctionExecutionDUnitTest.java
@@ -35,6 +35,7 @@ import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -70,10 +71,10 @@ public class MultiRegionFunctionExecutionDUnitTest extends CacheTestCase {
     vm3 = host.getVM(3);
   }
   
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  @Override
+  protected final void postTearDownCacheTestCase() throws Exception {
     cache = null;
-    invokeInEveryVM(new SerializableRunnable() { public void run() { cache = null; } });
+    Invoke.invokeInEveryVM(new SerializableRunnable() { public void run() { cache = null; } });
   }
   
   public void testMultiRegionFunctionExecution(){
@@ -153,7 +154,7 @@ public class MultiRegionFunctionExecutionDUnitTest extends CacheTestCase {
       assertNotNull(cache);
     }
     catch (Exception e) {
-      fail("Failed while creating the cache", e);
+      com.gemstone.gemfire.test.dunit.Assert.fail("Failed while creating the cache", e);
     }
   }
   @SuppressWarnings("unchecked")

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/OnGroupsFunctionExecutionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/OnGroupsFunctionExecutionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/OnGroupsFunctionExecutionDUnitTest.java
index fd5b1a4..3cc34d8 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/OnGroupsFunctionExecutionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/OnGroupsFunctionExecutionDUnitTest.java
@@ -44,9 +44,14 @@ import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * 
@@ -59,8 +64,8 @@ public class OnGroupsFunctionExecutionDUnitTest extends DistributedTestCase {
   }
 
   @Override
-  public void tearDown2() throws Exception {
-    invokeInEveryVM(new SerializableCallable() {
+  protected final void preTearDown() throws Exception {
+    Invoke.invokeInEveryVM(new SerializableCallable() {
       @Override
       public Object call() throws Exception {
         Cache c = null;
@@ -83,7 +88,7 @@ public class OnGroupsFunctionExecutionDUnitTest extends DistributedTestCase {
 
     @Override
     public void execute(FunctionContext context) {
-      getLogWriter().fine("SWAP:1:executing OnGroupsFunction:"+invocationCount);
+      LogWriterUtils.getLogWriter().fine("SWAP:1:executing OnGroupsFunction:"+invocationCount);
       InternalDistributedSystem ds = InternalDistributedSystem.getConnectedInstance();
       synchronized (OnGroupsFunction.class) {
     	  invocationCount++;
@@ -228,7 +233,7 @@ public class OnGroupsFunctionExecutionDUnitTest extends DistributedTestCase {
     vm0.invoke(new SerializableCallable() {
       @Override
       public Object call() throws Exception {
-        getLogWriter().fine("SWAP:invoking on gm");
+        LogWriterUtils.getLogWriter().fine("SWAP:invoking on gm");
         DistributedSystem ds = getSystem();
         try {
           FunctionService.onMember("no such group");
@@ -255,7 +260,7 @@ public class OnGroupsFunctionExecutionDUnitTest extends DistributedTestCase {
       @Override
       public Object call() throws Exception {
         DistributedSystem ds = getSystem();
-        getLogWriter().fine("SWAP:invoking on g0");
+        LogWriterUtils.getLogWriter().fine("SWAP:invoking on g0");
         Execution e = FunctionService.onMembers("g0");
         ArrayList<String> args = new ArrayList<String>();
         args.add("g0");
@@ -295,7 +300,7 @@ public class OnGroupsFunctionExecutionDUnitTest extends DistributedTestCase {
     vm0.invoke(new SerializableCallable() {
       @Override
       public Object call() throws Exception {
-        getLogWriter().fine("SWAP:invoking on g0 g1");
+        LogWriterUtils.getLogWriter().fine("SWAP:invoking on g0 g1");
         InternalDistributedSystem ds = InternalDistributedSystem.getConnectedInstance();
         Execution e = FunctionService.onMembers("g0", "g1");
         ArrayList<String> args = new ArrayList<String>();
@@ -492,7 +497,7 @@ public class OnGroupsFunctionExecutionDUnitTest extends DistributedTestCase {
     
     //The test function deliberately throws a null pointer exception.
     //which is logged.
-    addExpectedException(NullPointerException.class.getSimpleName());
+    IgnoredException.addIgnoredException(NullPointerException.class.getSimpleName());
     
     initVM(vm0, "g0,mg", regionName, false);
     initVM(vm1, "g1", regionName, false);
@@ -680,11 +685,11 @@ public class OnGroupsFunctionExecutionDUnitTest extends DistributedTestCase {
         } catch (CacheClosedException cce) {
         }
         disconnectFromDS();
-        getLogWriter().fine("SWAP:creating client cache");
+        LogWriterUtils.getLogWriter().fine("SWAP:creating client cache");
         ClientCacheFactory ccf = new ClientCacheFactory();
         ccf.addPoolLocator(hostName, locatorPort);
         ccf.setPoolServerGroup("mg");
-        ccf.set("log-level", getDUnitLogLevel());
+        ccf.set("log-level", LogWriterUtils.getDUnitLogLevel());
         ClientCache c = ccf.create();
 
         c.getLogger().info("SWAP:invoking function from client on g0");
@@ -778,11 +783,11 @@ public class OnGroupsFunctionExecutionDUnitTest extends DistributedTestCase {
         } catch (CacheClosedException cce) {
         }
         disconnectFromDS();
-        getLogWriter().fine("SWAP:creating client cache");
+        LogWriterUtils.getLogWriter().fine("SWAP:creating client cache");
         ClientCacheFactory ccf = new ClientCacheFactory();
         ccf.addPoolLocator(hostName, locatorPort);
         ccf.setPoolServerGroup("mg");
-        ccf.set("log-level", getDUnitLogLevel());
+        ccf.set("log-level", LogWriterUtils.getDUnitLogLevel());
         ClientCache c = ccf.create();
 
         c.getLogger().info("SWAP:invoking function from client on g0");
@@ -855,14 +860,14 @@ public class OnGroupsFunctionExecutionDUnitTest extends DistributedTestCase {
         } catch (CacheClosedException cce) {
         }
         disconnectFromDS();
-        getLogWriter().fine("SWAP:creating client cache");
+        LogWriterUtils.getLogWriter().fine("SWAP:creating client cache");
         ClientCacheFactory ccf = new ClientCacheFactory();
         ccf.addPoolLocator(hostName, locatorPort);
         ccf.setPoolServerGroup("mg");
-        ccf.set("log-level", getDUnitLogLevel());
+        ccf.set("log-level", LogWriterUtils.getDUnitLogLevel());
         ClientCache c = ccf.create();
 
-        ExpectedException ex = addExpectedException("No member found");
+        IgnoredException ex = IgnoredException.addIgnoredException("No member found");
         try {
           InternalFunctionService.onServer(c, "no such group").execute(new OnGroupsFunction()).getResult();
          fail("expected exception not thrown");
@@ -935,14 +940,14 @@ public class OnGroupsFunctionExecutionDUnitTest extends DistributedTestCase {
         } catch (CacheClosedException cce) {
         }
         disconnectFromDS();
-        getLogWriter().fine("SWAP:creating client cache");
+        LogWriterUtils.getLogWriter().fine("SWAP:creating client cache");
         ClientCacheFactory ccf = new ClientCacheFactory();
         ccf.addPoolLocator(hostName, locatorPort);
         ccf.setPoolServerGroup("mg");
-        ccf.set("log-level", getDUnitLogLevel());
+        ccf.set("log-level", LogWriterUtils.getDUnitLogLevel());
         ClientCache c = ccf.create();
 
-        ExpectedException expected = addExpectedException("No member found");
+        IgnoredException expected = IgnoredException.addIgnoredException("No member found");
         try {
           InternalFunctionService.onServers(c, "no such group").execute(new OnGroupsFunction()).getResult();
           fail("expected exception not thrown");
@@ -951,7 +956,7 @@ public class OnGroupsFunctionExecutionDUnitTest extends DistributedTestCase {
           expected.remove();
         }
 
-        addExpectedException("NullPointerException");
+        IgnoredException.addIgnoredException("NullPointerException");
         Execution e = InternalFunctionService.onServers(c, "mg");
         ArrayList<String> args = new ArrayList<String>();
         args.add("runtime");
@@ -1012,11 +1017,11 @@ public class OnGroupsFunctionExecutionDUnitTest extends DistributedTestCase {
         } catch (CacheClosedException cce) {
         }
         disconnectFromDS();
-        getLogWriter().fine("SWAP:creating client cache");
+        LogWriterUtils.getLogWriter().fine("SWAP:creating client cache");
         ClientCacheFactory ccf = new ClientCacheFactory();
         ccf.addPoolLocator(hostName, locatorPort);
         ccf.setPoolServerGroup("mg");
-        ccf.set("log-level", getDUnitLogLevel());
+        ccf.set("log-level", LogWriterUtils.getDUnitLogLevel());
         ClientCache c = ccf.create();
 
         Execution e = InternalFunctionService.onServers(c, "g1");
@@ -1024,7 +1029,7 @@ public class OnGroupsFunctionExecutionDUnitTest extends DistributedTestCase {
         args.add("disconnect");
         e = e.withArgs(args);
         
-        addExpectedException("FunctionInvocationTargetException");
+        IgnoredException.addIgnoredException("FunctionInvocationTargetException");
         try {
           e.execute(new OnGroupsExceptionFunction()).getResult();
           fail("expected exception not thrown");
@@ -1061,11 +1066,11 @@ public class OnGroupsFunctionExecutionDUnitTest extends DistributedTestCase {
         } catch (CacheClosedException cce) {
         }
         disconnectFromDS();
-        getLogWriter().fine("SWAP:creating client cache");
+        LogWriterUtils.getLogWriter().fine("SWAP:creating client cache");
         ClientCacheFactory ccf = new ClientCacheFactory();
         ccf.addPoolLocator(hostName, locatorPort);
         ccf.setPoolServerGroup("mg");
-        ccf.set("log-level", getDUnitLogLevel());
+        ccf.set("log-level", LogWriterUtils.getDUnitLogLevel());
         ClientCache c = ccf.create();
 
         Execution e = InternalFunctionService.onServers(c, "g1");
@@ -1073,7 +1078,7 @@ public class OnGroupsFunctionExecutionDUnitTest extends DistributedTestCase {
         args.add("disconnect");
         args.add("g2");
         e = e.withArgs(args);
-        addExpectedException("FunctionInvocationTargetException");
+        IgnoredException.addIgnoredException("FunctionInvocationTargetException");
         try {
           e.execute(new OnGroupsExceptionFunction()).getResult();
           fail("expected exception not thrown");
@@ -1110,11 +1115,11 @@ public class OnGroupsFunctionExecutionDUnitTest extends DistributedTestCase {
         } catch (CacheClosedException cce) {
         }
         disconnectFromDS();
-        getLogWriter().fine("SWAP:creating client cache");
+        LogWriterUtils.getLogWriter().fine("SWAP:creating client cache");
         ClientCacheFactory ccf = new ClientCacheFactory();
         ccf.addPoolLocator(hostName, locatorPort);
         ccf.setPoolServerGroup("mg");
-        ccf.set("log-level", getDUnitLogLevel());
+        ccf.set("log-level", LogWriterUtils.getDUnitLogLevel());
         ClientCache c = ccf.create();
 
         Execution e = InternalFunctionService.onServers(c, "g1");
@@ -1124,7 +1129,7 @@ public class OnGroupsFunctionExecutionDUnitTest extends DistributedTestCase {
         e = e.withArgs(args);
         ((AbstractExecution)e).setIgnoreDepartedMembers(true);
         ArrayList l = (ArrayList) e.execute(new OnGroupsExceptionFunction()).getResult();
-        getLogWriter().info("SWAP:result:"+l);
+        LogWriterUtils.getLogWriter().info("SWAP:result:"+l);
         assertEquals(2, l.size());
         if (l.get(0) instanceof Throwable) {
           assertTrue((Boolean) l.get(1));
@@ -1152,7 +1157,7 @@ public class OnGroupsFunctionExecutionDUnitTest extends DistributedTestCase {
   public void testNoAckGroupsFunction() {
     //Workaround for #52005. This is a product bug
     //that should be fixed
-    addExpectedException("Cannot return any result");
+    IgnoredException.addIgnoredException("Cannot return any result");
     Host host = Host.getHost(0);
     final VM server0 = host.getVM(0);
     final VM server1 = host.getVM(1);
@@ -1177,11 +1182,11 @@ public class OnGroupsFunctionExecutionDUnitTest extends DistributedTestCase {
         } catch (CacheClosedException cce) {
         }
         disconnectFromDS();
-        getLogWriter().fine("SWAP:creating client cache");
+        LogWriterUtils.getLogWriter().fine("SWAP:creating client cache");
         ClientCacheFactory ccf = new ClientCacheFactory();
         ccf.addPoolLocator(hostName, locatorPort);
         ccf.setPoolServerGroup("mg");
-        ccf.set("log-level", getDUnitLogLevel());
+        ccf.set("log-level", LogWriterUtils.getDUnitLogLevel());
         ClientCache c = ccf.create();
 
         c.getLogger().info("SWAP:invoking function from client on g0");
@@ -1203,7 +1208,7 @@ public class OnGroupsFunctionExecutionDUnitTest extends DistributedTestCase {
         return "OnGroupsNoAck invocation count mismatch";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 30000, 1000, true);
+    Wait.waitForCriterion(wc, 30000, 1000, true);
 
     resetInvocationCount(server0);
     resetInvocationCount(server1);
@@ -1219,7 +1224,7 @@ public class OnGroupsFunctionExecutionDUnitTest extends DistributedTestCase {
       }
     });
     // pause here to verify that we do not get more than 1 invocation
-    DistributedTestCase.pause(5000);
+    Wait.pause(5000);
     WaitCriterion wc2 = new WaitCriterion() {
       @Override
       public boolean done() {
@@ -1233,7 +1238,7 @@ public class OnGroupsFunctionExecutionDUnitTest extends DistributedTestCase {
         return "OnGroupsNoAck invocation count mismatch";
       }
     };
-    DistributedTestCase.waitForCriterion(wc2, 30000, 1000, true);
+    Wait.waitForCriterion(wc2, 30000, 1000, true);
     resetInvocationCount(server0);
     resetInvocationCount(server1);
     resetInvocationCount(server2);
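
The hunks above replace helpers that test classes previously inherited from DistributedTestCase with static calls on dedicated dunit utility classes. A minimal sketch of the resulting pattern, assuming the com.gemstone.gemfire.test.dunit classes shown in this patch are on the test classpath; the class name and counter field below are hypothetical, used only to give the wait criterion something to poll:

    import com.gemstone.gemfire.test.dunit.IgnoredException;
    import com.gemstone.gemfire.test.dunit.Wait;
    import com.gemstone.gemfire.test.dunit.WaitCriterion;

    public class OnGroupsMigrationSketch {

      // Hypothetical counter; stands in for the per-VM invocation count polled by the test.
      private static volatile int invocationCount;

      static void awaitInvocations(final int expected) {
        // Formerly: addExpectedException("Cannot return any result"), inherited from DistributedTestCase.
        IgnoredException.addIgnoredException("Cannot return any result");

        WaitCriterion wc = new WaitCriterion() {
          @Override
          public boolean done() {
            return invocationCount == expected;
          }

          @Override
          public String description() {
            return "OnGroupsNoAck invocation count mismatch";
          }
        };
        // Formerly: DistributedTestCase.waitForCriterion(wc, 30000, 1000, true);
        Wait.waitForCriterion(wc, 30000, 1000, true);

        // Formerly: DistributedTestCase.pause(5000); pause to verify no further invocations arrive.
        Wait.pause(5000);
      }
    }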

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerFunctionExecutionNoAckDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerFunctionExecutionNoAckDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerFunctionExecutionNoAckDUnitTest.java
index d923eda..d33dc47 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerFunctionExecutionNoAckDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRClientServerFunctionExecutionNoAckDUnitTest.java
@@ -31,6 +31,7 @@ import com.gemstone.gemfire.cache.execute.ResultCollector;
 import com.gemstone.gemfire.distributed.DistributedMember;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.cache.functions.TestFunction;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 
 public class PRClientServerFunctionExecutionNoAckDUnitTest extends PRClientServerTestBase{
   /**
@@ -66,7 +67,7 @@ public class PRClientServerFunctionExecutionNoAckDUnitTest extends PRClientServe
     
     isByName = new Boolean(true);
     toRegister = new Boolean(true);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRClientServerFunctionExecutionNoAckDUnitTest#testServerFunctionExecution_NoAck : Starting test");
     client.invoke(PRClientServerFunctionExecutionNoAckDUnitTest.class,
@@ -84,7 +85,7 @@ public class PRClientServerFunctionExecutionNoAckDUnitTest extends PRClientServe
     registerFunctionAtServer(functionAck);
     toRegister = new Boolean(false);
     isByName = new Boolean(true);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRClientServerFunctionExecutionNoAckDUnitTest#testServerFunctionExecution_NoAck : Starting test");
     client.invoke(PRClientServerFunctionExecutionNoAckDUnitTest.class,
@@ -94,7 +95,7 @@ public class PRClientServerFunctionExecutionNoAckDUnitTest extends PRClientServe
   }
   
   private void createScenario() {
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRClientServerFFunctionExecutionDUnitTest#createScenario : creating scenario");
     createClientServerScenarionWithoutRegion();
@@ -117,11 +118,11 @@ public class PRClientServerFunctionExecutionNoAckDUnitTest extends PRClientServe
       for(int i=0;i< NUM_ITERATION;i++)
         execute(member, Boolean.TRUE, functionNoAck, isByName, toRegister);
       t.stop();
-      getLogWriter().info("Time taken to execute boolean based" + NUM_ITERATION + "NoAck functions :" + t.getTimeInMs());      
+      LogWriterUtils.getLogWriter().info("Time taken to execute boolean based" + NUM_ITERATION + "NoAck functions :" + t.getTimeInMs());      
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      getLogWriter().info("Exception : ", ex);
+      LogWriterUtils.getLogWriter().info("Exception : ", ex);
       fail("Test failed after the execute operation");
     }
         
@@ -135,11 +136,11 @@ public class PRClientServerFunctionExecutionNoAckDUnitTest extends PRClientServe
       for(int i=0;i< NUM_ITERATION;i++)
         execute(member, testKeysSet, functionNoAck, isByName, toRegister);
       t.stop();
-      getLogWriter().info("Time taken to execute setbased" + NUM_ITERATION + "NoAck functions :" + t.getTimeInMs());     
+      LogWriterUtils.getLogWriter().info("Time taken to execute setbased" + NUM_ITERATION + "NoAck functions :" + t.getTimeInMs());     
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      getLogWriter().info("Exception : ", ex);
+      LogWriterUtils.getLogWriter().info("Exception : ", ex);
       fail("Test failed after the execute operationssssss");
     }
     if(toRegister.booleanValue()){
@@ -158,11 +159,11 @@ public class PRClientServerFunctionExecutionNoAckDUnitTest extends PRClientServe
         timeinms += t.getTimeInMs();
         assertEquals(Boolean.TRUE, ((List)rc.getResult()).get(0));
       }
-      getLogWriter().info("Time taken to execute boolean based" + NUM_ITERATION + "haveResults functions :" + timeinms);      
+      LogWriterUtils.getLogWriter().info("Time taken to execute boolean based" + NUM_ITERATION + "haveResults functions :" + timeinms);      
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      getLogWriter().info("Exception : ", ex);
+      LogWriterUtils.getLogWriter().info("Exception : ", ex);
       fail("Test failed after the execute operation");
     }
         
@@ -184,11 +185,11 @@ public class PRClientServerFunctionExecutionNoAckDUnitTest extends PRClientServe
         }
         
       }
-      getLogWriter().info("Time taken to execute setbased" + NUM_ITERATION + "haveResults functions :" + timeinms);     
+      LogWriterUtils.getLogWriter().info("Time taken to execute setbased" + NUM_ITERATION + "haveResults functions :" + timeinms);     
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      getLogWriter().info("Exception : ", ex);
+      LogWriterUtils.getLogWriter().info("Exception : ", ex);
       fail("Test failed after the execute operationssssss");
     }    
   }
@@ -211,7 +212,7 @@ public class PRClientServerFunctionExecutionNoAckDUnitTest extends PRClientServe
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      getLogWriter().info("Exception : ", ex);
+      LogWriterUtils.getLogWriter().info("Exception : ", ex);
       fail("Test failed after the execute operation allserver   ");
     }
 
@@ -224,7 +225,7 @@ public class PRClientServerFunctionExecutionNoAckDUnitTest extends PRClientServe
     }
     catch (Exception ex) {
       ex.printStackTrace();
-      getLogWriter().info("Exception : ", ex);
+      LogWriterUtils.getLogWriter().info("Exception : ", ex);
       fail("Test failed after the execute operation");
     }
   }
@@ -234,15 +235,15 @@ public class PRClientServerFunctionExecutionNoAckDUnitTest extends PRClientServe
       throws Exception {
     if (isByName.booleanValue()) {// by name
       if(toRegister.booleanValue()){
-        getLogWriter().info("The function name to execute : " + function.getId());
+        LogWriterUtils.getLogWriter().info("The function name to execute : " + function.getId());
         Execution me = member.withArgs(args);
-        getLogWriter().info("The args passed  : " + args);
+        LogWriterUtils.getLogWriter().info("The args passed  : " + args);
         return me.execute(function.getId());
       }
       else {
-        getLogWriter().info("The function name to execute : (without Register) " + function.getId());
+        LogWriterUtils.getLogWriter().info("The function name to execute : (without Register) " + function.getId());
         Execution me = member.withArgs(args);
-        getLogWriter().info("The args passed  : " + args);
+        LogWriterUtils.getLogWriter().info("The args passed  : " + args);
         return me.execute(function.getId(), function.hasResult(),function.isHA(),function.optimizeForWrite());
       }
     }
@@ -250,9 +251,4 @@ public class PRClientServerFunctionExecutionNoAckDUnitTest extends PRClientServe
       return member.withArgs(args).execute(function);
     }
   }
-
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-  }
-
 }
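
The same extraction applies to logging: getLogWriter(), formerly inherited from DistributedTestCase, becomes a static call on LogWriterUtils, and the no-op tearDown2() override that only delegated to super is dropped as redundant. A minimal sketch of the migrated logging calls, assuming LogWriterUtils is on the test classpath; the class and method names below are hypothetical placeholders:

    import com.gemstone.gemfire.test.dunit.LogWriterUtils;

    public class LoggingMigrationSketch {

      void logExecution(long elapsedMs, Exception failure) {
        // Formerly: getLogWriter().info(...), inherited from DistributedTestCase.
        LogWriterUtils.getLogWriter().info(
            "Time taken to execute NoAck functions : " + elapsedMs + " ms");

        if (failure != null) {
          // The LogWriter info(String, Throwable) overload is unchanged; only the lookup moves.
          LogWriterUtils.getLogWriter().info("Exception : ", failure);
        }
      }
    }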


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryDUnitHelper.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryDUnitHelper.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryDUnitHelper.java
index 04dd49b..1860038 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryDUnitHelper.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryDUnitHelper.java
@@ -83,6 +83,7 @@ import com.gemstone.gemfire.internal.cache.PartitionedRegionDUnitTestCase;
 import com.gemstone.gemfire.internal.cache.control.InternalResourceManager;
 import com.gemstone.gemfire.internal.cache.control.InternalResourceManager.ResourceObserverAdapter;
 import com.gemstone.gemfire.internal.cache.xmlcache.CacheXmlGenerator;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnableIF;
 import com.gemstone.gemfire.util.test.TestUtil;
@@ -136,7 +137,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
           localRegion = cache.createRegion(regionName, attr.create());
         }
         catch (IllegalStateException ex) {
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .warning(
                   "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreate: Creation caught IllegalStateException",
                   ex);
@@ -173,7 +174,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
           localRegion = cache.createRegion(regionName, attr.create());
         }
         catch (IllegalStateException ex) {
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .warning(
                   "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreate: Creation caught IllegalStateException",
                   ex);
@@ -217,7 +218,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
           localRegion = cache.createRegionFactory(RegionShortcut.REPLICATE).create(regionName);
         }
         catch (IllegalStateException ex) {
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .warning(
                   "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreate: Creation caught IllegalStateException",
                   ex);
@@ -310,7 +311,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
             partitionedregion = cache.createRegion(regionName, attr.create());
           }
           catch (IllegalStateException ex) {
-            getLogWriter()
+            com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
                 .warning(
                     "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Creation caught IllegalStateException",
                     ex);
@@ -359,7 +360,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
             partitionedregion = cache.createRegion(regionName, attr.create());
           }
           catch (IllegalStateException ex) {
-            getLogWriter()
+            com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
                 .warning(
                     "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Creation caught IllegalStateException",
                     ex);
@@ -414,7 +415,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
             partitionedregion = cache.createRegion(regionName, attr.create());
           }
           catch (IllegalStateException ex) {
-            getLogWriter()
+            com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
                 .warning(
                     "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Creation caught IllegalStateException",
                     ex);
@@ -532,10 +533,10 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
               }
             } catch (EntryExistsException e) {
               // Do nothing let it go
-              getLogWriter().info("EntryExistsException was thrown for key "+ j);
+              com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("EntryExistsException was thrown for key "+ j);
             } catch (EntryNotFoundException e) {
               // Do nothing let it go
-              getLogWriter().info("EntryNotFoundException was thrown for key "+ j);
+              com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("EntryNotFoundException was thrown for key "+ j);
             }
           }
         }
@@ -741,7 +742,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
         }
 
         catch (QueryException e) {
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .error(
                   "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught QueryException while querying"
                       + e, e);
@@ -750,14 +751,14 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
         }
 
         catch (RegionDestroyedException rde) {
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .info(
                   "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a RegionDestroyedException while querying as expected ",
                   rde);
 
         }
         catch (CancelException cce) {
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .info(
                   "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a CancelException while querying as expected ",
                   cce);
@@ -868,7 +869,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
             }
           }
 
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
           .info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully on Local region & PR Region");
 
@@ -884,7 +885,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
         }
 
         catch (QueryException e) {
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
           .error(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught QueryException while querying"
               + e, e);
@@ -893,14 +894,14 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
         }
 
         catch (RegionDestroyedException rde) {
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
           .info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a RegionDestroyedException while querying as expected ",
               rde);
 
         }
         catch (CancelException cce) {
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
           .info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a CancelException while querying as expected ",
               cce);
@@ -1001,7 +1002,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
             }
           }
 
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
           .info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully on Local region & PR Region");
         }
@@ -1012,7 +1013,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
         }
 
         catch (QueryException e) {
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
           .error(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught QueryException while querying"
               + e, e);
@@ -1021,14 +1022,14 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
         }
 
         catch (RegionDestroyedException rde) {
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
           .info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a RegionDestroyedException while querying as expected ",
               rde);
 
         }
         catch (CancelException cce) {
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
           .info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a CancelException while querying as expected ",
               cce);
@@ -1141,7 +1142,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
             ssORrs.CompareQueryResultsWithoutAndWithIndexes(r, queries.length,true,rq);
 
           }
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
           .info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully on Local region & PR Region");
 
@@ -1155,7 +1156,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
         }
 
         catch (QueryException e) {
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
           .error(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught QueryException while querying"
               + e, e);
@@ -1164,14 +1165,14 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
         }
 
         catch (RegionDestroyedException rde) {
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
           .info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a RegionDestroyedException while querying as expected ",
               rde);
 
         }
         catch (CancelException cce) {
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
           .info(
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a CancelException while querying as expected ",
               cce);
@@ -1267,7 +1268,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
 //                  "Finished executing PR query: " + qStr);
             }
           }
-         getLogWriter()
+         com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
          .info(
              "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully on Local region & PR Region");
 
@@ -1283,7 +1284,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
        }
 
        catch (QueryException e) {
-         getLogWriter()
+         com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
          .error(
              "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught QueryException while querying"
              + e, e);
@@ -1292,14 +1293,14 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
        }
 
        catch (RegionDestroyedException rde) {
-         getLogWriter()
+         com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
          .info(
              "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a RegionDestroyedException while querying as expected ",
              rde);
 
        }
        catch (CancelException cce) {
-         getLogWriter()
+         com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
          .info(
              "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a CancelException while querying as expected ",
              cce);
@@ -1413,7 +1414,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
             r[j][1] = region.query(query[j]);
           }
 
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .info(
                   "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryWithConstantsAndComparingResults: Queries Executed successfully on Local region & PR Region");
 
@@ -1423,7 +1424,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
         catch (QueryException e) {
           // assertTrue("caught Exception"+ e.getMessage(),false);
 
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .error(
                   "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryWithConstantsAndComparingResults: Caught an Exception while querying Constants"
                       + e, e);
@@ -1543,7 +1544,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
             "PRQueryDUnitHelper#compareTwoQueryResults: Type 2 is NULL "
                 + type2, type2);
         if ((type1.getClass().getName()).equals(type2.getClass().getName())) {
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .info(
                   "PRQueryDUnitHelper#compareTwoQueryResults: Both Search Results are of the same Type i.e.--> "
                       + ((SelectResults)r[j][0]).getCollectionType()
@@ -1551,7 +1552,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
 
         }
         else {
-          getLogWriter().error(
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().error(
               "PRQueryDUnitHelper#compareTwoQueryResults: Classes are : "
                   + type1.getClass().getName() + " "
                   + type2.getClass().getName());
@@ -1561,14 +1562,14 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
         int size0 = ((SelectResults)r[j][0]).size();
         int size1 = ((SelectResults)r[j][1]).size();
         if (size0 == size1) {
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .info(
                   "PRQueryDUnitHelper#compareTwoQueryResults: Both Search Results are non-zero and are of Same Size i.e.  Size= "
                       + size1 + ";j=" + j);
 
         }
         else {
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .error(
                   "PRQueryDUnitHelper#compareTwoQueryResults: FAILED:Search resultSet size are different in both cases; size0=" +
                      size0 + ";size1=" + size1 + ";j=" + j);
@@ -1681,7 +1682,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
         }
         catch (QueryException qe) {
 
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .error(
                   "PRQueryDUnitHelper#getCacheSerializableRunnableForPRInvalidQuery: Caught another Exception while querying , Exception is "
                       + qe, qe);
@@ -1722,11 +1723,11 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
                 + "</ExpectedException>");
 
         Region region = cache.getRegion(regionName);
-        getLogWriter()
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
             .info(
                 "PROperationWithQueryDUnitTest#getCacheSerializableRunnableForRegionClose: Closing region");
         region.close();
-        getLogWriter()
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
             .info(
                 "PROperationWithQueryDUnitTest#getCacheSerializableRunnableForRegionClose: Region Closed on VM ");
 //        Region partitionedregion = null;
@@ -1740,7 +1741,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
             .create();
         attr.setPartitionAttributes(prAttr);
         cache.createRegion(regionName, attr.create());
-        getLogWriter()
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
             .info(
                 "PROperationWithQueryDUnitTest#getCacheSerializableRunnableForRegionClose: Region Recreated on VM ");
         getCache().getLogger().info(
@@ -1781,17 +1782,17 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
             "<ExpectedException action=add>" + expectedReplyException
                 + "</ExpectedException>");
 
-        getLogWriter()
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
             .info(
                 "PROperationWithQueryDUnitTest#getCacheSerializableRunnableForCacheClose: Closing cache");
         closeCache();
 
-        getLogWriter()
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
             .info(
                 "PROperationWithQueryDUnitTest#getCacheSerializableRunnableForCacheClose: cache Closed on VM ");
         cache = getCache();
 
-        getLogWriter()
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
             .info(
                 "PROperationWithQueryDUnitTest#getCacheSerializableRunnableForCacheClose: Recreating the cache ");
 //        Region partitionedregion = null;
@@ -1817,11 +1818,11 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
           //Wait for recovery to finish
           cdl.await();
         } catch (InterruptedException e) {
-          fail("interupted", e);
+          Assert.fail("interupted", e);
         } finally {
           InternalResourceManager.setResourceObserver(null);
         }
-        getLogWriter()
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
             .info(
                 "PROperationWithQueryDUnitTest#getCacheSerializableRunnableForCacheClose: cache Recreated on VM ");
         getCache().getLogger().info(
@@ -1890,7 +1891,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
 
         Region region = cache.getRegion(regionName);
 
-        getLogWriter()
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
             .info(
                 "PRQueryRegionDestroyedDUnitTest#getCacheSerializableRunnableForRegionClose: Destroying region "
                     + region);
@@ -1945,7 +1946,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
           }
           long endTimeLocal=System.currentTimeMillis();
           long queryTimeLocal = endTimeLocal-startTimeLocal;
-          getLogWriter().info("PRQueryDUnitHelper#PRQueryingVsLocalQuerying: Time to Query Local cache "+queryTimeLocal + " ms");
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#PRQueryingVsLocalQuerying: Time to Query Local cache "+queryTimeLocal + " ms");
           
           long startTimePR = System.currentTimeMillis();
           for (int k = 0; k < query.length; k++) {
@@ -1955,8 +1956,8 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
           long endTimePR = System.currentTimeMillis();
           long queryTimePR = endTimePR-startTimePR;
           
-          getLogWriter().info("PRQueryDUnitHelper#PRQueryingVsLocalQuerying: Time to Query PR "+queryTimePR+" ms");
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper#PRQueryingVsLocalQuerying: Time to Query PR "+queryTimePR+" ms");
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .info(
                   "PRQueryDUnitHelper#PRQueryingVsLocalQuerying: Queries Executed successfully on Local region & PR Region");
 
@@ -1968,7 +1969,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
           
         }
         catch (QueryException e) {
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .error(
                   "PRQueryDUnitHelper#PRQueryingVsLocalQuerying: Caught QueryException while querying"
                       + e, e);
@@ -2003,34 +2004,34 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
     public void displayResults(){
       
       try {
-        getLogWriter().info("PRQueryDUnitHelper:PerfResultsObject#displayResults");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("PRQueryDUnitHelper:PerfResultsObject#displayResults");
         BufferedWriter out = new BufferedWriter(new FileWriter("PRQueryPerfDUnitTest.txt", true));
         
-        getLogWriter().info("~~~~~~~~~~~~~~~~~~~~~~~PR Querying Performance Results~~~~~~~~~~~~~~~~~~~~~~~");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("~~~~~~~~~~~~~~~~~~~~~~~PR Querying Performance Results~~~~~~~~~~~~~~~~~~~~~~~");
         out.write("~~~~~~~~~~~~~~~~~~~~~~~PR Querying Performance Results~~~~~~~~~~~~~~~~~~~~~~~\n\n");
         
-        getLogWriter().info(this.OperationDescription);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(this.OperationDescription);
         out.write("\t"+this.OperationDescription+"\n\n");
         
-        getLogWriter().info("Scope                    : "+this.Scope);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Scope                    : "+this.Scope);
         out.write("Scope                    : "+this.Scope+"\n\n");
         
-        getLogWriter().info("Redundancy Level         : "+this.redundancy);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Redundancy Level         : "+this.redundancy);
         out.write("Redundancy Level         : "+this.redundancy+"\n\n");
         
-        getLogWriter().info("Number of Accessor       : "+this.NumberOfAccessors);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Number of Accessor       : "+this.NumberOfAccessors);
         out.write("Number of Accessor       : "+this.NumberOfAccessors+"\n\n");
         
-        getLogWriter().info("Number of Datastore/s    : "+this.NumberOfDataStores);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Number of Datastore/s    : "+this.NumberOfDataStores);
         out.write("Number of Datastore/s    : "+this.NumberOfDataStores+"\n\n");
         
-        getLogWriter().info("QueryingTime Local       : "+this.QueryingTimeLocal+" ms");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("QueryingTime Local       : "+this.QueryingTimeLocal+" ms");
         out.write("QueryingTime Local       : "+this.QueryingTimeLocal+" ms\n\n");
         
-        getLogWriter().info("QueryingTime PR          : "+this.QueryingTimePR+" ms");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("QueryingTime PR          : "+this.QueryingTimePR+" ms");
         out.write("QueryingTime PR          : "+this.QueryingTimePR+" ms\n");
         
-        getLogWriter().info("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
         out.write("\n\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n");
         out.close();
     } catch (IOException e) {
@@ -2095,7 +2096,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
            */
         }
         catch (Exception ex) {
-          fail("Creating Index in this vm failed : ", ex);
+          Assert.fail("Creating Index in this vm failed : ", ex);
         }
       }
     };
@@ -2137,7 +2138,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
             }
             fail("Multi index creation failed, " + sb);
           } else {
-           fail("Creating Index in this vm failed : ", ex);
+           Assert.fail("Creating Index in this vm failed : ", ex);
           }
         }
         assertNotNull("Indexes should have been created.", indexes);
@@ -2181,7 +2182,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
           
         }
         catch (Exception ex) {
-          fail("Creating Index in this vm failed : ", ex);
+          Assert.fail("Creating Index in this vm failed : ", ex);
         }
       }
     };
@@ -2256,7 +2257,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
       pw.close();
     }
     catch (IOException ex) {
-      fail("IOException during cache.xml generation to " + file, ex);
+      Assert.fail("IOException during cache.xml generation to " + file, ex);
     }
 
   }
@@ -2397,7 +2398,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
         }
         catch (RegionNotFoundException exx) {
           // TODO Auto-generated catch block
-          fail("Region Not found in this vm ", exx);
+          Assert.fail("Region Not found in this vm ", exx);
         }
 
       }
@@ -2545,7 +2546,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
                     + " r2 where " + queries[j]).execute();
             r[j][1] = r2.asList();
           }
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .info(
                   "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully on Local region & PR Region");
 
@@ -2561,7 +2562,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception",
               e);
         } catch (QueryException e) {
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .error(
                   "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught QueryException while querying"
                       + e, e);
@@ -2569,13 +2570,13 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception",
               e);
         } catch (RegionDestroyedException rde) {
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .info(
                   "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a RegionDestroyedException while querying as expected ",
                   rde);
 
         } catch (CancelException cce) {
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .info(
                   "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a CancelException while querying as expected ",
                   cce);
@@ -2674,7 +2675,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
                     + " r2, r2.positions.values pos2 where " + queries[j]).execute();
             r[j][1] = r2.asList();
           }
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .info(
                   "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully on Local region & PR Region");
 
@@ -2690,7 +2691,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception",
               e);
         } catch (QueryException e) {
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .error(
                   "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught QueryException while querying"
                       + e, e);
@@ -2698,13 +2699,13 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception",
               e);
         } catch (RegionDestroyedException rde) {
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .info(
                   "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a RegionDestroyedException while querying as expected ",
                   rde);
 
         } catch (CancelException cce) {
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .info(
                   "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a CancelException while querying as expected ",
                   cce);
@@ -2804,7 +2805,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
                     + " r2, r2.positions.values pos2 where " + queries[j]).execute();
             r[j][1] = r2.asList();
           }
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .info(
                   "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully on Local region & PR Region");
 
@@ -2820,7 +2821,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception",
               e);
         } catch (QueryException e) {
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .error(
                   "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught QueryException while querying"
                       + e, e);
@@ -2828,13 +2829,13 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception",
               e);
         } catch (RegionDestroyedException rde) {
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .info(
                   "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a RegionDestroyedException while querying as expected ",
                   rde);
 
         } catch (CancelException cce) {
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .info(
                   "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a CancelException while querying as expected ",
                   cce);
@@ -2934,7 +2935,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
                     + " r2 where " + queries[j]).execute();
             r[j][1] = r2.asList();
           }
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .info(
                   "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Queries Executed successfully on Local region & PR Region");
 
@@ -2950,7 +2951,7 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception",
               e);
         } catch (QueryException e) {
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .error(
                   "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught QueryException while querying"
                       + e, e);
@@ -2958,13 +2959,13 @@ public class PRQueryDUnitHelper extends PartitionedRegionDUnitTestCase
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception",
               e);
         } catch (RegionDestroyedException rde) {
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .info(
                   "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a RegionDestroyedException while querying as expected ",
                   rde);
 
         } catch (CancelException cce) {
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .info(
                   "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught a CancelException while querying as expected ",
                   cce);
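
In PRQueryDUnitHelper the patch imports only Assert and leaves the logging call sites fully qualified as com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter(); the two-argument Assert.fail(String, Throwable) replaces the inherited fail that could carry a cause. A minimal sketch of the resulting failure-handling pattern, assuming those dunit classes are available; the class and runQuery method below are hypothetical:

    import com.gemstone.gemfire.test.dunit.Assert;
    import com.gemstone.gemfire.test.dunit.LogWriterUtils;

    public class QueryFailureSketch {

      void runQuery(Runnable query) {
        try {
          query.run();
          LogWriterUtils.getLogWriter().info(
              "Queries Executed successfully on Local region & PR Region");
        } catch (Exception e) {
          // JUnit's fail(String) cannot carry a cause, so the migrated call sites use the
          // dunit Assert.fail(String, Throwable) overload instead.
          LogWriterUtils.getLogWriter().error("Caught exception while querying " + e, e);
          Assert.fail("Test failed after the execute operation", e);
        }
      }
    }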

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryDUnitTest.java
index 9912f08..098a881 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryDUnitTest.java
@@ -46,6 +46,8 @@ import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.internal.cache.PartitionedRegion;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionDUnitTestCase;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionQueryEvaluator;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.VM;
@@ -96,7 +98,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
 
   public void testPRDAckCreationAndQuerying() throws Exception
   {
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Querying PR Test with DACK Started*****");
 
@@ -107,7 +109,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
     VM vm3 = host.getVM(3);
 
     // Creating PR's on the participating VM's
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Creating PR's on VM0, VM1 , VM2 , VM3");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
@@ -118,17 +120,17 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
         redundancy));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
         redundancy));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Successfully Created PR's on VM0, VM1 , VM2 , VM3");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Creating Local region on VM0 to compare result Sets");
     // creating a local region on one of the JVM's
     vm0.invoke(PRQHelp
         .getCacheSerializableRunnableForLocalRegionCreation(localName));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Successfully Created Local Region on VM0");
 
@@ -146,7 +148,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
         (2 * stepSize), (3 * stepSize)));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         (3 * (stepSize)), totalDataSize));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Inserted Portfolio data across PR's");
 
@@ -154,14 +156,14 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName,
         portfolio, i, totalDataSize));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Inserted Portfolio data over Local Region on VM0");
 
     // querying the VM for data
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRQueryAndCompareResults(
         name, localName));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQuerying : *Querying PR's with DACK Test ENDED*****");
   }
@@ -180,7 +182,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
 
   public void testPRDAckCreationAndQueryingFull() throws Exception
   {
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
     .info(
           "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Querying PR Test with DACK Started*****");
 
@@ -192,7 +194,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
     VM vm3 = host.getVM(3);
 
     // Creating PR's on the participating VM's
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
       .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Creating PR's on VM0, VM1 , VM2 , VM3");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
@@ -203,17 +205,17 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
                                                                redundancy, valueConstraint));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
                                                                redundancy, valueConstraint));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
       .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Successfully Created PR's on VM0, VM1 , VM2 , VM3");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
       .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Creating Local region on VM0 to compare result Sets");
     // creating a local region on one of the JVM's
     vm0.invoke(PRQHelp
                .getCacheSerializableRunnableForLocalRegionCreation(localName, valueConstraint));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
       .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Successfully Created Local Region on VM0");
 
@@ -231,7 +233,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
                                                              (2 * stepSize), (3 * stepSize)));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfoliosAndPositions,
                                                              (3 * (stepSize)), totalDataSize));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
       .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Inserted Portfolio data across PR's");
 
@@ -239,14 +241,14 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName,
                                                              portfoliosAndPositions, i, totalDataSize));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
       .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Inserted Portfolio data over Local Region on VM0");
 
     // querying the VM for data
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRQueryAndCompareResults(
                                                                                name, localName, true));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
       .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQuerying : *Querying PR's with DACK Test ENDED*****");
   }
@@ -266,7 +268,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
 
   public void testPRDAckCreationAndQueryingWithConstants() throws Exception
   {
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQueryingWithConstants : Querying PR Test with DACK Started*****");
 
@@ -281,7 +283,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
     VM vm3 = host.getVM(3);
 
     // Creating PR's on the participating VM's
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQueryingWithConstants : Creating PR's on VM0, VM1 , VM2 , VM3");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
@@ -292,17 +294,17 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
         redundancy));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
         redundancy));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQueryingWithConstants : Successfully Created PR's on VM0, VM1 , VM2 , VM3");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQueryingWithConstants : Creating Local region on VM0 to compare result Sets");
     // creating a local region on one of the JVM's
     vm0.invoke(PRQHelp
         .getCacheSerializableRunnableForLocalRegionCreation(localName));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQueryingWithConstants : Successfully Created Local Region on VM0");
 
@@ -320,7 +322,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
         (2 * stepSize), (3 * stepSize)));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         (3 * (stepSize)), totalDataSize));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQueryingWithConstants : Inserted Portfolio data across PR's");
 
@@ -328,7 +330,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName,
         portfolio, i, totalDataSize));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQueryingWithConstants : Inserted Portfolio data over Local Region on VM0");
 
@@ -337,7 +339,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
         .invoke(PRQHelp
             .getCacheSerializableRunnableForPRQueryWithConstantsAndComparingResults(
                 name, localName));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQueryingWithConstants : *Querying PR's with DACK Test ENDED*****");
   }
@@ -399,7 +401,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
     final MyTestHook th = new MyTestHook();
 
     // add expected exception strings
-    final ExpectedException ex = addExpectedException("Data loss detected");
+    final IgnoredException ex = IgnoredException.addIgnoredException("Data loss detected");
     try {
       Object[] params = new Object[0];
       final DefaultQuery query = (DefaultQuery)getCache().getQueryService()
@@ -522,7 +524,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
         for (Object r: th.resultsPerMember.entrySet()){
           Map.Entry e = (Map.Entry)r;
           Integer res = (Integer)e.getValue();
-          getLogWriter().info("PRQueryDUnitTest#testQueryResultsFromMembers : \n" +
+          LogWriterUtils.getLogWriter().info("PRQueryDUnitTest#testQueryResultsFromMembers : \n" +
               "Query [" + queries[q] + "] Member : " + e.getKey() + " results size :" + res.intValue());
           assertEquals("Query [" + queries[q] + "]: The results returned by the member does not match the query limit size : Member : " + e.getKey(), limit[q], res.intValue());
         }
@@ -629,7 +631,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
         for (Object r: th.resultsPerMember.entrySet()){
           Map.Entry e = (Map.Entry)r;
           Integer res = (Integer)e.getValue();
-          getLogWriter().info("PRQueryDUnitTest#testQueryResultsFromMembers : \n" +
+          LogWriterUtils.getLogWriter().info("PRQueryDUnitTest#testQueryResultsFromMembers : \n" +
               "Query [" + queries[q] + "] Member : " + e.getKey() + " results size :" + res.intValue());
           if (res.intValue() != 0 /* accessor member */ || res.intValue() != limit[q]) {
             assertEquals("Query [" + queries[q] + "]: The results returned by the member does not match the query limit size : Member : " + e.getKey(), limit[q], res.intValue());
@@ -682,7 +684,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
     });
 
     // add expected exception strings
-    final ExpectedException ex = addExpectedException("Data loss detected",
+    final IgnoredException ex = IgnoredException.addIgnoredException("Data loss detected",
         accessor);
     accessor.invoke(new SerializableCallable(
         "Create bucket and test dataloss query") {
@@ -731,7 +733,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
 
   public void testPRAccessorCreationAndQuerying() throws Exception
   {
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQuerying : Querying PR Test with DACK Started*****");
     Host host = Host.getHost(0);
@@ -747,17 +749,17 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
     // Creting PR's on the participating VM's
 
     // Creating Accessor node on the VM
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQuerying : Creating the Accessor node in the PR");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
         0));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQuerying : Successfully created the Accessor node in the PR");
 
     // Creating the Datastores Nodes in the VM's
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQuerying : Creating the Datastore node in the PR");
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
@@ -766,17 +768,17 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
         redundancy));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
         redundancy));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQuerying : Successfully Created the Datastore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQuerying : Successfully Created PR's across all VM's");
     // creating a local region on one of the JVM's
     vm0.invoke(PRQHelp
         .getCacheSerializableRunnableForLocalRegionCreation(localName));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQuerying : Successfully Created Local Region on VM0");
 
@@ -793,7 +795,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
         (2 * stepSize), (3 * stepSize)));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         (3 * (stepSize)), totalDataSize));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQuerying : Inserted Portfolio data across PR's");
 
@@ -804,7 +806,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
     // querying the VM for data
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRQueryAndCompareResults(
         name, localName));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQuerying : Querying PR's Test ENDED*****");
   }
@@ -825,7 +827,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
    int dataSize = 10;
    int step = 2;
    
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
    .info(
          "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Querying PR Test with DACK Started*****");
 
@@ -837,7 +839,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
    VM vm3 = host.getVM(3);
 
    // Creating PR's on the participating VM's
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Creating PR's on VM0, VM1 , VM2 , VM3");
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
@@ -848,17 +850,17 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
                                                               redundancy, valueConstraint));
    vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
                                                               redundancy, valueConstraint));
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Successfully Created PR's on VM0, VM1 , VM2 , VM3");
 
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Creating Local region on VM0 to compare result Sets");
    // creating a local region on one of the JVM's
    vm0.invoke(PRQHelp
               .getCacheSerializableRunnableForLocalRegionCreation(localName, valueConstraint));
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Successfully Created Local Region on VM0");
 
@@ -876,7 +878,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
                                                             (2 * step), (3 * step)));
    vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRPutsKeyValue(name, portfoliosAndPositions,
                                                             (3 * (step)), dataSize));
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Inserted Portfolio data across PR's");
 
@@ -884,14 +886,14 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPutsKeyValue(localName,
                                                             portfoliosAndPositions, i, dataSize));
 
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Inserted Portfolio data over Local Region on VM0");
 
    // querying the VM for data
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPROrderByQueryAndCompareResults(
                                                                               name, localName));
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
      .info(
            "PRQueryDUnitTest#testPRDAckCreationAndQuerying : *Querying PR's with DACK Test ENDED*****");
  }
@@ -912,7 +914,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
    int dataSize = 10;
    int step = 2;
 
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
    .info(
    "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Querying PR Test with DACK Started*****");
 
@@ -924,7 +926,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
    VM vm3 = host.getVM(3);
 
    // Creating PR's on the participating VM's
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
    .info(
    "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Creating PR's on VM0, VM1 , VM2 , VM3");
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
@@ -935,17 +937,17 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
        redundancy, valueConstraint));
    vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
        redundancy, valueConstraint));
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
    .info(
        "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Successfully Created PR's on VM0, VM1 , VM2 , VM3");
 
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
    .info(
        "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Creating Local region on VM0 to compare result Sets");
    // creating a local region on one of the JVM's
    vm0.invoke(PRQHelp
        .getCacheSerializableRunnableForLocalRegionCreation(localName, valueConstraint));
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
    .info(
        "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Successfully Created Local Region on VM0");
 
@@ -963,7 +965,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
        (2 * step), (3 * step)));
    vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRPutsKeyValue(name, portfoliosAndPositions,
        (3 * (step)), dataSize));
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
    .info(
        "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Inserted Portfolio data across PR's");
 
@@ -971,14 +973,14 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPutsKeyValue(localName,
        portfoliosAndPositions, i, dataSize));
 
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
    .info(
        "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Inserted Portfolio data over Local Region on VM0");
 
    // querying the VM for data
    vm0.invoke(PRQHelp.getCacheSerializableRunnableForPROrderByQueryAndVerifyOrder(
        name, localName));
-   getLogWriter()
+   LogWriterUtils.getLogWriter()
    .info(
        "PRQueryDUnitTest#testPRDAckCreationAndQuerying : *Querying PR's with DACK Test ENDED*****");
  }
@@ -999,7 +1001,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
     int step = 2;
     
     Class valueConstraint = Portfolio.class;
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQuerying : Querying PR Test with DACK Started*****");
     Host host = Host.getHost(0);
@@ -1015,17 +1017,17 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
     // Creting PR's on the participating VM's
 
     // Creating Accessor node on the VM
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQuerying : Creating the Accessor node in the PR");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
         0, valueConstraint));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQuerying : Successfully created the Accessor node in the PR");
 
     // Creating the Datastores Nodes in the VM's
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQuerying : Creating the Datastore node in the PR");
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
@@ -1034,17 +1036,17 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
         redundancy, valueConstraint));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
         redundancy, valueConstraint));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQuerying : Successfully Created the Datastore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQuerying : Successfully Created PR's across all VM's");
     // creating a local region on one of the JVM's
     vm0.invoke(PRQHelp
         .getCacheSerializableRunnableForLocalRegionCreation(localName, valueConstraint));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQuerying : Successfully Created Local Region on VM0");
 
@@ -1061,7 +1063,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
         (2 * step), (3 * step)));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRPutsKeyValue(name, portfolio,
         (3 * (step)), dataSize));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQuerying : Inserted Portfolio data across PR's");
 
@@ -1072,7 +1074,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
     // querying the VM for data
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPROrderByQueryAndCompareResults(
         name, localName));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQuerying : Querying PR's Test ENDED*****");
   }
@@ -1082,7 +1084,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
     int dataSize = 10;
     int step = 2;
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
     .info(
           "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Querying PR Test with DACK Started*****");
 
@@ -1094,7 +1096,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
     VM vm3 = host.getVM(3);
 
     // Creating PR's on the participating VM's
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
       .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Creating PR's on VM0, VM1 , VM2 , VM3");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
@@ -1105,17 +1107,17 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
                                                                redundancy, valueConstraint));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
                                                                redundancy, valueConstraint));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
       .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Successfully Created PR's on VM0, VM1 , VM2 , VM3");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
       .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Creating Local region on VM0 to compare result Sets");
     // creating a local region on one of the JVM's
     vm0.invoke(PRQHelp
                .getCacheSerializableRunnableForLocalRegionCreation(localName, valueConstraint));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
       .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Successfully Created Local Region on VM0");
 
@@ -1133,7 +1135,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
                                                              (2 * step), (3 * step)));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRPutsKeyValue(name, portfoliosAndPositions,
                                                              (3 * (step)), dataSize));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
       .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Inserted Portfolio data across PR's");
 
@@ -1141,14 +1143,14 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPutsKeyValue(localName,
                                                              portfoliosAndPositions, i, dataSize));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
       .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQuerying : Inserted Portfolio data over Local Region on VM0");
 
     // querying the VM for data
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPROrderByQueryWithLimit(
                                                                                name, localName));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
       .info(
             "PRQueryDUnitTest#testPRDAckCreationAndQuerying : *Querying PR's with DACK Test ENDED*****");
   }
@@ -1167,7 +1169,7 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
 
   public void testPRAccessorCreationAndQueryingWithNoData() throws Exception
   {
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQueryingWithNoData : Querying PR Test with No Data  Started*****");
     Host host = Host.getHost(0);
@@ -1183,17 +1185,17 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
     // Creting PR's on the participating VM's
 
     // Creating Accessor node on the VM
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQueryingWithNoData : Creating the Accessor node in the PR");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
         0));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQueryingWithNoData : Successfully created the Accessor node in the PR");
 
     // Creating the Datastores Nodes in the VM's
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQueryingWithNoData : Creating the Datastore node in the PR");
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
@@ -1202,24 +1204,24 @@ public class PRQueryDUnitTest extends PartitionedRegionDUnitTestCase
         redundancy));
     vm3.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
         redundancy));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQueryingWithNoData : Successfully Created the Datastore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQueryingWithNoData : Successfully Created PR's across all VM's");
     // creating a local region on one of the JVM's
     vm0.invoke(PRQHelp
         .getCacheSerializableRunnableForLocalRegionCreation(localName));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQueryingWithNoData : Successfully Created Local Region on VM0");
 
     // querying the VM for data
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRQueryAndCompareResults(
         name, localName));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryDUnitTest#testPRAccessorCreationAndQueryingWithNoData : Querying PR's Test No Data ENDED*****");
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryPerfDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryPerfDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryPerfDUnitTest.java
index 069ea52..609c1a3 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryPerfDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryPerfDUnitTest.java
@@ -79,7 +79,7 @@ public class PRQueryPerfDUnitTest extends PartitionedRegionDUnitTestCase {
   throws Exception
  {
 
-    LogWriter log = getLogWriter();
+    LogWriter log = com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter();
     log.info("BenchMarking PR Querying Test Started*****");
     Host host = Host.getHost(0);
 
@@ -165,7 +165,7 @@ public class PRQueryPerfDUnitTest extends PartitionedRegionDUnitTestCase {
   public void norun_testBenchmarkingQueryingOneAccessorTwoDS_Redundancy0()
       throws Exception
   {
-    LogWriter log = getLogWriter();
+    LogWriter log = com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter();
 
     log.info("BenchMarking PR Querying Test Started*****");
     Host host = Host.getHost(0);
@@ -249,7 +249,7 @@ public class PRQueryPerfDUnitTest extends PartitionedRegionDUnitTestCase {
   public void norun_testBenchmarkingQueryingOneAccessorTwoDS_D_ACK_Redundancy1()
       throws Exception
   {
-    LogWriter log = getLogWriter();
+    LogWriter log = com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter();
     log.info("BenchMarking PR Querying Test Started*****");
     Host host = Host.getHost(0);
 
@@ -330,7 +330,7 @@ public class PRQueryPerfDUnitTest extends PartitionedRegionDUnitTestCase {
   public void norun_testBenchmarkingQueryingOneAccessorThreeDS_Redundancy1()
       throws Exception
   {
-    LogWriter log = getLogWriter();
+    LogWriter log = com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter();
     log.info("BenchMarking PR Querying Test Started*****");
     Host host = Host.getHost(0);
 
@@ -415,7 +415,7 @@ public class PRQueryPerfDUnitTest extends PartitionedRegionDUnitTestCase {
   public void norun_testBenchmarkingQueryingOneAccessorThreeDS_Redundancy2()
       throws Exception
   {
-    LogWriter log = getLogWriter();
+    LogWriter log = com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter();
     log.info("BenchMarking PR Querying Test Started*****");
     Host host = Host.getHost(0);
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRegionCloseDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRegionCloseDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRegionCloseDUnitTest.java
index 56a643c..8641f48 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRegionCloseDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRegionCloseDUnitTest.java
@@ -32,10 +32,13 @@ import java.util.Random;
 
 import com.gemstone.gemfire.cache.query.data.PortfolioData;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionDUnitTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 import com.gemstone.gemfire.internal.cache.ForceReattemptException;
 
 public class PRQueryRegionCloseDUnitTest extends PartitionedRegionDUnitTestCase
@@ -89,7 +92,7 @@ public class PRQueryRegionCloseDUnitTest extends PartitionedRegionDUnitTestCase
 
   {
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Querying PR Test with region Close PR operation*****");
     Host host = Host.getHost(0);
@@ -101,33 +104,33 @@ public class PRQueryRegionCloseDUnitTest extends PartitionedRegionDUnitTestCase
     vmList.add(vm1);
     vmList.add(vm2);
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Creating Accessor node on VM0");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
         redundancy));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Successfully Created Accessor node on VM0");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Creating PR's across all VM1 , VM2");
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
         redundancy));
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
         redundancy));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Successfully Created PR on VM1 , VM2");
 
     // creating a local region on one of the JVM's
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Creating Local Region on VM0");
     vm0.invoke(PRQHelp
         .getCacheSerializableRunnableForLocalRegionCreation(localName));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Successfully Created Local Region on VM0");
 
@@ -137,36 +140,36 @@ public class PRQueryRegionCloseDUnitTest extends PartitionedRegionDUnitTestCase
     final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
 
     // Putting the data into the accessor node
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Inserting Portfolio data through the accessor node");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Successfully Inserted Portfolio data through the accessor node");
 
     // Putting the same data in the local region created
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Inserting Portfolio data on local node  VM0 for result Set Comparison");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName,
         portfolio, cnt, cntDest));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Successfully Inserted Portfolio data on local node  VM0 for result Set Comparison");
 
     Random random = new Random();
     AsyncInvocation async0;
     // querying the VM for data
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Querying on VM0 both on PR Region & local ,also  Comparing the Results sets from both");
     async0 = vm0
         .invokeAsync(PRQHelp.getCacheSerializableRunnableForPRQueryAndCompareResults(
             name, localName));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Calling for Region.close() on either of the Datastores VM1 , VM2 at random and then recreating the cache, with a predefined Delay ");
     for (int j = 0; j < queryTestCycle; j++) {
@@ -174,10 +177,10 @@ public class PRQueryRegionCloseDUnitTest extends PartitionedRegionDUnitTestCase
       if( 0 != k ) {
       ((VM)(vmList.get(k))).invoke(PRQHelp.getCacheSerializableRunnableForRegionClose(
           name, redundancy));
-      pause(threadSleepTime);
+      Wait.pause(threadSleepTime);
       }
     }
-    DistributedTestCase.join(async0, 30 * 1000, getLogWriter());
+    ThreadUtils.join(async0, 30 * 1000);
 
     if (async0.exceptionOccurred()) {
       // for now, certain exceptions when a region is closed are acceptable
@@ -193,11 +196,11 @@ public class PRQueryRegionCloseDUnitTest extends PartitionedRegionDUnitTestCase
       } while (t != null);
       
       if (!isForceReattempt) {
-        fail("Unexpected exception during query", async0.getException());
+        Assert.fail("Unexpected exception during query", async0.getException());
       }
     }
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionCloseDUnitTest#testPRWithRegionCloseInOneDatastoreWithoutDelay: Querying with PR Operations ENDED*****");
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRegionDestroyedDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRegionDestroyedDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRegionDestroyedDUnitTest.java
index 3401d27..c3a37dd 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRegionDestroyedDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRegionDestroyedDUnitTest.java
@@ -32,10 +32,13 @@ import java.util.Random;
 
 import com.gemstone.gemfire.cache.query.data.PortfolioData;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionDUnitTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 import com.gemstone.gemfire.internal.cache.ForceReattemptException;
 
 public class PRQueryRegionDestroyedDUnitTest extends PartitionedRegionDUnitTestCase
@@ -88,7 +91,7 @@ public class PRQueryRegionDestroyedDUnitTest extends PartitionedRegionDUnitTestC
       throws Exception
 
   {
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithRegionDestroyInOneDatastoreWithDelay: Querying with PR Destroy Region Operation Test Started");
     Host host = Host.getHost(0);
@@ -102,16 +105,16 @@ public class PRQueryRegionDestroyedDUnitTest extends PartitionedRegionDUnitTestC
     vmList.add(vm2);
     vmList.add(vm3);
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithRegionDestroyInOneDatastoreWithDelay: Creating Accessor node on VM0");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
         redundancy));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithRegionDestroyInOneDatastoreWithDelay: Successfully Created Accessor node on VM0");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithRegionDestroyInOneDatastoreWithDelay: Creating PR's across all VM1 , VM2, VM3");
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
@@ -122,17 +125,17 @@ public class PRQueryRegionDestroyedDUnitTest extends PartitionedRegionDUnitTestC
         redundancy));
     
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithRegionDestroyInOneDatastoreWithDelay: Successfully Created PR on VM1 , VM2, VM3");
 
     // creating a local region on one of the JVM's
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithRegionDestroyInOneDatastoreWithDelay: Creating Local Region on VM0");
     vm0.invoke(PRQHelp
         .getCacheSerializableRunnableForLocalRegionCreation(localName));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithRegionDestroyInOneDatastoreWithDelay: Successfully Created Local Region on VM0");
 
@@ -143,22 +146,22 @@ public class PRQueryRegionDestroyedDUnitTest extends PartitionedRegionDUnitTestC
  
 
     // Putting the data into the accessor node
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithRegionDestroyInOneDatastoreWithDelay: Inserting Portfolio data through the accessor node");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithRegionDestroyInOneDatastoreWithDelay: Successfully Inserted Portfolio data through the accessor node");
 
     // Putting the same data in the local region created
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithRegionDestroyInOneDatastoreWithDelay: Inserting Portfolio data on local node  VM0 for result Set Comparison");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName,
         portfolio, cnt, cntDest));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithRegionDestroyInOneDatastoreWithDelay: Successfully Inserted Portfolio data on local node  VM0 for result Set Comparison");
 
@@ -167,7 +170,7 @@ public class PRQueryRegionDestroyedDUnitTest extends PartitionedRegionDUnitTestC
 
     // Execute query first time. This is to make sure all the buckets are created 
     // (lazy bucket creation).
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithRegionDestroyInOneDatastoreWithDelay: Querying on VM0 First time");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRQueryAndCompareResults(
@@ -175,15 +178,15 @@ public class PRQueryRegionDestroyedDUnitTest extends PartitionedRegionDUnitTestC
 
     // Now execute the query. And while query execution in process destroy the region 
     // on one of the node.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithRegionDestroyInOneDatastoreWithDelay: Querying on VM0 both on PR Region & local ,also  Comparing the Results sets from both");
     async0 = vm0
         .invokeAsync(PRQHelp.getCacheSerializableRunnableForPRQueryAndCompareResults(
             name, localName));
     
-    pause(5);
-    getLogWriter()
+    Wait.pause(5);
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithRegionDestroyInOneDatastoreWithDelay: Calling for Region.destroyRegion() on either of the Datastores VM1 , VM2 at random and then recreating the cache, with a predefined Delay ");
     
@@ -193,7 +196,7 @@ public class PRQueryRegionDestroyedDUnitTest extends PartitionedRegionDUnitTestC
           name, redundancy));
     
     
-      DistributedTestCase.join(async0, 30 * 1000, getLogWriter());
+      ThreadUtils.join(async0, 30 * 1000);
 
     if (async0.exceptionOccurred()) {
       // for Elbe, certain exceptions when a region is destroyed are acceptable
@@ -209,10 +212,10 @@ public class PRQueryRegionDestroyedDUnitTest extends PartitionedRegionDUnitTestC
       } while (t != null);
       
       if (!isForceReattempt) {
-        fail("Unexpected exception during query", async0.getException());
+        Assert.fail("Unexpected exception during query", async0.getException());
       }
     }
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryRegionDestroyedDUnitTest#testPRWithRegionDestroyInOneDatastoreWithDelay: Querying with PR Destroy Region Operation Test ENDED");
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRegionDestroyedJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRegionDestroyedJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRegionDestroyedJUnitTest.java
index 4b9ae3d..314d4a4 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRegionDestroyedJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryRegionDestroyedJUnitTest.java
@@ -34,7 +34,7 @@ import com.gemstone.gemfire.cache.query.RegionNotFoundException;
 import com.gemstone.gemfire.cache.query.SelectResults;
 import com.gemstone.gemfire.cache.query.data.PortfolioData;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionTestHelper;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
@@ -216,8 +216,8 @@ public class PRQueryRegionDestroyedJUnitTest
       logger
           .info("PRQueryRegionDestroyedJUnitTest#testQueryOnSingleDataStore: Waiting for the Threads to join ");
 
-      DistributedTestCase.join(t1, 30 * 1000, null);
-      DistributedTestCase.join(t2, 30 * 1000, null);
+      ThreadUtils.join(t1, 30 * 1000);
+      ThreadUtils.join(t2, 30 * 1000);
       logger
           .info("PRQueryRegionDestroyedJUnitTest#testQueryOnSingleDataStore: checking for any Unexpected Exception's occured");
 


[51/62] [abbrv] [partial] incubator-geode git commit: GEODE-773: Extract static methods from DistributedTestCase

Posted by je...@apache.org.
GEODE-773: Extract static methods from DistributedTestCase

Refactor DistributedTestCase by extracting inner classes & interfaces into:
* IgnoredException
* StoppableWaitCriterion
* WaitCriterion
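For example, suspect log strings that tests used to register through an inherited helper are now handled by the standalone IgnoredException class, and wait conditions are expressed against the top-level WaitCriterion interface. A rough usage sketch follows; the addIgnoredException call and the "Data loss detected" string mirror the PRQueryDUnitTest hunk earlier in this series, while the remove() call, the Wait.waitForCriterion signature, and the class/method names are assumptions for illustration only, not taken from this commit:

  import com.gemstone.gemfire.test.dunit.IgnoredException;
  import com.gemstone.gemfire.test.dunit.Wait;
  import com.gemstone.gemfire.test.dunit.WaitCriterion;

  // Hypothetical test fragment illustrating the extracted classes.
  public class ExtractedClassesSketch {

    void queryWhileBucketsMove(final java.util.Map region, final int expectedSize) {
      // Register a log pattern the dunit framework should not flag as a
      // suspect string while the risky operation runs.
      final IgnoredException expected =
          IgnoredException.addIgnoredException("Data loss detected");
      try {
        // ... run the query that may legitimately log "Data loss detected" ...

        // WaitCriterion is now a top-level interface; waitForCriterion's
        // signature (criterion, timeout ms, poll interval ms, throwOnTimeout)
        // is assumed here rather than shown in this commit.
        Wait.waitForCriterion(new WaitCriterion() {
          public boolean done() {
            return region.size() == expectedSize;   // placeholder condition
          }
          public String description() {
            return "region never reached size " + expectedSize;
          }
        }, 30 * 1000, 200, true);
      } finally {
        expected.remove();   // assumed cleanup method on the extracted class
      }
    }
  }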

Refactor DistributedTestCase by extracting static methods into:
* Assert
* DebuggerUtils
* DistributedTestUtils
* Invoke
* Jitter
* LogWriterUtils
* NetworkUtils
* ThreadUtils
* Wait
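Most hunks in this series are the corresponding mechanical change: calls that dunit tests used to inherit from DistributedTestCase become explicit static calls on these classes. A rough before/after sketch of the async-query idiom from the PRQueryRegionCloseDUnitTest hunk above; the calls are taken from those hunks, but the class name, method name, and parameters here are placeholders:

  import com.gemstone.gemfire.test.dunit.Assert;
  import com.gemstone.gemfire.test.dunit.AsyncInvocation;
  import com.gemstone.gemfire.test.dunit.LogWriterUtils;
  import com.gemstone.gemfire.test.dunit.ThreadUtils;
  import com.gemstone.gemfire.test.dunit.Wait;

  public class StaticUtilMigrationSketch {

    void waitForAsyncQuery(AsyncInvocation async0, int threadSleepTime) throws Exception {
      // Before the refactoring (helpers inherited from DistributedTestCase):
      //   getLogWriter().info("querying PR");
      //   pause(threadSleepTime);
      //   DistributedTestCase.join(async0, 30 * 1000, getLogWriter());
      //   if (async0.exceptionOccurred()) {
      //     fail("Unexpected exception during query", async0.getException());
      //   }

      // After the refactoring (explicit static calls on the extracted classes):
      LogWriterUtils.getLogWriter().info("querying PR");
      Wait.pause(threadSleepTime);
      ThreadUtils.join(async0, 30 * 1000);   // the LogWriter argument is no longer needed
      if (async0.exceptionOccurred()) {
        Assert.fail("Unexpected exception during query", async0.getException());
      }
    }
  }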

Also:
* Rename variables for clarity
* Replace several uses of LogWriter with Logger
* Write and polish lots of javadocs
* Overhaul entire hierarchy of tearDown() methods across dunit tests


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/820cfd63
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/820cfd63
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/820cfd63

Branch: refs/heads/feature/GEODE-17
Commit: 820cfd632abbb18f9a2265048c8e2ad3d56c2b9d
Parents: 21d9229
Author: Kirk Lund <kl...@pivotal.io>
Authored: Tue Feb 9 10:40:21 2016 -0800
Committer: Kirk Lund <kl...@pivotal.io>
Committed: Tue Feb 9 10:43:30 2016 -0800

----------------------------------------------------------------------
 .../LauncherLifecycleCommandsDUnitTest.java     |   40 +-
 .../SharedConfigurationEndToEndDUnitTest.java   |   16 +-
 .../gemfire/internal/util/DebuggerSupport.java  |   17 +-
 .../com/gemstone/gemfire/TXExpiryJUnitTest.java |   10 +-
 .../cache/CacheRegionClearStatsDUnitTest.java   |   12 +-
 .../cache/ClientServerTimeSyncDUnitTest.java    |   31 +-
 .../cache/ConnectionPoolAndLoaderDUnitTest.java |   12 +-
 .../ClientServerRegisterInterestsDUnitTest.java |    6 +-
 .../internal/AutoConnectionSourceDUnitTest.java |   47 +-
 .../CacheServerSSLConnectionDUnitTest.java      |   13 +-
 .../internal/LocatorLoadBalancingDUnitTest.java |   51 +-
 .../cache/client/internal/LocatorTestBase.java  |   25 +-
 .../internal/SSLNoClientAuthDUnitTest.java      |    6 +-
 .../pooling/ConnectionManagerJUnitTest.java     |   11 +-
 .../management/MemoryThresholdsDUnitTest.java   |   77 +-
 .../MemoryThresholdsOffHeapDUnitTest.java       |   69 +-
 .../management/ResourceManagerDUnitTest.java    |    5 +-
 .../mapInterface/PutAllGlobalLockJUnitTest.java |    4 +-
 .../PartitionRegionHelperDUnitTest.java         |   23 +-
 .../query/cq/dunit/CqQueryTestListener.java     |   26 +-
 .../query/dunit/CompactRangeIndexDUnitTest.java |   15 +-
 .../cache/query/dunit/CqTimeTestListener.java   |   14 +-
 .../cache/query/dunit/HashIndexDUnitTest.java   |   10 +-
 .../cache/query/dunit/HelperTestCase.java       |    3 +-
 .../query/dunit/PdxStringQueryDUnitTest.java    |  297 ++--
 .../dunit/QueryDataInconsistencyDUnitTest.java  |   57 +-
 .../dunit/QueryIndexUsingXMLDUnitTest.java      |   83 +-
 .../QueryParamsAuthorizationDUnitTest.java      |    3 +-
 .../QueryUsingFunctionContextDUnitTest.java     |   42 +-
 .../query/dunit/QueryUsingPoolDUnitTest.java    |  283 ++--
 .../cache/query/dunit/RemoteQueryDUnitTest.java |  148 +-
 ...esourceManagerWithQueryMonitorDUnitTest.java |   27 +-
 .../query/dunit/SelectStarQueryDUnitTest.java   |  121 +-
 .../IndexCreationDeadLockJUnitTest.java         |    6 +-
 .../IndexMaintenanceAsynchJUnitTest.java        |    6 +-
 .../functional/LikePredicateJUnitTest.java      |    4 +-
 .../internal/ExecutionContextJUnitTest.java     |    4 +-
 .../index/AsynchIndexMaintenanceJUnitTest.java  |   17 +-
 ...rrentIndexInitOnOverflowRegionDUnitTest.java |   45 +-
 ...ndexOperationsOnOverflowRegionDUnitTest.java |   64 +-
 ...pdateWithInplaceObjectModFalseDUnitTest.java |   67 +-
 ...ConcurrentIndexUpdateWithoutWLDUnitTest.java |   61 +-
 .../index/CopyOnReadIndexDUnitTest.java         |   60 +-
 .../index/IndexCreationInternalsJUnitTest.java  |    6 +-
 .../index/IndexMaintainceJUnitTest.java         |    4 +-
 .../IndexTrackingQueryObserverDUnitTest.java    |   16 +-
 ...itializeIndexEntryDestroyQueryDUnitTest.java |   37 +-
 .../index/MultiIndexCreationDUnitTest.java      |   30 +-
 .../index/PutAllWithIndexPerfDUnitTest.java     |   23 +-
 .../PRBasicIndexCreationDUnitTest.java          |  108 +-
 .../PRBasicIndexCreationDeadlockDUnitTest.java  |   17 +-
 .../PRBasicMultiIndexCreationDUnitTest.java     |   85 +-
 .../partitioned/PRBasicQueryDUnitTest.java      |   33 +-
 .../PRBasicRemoveIndexDUnitTest.java            |    7 +-
 .../PRColocatedEquiJoinDUnitTest.java           |  305 ++--
 .../partitioned/PRInvalidQueryDUnitTest.java    |   17 +-
 .../partitioned/PRQueryCacheCloseDUnitTest.java |   75 +-
 .../PRQueryCacheClosedJUnitTest.java            |    6 +-
 .../query/partitioned/PRQueryDUnitHelper.java   |  157 ++-
 .../query/partitioned/PRQueryDUnitTest.java     |  158 ++-
 .../query/partitioned/PRQueryPerfDUnitTest.java |   10 +-
 .../PRQueryRegionCloseDUnitTest.java            |   39 +-
 .../PRQueryRegionDestroyedDUnitTest.java        |   41 +-
 .../PRQueryRegionDestroyedJUnitTest.java        |    6 +-
 .../PRQueryRemoteNodeExceptionDUnitTest.java    |  153 +-
 .../snapshot/ParallelSnapshotDUnitTest.java     |    5 +-
 .../snapshot/SnapshotByteArrayDUnitTest.java    |   11 +-
 .../snapshot/SnapshotPerformanceDUnitTest.java  |   21 +-
 .../gemfire/cache30/Bug35214DUnitTest.java      |   14 +-
 .../gemfire/cache30/Bug38741DUnitTest.java      |   15 +-
 .../gemfire/cache30/CacheListenerTestCase.java  |   16 +-
 .../gemfire/cache30/CacheLoaderTestCase.java    |    8 +-
 .../gemfire/cache30/CacheMapTxnDUnitTest.java   |   24 +-
 ...cheRegionsReliablityStatsCheckDUnitTest.java |    3 +-
 .../cache30/CacheStatisticsDUnitTest.java       |   15 +-
 .../gemstone/gemfire/cache30/CacheTestCase.java |   49 +-
 .../gemfire/cache30/CacheXml30DUnitTest.java    |   25 +-
 .../gemfire/cache30/CacheXml41DUnitTest.java    |   13 +-
 .../gemfire/cache30/CacheXml45DUnitTest.java    |    2 +-
 .../gemfire/cache30/CacheXml57DUnitTest.java    |   18 +-
 .../gemfire/cache30/CacheXml60DUnitTest.java    |    8 +-
 .../gemfire/cache30/CacheXml65DUnitTest.java    |   24 +-
 .../gemfire/cache30/CacheXml66DUnitTest.java    |    6 +-
 .../gemfire/cache30/CacheXml80DUnitTest.java    |    3 +-
 .../gemfire/cache30/CacheXml81DUnitTest.java    |    4 +-
 .../gemfire/cache30/CacheXml90DUnitTest.java    |    7 +-
 .../gemfire/cache30/CacheXmlTestCase.java       |   13 +-
 .../cache30/CertifiableTestCacheListener.java   |   12 +-
 .../cache30/ClearMultiVmCallBkDUnitTest.java    |   27 +-
 .../gemfire/cache30/ClearMultiVmDUnitTest.java  |   33 +-
 .../cache30/ClientMembershipDUnitTest.java      |  157 ++-
 .../ClientRegisterInterestDUnitTest.java        |   57 +-
 .../cache30/ClientServerCCEDUnitTest.java       |   56 +-
 .../gemfire/cache30/ClientServerTestCase.java   |   13 +-
 .../ConcurrentLeaveDuringGIIDUnitTest.java      |   13 +-
 .../gemfire/cache30/DiskRegionDUnitTest.java    |   24 +-
 .../cache30/DistAckMapMethodsDUnitTest.java     |   23 +-
 ...tedAckOverflowRegionCCEOffHeapDUnitTest.java |    6 +-
 ...tributedAckPersistentRegionCCEDUnitTest.java |    1 -
 ...dAckPersistentRegionCCEOffHeapDUnitTest.java |   11 +-
 .../DistributedAckRegionCCEDUnitTest.java       |   20 +-
 ...DistributedAckRegionCCEOffHeapDUnitTest.java |   11 +-
 .../cache30/DistributedAckRegionDUnitTest.java  |   10 +-
 .../DistributedAckRegionOffHeapDUnitTest.java   |   11 +-
 .../DistributedMulticastRegionDUnitTest.java    |    8 +-
 .../DistributedNoAckRegionCCEDUnitTest.java     |   19 +-
 ...stributedNoAckRegionCCEOffHeapDUnitTest.java |   11 +-
 .../DistributedNoAckRegionDUnitTest.java        |   48 +-
 .../DistributedNoAckRegionOffHeapDUnitTest.java |   11 +-
 .../gemfire/cache30/DynamicRegionDUnitTest.java |   20 +-
 .../gemfire/cache30/GlobalLockingDUnitTest.java |    7 +-
 .../cache30/GlobalRegionCCEDUnitTest.java       |    5 +-
 .../GlobalRegionCCEOffHeapDUnitTest.java        |   11 +-
 .../gemfire/cache30/GlobalRegionDUnitTest.java  |   28 +-
 .../cache30/GlobalRegionOffHeapDUnitTest.java   |   13 +-
 .../cache30/LRUEvictionControllerDUnitTest.java |    3 +-
 .../gemfire/cache30/LocalRegionDUnitTest.java   |    3 +-
 .../gemfire/cache30/MultiVMRegionTestCase.java  |  372 ++---
 .../OffHeapLRUEvictionControllerDUnitTest.java  |   11 +-
 .../PRBucketSynchronizationDUnitTest.java       |   27 +-
 .../cache30/PartitionedRegionDUnitTest.java     |   15 +-
 .../PartitionedRegionOffHeapDUnitTest.java      |   11 +-
 .../cache30/PreloadedRegionTestCase.java        |    5 +-
 .../cache30/PutAllCallBkRemoteVMDUnitTest.java  |   40 +-
 .../cache30/PutAllCallBkSingleVMDUnitTest.java  |   36 +-
 .../gemfire/cache30/PutAllMultiVmDUnitTest.java |   18 +-
 .../gemfire/cache30/QueueMsgDUnitTest.java      |    5 +-
 .../cache30/RRSynchronizationDUnitTest.java     |   25 +-
 .../gemfire/cache30/ReconnectDUnitTest.java     |  159 ++-
 .../ReconnectedCacheServerDUnitTest.java        |    2 +-
 .../cache30/RegionExpirationDUnitTest.java      |   14 +-
 .../RegionMembershipListenerDUnitTest.java      |   23 +-
 .../cache30/RegionReliabilityTestCase.java      |   15 +-
 .../gemfire/cache30/RegionTestCase.java         |  114 +-
 .../cache30/RemoveAllMultiVmDUnitTest.java      |   18 +-
 .../gemfire/cache30/RequiredRolesDUnitTest.java |   19 +-
 .../cache30/RolePerformanceDUnitTest.java       |    5 +-
 .../gemfire/cache30/SearchAndLoadDUnitTest.java |   74 +-
 .../gemfire/cache30/SlowRecDUnitTest.java       |  137 +-
 .../gemfire/cache30/TXDistributedDUnitTest.java |   72 +-
 .../gemfire/cache30/TXOrderDUnitTest.java       |   11 +-
 .../cache30/TXRestrictionsDUnitTest.java        |    6 +-
 .../gemfire/cache30/TestCacheCallback.java      |    6 +-
 .../distributed/DistributedMemberDUnitTest.java |    3 +-
 .../distributed/DistributedSystemDUnitTest.java |   12 +-
 .../distributed/HostedLocatorsDUnitTest.java    |    3 +-
 .../gemfire/distributed/LocatorDUnitTest.java   |  191 +--
 .../gemfire/distributed/LocatorJUnitTest.java   |    1 -
 .../distributed/SystemAdminDUnitTest.java       |   13 +-
 .../distributed/internal/Bug40751DUnitTest.java |    7 +-
 .../ConsoleDistributionManagerDUnitTest.java    |   45 +-
 .../internal/DistributionAdvisorDUnitTest.java  |    7 +-
 .../internal/DistributionManagerDUnitTest.java  |   30 +-
 .../internal/ProductUseLogDUnitTest.java        |    6 +-
 .../GemFireDeadlockDetectorDUnitTest.java       |   15 +-
 .../internal/locks/CollaborationJUnitTest.java  |   53 +-
 .../membership/gms/MembershipManagerHelper.java |    6 +-
 .../TcpServerBackwardCompatDUnitTest.java       |   18 +-
 .../gemfire/disttx/DistTXDebugDUnitTest.java    |  115 +-
 .../disttx/DistTXPersistentDebugDUnitTest.java  |   17 +-
 .../disttx/DistributedTransactionDUnitTest.java |   30 +-
 ...wardCompatibilitySerializationDUnitTest.java |    2 +-
 .../ClassNotFoundExceptionDUnitTest.java        |    6 +-
 .../gemfire/internal/JSSESocketJUnitTest.java   |    4 +-
 .../gemfire/internal/JarDeployerDUnitTest.java  |   10 +-
 .../internal/PdxDeleteFieldDUnitTest.java       |    6 +-
 .../gemfire/internal/PdxRenameDUnitTest.java    |    6 +-
 .../gemfire/internal/SocketCloserJUnitTest.java |   12 +-
 .../gemfire/internal/cache/BackupDUnitTest.java |   60 +-
 .../internal/cache/Bug33359DUnitTest.java       |   21 +-
 .../internal/cache/Bug33726DUnitTest.java       |    3 +-
 .../internal/cache/Bug37241DUnitTest.java       |   14 +-
 .../internal/cache/Bug37377DUnitTest.java       |   12 +-
 .../internal/cache/Bug39079DUnitTest.java       |   13 +-
 .../internal/cache/Bug40299DUnitTest.java       |    7 +-
 .../internal/cache/Bug41091DUnitTest.java       |   18 +-
 .../internal/cache/Bug41733DUnitTest.java       |    3 +-
 .../internal/cache/Bug41957DUnitTest.java       |   13 +-
 .../internal/cache/Bug45164DUnitTest.java       |    3 +-
 .../internal/cache/Bug47667DUnitTest.java       |    6 +-
 .../internal/cache/CacheAdvisorDUnitTest.java   |    7 +-
 .../internal/cache/ClearDAckDUnitTest.java      |   47 +-
 .../internal/cache/ClearGlobalDUnitTest.java    |   14 +-
 .../cache/ClientServerGetAllDUnitTest.java      |   33 +-
 ...ServerInvalidAndDestroyedEntryDUnitTest.java |   26 +-
 .../ClientServerTransactionCCEDUnitTest.java    |   12 +-
 .../cache/ClientServerTransactionDUnitTest.java |   83 +-
 .../ConcurrentDestroySubRegionDUnitTest.java    |    5 +-
 .../cache/ConcurrentMapOpsDUnitTest.java        |   29 +-
 .../ConcurrentRegionOperationsJUnitTest.java    |   18 +-
 ...rentRollingAndRegionOperationsJUnitTest.java |    4 +-
 .../cache/ConnectDisconnectDUnitTest.java       |   17 +-
 .../cache/DeltaPropagationDUnitTest.java        |   32 +-
 .../cache/DeltaPropagationStatsDUnitTest.java   |   17 +-
 .../internal/cache/DeltaSizingDUnitTest.java    |    5 +-
 .../cache/DiskRegByteArrayDUnitTest.java        |    7 +-
 .../cache/DiskRegionClearJUnitTest.java         |    4 +-
 .../internal/cache/DiskRegionJUnitTest.java     |   43 +-
 ...DistrbutedRegionProfileOffHeapDUnitTest.java |   24 +-
 .../cache/DistributedCacheTestCase.java         |   16 +-
 .../internal/cache/EventTrackerDUnitTest.java   |   27 +-
 .../internal/cache/EvictionDUnitTest.java       |    9 +-
 .../cache/EvictionObjectSizerDUnitTest.java     |   25 +-
 .../internal/cache/EvictionStatsDUnitTest.java  |   20 +-
 .../internal/cache/EvictionTestBase.java        |   39 +-
 .../cache/FixedPRSinglehopDUnitTest.java        |   31 +-
 .../internal/cache/GIIDeltaDUnitTest.java       |   76 +-
 .../internal/cache/GIIFlowControlDUnitTest.java |   38 +-
 .../internal/cache/GridAdvisorDUnitTest.java    |   33 +-
 .../internal/cache/HABug36773DUnitTest.java     |   21 +-
 .../HAOverflowMemObjectSizerDUnitTest.java      |    9 +-
 .../cache/IncrementalBackupDUnitTest.java       |   16 +-
 .../cache/InterruptClientServerDUnitTest.java   |    9 +-
 .../internal/cache/InterruptsDUnitTest.java     |    6 +-
 .../internal/cache/MapClearGIIDUnitTest.java    |   26 +-
 .../internal/cache/MapInterface2JUnitTest.java  |    6 +-
 .../cache/NetSearchMessagingDUnitTest.java      |   13 +-
 .../cache/OffHeapEvictionDUnitTest.java         |   28 +-
 .../cache/OffHeapEvictionStatsDUnitTest.java    |   23 +-
 .../gemfire/internal/cache/OffHeapTestUtil.java |    2 +-
 .../gemfire/internal/cache/OplogJUnitTest.java  |   25 +-
 .../cache/P2PDeltaPropagationDUnitTest.java     |    6 +-
 .../internal/cache/PRBadToDataDUnitTest.java    |    3 +-
 .../cache/PartitionedRegionAPIDUnitTest.java    |   23 +-
 ...gionBucketCreationDistributionDUnitTest.java |  107 +-
 .../PartitionedRegionCacheCloseDUnitTest.java   |   17 +-
 .../PartitionedRegionCreationDUnitTest.java     |   67 +-
 .../cache/PartitionedRegionDUnitTestCase.java   |   22 +-
 ...rtitionedRegionDelayedRecoveryDUnitTest.java |   11 +-
 .../PartitionedRegionDestroyDUnitTest.java      |   23 +-
 .../PartitionedRegionEntryCountDUnitTest.java   |    3 +-
 .../PartitionedRegionEvictionDUnitTest.java     |   16 +-
 .../cache/PartitionedRegionHADUnitTest.java     |   24 +-
 ...onedRegionHAFailureAndRecoveryDUnitTest.java |   49 +-
 ...artitionedRegionLocalMaxMemoryDUnitTest.java |    5 +-
 ...nedRegionLocalMaxMemoryOffHeapDUnitTest.java |   11 +-
 .../PartitionedRegionMultipleDUnitTest.java     |   72 +-
 ...rtitionedRegionOffHeapEvictionDUnitTest.java |   11 +-
 .../cache/PartitionedRegionPRIDDUnitTest.java   |   22 +-
 .../cache/PartitionedRegionQueryDUnitTest.java  |   33 +-
 ...artitionedRegionRedundancyZoneDUnitTest.java |    3 +-
 ...tionedRegionSerializableObjectJUnitTest.java |    4 +-
 .../PartitionedRegionSingleHopDUnitTest.java    |  173 ++-
 ...RegionSingleHopWithServerGroupDUnitTest.java |  148 +-
 .../cache/PartitionedRegionSizeDUnitTest.java   |   20 +-
 .../cache/PartitionedRegionStatsDUnitTest.java  |   11 +-
 .../PartitionedRegionTestUtilsDUnitTest.java    |   22 +-
 .../PartitionedRegionWithSameNameDUnitTest.java |   46 +-
 .../internal/cache/PutAllDAckDUnitTest.java     |   23 +-
 .../internal/cache/PutAllGlobalDUnitTest.java   |   53 +-
 .../cache/RemoteTransactionDUnitTest.java       |   38 +-
 .../internal/cache/RemoveAllDAckDUnitTest.java  |   21 +-
 .../internal/cache/RemoveDAckDUnitTest.java     |   17 +-
 .../internal/cache/RemoveGlobalDUnitTest.java   |   20 +-
 .../cache/SimpleDiskRegionJUnitTest.java        |   12 +-
 .../internal/cache/SingleHopStatsDUnitTest.java |   55 +-
 .../internal/cache/SizingFlagDUnitTest.java     |    3 +-
 .../internal/cache/SystemFailureDUnitTest.java  |   22 +-
 .../cache/TXReservationMgrJUnitTest.java        |    4 +-
 .../cache/TransactionsWithDeltaDUnitTest.java   |   11 +-
 .../control/RebalanceOperationDUnitTest.java    |   46 +-
 ...egionOverflowAsyncRollingOpLogJUnitTest.java |    6 +-
 ...RegionOverflowSyncRollingOpLogJUnitTest.java |    6 +-
 ...ltiThreadedOplogPerJUnitPerformanceTest.java |    4 +-
 .../cache/execute/Bug51193DUnitTest.java        |    6 +-
 .../ClientServerFunctionExecutionDUnitTest.java |   87 +-
 .../execute/ColocationFailoverDUnitTest.java    |   27 +-
 ...ributedRegionFunctionExecutionDUnitTest.java |   78 +-
 .../execute/FunctionServiceStatsDUnitTest.java  |   57 +-
 .../cache/execute/LocalDataSetDUnitTest.java    |   14 +-
 .../execute/LocalDataSetIndexingDUnitTest.java  |    2 +-
 .../LocalFunctionExecutionDUnitTest.java        |   19 +-
 .../MemberFunctionExecutionDUnitTest.java       |   41 +-
 .../MultiRegionFunctionExecutionDUnitTest.java  |    9 +-
 .../OnGroupsFunctionExecutionDUnitTest.java     |   71 +-
 ...ntServerFunctionExecutionNoAckDUnitTest.java |   40 +-
 ...tServerRegionFunctionExecutionDUnitTest.java |   83 +-
 ...egionFunctionExecutionFailoverDUnitTest.java |   70 +-
 ...onFunctionExecutionNoSingleHopDUnitTest.java |   84 +-
 ...onExecutionSelectorNoSingleHopDUnitTest.java |  100 +-
 ...gionFunctionExecutionSingleHopDUnitTest.java |   94 +-
 .../cache/execute/PRClientServerTestBase.java   |   59 +-
 .../cache/execute/PRColocationDUnitTest.java    |  126 +-
 .../execute/PRCustomPartitioningDUnitTest.java  |   14 +-
 .../execute/PRFunctionExecutionDUnitTest.java   |   51 +-
 .../PRFunctionExecutionTimeOutDUnitTest.java    |    5 +-
 ...ctionExecutionWithResultSenderDUnitTest.java |    3 +-
 .../execute/PRPerformanceTestDUnitTest.java     |   12 +-
 .../cache/execute/PRTransactionDUnitTest.java   |   44 +-
 .../execute/SingleHopGetAllPutAllDUnitTest.java |   25 +-
 .../functions/DistributedRegionFunction.java    |    6 +-
 .../internal/cache/functions/TestFunction.java  |    8 +-
 .../ha/BlockingHARQAddOperationJUnitTest.java   |    9 +-
 .../cache/ha/BlockingHARegionJUnitTest.java     |   55 +-
 .../cache/ha/Bug36853EventsExpiryDUnitTest.java |   24 +-
 .../internal/cache/ha/Bug48571DUnitTest.java    |   13 +-
 .../internal/cache/ha/Bug48879DUnitTest.java    |    3 +-
 .../cache/ha/EventIdOptimizationDUnitTest.java  |   21 +-
 .../internal/cache/ha/FailoverDUnitTest.java    |   25 +-
 .../internal/cache/ha/HABugInPutDUnitTest.java  |   11 +-
 .../internal/cache/ha/HAClearDUnitTest.java     |   22 +-
 .../cache/ha/HAConflationDUnitTest.java         |   14 +-
 .../internal/cache/ha/HADuplicateDUnitTest.java |   14 +-
 .../cache/ha/HAEventIdPropagationDUnitTest.java |   26 +-
 .../internal/cache/ha/HAExpiryDUnitTest.java    |   17 +-
 .../internal/cache/ha/HAGIIBugDUnitTest.java    |   29 +-
 .../internal/cache/ha/HAGIIDUnitTest.java       |   43 +-
 .../cache/ha/HARQAddOperationJUnitTest.java     |   24 +-
 .../cache/ha/HARQueueNewImplDUnitTest.java      |  108 +-
 .../internal/cache/ha/HARegionDUnitTest.java    |   15 +-
 .../cache/ha/HARegionQueueDUnitTest.java        |   36 +-
 .../cache/ha/HARegionQueueJUnitTest.java        |   26 +-
 .../cache/ha/HASlowReceiverDUnitTest.java       |   22 +-
 .../ha/OperationsPropagationDUnitTest.java      |   40 +-
 .../internal/cache/ha/PutAllDUnitTest.java      |   15 +-
 .../internal/cache/ha/StatsBugDUnitTest.java    |   26 +-
 .../cache/locks/TXLockServiceDUnitTest.java     |   49 +-
 .../cache/partitioned/Bug39356DUnitTest.java    |    5 +-
 .../cache/partitioned/Bug43684DUnitTest.java    |    9 +-
 .../cache/partitioned/Bug47388DUnitTest.java    |    9 +-
 .../cache/partitioned/Bug51400DUnitTest.java    |    9 +-
 .../partitioned/ElidedPutAllDUnitTest.java      |    5 +-
 .../partitioned/PartitionResolverDUnitTest.java |    5 +-
 .../PartitionedRegionLoaderWriterDUnitTest.java |   10 +-
 ...rtitionedRegionMetaDataCleanupDUnitTest.java |   17 +-
 .../partitioned/PersistPRKRFDUnitTest.java      |   22 +-
 ...tentColocatedPartitionedRegionDUnitTest.java |   43 +-
 .../PersistentPartitionedRegionDUnitTest.java   |   64 +-
 .../PersistentPartitionedRegionTestBase.java    |   27 +-
 ...rtitionedRegionWithTransactionDUnitTest.java |   19 +-
 .../cache/partitioned/ShutdownAllDUnitTest.java |   17 +-
 ...StreamingPartitionOperationOneDUnitTest.java |    3 +-
 .../fixed/FixedPartitioningDUnitTest.java       |   66 +-
 .../fixed/FixedPartitioningTestBase.java        |   98 +-
 ...ngWithColocationAndPersistenceDUnitTest.java |   50 +-
 .../PersistentRVVRecoveryDUnitTest.java         |   28 +-
 .../PersistentRecoveryOrderDUnitTest.java       |  119 +-
 .../PersistentReplicatedTestBase.java           |   14 +-
 .../internal/cache/tier/Bug40396DUnitTest.java  |    7 +-
 ...mpatibilityHigherVersionClientDUnitTest.java |    9 +-
 .../cache/tier/sockets/Bug36269DUnitTest.java   |   24 +-
 .../cache/tier/sockets/Bug36457DUnitTest.java   |   16 +-
 .../cache/tier/sockets/Bug36805DUnitTest.java   |   14 +-
 .../cache/tier/sockets/Bug36829DUnitTest.java   |   13 +-
 .../cache/tier/sockets/Bug36995DUnitTest.java   |   19 +-
 .../cache/tier/sockets/Bug37210DUnitTest.java   |   22 +-
 .../cache/tier/sockets/Bug37805DUnitTest.java   |    6 +-
 .../CacheServerMaxConnectionsJUnitTest.java     |    8 +-
 .../cache/tier/sockets/CacheServerTestUtil.java |   30 +-
 .../CacheServerTransactionsDUnitTest.java       |  115 +-
 .../tier/sockets/ClearPropagationDUnitTest.java |   26 +-
 .../tier/sockets/ClientConflationDUnitTest.java |   31 +-
 .../sockets/ClientHealthMonitorJUnitTest.java   |    8 +-
 .../sockets/ClientInterestNotifyDUnitTest.java  |   29 +-
 .../tier/sockets/ClientServerMiscDUnitTest.java |  111 +-
 .../cache/tier/sockets/ConflationDUnitTest.java |   65 +-
 .../tier/sockets/ConnectionProxyJUnitTest.java  |   14 +-
 .../DataSerializerPropogationDUnitTest.java     |  118 +-
 .../DestroyEntryPropagationDUnitTest.java       |   41 +-
 .../sockets/DurableClientBug39997DUnitTest.java |   10 +-
 .../DurableClientQueueSizeDUnitTest.java        |   11 +-
 .../DurableClientReconnectAutoDUnitTest.java    |    4 +-
 .../DurableClientReconnectDUnitTest.java        |   93 +-
 .../sockets/DurableClientStatsDUnitTest.java    |   43 +-
 .../sockets/DurableRegistrationDUnitTest.java   |   60 +-
 .../sockets/DurableResponseMatrixDUnitTest.java |   30 +-
 .../sockets/EventIDVerificationDUnitTest.java   |   21 +-
 .../EventIDVerificationInP2PDUnitTest.java      |   16 +-
 .../ForceInvalidateEvictionDUnitTest.java       |   12 +-
 ...ForceInvalidateOffHeapEvictionDUnitTest.java |   11 +-
 .../cache/tier/sockets/HABug36738DUnitTest.java |   16 +-
 .../tier/sockets/HAInterestPart1DUnitTest.java  |   22 +-
 .../tier/sockets/HAInterestPart2DUnitTest.java  |   31 +-
 .../cache/tier/sockets/HAInterestTestCase.java  |   50 +-
 .../sockets/HAStartupAndFailoverDUnitTest.java  |   50 +-
 .../InstantiatorPropagationDUnitTest.java       |  120 +-
 .../tier/sockets/InterestListDUnitTest.java     |  116 +-
 .../sockets/InterestListEndpointDUnitTest.java  |   27 +-
 .../sockets/InterestListFailoverDUnitTest.java  |   23 +-
 .../sockets/InterestListRecoveryDUnitTest.java  |   45 +-
 .../sockets/InterestRegrListenerDUnitTest.java  |   51 +-
 .../sockets/InterestResultPolicyDUnitTest.java  |   26 +-
 .../sockets/NewRegionAttributesDUnitTest.java   |    7 +-
 .../sockets/RedundancyLevelPart1DUnitTest.java  |   58 +-
 .../sockets/RedundancyLevelPart2DUnitTest.java  |   56 +-
 .../sockets/RedundancyLevelPart3DUnitTest.java  |   14 +-
 .../tier/sockets/RedundancyLevelTestBase.java   |   55 +-
 .../tier/sockets/RegionCloseDUnitTest.java      |   22 +-
 ...erInterestBeforeRegionCreationDUnitTest.java |   14 +-
 .../sockets/RegisterInterestKeysDUnitTest.java  |   22 +-
 .../sockets/ReliableMessagingDUnitTest.java     |   37 +-
 .../sockets/UnregisterInterestDUnitTest.java    |   15 +-
 .../sockets/UpdatePropagationDUnitTest.java     |   49 +-
 .../VerifyEventIDGenerationInP2PDUnitTest.java  |   10 +-
 ...UpdatesFromNonInterestEndPointDUnitTest.java |   21 +-
 .../versions/RegionVersionVectorJUnitTest.java  |    4 +-
 .../cache/wan/AsyncEventQueueTestBase.java      |  102 +-
 .../asyncqueue/AsyncEventListenerDUnitTest.java |  396 +++---
 .../AsyncEventQueueStatsDUnitTest.java          |   79 +-
 .../ConcurrentAsyncEventQueueDUnitTest.java     |   47 +-
 .../CommonParallelAsyncEventQueueDUnitTest.java |   10 +-
 .../CompressionCacheConfigDUnitTest.java        |   18 +-
 .../CompressionCacheListenerDUnitTest.java      |    9 +-
 ...ompressionCacheListenerOffHeapDUnitTest.java |   11 +-
 .../CompressionRegionConfigDUnitTest.java       |   22 +-
 .../CompressionRegionFactoryDUnitTest.java      |    5 -
 .../CompressionRegionOperationsDUnitTest.java   |   25 +-
 ...ressionRegionOperationsOffHeapDUnitTest.java |   12 +-
 .../internal/jta/dunit/ExceptionsDUnitTest.java |   14 +-
 .../jta/dunit/IdleTimeOutDUnitTest.java         |   57 +-
 .../jta/dunit/LoginTimeOutDUnitTest.java        |   14 +-
 .../jta/dunit/MaxPoolSizeDUnitTest.java         |   42 +-
 .../jta/dunit/TransactionTimeOutDUnitTest.java  |   20 +-
 .../dunit/TxnManagerMultiThreadDUnitTest.java   |   76 +-
 .../internal/jta/dunit/TxnTimeOutDUnitTest.java |   42 +-
 .../DistributedSystemLogFileJUnitTest.java      |   18 +-
 .../logging/LocatorLogFileJUnitTest.java        |    6 +-
 .../logging/MergeLogFilesJUnitTest.java         |    4 +-
 .../internal/offheap/OffHeapRegionBase.java     |    4 +-
 .../offheap/OutOfOffHeapMemoryDUnitTest.java    |   17 +-
 .../process/LocalProcessLauncherDUnitTest.java  |    4 -
 .../statistics/StatisticsDUnitTest.java         |   20 +-
 .../statistics/ValueMonitorJUnitTest.java       |    6 +-
 .../management/CacheManagementDUnitTest.java    |   28 +-
 .../management/ClientHealthStatsDUnitTest.java  |   29 +-
 .../management/CompositeTypeTestDUnitTest.java  |    9 +-
 .../management/DLockManagementDUnitTest.java    |   24 +-
 .../management/DiskManagementDUnitTest.java     |   36 +-
 .../management/DistributedSystemDUnitTest.java  |   43 +-
 .../management/LocatorManagementDUnitTest.java  |   29 +-
 .../gemstone/gemfire/management/MBeanUtil.java  |   26 +-
 .../gemfire/management/ManagementTestBase.java  |   45 +-
 .../MemberMBeanAttributesDUnitTest.java         |    7 +-
 .../management/OffHeapManagementDUnitTest.java  |    6 +-
 .../gemfire/management/QueryDataDUnitTest.java  |   28 +-
 .../management/RegionManagementDUnitTest.java   |   71 +-
 ...ersalMembershipListenerAdapterDUnitTest.java |  152 +-
 .../stats/DistributedSystemStatsDUnitTest.java  |    9 +-
 .../internal/cli/CliUtilDUnitTest.java          |   39 +-
 .../cli/commands/CliCommandTestBase.java        |   16 +-
 .../cli/commands/ConfigCommandsDUnitTest.java   |   46 +-
 ...eateAlterDestroyRegionCommandsDUnitTest.java |   62 +-
 .../cli/commands/DeployCommandsDUnitTest.java   |   16 +-
 .../commands/DiskStoreCommandsDUnitTest.java    |   40 +-
 .../cli/commands/FunctionCommandsDUnitTest.java |   56 +-
 .../commands/GemfireDataCommandsDUnitTest.java  |  194 +--
 ...WithCacheLoaderDuringCacheMissDUnitTest.java |    8 +-
 .../cli/commands/IndexCommandsDUnitTest.java    |   22 +-
 ...stAndDescribeDiskStoreCommandsDUnitTest.java |   12 +-
 .../ListAndDescribeRegionDUnitTest.java         |   29 +-
 .../cli/commands/ListIndexCommandDUnitTest.java |   20 +-
 .../cli/commands/MemberCommandsDUnitTest.java   |   15 +-
 .../MiscellaneousCommandsDUnitTest.java         |   39 +-
 ...laneousCommandsExportLogsPart1DUnitTest.java |    9 +-
 ...laneousCommandsExportLogsPart2DUnitTest.java |   14 +-
 ...laneousCommandsExportLogsPart3DUnitTest.java |    9 +-
 ...laneousCommandsExportLogsPart4DUnitTest.java |   14 +-
 .../cli/commands/QueueCommandsDUnitTest.java    |   16 +-
 .../SharedConfigurationCommandsDUnitTest.java   |   30 +-
 .../cli/commands/ShellCommandsDUnitTest.java    |   18 +-
 .../cli/commands/ShowDeadlockDUnitTest.java     |   13 +-
 .../cli/commands/ShowMetricsDUnitTest.java      |   37 +-
 .../cli/commands/ShowStackTraceDUnitTest.java   |   21 +-
 .../cli/commands/UserCommandsDUnitTest.java     |    3 +-
 .../SharedConfigurationDUnitTest.java           |   19 +-
 .../internal/pulse/TestClientIdsDUnitTest.java  |   25 +-
 .../internal/pulse/TestFunctionsDUnitTest.java  |   11 +-
 .../internal/pulse/TestHeapDUnitTest.java       |   11 +-
 .../internal/pulse/TestLocatorsDUnitTest.java   |   11 +-
 .../pulse/TestSubscriptionsDUnitTest.java       |   22 +-
 .../ClientsWithVersioningRetryDUnitTest.java    |   37 +-
 .../pdx/DistributedSystemIdDUnitTest.java       |    3 +-
 .../pdx/JSONPdxClientServerDUnitTest.java       |    7 +-
 .../gemfire/pdx/PdxClientServerDUnitTest.java   |   16 +-
 .../pdx/PdxDeserializationDUnitTest.java        |    9 +-
 .../gemfire/pdx/PdxTypeExportDUnitTest.java     |    3 +-
 .../gemfire/pdx/VersionClassLoader.java         |   10 +-
 .../gemfire/redis/RedisDistDUnitTest.java       |   14 +-
 .../web/controllers/RestAPITestBase.java        |   10 +-
 .../security/ClientAuthenticationDUnitTest.java |  100 +-
 .../security/ClientAuthorizationDUnitTest.java  |   63 +-
 .../security/ClientAuthorizationTestBase.java   |   28 +-
 .../security/ClientMultiUserAuthzDUnitTest.java |   41 +-
 .../DeltaClientAuthorizationDUnitTest.java      |   56 +-
 .../DeltaClientPostAuthorizationDUnitTest.java  |   30 +-
 .../security/P2PAuthenticationDUnitTest.java    |   44 +-
 .../gemfire/security/SecurityTestUtil.java      |  311 ++--
 .../com/gemstone/gemfire/test/dunit/Assert.java |   66 +
 .../gemfire/test/dunit/AsyncInvocation.java     |   23 +-
 .../gemstone/gemfire/test/dunit/DUnitEnv.java   |    1 -
 .../gemfire/test/dunit/DebuggerUtils.java       |   52 +
 .../gemfire/test/dunit/DistributedTestCase.java | 1328 +++---------------
 .../test/dunit/DistributedTestUtils.java        |  167 +++
 .../com/gemstone/gemfire/test/dunit/Host.java   |    5 +-
 .../gemfire/test/dunit/IgnoredException.java    |  200 +++
 .../com/gemstone/gemfire/test/dunit/Invoke.java |  160 +++
 .../com/gemstone/gemfire/test/dunit/Jitter.java |   87 ++
 .../gemfire/test/dunit/LogWriterUtils.java      |  111 ++
 .../gemfire/test/dunit/NetworkUtils.java        |   69 +
 .../gemfire/test/dunit/RMIException.java        |    2 +-
 .../gemfire/test/dunit/RepeatableRunnable.java  |    4 +-
 .../test/dunit/SerializableCallableIF.java      |    4 +-
 .../test/dunit/SerializableRunnable.java        |    3 +-
 .../test/dunit/SerializableRunnableIF.java      |    4 +-
 .../test/dunit/StoppableWaitCriterion.java      |   35 +
 .../gemfire/test/dunit/ThreadUtils.java         |  155 ++
 .../com/gemstone/gemfire/test/dunit/VM.java     |   60 +-
 .../com/gemstone/gemfire/test/dunit/Wait.java   |  204 +++
 .../gemfire/test/dunit/WaitCriterion.java       |   33 +
 .../dunit/rules/DistributedDisconnectRule.java  |  121 ++
 .../rules/DistributedExternalResource.java      |   58 +
 .../DistributedRestoreSystemProperties.java     |   74 +
 .../gemfire/test/dunit/rules/RemoteInvoker.java |   39 +
 .../test/dunit/tests/BasicDUnitTest.java        |    5 +-
 .../tests/GetDefaultDiskStoreNameDUnitTest.java |   67 +
 .../dunit/tests/GetTestMethodNameDUnitTest.java |   54 +
 .../src/test/java/hydra/MethExecutor.java       |    1 +
 .../cache/query/cq/dunit/CqDataDUnitTest.java   |   90 +-
 .../dunit/CqDataOptimizedExecuteDUnitTest.java  |   11 +-
 .../cq/dunit/CqDataUsingPoolDUnitTest.java      |   99 +-
 ...qDataUsingPoolOptimizedExecuteDUnitTest.java |   13 +-
 .../cache/query/cq/dunit/CqPerfDUnitTest.java   |   79 +-
 .../cq/dunit/CqPerfUsingPoolDUnitTest.java      |   77 +-
 .../cache/query/cq/dunit/CqQueryDUnitTest.java  |  327 ++---
 .../dunit/CqQueryOptimizedExecuteDUnitTest.java |   25 +-
 .../cq/dunit/CqQueryUsingPoolDUnitTest.java     |  328 ++---
 ...QueryUsingPoolOptimizedExecuteDUnitTest.java |    8 +-
 .../cq/dunit/CqResultSetUsingPoolDUnitTest.java |   77 +-
 ...ltSetUsingPoolOptimizedExecuteDUnitTest.java |   30 +-
 .../cache/query/cq/dunit/CqStateDUnitTest.java  |   16 +-
 .../cache/query/cq/dunit/CqStatsDUnitTest.java  |   34 +-
 .../dunit/CqStatsOptimizedExecuteDUnitTest.java |    8 +-
 .../cq/dunit/CqStatsUsingPoolDUnitTest.java     |   34 +-
 ...StatsUsingPoolOptimizedExecuteDUnitTest.java |    8 +-
 .../query/cq/dunit/CqTimeTestListener.java      |   14 +-
 .../PartitionedRegionCqQueryDUnitTest.java      |   72 +-
 ...dRegionCqQueryOptimizedExecuteDUnitTest.java |   18 +-
 .../query/cq/dunit/PrCqUsingPoolDUnitTest.java  |   62 +-
 .../PrCqUsingPoolOptimizedExecuteDUnitTest.java |    8 +-
 .../cache/query/dunit/PdxQueryCQDUnitTest.java  |   46 +-
 .../cache/query/dunit/PdxQueryCQTestBase.java   |   32 +-
 .../dunit/QueryIndexUpdateRIDUnitTest.java      |   72 +-
 .../query/dunit/QueryMonitorDUnitTest.java      |   48 +-
 .../cache/snapshot/ClientSnapshotDUnitTest.java |    6 +-
 .../cache/PRDeltaPropagationDUnitTest.java      |   17 +-
 .../internal/cache/PutAllCSDUnitTest.java       |  401 +++---
 .../cache/RemoteCQTransactionDUnitTest.java     |   13 +-
 .../internal/cache/ha/CQListGIIDUnitTest.java   |   53 +-
 .../cache/ha/HADispatcherDUnitTest.java         |   23 +-
 .../sockets/ClientToServerDeltaDUnitTest.java   |   27 +-
 .../DeltaPropagationWithCQDUnitTest.java        |   19 +-
 ...ToRegionRelationCQRegistrationDUnitTest.java |   32 +-
 .../sockets/DurableClientCrashDUnitTest.java    |    4 +-
 .../sockets/DurableClientNetDownDUnitTest.java  |    3 +-
 .../sockets/DurableClientSimpleDUnitTest.java   |   69 +-
 .../tier/sockets/DurableClientTestCase.java     |   97 +-
 .../CacheServerManagementDUnitTest.java         |   44 +-
 .../cli/commands/ClientCommandsDUnitTest.java   |   31 +-
 .../DurableClientCommandsDUnitTest.java         |   13 +-
 .../internal/pulse/TestCQDUnitTest.java         |   16 +-
 .../internal/pulse/TestClientsDUnitTest.java    |   14 +-
 .../internal/pulse/TestServerDUnitTest.java     |   11 +-
 .../ClientAuthorizationTwoDUnitTest.java        |   16 +-
 .../security/ClientAuthzObjectModDUnitTest.java |   13 +-
 .../ClientCQPostAuthorizationDUnitTest.java     |   39 +-
 .../ClientPostAuthorizationDUnitTest.java       |   21 +-
 .../gemfire/security/MultiuserAPIDUnitTest.java |   21 +-
 .../MultiuserDurableCQAuthzDUnitTest.java       |   22 +-
 .../internal/cache/UpdateVersionDUnitTest.java  |   59 +-
 .../gemfire/internal/cache/wan/WANTestBase.java |  353 ++---
 ...oncurrentParallelGatewaySenderDUnitTest.java |  191 +--
 ...allelGatewaySenderOperation_1_DUnitTest.java |  253 ++--
 ...allelGatewaySenderOperation_2_DUnitTest.java |   90 +-
 .../ConcurrentWANPropogation_1_DUnitTest.java   |  216 +--
 .../ConcurrentWANPropogation_2_DUnitTest.java   |  168 +--
 .../cache/wan/disttx/DistTXWANDUnitTest.java    |   54 +-
 .../CommonParallelGatewaySenderDUnitTest.java   |  150 +-
 ...wWANConcurrencyCheckForDestroyDUnitTest.java |   44 +-
 .../cache/wan/misc/PDXNewWanDUnitTest.java      |  202 +--
 ...dRegion_ParallelWANPersistenceDUnitTest.java |  229 +--
 ...dRegion_ParallelWANPropogationDUnitTest.java |  256 ++--
 .../SenderWithTransportFilterDUnitTest.java     |   18 +-
 ...downAllPersistentGatewaySenderDUnitTest.java |   34 +-
 .../wan/misc/WANLocatorServerDUnitTest.java     |   14 +-
 .../cache/wan/misc/WANSSLDUnitTest.java         |   40 +-
 .../wan/misc/WanAutoDiscoveryDUnitTest.java     |   14 +-
 .../cache/wan/misc/WanValidationsDUnitTest.java |  352 ++---
 ...arallelGatewaySenderOperationsDUnitTest.java |  157 ++-
 ...llelGatewaySenderQueueOverflowDUnitTest.java |  107 +-
 .../ParallelWANConflationDUnitTest.java         |   55 +-
 ...ersistenceEnabledGatewaySenderDUnitTest.java |  539 +++----
 ...llelWANPropagationClientServerDUnitTest.java |   28 +-
 ...lelWANPropagationConcurrentOpsDUnitTest.java |   81 +-
 .../ParallelWANPropagationDUnitTest.java        |  381 ++---
 ...ParallelWANPropagationLoopBackDUnitTest.java |   77 +-
 .../wan/parallel/ParallelWANStatsDUnitTest.java |    8 +-
 ...tewaySenderDistributedDeadlockDUnitTest.java |  109 +-
 ...rialGatewaySenderEventListenerDUnitTest.java |   75 +-
 .../SerialGatewaySenderOperationsDUnitTest.java |  130 +-
 .../SerialGatewaySenderQueueDUnitTest.java      |   68 +-
 ...ersistenceEnabledGatewaySenderDUnitTest.java |  210 +--
 .../SerialWANPropagationLoopBackDUnitTest.java  |   97 +-
 .../serial/SerialWANPropogationDUnitTest.java   |  509 +++----
 ...NPropogation_PartitionedRegionDUnitTest.java |  179 +--
 .../SerialWANPropogationsFeatureDUnitTest.java  |  124 +-
 .../wan/serial/SerialWANStatsDUnitTest.java     |   12 +-
 .../wan/wancommand/WANCommandTestBase.java      |   38 +-
 .../WanCommandCreateGatewaySenderDUnitTest.java |    5 +-
 ...WanCommandGatewayReceiverStartDUnitTest.java |    9 +-
 .../WanCommandGatewayReceiverStopDUnitTest.java |    9 +-
 .../WanCommandGatewaySenderStartDUnitTest.java  |   14 +-
 .../WanCommandGatewaySenderStopDUnitTest.java   |   12 +-
 .../wan/wancommand/WanCommandListDUnitTest.java |   11 +-
 .../WanCommandPauseResumeDUnitTest.java         |   17 +-
 .../wancommand/WanCommandStatusDUnitTest.java   |   25 +-
 .../management/WANManagementDUnitTest.java      |   30 +-
 .../ClusterConfigurationDUnitTest.java          |   20 +-
 .../pulse/TestRemoteClusterDUnitTest.java       |   22 +-
 618 files changed, 15004 insertions(+), 13100 deletions(-)
----------------------------------------------------------------------
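Note: the bulk of this change is a mechanical migration of helpers that dunit tests previously inherited from DistributedTestCase onto small static-utility classes under com.gemstone.gemfire.test.dunit (the new Assert, DebuggerUtils, DistributedTestUtils, IgnoredException, Invoke, LogWriterUtils, NetworkUtils, ThreadUtils, Wait and WaitCriterion files listed above). As a rough sketch of the call-site pattern, assuming the usual JUnit 3 style name constructor used by the tests below (the class itself is hypothetical, not part of this commit):

    import com.gemstone.gemfire.test.dunit.DistributedTestCase;
    import com.gemstone.gemfire.test.dunit.Host;
    import com.gemstone.gemfire.test.dunit.IgnoredException;
    import com.gemstone.gemfire.test.dunit.LogWriterUtils;
    import com.gemstone.gemfire.test.dunit.NetworkUtils;
    import com.gemstone.gemfire.test.dunit.Wait;
    import com.gemstone.gemfire.test.dunit.WaitCriterion;

    public class ExampleMigrationDUnitTest extends DistributedTestCase {

      public ExampleMigrationDUnitTest(String name) {
        super(name);
      }

      public void testMigratedHelpers() {
        // was: addExpectedException("java.net.ConnectException")
        IgnoredException.addIgnoredException("java.net.ConnectException");

        // was: getLogWriter() and getServerHostName(host), both inherited
        Host host = Host.getHost(0);
        LogWriterUtils.getLogWriter().info(
            "server host is " + NetworkUtils.getServerHostName(host));

        // was: waitForCriterion(wc, 60000, 1000, true), also inherited
        WaitCriterion wc = new WaitCriterion() {
          public boolean done() {
            return true; // trivially satisfied in this sketch
          }
          public String description() {
            return "nothing to wait for in this sketch";
          }
        };
        Wait.waitForCriterion(wc, 60000, 1000, true);
      }
    }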


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-assembly/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/LauncherLifecycleCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-assembly/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/LauncherLifecycleCommandsDUnitTest.java b/gemfire-assembly/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/LauncherLifecycleCommandsDUnitTest.java
index afb2770..7c6929c 100644
--- a/gemfire-assembly/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/LauncherLifecycleCommandsDUnitTest.java
+++ b/gemfire-assembly/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/LauncherLifecycleCommandsDUnitTest.java
@@ -16,6 +16,8 @@
  */
 package com.gemstone.gemfire.management.internal.cli.commands;
 
+import static com.gemstone.gemfire.test.dunit.Wait.*;
+
 import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.client.ClientCache;
 import com.gemstone.gemfire.cache.client.ClientCacheFactory;
@@ -43,6 +45,8 @@ import com.gemstone.gemfire.management.cli.Result;
 import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
 import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
 import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
+
 import org.junit.FixMethodOrder;
 import org.junit.runners.MethodSorters;
 
@@ -145,9 +149,7 @@ public class LauncherLifecycleCommandsDUnitTest extends CliCommandTestBase {
   }
 
   @Override
-  protected void tearDownAfter() throws Exception {
-    super.tearDownAfter();
-
+  protected final void postTearDown() throws Exception {
     LauncherLifecycleCommands launcherLifecycleCommands = new LauncherLifecycleCommands();
     Integer pid;
 
@@ -302,7 +304,7 @@ public class LauncherLifecycleCommandsDUnitTest extends CliCommandTestBase {
   public void test000StartLocatorCapturesOutputOnError() throws IOException {
     final int locatorPort = AvailablePortHelper.getRandomAvailableTCPPort();
 
-    String pathname = (getClass().getSimpleName() + "_" + testName);
+    String pathname = (getClass().getSimpleName() + "_" + getTestMethodName());
     File workingDirectory = new File(pathname);
 
     assertTrue(workingDirectory.isDirectory() || workingDirectory.mkdir());
@@ -364,7 +366,7 @@ public class LauncherLifecycleCommandsDUnitTest extends CliCommandTestBase {
 
     CommandStringBuilder command = new CommandStringBuilder(CliStrings.START_LOCATOR);
 
-    command.addOption(CliStrings.START_LOCATOR__MEMBER_NAME, getClass().getSimpleName().concat("_").concat(testName));
+    command.addOption(CliStrings.START_LOCATOR__MEMBER_NAME, getClass().getSimpleName().concat("_").concat(getTestMethodName()));
     command.addOption(CliStrings.START_LOCATOR__PORT, "0");
     command.addOption(CliStrings.START_LOCATOR__PROPERTIES, gemfirePropertiesPathname);
     command.addOption(CliStrings.START_LOCATOR__J, "-Dgemfire.http-service-port=0");
@@ -389,7 +391,7 @@ public class LauncherLifecycleCommandsDUnitTest extends CliCommandTestBase {
 
     CommandStringBuilder command = new CommandStringBuilder(CliStrings.START_LOCATOR);
 
-    command.addOption(CliStrings.START_LOCATOR__MEMBER_NAME, getClass().getSimpleName().concat("_").concat(testName));
+    command.addOption(CliStrings.START_LOCATOR__MEMBER_NAME, getClass().getSimpleName().concat("_").concat(getTestMethodName()));
     command.addOption(CliStrings.START_LOCATOR__PORT, "0");
     command.addOption(CliStrings.START_LOCATOR__SECURITY_PROPERTIES, gemfireSecurityPropertiesPathname);
     command.addOption(CliStrings.START_LOCATOR__J, "-Dgemfire.http-service-port=0");
@@ -414,7 +416,7 @@ public class LauncherLifecycleCommandsDUnitTest extends CliCommandTestBase {
 
     CommandStringBuilder command = new CommandStringBuilder(CliStrings.START_SERVER);
 
-    command.addOption(CliStrings.START_SERVER__NAME, getClass().getSimpleName().concat("_").concat(testName));
+    command.addOption(CliStrings.START_SERVER__NAME, getClass().getSimpleName().concat("_").concat(getTestMethodName()));
     command.addOption(CliStrings.START_SERVER__CACHE_XML_FILE, cacheXmlPathname);
 
     CommandResult result = executeCommand(command.toString());
@@ -433,7 +435,7 @@ public class LauncherLifecycleCommandsDUnitTest extends CliCommandTestBase {
 
     CommandStringBuilder command = new CommandStringBuilder(CliStrings.START_SERVER);
 
-    command.addOption(CliStrings.START_SERVER__NAME, getClass().getSimpleName().concat("_").concat(testName));
+    command.addOption(CliStrings.START_SERVER__NAME, getClass().getSimpleName().concat("_").concat(getTestMethodName()));
     command.addOption(CliStrings.START_SERVER__PROPERTIES, gemfirePropertiesFile);
 
     CommandResult result = executeCommand(command.toString());
@@ -453,7 +455,7 @@ public class LauncherLifecycleCommandsDUnitTest extends CliCommandTestBase {
 
     CommandStringBuilder command = new CommandStringBuilder(CliStrings.START_SERVER);
 
-    command.addOption(CliStrings.START_SERVER__NAME, getClass().getSimpleName().concat("_").concat(testName));
+    command.addOption(CliStrings.START_SERVER__NAME, getClass().getSimpleName().concat("_").concat(getTestMethodName()));
     command.addOption(CliStrings.START_SERVER__SECURITY_PROPERTIES, gemfireSecuritiesPropertiesFile);
 
     CommandResult result = executeCommand(command.toString());
@@ -471,7 +473,7 @@ public class LauncherLifecycleCommandsDUnitTest extends CliCommandTestBase {
   public void test006StartLocatorInRelativeDirectory() {
     final int locatorPort = AvailablePortHelper.getRandomAvailableTCPPort();
 
-    String pathname = (getClass().getSimpleName() + "_" + testName);
+    String pathname = (getClass().getSimpleName() + "_" + getTestMethodName());
     File workingDirectory = new File(pathname);
 
     assertTrue(workingDirectory.isDirectory() || workingDirectory.mkdir());
@@ -504,7 +506,7 @@ public class LauncherLifecycleCommandsDUnitTest extends CliCommandTestBase {
   }
 
   public void test007StatusLocatorUsingMemberNameIDWhenGfshIsNotConnected() {
-    CommandResult result = executeCommand(CliStrings.STATUS_LOCATOR + " --name=" + testName);
+    CommandResult result = executeCommand(CliStrings.STATUS_LOCATOR + " --name=" + getTestMethodName());
 
     assertNotNull(result);
     assertEquals(Result.Status.ERROR, result.getStatus());
@@ -518,7 +520,7 @@ public class LauncherLifecycleCommandsDUnitTest extends CliCommandTestBase {
     final int jmxManagerPort = ports[0];
     final int locatorPort = ports[1];
 
-    String pathname = (getClass().getSimpleName() + "_" + testName);
+    String pathname = (getClass().getSimpleName() + "_" + getTestMethodName());
     File workingDirectory = new File(pathname);
 
     assertTrue(workingDirectory.isDirectory() || workingDirectory.mkdir());
@@ -580,7 +582,7 @@ public class LauncherLifecycleCommandsDUnitTest extends CliCommandTestBase {
     final int jmxManagerPort = ports[0];
     final int locatorPort = ports[1];
 
-    String pathname = (getClass().getSimpleName() + "_" + testName);
+    String pathname = (getClass().getSimpleName() + "_" + getTestMethodName());
     File workingDirectory = new File(pathname);
 
     assertTrue(workingDirectory.isDirectory() || workingDirectory.mkdir());
@@ -631,7 +633,7 @@ public class LauncherLifecycleCommandsDUnitTest extends CliCommandTestBase {
   }
 
   public void test010StopLocatorUsingMemberNameIDWhenGfshIsNotConnected() {
-    CommandResult result = executeCommand(CliStrings.STOP_LOCATOR + " --name=" + testName);
+    CommandResult result = executeCommand(CliStrings.STOP_LOCATOR + " --name=" + getTestMethodName());
 
     assertNotNull(result);
     assertEquals(Result.Status.ERROR, result.getStatus());
@@ -645,7 +647,7 @@ public class LauncherLifecycleCommandsDUnitTest extends CliCommandTestBase {
     final int jmxManagerPort = ports[0];
     final int locatorPort = ports[1];
 
-    String pathname = (getClass().getSimpleName() + "_" + testName);
+    String pathname = (getClass().getSimpleName() + "_" + getTestMethodName());
     File workingDirectory = new File(pathname);
 
     assertTrue(workingDirectory.isDirectory() || workingDirectory.mkdir());
@@ -732,7 +734,7 @@ public class LauncherLifecycleCommandsDUnitTest extends CliCommandTestBase {
     final int jmxManagerPort = ports[0];
     final int locatorPort = ports[1];
 
-    String pathname = (getClass().getSimpleName() + "_" + testName);
+    String pathname = (getClass().getSimpleName() + "_" + getTestMethodName());
     File workingDirectory = new File(pathname);
 
     assertTrue(workingDirectory.isDirectory() || workingDirectory.mkdir());
@@ -803,14 +805,14 @@ public class LauncherLifecycleCommandsDUnitTest extends CliCommandTestBase {
   }
 
   public void test013StartServerWithSpring() {
-    String pathname = (getClass().getSimpleName() + "_" + testName);
+    String pathname = (getClass().getSimpleName() + "_" + getTestMethodName());
     File workingDirectory = new File(pathname);
 
     assertTrue(workingDirectory.isDirectory() || workingDirectory.mkdir());
 
     CommandStringBuilder command = new CommandStringBuilder(CliStrings.START_SERVER);
 
-    command.addOption(CliStrings.START_SERVER__NAME, getClass().getSimpleName().concat("_").concat(testName));
+    command.addOption(CliStrings.START_SERVER__NAME, getClass().getSimpleName().concat("_").concat(getTestMethodName()));
     command.addOption(CliStrings.START_SERVER__USE_CLUSTER_CONFIGURATION, Boolean.FALSE.toString());
     command.addOption(CliStrings.START_SERVER__LOG_LEVEL, "config");
     command.addOption(CliStrings.START_SERVER__INCLUDE_SYSTEM_CLASSPATH);
@@ -863,7 +865,7 @@ public class LauncherLifecycleCommandsDUnitTest extends CliCommandTestBase {
     final int serverPort = ports[0];
     final int locatorPort = ports[1];
 
-    String pathname = getClass().getSimpleName().concat("_").concat(testName);
+    String pathname = getClass().getSimpleName().concat("_").concat(getTestMethodName());
     File workingDirectory = new File(pathname);
 
     assertTrue(workingDirectory.isDirectory() || workingDirectory.mkdir());

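Note: two renames recur throughout this file: the protected testName field is replaced by the getTestMethodName() accessor, and the tearDownAfter() override becomes the postTearDown() hook, which no longer calls super (it is presumably invoked by the base class after its own tear-down). A hedged sketch of the resulting shape, assuming a class sitting alongside CliCommandTestBase; the class name, directory handling and cleanup body are illustrative only:

    import java.io.File;

    public class ExampleLifecycleDUnitTest extends CliCommandTestBase {

      public ExampleLifecycleDUnitTest(String name) {
        super(name);
      }

      public void testSomethingInAPerTestDirectory() {
        // was: getClass().getSimpleName() + "_" + testName
        String pathname = getClass().getSimpleName() + "_" + getTestMethodName();
        File workingDirectory = new File(pathname);
        assertTrue(workingDirectory.isDirectory() || workingDirectory.mkdir());
      }

      @Override
      protected final void postTearDown() throws Exception {
        // was: protected void tearDownAfter() { super.tearDownAfter(); ... }
        // per-test cleanup (stopping launched members, removing the directory) goes here
      }
    }
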
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-assembly/src/test/java/com/gemstone/gemfire/management/internal/configuration/SharedConfigurationEndToEndDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-assembly/src/test/java/com/gemstone/gemfire/management/internal/configuration/SharedConfigurationEndToEndDUnitTest.java b/gemfire-assembly/src/test/java/com/gemstone/gemfire/management/internal/configuration/SharedConfigurationEndToEndDUnitTest.java
index de75927..edd056b 100644
--- a/gemfire-assembly/src/test/java/com/gemstone/gemfire/management/internal/configuration/SharedConfigurationEndToEndDUnitTest.java
+++ b/gemfire-assembly/src/test/java/com/gemstone/gemfire/management/internal/configuration/SharedConfigurationEndToEndDUnitTest.java
@@ -16,6 +16,8 @@
  */
 package com.gemstone.gemfire.management.internal.configuration;
 
+import static com.gemstone.gemfire.test.dunit.Wait.*;
+
 import com.gemstone.gemfire.cache.Cache;
 import com.gemstone.gemfire.cache.CacheFactory;
 import com.gemstone.gemfire.cache.RegionShortcut;
@@ -40,8 +42,12 @@ import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
 import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
+
 import org.apache.commons.io.FileUtils;
 
 import java.io.File;
@@ -88,7 +94,7 @@ public class SharedConfigurationEndToEndDUnitTest extends CliCommandTestBase {
   }
 
   public void testStartServerAndExecuteCommands() throws InterruptedException, ClassNotFoundException, IOException, ExecutionException {
-    addExpectedException("EntryDestroyedException");
+    IgnoredException.addIgnoredException("EntryDestroyedException");
     Object[] result = setup();
     final int locatorPort = (Integer) result[0];
     final String jmxHost = (String) result[1];
@@ -111,7 +117,7 @@ public class SharedConfigurationEndToEndDUnitTest extends CliCommandTestBase {
 
 
     //shutdown everything
-    getLogWriter().info("Shutting down all the members");
+    LogWriterUtils.getLogWriter().info("Shutting down all the members");
     shutdownAll();
     deleteSavedJarFiles();
   }
@@ -141,7 +147,7 @@ public class SharedConfigurationEndToEndDUnitTest extends CliCommandTestBase {
 
   protected void executeAndVerifyCommand(String commandString) {
     CommandResult cmdResult = executeCommand(commandString);
-    getLogWriter().info("Command Result : \n" + commandResultToString(cmdResult));
+    LogWriterUtils.getLogWriter().info("Command Result : \n" + commandResultToString(cmdResult));
     assertEquals(Status.OK, cmdResult.getStatus());
     assertFalse(cmdResult.failedToPersist());
   }
@@ -332,7 +338,7 @@ public class SharedConfigurationEndToEndDUnitTest extends CliCommandTestBase {
         try {
           final InternalLocator locator = (InternalLocator) Locator.startLocatorAndDS(locator1Port, locatorLogFile, null,
               locatorProps);
-          DistributedTestCase.WaitCriterion wc = new DistributedTestCase.WaitCriterion() {
+          WaitCriterion wc = new WaitCriterion() {
             @Override
             public boolean done() {
               return locator.isSharedConfigurationRunning();
@@ -343,7 +349,7 @@ public class SharedConfigurationEndToEndDUnitTest extends CliCommandTestBase {
               return "Waiting for shared configuration to be started";
             }
           };
-          DistributedTestCase.waitForCriterion(wc, TIMEOUT, INTERVAL, true);
+          waitForCriterion(wc, TIMEOUT, INTERVAL, true);
         } catch (IOException ioex) {
           fail("Unable to create a locator with a shared configuration");
         }

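Note: WaitCriterion is now a top-level interface in com.gemstone.gemfire.test.dunit rather than a nested type of DistributedTestCase, and waitForCriterion is statically imported from the new Wait class at the top of this file. A minimal self-contained sketch; the throw-on-timeout meaning of the final boolean is assumed from how the call sites above use it:

    import static com.gemstone.gemfire.test.dunit.Wait.waitForCriterion;

    import java.util.concurrent.atomic.AtomicBoolean;

    import com.gemstone.gemfire.test.dunit.WaitCriterion;

    public class WaitCriterionSketch {

      public static void awaitFlag(final AtomicBoolean started, long timeoutMs, long pollMs) {
        WaitCriterion wc = new WaitCriterion() {
          public boolean done() {
            return started.get(); // polled until it returns true or the timeout elapses
          }
          public String description() {
            return "waiting for the started flag to be set";
          }
        };
        // final argument true: fail the caller if the criterion is not met in time
        waitForCriterion(wc, timeoutMs, pollMs, true);
      }
    }
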
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/main/java/com/gemstone/gemfire/internal/util/DebuggerSupport.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/util/DebuggerSupport.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/util/DebuggerSupport.java
index 49ce32e..3df61ec 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/util/DebuggerSupport.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/util/DebuggerSupport.java
@@ -14,11 +14,13 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package com.gemstone.gemfire.internal.util;
 
-import com.gemstone.gemfire.i18n.LogWriterI18n;
+import org.apache.logging.log4j.Logger;
+
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.internal.logging.LogService;
+import com.gemstone.gemfire.internal.logging.log4j.LocalizedMessage;
 
 /**
  *
@@ -26,23 +28,24 @@ import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
  *
  */
 public abstract class DebuggerSupport  {
+  private static final Logger logger = LogService.getLogger();
   
   /** Creates a new instance of DebuggerSupport */
   private DebuggerSupport() {
   }
   
   /** Debugger support */
-  public static void waitForJavaDebugger(LogWriterI18n logger) {
-    waitForJavaDebugger(logger, null);
+  public static void waitForJavaDebugger() {
+    waitForJavaDebugger(null);
   }
   
   @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="IL_INFINITE_LOOP", justification="Endless loop is for debugging purposes.") 
-  public static void waitForJavaDebugger(LogWriterI18n logger, String extraLogMsg) {
+  public static void waitForJavaDebugger(String extraLogMsg) {
     boolean cont = false;
     String msg = ":";
     if (extraLogMsg != null)
       msg += extraLogMsg;
-    logger.severe(LocalizedStrings.DebuggerSupport_WAITING_FOR_DEBUGGER_TO_ATTACH_0, msg);
+    logger.fatal(LocalizedMessage.create(LocalizedStrings.DebuggerSupport_WAITING_FOR_DEBUGGER_TO_ATTACH_0, msg));
     boolean interrupted = false;
     while (!cont) { // set cont to true in debugger when ready to continue
       try {
@@ -57,6 +60,6 @@ public abstract class DebuggerSupport  {
     if (interrupted) {
       Thread.currentThread().interrupt();
     }
-    logger.info(LocalizedStrings.DebuggerSupport_DEBUGGER_CONTINUING);
+    logger.info(LocalizedMessage.create(LocalizedStrings.DebuggerSupport_DEBUGGER_CONTINUING));
   }
 }

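Note: DebuggerSupport now owns its logging (a Log4j Logger obtained from LogService), so callers no longer pass a LogWriterI18n. A hedged sketch of the new call shape; the surrounding class is hypothetical:

    import com.gemstone.gemfire.internal.util.DebuggerSupport;

    public class DebuggerSupportSketch {

      public static void main(String[] args) {
        // Logs the wait message through DebuggerSupport's own logger, then loops
        // until 'cont' is flipped to true from an attached debugger.
        DebuggerSupport.waitForJavaDebugger("pausing this VM for a debugger to attach");

        // The no-argument overload does the same without an extra message:
        // DebuggerSupport.waitForJavaDebugger();
      }
    }
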
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/TXExpiryJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/TXExpiryJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/TXExpiryJUnitTest.java
index 674f0c9..d396877 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/TXExpiryJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/TXExpiryJUnitTest.java
@@ -51,8 +51,8 @@ import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.LocalRegion;
 import com.gemstone.gemfire.internal.cache.TXManagerImpl;
 import com.gemstone.gemfire.internal.cache.TXStateProxy;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
@@ -247,7 +247,7 @@ public class TXExpiryJUnitTest {
         return "never saw entry destroy of " + key;
       }
     };
-    DistributedTestCase.waitForCriterion(waitForExpire, 3000, 10, true);
+    Wait.waitForCriterion(waitForExpire, 3000, 10, true);
   }
   
   public static void waitForEntryExpiration(LocalRegion lr, String key) {
@@ -257,7 +257,7 @@ public class TXExpiryJUnitTest {
         detector = new ExpirationDetector(lr.getEntryExpiryTask(key));
         ExpiryTask.expiryTaskListener = detector;
         ExpiryTask.permitExpiration();
-        DistributedTestCase.waitForCriterion(detector, 3000, 2, true);
+        Wait.waitForCriterion(detector, 3000, 2, true);
       } while (!detector.hasExpired() && detector.wasRescheduled());
     } finally {
       ExpiryTask.expiryTaskListener = null;
@@ -270,7 +270,7 @@ public class TXExpiryJUnitTest {
         detector = new ExpirationDetector(ttl ? lr.getRegionTTLExpiryTask() : lr.getRegionIdleExpiryTask());
         ExpiryTask.expiryTaskListener = detector;
         ExpiryTask.permitExpiration();
-        DistributedTestCase.waitForCriterion(detector, 3000, 2, true);
+        Wait.waitForCriterion(detector, 3000, 2, true);
       } while (!detector.hasExpired() && detector.wasRescheduled());
     } finally {
       ExpiryTask.expiryTaskListener = null;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/CacheRegionClearStatsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/CacheRegionClearStatsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/CacheRegionClearStatsDUnitTest.java
index 3502021..7ea3565 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/CacheRegionClearStatsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/CacheRegionClearStatsDUnitTest.java
@@ -25,8 +25,10 @@ import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 /**
  * verifies the count of clear operation
@@ -151,7 +153,7 @@ public class CacheRegionClearStatsDUnitTest extends DistributedTestCase {
 
     client1.invoke(CacheRegionClearStatsDUnitTest.class,
         "createClientCache", new Object[] {
-            getServerHostName(server1.getHost()), port1 });
+            NetworkUtils.getServerHostName(server1.getHost()), port1 });
     client1.invoke(CacheRegionClearStatsDUnitTest.class, "put");
     
     try{
@@ -176,7 +178,7 @@ public class CacheRegionClearStatsDUnitTest extends DistributedTestCase {
 
     client1.invoke(CacheRegionClearStatsDUnitTest.class,
         "createClientCacheDisk", new Object[] {
-            getServerHostName(server1.getHost()), port1 });
+            NetworkUtils.getServerHostName(server1.getHost()), port1 });
     client1.invoke(CacheRegionClearStatsDUnitTest.class, "put");
     
     try{
@@ -192,8 +194,8 @@ public class CacheRegionClearStatsDUnitTest extends DistributedTestCase {
     "validationClearStat");
   }
   
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     client1.invoke(CacheRegionClearStatsDUnitTest.class, "closeCache");
     // then close the servers
     server1.invoke(CacheRegionClearStatsDUnitTest.class, "closeCache");
@@ -234,7 +236,7 @@ public class CacheRegionClearStatsDUnitTest extends DistributedTestCase {
       r1.clear();
     }
     catch (Exception ex) {
-      fail("failed while put", ex);
+      Assert.fail("failed while put", ex);
     }
   }
   

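Note: two more recurring substitutions show up here: the public tearDown2() override becomes the protected final preTearDown() hook (no super call; the base class presumably runs it before its own tear-down), and the inherited fail(String, Throwable) becomes the static Assert.fail(String, Throwable), which keeps the causing exception attached to the failure. An illustrative sketch with placeholder class and test bodies:

    import com.gemstone.gemfire.test.dunit.Assert;
    import com.gemstone.gemfire.test.dunit.DistributedTestCase;

    public class ExampleStatsDUnitTest extends DistributedTestCase {

      public ExampleStatsDUnitTest(String name) {
        super(name);
      }

      @Override
      protected final void preTearDown() throws Exception {
        // was: public void tearDown2() { super.tearDown2(); ... }
        // close client and server caches in the member VMs here
      }

      public void testPutDoesNotThrow() {
        try {
          // exercise the region under test here
        } catch (Exception ex) {
          // was: fail("failed while put", ex), inherited from the old base class
          Assert.fail("failed while put", ex);
        }
      }
    }
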
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/ClientServerTimeSyncDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/ClientServerTimeSyncDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/ClientServerTimeSyncDUnitTest.java
index 2606a8f..8166318 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/ClientServerTimeSyncDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/ClientServerTimeSyncDUnitTest.java
@@ -29,9 +29,14 @@ import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.distributed.internal.DSClock;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 public class ClientServerTimeSyncDUnitTest extends CacheTestCase {
 
@@ -58,25 +63,25 @@ public class ClientServerTimeSyncDUnitTest extends CacheTestCase {
         public Object call() {
           Cache cache = getCache();
           cache.createRegionFactory(RegionShortcut.REPLICATE).create(regionName);
-          getLogWriter().info("Done creating region, now creating CacheServer");
+          LogWriterUtils.getLogWriter().info("Done creating region, now creating CacheServer");
           CacheServer server = null;
           try {
             server = cache.addCacheServer();
             server.setPort(AvailablePortHelper.getRandomAvailableTCPPort());
             server.start();
           } catch (IOException e) {
-            fail("Starting cache server failed.", e);
+            Assert.fail("Starting cache server failed.", e);
           }
   
           // now set an artificial time offset for the test
           system.getClock().setCacheTimeOffset(null, TEST_OFFSET, true);
           
-          getLogWriter().info("Done creating and starting CacheServer on port " + server.getPort());
+          LogWriterUtils.getLogWriter().info("Done creating and starting CacheServer on port " + server.getPort());
           return server.getPort();
         }
       });
       
-      final String hostName = getServerHostName(vm0.getHost());
+      final String hostName = NetworkUtils.getServerHostName(vm0.getHost());
   
       // Start client with proxy region and register interest
         
@@ -98,14 +103,14 @@ public class ClientServerTimeSyncDUnitTest extends CacheTestCase {
       WaitCriterion wc = new WaitCriterion() {
         public boolean done() {
           long clientTimeOffset = clock.getCacheTimeOffset();
-          getLogWriter().info("Client node's new time offset is: " + clientTimeOffset);
+          LogWriterUtils.getLogWriter().info("Client node's new time offset is: " + clientTimeOffset);
           return clientTimeOffset >= TEST_OFFSET;
         }
         public String description() {
           return "Waiting for cacheTimeOffset to be non-zero.  PingOp should have set it to something";
         }
       };
-      waitForCriterion(wc, 60000, 1000, true);
+      Wait.waitForCriterion(wc, 60000, 1000, true);
     } finally {
       cache.close();
       vm1.invoke(CacheTestCase.class, "disconnectFromDS");
@@ -135,27 +140,27 @@ public class ClientServerTimeSyncDUnitTest extends CacheTestCase {
         public Object call() {
           Cache cache = getCache();
           cache.createRegionFactory(RegionShortcut.REPLICATE).create(regionName);
-          getLogWriter().info("Done creating region, now creating CacheServer");
+          LogWriterUtils.getLogWriter().info("Done creating region, now creating CacheServer");
           CacheServer server = null;
           try {
             server = cache.addCacheServer();
             server.setPort(AvailablePortHelper.getRandomAvailableTCPPort());
             server.start();
           } catch (IOException e) {
-            fail("Starting cache server failed.", e);
+            Assert.fail("Starting cache server failed.", e);
           }
   
           // now set an artificial time offset for the test
           system.getClock().setCacheTimeOffset(null, -TEST_OFFSET, true);
           
-          getLogWriter().info("Done creating and starting CacheServer on port " + server.getPort());
+          LogWriterUtils.getLogWriter().info("Done creating and starting CacheServer on port " + server.getPort());
           return server.getPort();
         }
       });
       
-      pause((int)TEST_OFFSET);  // let cacheTimeMillis consume the time offset
+      Wait.pause((int)TEST_OFFSET);  // let cacheTimeMillis consume the time offset
       
-      final String hostName = getServerHostName(vm0.getHost());
+      final String hostName = NetworkUtils.getServerHostName(vm0.getHost());
   
       // Start client with proxy region and register interest
         
@@ -177,7 +182,7 @@ public class ClientServerTimeSyncDUnitTest extends CacheTestCase {
       WaitCriterion wc = new WaitCriterion() {
         public boolean done() {
           long clientTimeOffset = clock.getCacheTimeOffset();
-          getLogWriter().info("Client node's new time offset is: " + clientTimeOffset);
+          LogWriterUtils.getLogWriter().info("Client node's new time offset is: " + clientTimeOffset);
           if (clientTimeOffset >= 0) {
             return false;
           }
@@ -188,7 +193,7 @@ public class ClientServerTimeSyncDUnitTest extends CacheTestCase {
           return "Waiting for cacheTimeOffset to be negative and cacheTimeMillis to stabilize";
         }
       };
-      waitForCriterion(wc, 60000, 1000, true);
+      Wait.waitForCriterion(wc, 60000, 1000, true);
     } finally {
       cache.close();
       vm1.invoke(CacheTestCase.class, "disconnectFromDS");

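Note: fixed delays move to the Wait class as well; Wait.pause(ms) replaces the inherited pause(ms) here, and is kept only where the test really has to let wall-clock time elapse (letting cacheTimeMillis absorb the artificial offset), while condition-based waits in the same file go through Wait.waitForCriterion. A trivial sketch of the renamed call; the blocking-delay behaviour is assumed from how it is used above:

    import com.gemstone.gemfire.test.dunit.Wait;

    public class PauseSketch {

      // was: pause((int) TEST_OFFSET), inherited from DistributedTestCase
      public static void letOffsetElapse(int offsetMillis) {
        Wait.pause(offsetMillis); // blocks the calling thread for roughly offsetMillis
      }
    }
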
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/ConnectionPoolAndLoaderDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/ConnectionPoolAndLoaderDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/ConnectionPoolAndLoaderDUnitTest.java
index 677ab14..ee382ab 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/ConnectionPoolAndLoaderDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/ConnectionPoolAndLoaderDUnitTest.java
@@ -28,6 +28,7 @@ import com.gemstone.gemfire.cache.util.CacheWriterAdapter;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
@@ -51,7 +52,8 @@ public class ConnectionPoolAndLoaderDUnitTest  extends CacheTestCase {
     super.setUp();
   }
   
-  public void tearDown2() {
+  @Override
+  protected final void preTearDownCacheTestCase() {
     //TODO grid. This is a hack. The next dunit test to run after
     //this one is the ConnectionPoolAutoDUnit test. That ends up calling
     //getSystem() with no arguments and expects to get a system without
@@ -92,7 +94,7 @@ public class ConnectionPoolAndLoaderDUnitTest  extends CacheTestCase {
       public Object call() {
         Cache cache = getCache();
         PoolFactory factory = PoolManager.createFactory();
-        factory.addServer(getServerHostName(host), serverPort);
+        factory.addServer(NetworkUtils.getServerHostName(host), serverPort);
         factory.create("pool1");
         
         AttributesFactory af = new AttributesFactory();
@@ -171,7 +173,7 @@ public class ConnectionPoolAndLoaderDUnitTest  extends CacheTestCase {
       public Object call() {
         Cache cache = getCache();
         PoolFactory factory = PoolManager.createFactory();
-        factory.addServer(getServerHostName(host), serverPort);
+        factory.addServer(NetworkUtils.getServerHostName(host), serverPort);
         factory.create("pool1");
         
         AttributesFactory af = new AttributesFactory();
@@ -281,7 +283,7 @@ public class ConnectionPoolAndLoaderDUnitTest  extends CacheTestCase {
         Cache cache = getCache();
         useLocator = false;
         PoolFactory factory = PoolManager.createFactory();
-        factory.addServer(getServerHostName(host), serverPort);
+        factory.addServer(NetworkUtils.getServerHostName(host), serverPort);
         factory.create("pool1");
         AttributesFactory af = new AttributesFactory();
         af.setDataPolicy(DataPolicy.NORMAL);
@@ -303,7 +305,7 @@ public class ConnectionPoolAndLoaderDUnitTest  extends CacheTestCase {
         Cache cache = getCache();
         useLocator = false;
         PoolFactory factory = PoolManager.createFactory();
-        factory.addServer(getServerHostName(host), serverPort);
+        factory.addServer(NetworkUtils.getServerHostName(host), serverPort);
         factory.create("pool1");
         AttributesFactory af = new AttributesFactory();
         af.setDataPolicy(DataPolicy.NORMAL);

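Note: subclasses of CacheTestCase get their own hook: the public tearDown2() override becomes protected final preTearDownCacheTestCase(), so cleanup that must run before CacheTestCase closes the cache belongs there (the ordering is inferred from the hook's name and the hunk above). A hedged sketch with placeholder bodies:

    import com.gemstone.gemfire.cache30.CacheTestCase;

    public class ExamplePoolDUnitTest extends CacheTestCase {

      public ExamplePoolDUnitTest(String name) {
        super(name);
      }

      @Override
      protected final void preTearDownCacheTestCase() {
        // was: public void tearDown2() { ... }
        // cleanup that must happen while the cache is still available goes here
      }

      public void testPlaceholder() {
        // test body elided in this sketch
      }
    }
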
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/ClientServerRegisterInterestsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/ClientServerRegisterInterestsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/ClientServerRegisterInterestsDUnitTest.java
index 89c4e58..58844b0 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/ClientServerRegisterInterestsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/ClientServerRegisterInterestsDUnitTest.java
@@ -35,6 +35,7 @@ import com.gemstone.gemfire.cache.util.CacheListenerAdapter;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
@@ -68,12 +69,11 @@ public class ClientServerRegisterInterestsDUnitTest extends DistributedTestCase
     super.setUp();
     disconnectAllFromDS();
     setupGemFireCacheServer();
-    addExpectedException("java.net.ConnectException");
+    IgnoredException.addIgnoredException("java.net.ConnectException");
   }
 
   @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  protected final void preTearDown() throws Exception {
     serverPort.set(0);
     entryEvents.clear();
     gemfireServerVm.invoke(new SerializableRunnable() {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/internal/AutoConnectionSourceDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/internal/AutoConnectionSourceDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/internal/AutoConnectionSourceDUnitTest.java
index 2d5f8fe..b0f3b59 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/internal/AutoConnectionSourceDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/internal/AutoConnectionSourceDUnitTest.java
@@ -37,9 +37,12 @@ import com.gemstone.gemfire.management.membership.ClientMembership;
 import com.gemstone.gemfire.management.membership.ClientMembershipEvent;
 import com.gemstone.gemfire.management.membership.ClientMembershipListenerAdapter;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 /**
  * Tests cases that are particular for the auto connection source
@@ -55,7 +58,7 @@ public class AutoConnectionSourceDUnitTest extends LocatorTestBase {
   
   public void setUp() throws Exception {
     super.setUp();
-    addExpectedException("NoAvailableLocatorsException");
+    IgnoredException.addIgnoredException("NoAvailableLocatorsException");
   }
 
   public AutoConnectionSourceDUnitTest(String name) {
@@ -71,11 +74,11 @@ public class AutoConnectionSourceDUnitTest extends LocatorTestBase {
     int locatorPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
     startLocatorInVM(vm0, locatorPort, "");
     
-    String locators = getServerHostName(vm0.getHost())+ "[" + locatorPort + "]";
+    String locators = NetworkUtils.getServerHostName(vm0.getHost())+ "[" + locatorPort + "]";
     
     startBridgeServerInVM(vm1, null, locators);
 
-    startBridgeClientInVM(vm2, null, getServerHostName(vm0.getHost()), locatorPort);
+    startBridgeClientInVM(vm2, null, NetworkUtils.getServerHostName(vm0.getHost()), locatorPort);
 
     putAndWaitForSuccess(vm2, REGION_NAME, "key", "value");
     
@@ -88,7 +91,7 @@ public class AutoConnectionSourceDUnitTest extends LocatorTestBase {
     VM vm0 = host.getVM(0);
     
     try {
-      startBridgeClientInVM(vm0, null, getServerHostName(vm0.getHost()), AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET));
+      startBridgeClientInVM(vm0, null, NetworkUtils.getServerHostName(vm0.getHost()), AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET));
       putInVM(vm0, "key", "value");
       fail("Client cache should not have been able to start");
     } catch(Exception e) {
@@ -104,7 +107,7 @@ public class AutoConnectionSourceDUnitTest extends LocatorTestBase {
     int locatorPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
     startLocatorInVM(vm0, locatorPort, "");
     try { 
-      startBridgeClientInVM(vm1, null, getServerHostName(vm0.getHost()), locatorPort);
+      startBridgeClientInVM(vm1, null, NetworkUtils.getServerHostName(vm0.getHost()), locatorPort);
       putInVM(vm0, "key", "value");
       fail("Client cache should not have been able to start");
     } catch(Exception e) {
@@ -122,11 +125,11 @@ public class AutoConnectionSourceDUnitTest extends LocatorTestBase {
     int locatorPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
     startLocatorInVM(vm0, locatorPort, "");
     
-    String locators = getServerHostName(vm0.getHost()) + "[" + locatorPort + "]";
+    String locators = NetworkUtils.getServerHostName(vm0.getHost()) + "[" + locatorPort + "]";
     
     startBridgeServerInVM(vm1, null, locators);
     
-    startBridgeClientInVM(vm2, null, getServerHostName(vm0.getHost()), locatorPort);
+    startBridgeClientInVM(vm2, null, NetworkUtils.getServerHostName(vm0.getHost()), locatorPort);
     
     putAndWaitForSuccess(vm2, REGION_NAME, "key", "value");
     
@@ -141,7 +144,7 @@ public class AutoConnectionSourceDUnitTest extends LocatorTestBase {
   
   public void testDynamicallyFindLocators() throws Exception {
     final Host host = Host.getHost(0);
-    final String hostName = getServerHostName(host);
+    final String hostName = NetworkUtils.getServerHostName(host);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
@@ -156,7 +159,7 @@ public class AutoConnectionSourceDUnitTest extends LocatorTestBase {
     startLocatorInVM(vm0, locatorPort0, locators);
     
     startLocatorInVM(vm1, locatorPort1, locators);
-    startBridgeClientInVM(vm2, null, getServerHostName(vm0.getHost()), locatorPort0);
+    startBridgeClientInVM(vm2, null, NetworkUtils.getServerHostName(vm0.getHost()), locatorPort0);
     
     InetSocketAddress locatorToWaitFor= new InetSocketAddress(hostName, locatorPort1);
     waitForLocatorDiscovery(vm2, locatorToWaitFor);
@@ -187,11 +190,11 @@ public class AutoConnectionSourceDUnitTest extends LocatorTestBase {
     
     int locatorPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
     
-    String locators = getServerHostName(vm0.getHost()) + "[" + locatorPort + "]";
+    String locators = NetworkUtils.getServerHostName(vm0.getHost()) + "[" + locatorPort + "]";
     
     startBridgeServerWithEmbeddedLocator(vm0, null, locators, new String[] {REGION_NAME}, CacheServer.DEFAULT_LOAD_PROBE);
     
-    startBridgeClientInVM(vm2, null, getServerHostName(vm0.getHost()), locatorPort);
+    startBridgeClientInVM(vm2, null, NetworkUtils.getServerHostName(vm0.getHost()), locatorPort);
     
     putAndWaitForSuccess(vm2, REGION_NAME, "key", "value");
     
@@ -223,13 +226,13 @@ public class AutoConnectionSourceDUnitTest extends LocatorTestBase {
     int locatorPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
     startLocatorInVM(vm0, locatorPort, "");
     
-    String locators = getServerHostName(vm0.getHost()) + "[" + locatorPort + "]";
+    String locators = NetworkUtils.getServerHostName(vm0.getHost()) + "[" + locatorPort + "]";
     
     startBridgeServerInVM(vm1, new String[] {"group1", "group2"} , locators, new String[] {"A", "B"});
     startBridgeServerInVM(vm2, new String[] {"group2", "group3"}, locators, new String[] {"B", "C"});
 
     
-    startBridgeClientInVM(vm3, "group1", getServerHostName(vm0.getHost()), locatorPort, new String [] {"A", "B", "C"});
+    startBridgeClientInVM(vm3, "group1", NetworkUtils.getServerHostName(vm0.getHost()), locatorPort, new String [] {"A", "B", "C"});
     putAndWaitForSuccess(vm3, "A", "key", "value");
     Assert.assertEquals("value", getInVM(vm1, "A", "key"));
     try {
@@ -239,7 +242,7 @@ public class AutoConnectionSourceDUnitTest extends LocatorTestBase {
     
     stopBridgeMemberVM(vm3);
     
-    startBridgeClientInVM(vm3, "group3", getServerHostName(vm0.getHost()), locatorPort, new String [] {"A", "B", "C"});
+    startBridgeClientInVM(vm3, "group3", NetworkUtils.getServerHostName(vm0.getHost()), locatorPort, new String [] {"A", "B", "C"});
     try {
       putInVM(vm3, "A", "key3", "value");
       fail("Should not have been able to find Region A on the server");
@@ -249,7 +252,7 @@ public class AutoConnectionSourceDUnitTest extends LocatorTestBase {
     
     stopBridgeMemberVM(vm3);
     
-    startBridgeClientInVM(vm3, "group2", getServerHostName(vm0.getHost()), locatorPort, new String [] {"A", "B", "C"});
+    startBridgeClientInVM(vm3, "group2", NetworkUtils.getServerHostName(vm0.getHost()), locatorPort, new String [] {"A", "B", "C"});
     putInVM(vm3, "B", "key5", "value");
     Assert.assertEquals("value", getInVM(vm1, "B", "key5"));
     Assert.assertEquals("value", getInVM(vm2, "B", "key5"));
@@ -275,18 +278,18 @@ public class AutoConnectionSourceDUnitTest extends LocatorTestBase {
     
     startLocatorInVM(vm0, locatorPort, "");
     
-    final String locators = getServerHostName(vm0.getHost()) + "[" + locatorPort + "]";
+    final String locators = NetworkUtils.getServerHostName(vm0.getHost()) + "[" + locatorPort + "]";
     
     final int serverPort1 =startBridgeServerInVM(vm1, new String[] {"group1"}, locators);
     final int serverPort2 =addCacheServerInVM(vm1, new String[] {"group2"});
     
-    startBridgeClientInVM(vm2, "group2", getServerHostName(vm0.getHost()), locatorPort);
+    startBridgeClientInVM(vm2, "group2", NetworkUtils.getServerHostName(vm0.getHost()), locatorPort);
     
     checkEndpoints(vm2, new int[] {serverPort2});
     
     stopBridgeMemberVM(vm2);
 
-    startBridgeClientInVM(vm2, "group1", getServerHostName(vm0.getHost()), locatorPort);
+    startBridgeClientInVM(vm2, "group1", NetworkUtils.getServerHostName(vm0.getHost()), locatorPort);
     
     checkEndpoints(vm2, new int[] {serverPort1});
   }
@@ -299,7 +302,7 @@ public class AutoConnectionSourceDUnitTest extends LocatorTestBase {
     VM clientVM = host.getVM(3);
     int locatorPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
     startLocatorInVM(locatorVM, locatorPort, "");
-    String locators = getServerHostName(locatorVM.getHost()) + "[" + locatorPort + "]";
+    String locators = NetworkUtils.getServerHostName(locatorVM.getHost()) + "[" + locatorPort + "]";
 
     //start a bridge server with a listener
     addBridgeListener(bridge1VM);
@@ -307,7 +310,7 @@ public class AutoConnectionSourceDUnitTest extends LocatorTestBase {
 
     //start a bridge client with a listener
     addBridgeListener(clientVM);
-    startBridgeClientInVM(clientVM, null, getServerHostName(locatorVM.getHost()), locatorPort);
+    startBridgeClientInVM(clientVM, null, NetworkUtils.getServerHostName(locatorVM.getHost()), locatorPort);
     // wait for client to connect
     checkEndpoints(clientVM, new int[] {serverPort1});
     
@@ -401,7 +404,7 @@ public class AutoConnectionSourceDUnitTest extends LocatorTestBase {
         if(remaining <= 0) {
           throw e;
         }
-        pause(100);
+        Wait.pause(100);
         remaining = endTime - System.currentTimeMillis();
       }
     }
@@ -450,7 +453,7 @@ public class AutoConnectionSourceDUnitTest extends LocatorTestBase {
           if (expectedEndpointPorts.size() == actualEndpointPorts.size()) {
             break;
           } else {
-            pause(100);
+            Wait.pause(100);
           }
         } while(retryCount-- > 0);
         Assert.assertEquals(expectedEndpointPorts, actualEndpointPorts);

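[Editor's note] The hunks above replace helpers the tests used to inherit from DistributedTestCase (getServerHostName, pause) with the extracted static utilities NetworkUtils and Wait. As orientation only, here is a minimal, hypothetical sketch of the same calls outside any real test; the class name and the retry loop are illustrative and not part of the commit.

    import com.gemstone.gemfire.test.dunit.Host;
    import com.gemstone.gemfire.test.dunit.NetworkUtils;
    import com.gemstone.gemfire.test.dunit.VM;
    import com.gemstone.gemfire.test.dunit.Wait;

    public class ExtractedStaticsSketch {

      // Builds the "host[port]" locator string the tests pass around, using
      // NetworkUtils.getServerHostName(...) instead of the old inherited helper.
      static String locatorString(int locatorPort) {
        Host host = Host.getHost(0);
        VM locatorVM = host.getVM(0);
        return NetworkUtils.getServerHostName(locatorVM.getHost())
            + "[" + locatorPort + "]";
      }

      // Retries a check a few times, sleeping with Wait.pause(...) instead of
      // the old inherited pause(...), mirroring putAndWaitForSuccess above.
      static void retry(Runnable check) {
        for (int attempt = 0; attempt < 3; attempt++) {
          try {
            check.run();
            return;
          } catch (AssertionError failed) {
            Wait.pause(100); // back off briefly before the next attempt
          }
        }
        check.run(); // final attempt; let any failure propagate
      }
    }
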
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/internal/CacheServerSSLConnectionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/internal/CacheServerSSLConnectionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/internal/CacheServerSSLConnectionDUnitTest.java
index 3150edd..9bdf56c 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/internal/CacheServerSSLConnectionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/client/internal/CacheServerSSLConnectionDUnitTest.java
@@ -34,6 +34,7 @@ import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.security.AuthenticationRequiredException;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.VM;
 import com.gemstone.gemfire.util.test.TestUtil;
@@ -294,8 +295,8 @@ public class CacheServerSSLConnectionDUnitTest extends DistributedTestCase {
     params[3] = cacheClientSslRequireAuth;
     params[4] = TRUSTED_STORE;
     params[5] = TRUSTED_STORE;
-    ExpectedException expect = addExpectedException("javax.net.ssl.SSLException", serverVM);
-    ExpectedException expect2 = addExpectedException("IOException", serverVM);
+    IgnoredException expect = IgnoredException.addIgnoredException("javax.net.ssl.SSLException", serverVM);
+    IgnoredException expect2 = IgnoredException.addIgnoredException("IOException", serverVM);
     try{
       //getLogWriter().info("Starting client with server endpoint " + hostName + ":" + port);    
       clientVM.invoke(CacheServerSSLConnectionDUnitTest.class, "setUpClientVMTaskNoSubscription", params);
@@ -384,7 +385,7 @@ public class CacheServerSSLConnectionDUnitTest extends DistributedTestCase {
     params[3] = cacheClientSslRequireAuth;
     params[4] = TRUSTED_STORE;
     params[5] = TRUSTED_STORE;
-    ExpectedException expect = addExpectedException("javax.net.ssl.SSLHandshakeException", serverVM);
+    IgnoredException expect = IgnoredException.addIgnoredException("javax.net.ssl.SSLHandshakeException", serverVM);
     try{
       //getLogWriter().info("Starting client with server endpoint " + hostName + ":" + port);    
       clientVM.invoke(CacheServerSSLConnectionDUnitTest.class, "setUpClientVMTask", params);
@@ -411,15 +412,13 @@ public class CacheServerSSLConnectionDUnitTest extends DistributedTestCase {
     }
   }
   
-  public void tearDown2() throws Exception
-  {
+  @Override
+  protected final void preTearDown() throws Exception {
     final Host host = Host.getHost(0);
     VM serverVM = host.getVM(1);
     VM clientVM = host.getVM(2);
     clientVM.invoke(CacheServerSSLConnectionDUnitTest.class, "closeClientCacheTask");
     serverVM.invoke(CacheServerSSLConnectionDUnitTest.class, "closeCacheTask");
-    super.tearDown2();
   }
-
 }
 

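[Editor's note] Two renames appear in the hunk above: addExpectedException(...) becomes IgnoredException.addIgnoredException(...), and the tearDown2() override becomes preTearDown(), which the framework invokes as part of its own tearDown. The sketch below only illustrates that shape; it assumes the JUnit3-style String constructor and that the returned IgnoredException handle is cleared with remove(), and the test body is a placeholder.

    import com.gemstone.gemfire.test.dunit.DistributedTestCase;
    import com.gemstone.gemfire.test.dunit.IgnoredException;

    public class IgnoredExceptionSketchDUnitTest extends DistributedTestCase {

      public IgnoredExceptionSketchDUnitTest(String name) {
        super(name);
      }

      public void testNoisyHandshake() {
        // Suppress the expected SSL noise for the duration of this test.
        IgnoredException ignored =
            IgnoredException.addIgnoredException("javax.net.ssl.SSLException");
        try {
          // ... drive the connection attempt that logs SSLException ...
        } finally {
          ignored.remove();
        }
      }

      @Override
      protected final void preTearDown() throws Exception {
        // Per-test cleanup that previously lived in tearDown2();
        // the framework's own teardown still runs afterwards.
      }
    }
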

[59/62] [abbrv] incubator-geode git commit: Merge branch 'develop' into feature/GEODE-17

Posted by je...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c39f8a5f/gemfire-cq/src/test/java/com/gemstone/gemfire/security/ClientCQPostAuthorizationDUnitTest.java
----------------------------------------------------------------------
diff --cc gemfire-cq/src/test/java/com/gemstone/gemfire/security/ClientCQPostAuthorizationDUnitTest.java
index 0000000,e17acc0..3961210
mode 000000,100644..100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/security/ClientCQPostAuthorizationDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/security/ClientCQPostAuthorizationDUnitTest.java
@@@ -1,0 -1,525 +1,526 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one or more
+  * contributor license agreements.  See the NOTICE file distributed with
+  * this work for additional information regarding copyright ownership.
+  * The ASF licenses this file to You under the Apache License, Version 2.0
+  * (the "License"); you may not use this file except in compliance with
+  * the License.  You may obtain a copy of the License at
+  *
+  *      http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package com.gemstone.gemfire.security;
+ 
+ import java.util.Collection;
+ import java.util.HashMap;
+ import java.util.Map;
+ import java.util.Properties;
+ import java.util.Random;
+ 
+ import security.AuthzCredentialGenerator;
+ import security.CredentialGenerator;
+ 
+ import com.gemstone.gemfire.cache.Region;
+ import com.gemstone.gemfire.cache.operations.OperationContext.OperationCode;
+ import com.gemstone.gemfire.cache.query.CqAttributes;
+ import com.gemstone.gemfire.cache.query.CqAttributesFactory;
+ import com.gemstone.gemfire.cache.query.CqException;
+ import com.gemstone.gemfire.cache.query.CqListener;
+ import com.gemstone.gemfire.cache.query.CqQuery;
+ import com.gemstone.gemfire.cache.query.QueryService;
+ import com.gemstone.gemfire.cache.query.SelectResults;
+ import com.gemstone.gemfire.cache.query.cq.dunit.CqQueryTestListener;
+ import com.gemstone.gemfire.cache.query.internal.cq.ClientCQImpl;
+ import com.gemstone.gemfire.cache.query.internal.cq.CqService;
+ import com.gemstone.gemfire.cache.query.internal.cq.InternalCqQuery;
+ import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
+ import com.gemstone.gemfire.internal.AvailablePort;
+ import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+ import com.gemstone.gemfire.internal.logging.InternalLogWriter;
+ import com.gemstone.gemfire.test.dunit.Host;
+ import com.gemstone.gemfire.test.dunit.Invoke;
+ import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+ import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+ import com.gemstone.gemfire.test.dunit.Wait;
+ import com.gemstone.gemfire.test.dunit.WaitCriterion;
++
+ /**
+  * This is for multiuser-authentication
+  * 
+  * @author ashetkar
+  *
+  */
+ public class ClientCQPostAuthorizationDUnitTest extends
+     ClientAuthorizationTestBase {
+ 
+ //  public static final String regionName = "ClientCQPostAuthorizationDUnitTest_region";
+ 
+   public static final Map<String, String> cqNameToQueryStrings = new HashMap<String, String>();
+ 
+   static {
+     cqNameToQueryStrings.put("CQ_0", "SELECT * FROM ");
+     cqNameToQueryStrings.put("CQ_1", "SELECT * FROM ");
+   }
+ 
+   public ClientCQPostAuthorizationDUnitTest(String name) {
+     super(name);
+   }
+ 
+   public void setUp() throws Exception {
+ 
+     super.setUp();
+     getSystem();
+     Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
+       public void run() {
+         getSystem();
+       }
+     });
+ 
+     final Host host = Host.getHost(0);
+     server1 = host.getVM(0);
+     server2 = host.getVM(1);
+     client1 = host.getVM(2);
+     client2 = host.getVM(3);
+ 
+     server1.invoke(SecurityTestUtil.class, "registerExpectedExceptions",
+         new Object[] { serverExpectedExceptions });
+     server2.invoke(SecurityTestUtil.class, "registerExpectedExceptions",
+         new Object[] { serverExpectedExceptions });
+     client2.invoke(SecurityTestUtil.class, "registerExpectedExceptions",
+         new Object[] { clientExpectedExceptions });
+     SecurityTestUtil.registerExpectedExceptions(clientExpectedExceptions);
+   }
+ 
+   @Override
+   protected final void preTearDown() throws Exception {
+     client1.invoke(SecurityTestUtil.class, "closeCache");
+     client2.invoke(SecurityTestUtil.class, "closeCache");
+     server1.invoke(SecurityTestUtil.class, "closeCache");
+     server2.invoke(SecurityTestUtil.class, "closeCache");
+   }
+ 
+   public void testAllowCQForAllMultiusers() throws Exception {
+     /*
+      * Start a server
+      * Start a client1 with two users with valid credentials and post-authz'ed for CQ
+      * Each user registers a unique CQ
+      * Client2 does some operations on the region which satisfies both the CQs
+      * Validate that listeners for both the CQs are invoked.
+      */
+     doStartUp(Integer.valueOf(2), Integer.valueOf(5), new Boolean[] {true,
+       true});
+   }
+ 
+   public void testDisallowCQForAllMultiusers() throws Exception {
+     /*
+      * Start a server
+      * Start a client1 with two users with valid credentials but not post-authz'ed for CQ
+      * Each user registers a unique CQ
+      * Client2 does some operations on the region which satisfies both the CQs
+      * Validate that listeners for none of the CQs are invoked.
+      */
+     doStartUp(Integer.valueOf(2), Integer.valueOf(5), new Boolean[] {false,
+       false});
+   }
+ 
+   public void testDisallowCQForSomeMultiusers() throws Exception {
+     /*
+      * Start a server
+      * Start a client1 with two users with valid credentials
+      * User1 is post-authz'ed for CQ but user2 is not.
+      * Each user registers a unique CQ
+      * Client2 does some operations on the region which satisfies both the CQs
+      * Validate that listener for User1's CQ is invoked but that for User2's CQ is not invoked.
+      */
+     doStartUp(Integer.valueOf(2), Integer.valueOf(5), new Boolean[] {true,
+         false});
+   }
+ 
+   public void testAllowCQForAllMultiusersWithFailover() throws Exception {
+     /*
+      * Start a server1
+      * Start a client1 with two users with valid credentials and post-authz'ed for CQ
+      * Each user registers a unique CQ
+      * Client2 does some operations on the region which satisfies both the CQs
+      * Validate that listeners for both the CQs are invoked.
+      * Start server2 and shutdown server1
+      * Client2 does some operations on the region which satisfies both the CQs
+      * Validate that listeners for both the CQs get updates.
+      */
+     doStartUp(Integer.valueOf(2), Integer.valueOf(5), new Boolean[] {true,
+       true}, Boolean.TRUE);
+   }
+ 
+   public void doStartUp(Integer numOfUsers, Integer numOfPuts,
+       Boolean[] postAuthzAllowed) throws Exception {
+     doStartUp(numOfUsers, numOfPuts, postAuthzAllowed, Boolean.FALSE /* failover */);
+   }
+ 
+   public void doStartUp(Integer numOfUsers, Integer numOfPuts,
+       Boolean[] postAuthzAllowed, Boolean failover) throws Exception {
+       AuthzCredentialGenerator gen = this.getXmlAuthzGenerator();
+       CredentialGenerator cGen = gen.getCredentialGenerator();
+       Properties extraAuthProps = cGen.getSystemProperties();
+       Properties javaProps = cGen.getJavaProperties();
+       Properties extraAuthzProps = gen.getSystemProperties();
+       String authenticator = cGen.getAuthenticator();
+       String accessor = gen.getAuthorizationCallback();
+       String authInit = cGen.getAuthInit();
+       TestAuthzCredentialGenerator tgen = new TestAuthzCredentialGenerator(gen);
+ 
+       Properties serverProps = buildProperties(authenticator, accessor, true,
+           extraAuthProps, extraAuthzProps);
+ 
+       Properties opCredentials;
+       cGen = tgen.getCredentialGenerator();
+       Properties javaProps2 = null;
+       if (cGen != null) {
+         javaProps2 = cGen.getJavaProperties();
+       }
+ 
+       int[] indices = new int[numOfPuts];
+       for (int index = 0; index < numOfPuts; ++index) {
+         indices[index] = index;
+       }
+ 
+       Random rnd = new Random();
+       Properties[] authProps = new Properties[numOfUsers];
+       for (int i = 0; i < numOfUsers; i++) {
+         int rand = rnd.nextInt(100) + 1;
+         if (postAuthzAllowed[i]) {
+           opCredentials = tgen.getAllowedCredentials(new OperationCode[] {
+               OperationCode.EXECUTE_CQ, OperationCode.GET}, // For callback, GET should be allowed
+               new String[] {regionName}, indices, rand);
+ //          authProps[i] = gen.getAllowedCredentials(
+ //              new OperationCode[] {OperationCode.EXECUTE_CQ},
+ //              new String[] {regionName}, rnd.nextInt(100) + 1);
+         } else {
+           opCredentials = tgen.getDisallowedCredentials(new OperationCode[] {
+               OperationCode.GET}, // For callback, GET should be disallowed
+               new String[] {regionName}, indices, rand);
+ //          authProps[i] = gen.getDisallowedCredentials(
+ //              new OperationCode[] {OperationCode.EXECUTE_CQ},
+ //              new String[] {regionName}, rnd.nextInt(100) + 1);
+         }
+         authProps[i] = SecurityTestUtil.concatProperties(new Properties[] {
+             opCredentials, extraAuthProps, extraAuthzProps});
+       }
+ 
+       // Get ports for the servers
+       Integer port1 = Integer.valueOf(AvailablePort
+           .getRandomAvailablePort(AvailablePort.SOCKET));
+       Integer port2 = Integer.valueOf(AvailablePort
+           .getRandomAvailablePort(AvailablePort.SOCKET));
+       Integer locatorPort = Integer.valueOf(AvailablePort
+           .getRandomAvailablePort(AvailablePort.SOCKET));
+       // Close down any running servers
+       server1.invoke(SecurityTestUtil.class, "closeCache");
+       server2.invoke(SecurityTestUtil.class, "closeCache");
+ 
+       server1.invoke(ClientCQPostAuthorizationDUnitTest.class,
+           "createServerCache", new Object[] {serverProps, javaProps, locatorPort, port1});
+       client1.invoke(ClientCQPostAuthorizationDUnitTest.class,
+           "createClientCache", new Object[] {javaProps2, authInit, authProps,
+               new Integer[] {port1, port2}, numOfUsers, postAuthzAllowed});
+       client2.invoke(ClientCQPostAuthorizationDUnitTest.class,
+           "createClientCache", new Object[] {javaProps2, authInit, authProps,
+               new Integer[] {port1, port2}, numOfUsers, postAuthzAllowed});
+ 
+       client1.invoke(ClientCQPostAuthorizationDUnitTest.class, "createCQ",
+           new Object[] {numOfUsers});
+       client1.invoke(ClientCQPostAuthorizationDUnitTest.class, "executeCQ",
+           new Object[] {numOfUsers, new Boolean[] {false, false}, numOfPuts,
+               new String[numOfUsers], postAuthzAllowed});
+ 
+       client2.invoke(ClientCQPostAuthorizationDUnitTest.class, "doPuts",
+           new Object[] {numOfPuts, Boolean.TRUE/* put last key */});
+       if (!postAuthzAllowed[0]) {
+         // There is no point waiting as no user is authorized to receive cq events.
+         try {Thread.sleep(1000);} catch (InterruptedException ie) {}
+       } else {
+         client1.invoke(ClientCQPostAuthorizationDUnitTest.class,
+             "waitForLastKey", new Object[] {Integer.valueOf(0)});
+         if (postAuthzAllowed[1]) {
+           client1.invoke(ClientCQPostAuthorizationDUnitTest.class,
+               "waitForLastKey", new Object[] {Integer.valueOf(1)});
+         }
+       }
+       client1.invoke(ClientCQPostAuthorizationDUnitTest.class,
+           "checkCQListeners", new Object[] {numOfUsers, postAuthzAllowed,
+               numOfPuts + 1/* last key */, 0, !failover});
+       if (failover) {
+         server2.invoke(ClientCQPostAuthorizationDUnitTest.class,
+             "createServerCache", new Object[] {serverProps, javaProps, locatorPort, port2});
+         server1.invoke(SecurityTestUtil.class, "closeCache");
+ 
+         // Allow time for client1 to register its CQs on server2
+         server2.invoke(ClientCQPostAuthorizationDUnitTest.class,
+             "allowCQsToRegister", new Object[] {Integer.valueOf(2)});
+ 
+         client2.invoke(ClientCQPostAuthorizationDUnitTest.class, "doPuts",
+             new Object[] {numOfPuts, Boolean.TRUE/* put last key */});
+         client1.invoke(ClientCQPostAuthorizationDUnitTest.class,
+             "waitForLastKeyUpdate", new Object[] {Integer.valueOf(0)});
+         client1.invoke(ClientCQPostAuthorizationDUnitTest.class,
+             "checkCQListeners", new Object[] {numOfUsers, postAuthzAllowed,
+                 numOfPuts + 1/* last key */, numOfPuts + 1/* last key */,
+                 Boolean.TRUE});
+       }
+   }
+ 
+   public static void createServerCache(Properties serverProps,
+       Properties javaProps, Integer serverPort) {
+     Integer locatorPort = Integer.valueOf(AvailablePort
+         .getRandomAvailablePort(AvailablePort.SOCKET));
+     SecurityTestUtil.createCacheServer((Properties)serverProps, javaProps,
+         locatorPort, null, serverPort, Boolean.TRUE, Integer.valueOf(
+             SecurityTestUtil.NO_EXCEPTION));
+   }
+ 
+   public static void createServerCache(Properties serverProps,
+         Properties javaProps, Integer locatorPort, Integer serverPort) {
+     SecurityTestUtil.createCacheServer((Properties)serverProps, javaProps,
+         locatorPort, null, serverPort, Boolean.TRUE, Integer.valueOf(
+             SecurityTestUtil.NO_EXCEPTION));
+   }
+ 
+   public static void createClientCache(Properties javaProps, String authInit,
+       Properties[] authProps, Integer ports[], Integer numOfUsers,
+       Boolean[] postAuthzAllowed) {
+     SecurityTestUtil.createCacheClientForMultiUserMode(numOfUsers, authInit,
+         authProps, javaProps, ports, null, Boolean.FALSE,
+         SecurityTestUtil.NO_EXCEPTION);
+   }
+ 
+   public static void createCQ(Integer num) {
+     for (int i = 0; i < num; i++) {
+       QueryService cqService = SecurityTestUtil.proxyCaches[i].getQueryService();
+       String cqName = "CQ_" + i;
+       String queryStr = cqNameToQueryStrings.get(cqName)
+           + SecurityTestUtil.proxyCaches[i].getRegion(regionName).getFullPath();
+       // Create CQ Attributes.
+       CqAttributesFactory cqf = new CqAttributesFactory();
+       CqListener[] cqListeners = {new CqQueryTestListener(LogWriterUtils.getLogWriter())};
+       ((CqQueryTestListener)cqListeners[0]).cqName = cqName;
+ 
+       cqf.initCqListeners(cqListeners);
+       CqAttributes cqa = cqf.create();
+ 
+       // Create CQ.
+       try {
+         CqQuery cq1 = cqService.newCq(cqName, queryStr, cqa);
+         assertTrue("newCq() state mismatch", cq1.getState().isStopped());
+       } catch (Exception ex) {
+         AssertionError err = new AssertionError("Failed to create CQ " + cqName
+             + " . ");
+         err.initCause(ex);
+         LogWriterUtils.getLogWriter().info("CqService is :" + cqService, err);
+         throw err;
+       }
+     }
+   }
+ 
+   public static void executeCQ(Integer num, Boolean[] initialResults,
+       Integer expectedResultsSize, String[] expectedErr, Boolean[] postAuthzAllowed) {
+     InternalLogWriter logWriter = InternalDistributedSystem.getStaticInternalLogWriter();
+     for (int i = 0; i < num; i++) {
+       try {
+         if (expectedErr[i] != null) {
+           logWriter.info(
+               "<ExpectedException action=add>" + expectedErr[i]
+                   + "</ExpectedException>");
+         }
+         CqQuery cq1 = null;
+         String cqName = "CQ_" + i;
+         String queryStr = cqNameToQueryStrings.get(cqName)
+             + SecurityTestUtil.proxyCaches[i].getRegion(regionName)
+                 .getFullPath();
+         QueryService cqService = SecurityTestUtil.proxyCaches[i]
+             .getQueryService();
+ 
+         // Get CqQuery object.
+         try {
+           cq1 = cqService.getCq(cqName);
+           if (cq1 == null) {
+             LogWriterUtils.getLogWriter().info(
+                 "Failed to get CqQuery object for CQ name: " + cqName);
+             fail("Failed to get CQ " + cqName);
+           } else {
+             LogWriterUtils.getLogWriter().info("Obtained CQ, CQ name: " + cq1.getName());
+             assertTrue("newCq() state mismatch", cq1.getState().isStopped());
+           }
+         } catch (Exception ex) {
+           LogWriterUtils.getLogWriter().info("CqService is :" + cqService);
+           LogWriterUtils.getLogWriter().error(ex);
+           AssertionError err = new AssertionError("Failed to execute CQ "
+               + cqName);
+           err.initCause(ex);
+           throw err;
+         }
+ 
+         if (initialResults[i]) {
+           SelectResults cqResults = null;
+ 
+           try {
+             cqResults = cq1.executeWithInitialResults();
+           } catch (CqException ce) {
+             if (ce.getCause() instanceof NotAuthorizedException && !postAuthzAllowed[i]) {
+               LogWriterUtils.getLogWriter().info("Got expected exception for CQ " + cqName);
+             } else {
+               LogWriterUtils.getLogWriter().info("CqService is: " + cqService);
+               ce.printStackTrace();
+               AssertionError err = new AssertionError("Failed to execute CQ "
+                   + cqName);
+               err.initCause(ce);
+               throw err;
+             }
+           } catch (Exception ex) {
+             LogWriterUtils.getLogWriter().info("CqService is: " + cqService);
+             ex.printStackTrace();
+             AssertionError err = new AssertionError("Failed to execute CQ "
+                 + cqName);
+             err.initCause(ex);
+             throw err;
+           }
+           LogWriterUtils.getLogWriter().info("initial result size = " + cqResults.size());
+           assertTrue("executeWithInitialResults() state mismatch", cq1
+               .getState().isRunning());
+           if (expectedResultsSize >= 0) {
+             assertEquals("unexpected results size", expectedResultsSize
+                 .intValue(), cqResults.size());
+           }
+         } else {
+           try {
+             cq1.execute();
+           } catch (CqException ce) {
+             if (ce.getCause() instanceof NotAuthorizedException && !postAuthzAllowed[i]) {
+               LogWriterUtils.getLogWriter().info("Got expected exception for CQ " + cqName);
+             } else {
+               LogWriterUtils.getLogWriter().info("CqService is: " + cqService);
+               ce.printStackTrace();
+               AssertionError err = new AssertionError("Failed to execute CQ "
+                   + cqName);
+               err.initCause(ce);
+               throw err;
+             }
+           } catch (Exception ex) {
+             AssertionError err = new AssertionError("Failed to execute CQ "
+                 + cqName);
+             err.initCause(ex);
+             if (expectedErr == null) {
+               LogWriterUtils.getLogWriter().info("CqService is: " + cqService, err);
+             }
+             throw err;
+           }
+           assertTrue("execute() state mismatch", cq1.getState().isRunning() == postAuthzAllowed[i]);
+         }
+       } finally {
+         if (expectedErr[i] != null) {
+           logWriter.info(
+               "<ExpectedException action=remove>" + expectedErr[i]
+                   + "</ExpectedException>");
+         }
+       }
+     }
+   }
+ 
+   public static void doPuts(Integer num, Boolean putLastKey) {
+ //    Region region = GemFireCache.getInstance().getRegion(regionName);
+     Region region = SecurityTestUtil.proxyCaches[0].getRegion(regionName);
+     for (int i = 0; i < num; i++) {
+       region.put("CQ_key"+i, "CQ_value"+i);
+     }
+     if (putLastKey) {
+       region.put("LAST_KEY", "LAST_KEY");
+     }
+   }
+ 
+   public static void putLastKey() {
+     Region region = GemFireCacheImpl.getInstance().getRegion(regionName);
+     region.put("LAST_KEY", "LAST_KEY");
+   }
+ 
+   public static void waitForLastKey(Integer cqIndex) {
+     String cqName = "CQ_" + cqIndex;
+     QueryService qService = SecurityTestUtil.proxyCaches[cqIndex].getQueryService();
+     ClientCQImpl cqQuery = (ClientCQImpl)qService.getCq(cqName);
+     ((CqQueryTestListener)cqQuery.getCqListeners()[0])
+         .waitForCreated("LAST_KEY");
+ //    WaitCriterion wc = new WaitCriterion() {
+ //      public boolean done() {
+ //        Region region = GemFireCache.getInstance().getRegion(regionName);
+ //        Region.Entry entry = region.getEntry("LAST_KEY");
+ //        if (entry != null && entry.getValue() != null) {
+ //          return false;
+ //        } else if (entry.getValue() != null) {
+ //          return true;
+ //        }
+ //        return false;
+ //      }
+ //      public String description() {
+ //        return "Last key not received.";
+ //      }
+ //    };
+ //    DistributedTestCase.waitForCriterion(wc, 60 * 1000, 100, false);
+   }
+ 
+   public static void waitForLastKeyUpdate(Integer cqIndex) {
+     String cqName = "CQ_" + cqIndex;
+     QueryService qService = SecurityTestUtil.proxyCaches[cqIndex].getQueryService();
+     ClientCQImpl cqQuery = (ClientCQImpl)qService.getCq(cqName);
+     ((CqQueryTestListener)cqQuery.getCqListeners()[0])
+         .waitForUpdated("LAST_KEY");
+   }
+ 
+   public static void allowCQsToRegister(Integer number) {
+     final int num = number;
+     WaitCriterion wc = new WaitCriterion() {
+       public boolean done() {
+         CqService cqService = GemFireCacheImpl.getInstance().getCqService();
+         cqService.start();
+         Collection<? extends InternalCqQuery> cqs = cqService.getAllCqs();
+         if (cqs != null) {
+           return cqs.size() >= num;
+         } else {
+           return false;
+         }
+       }
+ 
+       public String description() {
+         return "Waited for " + num
+             + " CQs to be registered on this server.";
+       }
+     };
+     Wait.waitForCriterion(wc, 60 * 1000, 100, false);
+   }
+ 
+   public static void checkCQListeners(Integer numOfUsers,
+       Boolean[] expectedListenerInvocation, Integer createEventsSize,
+       Integer updateEventsSize, Boolean closeCache) {
+     for (int i = 0; i < numOfUsers; i++) {
+       String cqName = "CQ_" + i;
+       QueryService qService = SecurityTestUtil.proxyCaches[i].getQueryService();
+       ClientCQImpl cqQuery = (ClientCQImpl)qService.getCq(cqName);
+       if (expectedListenerInvocation[i]) {
+         for (CqListener listener : cqQuery.getCqListeners()) {
+           assertEquals(createEventsSize.intValue(),
+               ((CqQueryTestListener)listener).getCreateEventCount());
+           assertEquals(updateEventsSize.intValue(),
+               ((CqQueryTestListener)listener).getUpdateEventCount());
+         }
+       } else {
+         for (CqListener listener : cqQuery.getCqListeners()) {
+           assertEquals(0, ((CqQueryTestListener)listener).getTotalEventCount());
+         }
+       }
+       if (closeCache) {
+         SecurityTestUtil.proxyCaches[i].close();
+       }
+     }
+   }
+ }

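[Editor's note] allowCQsToRegister(...) above polls with the WaitCriterion / Wait.waitForCriterion(...) pair that replaced the old inherited waitForCriterion(...). For reference, a self-contained sketch of that idiom follows; the polled counter and the timeout values are placeholders, and the final boolean mirrors the usage above (no failure thrown on timeout).

    import java.util.concurrent.atomic.AtomicInteger;

    import com.gemstone.gemfire.test.dunit.Wait;
    import com.gemstone.gemfire.test.dunit.WaitCriterion;

    public class WaitCriterionSketch {

      // Blocks until the counter reaches the expected value, polling every
      // 100 ms for up to 60 seconds, as allowCQsToRegister(...) does for CQs.
      static void awaitCount(final AtomicInteger counter, final int expected) {
        WaitCriterion wc = new WaitCriterion() {
          public boolean done() {
            return counter.get() >= expected;
          }
          public String description() {
            return "waited for counter to reach " + expected;
          }
        };
        Wait.waitForCriterion(wc, 60 * 1000, 100, false);
      }
    }
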
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c39f8a5f/gemfire-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/distributed/LuceneFunctionReadPathDUnitTest.java
----------------------------------------------------------------------
diff --cc gemfire-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/distributed/LuceneFunctionReadPathDUnitTest.java
index 4ba1fc7,f24b136..48aee7b
--- a/gemfire-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/distributed/LuceneFunctionReadPathDUnitTest.java
+++ b/gemfire-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/distributed/LuceneFunctionReadPathDUnitTest.java
@@@ -45,12 -45,11 +45,12 @@@ import com.gemstone.gemfire.cache.lucen
  import com.gemstone.gemfire.cache30.CacheTestCase;
  import com.gemstone.gemfire.internal.cache.EvictionAttributesImpl;
  import com.gemstone.gemfire.internal.cache.PartitionedRegion;
+ import com.gemstone.gemfire.test.dunit.Host;
+ import com.gemstone.gemfire.test.dunit.SerializableCallable;
+ import com.gemstone.gemfire.test.dunit.VM;
  import com.gemstone.gemfire.test.junit.categories.DistributedTest;
  
- import dunit.Host;
- import dunit.SerializableCallable;
- import dunit.VM;
 +
  @Category(DistributedTest.class)
  public class LuceneFunctionReadPathDUnitTest extends CacheTestCase {
    private static final String INDEX_NAME = "index";

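[Editor's note] The Lucene hunk above only swaps the old top-level dunit.* imports for the relocated com.gemstone.gemfire.test.dunit.* types. Below is a minimal, hypothetical sketch of driving work in another test VM with those types, assuming the usual VM.invoke(SerializableCallable) overload; the property read inside call() is just a stand-in for real test logic.

    import com.gemstone.gemfire.test.dunit.Host;
    import com.gemstone.gemfire.test.dunit.SerializableCallable;
    import com.gemstone.gemfire.test.dunit.VM;

    public class RemoteInvokeSketch {

      // Runs a serializable callable in VM 1 of host 0 and returns its result.
      static Object readRemoteUserDir() {
        Host host = Host.getHost(0);
        VM vm = host.getVM(1);
        return vm.invoke(new SerializableCallable() {
          public Object call() throws Exception {
            // Executed inside the remote DUnit VM.
            return System.getProperty("user.dir");
          }
        });
      }
    }
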
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c39f8a5f/gemfire-web-api/build.gradle
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c39f8a5f/gemfire-web-api/src/test/java/com/gemstone/gemfire/rest/internal/web/controllers/RestAPIsAndInterOpISDUnitTest.java
----------------------------------------------------------------------
diff --cc gemfire-web-api/src/test/java/com/gemstone/gemfire/rest/internal/web/controllers/RestAPIsAndInterOpISDUnitTest.java
index 460f81a,0000000..98362ef
mode 100644,000000..100644
--- a/gemfire-web-api/src/test/java/com/gemstone/gemfire/rest/internal/web/controllers/RestAPIsAndInterOpISDUnitTest.java
+++ b/gemfire-web-api/src/test/java/com/gemstone/gemfire/rest/internal/web/controllers/RestAPIsAndInterOpISDUnitTest.java
@@@ -1,996 -1,0 +1,996 @@@
 +package com.gemstone.gemfire.rest.internal.web.controllers;
 +
 +import java.io.BufferedReader;
 +import java.io.IOException;
 +import java.io.InputStream;
 +import java.io.InputStreamReader;
 +import java.util.Arrays;
 +import java.util.Calendar;
 +import java.util.HashMap;
 +import java.util.Map;
 +import java.util.Properties;
 +
 +import com.gemstone.gemfire.cache.server.CacheServer;
++import com.gemstone.gemfire.test.dunit.Host;
++import com.gemstone.gemfire.test.dunit.NetworkUtils;
++import com.gemstone.gemfire.test.dunit.SerializableCallable;
++import com.gemstone.gemfire.test.dunit.SerializableRunnable;
++import com.gemstone.gemfire.test.dunit.VM;
 +import org.apache.http.HttpEntity;
 +import org.apache.http.client.ClientProtocolException;
 +import org.apache.http.client.methods.CloseableHttpResponse;
 +import org.apache.http.client.methods.HttpDelete;
 +import org.apache.http.client.methods.HttpGet;
 +import org.apache.http.client.methods.HttpPost;
 +import org.apache.http.client.methods.HttpPut;
 +import org.apache.http.entity.StringEntity;
 +import org.apache.http.impl.client.CloseableHttpClient;
 +import org.apache.http.impl.client.HttpClients;
 +//import com.gemstone.gemfire.rest.internal.web.util.DateTimeUtils;
 +import org.json.JSONArray;
 +import org.json.JSONException;
 +import org.json.JSONObject;
 +
 +import util.TestException;
 +
 +import com.gemstone.gemfire.cache.AttributesFactory;
 +import com.gemstone.gemfire.cache.Cache;
 +import com.gemstone.gemfire.cache.CacheFactory;
 +import com.gemstone.gemfire.cache.DataPolicy;
 +import com.gemstone.gemfire.cache.Region;
 +import com.gemstone.gemfire.cache.RegionAttributes;
 +import com.gemstone.gemfire.cache.RegionFactory;
 +import com.gemstone.gemfire.cache.RegionShortcut;
 +import com.gemstone.gemfire.cache.client.ClientCache;
 +import com.gemstone.gemfire.cache.client.ClientCacheFactory;
 +import com.gemstone.gemfire.cache.client.ClientRegionFactory;
 +import com.gemstone.gemfire.cache.client.ClientRegionShortcut;
 +import com.gemstone.gemfire.cache.client.internal.LocatorTestBase;
 +import com.gemstone.gemfire.cache.server.ServerLoadProbe;
 +import com.gemstone.gemfire.distributed.DistributedSystem;
 +import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 +import com.gemstone.gemfire.internal.AvailablePort;
 +import com.gemstone.gemfire.internal.AvailablePortHelper;
 +import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 +import com.gemstone.gemfire.management.ManagementTestBase;
 +import com.gemstone.gemfire.management.internal.security.CommandTestBase;
 +import com.gemstone.gemfire.pdx.PdxInstance;
 +
- import dunit.Host;
- import dunit.SerializableCallable;
- import dunit.SerializableRunnable;
- import dunit.VM;
- 
 +/**
 + * DUnit test containing inter-operations between a REST client and a GemFire cache client
 + * @author Nilkanth Patel
 + * @since 8.0
 + */
 +
 +public class RestAPIsAndInterOpISDUnitTest extends LocatorTestBase {
 +  
 +  private static final long serialVersionUID = -254776154266339226L;
 +
 +  private ManagementTestBase helper;
 +
 +  public static final String PEOPLE_REGION_NAME = "People";
 +
 +  //private static RestTemplate restTemplate;
 +
 +  private static final String findAllPeopleQuery = "/queries?id=findAllPeople&q=SELECT%20*%20FROM%20/People";
 +  private static final String findPeopleByGenderQuery = "/queries?id=filterByGender&q=SELECT%20*%20from%20/People%20where%20gender=$1";
 +  private static final String findPeopleByLastNameQuery = "/queries?id=filterByLastName&q=SELECT%20*%20from%20/People%20where%20lastName=$1";
 +
 +  private static final String[] PARAM_QUERY_IDS_ARRAY = { "findAllPeople",
 +      "filterByGender", "filterByLastName" };
 +  
 +  final static String QUERY_ARGS = "["
 +      + "{"
 +      + "\"@type\": \"string\","
 +      + "\"@value\": \"Patel\""
 +      + "}"
 +      + "]";
 +
 +  final static String PERSON_AS_JSON_CAS = "{"
 +      + "\"@old\" :" 
 +      + "{"
 +      + "\"@type\": \"com.gemstone.gemfire.rest.internal.web.controllers.Person\","
 +      + "\"id\": 101," + " \"firstName\": \"Mithali\","
 +      + " \"middleName\": \"Dorai\"," + " \"lastName\": \"Raj\","
 +      + " \"birthDate\": \"12/04/1982\"," + "\"gender\": \"FEMALE\"" 
 +      + "},"
 +      + "\"@new\" :" 
 +      + "{"
 +      + "\"@type\": \"com.gemstone.gemfire.rest.internal.web.controllers.Person\","
 +      + "\"id\": 1101," + " \"firstName\": \"Virat\","
 +      + " \"middleName\": \"Premkumar\"," + " \"lastName\": \"Kohli\","
 +      + " \"birthDate\": \"08/11/1988\"," + "\"gender\": \"MALE\"" 
 +      + "}"
 +      + "}";
 +    
 +  final static String PERSON_AS_JSON_REPLACE = "{"
 +      + "\"@type\": \"com.gemstone.gemfire.rest.internal.web.controllers.Person\","
 +      + "\"id\": 501," + " \"firstName\": \"Barack\","
 +      + " \"middleName\": \"Hussein\"," + " \"lastName\": \"Obama\","
 +      + " \"birthDate\": \"04/08/1961\"," + "\"gender\": \"MALE\"" 
 +      + "}";
 +  
 +  private static final String PERSON_LIST_AS_JSON = "[" + "{"
 +      + "\"@type\": \"com.gemstone.gemfire.rest.internal.web.controllers.Person\","
 +      + "\"id\": 3," + " \"firstName\": \"Nishka3\","
 +      + " \"middleName\": \"Nilkanth3\"," + " \"lastName\": \"Patel3\","
 +      + " \"birthDate\": \"07/31/2009\"," + "\"gender\": \"FEMALE\"" + "},"
 +      + "{" + "\"@type\": \"com.gemstone.gemfire.rest.internal.web.controllers.Person\","
 +      + "\"id\": 4," + " \"firstName\": \"Tanay4\","
 +      + " \"middleName\": \"kiran4\"," + " \"lastName\": \"Patel4\","
 +      + " \"birthDate\": \"23/08/2012\"," + "\"gender\": \"MALE\"" + "}," + "{"
 +      + "\"@type\": \"com.gemstone.gemfire.rest.internal.web.controllers.Person\","
 +      + "\"id\": 5," + " \"firstName\": \"Nishka5\","
 +      + " \"middleName\": \"Nilkanth5\"," + " \"lastName\": \"Patel5\","
 +      + " \"birthDate\": \"31/09/2009\"," + "\"gender\": \"FEMALE\"" + "},"
 +      + "{" + "\"@type\": \"com.gemstone.gemfire.rest.internal.web.controllers.Person\","
 +      + "\"id\": 6," + " \"firstName\": \"Tanay6\","
 +      + " \"middleName\": \"Kiran6\"," + " \"lastName\": \"Patel\","
 +      + " \"birthDate\": \"23/08/2012\"," + "\"gender\": \"MALE\"" + "}," + "{"
 +      + "\"@type\": \"com.gemstone.gemfire.rest.internal.web.controllers.Person\","
 +      + "\"id\": 7," + " \"firstName\": \"Nishka7\","
 +      + " \"middleName\": \"Nilkanth7\"," + " \"lastName\": \"Patel\","
 +      + " \"birthDate\": \"31/09/2009\"," + "\"gender\": \"FEMALE\"" + "},"
 +      + "{" + "\"@type\": \"com.gemstone.gemfire.rest.internal.web.controllers.Person\","
 +      + "\"id\": 8," + " \"firstName\": \"Tanay8\","
 +      + " \"middleName\": \"kiran8\"," + " \"lastName\": \"Patel\","
 +      + " \"birthDate\": \"23/08/2012\"," + "\"gender\": \"MALE\"" + "}," + "{"
 +      + "\"@type\": \"com.gemstone.gemfire.rest.internal.web.controllers.Person\","
 +      + "\"id\": 9," + " \"firstName\": \"Nishka9\","
 +      + " \"middleName\": \"Nilkanth9\"," + " \"lastName\": \"Patel\","
 +      + " \"birthDate\": \"31/09/2009\"," + "\"gender\": \"FEMALE\"" + "},"
 +      + "{" + "\"@type\": \"com.gemstone.gemfire.rest.internal.web.controllers.Person\","
 +      + "\"id\": 10," + " \"firstName\": \"Tanay10\","
 +      + " \"middleName\": \"kiran10\"," + " \"lastName\": \"Patel\","
 +      + " \"birthDate\": \"23/08/2012\"," + "\"gender\": \"MALE\"" + "}," + "{"
 +      + "\"@type\": \"com.gemstone.gemfire.rest.internal.web.controllers.Person\","
 +      + "\"id\": 11," + " \"firstName\": \"Nishka11\","
 +      + " \"middleName\": \"Nilkanth11\"," + " \"lastName\": \"Patel\","
 +      + " \"birthDate\": \"31/09/2009\"," + "\"gender\": \"FEMALE\"" + "},"
 +      + "{" + "\"@type\": \"com.gemstone.gemfire.rest.internal.web.controllers.Person\","
 +      + "\"id\": 12," + " \"firstName\": \"Tanay12\","
 +      + " \"middleName\": \"kiran12\"," + " \"lastName\": \"Patel\","
 +      + " \"birthDate\": \"23/08/2012\"," + "\"gender\": \"MALE\"" + "}" + "]";
 +
 +  public RestAPIsAndInterOpISDUnitTest(String name) {
 +    super(name);
 +    this.helper = new ManagementTestBase(name);
 +
 +  }
 +
 +  public void setUp() throws Exception {
 +    disconnectAllFromDS();
 +    super.setUp();
 +  }
 +
-   public void tearDown2() throws Exception {
-     super.tearDown2();
++  @Override
++  public void postTearDown() throws Exception {
 +    disconnectAllFromDS();
 +  }
 +  
 +  public static String startBridgeServerWithRestServiceOnInVM(VM vm, final String[] groups, final String locators, final String[] regions, final ServerLoadProbe probe) {
 +    
 +    final String hostName = vm.getHost().getHostName(); 
 +    final int serverPort = AvailablePortHelper.getRandomAvailableTCPPort();
 +    
 +    //create Cache of given VM and start HTTP service with REST APIs service
 +    new RestAPIsAndInterOpISDUnitTest("temp").startBridgeServer(hostName, serverPort, groups, locators, regions, probe);
 +   
 +    String restEndPoint =  "http://" + hostName + ":" + serverPort + "/gemfire-api/v1";
 +    return restEndPoint;
 +  }
 +  
 +  @SuppressWarnings("deprecation")
 +  protected int startBridgeServer(String hostName, int restServicerPort, final String[] groups, final String locators, final String[] regions, final ServerLoadProbe probe) {
 +            
 +    Properties props = new Properties();
 +    props.setProperty(DistributionConfig.MCAST_PORT_NAME, String.valueOf(0));
 +    props.setProperty(DistributionConfig.LOCATORS_NAME, locators);
 +    props.setProperty(DistributionConfig.START_DEV_REST_API_NAME, "true");
 +    props.setProperty(DistributionConfig.HTTP_SERVICE_BIND_ADDRESS_NAME, hostName);
 +    props.setProperty(DistributionConfig.HTTP_SERVICE_PORT_NAME, String.valueOf(restServicerPort));
 +    
 +    //Add security properties
 +    props.setProperty(DistributionConfig.SECURITY_CLIENT_AUTHENTICATOR_NAME, "com.gemstone.gemfire.rest.internal.web.controllers.CustomRestAPIsAuthenticator.create");
 +    props.setProperty(DistributionConfig.SECURITY_CLIENT_ACCESSOR_NAME, "com.gemstone.gemfire.rest.internal.web.controllers.CustomRestAPIsAuthorization.create");
 +    //props.setProperty(DistributionConfig.SECURITY_CLIENT_ACCESSOR_PP_NAME, "com.gemstone.gemfire.rest.internal.web.controllers.CustomRestAPIsAuthorization.create");
 +    props.setProperty(DistributionConfig.SECURITY_REST_TOKEN_SERVICE_NAME, "com.gemstone.gemfire.rest.internal.web.controllers.DummyTokenService.create");
 +    
 +    DistributedSystem ds = getSystem(props);
 +    Cache cache = CacheFactory.create(ds);
 +    ((GemFireCacheImpl)cache).setReadSerialized(true);
 +    AttributesFactory factory = new AttributesFactory();
 +    
 +    factory.setEnableBridgeConflation(true);
 +    factory.setDataPolicy(DataPolicy.REPLICATE);
 +    RegionAttributes attrs = factory.create();
 +    for(int i = 0; i < regions.length; i++) {
 +      cache.createRegion(regions[i], attrs);
 +    }
 +    
 +    CacheServer server = cache.addCacheServer();
 +    final int serverPort = AvailablePortHelper.getRandomAvailableTCPPort();
 +    server.setPort(serverPort);
 +    server.setGroups(groups);
 +    server.setLoadProbe(probe);
 +    try {
 +      server.start();
 +    } catch (IOException e) {
 +      e.printStackTrace();
 +    }
 +    remoteObjects.put(CACHE_KEY, cache);
 +    return new Integer(serverPort);
 +  }
 +  
 +  public static void doPutsInClientCache() {
 +    ClientCache cache = GemFireCacheImpl.getInstance();
 +    assertNotNull(cache);
 +    Region<String, Object> region = cache.getRegion(PEOPLE_REGION_NAME);
 +   
 +    //put person object
 +    final Person person1 = new Person(101L, "Mithali", "Dorai", "Raj", DateTimeUtils.createDate(1982, Calendar.DECEMBER, 4), Gender.FEMALE);
 +    final Person person2 = new Person(102L, "Sachin", "Ramesh", "Tendulkar", DateTimeUtils.createDate(1975, Calendar.DECEMBER, 14), Gender.MALE);
 +    final Person person3 = new Person(103L, "Saurabh", "Baburav", "Ganguly", DateTimeUtils.createDate(1972, Calendar.AUGUST, 29), Gender.MALE);
 +    final Person person4 = new Person(104L, "Rahul", "subrymanyam", "Dravid", DateTimeUtils.createDate(1979, Calendar.MARCH, 17), Gender.MALE);
 +    final Person person5 = new Person(105L, "Jhulan", "Chidambaram", "Goswami", DateTimeUtils.createDate(1983, Calendar.NOVEMBER, 25), Gender.FEMALE);
 +   
 +    region.put("1", person1);
 +    region.put("2", person2);
 +    region.put("3", person3);
 +    region.put("4", person4);
 +    region.put("5", person5);
 +    
 +    final Person person6 = new Person(101L, "Rahul", "Rajiv", "Gndhi", DateTimeUtils.createDate(1970, Calendar.MAY, 14), Gender.MALE);
 +    final Person person7 = new Person(102L, "Narendra", "Damodar", "Modi", DateTimeUtils.createDate(1945, Calendar.DECEMBER, 24), Gender.MALE);
 +    final Person person8 = new Person(103L, "Atal", "Bihari", "Vajpayee", DateTimeUtils.createDate(1920, Calendar.AUGUST, 9), Gender.MALE);
 +    final Person person9 = new Person(104L, "Soniya", "Rajiv", "Gandhi", DateTimeUtils.createDate(1929, Calendar.MARCH, 27), Gender.FEMALE);
 +    final Person person10 = new Person(104L, "Priyanka", "Robert", "Gandhi", DateTimeUtils.createDate(1973, Calendar.APRIL, 15), Gender.FEMALE);
 +    
 +    final Person person11 = new Person(104L, "Murali", "Manohar", "Joshi", DateTimeUtils.createDate(1923, Calendar.APRIL, 25), Gender.MALE);
 +    final Person person12 = new Person(104L, "Lalkrishna", "Parmhansh", "Advani", DateTimeUtils.createDate(1910, Calendar.JANUARY, 01), Gender.MALE);
 +    final Person person13 = new Person(104L, "Shushma", "kumari", "Swaraj", DateTimeUtils.createDate(1943, Calendar.AUGUST, 10), Gender.FEMALE);
 +    final Person person14 = new Person(104L, "Arun", "raman", "jetly", DateTimeUtils.createDate(1942, Calendar.OCTOBER, 27), Gender.MALE);
 +    final Person person15 = new Person(104L, "Amit", "kumar", "shah", DateTimeUtils.createDate(1958, Calendar.DECEMBER, 21), Gender.MALE);
 +    final Person person16 = new Person(104L, "Shila", "kumari", "Dixit", DateTimeUtils.createDate(1927, Calendar.FEBRUARY, 15), Gender.FEMALE);
 +    
 +    Map<String, Object> userMap = new HashMap<String, Object>();
 +    userMap.put("6", person6);
 +    userMap.put("7", person7);
 +    userMap.put("8", person8);
 +    userMap.put("9", person9);
 +    userMap.put("10", person10);
 +    userMap.put("11", person11);
 +    userMap.put("12", person12);
 +    userMap.put("13", person13);
 +    userMap.put("14", person14);
 +    userMap.put("15", person15);
 +    userMap.put("16", person16);
 +    
 +    region.putAll(userMap);
 +    
 +    if (cache != null)
 +      cache.getLogger().info("Gemfire Cache Client: Puts successfully done");
 +  }
 +
 +  public static void doQueryOpsUsingRestApis(String restEndpoint) {
 +    String currentQueryOp = null;
 +    try {
 +      // Query TestCase-1 :: Prepare parameterized Queries
 +      {
 +        currentQueryOp = "findAllPeopleQuery";
 +        CloseableHttpClient httpclient = HttpClients.createDefault();
 +        HttpPost post = new HttpPost(restEndpoint + findAllPeopleQuery);
 +        post.addHeader("Content-Type", "application/json");
 +        post.addHeader("Accept", "application/json");
 +        post.addHeader("security-username", "admin");
 +        post.addHeader("security-password", "admin");
 +        
 +        CloseableHttpResponse createNamedQueryResponse = httpclient.execute(post);
 +        assertEquals(createNamedQueryResponse.getStatusLine().getStatusCode(), 201);
 +        assertNotNull(createNamedQueryResponse.getEntity());
 +        createNamedQueryResponse.close();        
 +        
 +
 +        post = new HttpPost(restEndpoint + findPeopleByGenderQuery);
 +        post.addHeader("Content-Type", "application/json");
 +        post.addHeader("Accept", "application/json");
 +        post.addHeader("security-username", "admin");
 +        post.addHeader("security-password", "admin");
 +        
 +        createNamedQueryResponse = httpclient.execute(post);
 +        assertEquals(createNamedQueryResponse.getStatusLine().getStatusCode(), 201);
 +        assertNotNull(createNamedQueryResponse.getEntity());
 +        createNamedQueryResponse.close();
 +        
 +
 +        post = new HttpPost(restEndpoint + findPeopleByLastNameQuery);
 +        post.addHeader("Content-Type", "application/json");
 +        post.addHeader("Accept", "application/json");
 +        post.addHeader("security-username", "admin");
 +        post.addHeader("security-password", "admin");
 +        createNamedQueryResponse = httpclient.execute(post);
 +        assertEquals(createNamedQueryResponse.getStatusLine().getStatusCode(), 201);
 +        assertNotNull(createNamedQueryResponse.getEntity());
 +        createNamedQueryResponse.close();
 +      }
 +      
 +      // Query TestCase-2 :: List all parameterized queries
 +      {
 +        currentQueryOp = "listAllQueries";
 +        HttpGet get = new HttpGet(restEndpoint + "/queries");
 +        get.addHeader("security-username", "admin");
 +        get.addHeader("security-password", "admin");
 +        CloseableHttpClient httpclient = HttpClients.createDefault();
 +        CloseableHttpResponse listAllQueriesResponse = httpclient.execute(get);
 +        assertEquals(listAllQueriesResponse.getStatusLine().getStatusCode(), 200);
 +        assertNotNull(listAllQueriesResponse.getEntity());
 +        
 +        HttpEntity entity = listAllQueriesResponse.getEntity();
 +        InputStream content = entity.getContent();
 +        BufferedReader reader = new BufferedReader(new InputStreamReader(
 +            content));
 +        String line;
 +        StringBuffer sb = new StringBuffer();
 +        while ((line = reader.readLine()) != null) {
 +          sb.append(line);
 +        }
 +        listAllQueriesResponse.close();
 +      
 +        // Check whether received response contains expected query IDs.
 +
 +        JSONObject jsonObject = new JSONObject(sb.toString());
 +        JSONArray jsonArray = jsonObject.getJSONArray("queries");
 +        for (int i = 0; i < jsonArray.length(); i++) {
 +          assertTrue(
 +            "PREPARE_PARAMETERIZED_QUERY: function IDs are not matched",
 +            Arrays.asList(PARAM_QUERY_IDS_ARRAY).contains(
 +                jsonArray.getJSONObject(i).getString("id")));
 +        }
 +      }  
 +      
 +      // Query TestCase-3 :: Run the specified named query passing in scalar values for query parameters.
 +      {
 +        currentQueryOp = "filterByLastName";
 +        CloseableHttpClient httpclient = HttpClients.createDefault();
 +        HttpPost post = new HttpPost(restEndpoint + "/queries/filterByLastName");
 +        post.addHeader("Content-Type", "application/json");
 +        post.addHeader("Accept", "application/json");
 +        post.addHeader("security-username", "admin");
 +        post.addHeader("security-password", "admin");
 +        
 +        StringEntity entity = new StringEntity(QUERY_ARGS);       
 +        post.setEntity(entity);
 +        CloseableHttpResponse runNamedQueryResponse = httpclient.execute(post);
 +
 +        assertEquals(200, runNamedQueryResponse.getStatusLine().getStatusCode());
 +        assertNotNull(runNamedQueryResponse.getEntity());
 +      }
 +    } catch ( Exception e ) {
 +      throw new TestException(CommandTestBase.getStackTrace(e));
 +    }
 +  }
 +  
 +  public static void verifyUpdatesInClientCache() {
 +    ClientCache cache = GemFireCacheImpl.getInstance();
 +    assertNotNull(cache);
 +    Region<String, Object> region = cache.getRegion(PEOPLE_REGION_NAME);
 +    
 +    {
 +      Person expectedPerson = new Person(3L, "Nishka3", "Nilkanth3", "Patel3", DateTimeUtils.createDate(2009, Calendar.JULY, 31), Gender.FEMALE );
 +      Object value = region.get("3");
 +      if (value instanceof PdxInstance) {
 +        PdxInstance pi3  = (PdxInstance) value;
 +        Person actualPerson = (Person) pi3.getObject();
 +        assertEquals(actualPerson.getId(), expectedPerson.getId());
 +        assertEquals(actualPerson.getFirstName(), expectedPerson.getFirstName());
 +        assertEquals(actualPerson.getMiddleName(), expectedPerson.getMiddleName());
 +        assertEquals(actualPerson.getLastName(), expectedPerson.getLastName());
 +        assertEquals(actualPerson.getBirthDate(), expectedPerson.getBirthDate());
 +        assertEquals(actualPerson.getGender(), expectedPerson.getGender());
 +      } else if (value instanceof Person) {
 +        fail("VerifyUpdatesInClientCache, Get on key 3, Expected to get value of type PdxInstance ");
 +      }
 +    }
 +     
 +    
 +    //TODO: uncomment this once the following issue with put?op=CAS is fixed, or document the issue.
 +    // CAS functionality is not working in the following test case:
 +    // Step-1: Java client does Region.put("K", A);
 +    // Step-2: REST CAS request for key "K" with data "@old" = A. CAS fails because the existing PdxInstance in the cache and
 +    //         the PdxInstance generated from the JSON (CAS request) do not match, as their values' types get changed.
 +    /*
 +    //verify update on key "1"
 +    {
 +      Object obj = region.get("1");
 +      if (obj instanceof PdxInstance) {
 +        PdxInstance pi = (PdxInstance)obj;
 +        Person p1 = (Person)pi.getObject();
 +        System.out.println("Nilkanth1 : verifyUpdatesInClientCache() : GET ON KEY=1" + p1.toString());
 +      }else {
 +        System.out.println("Nilkanth1 : verifyUpdatesInClientCache() GET ON KEY=1  returned OBJECT: " + obj.toString());
 +      }
 +    }
 +    */
 +    
 +    //verify update on key "2"
 +    {
 +      Person expectedPerson = new Person(501L, "Barack", "Hussein", "Obama", DateTimeUtils.createDate(1961, Calendar.APRIL, 8), Gender.MALE );
 +      Object value = region.get("2");
 +      if (value instanceof PdxInstance) {
 +        PdxInstance pi3  = (PdxInstance) value;
 +        Person actualPerson = (Person) pi3.getObject();
 +        assertEquals(actualPerson.getId(), expectedPerson.getId());
 +        assertEquals(actualPerson.getFirstName(), expectedPerson.getFirstName());
 +        assertEquals(actualPerson.getMiddleName(), expectedPerson.getMiddleName());
 +        assertEquals(actualPerson.getLastName(), expectedPerson.getLastName());
 +        assertEquals(actualPerson.getBirthDate(), expectedPerson.getBirthDate());
 +        assertEquals(actualPerson.getGender(), expectedPerson.getGender());
 +      }else {
 +        fail("VerifyUpdatesInClientCache, Get on key 2, Expected to get value of type PdxInstance ");
 +      }
 +    }
 +    
 +    //verify Deleted key "13"
 +    {
 +      Object obj = region.get("13");
 +      assertEquals(obj, null);
 +      
 +      obj = region.get("14");
 +      assertEquals(obj, null);
 +      
 +      obj = region.get("15");
 +      assertEquals(obj, null);
 +      
 +      obj = region.get("16");
 +      assertEquals(obj, null);
 +    }
 +    
 +  }
 +  
 +  public static void doUpdatesUsingRestApis(String restEndpoint) {
 +    // Update keys using REST calls
 +    {
 +
 +      try {
 +        CloseableHttpClient httpclient = HttpClients.createDefault();
 +        HttpPut put = new HttpPut(restEndpoint
 +            + "/People/3,4,5,6,7,8,9,10,11,12");
 +        put.addHeader("Content-Type", "application/json");
 +        put.addHeader("Accept", "application/json");
 +        put.addHeader("security-username", "admin");
 +        put.addHeader("security-password", "admin");
 +
 +        StringEntity entity = new StringEntity(PERSON_LIST_AS_JSON);
 +        put.setEntity(entity);
 +        CloseableHttpResponse result = httpclient.execute(put);
 +      } catch (Exception e) {
 +        throw new TestException(CommandTestBase.getStackTrace(e));
 +      }
 +    }
 +    
 +    //Delete a single key
 +    {
 +      try {
 +        CloseableHttpClient httpclient = HttpClients.createDefault();
 +        HttpDelete delete = new HttpDelete(restEndpoint + "/People/13");
 +        delete.addHeader("Content-Type", "application/json");
 +        delete.addHeader("Accept", "application/json");
 +        delete.addHeader("security-username", "admin");
 +        delete.addHeader("security-password", "admin");
 +        CloseableHttpResponse result = httpclient.execute(delete);
 +      } catch (Exception e) {
 +        throw new TestException(CommandTestBase.getStackTrace(e));
 +      }
 +    }
 +    
 +    //Delete set of keys
 +    {
 +      try {
 +        CloseableHttpClient httpclient = HttpClients.createDefault();
 +        HttpDelete delete = new HttpDelete(restEndpoint + "/People/14,15,16");
 +        delete.addHeader("Content-Type", "application/json");
 +        delete.addHeader("Accept", "application/json");
 +        delete.addHeader("security-username", "admin");
 +        delete.addHeader("security-password", "admin");
 +        
 +        CloseableHttpResponse result = httpclient.execute(delete);
 +      } catch (Exception e) {
 +        throw new TestException(CommandTestBase.getStackTrace(e));
 +      }
 +    }
 +    
 +    //REST put?op=CAS for key 1
 +    /*
 +    try {   
 +    {  
 +      HttpEntity<Object> entity = new HttpEntity<Object>(PERSON_AS_JSON_CAS, headers);
 +      ResponseEntity<String> result = RestTestUtils.getRestTemplate().exchange(
 +        restEndpoint + "/People/1?op=cas",
 +        HttpMethod.PUT, entity, String.class);
 +    }
 +    } catch (HttpClientErrorException e) {
 +      
 +      fail("Caught HttpClientErrorException while doing put with op=cas");
 +    }catch (HttpServerErrorException se) {
 +      fail("Caught HttpServerErrorException while doing put with op=cas");
 +    }
 +    */ 
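 +    // For reference, a put?op=CAS request is assumed to carry both the expected and the new value in a
 +    // single JSON document (hypothetical sketch, not the actual PERSON_AS_JSON_CAS constant):
 +    //   { "@old": { "id": 101, "firstName": "Mithali", ... },
 +    //     "@new": { "id": 101, "firstName": "Mithali", "middleName": "D", ... } }
 +    // The swap is applied only when "@old" matches the current cached value, which is the comparison that
 +    // currently fails for PdxInstance values (see the TODO in verifyUpdatesInClientCache above).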
 +    
 +    //REST put?op=REPLACE for key 2
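 +    // op=replace is expected to follow Region.replace semantics: the new value is written only if an entry
 +    // for key 2 already exists, which it does here because of the earlier puts from the Java client.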
 +    {
 +      /*HttpEntity<Object> entity = new HttpEntity<Object>(PERSON_AS_JSON_REPLACE, headers);
 +      ResponseEntity<String> result = RestTestUtils.getRestTemplate().exchange(
 +        restEndpoint + "/People/2?op=replace",
 +      HttpMethod.PUT, entity, String.class);*/
 +      
 +      try {
 +        CloseableHttpClient httpclient = HttpClients.createDefault();
 +        HttpPut put = new HttpPut(restEndpoint
 +            + "/People/2?op=replace");
 +        put.addHeader("Content-Type", "application/json");
 +        put.addHeader("Accept", "application/json");
 +        put.addHeader("security-username", "admin");
 +        put.addHeader("security-password", "admin");
 +        
 +        StringEntity entity = new StringEntity(PERSON_AS_JSON_REPLACE);
 +        put.setEntity(entity);
 +        CloseableHttpResponse result = httpclient.execute(put);
 +      } catch (Exception e) {
 +        throw new TestException(CommandTestBase.getStackTrace(e));
 +      }
 +    }
 +  }
 +  
 +  public static void fetchRestServerEndpoints(String restEndpoint) {
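 +    // The "/servers" resource is expected to return a JSON array with one REST endpoint URL per member
 +    // that has the developer REST API enabled; in this test that is the manager and the cache server,
 +    // hence the assertion below that the array holds exactly two entries.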
 +    HttpGet get = new HttpGet(restEndpoint + "/servers");
 +    get.addHeader("Content-Type", "application/json");
 +    get.addHeader("Accept", "application/json");
 +    get.addHeader("security-username", "admin");
 +    get.addHeader("security-password", "admin");
 +    
 +    CloseableHttpClient httpclient = HttpClients.createDefault();
 +    CloseableHttpResponse response;
 +    
 +    try {
 +      response = httpclient.execute(get);
 +      HttpEntity entity = response.getEntity();
 +      InputStream content = entity.getContent();
 +      BufferedReader reader = new BufferedReader(new InputStreamReader(
 +          content));
 +      String line;
 +      StringBuffer str = new StringBuffer();
 +      while ((line = reader.readLine()) != null) {
 +        str.append(line);
 +      }
 +      
 +      //validate the status code
 +      assertEquals(response.getStatusLine().getStatusCode(), 200);
 +      
 +      if(response.getStatusLine().getStatusCode() == 200) {
 +        JSONArray jsonArray = new JSONArray(str.toString());
 +        
 +        //verify total number of REST service endpoints in DS
 +        assertEquals(jsonArray.length(), 2);
 +      }
 +      
 +    } catch (ClientProtocolException e) { 
 +      e.printStackTrace();
 +      fail(" Rest Request should not have thrown ClientProtocolException!");
 +    } catch (IOException e) {
 +      e.printStackTrace();
 +      fail(" Rest Request should not have thrown IOException!");
 +    } catch (JSONException e) {
 +      e.printStackTrace();
 +      fail(" Rest Request should not have thrown  JSONException!");
 +    }
 +    
 +  }
 +  public static void VerifyGetUsingJavaAPIs(String restEndpoint) {
 +    ClientCache cache = GemFireCacheImpl.getInstance();
 +    assertNotNull(cache);
 +    Region<String, Object> region = cache.getRegion(PEOPLE_REGION_NAME);
 +    Object value = region.get("1");
 +  }
 +  
 +  public static void doGetsUsingRestApis(String restEndpoint) {
 +    //GET using Region APIs
 +    
 +      ClientCache cache = GemFireCacheImpl.getInstance();
 +      /*
 +      assertNotNull(cache);
 +      Region<String, Object> region = cache.getRegion(PEOPLE_REGION_NAME);
 +      Object value = region.get("1");
 +      if(value instanceof PdxInstance){
 +        System.out.println("Nilkanth: value is of Type PdxInstance");
 +        System.out.println("Nilkanth Person = " + ((PdxInstance)value).getField("id"));
 +      }else{
 +        System.out.println("Nilkanth: value is NOT of Type PdxInstance -> type = "+ value.getClass().getName());
 +      }
 +      */
 +      
 +    
 +    //HttpHeaders headers = setAcceptAndContentTypeHeaders(); 
 +    String currentOperation = null;
 +    JSONObject jObject;
 +    JSONArray jArray;
 +    try {    
 +      //1. Get on key="1" and validate result.
 +      {
 +        currentOperation = "GET on key 1";
 +        
 +        HttpGet get = new HttpGet(restEndpoint + "/People/1");
 +        get.addHeader("Content-Type", "application/json");
 +        get.addHeader("Accept", "application/json");
 +        get.addHeader("security-username", "admin");
 +        get.addHeader("security-password", "admin");
 +
 +        CloseableHttpClient httpclient = HttpClients.createDefault();
 +        CloseableHttpResponse response = httpclient.execute(get);
 +        
 +        HttpEntity entity = response.getEntity();
 +        InputStream content = entity.getContent();
 +        BufferedReader reader = new BufferedReader(new InputStreamReader(
 +            content));
 +        String line;
 +        StringBuffer str = new StringBuffer();
 +        while ((line = reader.readLine()) != null) {
 +          str.append(line);
 +        }
 +        
 +        jObject = new JSONObject(str.toString());
 +      
 +        assertEquals(jObject.get("id"), 101);
 +        assertEquals(jObject.get("firstName"), "Mithali");
 +        assertEquals(jObject.get("middleName"), "Dorai");
 +        assertEquals(jObject.get("lastName"), "Raj");
 +        assertEquals(jObject.get("gender"), Gender.FEMALE.name());
 +      }
 +         
 +      //2. Get on key="16" and validate result.
 +      {
 +        currentOperation = "GET on key 16";
 +
 +        
 +        HttpGet get = new HttpGet(restEndpoint + "/People/16");
 +        get.addHeader("Content-Type", "application/json");
 +        get.addHeader("Accept", "application/json");
 +        get.addHeader("security-username", "admin");
 +        get.addHeader("security-password", "admin");
 +
 +        CloseableHttpClient httpclient = HttpClients.createDefault();
 +        CloseableHttpResponse response = httpclient.execute(get);
 +        
 +        HttpEntity entity = response.getEntity();
 +        InputStream content = entity.getContent();
 +        BufferedReader reader = new BufferedReader(new InputStreamReader(
 +            content));
 +        String line;
 +        StringBuffer str = new StringBuffer();
 +        while ((line = reader.readLine()) != null) {
 +          str.append(line);
 +        }
 +        
 +        jObject = new JSONObject(str.toString());
 +        
 +      
 +        assertEquals(jObject.get("id"), 104);
 +        assertEquals(jObject.get("firstName"), "Shila");
 +        assertEquals(jObject.get("middleName"), "kumari");
 +        assertEquals(jObject.get("lastName"), "Dixit");
 +        assertEquals(jObject.get("gender"), Gender.FEMALE.name());
 +      }
 +      
 +      //3. Get all (getAll) entries in Region
 +      {
 +
 +        HttpGet get = new HttpGet(restEndpoint + "/People");
 +        get.addHeader("Content-Type", "application/json");
 +        get.addHeader("Accept", "application/json");
 +        get.addHeader("security-username", "admin");
 +        get.addHeader("security-password", "admin");
 +
 +        CloseableHttpClient httpclient = HttpClients.createDefault();
 +        CloseableHttpResponse result = httpclient.execute(get);
 +        assertEquals(result.getStatusLine().getStatusCode(), 200);
 +        assertNotNull(result.getEntity());
 +      
 +        HttpEntity entity = result.getEntity();
 +        InputStream content = entity.getContent();
 +        BufferedReader reader = new BufferedReader(new InputStreamReader(
 +            content));
 +        String line;
 +        StringBuffer sb = new StringBuffer();
 +        while ((line = reader.readLine()) != null) {
 +          sb.append(line);
 +        }
 +        result.close();
 +        
 +        try {
 +          jObject = new JSONObject(sb.toString());
 +          jArray = jObject.getJSONArray("People");
 +          assertEquals(jArray.length(), 16);
 +        } catch (JSONException e) {
 +          fail(" Rest Request ::" + currentOperation +  " :: should not have thrown JSONException ");
 +        }
 +      }
 +      
 +      //4. GetAll?limit=10 (10 entries) and verify results
 +      {
 +        HttpGet get = new HttpGet(restEndpoint + "/People?limit=10");
 +        get.addHeader("Content-Type", "application/json");
 +        get.addHeader("Accept", "application/json");
 +        get.addHeader("security-username", "admin");
 +        get.addHeader("security-password", "admin");
 +
 +        CloseableHttpClient httpclient = HttpClients.createDefault();
 +        CloseableHttpResponse response = httpclient.execute(get);
 +        assertEquals(response.getStatusLine().getStatusCode(), 200);
 +        assertNotNull(response.getEntity());
 +        
 +        HttpEntity entity = response.getEntity();
 +        InputStream content = entity.getContent();
 +        BufferedReader reader = new BufferedReader(new InputStreamReader(
 +            content));
 +        String line;
 +        StringBuffer str = new StringBuffer();
 +        while ((line = reader.readLine()) != null) {
 +          str.append(line);
 +        }
 +        
 +        try {
 +          jObject = new JSONObject(str.toString());
 +          jArray = jObject.getJSONArray("People");
 +          assertEquals(jArray.length(), 10);
 +        } catch (JSONException e) {
 +          fail(" Rest Request ::" + currentOperation +  " :: should not have thrown JSONException ");
 +        }
 +      }
 +      
 +      //5. Get keys - List all keys in region
 +      {  
 +        
 +        HttpGet get = new HttpGet(restEndpoint + "/People/keys");
 +        get.addHeader("Content-Type", "application/json");
 +        get.addHeader("Accept", "application/json");
 +        get.addHeader("security-username", "admin");
 +        get.addHeader("security-password", "admin");
 +
 +        CloseableHttpClient httpclient = HttpClients.createDefault();
 +        CloseableHttpResponse response = httpclient.execute(get);
 +        assertEquals(response.getStatusLine().getStatusCode(), 200);
 +        assertNotNull(response.getEntity());
 +        
 +        HttpEntity entity = response.getEntity();
 +        InputStream content = entity.getContent();
 +        BufferedReader reader = new BufferedReader(new InputStreamReader(
 +            content));
 +        String line;
 +        StringBuffer str = new StringBuffer();
 +        while ((line = reader.readLine()) != null) {
 +          str.append(line);
 +        }
 +        
 +        try {
 +          jObject = new JSONObject(str.toString());
 +          jArray = jObject.getJSONArray("keys"); 
 +          assertEquals(jArray.length(), 16);
 +        } catch (JSONException e) {
 +          fail(" Rest Request ::" + currentOperation +  " :: should not have thrown JSONException ");
 +        }
 +      } 
 +      
 +      //6. Get data for specific keys
 +      {  
 +       
 +        HttpGet get = new HttpGet(restEndpoint + "/People/1,3,5,7,9,11");
 +        get.addHeader("Content-Type", "application/json");
 +        get.addHeader("Accept", "application/json");
 +        get.addHeader("security-username", "admin");
 +        get.addHeader("security-password", "admin");
 +
 +        CloseableHttpClient httpclient = HttpClients.createDefault();
 +        CloseableHttpResponse response = httpclient.execute(get);
 +        assertEquals(response.getStatusLine().getStatusCode(), 200);
 +        assertNotNull(response.getEntity());
 +        
 +        HttpEntity entity = response.getEntity();
 +        InputStream content = entity.getContent();
 +        BufferedReader reader = new BufferedReader(new InputStreamReader(
 +            content));
 +        String line;
 +        StringBuffer str = new StringBuffer();
 +        while ((line = reader.readLine()) != null) {
 +          str.append(line);
 +        }
 +        
 +        try {
 +          jObject = new JSONObject(str.toString());
 +          jArray = jObject.getJSONArray("People");
 +          assertEquals(jArray.length(), 6);
 +          
 +        } catch (JSONException e) {
 +          fail(" Rest Request ::" + currentOperation +  " :: should not have thrown JSONException ");
 +        }
 +      } 
 +      
 +    }catch ( Exception e ) {
 +      throw new TestException(CommandTestBase.getStackTrace(e)); 
 +    }
 +    
 +  }
 +
 +  public static void createRegionInClientCache() {
 +    ClientCache cache = GemFireCacheImpl.getInstance();
 +    assertNotNull(cache);
 +    ClientRegionFactory<String, Object> crf = cache
 +        .createClientRegionFactory(ClientRegionShortcut.PROXY);
 +    Region<String, Object> region = crf.create(PEOPLE_REGION_NAME);
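 +    // A PROXY region holds no local data; every get/put from this client is forwarded to the servers, so
 +    // the values written here are what the REST calls and the peer members later observe.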
 +
 +  }
 +
 +  public static void createRegionInManager() {
 +    Cache cache = GemFireCacheImpl.getInstance();
 +    assertNotNull(cache);
 +
 +    RegionFactory<String, Object> rf = cache
 +        .createRegionFactory(RegionShortcut.REPLICATE);
 +    Region<String, Object> region = rf.create(PEOPLE_REGION_NAME);
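 +    // REPLICATE keeps a full copy of the region on this member, so entries put by the client are also
 +    // visible to REST requests served by the manager.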
 +  }
 +
 +  public static void createRegionInPeerServer() {
 +    Cache cache = GemFireCacheImpl.getInstance();
 +    assertNotNull(cache);
 +
 +    RegionFactory<String, Object> rf = cache
 +        .createRegionFactory(RegionShortcut.REPLICATE);
 +    Region<String, Object> region = rf.create(PEOPLE_REGION_NAME);
 +  }
 +
 +  /**
 +   * InterOps Test between REST-client, Peer Cache Client and Client Cache 
 +   * @throws Exception
 +   */
 + 
 +  public void testInterOpsWithReplicatedRegion() throws Exception {
 +
 +    final Host host = Host.getHost(0);
 +    VM locator = host.getVM(0);
 +    VM manager = host.getVM(1);
 +    VM server = host.getVM(2);
 +    VM client = host.getVM(3);
 +
 +    // start locator
 +    //int locatorPort = AvailablePortHelper.getRandomAvailableTCPPortOnVM(locator);
 +    int locatorPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
 +    
 +    
 +    startLocatorInVM(locator, locatorPort, "");
 +    
 +    // find locators
-     String locators = getServerHostName(locator.getHost()) + "[" + locatorPort
++    String locators = NetworkUtils.getServerHostName(locator.getHost()) + "[" + locatorPort
 +        + "]";
 +     
 +    // start manager (peer cache)
 +    int managerPort = startManagerInVM(manager,/* groups */null, locators,
 +        new String[] {REGION_NAME}, CacheServer.DEFAULT_LOAD_PROBE);
 +    
 +    //start the bridge server with the REST service enabled
 +    String restEndpoint = (String)server.invoke(RestAPIsAndInterOpISDUnitTest.class,
 +        "startBridgeServerWithRestServiceOnInVM", new Object[] { server ,  null, locators, new String[] {REGION_NAME}, CacheServer.DEFAULT_LOAD_PROBE });
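 +    // The returned string is assumed to be the base URL of the cache server's REST service
 +    // (e.g. "http://<host>:<http-service-port>/gemfire-api/v1"); every HTTP call below is built on it.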
 +    
 +    // create a client cache
-     createClientCacheInVM(client, getServerHostName(locator.getHost()),
++    createClientCacheInVM(client, NetworkUtils.getServerHostName(locator.getHost()),
 +        locatorPort);
 +    
 +    // create region in Manager, peer cache and Client cache nodes
 +    manager.invoke(RestAPIsAndInterOpISDUnitTest.class, "createRegionInManager");
 +    
 +    server.invoke(RestAPIsAndInterOpISDUnitTest.class,
 +        "createRegionInPeerServer");
 +    
 +    client.invoke(RestAPIsAndInterOpISDUnitTest.class,
 +        "createRegionInClientCache");
 +    
 +    
 +    // do some Person puts from the client cache
 +    client.invoke(RestAPIsAndInterOpISDUnitTest.class, "doPutsInClientCache");
 +
 +    //TEST: fetch all available REST endpoints
 +    //RestAPIsAndInterOpISDUnitTest.fetchRestServerEndpoints(restEndpoint);
 +    //System.out.println("Nilkanth: Test-11  Fetched REST endpoints SUCCESS!");
 +    
 +    // Controller VM - config REST Client and make HTTP calls
 +    RestAPIsAndInterOpISDUnitTest.doGetsUsingRestApis(restEndpoint);
 +    
 +    //update Data using REST APIs
 +    RestAPIsAndInterOpISDUnitTest.doUpdatesUsingRestApis(restEndpoint);
 +    
 +    client.invoke(RestAPIsAndInterOpISDUnitTest.class, "verifyUpdatesInClientCache");
 +    
 +    //Querying
 +    RestAPIsAndInterOpISDUnitTest.doQueryOpsUsingRestApis(restEndpoint);
 +    
 +    
 +    // stop the client and make sure the bridge server notifies
 +    // stopBridgeMemberVM(client);
 +    helper.closeCache(locator);
 +    helper.closeCache(manager);
 +    helper.closeCache(server);
 +    helper.closeCache(client);
 +
 +  }
 +
 +  private void createClientCacheInVM(VM vm, final String host, final int port) {
 +    SerializableRunnable connect = new SerializableRunnable(
 +        "Start Cache client") {
 +      public void run() {
 +        
 +        // Connect using the GemFire locator and create a client cache with a PROXY region
 +        ClientCache c = new ClientCacheFactory()
 +                        .set("security-client-auth-init", "templates.security.UserPasswordAuthInit.create")
 +                         .set("security-username", "admin")
 +                         .set("security-password", "admin")
 +                         .setPdxReadSerialized(true).addPoolLocator(host, port)
 +                         .create();
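 +        // setPdxReadSerialized(true) makes region.get() return PdxInstance rather than a deserialized
 +        // Person, which is why verifyUpdatesInClientCache checks for PdxInstance values.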
 +       
 +        Region r = c.createClientRegionFactory(
 +            ClientRegionShortcut.PROXY).create(REGION_NAME);
 +      }
 +    };
 +
 +    if (vm == null) {
 +      connect.run();
 +    } else {
 +      vm.invoke(connect);
 +    }
 +  }
 +
 +  private int startManagerInVM(VM vm, final String[] groups,
 +      final String locators, final String[] regions, final ServerLoadProbe probe) {
 +    SerializableCallable connect = new SerializableCallable("Start Manager ") {
 +      public Object call() throws IOException {
 +        Properties props = new Properties();
 +        props
 +            .setProperty(DistributionConfig.MCAST_PORT_NAME, String.valueOf(0));
 +        props.setProperty(DistributionConfig.LOCATORS_NAME, locators);
 +
 +        props.setProperty("jmx-manager", "true");
 +        props.setProperty("jmx-manager-start", "true");
 +        props.setProperty(DistributionConfig.JMX_MANAGER_PORT_NAME, "0");
 +        
 +        final int httpPort = AvailablePortHelper.getRandomAvailableTCPPort();
 +        //Set REST service related configuration
 +        props.setProperty(DistributionConfig.START_DEV_REST_API_NAME, "true");
 +        props.setProperty(DistributionConfig.HTTP_SERVICE_BIND_ADDRESS_NAME, "localhost");
 +        props.setProperty(DistributionConfig.HTTP_SERVICE_PORT_NAME, String.valueOf(httpPort));
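 +        // With start-dev-rest-api enabled, each member hosts its own REST endpoint on its http-service-port;
 +        // the manager and the cache server therefore expose two endpoints in this distributed system.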
 +        
 +        //Add security properties
 +        props.setProperty(DistributionConfig.SECURITY_CLIENT_AUTHENTICATOR_NAME, "com.gemstone.gemfire.rest.internal.web.controllers.CustomRestAPIsAuthenticator.create");
 +        props.setProperty(DistributionConfig.SECURITY_CLIENT_ACCESSOR_NAME, "com.gemstone.gemfire.rest.internal.web.controllers.CustomRestAPIsAuthorization.create");
 +        //props.setProperty(DistributionConfig.SECURITY_CLIENT_ACCESSOR_PP_NAME, "com.gemstone.gemfire.rest.internal.web.controllers.CustomRestAPIsAuthorization.create");
 +        props.setProperty(DistributionConfig.SECURITY_REST_TOKEN_SERVICE_NAME, "com.gemstone.gemfire.rest.internal.web.controllers.DummyTokenService.create");
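 +        // These custom security callbacks (test implementations, defined elsewhere) are what the
 +        // "security-username"/"security-password" headers on each REST request authenticate against.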
 +        
 +        DistributedSystem ds = getSystem(props);
 +        Cache cache = CacheFactory.create(ds);
 +        AttributesFactory factory = new AttributesFactory();
 +        
 +        factory.setEnableBridgeConflation(true);
 +        factory.setDataPolicy(DataPolicy.REPLICATE);
 +        RegionAttributes attrs = factory.create();
 +        for (int i = 0; i < regions.length; i++) {
 +          cache.createRegion(regions[i], attrs);
 +        }
 +        CacheServer server = cache.addCacheServer();
 +        final int serverPort = AvailablePortHelper.getRandomAvailableTCPPort();
 +        server.setPort(serverPort);
 +        server.setGroups(groups);
 +        server.setLoadProbe(probe);
 +        server.start();
 +        
 +        return new Integer(serverPort);
 +      }
 +    };
 +    Integer port = (Integer) vm.invoke(connect);
 +    return port.intValue();
 +  }
 +}


[40/62] [abbrv] [partial] incubator-geode git commit: GEODE-773: Extract static methods from DistributedTestCase

Posted by je...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/MultiVMRegionTestCase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/MultiVMRegionTestCase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/MultiVMRegionTestCase.java
index 2bb172e..d3b065c 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/MultiVMRegionTestCase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/MultiVMRegionTestCase.java
@@ -114,12 +114,17 @@ import com.gemstone.gemfire.internal.offheap.MemoryChunkWithRefCount;
 import com.gemstone.gemfire.internal.offheap.SimpleMemoryAllocatorImpl;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.RMIException;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.Invoke;
 
 
 /**
@@ -164,8 +169,8 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
     disconnectAllFromDS();
   }
   
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  @Override
+  protected final void postTearDownRegionTestCase() throws Exception {
     DistributedTestCase.cleanupAllVms();
     CCRegion = null;
   }
@@ -457,11 +462,11 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
                 Object value = e.getNewValue();
                 assertNotNull(value);
                 try {
-                  getLogWriter().info("++ Adding " + value);
+                  com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("++ Adding " + value);
                   queue.put(value);
 
                 } catch (InterruptedException ex) {
-                  fail("Why was I interrupted?", ex);
+                  com.gemstone.gemfire.test.dunit.Assert.fail("Why was I interrupted?", ex);
                 }
               }
             });
@@ -477,13 +482,13 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
               (LinkedBlockingQueue) region.getUserAttribute();
             for (int i = 0; i <= lastNumber; i++) {
               try {
-                getLogWriter().info("++ Waiting for " + i);
+                com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("++ Waiting for " + i);
                 Integer value = (Integer) queue.take();
-                getLogWriter().info("++ Got " + value);
+                com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("++ Got " + value);
                 assertEquals(i, value.intValue());
 
               } catch (InterruptedException ex) {
-                fail("Why was I interrupted?", ex);
+                com.gemstone.gemfire.test.dunit.Assert.fail("Why was I interrupted?", ex);
               }
             }
           }
@@ -501,14 +506,14 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           }
         });
 
-    DistributedTestCase.join(ai0, 30 * 1000, getLogWriter());
-    DistributedTestCase.join(ai1, 30 * 1000, getLogWriter());
+    ThreadUtils.join(ai0, 30 * 1000);
+    ThreadUtils.join(ai1, 30 * 1000);
 
     if (ai0.exceptionOccurred()) {
-      fail("ai0 failed", ai0.getException());
+      com.gemstone.gemfire.test.dunit.Assert.fail("ai0 failed", ai0.getException());
 
     } else if (ai1.exceptionOccurred()) {
-      fail("ai1 failed", ai1.getException());
+      com.gemstone.gemfire.test.dunit.Assert.fail("ai1 failed", ai1.getException());
     }
   }
 
@@ -648,13 +653,13 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           region.put(key, newValue);
         }
       });
-    invokeRepeatingIfNecessary(vm1, new CacheSerializableRunnable("Get entry") {
+    Invoke.invokeRepeatingIfNecessary(vm1, new CacheSerializableRunnable("Get entry") {
         public void run2() throws CacheException {
           Region region =
             getRootRegion().getSubregion(name);
           assertEquals(newValue, region.get(key));
         }
-      });
+      }, getRepeatTimeoutMs());
   }
 
 
@@ -766,7 +771,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           }
         };
 
-    invokeInEveryVM(create);
+    Invoke.invokeInEveryVM(create);
 
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
@@ -780,7 +785,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
         }
       });
 
-    invokeInEveryVM(new CacheSerializableRunnable("Verify region destruction") {
+    Invoke.invokeInEveryVM(new CacheSerializableRunnable("Verify region destruction") {
       public void run2() throws CacheException {
         WaitCriterion ev = new WaitCriterion() {
           public boolean done() {
@@ -790,7 +795,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
             return "Waiting for region " + name + " to be destroyed";
           }
         };
-        DistributedTestCase.waitForCriterion(ev, 60 * 1000, 10, true);
+        Wait.waitForCriterion(ev, 60 * 1000, 10, true);
         Region region = getRootRegion().getSubregion(name);
         assertNull(region);
       }
@@ -978,7 +983,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
               // changed from severe to fine because it is possible
               // for this to return non-null on d-no-ack
               // that is was invokeRepeatingIfNecessary is called
-              getLogWriter().fine("invalidated entry has value of " + entry.getValue());
+              com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().fine("invalidated entry has value of " + entry.getValue());
             }
             assertNull(entry.getValue());
           }
@@ -1085,7 +1090,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           }
         };
 
-    invokeInEveryVM(create);
+    Invoke.invokeInEveryVM(create);
 
     final Object key = "KEY";
     final Object value = "VALUE";
@@ -1110,7 +1115,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           }
         };
 
-    invokeInEveryVM(put);
+    Invoke.invokeInEveryVM(put);
 
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
@@ -1152,7 +1157,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           }
         };
 
-    invokeInEveryVMRepeatingIfNecessary(verify);
+    Invoke.invokeInEveryVMRepeatingIfNecessary(verify, getRepeatTimeoutMs());
   }
 
   /**
@@ -1212,7 +1217,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
                 try {
                   assertEquals(newValue, DataSerializer.readObject(dis));
                 } catch (Exception e) {
-                  fail("Unexpected Exception", e);
+                  com.gemstone.gemfire.test.dunit.Assert.fail("Unexpected Exception", e);
                 }
               }
             };
@@ -1265,7 +1270,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
                 try {
                   assertEquals(newValue, DataSerializer.readObject(dis));
                 } catch (Exception e) {
-                  fail("Unexpected Exception", e);
+                  com.gemstone.gemfire.test.dunit.Assert.fail("Unexpected Exception", e);
                 }
               }
             };
@@ -1749,7 +1754,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
             assertEquals(value, result);
             return result;
           } catch (TimeoutException ex) {
-            fail("Why did I time out?", ex);
+            com.gemstone.gemfire.test.dunit.Assert.fail("Why did I time out?", ex);
           }
           return null;
         }
@@ -1843,7 +1848,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
             assertEquals(value, result);
             return result;
           } catch (TimeoutException ex) {
-            fail("Why did I time out?", ex);
+            com.gemstone.gemfire.test.dunit.Assert.fail("Why did I time out?", ex);
           }
           return null;
         }
@@ -2302,7 +2307,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
 
           } catch (CacheWriterException ex) {
             if (region.isDestroyed()) {
-              fail("should not have an exception if region is destroyed", ex);
+              com.gemstone.gemfire.test.dunit.Assert.fail("should not have an exception if region is destroyed", ex);
             }
             assertEquals(1, region.size());
             if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
@@ -2341,7 +2346,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
                 return "never saw off-heap object count go to zero. Last value was " + ma.getStats().getObjects();
               }
             };
-            DistributedTestCase.waitForCriterion(waitForStatChange, 3000, 10, true);
+            Wait.waitForCriterion(waitForStatChange, 3000, 10, true);
           }
         }
       });
@@ -2596,7 +2601,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
                   assertNull(helper.netSearch(true));
 
                 } catch (TimeoutException ex) {
-                  fail("Why did I time out?", ex);
+                  com.gemstone.gemfire.test.dunit.Assert.fail("Why did I time out?", ex);
                 }
                 return value;
               }
@@ -2683,7 +2688,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           getSystem().getLogWriter().info("testDistributedPut: Created Region");
         }
         catch (CacheException e) {
-          fail("While creating region", e);
+          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", e);
         }
       }
     };
@@ -2700,7 +2705,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
          }
         }
         catch (CacheException e) {
-          fail("While creating region", e);
+          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", e);
         }
       }
     };
@@ -2730,13 +2735,13 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
       rgn.put("key", "value");
       getSystem().getLogWriter().info("testDistributedPut: Put Value");
 
-      invokeInEveryVMRepeatingIfNecessary(new CacheSerializableRunnable("testDistributedPut: Verify Received Value") {
+      Invoke.invokeInEveryVMRepeatingIfNecessary(new CacheSerializableRunnable("testDistributedPut: Verify Received Value") {
         public void run2() {
           Region rgn1 = getRootRegion().getSubregion(rgnName);
           assertNotNull("Could not find entry for 'key'", rgn1.getEntry("key"));
           assertEquals("value", rgn1.getEntry("key").getValue());
         }
-      });
+      }, getRepeatTimeoutMs());
 
     }
     catch(Exception e) {
@@ -2912,14 +2917,14 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           }
       });
 
-    invokeRepeatingIfNecessary(vm2, new CacheSerializableRunnable("Wait for update") {
+    Invoke.invokeRepeatingIfNecessary(vm2, new CacheSerializableRunnable("Wait for update") {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(name);
         assertNotNull(region.getEntry(key1));
         assertNotNull(region.getEntry(key2));
         assertNotNull(region.getEntry(key3));
       }
-    });
+    }, getRepeatTimeoutMs());
 
     // Destroy the local entries so we know that they are not found by
     // a netSearch
@@ -2932,7 +2937,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
         }
       });
 
-    invokeRepeatingIfNecessary(vm2, new CacheSerializableRunnable("Verify keys") {
+    Invoke.invokeRepeatingIfNecessary(vm2, new CacheSerializableRunnable("Verify keys") {
         public void run2() throws CacheException {
           Region region =
             getRootRegion().getSubregion(name);
@@ -2952,7 +2957,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           assertNotNull(entry3);
           assertEquals(value3, entry3.getValue());
         }
-      });
+      }, getRepeatTimeoutMs());
   }
   
   /**
@@ -3038,14 +3043,14 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
       }
     });
     
-    invokeRepeatingIfNecessary(vm2, new CacheSerializableRunnable("Wait for update") {
+    Invoke.invokeRepeatingIfNecessary(vm2, new CacheSerializableRunnable("Wait for update") {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(name);
         assertNotNull(region.getEntry(key1));
         assertNotNull(region.getEntry(key2));
         assertNotNull(region.getEntry(key3));
       }
-    });
+    }, getRepeatTimeoutMs());
     
     // apply delta
     vm0.invoke(new CacheSerializableRunnable("Apply delta") {
@@ -3076,7 +3081,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
         }
       };
     
-    invokeRepeatingIfNecessary(vm0, verify);
+    Invoke.invokeRepeatingIfNecessary(vm0, verify, getRepeatTimeoutMs());
     
     
     // Destroy the local entries so we know that they are not found by
@@ -3088,7 +3093,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
       }
     });
     
-    invokeRepeatingIfNecessary(vm2, verify);
+    Invoke.invokeRepeatingIfNecessary(vm2, verify, getRepeatTimeoutMs());
     
   }
   
@@ -3311,7 +3316,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
         assertTrue(index >= 0);
         assertEquals(expectedValues.remove(index), event.getNewValue());
         expectedKeys.remove(index);
-        getLogWriter().info("afterCreate called in " +
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("afterCreate called in " +
          "MirroredDataFromNonMirroredListener for key:" + event.getKey());
       }
     }
@@ -3432,7 +3437,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           Region.Entry entry1 = region.getEntry(key1);
           if (!getRegionAttributes().getDataPolicy().withReplication()) {
             if (entry1 != null) {
-              getLogWriter().info("found entry " + entry1);
+              com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("found entry " + entry1);
             }
             assertNull(entry1);
           }
@@ -3714,7 +3719,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
       }
     });
 
-    pause(shortTimeout * 10);
+    Wait.pause(shortTimeout * 10);
 
     // Even though netSearch finds vm1's entry is not expired, it is considered
     // expired with respect to vm0's attributes
@@ -3805,7 +3810,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
       }
     });
 
-    pause(shortTimeout * 2);
+    Wait.pause(shortTimeout * 2);
 
     // Even though netSearch finds vm1's entry is not expired, it is considered
     // expired with respect to vm0's attributes
@@ -3941,7 +3946,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
               return "never saw create of " + key;
             }
           };
-          DistributedTestCase.waitForCriterion(waitForCreate, 3000, 10, true);
+          Wait.waitForCriterion(waitForCreate, 3000, 10, true);
         }
       });
       
@@ -3965,7 +3970,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
                 return "never saw expire of " + key + " entry=" + region.getEntry(key);
               }
             };
-            DistributedTestCase.waitForCriterion(waitForExpire, 4000, 10, true);
+            Wait.waitForCriterion(waitForExpire, 4000, 10, true);
           }
         });
 
@@ -3980,7 +3985,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
                 return "never saw expire of " + key + " entry=" + region.getEntry(key);
               }
             };
-            DistributedTestCase.waitForCriterion(waitForExpire, 4000, 10, true);
+            Wait.waitForCriterion(waitForExpire, 4000, 10, true);
             assertTrue(destroyListener.waitForInvocation(555));
             assertTrue(((DestroyListener)destroyListener).eventIsExpiration);
           }
@@ -4090,7 +4095,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
                   break;
                 } catch (AssertionFailedError e) {
                   if (retry > 0) {
-                    pause(1);
+                    Wait.pause(1);
                   } else {
                     throw e;
                   }
@@ -4130,7 +4135,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
                   return "Entry for key " + key + " never expired (since it still exists) " + expiryInfo;
                 }
               };
-              DistributedTestCase.waitForCriterion(waitForUpdate, 30000, 1, true);
+              Wait.waitForCriterion(waitForUpdate, 30000, 1, true);
             }
             assertNull(region.getEntry(key));
           }
@@ -4192,7 +4197,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           region.create(key, null);
           EntryExpiryTask eet = region.getEntryExpiryTask(key);
           region.create("createExpiryTime", eet.getExpirationTime());
-          waitForExpiryClockToChange(region);
+          Wait.waitForExpiryClockToChange(region);
         }
       });
 
@@ -4234,7 +4239,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
               return "never saw update of " + key;
             }
           };
-          DistributedTestCase.waitForCriterion(waitForUpdate, 3000, 10, true);
+          Wait.waitForCriterion(waitForUpdate, 3000, 10, true);
 
           EntryExpiryTask eet = region.getEntryExpiryTask(key);
           long createExpiryTime = (Long) region.get("createExpiryTime");
@@ -4286,7 +4291,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
             factory.setScope(Scope.DISTRIBUTED_ACK);
             factory.setDataPolicy(DataPolicy.NORMAL);
             factory.setSubscriptionAttributes(new SubscriptionAttributes(InterestPolicy.ALL));
-            getLogWriter().info("MJT DEBUG: attrs0 are " + factory.create());
+            com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("MJT DEBUG: attrs0 are " + factory.create());
             createRootRegion(factory.create());
           }
           {
@@ -4296,7 +4301,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
             if (getRegionAttributes().getDataPolicy() == DataPolicy.NORMAL) {
               factory.setDataPolicy(DataPolicy.PRELOADED);
             }
-            getLogWriter().info("MJT DEBUG: attrs1 are " + factory.create());
+            com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("MJT DEBUG: attrs1 are " + factory.create());
             Region region = createRegion(name, factory.create());
           }
           finishCacheXml(name);
@@ -4358,7 +4363,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
             return "replicate count never reached " + expectedProfiles;
           }
         };
-        DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+        Wait.waitForCriterion(ev, 60 * 1000, 200, true);
 
         DataPolicy currentPolicy = getRegionAttributes().getDataPolicy();
         int numProfiles = 0;
@@ -4375,7 +4380,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
         // before the get initial image is complete.
         for (int i = 1; i < NB1_NUM_ENTRIES; i += 2) {
           Object key = new Integer(i);
-          getLogWriter().info("Operation #"+i+" on key " + key);
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Operation #"+i+" on key " + key);
           switch (i % 6) {
             case 1: // UPDATE
               // use the current timestamp so we know when it happened
@@ -4433,34 +4438,34 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
         });
     }
 
-    getLogWriter().info("Before GetInitialImage, data policy is "+getRegionAttributes().getDataPolicy()+", scope is "+getRegionAttributes().getScope());
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Before GetInitialImage, data policy is "+getRegionAttributes().getDataPolicy()+", scope is "+getRegionAttributes().getScope());
     AsyncInvocation asyncGII = vm2.invokeAsync(create);
 
     if (!getRegionAttributes().getScope().isGlobal()) {
       // wait for nonblocking operations to complete
-      DistributedTestCase.join(async, 30 * 1000, getLogWriter());
+      ThreadUtils.join(async, 30 * 1000);
       vm2.invoke(new SerializableRunnable("Set fast image processing") {
           public void run() {
             com.gemstone.gemfire.internal.cache.InitialImageOperation.slowImageProcessing = 0;
           }
         });
-      getLogWriter().info("after async nonblocking ops complete");
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("after async nonblocking ops complete");
     }
 
     // wait for GII to complete
-    DistributedTestCase.join(asyncGII, 30 * 1000, getLogWriter());
+    ThreadUtils.join(asyncGII, 30 * 1000);
     final long iiComplete = System.currentTimeMillis();
-    getLogWriter().info("Complete GetInitialImage at: " + System.currentTimeMillis());
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Complete GetInitialImage at: " + System.currentTimeMillis());
 
     if (getRegionAttributes().getScope().isGlobal()) {
       // wait for nonblocking operations to complete
-      DistributedTestCase.join(async, 30 * 1000, getLogWriter());
+      ThreadUtils.join(async, 30 * 1000);
     }
     if (async.exceptionOccurred()) {
-      fail("async failed", async.getException());
+      com.gemstone.gemfire.test.dunit.Assert.fail("async failed", async.getException());
     }
     if (asyncGII.exceptionOccurred()) {
-      fail("asyncGII failed", asyncGII.getException());
+      com.gemstone.gemfire.test.dunit.Assert.fail("asyncGII failed", asyncGII.getException());
     }
 
     // Locally destroy the region in vm0 so we know that they are not found by
@@ -4472,7 +4477,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           region.localDestroyRegion();
         }
       });
-    getLogWriter().info("after localDestroyRegion");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("after localDestroyRegion");
 
       // invoke repeating so noack regions wait for all updates to get processed
     vm2.invokeRepeatingIfNecessary(new CacheSerializableRunnable("Verify entryCount") {
@@ -4495,7 +4500,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           if (entriesDumped) return;
           entriesDumped = true;
 
-          LogWriter logger = getLogWriter();
+          LogWriter logger = com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter();
           logger.info("DUMPING Entries with values in VM that should have been destroyed:");
           for (int i = 5; i < NB1_NUM_ENTRIES; i += 6) {
             try {
@@ -4508,7 +4513,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           }
         }
     }, 5000);
-    getLogWriter().info("after verify entryCount");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("after verify entryCount");
 
 
     vm2.invoke(new CacheSerializableRunnable("Verify keys/values & Nonblocking") {
@@ -4562,7 +4567,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
               }
             }
           }
-          getLogWriter().info(name + ": " + numConcurrent + " entries out of " + entryCount +
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(name + ": " + numConcurrent + " entries out of " + entryCount +
                               " were updated concurrently with getInitialImage");
           // make sure at least some of them were concurrent
           if (region.getAttributes().getScope().isGlobal()) {
@@ -4578,7 +4583,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           }
         }
       });
-    getLogWriter().info("after verify key/values");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("after verify key/values");
   }
 
   /**
@@ -4690,7 +4695,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
             return "replicate count never reached " + expectedProfiles;
           }
         };
-        DistributedTestCase.waitForCriterion(ev, 100 * 1000, 200, true);
+        Wait.waitForCriterion(ev, 100 * 1000, 200, true);
 
         // operate on every odd entry with different value, alternating between
         // updates, invalidates, and destroys. These operations are likely
@@ -4768,30 +4773,30 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
 
     if (!getRegionAttributes().getScope().isGlobal()) {
       // wait for nonblocking operations to complete
-      DistributedTestCase.join(async, 30 * 1000, getLogWriter());
+      ThreadUtils.join(async, 30 * 1000);
 
       vm2.invoke(new SerializableRunnable("Set fast image processing") {
           public void run() {
             com.gemstone.gemfire.internal.cache.InitialImageOperation.slowImageProcessing = 0;
           }
         });
-      getLogWriter().info("after async nonblocking ops complete");
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("after async nonblocking ops complete");
     }
 
     // wait for GII to complete
-    DistributedTestCase.join(asyncGII, 30 * 1000, getLogWriter());
+    ThreadUtils.join(asyncGII, 30 * 1000);
     final long iiComplete = System.currentTimeMillis();
-    getLogWriter().info("Complete GetInitialImage at: " + System.currentTimeMillis());
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Complete GetInitialImage at: " + System.currentTimeMillis());
     if (getRegionAttributes().getScope().isGlobal()) {
       // wait for nonblocking operations to complete
-      DistributedTestCase.join(async, 30 * 1000, getLogWriter());
+      ThreadUtils.join(async, 30 * 1000);
     }
 
     if (async.exceptionOccurred()) {
-      fail("async failed", async.getException());
+      com.gemstone.gemfire.test.dunit.Assert.fail("async failed", async.getException());
     }
     if (asyncGII.exceptionOccurred()) {
-      fail("asyncGII failed", asyncGII.getException());
+      com.gemstone.gemfire.test.dunit.Assert.fail("asyncGII failed", asyncGII.getException());
     }
 
     // Locally destroy the region in vm0 so we know that they are not found by
@@ -4825,7 +4830,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           if (entriesDumped) return;
           entriesDumped = true;
 
-          LogWriter logger = getLogWriter();
+          LogWriter logger = com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter();
           logger.info("DUMPING Entries with values in VM that should have been destroyed:");
           for (int i = 5; i < NB1_NUM_ENTRIES; i += 6) {
             logger.info(i + "-->" +
@@ -4886,7 +4891,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
               }
             }
           }
-          getLogWriter().info(name + ": " + numConcurrent + " entries out of " + entryCount +
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(name + ": " + numConcurrent + " entries out of " + entryCount +
                               " were updated concurrently with getInitialImage");
           // make sure at least some of them were concurrent
           {
@@ -4999,7 +5004,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
             return "profile count never reached " + expectedProfiles;
           }
         };
-        DistributedTestCase.waitForCriterion(ev, 30 * 1000, 200, true);
+        Wait.waitForCriterion(ev, 30 * 1000, 200, true);
 
         // operate on every odd entry with different value, alternating between
         // updates, invalidates, and destroys. These operations are likely
@@ -5081,7 +5086,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
     if (!getRegionAttributes().getScope().isGlobal()) {
       // wait for nonblocking operations to complete
       try {
-        DistributedTestCase.join(async, 30 * 1000, getLogWriter());
+        ThreadUtils.join(async, 30 * 1000);
       } finally {
         vm2.invoke(new SerializableRunnable("Set fast image processing") {
           public void run() {
@@ -5089,16 +5094,16 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           }
         });
       }
-      getLogWriter().info("after async nonblocking ops complete");
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("after async nonblocking ops complete");
     }
 
     // wait for GII to complete
-    DistributedTestCase.join(asyncGII, 30 * 1000, getLogWriter());
+    ThreadUtils.join(asyncGII, 30 * 1000);
     final long iiComplete = System.currentTimeMillis();
-    getLogWriter().info("Complete GetInitialImage at: " + System.currentTimeMillis());
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Complete GetInitialImage at: " + System.currentTimeMillis());
     if (getRegionAttributes().getScope().isGlobal()) {
       // wait for nonblocking operations to complete
-      DistributedTestCase.join(async, 30 * 1000, getLogWriter());
+      ThreadUtils.join(async, 30 * 1000);
     }
     if (asyncGII.exceptionOccurred()) {
       throw new Error("asyncGII failed", asyncGII.getException());
@@ -5139,7 +5144,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           if (entriesDumped) return;
           entriesDumped = true;
 
-          LogWriter logger = getLogWriter();
+          LogWriter logger = com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter();
           logger.info("DUMPING Entries with values in VM that should have been destroyed:");
           for (int i = 5; i < NB1_NUM_ENTRIES; i += 6) {
             logger.info(i + "-->" +
@@ -5205,7 +5210,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
               }
             }
           }
-          getLogWriter().info(name + ": " + numConcurrent + " entries out of " + entryCount +
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(name + ": " + numConcurrent + " entries out of " + entryCount +
                               " were updated concurrently with getInitialImage");
 
           // [sumedh] Occasionally fails. Do these assertions really make sense?
@@ -5279,7 +5284,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
     // start asynchronous process that does updates to the data
     AsyncInvocation async = vm0.invokeAsync(new CacheSerializableRunnable("Do Nonblocking Operations") {
       public void run2() throws CacheException {
-        pause(200); // give the gii guy a chance to start
+        Wait.pause(200); // give the gii guy a chance to start
         Region region =
           getRootRegion().getSubregion(name);
 
@@ -5296,7 +5301,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
             return "profile count never became exactly " + expectedProfiles;
           }
         };
-        DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+        Wait.waitForCriterion(ev, 60 * 1000, 200, true);
 
         // since we want to force a GII while updates are flying, make sure
         // the other VM gets its CreateRegionResponse and starts its GII
@@ -5365,7 +5370,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
       }
     });
     
-    ExpectedException ex = addExpectedException("RegionDestroyedException");
+    IgnoredException ex = IgnoredException.addIgnoredException("RegionDestroyedException");
     try {
     // in the meantime, do the get initial image in vm2
     AsyncInvocation asyncGII = vm2.invokeAsync(new CacheSerializableRunnable("Create Mirrored Region") {
@@ -5404,9 +5409,9 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
       });
     if (getRegionAttributes().getScope().isGlobal()) {
       // wait for nonblocking operations to complete
-      DistributedTestCase.join(async, 30 * 1000, getLogWriter());
+      ThreadUtils.join(async, 30 * 1000);
       if (async.exceptionOccurred()) {
-        fail("async invocation failed", async.getException());
+        com.gemstone.gemfire.test.dunit.Assert.fail("async invocation failed", async.getException());
       }
 
       vm2.invoke(new SerializableRunnable("Set fast image processing") {
@@ -5414,22 +5419,22 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
             com.gemstone.gemfire.internal.cache.InitialImageOperation.slowImageProcessing = 0;
           }
         });
-      getLogWriter().info("after async nonblocking ops complete");
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("after async nonblocking ops complete");
     }
 
     // wait for GII to complete
     //getLogWriter().info("starting wait for GetInitialImage Completion");
-    DistributedTestCase.join(asyncGII, 30 * 1000, getLogWriter());
-    getLogWriter().info("Complete GetInitialImage at: " + System.currentTimeMillis());
+    ThreadUtils.join(asyncGII, 30 * 1000);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Complete GetInitialImage at: " + System.currentTimeMillis());
     if (getRegionAttributes().getScope().isGlobal()) {
       // wait for nonblocking operations to complete
-      DistributedTestCase.join(async, 30 * 1000, getLogWriter());
+      ThreadUtils.join(async, 30 * 1000);
     }
     if (async.exceptionOccurred()) {
-      fail("async failed", async.getException());
+      com.gemstone.gemfire.test.dunit.Assert.fail("async failed", async.getException());
     }
     if (asyncGII.exceptionOccurred()) {
-      fail("asyncGII failed", asyncGII.getException());
+      com.gemstone.gemfire.test.dunit.Assert.fail("asyncGII failed", asyncGII.getException());
     }
     } finally { 
       ex.remove();
@@ -5460,10 +5465,10 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
 
-    getLogWriter().info(name + ": before creates");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(name + ": before creates");
     vm0.invoke(create);
     vm1.invoke(create);
-    getLogWriter().info(name + ": after creates");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(name + ": after creates");
 
     final Object key = "KEY";
     final Object key2 = "KEY2";
@@ -5476,7 +5481,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           disconnectFromDS();
         }
       });
-    getLogWriter().info(name + ": after vm2 disconnect");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(name + ": after vm2 disconnect");
 
     try {
     vm0.invoke(new CacheSerializableRunnable("Put int") {
@@ -5485,7 +5490,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           IntWrapper.IntWrapperSerializer serializer =
             (IntWrapper.IntWrapperSerializer)
             DataSerializer.register(c);
-          getLogWriter().info("Registered serializer id:" + serializer.getId()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Registered serializer id:" + serializer.getId()
               + " class:" + c.getName());
 
           Region region = getRootRegion().getSubregion(name);
@@ -5495,7 +5500,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           assertTrue(serializer.wasInvoked);
         }
       });
-    getLogWriter().info(name + ": after vm0 put");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(name + ": after vm0 put");
 
     SerializableRunnable get = new CacheSerializableRunnable("Get int") {
         public void run2() throws CacheException {
@@ -5521,7 +5526,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
                 return "DataSerializer with id 120 was never registered";
               }
             };
-            DistributedTestCase.waitForCriterion(ev, 30 * 1000, 10, true);
+            Wait.waitForCriterion(ev, 30 * 1000, 10, true);
           } finally {
             InternalDataSerializer.GetMarker.WAIT_MS = savVal;
           }
@@ -5532,7 +5537,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
         }
       };
     vm1.invoke(get);
-    getLogWriter().info(name + ": after vm1 get");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(name + ": after vm1 get");
 
     // Make sure that VMs that connect after registration can get the
     // serializer
@@ -5546,7 +5551,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
         }
       });
     vm2.invoke(create);
-    getLogWriter().info(name + ": after vm2 create");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(name + ": after vm2 create");
     vm2.invoke(new CacheSerializableRunnable("Put long") {
         public void run2() throws CacheException {
           Region region = getRootRegion().getSubregion(name);
@@ -5560,9 +5565,9 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           assertTrue(serializer.wasInvoked);
         }
       });
-    getLogWriter().info(name + ": after vm2 put");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(name + ": after vm2 put");
     vm2.invoke(get);
-    getLogWriter().info(name + ": after vm2 get");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(name + ": after vm2 get");
 
     SerializableRunnable get2 = new CacheSerializableRunnable("Get long") {
         public void run2() throws CacheException {
@@ -5574,17 +5579,17 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
         }
       };
     vm0.invoke(get2);
-    getLogWriter().info(name + ": after vm0 get2");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(name + ": after vm0 get2");
     vm1.invoke(get2);
-    getLogWriter().info(name + ": after vm1 get2");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(name + ": after vm1 get2");
 
     // wait a little while for other netsearch requests to return
     // before unregistering the serializers that will be needed to process these
     // responses.
     } finally {
-    pause(1500);
+    Wait.pause(1500);
     unregisterAllSerializers();
-    getLogWriter().info(name + ": after unregister");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(name + ": after unregister");
     }
   }
 
@@ -5696,7 +5701,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
     // wait a little while for other netsearch requests to return
     // before unregistering the serializers that will be needed to process these
     // responses.
-    pause(1500);
+    Wait.pause(1500);
     unregisterAllSerializers();
     }
   }
@@ -5707,7 +5712,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
    * system.
    */
   private static void unregisterAllSerializers() {
-    unregisterAllDataSerializersFromAllVms();
+    DistributedTestUtils.unregisterAllDataSerializersFromAllVms();
     cleanupAllVms();
   }
 
@@ -6005,7 +6010,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
         return "waiting for entry event";
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 60 * 1000, 200, true);
     EntryEvent listenEvent = cdcl.getEntryEvent();
     assertNotNull("Cannot assert TX CacheListener Events with a null Entry Event", listenEvent);
     assertEquals(re, listenEvent.getRegion());
@@ -6071,7 +6076,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           getSystem().getLogWriter().info("testTXSimpleOps: Created region");
         }
         catch (CacheException e) {
-          fail("While creating region", e);
+          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", e);
         }
       }
     };
@@ -6088,7 +6093,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           getSystem().getLogWriter().info("testTXSimpleOps: Created Key");
         }
         catch (CacheException e) {
-          fail("While creating region", e);
+          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", e);
         }
       }
     };
@@ -6126,10 +6131,10 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
         assertEquals(commitWaits, dmStats.getCommitWaits());
       }
       getSystem().getLogWriter().info("testTXSimpleOps: Create/Put Value");
-      invokeInEveryVM(MultiVMRegionTestCase.class,
+      Invoke.invokeInEveryVM(MultiVMRegionTestCase.class,
                       "assertCacheCallbackEvents",
                       new Object[] {rgnName, txId, "key", null, "value"});
-      invokeInEveryVMRepeatingIfNecessary(new CacheSerializableRunnable("testTXSimpleOps: Verify Received Value") {
+      Invoke.invokeInEveryVMRepeatingIfNecessary(new CacheSerializableRunnable("testTXSimpleOps: Verify Received Value") {
         public void run2() {
           Region rgn1 = getRootRegion().getSubregion(rgnName);
           assertNotNull("Could not find entry for 'key'", rgn1.getEntry("key"));
@@ -6172,17 +6177,17 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           cdcL.assertCount(0, 1, 0, 0);
 
         }
-      });
+      }, getRepeatTimeoutMs());
 
       txMgr.begin();
       rgn.put("key", "value2");
       txId = txMgr.getTransactionId();
       txMgr.commit();
       getSystem().getLogWriter().info("testTXSimpleOps: Put(update) Value2");
-      invokeInEveryVM(MultiVMRegionTestCase.class,
+      Invoke.invokeInEveryVM(MultiVMRegionTestCase.class,
                       "assertCacheCallbackEvents",
                       new Object[] {rgnName, txId, "key", "value", "value2"});
-      invokeInEveryVMRepeatingIfNecessary(new CacheSerializableRunnable("testTXSimpleOps: Verify Received Value") {
+      Invoke.invokeInEveryVMRepeatingIfNecessary(new CacheSerializableRunnable("testTXSimpleOps: Verify Received Value") {
         public void run2() {
           Region rgn1 = getRootRegion().getSubregion(rgnName);
           assertNotNull("Could not find entry for 'key'", rgn1.getEntry("key"));
@@ -6215,7 +6220,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           cdcL.assertCount(0, 2, 0, 0);
 
         }
-      });
+      }, getRepeatTimeoutMs());
 
       txMgr.begin();
       rgn.invalidate("key");
@@ -6223,10 +6228,10 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
       txMgr.commit();
       getSystem().getLogWriter().info("testTXSimpleOps: invalidate key");
       // validate each of the CacheListeners EntryEvents
-      invokeInEveryVM(MultiVMRegionTestCase.class,
+      Invoke.invokeInEveryVM(MultiVMRegionTestCase.class,
                       "assertCacheCallbackEvents",
                       new Object[] {rgnName, txId, "key", "value2", null});
-      invokeInEveryVMRepeatingIfNecessary(new CacheSerializableRunnable("testTXSimpleOps: Verify Received Value") {
+      Invoke.invokeInEveryVMRepeatingIfNecessary(new CacheSerializableRunnable("testTXSimpleOps: Verify Received Value") {
         public void run2() {
           Region rgn1 = getRootRegion().getSubregion(rgnName);
           assertNotNull("Could not find entry for 'key'", rgn1.getEntry("key"));
@@ -6260,7 +6265,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           cdcL.assertCount(0, 2, 1, 0);
 
         }
-      });
+      }, getRepeatTimeoutMs());
 
       txMgr.begin();
       rgn.destroy("key");
@@ -6268,10 +6273,10 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
       txMgr.commit();
       getSystem().getLogWriter().info("testTXSimpleOps: destroy key");
       // validate each of the CacheListeners EntryEvents
-      invokeInEveryVM(MultiVMRegionTestCase.class,
+      Invoke.invokeInEveryVM(MultiVMRegionTestCase.class,
                       "assertCacheCallbackEvents",
                       new Object[] {rgnName, txId, "key", null, null});
-      invokeInEveryVMRepeatingIfNecessary(new CacheSerializableRunnable("testTXSimpleOps: Verify Received Value") {
+      Invoke.invokeInEveryVMRepeatingIfNecessary(new CacheSerializableRunnable("testTXSimpleOps: Verify Received Value") {
         public void run2() {
           Region rgn1 = getRootRegion().getSubregion(rgnName);
           assertTrue(!rgn1.containsKey("key"));
@@ -6302,7 +6307,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
             (CountingDistCacheListener) rgn1.getAttributes().getCacheListeners()[0];
           cdcL.assertCount(0, 2, 1, 1);
         }
-      });
+      }, getRepeatTimeoutMs());
 
     }
     catch(Exception e) {
@@ -6371,7 +6376,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           flushIfNecessary(rgn);
         }
         catch (CacheException e) {
-          fail("While creating region", e);
+          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", e);
         }
       }
     };
@@ -6593,7 +6598,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           getSystem().getLogWriter().info("testTXMultiRegion: Created region1");
         }
         catch (CacheException e) {
-          fail("While creating region", e);
+          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", e);
         }
       }
     };
@@ -6605,7 +6610,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           getSystem().getLogWriter().info("testTXMultiRegion: Created key");
         }
         catch (CacheException e) {
-          fail("While creating region", e);
+          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", e);
         }
       }
     };
@@ -6620,7 +6625,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           getSystem().getLogWriter().info("testTXMultiRegion: Created region2");
         }
         catch (CacheException e) {
-          fail("While creating region", e);
+          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", e);
         }
       }
     };
@@ -6633,7 +6638,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           getSystem().getLogWriter().info("testTXMultiRegion: Created Key");
         }
         catch (CacheException e) {
-          fail("While creating region", e);
+          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", e);
         }
       }
     };
@@ -6648,7 +6653,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           getSystem().getLogWriter().info("testTXMultiRegion: Created Region");
         }
         catch (CacheException e) {
-          fail("While creating region", e);
+          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", e);
         }
       }
     };
@@ -6661,7 +6666,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           getSystem().getLogWriter().info("testTXMultiRegion: Created Key");
         }
         catch (CacheException e) {
-          fail("While creating region", e);
+          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", e);
         }
       }
     };
@@ -7334,7 +7339,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           createRegion(rgnName, rgnAtts.create());
         }
         catch (CacheException e) {
-          fail("While creating region", e);
+          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", e);
         }
       }
     };
@@ -7350,7 +7355,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           createRegion(rgnName, rgnAtts.create());
         }
         catch (CacheException e) {
-          fail("While creating region", e);
+          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", e);
         }
       }
     };
@@ -7474,12 +7479,12 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           }
         }
         catch (CacheException e) {
-          fail("While creating region", e);
+          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", e);
         }
       }
     };
 
-    invokeInEveryVM(create);
+    Invoke.invokeInEveryVM(create);
     // make sure each op sequence has the correct affect transaction event
     // check C + C -> EX
     // check C + P -> C
@@ -7504,7 +7509,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
       txMgr.commit();
       // Make sure commit did not trigger callbacks
       //// callbackVal.reAssert();
-      invokeInEveryVM(new CacheSerializableRunnable("testTXAlgebra: check: C+P->C") {
+      Invoke.invokeInEveryVM(new CacheSerializableRunnable("testTXAlgebra: check: C+P->C") {
           public void run2() {
             Region rgn1 = getRootRegion().getSubregion(rgnName);
             CacheTransactionManager txMgr2 = getCache().getCacheTransactionManager();
@@ -7581,7 +7586,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
       assertTrue(!rgn.containsValueForKey("key"));
       txMgr.commit();
       //// callbackVal.reAssert();
-      invokeInEveryVM(new CacheSerializableRunnable("testTXAlgebra: check: C+DI->C (invalid value)") {
+      Invoke.invokeInEveryVM(new CacheSerializableRunnable("testTXAlgebra: check: C+DI->C (invalid value)") {
           public void run2() {
             Region rgn1 = getRootRegion().getSubregion(rgnName);
             CacheTransactionManager txMgr2 = getCache().getCacheTransactionManager();
@@ -7651,7 +7656,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
       assertTrue(rgn.containsKey("key"));
       assertTrue(!rgn.containsValueForKey("key"));
       txMgr.commit();
-      invokeInEveryVM(new CacheSerializableRunnable("testTXAlgebra: check: committed LI + TX DI-> NOOP") {
+      Invoke.invokeInEveryVM(new CacheSerializableRunnable("testTXAlgebra: check: committed LI + TX DI-> NOOP") {
           public void run2() {
             Region rgn1 = getRootRegion().getSubregion(rgnName);
             CacheTransactionManager txMgr2 = getCache().getCacheTransactionManager();
@@ -7681,7 +7686,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
         assertTrue(rgn.containsKey("key"));
         assertTrue(!rgn.containsValueForKey("key"));
         txMgr.commit();
-        invokeInEveryVM(new CacheSerializableRunnable("testTXAlgebra: check: TX LI + TX DI -> LI") {
+        Invoke.invokeInEveryVM(new CacheSerializableRunnable("testTXAlgebra: check: TX LI + TX DI -> LI") {
             public void run2() {
               Region rgn1 = getRootRegion().getSubregion(rgnName);
               CacheTransactionManager txMgr2 = getCache().getCacheTransactionManager();
@@ -7733,7 +7738,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
       assertTrue(!rgn.containsKey("key"));
       txMgr.commit();
       //// callbackVal.reAssert();
-      invokeInEveryVM(new CacheSerializableRunnable("testTXAlgebra: check: C+DD->DD") {
+      Invoke.invokeInEveryVM(new CacheSerializableRunnable("testTXAlgebra: check: C+DD->DD") {
           public void run2() {
             Region rgn1 = getRootRegion().getSubregion(rgnName);
             CacheTransactionManager txMgr2 = getCache().getCacheTransactionManager();
@@ -7769,13 +7774,13 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
       // Check C + LI -> C
       if (!getRegionAttributes().getDataPolicy().withReplication()) {
         // assume that remote regions have same mirror type as local
-        invokeInEveryVM(new CacheSerializableRunnable("testTXAlgebra: C+LI-> entry creation") {
+        Invoke.invokeInEveryVM(new CacheSerializableRunnable("testTXAlgebra: C+LI-> entry creation") {
             public void run2() {
               Region rgn1 = getRootRegion().getSubregion(rgnName);
               try {
                 rgn1.create("key", null);
               } catch (CacheException e) {
-                fail("While creating key", e);
+                com.gemstone.gemfire.test.dunit.Assert.fail("While creating key", e);
               }
             }
           });
@@ -7791,7 +7796,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
       assertTrue(!rgn.containsValueForKey("key"));
       txMgr.commit();
       //// callbackVal.reAssert();
-      invokeInEveryVM(new CacheSerializableRunnable("testTXAlgebra: check: C+LI->C (with value)") {
+      Invoke.invokeInEveryVM(new CacheSerializableRunnable("testTXAlgebra: check: C+LI->C (with value)") {
           public void run2() {
             Region rgn1 = getRootRegion().getSubregion(rgnName);
             CacheTransactionManager txMgr2 = getCache().getCacheTransactionManager();
@@ -7960,11 +7965,11 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
               try {
                 bridge.start();
               } catch (IOException ex) {
-                fail("While creating bridge", ex);
+                com.gemstone.gemfire.test.dunit.Assert.fail("While creating bridge", ex);
               }
             }
           } catch (CacheException ex) {
-            fail("While creating region", ex);
+            com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", ex);
           }
         }
       };
@@ -8047,7 +8052,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
             RegionFactory f = getCache().createRegionFactory(getRegionAttributes());
             CCRegion = (LocalRegion)f.create(name);
           } catch (CacheException ex) {
-            fail("While creating region", ex);
+            com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", ex);
           }
         }
       };
@@ -8065,7 +8070,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
             assertTrue("expected some event conflation", events>0);
           }
         } catch (CacheException e) {
-          fail("while performing concurrent operations", e);
+          com.gemstone.gemfire.test.dunit.Assert.fail("while performing concurrent operations", e);
         }
 //        } catch (InterruptedException e) {
 //          fail("someone interrupted my sleep");
@@ -8126,7 +8131,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
             long events = CCRegion.getCachePerfStats().getDeltaFailedUpdates();
             assertTrue("expected some failed deltas", events>0);
           } catch (CacheException e) {
-            fail("while performing concurrent operations", e);
+            com.gemstone.gemfire.test.dunit.Assert.fail("while performing concurrent operations", e);
           }
         }
       };
@@ -8170,11 +8175,11 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
       long start = System.currentTimeMillis();
       RegionVersionVector vm0vv = getVersionVector(vm0);
       long end = System.currentTimeMillis();
-      getLogWriter().info("version vector transmission took " + (end-start) + " ms");
-      getLogWriter().info("vm0 vector = " + vm0vv);
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("version vector transmission took " + (end-start) + " ms");
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("vm0 vector = " + vm0vv);
   
-      RegionVersionVector vm1vv = getVersionVector(vm1);    getLogWriter().info("vm1 vector = " + vm1vv);
-      RegionVersionVector vm2vv = getVersionVector(vm2);    getLogWriter().info("vm2 vector = " + vm2vv);
+      RegionVersionVector vm1vv = getVersionVector(vm1);    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("vm1 vector = " + vm1vv);
+      RegionVersionVector vm2vv = getVersionVector(vm2);    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("vm2 vector = " + vm2vv);
       
       Map<String, VersionTag> vm0Versions = (Map<String, VersionTag>)vm0.invoke(this.getClass(), "getCCRegionVersions");
       Map<String, VersionTag> vm1Versions = (Map<String, VersionTag>)vm1.invoke(this.getClass(), "getCCRegionVersions");
@@ -8235,7 +8240,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           boolean includeClear = true;
           doOpsLoop(msToRun, includeClear);
         } catch (CacheException e) {
-          fail("while performing concurrent operations", e);
+          com.gemstone.gemfire.test.dunit.Assert.fail("while performing concurrent operations", e);
         }
       }
     };
@@ -8250,7 +8255,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           f.setDiskSynchronous(syncDiskWrite);
           CCRegion = (LocalRegion)f.create(name);
         } catch (CacheException ex) {
-          fail("While creating region", ex);
+          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", ex);
         }
       }
     };
@@ -8349,7 +8354,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
             }
             CCRegion = (LocalRegion)f.create(name);
           } catch (CacheException ex) {
-            fail("While creating region", ex);
+            com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", ex);
           }
         }
       };
@@ -8365,7 +8370,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
         try {
           doOpsLoop(5000, true);
         } catch (CacheException e) {
-          fail("while performing concurrent operations", e);
+          com.gemstone.gemfire.test.dunit.Assert.fail("while performing concurrent operations", e);
         }
 //        } catch (InterruptedException e) {
 //          fail("someone interrupted my sleep");
@@ -8460,7 +8465,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
               sendSerialMessageToAll(); // flush the ops
             }
           } catch (CacheException ex) {
-            fail("While creating region", ex);
+            com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", ex);
           }
         }
       };
@@ -8483,7 +8488,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
               sendSerialMessageToAll(); // flush the ops
             }
           } catch (CacheException e) {
-            fail("while performing destroy operations", e);
+            com.gemstone.gemfire.test.dunit.Assert.fail("while performing destroy operations", e);
           }
 //          OSProcess.printStacks(0, getLogWriter(), false);
         }
@@ -8507,10 +8512,10 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
             }
           };
           try {
-            waitForCriterion(waitForExpiration, TombstoneService.REPLICATED_TOMBSTONE_TIMEOUT+10000, 1000, true);
+            Wait.waitForCriterion(waitForExpiration, TombstoneService.REPLICATED_TOMBSTONE_TIMEOUT+10000, 1000, true);
           } catch (AssertionFailedError e) {
             CCRegion.dumpBackingMap();
-            getLogWriter().info("tombstone service state: " + CCRegion.getCache().getTombstoneService());
+            com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("tombstone service state: " + CCRegion.getCache().getTombstoneService());
             throw e;
           }
         }
@@ -8539,7 +8544,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
                   + " tombstones left out of " + origCount + " initial tombstones";
               }
             };
-            waitForCriterion(waitForExpiration, TombstoneService.REPLICATED_TOMBSTONE_TIMEOUT+10000, 1000, true);
+            Wait.waitForCriterion(waitForExpiration, TombstoneService.REPLICATED_TOMBSTONE_TIMEOUT+10000, 1000, true);
             logger.debug("creating tombstones.  current count={}", CCRegion.getTombstoneCount());
             for (int i=0; i<numEntries; i++) {
               CCRegion.create("cckey" + i, i);
@@ -8562,10 +8567,10 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
             }
           } catch (AssertionFailedError e) {
             CCRegion.dumpBackingMap();
-            getLogWriter().info("tombstone service state: " + CCRegion.getCache().getTombstoneService());
+            com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("tombstone service state: " + CCRegion.getCache().getTombstoneService());
             throw e;
           } catch (CacheException e) {
-            fail("while performing create/destroy operations", e);
+            com.gemstone.gemfire.test.dunit.Assert.fail("while performing create/destroy operations", e);
           } finally {
             TombstoneService.GC_MEMORY_THRESHOLD = oldLimit;
           }
@@ -8600,7 +8605,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
               sendSerialMessageToAll(); // flush the ops
             }
           } catch (CacheException e) {
-            fail("while performing create operations", e);
+            com.gemstone.gemfire.test.dunit.Assert.fail("while performing create operations", e);
           }
         }
       });
@@ -8680,7 +8685,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
             }
             CCRegion = (LocalRegion)f.create(name);
           } catch (CacheException ex) {
-            fail("While creating region", ex);
+            com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", ex);
           }
         }
       };
@@ -8700,7 +8705,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
             assertTrue("expected some event conflation", events>0);
           }
         } catch (CacheException e) {
-          fail("while performing concurrent operations", e);
+          com.gemstone.gemfire.test.dunit.Assert.fail("while performing concurrent operations", e);
         }
       }
     };
@@ -8861,7 +8866,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
             }
             CCRegion = (LocalRegion)f.create(name);
           } catch (CacheException ex) {
-            fail("While creating region", ex);
+            com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", ex);
           }
         }
       };
@@ -8881,7 +8886,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
             assertTrue("expected some event conflation", events>0);
           }
         } catch (CacheException e) {
-          fail("while performing concurrent operations", e);
+          com.gemstone.gemfire.test.dunit.Assert.fail("while performing concurrent operations", e);
         }
       }
     };
@@ -8941,7 +8946,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           public void close() {
           }
           public Object load(LoaderHelper helper) throws CacheLoaderException {
-            getLogWriter().info("The test CacheLoader has been invoked for key '" + helper.getKey() + "'");
+            com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("The test CacheLoader has been invoked for key '" + helper.getKey() + "'");
             return "loadedValue";
           }
         });
@@ -8999,7 +9004,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
           }
           CCRegion = (LocalRegion)f.create(regionName);
         } catch (CacheException ex) {
-          fail("While creating region", ex);
+          com.gemstone.gemfire.test.dunit.Assert.fail("While creating region", ex);
         }
       }
     };
@@ -9029,7 +9034,7 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
         for (int i=0; i<100; i++) {
           RegionEntry entry = r.getRegionEntry("cckey"+i);
           int stamp = entry.getVersionStamp().getEntryVersion();
-          getLogWriter().info("checking key cckey" + i + " having version " + stamp + " entry=" + entry);
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("checking key cckey" + i + " having version " + stamp + " entry=" + entry);
           assertEquals(2, stamp);
           assertEquals(result.get("cckey"+i), i+1);
         }
@@ -9052,12 +9057,21 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
         }
       }
       if (!failed) {
-        fail("asyncInvocation 0 returned exception", e);
+        com.gemstone.gemfire.test.dunit.Assert.fail("asyncInvocation 0 returned exception", e);
       }
     }
     return failed;
   }
   
+  /**
+   * The number of milliseconds to try repeating validation code in the
+   * event that AssertionFailedError is thrown.  For ACK scopes, no
+   * repeat should be necessary.
+   */
+  protected long getRepeatTimeoutMs() {
+    return 0;
+  }
+  
   public static void assertNoClearTimeouts() {
     // if there are timeouts on waiting for version vector dominance then
     // some operation was not properly recorded in the VM throwing this
@@ -9087,13 +9101,13 @@ public abstract class MultiVMRegionTestCase extends RegionTestCase {
   public static byte[] getCCRegionVersionVector() throws Exception {
     Object id = getMemberId();
     int vm = VM.getCurrentVMNum();
-    getLogWriter().info("vm" + vm + " with id " + id + " copying " + CCRegion.getVersionVector().fullToString());
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("vm" + vm + " with id " + id + " copying " + CCRegion.getVersionVector().fullToString());
     RegionVersionVector vector = CCRegion.getVersionVector().getCloneForTransmission();
-    getLogWriter().info("clone is " + vector);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("clone is " + vector);
     HeapDataOutputStream dos = new HeapDataOutputStream(3000, Version.CURRENT);
     DataSerializer.writeObject(vector, dos);
     byte[] bytes = dos.toByteArray();
-    getLogWriter().info("serialized size is " + bytes.length);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("serialized size is " + bytes.length);
     return bytes;
   }
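
For readers tracking the API moves above: the waiting and joining helpers that used to be inherited from DistributedTestCase now live on Wait, ThreadUtils and the dunit Assert class. A minimal sketch of the resulting pattern inside a dunit test method (illustrative only; the region, key and condition are made up, and async stands for an AsyncInvocation obtained earlier from vm.invokeAsync(...)):

import com.gemstone.gemfire.cache.Region;
import com.gemstone.gemfire.test.dunit.Assert;
import com.gemstone.gemfire.test.dunit.AsyncInvocation;
import com.gemstone.gemfire.test.dunit.ThreadUtils;
import com.gemstone.gemfire.test.dunit.Wait;
import com.gemstone.gemfire.test.dunit.WaitCriterion;

  private void awaitKeyThenJoin(final Region region, final Object key, AsyncInvocation async) {
    WaitCriterion ev = new WaitCriterion() {
      public boolean done() {
        return region.containsKey(key);                 // illustrative condition
      }
      public String description() {
        return "waiting for entry " + key + " to appear";
      }
    };
    Wait.waitForCriterion(ev, 60 * 1000, 200, true);    // was DistributedTestCase.waitForCriterion(...)
    ThreadUtils.join(async, 30 * 1000);                 // was DistributedTestCase.join(async, 30 * 1000, getLogWriter())
    if (async.exceptionOccurred()) {
      Assert.fail("async invocation failed", async.getException());  // fail(String, Throwable) keeps the cause
    }
  }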
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/OffHeapLRUEvictionControllerDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/OffHeapLRUEvictionControllerDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/OffHeapLRUEvictionControllerDUnitTest.java
index 8f3573b..0caa7bd 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/OffHeapLRUEvictionControllerDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/OffHeapLRUEvictionControllerDUnitTest.java
@@ -23,6 +23,7 @@ import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.OffHeapTestUtil;
 import com.gemstone.gemfire.internal.cache.control.InternalResourceManager.ResourceType;
 import com.gemstone.gemfire.internal.cache.lru.HeapEvictor;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 
 /**
@@ -40,7 +41,7 @@ public class OffHeapLRUEvictionControllerDUnitTest extends
   }
 
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
     SerializableRunnable checkOrphans = new SerializableRunnable() {
 
       @Override
@@ -50,12 +51,8 @@ public class OffHeapLRUEvictionControllerDUnitTest extends
         }
       }
     };
-    invokeInEveryVM(checkOrphans);
-    try {
-      checkOrphans.run();
-    } finally {
-      super.tearDown2();
-    }
+    Invoke.invokeInEveryVM(checkOrphans);
+    checkOrphans.run();
   }
 
   @Override
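
The tearDown2() override above (and the identical one in PartitionedRegionOffHeapDUnitTest further down) is converted to the new preTearDownCacheTestCase() hook, which CacheTestCase apparently invokes before its own tear-down, so the override no longer chains to super. In sketch form, inside a CacheTestCase subclass (the per-VM check body is a placeholder, not the real off-heap orphan check):

import com.gemstone.gemfire.test.dunit.Invoke;
import com.gemstone.gemfire.test.dunit.SerializableRunnable;

  @Override
  protected final void preTearDownCacheTestCase() throws Exception {
    SerializableRunnable perVmCheck = new SerializableRunnable() {
      @Override
      public void run() {
        // placeholder: whatever per-VM validation the test needs before tear-down
      }
    };
    Invoke.invokeInEveryVM(perVmCheck);  // run the check in every remote dunit VM
    perVmCheck.run();                    // and in the controller VM; no super.tearDown2() call
  }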

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PRBucketSynchronizationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PRBucketSynchronizationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PRBucketSynchronizationDUnitTest.java
index c81a55f..a68bd46 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PRBucketSynchronizationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PRBucketSynchronizationDUnitTest.java
@@ -40,9 +40,14 @@ import com.gemstone.gemfire.internal.cache.VMCachedDeserializable;
 import com.gemstone.gemfire.internal.cache.versions.VMVersionTag;
 import com.gemstone.gemfire.internal.cache.versions.VersionSource;
 import com.gemstone.gemfire.internal.cache.versions.VersionTag;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * concurrency-control tests for client/server
@@ -80,8 +85,8 @@ public class PRBucketSynchronizationDUnitTest extends CacheTestCase {
    * did not see secondary buckets perform a delta-GII.
    */
   public void doBucketsSyncOnPrimaryLoss(TestType typeOfTest) {
-    addExpectedException("killing member's ds");
-    addExpectedException("killing member's ds");
+    IgnoredException.addIgnoredException("killing member's ds");
+    IgnoredException.addIgnoredException("killing member's ds");
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
@@ -127,7 +132,7 @@ public class PRBucketSynchronizationDUnitTest extends CacheTestCase {
       // Now we crash the primary bucket owner simulating death during distribution.
       // The backup buckets should perform a delta-GII for the lost member and
       // get back in sync
-      crashDistributedSystem(primaryOwner);
+      DistributedTestUtils.crashDistributedSystem(primaryOwner);
   
       for (VM vm: verifyVMs) {
         verifySynchronized(vm, primaryID);
@@ -195,7 +200,7 @@ public class PRBucketSynchronizationDUnitTest extends CacheTestCase {
         tag.setEntryVersion(1);
         tag.setIsRemoteForTesting();
         EntryEventImpl event = EntryEventImpl.create(bucket, Operation.CREATE, "Object3", true, primary, true, false);        
-        getLogWriter().info("applying this event to the cache: " + event);
+        LogWriterUtils.getLogWriter().info("applying this event to the cache: " + event);
         event.setNewValue(new VMCachedDeserializable("value3", 12));
         event.setVersionTag(tag);
         bucket.getRegionMap().basicPut(event, System.currentTimeMillis(), true, false, null, false, false);
@@ -210,12 +215,12 @@ public class PRBucketSynchronizationDUnitTest extends CacheTestCase {
         event = EntryEventImpl.create(bucket, Operation.CREATE, "Object5", true, primary, true, false);
         event.setNewValue(Token.TOMBSTONE);
         event.setVersionTag(tag);
-        getLogWriter().info("applying this event to the cache: " + event);
+        LogWriterUtils.getLogWriter().info("applying this event to the cache: " + event);
         bucket.getRegionMap().basicPut(event, System.currentTimeMillis(), true, false, null, false, false);
         event.release();
 
         bucket.dumpBackingMap();
-        getLogWriter().info("bucket version vector is now " + bucket.getVersionVector().fullToString());
+        LogWriterUtils.getLogWriter().info("bucket version vector is now " + bucket.getVersionVector().fullToString());
         assertTrue("bucket should hold entry Object3 now", bucket.containsKey("Object3"));
         return true;
       }
@@ -227,17 +232,17 @@ public class PRBucketSynchronizationDUnitTest extends CacheTestCase {
       public Object call() throws Exception {
         PartitionedRegion pr = (PartitionedRegion)TestRegion;
         final BucketRegion bucket = pr.getDataStore().getLocalBucketById(0);
-        waitForCriterion(new WaitCriterion() {
+        Wait.waitForCriterion(new WaitCriterion() {
           String waitingFor = "primary is still in membership view: " + crashedMember;
           boolean dumped = false;
           public boolean done() {
             if (TestRegion.getCache().getDistributionManager().isCurrentMember(crashedMember)) {
-              getLogWriter().info(waitingFor);
+              LogWriterUtils.getLogWriter().info(waitingFor);
               return false;
             }
             if (!TestRegion.containsKey("Object3")) {
               waitingFor = "entry for Object3 not found";
-              getLogWriter().info(waitingFor);
+              LogWriterUtils.getLogWriter().info(waitingFor);
               return false;
             }
             RegionEntry re = bucket.getRegionMap().getEntry("Object5");
@@ -247,7 +252,7 @@ public class PRBucketSynchronizationDUnitTest extends CacheTestCase {
                 bucket.dumpBackingMap();
               }
               waitingFor = "entry for Object5 not found";
-              getLogWriter().info(waitingFor);
+              LogWriterUtils.getLogWriter().info(waitingFor);
               return false;
             }
             if (!re.isTombstone()) {
@@ -256,7 +261,7 @@ public class PRBucketSynchronizationDUnitTest extends CacheTestCase {
                 bucket.dumpBackingMap();
               }
               waitingFor = "Object5 is not a tombstone but should be: " + re;
-              getLogWriter().info(waitingFor);
+              LogWriterUtils.getLogWriter().info(waitingFor);
               return false;
             }
             return true;
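
In the synchronization tests above, the old addExpectedException(...) calls become IgnoredException.addIgnoredException(...), and the inherited crash helper becomes DistributedTestUtils.crashDistributedSystem(vm). The returned IgnoredException handle can be removed once the noisy phase is over, as the MultiVMRegionTestCase hunk earlier in this message does. A condensed sketch (the suppressed string and the VM variable mirror the hunks above; the verification step is elided):

import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
import com.gemstone.gemfire.test.dunit.IgnoredException;
import com.gemstone.gemfire.test.dunit.VM;

  void crashPrimaryQuietly(VM primaryOwner) {
    IgnoredException suppressed = IgnoredException.addIgnoredException("killing member's ds");
    try {
      DistributedTestUtils.crashDistributedSystem(primaryOwner);  // forcibly kill the member's ds
      // ... verify that the surviving members re-synchronize ...
    } finally {
      suppressed.remove();  // stop ignoring the expected log noise
    }
  }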

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PartitionedRegionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PartitionedRegionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PartitionedRegionDUnitTest.java
index 234203f..0408eb7 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PartitionedRegionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PartitionedRegionDUnitTest.java
@@ -41,6 +41,7 @@ import com.gemstone.gemfire.cache.util.CacheListenerAdapter;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionException;
 import com.gemstone.gemfire.internal.logging.InternalLogWriter;
 import com.gemstone.gemfire.internal.logging.PureLogWriter;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
@@ -187,7 +188,7 @@ public class PartitionedRegionDUnitTest extends MultiVMRegionTestCase {
         fact.addCacheListener(new CacheListenerAdapter(){
           @Override
           public void afterInvalidate(EntryEvent event) {
-            getLogWriter().info("afterInvalidate invoked with " + event);
+            com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("afterInvalidate invoked with " + event);
             InvalidateInvoked = true;
           }
         });
@@ -227,7 +228,7 @@ public class PartitionedRegionDUnitTest extends MultiVMRegionTestCase {
             
             createRegion(name, "INCOMPATIBLE_ROOT", getRegionAttributes());
           } catch (CacheException ex) {
-            fail("While creating Partitioned region", ex);
+            Assert.fail("While creating Partitioned region", ex);
           }
         }
       });
@@ -245,7 +246,7 @@ public class PartitionedRegionDUnitTest extends MultiVMRegionTestCase {
             }
 
           } catch (CacheException ex) {
-            fail("While creating Partitioned Region", ex);
+            Assert.fail("While creating Partitioned Region", ex);
           }
         }
       });
@@ -259,7 +260,7 @@ public class PartitionedRegionDUnitTest extends MultiVMRegionTestCase {
           try {
             createRegion(regionName, "root", getRegionAttributes());
           } catch (CacheException ex) {
-            fail("While creating Partitioned region", ex);
+            Assert.fail("While creating Partitioned region", ex);
           }
         }
     };
@@ -293,7 +294,7 @@ public class PartitionedRegionDUnitTest extends MultiVMRegionTestCase {
             }
           }
           catch (Exception ex) {
-            fail("while creating or populating partitioned region", ex);
+            Assert.fail("while creating or populating partitioned region", ex);
           }
           finally {
             if (region != null) {
@@ -360,7 +361,7 @@ public class PartitionedRegionDUnitTest extends MultiVMRegionTestCase {
               }
               catch (Exception ex2) {
               }
-              fail("Unexpected exception", ex);
+              Assert.fail("Unexpected exception", ex);
             }
           }
       });
@@ -496,7 +497,7 @@ public class PartitionedRegionDUnitTest extends MultiVMRegionTestCase {
           try {
             createRegion(regionName, "root", getRegionAttributes());
           } catch (CacheException ex) {
-            fail("While creating Partitioned region", ex);
+            Assert.fail("While creating Partitioned region", ex);
           }
         }
     };

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PartitionedRegionOffHeapDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PartitionedRegionOffHeapDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PartitionedRegionOffHeapDUnitTest.java
index 455cefc..efd42ad 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PartitionedRegionOffHeapDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PartitionedRegionOffHeapDUnitTest.java
@@ -22,6 +22,7 @@ import com.gemstone.gemfire.cache.AttributesFactory;
 import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.cache.OffHeapTestUtil;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 
 /**
@@ -37,7 +38,7 @@ public class PartitionedRegionOffHeapDUnitTest extends PartitionedRegionDUnitTes
   }
   
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
     SerializableRunnable checkOrphans = new SerializableRunnable() {
 
       @Override
@@ -47,12 +48,8 @@ public class PartitionedRegionOffHeapDUnitTest extends PartitionedRegionDUnitTes
         }
       }
     };
-    invokeInEveryVM(checkOrphans);
-    try {
-      checkOrphans.run();
-    } finally {
-      super.tearDown2();
-    }
+    Invoke.invokeInEveryVM(checkOrphans);
+    checkOrphans.run();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PreloadedRegionTestCase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PreloadedRegionTestCase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PreloadedRegionTestCase.java
index 97bae11..86dfcc7 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PreloadedRegionTestCase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PreloadedRegionTestCase.java
@@ -22,6 +22,7 @@ import com.gemstone.gemfire.cache.DataPolicy;
 import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.cache.Scope;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
@@ -62,7 +63,7 @@ public class PreloadedRegionTestCase extends MultiVMRegionTestCase {
           getSystem().getLogWriter().info("testDistributedCreate: Created Region");
         }
         catch (CacheException e) {
-          fail("While creating region", e);
+          Assert.fail("While creating region", e);
         }
       }
     };
@@ -76,7 +77,7 @@ public class PreloadedRegionTestCase extends MultiVMRegionTestCase {
           getSystem().getLogWriter().info("testDistributedCReate: Created Key");
         }
         catch (CacheException e) {
-          fail("While creating region", e);
+          Assert.fail("While creating region", e);
         }
       }
     };
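
Where run2() catches the checked CacheException rather than letting it propagate, the tests re-fail with the dunit Assert.fail(String, Throwable) overload, which replaces the fail(message, cause) helper that used to be inherited and keeps the original exception as the cause of the reported failure. A typical shape, sketched with made-up region names:

import com.gemstone.gemfire.cache.CacheException;
import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
import com.gemstone.gemfire.test.dunit.Assert;
import com.gemstone.gemfire.test.dunit.SerializableRunnable;

  SerializableRunnable create = new CacheSerializableRunnable("Create Region") {
    public void run2() {
      try {
        createRegion("exampleRegion", "root", getRegionAttributes());  // helper from the test base class
      } catch (CacheException ex) {
        Assert.fail("While creating region", ex);  // preserves ex as the failure's cause
      }
    }
  };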

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PutAllCallBkRemoteVMDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PutAllCallBkRemoteVMDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PutAllCallBkRemoteVMDUnitTest.java
index ac818f3..caa8333 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PutAllCallBkRemoteVMDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/PutAllCallBkRemoteVMDUnitTest.java
@@ -42,7 +42,10 @@ import com.gemstone.gemfire.cache.util.CacheWriterAdapter;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  *
@@ -81,15 +84,16 @@ public class PutAllCallBkRemoteVMDUnitTest extends DistributedTestCase {
       VM vm1 = host.getVM(1);
       vm0.invoke(PutAllCallBkRemoteVMDUnitTest.class, "createCacheForVM0");
       vm1.invoke(PutAllCallBkRemoteVMDUnitTest.class, "createCacheForVM1");
-      getLogWriter().info("Cache created successfully");
+      LogWriterUtils.getLogWriter().info("Cache created successfully");
     }
     
-    public void tearDown2(){
-        Host host = Host.getHost(0);
-        VM vm0 = host.getVM(0);
-        VM vm1 = host.getVM(1);
-        vm0.invoke(PutAllCallBkRemoteVMDUnitTest.class, "closeCache");
-        vm1.invoke(PutAllCallBkRemoteVMDUnitTest.class, "closeCache");
+    @Override
+    protected final void preTearDown() throws Exception {
+      Host host = Host.getHost(0);
+      VM vm0 = host.getVM(0);
+      VM vm1 = host.getVM(1);
+      vm0.invoke(PutAllCallBkRemoteVMDUnitTest.class, "closeCache");
+      vm1.invoke(PutAllCallBkRemoteVMDUnitTest.class, "closeCache");
     }
     
     public static synchronized void createCacheForVM0(){
@@ -167,7 +171,7 @@ public class PutAllCallBkRemoteVMDUnitTest extends DistributedTestCase {
                 }catch (Exception ex){
                     throw new RuntimeException("exception putting entries", ex);
                 }
-                getLogWriter().info("****************paperRegion.get(afterCreate)***************"+paperRegion.get("afterCreate"));
+                LogWriterUtils.getLogWriter().info("****************paperRegion.get(afterCreate)***************"+paperRegion.get("afterCreate"));
 
                 WaitCriterion ev = new WaitCriterion() {
                   public boolean done() {
@@ -184,7 +188,7 @@ public class PutAllCallBkRemoteVMDUnitTest extends DistributedTestCase {
                     return "Waiting for event";
                   }
                 };
-                DistributedTestCase.waitForCriterion(ev, 3000, 200, true);
+                Wait.waitForCriterion(ev, 3000, 200, true);
             }
         });
         
@@ -337,9 +341,9 @@ public class PutAllCallBkRemoteVMDUnitTest extends DistributedTestCase {
             if(counter==null) counter = new Integer(1);
             paperRegion.put("afterCreate",new Integer(counter.intValue()+1));
             
-            getLogWriter().info("In afterCreate"+putAllcounter);
+            LogWriterUtils.getLogWriter().info("In afterCreate"+putAllcounter);
             if(putAllcounter == forCreate){
-                getLogWriter().info("performingtrue");
+                LogWriterUtils.getLogWriter().info("performingtrue");
                 afterCreate = true;
             }
             try{
@@ -350,7 +354,7 @@ public class PutAllCallBkRemoteVMDUnitTest extends DistributedTestCase {
                 
             }
             notified = true;
-            getLogWriter().info("*******afterCreate***** Key :"+event.getKey()+ " Value :"+event.getNewValue());
+            LogWriterUtils.getLogWriter().info("*******afterCreate***** Key :"+event.getKey()+ " Value :"+event.getNewValue());
         }
         
         public void afterUpdate(EntryEvent event){
@@ -358,9 +362,9 @@ public class PutAllCallBkRemoteVMDUnitTest extends DistributedTestCase {
             Integer counter = (Integer)paperRegion.get("afterUpdate");
             if(counter==null) counter = new Integer(1);
             paperRegion.put("afterUpdate",new Integer(counter.intValue()+1));
-            getLogWriter().info("In afterUpdate"+afterUpdateputAllcounter);
+            LogWriterUtils.getLogWriter().info("In afterUpdate"+afterUpdateputAllcounter);
             if(afterUpdateputAllcounter == forUpdate){
-                getLogWriter().info("performingtrue afterUpdate");
+                LogWriterUtils.getLogWriter().info("performingtrue afterUpdate");
                 afterUpdate = true;
             }
             try{
@@ -373,7 +377,7 @@ public class PutAllCallBkRemoteVMDUnitTest extends DistributedTestCase {
             
             notified = true;
             
-            getLogWriter().info("*******afterUpdate***** Key :"+event.getKey()+ " Value :"+event.getNewValue());
+            LogWriterUtils.getLogWriter().info("*******afterUpdate***** Key :"+event.getKey()+ " Value :"+event.getNewValue());
             
         }
     }
@@ -383,15 +387,15 @@ public class PutAllCallBkRemoteVMDUnitTest extends DistributedTestCase {
             Integer counter = (Integer)paperRegion.get("beforeCreate");
             if(counter==null) counter = new Integer(1);
             paperRegion.put("beforeCreate",new Integer(counter.intValue()+1));
-            getLogWriter().info("*******BeforeCreate***** event="+event);
+            LogWriterUtils.getLogWriter().info("*******BeforeCreate***** event="+event);
         }
         
         public void beforeUpdate(EntryEvent event) {
             Integer counter = (Integer)paperRegion.get("beforeUpdate");
             if(counter==null) counter = new Integer(1);
             paperRegion.put("beforeUpdate",new Integer(counter.intValue()+1));
-            getLogWriter().info("In beforeUpdate"+beforeUpdateputAllcounter);
-            getLogWriter().info("*******BeforeUpdate***** event="+event);
+            LogWriterUtils.getLogWriter().info("In beforeUpdate"+beforeUpdateputAllcounter);
+            LogWriterUtils.getLogWriter().info("*******BeforeUpdate***** event="+event);
         }
     }
 }// end of test class
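
Taken together with the cache-test changes above, this message shows the tear-down hooks that replace tearDown2(): preTearDown() for direct DistributedTestCase subclasses (as in the class just above), and preTearDownCacheTestCase()/postTearDownCacheTestCase() for CacheTestCase subclasses (as in the CommandTestBase change below). The DistributedTestCase flavor, condensed to a sketch of the class above:

import com.gemstone.gemfire.test.dunit.Host;
import com.gemstone.gemfire.test.dunit.VM;

  @Override
  protected final void preTearDown() throws Exception {
    // per-test cleanup that used to live in tearDown2(); the framework's own
    // tear-down runs afterwards, so there is no super.tearDown2() to call
    Host host = Host.getHost(0);
    host.getVM(0).invoke(PutAllCallBkRemoteVMDUnitTest.class, "closeCache");
    host.getVM(1).invoke(PutAllCallBkRemoteVMDUnitTest.class, "closeCache");
  }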



[60/62] [abbrv] incubator-geode git commit: Merge branch 'develop' into feature/GEODE-17

Posted by je...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c39f8a5f/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/security/CommandTestBase.java
----------------------------------------------------------------------
diff --cc gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/security/CommandTestBase.java
index a7e8139,0000000..3c7b0da
mode 100644,000000..100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/security/CommandTestBase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/security/CommandTestBase.java
@@@ -1,602 -1,0 +1,603 @@@
 +package com.gemstone.gemfire.management.internal.security;
 +
 +import java.io.IOException;
 +import java.io.PrintStream;
 +import java.io.PrintWriter;
 +import java.io.StringWriter;
 +import java.net.InetAddress;
 +import java.net.UnknownHostException;
 +import java.util.Map;
 +import java.util.Properties;
 +import java.util.Set;
 +import java.util.regex.Matcher;
 +import java.util.regex.Pattern;
 +
++import com.gemstone.gemfire.management.internal.cli.HeadlessGfsh;
++import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 +import util.TestException;
 +
 +import com.gemstone.gemfire.cache.Cache;
 +import com.gemstone.gemfire.cache30.CacheTestCase;
 +import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 +import com.gemstone.gemfire.internal.AvailablePortHelper;
 +import com.gemstone.gemfire.management.ManagementService;
 +import com.gemstone.gemfire.management.internal.cli.CommandManager;
 +import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
 +import com.gemstone.gemfire.management.internal.cli.parser.CommandTarget;
 +import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
 +import com.gemstone.gemfire.management.internal.cli.shell.Gfsh;
 +import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
 +
- import dunit.Host;
- import dunit.SerializableCallable;
- import dunit.SerializableRunnable;
++import com.gemstone.gemfire.test.dunit.Host;
++import com.gemstone.gemfire.test.dunit.SerializableCallable;
++import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 +
 +/**
 + * Base class for all the CLI/gfsh command dunit tests. Derived from CliCommandTestBase, but it uses
 + * HeadlessGfsh instead of TestableGfsh.
 + * 
 + * @author Tushar Khairnar
 + */
 +public class CommandTestBase extends CacheTestCase {
 +  
 +  private static final long serialVersionUID = 1L;
 +
 +  protected static final String USE_HTTP_SYSTEM_PROPERTY = "useHTTP";
 +
 +  private ManagementService managementService;
 +  
 +  private transient HeadlessGfsh shell;
 +
 +  private boolean useHttpOnConnect = Boolean.getBoolean("useHTTP");
 +
 +  private int httpPort;
 +  private int jmxPort;
 +
 +  private String jmxHost;
 +  
 +  protected String securityFile = null;
 +  protected String userName = null;
 +  protected String password = null;
 +  protected String commandError = null;
 +  
 +  public CommandTestBase(String name) {
 +    super(name);
 +  }
 +
 +  public void setUp() throws Exception {
 +    super.setUp();
 +  }
 +
 +  @Override
-   public void tearDown2() throws Exception {
++  public void postTearDownCacheTestCase() throws Exception {
 +    destroyDefaultSetup();
-     super.tearDown2();
 +  }
 +  
 +  protected void setUseHttpOnConnect(boolean useHttpOnConnect){
 +    this.useHttpOnConnect = useHttpOnConnect;
 +  }
 +  
 +  /**
 +   * Create all of the components necessary for the default setup. The provided properties will be used when creating
 +   * the default cache. This will create GFSH in the controller VM (VM[4]) (no cache) and the manager in VM[0] (with
 +   * cache). When adding regions, functions, keys, whatever to your cache for tests, you'll need to use
 +   * Host.getHost(0).getVM(0).invoke(new SerializableRunnable() { public void run() { ... } } in order to have this
 +   * setup run in the same VM as the manager.
 +   * <p/>
 +   * @param props the Properties used when creating the cache for this default setup.
 +   * @return the default testable GemFire shell.
 +   */
 +  @SuppressWarnings("serial")
 +  protected final HeadlessGfsh createDefaultSetup(final Properties props) {
 +    Object[] result = (Object[]) Host.getHost(0).getVM(0).invoke(new SerializableCallable() {
 +      public Object call() {
 +        final Object[] result = new Object[3];
 +        final Properties localProps = (props != null ? props : new Properties());
 +
 +        try {
 +          jmxHost = InetAddress.getLocalHost().getHostName();
 +        }
 +        catch (UnknownHostException ignore) {
 +          jmxHost = "localhost";
 +        }
 +
 +        if (!localProps.containsKey(DistributionConfig.NAME_NAME)) {
 +          localProps.setProperty(DistributionConfig.NAME_NAME, "Manager");
 +        }
 +
 +        final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(2);
 +
 +        jmxPort = ports[0];
 +        httpPort = ports[1];
 +
 +        localProps.setProperty(DistributionConfig.JMX_MANAGER_NAME, "true");
 +        localProps.setProperty(DistributionConfig.JMX_MANAGER_START_NAME, "true");
 +        localProps.setProperty(DistributionConfig.JMX_MANAGER_BIND_ADDRESS_NAME, String.valueOf(jmxHost));
 +        localProps.setProperty(DistributionConfig.JMX_MANAGER_PORT_NAME, String.valueOf(jmxPort));
 +        localProps.setProperty(DistributionConfig.HTTP_SERVICE_PORT_NAME, String.valueOf(httpPort));
 +
 +        getSystem(localProps);
 +        verifyManagementServiceStarted(getCache());
 +
 +        result[0] = jmxHost;
 +        result[1] = jmxPort;
 +        result[2] = httpPort;
 +
 +        return result;
 +      }
 +    });
 +
 +    this.jmxHost = (String) result[0];
 +    this.jmxPort = (Integer) result[1];
 +    this.httpPort = (Integer) result[2];
 +
 +    return defaultShellConnect();
 +  }
 +  
 +  protected boolean useHTTPByTest(){
 +    return false;
 +  }
 +
 +  /**
 +   * Destroy all of the components created for the default setup.
 +   */
 +  @SuppressWarnings("serial")
 +  protected final void destroyDefaultSetup() {
 +    if (this.shell != null) {
 +      
 +      if(shell.isConnectedAndReady()){
 +        executeCommand(shell, "disconnect");
 +      }
 +      
 +      // Note: HeadlessGfsh doesn't use Launcher & code that does System.exit()
 +      // for gfsh is in Launcher. This is just to ensure cleanup.
 +      executeCommand(shell, "exit");
 +
 +      this.shell.terminate();
 +      this.shell = null;
 +    }
 +
 +    disconnectAllFromDS();
 +
 +    Host.getHost(0).getVM(0).invoke(new SerializableRunnable() {
 +      public void run() {
 +        verifyManagementServiceStopped();
 +      }
 +    });
 +  }
 +
 +  /**
 +   * Start the default management service using the provided Cache.
 +   * 
 +   * @param cache
 +   *          Cache to use when creating the management service
 +   */
 +  private void verifyManagementServiceStarted(Cache cache) {
 +    assert(cache != null);
 +
 +    this.managementService = ManagementService.getExistingManagementService(cache);
 +    assertNotNull(this.managementService);
 +    assertTrue(this.managementService.isManager());
 +    assertTrue(checkIfCommandsAreLoadedOrNot());
 +  }
 +  
 +  public static boolean checkIfCommandsAreLoadedOrNot(){          
 +    CommandManager manager;
 +    try {
 +      manager = CommandManager.getInstance();
 +      Map<String,CommandTarget> commands = manager.getCommands();
 +      Set set = commands.keySet();
 +      if(commands.size() < 1 ){
 +        return false;
 +      }
 +      return true;
 +    } catch (ClassNotFoundException | IOException e) {
 +      throw new RuntimeException("Could not load commands", e);
 +    }                
 +}
 +
 +  /**
 +   * Stop the default management service.
 +   */
 +  private void verifyManagementServiceStopped() {
 +    if (this.managementService != null) {
 +      assertFalse(this.managementService.isManager());
 +      this.managementService = null;
 +    }
 +  }
 +
 +  /**
 +   * Connect the default shell to the default JMX server.
 +   * 
 +   * @return The default shell.
 +   */
 +  protected HeadlessGfsh defaultShellConnect() {
 +    HeadlessGfsh shell = getDefaultShell();
 +    shellConnect(this.jmxHost, this.jmxPort, this.httpPort, shell);
 +    return shell;
 +  }
 +
 +  /**
 +   * Connect a shell to the JMX server at the given host and port
 +   * 
 +   * @param host
 +   *          Host of the JMX server
 +   * @param jmxPort
 +   *          Port of the JMX server
 +   * @param shell
 +   *          Shell to connect
 +   */
 +  protected void shellConnect(final String host, final int jmxPort, final int httpPort, HeadlessGfsh shell) {
 +    assert(host != null);
 +    assert(shell != null);
 +
 +    final CommandStringBuilder command = new CommandStringBuilder(CliStrings.CONNECT);
 +    String endpoint;
 +
 +    if (useHttpOnConnect) {
 +      endpoint = "http://" + host + ":" + httpPort + "/gemfire/v1";
 +      command.addOption(CliStrings.CONNECT__USE_HTTP, Boolean.TRUE.toString());
 +      command.addOption(CliStrings.CONNECT__URL, endpoint);
 +     
 +    }
 +    else {
 +      endpoint = host + "[" + jmxPort + "]";
 +      command.addOption(CliStrings.CONNECT__JMX_MANAGER, endpoint);
 +    }
 +
 +    if(this.securityFile != null){
 +      command.addOption(CliStrings.CONNECT__SECURITY_PROPERTIES, securityFile);
 +    }
 +    
 +    if(this.userName!=null) {
 +      command.addOption(CliStrings.CONNECT__USERNAME, userName);
 +    }
 +    
 +    if(this.password!=null) {
 +      command.addOption(CliStrings.CONNECT__PASSWORD, password);
 +    }
 +
 +    CommandResult result = executeCommand(shell, command.toString());
 +
 +    if (!shell.isConnectedAndReady()) {
 +      throw new TestException("Connect command failed to connect to manager " + endpoint + " result=" + commandResultToString(result));
 +    }
 +
 +    info("Successfully connected to managing node using " + (useHttpOnConnect ? "HTTP" : "JMX"));
 +    assertEquals(true, shell.isConnectedAndReady());
 +  }
 +
 +  /**
 +   * Get the default shell (will create one if it doesn't already exist).
 +   * 
 +   * @return The default shell
 +   */
 +  protected synchronized final HeadlessGfsh getDefaultShell() {
 +    if (this.shell == null) {
 +      this.shell = createShell();
 +    }
 +
 +    return this.shell;
 +  }
 +
 +  /**
 +   * Create a TestableGfsh object.
 +   * 
 +   * @return The created shell.
 +   */
 +  protected HeadlessGfsh createShell() {
 +    try {
 +      Gfsh.SUPPORT_MUTLIPLESHELL = true;
 +      String shellId = getClass().getSimpleName()+"_"+getName();
 +      HeadlessGfsh shell = new HeadlessGfsh(shellId, 300);
 +      
 +      //Various commands use a ThreadLocal instance. The following call makes
 +      //HeadlessGfsh available to all the commands
 +      shell.setThreadLocalInstance();
 +      
 +      //Added to avoid trimming of the columns. Not needed for non-interactive shell
 +      //shell.setEnvProperty(Gfsh.ENV_APP_RESULT_VIEWER, "non-basic");
 +      //shell.start();
 +      info("Started headless shell: " + shell);
 +      return shell;
 +    } catch (ClassNotFoundException e) {
 +      throw new TestException(getStackTrace(e));
 +    } catch (IOException e) {
 +      throw new TestException(getStackTrace(e));
 +    }
 +  }
 +
 +  /**
 +   * Execute a command using the default shell and clear the shell events before returning.
 +   * 
 +   * @param command
 +   *          Command to execute
 +   * @return The result of the command execution
 +   */
 +  protected CommandResult executeCommand(String command) {
 +    assert(command != null);
 +
 +    return executeCommand(getDefaultShell(), command);
 +  }
 +
 +  /**
 +   * Execute a command in the provided shell and clear the shell events before returning.
 +   * 
 +   * @param shell
 +   *          Shell in which to execute the command.
 +   * @param command
 +   *          Command to execute
 +   * @return The result of the command execution
 +   */
 +  protected CommandResult executeCommand(HeadlessGfsh shell, String command) {
 +    assert(shell != null);
 +    assert(command != null);
 +    CommandResult commandResult = executeCommandWithoutClear(shell, command);
 +    shell.clear();
 +    return commandResult;
 +  }
 +
 +  /**
 +   * Execute a command using the default shell. Useful for getting additional information from the shell after the
 +   * command has been executed (using getDefaultShell().???). Caller is responsible for calling
 +   * getDefaultShell().clearEvents() when done.
 +   * 
 +   * @param command
 +   *          Command to execute
 +   * @return The result of the command execution
 +   */
 +  @SuppressWarnings("unused")
 +  protected CommandResult executeCommandWithoutClear(String command) {
 +    assert(command != null);
 +
 +    return executeCommandWithoutClear(getDefaultShell(), command);
 +  }
 +
 +  /**
 +   * Execute a command in the provided shell. Useful for getting additional information from the shell after the command
 +   * has been executed (using getDefaultShell().???). Caller is responsible for calling getDefaultShell().clearEvents()
 +   * when done.
 +   * 
 +   * @param shell
 +   *          Shell in which to execute the command.
 +   * @param command
 +   *          Command to execute
 +   * @return The result of the command execution
 +   */
 +  protected CommandResult executeCommandWithoutClear(HeadlessGfsh shell, String command) {
 +    assert(shell != null);
 +    assert(command != null);
 +
 +    try {
 +      info("Executing command " + command + " with command Mgr " + CommandManager.getInstance());
 +    } catch (ClassNotFoundException cnfex) {
 +      throw new TestException(getStackTrace(cnfex));
 +    } catch (IOException ioex) {
 +      throw new TestException(getStackTrace(ioex));
 +    }
 +    
 +    Object result =null;
 +    
 +    try {
 +      
 +      boolean status = shell.executeCommand(command);
 +      info("Waiting for result");
 +      result = shell.getResult();
 +      info("Got the Result");
 +      info("CommandExecutionSuccess=" + status );
 +      info("Status=" + shell.getCommandExecutionStatus());
 +      info("ShellOutputString=<" + shell.outputString + ">");
 +      info("ErrorString=<" + shell.getErrorString() + ">");
 +      
 +      if (shell.hasError() || shell.getCommandExecutionStatus() == -1) {
 +        error("executeCommand completed with error : " + shell.getErrorString() + " output=<" + shell.outputString
 +            + ">");
 +      }
 +      
 +      if(result == null || result.equals(HeadlessGfsh.ERROR_RESULT))
 +          return null;
 +      
 +    } catch (InterruptedException e) {
 +      throw new TestException(getStackTrace(e));
 +    }
 +    
 +    return (CommandResult)result;     
 +  }
 +
 +  /**
 +   * Utility method for viewing the results of a command.
 +   * 
 +   * @param commandResult
 +   *          Results to dump
 +   * @param printStream
 +   *          Stream to dump the results to
 +   */
 +  protected void printResult(final CommandResult commandResult, PrintStream printStream) {
 +    assert(commandResult != null);
 +    assert(printStream != null);
 +    commandResult.resetToFirstLine();
 +    printStream.print(commandResultToString(commandResult));
 +  }
 +
 +  protected String commandResultToString(final CommandResult commandResult) {
 +    assertNotNull(commandResult);
 +    commandResult.resetToFirstLine();
 +    StringBuilder buffer = new StringBuilder(commandResult.getHeader());
 +    while (commandResult.hasNextLine()) {
 +      buffer.append(commandResult.nextLine());
 +    }
 +    buffer.append(commandResult.getFooter());
 +    return buffer.toString();
 +  }
 +
 +  /**
 +   * Utility method for finding the CommandResult object in the Map of CommandOutput objects.
 +   * 
 +   * @param commandOutput
 +   *          CommandOutput Map to search
 +   * @return The CommandResult object or null if not found.
 +   */
 +  protected CommandResult extractCommandResult(Map<String, Object> commandOutput) {
 +    assert(commandOutput != null);
 +
 +    for (Object resultObject : commandOutput.values()) {
 +      if (resultObject instanceof CommandResult) {
 +        CommandResult result = (CommandResult) resultObject;
 +        result.resetToFirstLine();
 +        return result;
 +      }
 +    }
 +    return null;
 +  }
 +
 +  /**
 +   * Utility method to determine how many times a string occurs in another string. Note that overlapping
 +   * occurrences are also counted as matches. For example, looking for "AA" in the string "AAAA" will
 +   * result in a return value of 3.
 +   * 
 +   * @param stringToSearch
 +   *          String to search
 +   * @param stringToCount
 +   *          String to look for and count
 +   * @return The number of matches.
 +   */
 +  protected int countMatchesInString(final String stringToSearch, final String stringToCount) {
 +    assert(stringToSearch != null);
 +    assert(stringToCount != null);
 +    
 +    int length = stringToSearch.length();
 +    int count = 0;
 +    for (int i = 0; i < length; i++) {
 +      if (stringToSearch.substring(i).startsWith(stringToCount)) {
 +        count++;
 +      }
 +    }
 +    return count;
 +  }
 +
 +  /**
 +   * Determines if a string contains a line that matches the pattern. Any single line whose leading and
 +   * trailing spaces have been removed and whose remaining content exactly matches the given pattern is
 +   * considered a match.
 +   * 
 +   * @param stringToSearch
 +   *          String to search
 +   * @param stringPattern
 +   *          Pattern to search for
 +   * @return True if a match is found, false otherwise
 +   */
 +  protected boolean stringContainsLine(final String stringToSearch, final String stringPattern) {
 +    assert(stringToSearch != null);
 +    assert(stringPattern != null);
 +    
 +    Pattern pattern = Pattern.compile("^\\s*" + stringPattern + "\\s*$", Pattern.MULTILINE);
 +    Matcher matcher = pattern.matcher(stringToSearch);
 +    return matcher.find();
 +  }
 +
 +  /**
 +   * Counts the number of distinct lines in a String.
 +   * 
 +   * @param stringToSearch
 +   *          String to search for lines.
 +   * @param countBlankLines
 +   *          Whether to count blank lines (true to count)
 +   * @return The number of lines found.
 +   */
 +  protected int countLinesInString(final String stringToSearch, final boolean countBlankLines) {
 +    assert(stringToSearch != null);
 +    
 +    int length = stringToSearch.length();
 +    int count = 0;
 +    char character = 0;
 +    boolean foundNonSpaceChar = false;
 +
 +    for (int i = 0; i < length; i++) {
 +      character = stringToSearch.charAt(i);
 +      if (character == '\n' || character == '\r') {
 +        if (countBlankLines) {
 +          count++;
 +        } else {
 +          if (foundNonSpaceChar) {
 +            count++;
 +          }
 +        }
 +        foundNonSpaceChar = false;
 +      } else if (character != ' ' && character != '\t') {
 +        foundNonSpaceChar = true;
 +      }
 +    }
 +
 +    // Even if the last line isn't terminated, it still counts as a line
 +    if (character != '\n' && character != '\r') {
 +      count++;
 +    }
 +
 +    return count;
 +  }
 +
 +  /** 
 +   * Get a specific line from the string (using \n or \r as a line separator).
 +   * 
 +   * @param stringToSearch String to get the line from
 +   * @param lineNumber Line number to get
 +   * @return The line
 +   */
 +  protected String getLineFromString(final String stringToSearch, final int lineNumber) {
 +    assert(stringToSearch != null);
 +    assert(lineNumber > 0);
 +    
 +    int length = stringToSearch.length();
 +    int count = 0;
 +    int startIndex = 0;
 +    char character;
 +    int endIndex = length;
 +
 +    for (int i = 0; i < length; i++) {
 +      character = stringToSearch.charAt(i);
 +      if (character == '\n' || character == '\r') {
 +        if (lineNumber == 1) {
 +          endIndex = i;
 +          break;
 +        }
 +        if (++count == lineNumber-1) {
 +          startIndex = i+1;
 +        } else if (count >= lineNumber) {
 +          endIndex = i;
 +          break;
 +        }
 +      }
 +    }
 +
 +    return stringToSearch.substring(startIndex, endIndex);
 +  }
 +  
 +  protected void info(String string) {
-     getLogWriter().info(string);
++    LogWriterUtils.getLogWriter().info(string);
 +  }
 +
 +  protected void debug(String string) {
-     getLogWriter().fine(string);
++    LogWriterUtils.getLogWriter().fine(string);
 +  }
 +
 +  protected void error(String string) {
 +    commandError = string;
-     getLogWriter().error(string);
++    LogWriterUtils.getLogWriter().error(string);
 +  }
 +
 +  protected void error(String string, Throwable e) {
 +    commandError = string;
-     getLogWriter().error(string, e);
++    LogWriterUtils.getLogWriter().error(string, e);
 +  }
 +  
 +  protected Object[] getJMXEndPoint(){
 +    return new Object[] {jmxHost, jmxPort };
 +  }
 +  
 +  public static String getStackTrace(Throwable aThrowable) {
 +    StringWriter sw = new StringWriter();
 +    aThrowable.printStackTrace(new PrintWriter(sw, true));
 +    return sw.toString();
 + }
 +
 +}
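
For context, here is a minimal sketch of how a dunit test could drive this base class; the subclass name,
region name and command are placeholders rather than part of the commit, and the usual dunit/cache imports
(Host, SerializableRunnable, RegionShortcut, CommandResult) are assumed:

    public class ListRegionsCommandDUnitTest extends CommandTestBase {  // hypothetical subclass

      public ListRegionsCommandDUnitTest(String name) {
        super(name);
      }

      public void testListRegions() {
        // GFSH runs in the controller VM; the manager (with the cache) runs in VM[0].
        createDefaultSetup(null);

        // As the createDefaultSetup() javadoc notes, regions and keys must be created in VM[0]
        // so that they live in the same VM as the manager.
        Host.getHost(0).getVM(0).invoke(new SerializableRunnable() {
          public void run() {
            getCache().createRegionFactory(RegionShortcut.REPLICATE).create("region1");  // hypothetical region
          }
        });

        CommandResult result = executeCommand("list regions");
        info(commandResultToString(result));
      }
    }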

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c39f8a5f/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/security/IntegratedSecDUnitTest.java
----------------------------------------------------------------------
diff --cc gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/security/IntegratedSecDUnitTest.java
index 8e63524,0000000..4fa0804
mode 100644,000000..100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/security/IntegratedSecDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/security/IntegratedSecDUnitTest.java
@@@ -1,698 -1,0 +1,705 @@@
 +package com.gemstone.gemfire.management.internal.security;
 +
 +//import hydra.Log;
 +
 +import java.io.IOException;
 +import java.net.MalformedURLException;
 +import java.util.HashMap;
 +import java.util.Map;
 +import java.util.Properties;
++import java.util.concurrent.TimeUnit;
 +
 +import javax.management.InstanceNotFoundException;
 +import javax.management.MBeanException;
 +import javax.management.MBeanServerConnection;
 +import javax.management.ObjectName;
 +import javax.management.ReflectionException;
 +import javax.management.remote.JMXConnector;
 +import javax.management.remote.JMXConnectorFactory;
 +import javax.management.remote.JMXServiceURL;
 +
++import com.gemstone.gemfire.management.internal.cli.HeadlessGfsh;
++import com.gemstone.gemfire.test.dunit.DistributedTestCase;
++import com.gemstone.gemfire.test.dunit.Host;
++import com.gemstone.gemfire.test.dunit.SerializableRunnable;
++import com.gemstone.gemfire.test.dunit.VM;
++import com.gemstone.gemfire.test.dunit.WaitCriterion;
 +import org.apache.logging.log4j.Logger;
 +
 +import com.gemstone.gemfire.LogWriter;
 +import com.gemstone.gemfire.cache.Cache;
 +import com.gemstone.gemfire.cache.CacheFactory;
 +import com.gemstone.gemfire.cache.Region;
 +import com.gemstone.gemfire.cache.RegionFactory;
 +import com.gemstone.gemfire.cache.RegionService;
 +import com.gemstone.gemfire.cache.RegionShortcut;
 +import com.gemstone.gemfire.cache.client.ClientCache;
 +import com.gemstone.gemfire.cache.client.ClientCacheFactory;
 +import com.gemstone.gemfire.cache.client.ClientRegionFactory;
 +import com.gemstone.gemfire.cache.client.ClientRegionShortcut;
 +import com.gemstone.gemfire.cache.operations.OperationContext.OperationCode;
 +import com.gemstone.gemfire.cache.server.CacheServer;
 +import com.gemstone.gemfire.distributed.DistributedMember;
 +import com.gemstone.gemfire.distributed.DistributedSystem;
 +import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 +import com.gemstone.gemfire.internal.AvailablePortHelper;
 +import com.gemstone.gemfire.internal.logging.LogService;
 +import com.gemstone.gemfire.management.DistributedRegionMXBean;
 +import com.gemstone.gemfire.management.ManagementService;
 +import com.gemstone.gemfire.management.ManagerMXBean;
 +import com.gemstone.gemfire.management.internal.MBeanJMXAdapter;
 +import com.gemstone.gemfire.management.internal.cli.domain.DataCommandRequest;
 +import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
 +import com.gemstone.gemfire.management.internal.cli.result.CompositeResultData;
 +import com.gemstone.gemfire.management.internal.cli.result.CompositeResultData.SectionResultData;
 +import com.gemstone.gemfire.management.internal.cli.result.ErrorResultData;
 +import com.gemstone.gemfire.management.internal.cli.result.ResultData;
 +import com.gemstone.gemfire.management.internal.security.ResourceOperationContext.ResourceOperationCode;
 +import com.gemstone.gemfire.security.AuthInitialize;
 +import com.gemstone.gemfire.security.AuthenticationFailedException;
 +
- import dunit.DistributedTestCase;
- import dunit.Host;
- import dunit.SerializableRunnable;
- import dunit.VM;
++import static com.jayway.awaitility.Awaitility.waitAtMost;
++import static org.hamcrest.Matchers.equalTo;
++import static org.hamcrest.Matchers.is;
++import static org.hamcrest.Matchers.not;
++
++//import dunit.DistributedTestCase;
++//import dunit.Host;
++//import dunit.SerializableRunnable;
++//import dunit.VM;
 +
 +/**
 + * @author tushark
 + * 
 + */
 +public class IntegratedSecDUnitTest extends CommandTestBase {
 +  
 +  private static Logger logger = LogService.getLogger();
 +
 +  public static class AuthInitializer implements AuthInitialize {
 +
 +    public static AuthInitialize create() {
 +      return new AuthInitializer();
 +    }
 +
 +    public void init(LogWriter systemLogger, LogWriter securityLogger) throws AuthenticationFailedException {
 +    }
 +
 +    public Properties getCredentials(Properties p, DistributedMember server, boolean isPeer)
 +        throws AuthenticationFailedException {
 +      return p;
 +    }
 +
 +    public void close() {
 +    }
 +  }
 +
 +  private static final long serialVersionUID = 1L;
 +  private static IntegratedSecDUnitTest instance = new IntegratedSecDUnitTest("IntegratedSecDUnitTest");
 +
 +  private Cache cache;
 +  private DistributedSystem ds;
 +  private CacheServer cacheServer;
 +  private ClientCache clientCache;
 +  private int cacheServerPort;
 +  private String hostName;
 +
 +  public IntegratedSecDUnitTest(String name) {
 +    super(name);
 +  }
 +
 +  public Cache createCache(Properties props) throws Exception {
 +    ds = getSystem(props);
 +    cache = CacheFactory.create(ds);
 +    if (cache == null) {
 +      throw new Exception("CacheFactory.create() returned null ");
 +    }
 +    return cache;
 +  }
 +
 +  private void createServer() throws IOException {
 +    cacheServerPort = AvailablePortHelper.getRandomAvailableTCPPort();
 +    cacheServer = cache.addCacheServer();
 +    cacheServer.setPort(cacheServerPort);
 +    cacheServer.start();
 +    hostName = cacheServer.getHostnameForClients();
 +  }
 +
 +  public int getCacheServerPort() {
 +    return cacheServerPort;
 +  }
 +
 +  public String getCacheServerHost() {
 +    return hostName;
 +  }
 +
 +  public void stopCacheServer() {
 +    this.cacheServer.stop();
 +  }
 +
 +  @SuppressWarnings("rawtypes")
 +  public void setUpServerVM(Properties gemFireProps) throws Exception {
 +    logger.info("Creating server vm cache with props =" + gemFireProps);
-     gemFireProps.setProperty(DistributionConfig.NAME_NAME, testName + "Server");
++    gemFireProps.setProperty(DistributionConfig.NAME_NAME, getTestMethodName() + "Server");
 +    createCache(gemFireProps);
 +    RegionFactory factory = cache.createRegionFactory(RegionShortcut.REPLICATE);
 +    Region r = factory.create("serverRegion");
 +    assertNotNull(r);
 +    r.put("serverkey", "servervalue");
 +    assertEquals(1,r.size());
 +    logger.info("Created serverRegion with 1 key=serverKey");
 +  }
 +
 +  public void setUpClientVM(Properties gemFireProps, String host, int port, String user, String password) {
-     gemFireProps.setProperty(DistributionConfig.NAME_NAME, testName + "Client");
++    gemFireProps.setProperty(DistributionConfig.NAME_NAME, getTestMethodName() + "Client");
 +    gemFireProps.setProperty("security-client-auth-init",
 +        "com.gemstone.gemfire.management.internal.security.IntegratedSecDUnitTest$AuthInitializer.create");
 +    logger.info("Creating client cache with props =" + gemFireProps);
 +    ClientCacheFactory clientCacheFactory = new ClientCacheFactory(gemFireProps);
 +    clientCacheFactory.addPoolServer(host, port);
 +    clientCacheFactory.setPoolMultiuserAuthentication(true);
 +    clientCache = clientCacheFactory.create();
 +    ClientRegionFactory<String, String> regionFactory = clientCache
 +        .createClientRegionFactory(ClientRegionShortcut.PROXY);
 +           
 +    Region<String, String> region = regionFactory.create("serverRegion");
 +    assertNotNull(region);    
 +    
 +    Properties properties = new Properties();
 +    properties.setProperty("security-username", user);
 +    properties.setProperty("security-password", password);
 +    RegionService regionService = instance.clientCache.createAuthenticatedView(properties);
 +    Region secRegion = regionService.getRegion("serverRegion");
 +    assertNotNull(secRegion.get("serverkey"));
 +  }
 +
 +  public static void setUpServerVMTask(Properties props) throws Exception {
 +    instance.setUpServerVM(props);
 +  }
 +
 +  public static void createServerTask() throws Exception {
 +    instance.createServer();
 +  }
 +
 +  public static void setUpClientVMTask(Properties gemFireProps, String host, int port, String user, String password)
 +      throws Exception {
 +    instance.setUpClientVM(gemFireProps, host, port, user, password);
 +  }
 +
 +  public static Object[] getCacheServerEndPointTask() {
 +    Object[] array = new Object[2];
 +    array[0] = instance.getCacheServerHost();
 +    array[1] = instance.getCacheServerPort();
 +    return array;
 +  }
 +
 +  public static void closeCacheTask() {
 +    instance.cache.close();
 +  }
 +
 +  public static void closeClientCacheTask() {
 +    instance.clientCache.close();
 +  }
 +
 +  /**
 +   * 
 +   * VM0 -> Manager
 +   * VM1 -> Server
 +   * VM2 -> CacheClient
 +   * 
 +   * @param testName
 +   * @throws IOException
 +   */
 +
 +  @SuppressWarnings("serial")
 +  void setup(String testName) throws IOException {
 +
 +    configureIntSecDescriptor();
 +
 +    Properties props = new Properties();
 +
 +    props.put(DistributionConfig.SECURITY_CLIENT_AUTHENTICATOR_NAME,
 +        "com.gemstone.gemfire.management.internal.security.TestAuthenticator.create");
 +    props.put(DistributionConfig.SECURITY_CLIENT_ACCESSOR_NAME,
 +        "com.gemstone.gemfire.management.internal.security.TestAccessControl.create");
 +    props.put(DistributionConfig.SECURITY_CLIENT_ACCESSOR_PP_NAME,
 +        "com.gemstone.gemfire.management.internal.security.TestAccessControl.create");
 +    props.setProperty(DistributionConfig.NAME_NAME, "Manager");
 +    HeadlessGfsh gfsh = createDefaultSetup(props);
 +    assertNotNull(gfsh);
 +    assertEquals(true, gfsh.isConnectedAndReady());
 +
 +    props.list(System.out);
 +
 +    final Host host = Host.getHost(0);
 +    VM serverVM = host.getVM(1);
 +    VM clientVM = host.getVM(2);
 +    VM managerVM = host.getVM(0);
 +    serverVM.invoke(IntegratedSecDUnitTest.class, "setUpServerVMTask", new Object[] { props });
 +    serverVM.invoke(IntegratedSecDUnitTest.class, "createServerTask");
 +
 +    Object array[] = (Object[]) serverVM.invoke(IntegratedSecDUnitTest.class, "getCacheServerEndPointTask");
 +    String hostName = (String) array[0];
 +    int port = (Integer) array[1];
 +    Object params[] = new Object[] { props, hostName, port, "tushark", "password123" };
 +    logger.info("Starting client with server endpoint " + hostName + ":" + port);
 +    clientVM.invoke(IntegratedSecDUnitTest.class, "setUpClientVMTask", params);
 +    
 +    SerializableRunnable checkRegionMBeans = new SerializableRunnable() {
 +      @Override
 +      public void run() {
 +        Cache cache = getCache();
 +        final ManagementService service = ManagementService.getManagementService(cache);
 +
-         final WaitCriterion waitForMaangerMBean = new WaitCriterion() {
-           @Override
-           public boolean done() {
-             ManagerMXBean bean1 = service.getManagerMXBean();
-             DistributedRegionMXBean bean2 = service.getDistributedRegionMXBean("/serverRegion");
-             if (bean1 == null) {
-               logger.info("Still probing for ManagerMBean");
-               return false;
-             } else {
-               logger.info("Still probing for DistributedRegionMXBean=" + bean2);
-               return (bean2 != null);
-             }
-           }
- 
-           @Override
-           public String description() {
-             return "Probing for DistributedRegionMXBean for serverRegion";
-           }
-         };
- 
-         DistributedTestCase.waitForCriterion(waitForMaangerMBean, 30000, 2000, true);
++//        final WaitCriterion waitForMaangerMBean = new WaitCriterion() {
++//          @Override
++//          public boolean done() {
++//            ManagerMXBean bean1 = service.getManagerMXBean();
++//            DistributedRegionMXBean bean2 = service.getDistributedRegionMXBean("/serverRegion");
++//            if (bean1 == null) {
++//              logger.info("Still probing for ManagerMBean");
++//              return false;
++//            } else {
++//              logger.info("Still probing for DistributedRegionMXBean=" + bean2);
++//              return (bean2 != null);
++//            }
++//          }
++//
++//          @Override
++//          public String description() {
++//            return "Probing for DistributedRegionMXBean for serverRegion";
++//          }
++//        };
++
++//        DistributedTestCase.waitForCriterion(waitForMaangerMBean, 30000, 2000, true);
++        waitAtMost(30, TimeUnit.SECONDS).untilCall(service.getManagerMXBean(), is(not(null)));
++        waitAtMost(30, TimeUnit.SECONDS).untilCall(service.getDistributedRegionMXBean("/serverRegion"), is(not(null)));
 +
 +        assertNotNull(service.getMemberMXBean());
 +        assertNotNull(service.getManagerMXBean());
 +        DistributedRegionMXBean bean = service.getDistributedRegionMXBean("/serverRegion");
 +        assertNotNull(bean);
 +      }
 +    };
 +    managerVM.invoke(checkRegionMBeans);        
 +  }
 +
 +  @SuppressWarnings("serial")
 +  private void configureIntSecDescriptor() {
 +    this.userName = "tushark";
 +    this.password = "password123";
 +
 +    final Host host = Host.getHost(0);
 +    final VM serverVM = host.getVM(1);
 +    final VM clientVM = host.getVM(2);
 +    final VM managerVM = host.getVM(0);
 +    SerializableRunnable grantOpsUser1Runnable = new SerializableRunnable() {
 +      @Override
 +      public void run() {
 +        TestAuthenticator.addUser("tushark", "password123");
 +        TestAuthenticator.addUser("dataRead", "password123");
 +        TestAuthenticator.addUser("dataWrite", "password123");
 +        TestAuthenticator.addUser("monitor", "password123");
 +        TestAuthenticator.addUser("admin", "password123");
 +        TestAuthenticator.addUser("custom", "password123");
 +
 +        TestAccessControl.grantCacheOp("tushark", OperationCode.GET);
 +        TestAccessControl.grantCacheOp("tushark", OperationCode.PUT);
 +        TestAccessControl.grantCacheOp("tushark", OperationCode.DESTROY);
 +        TestAccessControl.grantCacheOp("tushark", OperationCode.REMOVEALL);
 +        TestAccessControl.grantCacheOp("tushark", OperationCode.EXECUTE_FUNCTION);
 +        TestAccessControl.grantCacheOp("tushark", OperationCode.QUERY);
 +
 +        TestAccessControl.grantResourceOp("tushark", ResourceOperationCode.DATA_READ);
 +        TestAccessControl.grantResourceOp("tushark", ResourceOperationCode.DATA_WRITE);
 +        TestAccessControl.grantResourceOp("tushark", ResourceOperationCode.MONITOR);
 +        TestAccessControl.grantResourceOp("tushark", ResourceOperationCode.CHANGE_ALERT_LEVEL);
 +
 +        TestAccessControl.grantCacheOp("dataRead", OperationCode.GET);
 +        TestAccessControl.grantResourceOp("dataRead", ResourceOperationCode.DATA_READ);
 +
 +        TestAccessControl.grantCacheOp("dataWrite", OperationCode.GET);
 +        TestAccessControl.grantCacheOp("dataWrite", OperationCode.PUT);
 +        TestAccessControl.grantCacheOp("dataWrite", OperationCode.DESTROY);
 +        TestAccessControl.grantCacheOp("dataWrite", OperationCode.REGION_CREATE);
 +        TestAccessControl.grantCacheOp("dataWrite", OperationCode.REGION_DESTROY);
 +        TestAccessControl.grantResourceOp("dataWrite", ResourceOperationCode.DATA_WRITE);
 +
 +        TestAccessControl.grantResourceOp("monitor", ResourceOperationCode.DATA_READ);
 +        TestAccessControl.grantResourceOp("monitor", ResourceOperationCode.MONITOR);
 +
 +        TestAccessControl.grantResourceOp("admin", ResourceOperationCode.ADMIN);
 +
 +        TestAccessControl.grantResourceOp("custom", ResourceOperationCode.DATA_READ);
 +        TestAccessControl.grantResourceOp("custom", ResourceOperationCode.SHOW_DEADLOCKS);        
 +        TestAccessControl.grantResourceOp("custom", ResourceOperationCode.CREATE_REGION);
 +        TestAccessControl.grantCacheOp("custom", OperationCode.REGION_CREATE);
 +      }
 +    };
 +    managerVM.invoke(grantOpsUser1Runnable);
 +    serverVM.invoke(grantOpsUser1Runnable);
 +  }
 +
 +  @SuppressWarnings("serial")
 +  public static void grantCacheOp(final String user, final String code) {
 +    final Host host = Host.getHost(0);
 +    final VM serverVM = host.getVM(1);
 +    final VM managerVM = host.getVM(0);
 +    SerializableRunnable grantOpsUser1Runnable = new SerializableRunnable() {
 +      @Override
 +      public void run() {
 +        TestAccessControl.grantCacheOp(user, OperationCode.parse(code));
 +      }
 +    };
 +    managerVM.invoke(grantOpsUser1Runnable);
 +    serverVM.invoke(grantOpsUser1Runnable);
 +  }
 +
 +  @SuppressWarnings("serial")
 +  public static void revokeCacheOp(final String user, final String code) {
 +    final Host host = Host.getHost(0);
 +    final VM serverVM = host.getVM(1);
 +    final VM managerVM = host.getVM(0);
 +    SerializableRunnable grantOpsUser1Runnable = new SerializableRunnable() {
 +      @Override
 +      public void run() {
 +        TestAccessControl.revokeCacheOp(user, OperationCode.parse(code));
 +      }
 +    };
 +    managerVM.invoke(grantOpsUser1Runnable);
 +    serverVM.invoke(grantOpsUser1Runnable);
 +  }
 +
 +  @SuppressWarnings("serial")
 +  public static void grantResourceOp(final String user, final String code) {
 +    final Host host = Host.getHost(0);
 +    final VM serverVM = host.getVM(1);
 +    final VM managerVM = host.getVM(0);
 +    SerializableRunnable grantOpsUser1Runnable = new SerializableRunnable() {
 +      @Override
 +      public void run() {
 +        TestAccessControl.grantResourceOp(user, ResourceOperationCode.parse(code));
 +      }
 +    };
 +    managerVM.invoke(grantOpsUser1Runnable);
 +    serverVM.invoke(grantOpsUser1Runnable);
 +  }
 +
 +  @SuppressWarnings("serial")
 +  public static void revokeResourceOp(final String user, final String code) {
 +    final Host host = Host.getHost(0);
 +    final VM serverVM = host.getVM(1);
 +    final VM managerVM = host.getVM(0);
 +    SerializableRunnable grantOpsUser1Runnable = new SerializableRunnable() {
 +      @Override
 +      public void run() {
 +        TestAccessControl.revokeResourceOp(user, ResourceOperationCode.parse(code));
 +      }
 +    };
 +    managerVM.invoke(grantOpsUser1Runnable);
 +    serverVM.invoke(grantOpsUser1Runnable);
 +  }
 +
 +  
 +  public static void doPutUsingClientCache(final String regionPath, final String key, final String value,
 +      final boolean expectSuccess, String user, String password) {   
 +    try {
 +      Properties properties = new Properties();
 +      properties.setProperty("security-username", user);
 +      properties.setProperty("security-password", password);
 +      RegionService regionService = instance.clientCache.createAuthenticatedView(properties);
 +      Region region = regionService.getRegion(regionPath);
 +      assertNotNull(region);
 +      Object oldValue = region.put(key, value);
 +      logger.info("doPutUsingClientCache : Put key=" + key + " for user="+ user+" newValue="+ value + " oldValue="+ oldValue + " expectSuccess="+expectSuccess);
 +      if (!expectSuccess)
 +        fail("Region Put was expected to fail");
 +    } catch (Exception e) {
 +      if (!expectSuccess) {
 +        logger.info("expectSuccess=false => " + e.getMessage());
 +      } else {
 +        logger.error("Unexpected error", e);
 +        fail("Unknown reason " + e.getMessage());
 +      }
 +    }
 +  }
 +
 +  public void doPutUsingGfsh(final String regionPath, final String key, final String value,
 +      final boolean expectSuccess, String user, String password) {
 +    String command = "put --region=" + regionPath + " --key=" + key + " --value=" + value;
 +    changeGfshUser(user, password);
 +    CommandResult result = executeCommand(command);
 +    logger.info("CommandResult " + result);
 +    if (expectSuccess) {
 +      validateGfshResult(result, expectSuccess);
 +      printCommandOutput(result);
 +    }
 +    else {
 +      logger.info("Error line :" + this.commandError);
 +      assertTrue(this.commandError.contains("Access Denied"));
 +      this.commandError = null;
 +      // validateGfshResultError(result);
 +    }
 +  }
 +
 +  private static void validateGfshResultError(CommandResult result) {
 +    if (result.getType().equals(ResultData.TYPE_ERROR)) {
 +      ErrorResultData data = (ErrorResultData) result.getResultData();
 +      logger.info("Error resultData : " + data.toString());
 +    } else
 +      fail("Unexpected result type " + result.getType());
 +  }
 +
 +  private static void validateGfshResult(CommandResult cmdResult, boolean expected) {
 +    if (ResultData.TYPE_COMPOSITE.equals(cmdResult.getType())) {
 +      CompositeResultData rd = (CompositeResultData) cmdResult.getResultData();
 +      SectionResultData section = rd.retrieveSectionByIndex(0);
 +      boolean result = (Boolean) section.retrieveObject("Result");
 +      assertEquals(expected, result);
 +    } else
 +      fail("Expected CompositeResult Returned Result Type " + cmdResult.getType());
 +  }
 +
 +  public static void doGetUsingClientCache(final String regionPath, final String key, final boolean expectSuccess,
 +      String user, String password) {    
 +    try {
 +      Properties properties = new Properties();
 +      properties.setProperty("security-username", user);
 +      properties.setProperty("security-password", password);
 +      RegionService regionService = instance.clientCache.createAuthenticatedView(properties);
 +      Region region = regionService.getRegion(regionPath);
 +      assertNotNull(region);
 +      Object value = region.get(key);
 +      logger.info("doGetUsingClientCache : Get key=" + key + " for user="+ user+" value="+ value + " expectSuccess="+expectSuccess);
 +      assertNotNull(value);
 +      if (!expectSuccess)
 +        fail("Region Get was expected to fail");
 +    } catch (Exception e) {
 +      if (!expectSuccess) {
 +        logger.info("expectSuccess=true => " + e.getMessage());
 +      } else {
 +        logger.error("Unexpected error", e);
 +        fail("Unknown reason " + e.getMessage());
 +      }
 +    }
 +  }
 +  
 +  public void doGetUsingGfsh(final String regionPath, final String key, final boolean expectSuccess, String user,
 +      String password) {
 +    String command = "get --region=" + regionPath + " --key=" + key;
 +    changeGfshUser(user, password);
 +    CommandResult result = executeCommand(command);    
 +    if (expectSuccess) {
 +      printCommandOutput(result);
 +      validateGfshResult(result, expectSuccess);      
 +    }
 +    else {
 +      logger.info("Error line :" + this.commandError);
 +      assertTrue(this.commandError.contains("Access Denied"));
 +      this.commandError = null;
 +    }
 +  }
 +
 +  private void changeGfshUser(String user, String password) {
 +    if (!this.userName.equals(user)) {
 +      executeCommand("disconnect");
 +      this.userName = user;
 +      this.password = password;
 +      defaultShellConnect();
 +    }
 +  }
 +  
 +  public void doCommandUsingGfsh(String command, final boolean expectSuccess, String user, String password) {
 +    changeGfshUser(user, password);
 +    CommandResult result = executeCommand(command);
 +    if (expectSuccess) {
 +      assertNotNull(result);
 +      printCommandOutput(result);
 +      //assertFalse(result.getType().equals(ResultData.TYPE_ERROR));
 +    }
 +    else {
 +      logger.info("Error line :" + this.commandError);
 +      assertTrue(this.commandError.contains("Access Denied"));
 +      this.commandError = null;
 +    }
 +  }
 +  
 +  private static void printCommandOutput(CommandResult cmdResult) {
 +    assertNotNull(cmdResult);
 +    logger.info("Command Output : ");
 +    StringBuilder sb = new StringBuilder();
 +    cmdResult.resetToFirstLine();
 +    while (cmdResult.hasNextLine()) {
 +      sb.append(cmdResult.nextLine()).append(DataCommandRequest.NEW_LINE);
 +    }
 +    logger.info(sb.toString());
 +    logger.info("");      
 +  }
 +  
 +  private void doShowLogUsingJMX(boolean expectSuccess, String user, String password) {
 +    Object[] endPoint = getJMXEndPoint();
 +    String[] creds = new String[] { user, password };
 +    try {
 +      JMXConnector connector = _getGemfireMBeanServer((Integer) endPoint[1], creds);
 +      MBeanServerConnection mbeanServer = connector.getMBeanServerConnection();
 +      ObjectName memberON = (ObjectName)mbeanServer.invoke(MBeanJMXAdapter.getDistributedSystemName(), "fetchMemberObjectName", 
 +          new Object[]{"Manager"}, new String[]{String.class.getCanonicalName()});      
 +      String logs = (String) mbeanServer.invoke(memberON, "showLog", new Object[]{60}, new String[]{int.class.toString()});
 +      logger.info("JMX Output :" + logs);
 +      connector.close();
 +      if(!expectSuccess)
 +        fail("Expected Access Denied...");      
 +    } catch (InstanceNotFoundException e) {
 +      logger.error("Unexpected Error", e);
 +      fail("Unexpected Error " + e.getMessage());
 +    } catch (MBeanException e) {
 +      logger.error("Unexpected Error", e);
 +      fail("Unexpected Error " + e.getMessage());
 +    } catch (ReflectionException e) {
 +      logger.error("Unexpected Error", e);
 +      fail("Unexpected Error " + e.getMessage());
 +    } catch (IOException e) {
 +      logger.error("Unexpected Error", e);
 +      fail("Unexpected Error " + e.getMessage());
 +    } catch (SecurityException e) {
 +      if(expectSuccess){
 +        fail("Expected successful jmx execution");
 +      } else {
 +        //expected
 +      }
 +    }
 +  }
 +  
 +  
 +  
 +  @SuppressWarnings({ "unchecked", "rawtypes" })
 +  private JMXConnector _getGemfireMBeanServer(int port, Object creds) {
 +    JMXServiceURL url;
 +    try {
 +      url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://:" + port + "/jmxrmi");
 +      if (creds != null) {
 +        Map env = new HashMap();        
 +        env.put(JMXConnector.CREDENTIALS, creds);
 +        JMXConnector jmxc = JMXConnectorFactory.connect(url, env);
 +        return jmxc;
 +      } else {
 +        JMXConnector jmxc = JMXConnectorFactory.connect(url, null);
 +        return jmxc;
 +      }
 +    } catch (MalformedURLException e) {
 +      fail("Error connecting to port=" + port + " " + e.getMessage());
 +    } catch (IOException e) {
 +      fail("Error connecting to port=" + port + " " + e.getMessage());
 +    }
 +    return null;
 +  }
 +
 +  public void testDataCommandsFromDifferentClients() throws IOException {
 +    final Host host = Host.getHost(0);
 +    final VM clientVM = host.getVM(2);
 +
 +    setup("testDataCommandsFromDifferentClients");
 +    executeCommand("list members");
 +    changeGfshUser("dataRead", "password123");
 +    executeCommand("list members");
 +
 +    // check tushark can execute get/put/delete/execute function/query operation from cacheclient and through
 +    // data-commands
 +    String region = "serverRegion";
 +    clientVM.invoke(IntegratedSecDUnitTest.class, "doPutUsingClientCache", new Object[] { region, "myk1", "myv1", true,
 +        "tushark", "password123" });
 +    doGetUsingGfsh(region, "myk1", true, "tushark", "password123");
 +    doPutUsingGfsh(region, "myk2", "myv2", true, "tushark", "password123");
 +    clientVM.invoke(IntegratedSecDUnitTest.class, "doGetUsingClientCache", new Object[] { region, "myk2", true,
 +        "tushark", "password123" });
 +    revokeCacheOp("tushark", "PUT");
 +    clientVM.invoke(IntegratedSecDUnitTest.class, "doPutUsingClientCache", new Object[] { region, "myk1", "myv1",
 +        false, "tushark", "password123" });
 +    doPutUsingGfsh(region, "myk2", "myv2", false, "tushark", "password123");
 +    grantCacheOp("tushark", "PUT");
 +    clientVM.invoke(IntegratedSecDUnitTest.class, "doPutUsingClientCache", new Object[] { region, "myk1", "myv1", true,
 +        "tushark", "password123" });
 +    doPutUsingGfsh(region, "myk2", "myv2", true, "tushark", "password123");
 +    
 +    
 +    //dataRead Role
 +    clientVM.invoke(IntegratedSecDUnitTest.class, "doPutUsingClientCache", new Object[] { region, "myk1", "myv1", false,
 +      "dataRead", "password123" });
 +    doPutUsingGfsh(region, "myk2", "myv2", false, "dataRead", "password123");
 +    doGetUsingGfsh(region, "myk1", true, "dataRead", "password123");
 +    clientVM.invoke(IntegratedSecDUnitTest.class, "doGetUsingClientCache", new Object[] { region, "myk2", true,
 +      "dataRead", "password123" });    
 +    
 +    //dataWrite Role
 +    clientVM.invoke(IntegratedSecDUnitTest.class, "doPutUsingClientCache", new Object[] { region, "myk1", "myv1", true,
 +      "dataWrite", "password123" });
 +    doPutUsingGfsh(region, "myk2", "myv2", true, "dataWrite", "password123");
 +    doGetUsingGfsh(region, "myk1", true, "dataWrite", "password123");
 +    clientVM.invoke(IntegratedSecDUnitTest.class, "doGetUsingClientCache", new Object[] { region, "myk2", true,
 +      "dataWrite", "password123" });
 +    
 +    
 +    
 +    //admin and monitor and custom roles can not execute get-put commands 
 +    clientVM.invoke(IntegratedSecDUnitTest.class, "doPutUsingClientCache", new Object[] { region, "myk1", "myv1", false,
 +      "admin", "password123" });
 +    doPutUsingGfsh(region, "myk2", "myv2", false, "admin", "password123");
 +    doGetUsingGfsh(region, "myk1", false, "admin", "password123");
 +    clientVM.invoke(IntegratedSecDUnitTest.class, "doGetUsingClientCache", new Object[] { region, "myk2", false,
 +      "admin", "password123" });
 +    
 +    clientVM.invoke(IntegratedSecDUnitTest.class, "doPutUsingClientCache", new Object[] { region, "myk1", "myv1", false,
 +      "monitor", "password123" });
 +    doPutUsingGfsh(region, "myk2", "myv2", false, "monitor", "password123");
 +    doGetUsingGfsh(region, "myk1", false, "monitor", "password123");
 +    clientVM.invoke(IntegratedSecDUnitTest.class, "doGetUsingClientCache", new Object[] { region, "myk2", false,
 +      "monitor", "password123" });
 +    
 +    clientVM.invoke(IntegratedSecDUnitTest.class, "doPutUsingClientCache", new Object[] { region, "myk1", "myv1", false,
 +      "custom", "password123" });
 +    doPutUsingGfsh(region, "myk2", "myv2", false, "custom", "password123");
 +    doGetUsingGfsh(region, "myk1", false, "custom", "password123");
 +    clientVM.invoke(IntegratedSecDUnitTest.class, "doGetUsingClientCache", new Object[] { region, "myk2", false,
 +      "custom", "password123" });    
 +    
 +    // tushark can execute monitor command but not region create        
 +    doCommandUsingGfsh("show metrics", true, "monitor", "password123");
 +    doCommandUsingGfsh("show dead-locks --file=deadlocks_monitor.txt", true, "monitor", "password123");
 +    
 +    // dataWrite can execute create region
 +    doCommandUsingGfsh("create region --type=REPLICATE --name=dataWriteRegion", true, "dataWrite", "password123");
 +    doCommandUsingGfsh("create region --type=REPLICATE --name=dataReadRegion", false, "dataRead", "password123");
 +    
 +    // custom can create region create but not put region
 +    doCommandUsingGfsh("create region --type=REPLICATE --name=customRegion", true, "custom", "password123");
 +    doPutUsingGfsh("customRegion", "key", "value", false, "custom", "password123");
 +    
 +    // custom can execute show deadlocks - revoke it check again
 +    doCommandUsingGfsh("show metrics", false, "custom", "password123");
 +    doCommandUsingGfsh("show dead-locks --file=deadlocks_custom_1.txt", true, "custom", "password123");
 +    revokeResourceOp("custom", ResourceOperationCode.SHOW_DEADLOCKS.toString());
 +    grantResourceOp("custom", ResourceOperationCode.SHOW_METRICS.toString());
 +    doCommandUsingGfsh("show metrics", true, "custom", "password123");
 +    doCommandUsingGfsh("show dead-locks --file=deadlocks_custom_2.txt", false, "custom", "password123");
 +    grantResourceOp("custom", ResourceOperationCode.SHOW_DEADLOCKS.toString());    
 +    doCommandUsingGfsh("show metrics", true, "custom", "password123");
 +    doCommandUsingGfsh("show dead-locks --file=deadlocks_custom_3.txt", true, "custom", "password123");    
 +    
 +    /* Commented due to error with gradle but was working with ant build earlier 
 +    Error string is :  TailLogRequest/Response processed in application vm with shared logging
 +    //check jmx and gfsh
 +    doCommandUsingGfsh("show log --member=Manager", true, "monitor", "password123");
 +    doCommandUsingGfsh("show log --member=Manager", false, "dataWrite", "password123");
 +    doCommandUsingGfsh("show log --member=Manager", false, "custom", "password123");
 +    
 +    
 +    doShowLogUsingJMX(true, "monitor", "password123");
 +    doShowLogUsingJMX(false, "dataWrite", "password123");
 +    doShowLogUsingJMX(false, "custom", "password123");
 +
 +    
 +    grantResourceOp("custom", ResourceOperationCode.SHOW_LOG.toString());
 +    doCommandUsingGfsh("show log --member=Manager", true, "custom", "password123");
 +    doShowLogUsingJMX(true, "custom", "password123");*/
 +  }
- 
-   
- 
-   public void tearDown2() throws Exception {
-     super.tearDown2();
-   }
- 
 +}
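
A note on the Awaitility wait introduced above: untilCall() is normally paired with a call proxied through
Awaitility's to(), so a plain Callable&lt;Boolean&gt; condition is an equivalent and arguably safer form. This is
only a sketch, and it assumes java.util.concurrent.Callable is also imported:

        waitAtMost(30, TimeUnit.SECONDS).until(new Callable<Boolean>() {
          @Override
          public Boolean call() {
            // keep polling until both the manager MBean and the distributed region MBean are registered
            return service.getManagerMXBean() != null
                && service.getDistributedRegionMXBean("/serverRegion") != null;
          }
        });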

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c39f8a5f/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/security/OperationCodesForDataCommandsIntegrationTest.java
----------------------------------------------------------------------
diff --cc gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/security/OperationCodesForDataCommandsIntegrationTest.java
index 0000000,b8c1c9d..bda4642
mode 000000,100755..100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/security/OperationCodesForDataCommandsIntegrationTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/security/OperationCodesForDataCommandsIntegrationTest.java
@@@ -1,0 -1,101 +1,101 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one or more
+  * contributor license agreements.  See the NOTICE file distributed with
+  * this work for additional information regarding copyright ownership.
+  * The ASF licenses this file to You under the Apache License, Version 2.0
+  * (the "License"); you may not use this file except in compliance with
+  * the License.  You may obtain a copy of the License at
+  *
+  *      http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package com.gemstone.gemfire.management.internal.security;
+ 
+ import static org.assertj.core.api.Assertions.assertThat;
+ 
+ import java.io.IOException;
+ import java.util.HashMap;
+ import java.util.Map;
+ import java.util.Properties;
+ 
+ import org.junit.After;
+ import org.junit.Before;
+ import org.junit.Rule;
+ import org.junit.Test;
+ import org.junit.contrib.java.lang.system.RestoreSystemProperties;
+ import org.junit.experimental.categories.Category;
+ import org.junit.rules.TestName;
+ 
+ import com.gemstone.gemfire.cache.CacheFactory;
+ import com.gemstone.gemfire.distributed.DistributedSystem;
+ import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+ import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+ import com.gemstone.gemfire.management.internal.security.ResourceOperationContext.ResourceOperationCode;
+ import com.gemstone.gemfire.management.internal.security.AuthorizeOperationForMBeansIntegrationTest.TestAccessControl;
+ import com.gemstone.gemfire.management.internal.security.AuthorizeOperationForMBeansIntegrationTest.TestAuthenticator;
+ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+ 
+ /**
+  * Tests operation codes for data commands.
+  */
+ @Category(IntegrationTest.class)
+ @SuppressWarnings("deprecation")
+ public class OperationCodesForDataCommandsIntegrationTest {
+ 
+   private GemFireCacheImpl cache;
+   private DistributedSystem ds;
+   private Map<String, ResourceOperationCode> commands = new HashMap<String, ResourceOperationCode>();
+   
+   @Rule
+   public TestName testName = new TestName();
+   
+   @Rule
+   public RestoreSystemProperties restoreSystemProperties = new RestoreSystemProperties();
+ 
+   @Before
+   public void setUp() {
+     System.setProperty("resource-auth-accessor", TestAccessControl.class.getName());
+     System.setProperty("resource-authenticator", TestAuthenticator.class.getName());
+     
+     Properties properties = new Properties();
+     properties.put("name", testName.getMethodName());
+     properties.put(DistributionConfig.LOCATORS_NAME, "");
+     properties.put(DistributionConfig.MCAST_PORT_NAME, "0");
+     properties.put(DistributionConfig.HTTP_SERVICE_PORT_NAME, "0");
+     
+     this.ds = DistributedSystem.connect(properties);
+     this.cache = (GemFireCacheImpl) CacheFactory.create(ds);
+ 
 -    this.commands.put("put --key=k1 --value=v1 --region=/region1", ResourceOperationCode.PUT_REGION);    
 -    this.commands.put("locate entry --key=k1 --region=/region1", ResourceOperationCode.LOCATE_ENTRY_REGION);
 -    this.commands.put("query --query=\"select * from /region1\"", ResourceOperationCode.QUERYDATA_DS);
 -    this.commands.put("export data --region=value --file=value --member=value", ResourceOperationCode.EXPORT_DATA_REGION);
 -    this.commands.put("import data --region=value --file=value --member=value", ResourceOperationCode.IMPORT_DATA_REGION);
 -    this.commands.put("rebalance", ResourceOperationCode.REBALANCE_DS);
++    this.commands.put("put --key=k1 --value=v1 --region=/region1", ResourceOperationCode.PUT);
++    this.commands.put("locate entry --key=k1 --region=/region1", ResourceOperationCode.LOCATE_ENTRY);
++    this.commands.put("query --query=\"select * from /region1\"", ResourceOperationCode.QUERY);
++    this.commands.put("export data --region=value --file=value --member=value", ResourceOperationCode.EXPORT_DATA);
++    this.commands.put("import data --region=value --file=value --member=value", ResourceOperationCode.IMPORT_DATA);
++    this.commands.put("rebalance", ResourceOperationCode.REBALANCE);
+   }
+ 
+   @After
+   public void tearDown() throws IOException {
+     if (this.cache != null) {
+       this.cache.close();
+       this.cache = null;
+     }
+     if (this.ds != null) {
+       this.ds.disconnect();
+       this.ds = null;
+     }
+   }
+   
+   @Test
+   public void commandsShouldMapToCorrectResourceCodes() throws Exception {
+     for (String command : this.commands.keySet()) {
+       CLIOperationContext ctx = new CLIOperationContext(command);
+       assertThat(ctx.getResourceOperationCode()).isEqualTo(this.commands.get(command));
+     }
+   }
+ }
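
Note on the hunk above: the commit collapses the region- and DS-suffixed codes (PUT_REGION, LOCATE_ENTRY_REGION, QUERYDATA_DS, ...) into the shorter names (PUT, LOCATE_ENTRY, QUERY, ...). A minimal sketch of exercising one such mapping in isolation, using only the CLIOperationContext constructor and getResourceOperationCode() call that already appear in this test (the surrounding cache setUp is assumed to have run):

    // Sketch: one gfsh data command mapped to its simplified code.
    CLIOperationContext putContext =
        new CLIOperationContext("put --key=k1 --value=v1 --region=/region1");
    assertThat(putContext.getResourceOperationCode())
        .isEqualTo(ResourceOperationCode.PUT);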

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c39f8a5f/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/security/OperationCodesForDistributedSystemMXBeanTest.java
----------------------------------------------------------------------
diff --cc gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/security/OperationCodesForDistributedSystemMXBeanTest.java
index 0000000,8b7edbf..65fcf56
mode 000000,100755..100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/security/OperationCodesForDistributedSystemMXBeanTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/security/OperationCodesForDistributedSystemMXBeanTest.java
@@@ -1,0 -1,76 +1,76 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one or more
+  * contributor license agreements.  See the NOTICE file distributed with
+  * this work for additional information regarding copyright ownership.
+  * The ASF licenses this file to You under the Apache License, Version 2.0
+  * (the "License"); you may not use this file except in compliance with
+  * the License.  You may obtain a copy of the License at
+  *
+  *      http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package com.gemstone.gemfire.management.internal.security;
+ 
+ import static org.assertj.core.api.Assertions.assertThat;
+ 
+ import javax.management.ObjectName;
+ 
+ import org.junit.Test;
+ import org.junit.experimental.categories.Category;
+ 
+ import com.gemstone.gemfire.cache.operations.OperationContext.OperationCode;
+ import com.gemstone.gemfire.management.internal.MBeanJMXAdapter;
+ import com.gemstone.gemfire.management.internal.security.ResourceOperationContext.ResourceOperationCode;
+ import com.gemstone.gemfire.test.junit.categories.UnitTest;
+ 
+ /**
+  * Tests operation codes for DistributedSystemMXBean operations.
+  */
+ @Category(UnitTest.class)
+ public class OperationCodesForDistributedSystemMXBeanTest {
+ 
+   private final String[] distributedSystemMXBeanOperations = {
+       "listCacheServerObjectNames", 
+       "viewRemoteClusterStatus", 
+       "getTotalHeapSize", 
+       "setQueryCollectionsDepth", 
+       "getQueryCollectionsDepth",
+       "changeAlertLevel", 
+       "backupAllMembers", 
+       "revokeMissingDiskStores", 
+       "shutDownAllMembers", 
+       "queryData", 
+       "queryDataForCompressedResult",
+       "setQueryResultSetLimit"
+   };
+ 
+   private final ResourceOperationCode[] distributedSystemResourceOperationCodes = {
+       ResourceOperationCode.LIST_DS, 
+       ResourceOperationCode.LIST_DS, 
 -      ResourceOperationCode.READ_DS,
 -      ResourceOperationCode.QUERYDATA_DS, 
 -      ResourceOperationCode.READ_DS, 
 -      ResourceOperationCode.CHANGE_ALERT_LEVEL_DS, 
 -      ResourceOperationCode.BACKUP_DS,
 -      ResourceOperationCode.REMOVE_DISKSTORE_DS, 
 -      ResourceOperationCode.SHUTDOWN_DS, 
 -      ResourceOperationCode.QUERYDATA_DS, 
 -      ResourceOperationCode.QUERYDATA_DS,
 -      ResourceOperationCode.QUERYDATA_DS 
++      ResourceOperationCode.GET,
++      ResourceOperationCode.QUERY,
++      ResourceOperationCode.GET,
++      ResourceOperationCode.CHANGE_ALERT_LEVEL,
++      ResourceOperationCode.BACKUP_MEMBERS,
++      ResourceOperationCode.REVOKE_MISSING_DISKSTORE,
++      ResourceOperationCode.SHUTDOWN,
++      ResourceOperationCode.QUERY,
++      ResourceOperationCode.QUERY,
++      ResourceOperationCode.QUERY
+   };
+   
+   @Test
+   public void operationsShouldMapToCodes() {
+     ObjectName objectName = MBeanJMXAdapter.getDistributedSystemName();
+     for (int i = 0; i < distributedSystemMXBeanOperations.length; i++) {
+       JMXOperationContext context = new JMXOperationContext(objectName, distributedSystemMXBeanOperations[i]);
+       assertThat(context.getResourceOperationCode()).isEqualTo(distributedSystemResourceOperationCodes[i]);
+       assertThat(context.getOperationCode()).isEqualTo(OperationCode.RESOURCE);
+     }
+   }
+ }
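
The test above keeps the operation names and their expected codes in two parallel arrays, so the pairing is purely positional. A possible alternative, not part of this commit and assuming only java.util.Map / java.util.LinkedHashMap plus the JMXOperationContext calls already shown, keeps each operation next to its expected code:

    // Sketch: the same fixture expressed as one ordered map.
    Map<String, ResourceOperationCode> expected =
        new LinkedHashMap<String, ResourceOperationCode>();
    expected.put("queryData", ResourceOperationCode.QUERY);
    expected.put("backupAllMembers", ResourceOperationCode.BACKUP_MEMBERS);
    expected.put("shutDownAllMembers", ResourceOperationCode.SHUTDOWN);

    ObjectName dsName = MBeanJMXAdapter.getDistributedSystemName();
    for (Map.Entry<String, ResourceOperationCode> entry : expected.entrySet()) {
      JMXOperationContext context = new JMXOperationContext(dsName, entry.getKey());
      assertThat(context.getResourceOperationCode()).isEqualTo(entry.getValue());
    }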

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c39f8a5f/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/security/RESTAdminAPISecurityDUnitTest.java
----------------------------------------------------------------------
diff --cc gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/security/RESTAdminAPISecurityDUnitTest.java
index 5481ff7,0000000..cd7b4c9
mode 100644,000000..100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/security/RESTAdminAPISecurityDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/security/RESTAdminAPISecurityDUnitTest.java
@@@ -1,26 -1,0 +1,22 @@@
 +package com.gemstone.gemfire.management.internal.security;
 +
- import dunit.Host;
- import dunit.SerializableCallable;
- 
 +public class RESTAdminAPISecurityDUnitTest extends CLISecurityDUnitTest {
 +
 +  private static final long serialVersionUID = 1L;
 +
 +  public RESTAdminAPISecurityDUnitTest(String name) {
 +    super(name);
 +  }
 +
 +  public void setUp() throws Exception {
 +    super.setUp();
 +    this.setUseHttpOnConnect(true);
 +  }
 +
 +
 +  @Override
-   public void tearDown2() throws Exception {
-     super.tearDown2();
++  public void preTearDownCacheTestCase() throws Exception {
 +    this.setUseHttpOnConnect(false);
 +  }
 +
 +}


[33/62] [abbrv] [partial] incubator-geode git commit: GEODE-773: Extract static methods from DistributedTestCase

Posted by je...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionBucketCreationDistributionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionBucketCreationDistributionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionBucketCreationDistributionDUnitTest.java
index f3d5182..79669b2 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionBucketCreationDistributionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionBucketCreationDistributionDUnitTest.java
@@ -35,10 +35,13 @@ import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionDataStore.BucketVisitor;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -115,22 +118,22 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
     // put().
     validateBucket2NodeBeforePutInMultiplePartitionedRegion(
         startIndexForRegion, endIndexForRegion);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testBucketCerationInMultiPlePartitionRegion() - Bucket2Node region of partition regions before any put() successfully validated ");
     // doing put() operation on multiple partition region
     putInMultiplePartitionedRegion(startIndexForRegion, endIndexForRegion,
         startIndexForKey, endIndexForKey);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testBucketCerationInMultiPlePartitionRegion() - Put() operation successfully in partition regions");
     // validating bucket regions of multiple partition regions.
     validateBucketsAfterPutInMultiplePartitionRegion(startIndexForRegion,
         endIndexForRegion);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testBucketCerationInMultiPlePartitionRegion() - Bucket regions of partition regions successfully validated");
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "testBucketCerationInMultiPlePartitionRegion() Successfully completed");
   }
 
@@ -168,23 +171,23 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
     createPartitionRegion(vmList, startIndexForRegion, endIndexForRegion,
         localMaxMemory, redundancy);
         
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testBucketCerationInMultiPlePartitionRegion() - Partition Regions successfully created ");
     // doing put() operation from vm0 only
     putInMultiplePartitionRegionFromOneVm(vm[0], startIndexForRegion,
         endIndexForRegion, startIndexForKey, endIndexForKey);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testBucketCerationInMultiPlePartitionRegion() - Put() Opereration done only from one VM ");
     // validating bucket distribution ovar all the nodes
     int noBucketsExpectedOnEachNode = getNoBucketsExpectedOnEachNode();
     validateBucketsDistributionInMultiplePartitionRegion(startIndexForRegion,
         endIndexForRegion, noBucketsExpectedOnEachNode);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testBucketCerationInMultiPlePartitionRegion() - Bucket regions are equally distributed");
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "testBucketCerationInMultiPlePartitionRegion() successfully completed");
   }
 
@@ -226,23 +229,23 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
     // creating multiple partition regions on 3 nodes with localMaxMemory=200 redundancy = 0
     createPartitionRegion(vmList, startIndexForRegion, endIndexForRegion,
         localMaxMemory, redundancy);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testBucketCerationInMultiPlePartitionRegion() - Partition Regions successfully created ");
     // doing put() operation from all vms
     putInMultiplePartitionedRegionFromAllVms(startIndexForRegion,
         endIndexForRegion, startIndexForKey, endIndexForKey);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testBucketCerationInMultiPlePartitionRegion() - Put() Opereration done only from one VM ");
     // validating bucket distribution ovar all the nodes
     int noBucketsExpectedOnEachNode = getNoBucketsExpectedOnEachNode() - 4;
     validateBucketsDistributionInMultiplePartitionRegion(startIndexForRegion,
         endIndexForRegion, noBucketsExpectedOnEachNode);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testBucketCerationInMultiPlePartitionRegion() - Bucket regions are equally distributed");
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "testBucketCerationInMultiPlePartitionRegion() successfully created");
   }
 
@@ -295,7 +298,7 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
     // doing put() in multiple partition regions from 3 nodes.
     putInMultiplePartitionedRegionFrom3Nodes(startIndexForRegion,
         endIndexForRegion, startIndexForKey, endIndexForKey);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testBucketDistributionAfterNodeAdditionInPR() - Put() operation successfully in partition regions on 3 Nodes");
 
@@ -313,15 +316,15 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
     // doing put() in multiple partition regions from 3 nodes.
     putInMultiplePartitionedRegionFrom3Nodes(startIndexForRegion,
         endIndexForRegion, startIndexForKey, endIndexForKey);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testBucketDistributionAfterNodeAdditionInPR() - Put() operation successfully in partition regions on 4th node");
     // validating bucket creation in the 4th node
     validateBucketsOnAllNodes(startIndexForRegion, endIndexForRegion);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testBucketDistributionAfterNodeAdditionInPR() - buckets on all the nodes are validated");
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "testBucketDistributionAfterNodeAdditionInPR() successfully created");
   }
 
@@ -367,7 +370,7 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
         endIndexForRegion, startIndexForKey, endIndexForKey);
     validateTotalNumBuckets(prPrefix, vmList, startIndexForRegion,
         endIndexForRegion, expectedNumBuckets);
-    getLogWriter().info("testTotalNumBucketProperty() completed successfully");
+    LogWriterUtils.getLogWriter().info("testTotalNumBucketProperty() completed successfully");
 
   }
 
@@ -421,7 +424,7 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
     
     Host host = Host.getHost(0);
     createVMs(host);
-    invokeInEveryVM(new SerializableRunnable("Create PR") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("Create PR") {
       public void run() {
         getCache().createRegion(regionName, createRegionAttrs(0, 10, maxBuckets));
         
@@ -512,12 +515,12 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
     
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("Exception during " + count, async[count].getException());
+        Assert.fail("Exception during " + count, async[count].getException());
       }
     }
   }
@@ -554,11 +557,11 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
       }
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("Exception during " + count, async[count].getException());
+        Assert.fail("Exception during " + count, async[count].getException());
       }
     }
   }
@@ -587,12 +590,12 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
  
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("exception during" + count, async[count].getException());
+        Assert.fail("exception during" + count, async[count].getException());
       }
     }
   }
@@ -625,12 +628,12 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
     
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("exception during " + count, async[count].getException());
+        Assert.fail("exception during " + count, async[count].getException());
      }
     }
   }
@@ -697,12 +700,12 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
     
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("exception during " + count, async[count].getException());
+        Assert.fail("exception during " + count, async[count].getException());
       }
     }
   }
@@ -742,12 +745,12 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
     
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("exception during " + count, async[count].getException());
+        Assert.fail("exception during " + count, async[count].getException());
       }
     }
   }
@@ -771,12 +774,12 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
         startIndexForRegion, endIndexForRegion));
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
     
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        getLogWriter().warning("Failure in async invocation on vm " 
+        LogWriterUtils.getLogWriter().warning("Failure in async invocation on vm " 
             + vm[count]
             + " with exception " + async[count].getException());
         throw async[count].getException();
@@ -807,12 +810,12 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < 4; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
     
     for (int count = 0; count < 4; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("got exception on " + count, async[count].getException());
+        Assert.fail("got exception on " + count, async[count].getException());
       }
     }
 
@@ -828,12 +831,12 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 4; count < AsyncInvocationArrSize; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
     
     for (int count = 4; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        getLogWriter().warning("Failure of async invocation on VM " + 
+        LogWriterUtils.getLogWriter().warning("Failure of async invocation on VM " + 
             this.vm[count] + " exception thrown " + async[count].getException());
         throw async[count].getException();
       }
@@ -862,12 +865,12 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
     
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("Validation of bucket distribution failed on " + count,
+        Assert.fail("Validation of bucket distribution failed on " + count,
             async[count].getException());
       }
     }
@@ -948,12 +951,12 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
 
           }
           if (redundancyManageFlag == 0) {
-            getLogWriter().info(
+            LogWriterUtils.getLogWriter().info(
                 "validateRedundancy() - Redundancy not satisfied for the partition region  : "
                     + pr.getName());
           }
           else {
-            getLogWriter().info(
+            LogWriterUtils.getLogWriter().info(
                 "validateRedundancy() - Redundancy satisfied for the partition region  : "
                     + pr.getName());
           }
@@ -1139,7 +1142,7 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
           assertTrue(pr.getRegionAdvisor().getNumProfiles() > 0);
           assertTrue(pr.getRegionAdvisor().getNumDataStores() > 0);
           final int bucketSetSize = pr.getRegionAdvisor().getCreatedBucketsCount();
-          getLogWriter().info("BucketSet size " + bucketSetSize);
+          LogWriterUtils.getLogWriter().info("BucketSet size " + bucketSetSize);
           if (bucketSetSize != 0) {
             Set buckets = pr.getRegionAdvisor().getBucketSet();
             Iterator it  = buckets.iterator();
@@ -1153,7 +1156,7 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
                 numBucketsWithStorage++;
               }
             } catch (NoSuchElementException end) {
-              getLogWriter().info("BucketSet iterations " + numBucketsWithStorage);
+              LogWriterUtils.getLogWriter().info("BucketSet iterations " + numBucketsWithStorage);
             }
             fail("There should be no buckets assigned");
           }
@@ -1185,7 +1188,7 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
           
           assertNotNull(pr.getDataStore());
           final int localBSize = pr.getDataStore().getBucketsManaged();
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "validateBucketsDistribution() - Number of bukctes for "
                   + pr.getName() + " : "  + localBSize);
 
@@ -1260,7 +1263,7 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
           cache.createRegion(prPrefix + i,
               createRegionAttrs(redundancy, localMaxMem, numBuckets));
         }
-        getLogWriter()
+        LogWriterUtils.getLogWriter()
             .info(
                 "createMultiplePartitionRegion() - Partition Regions Successfully Completed ");
       }
@@ -1306,7 +1309,7 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
           Set bucketsWithStorage = pr.getRegionAdvisor().getBucketSet();
           assertEquals(expectedNumBuckets, bucketsWithStorage.size());
         }
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "Total Number of buckets validated in partition region");
       }
     };
@@ -1379,7 +1382,7 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
     createPartitionRegion(vmList, midIndexForRegion, endIndexForNode,
         localMaxMemory, redundancyTwo);
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testBucketCerationInMultiPlePartitionRegion() - Partition Regions successfully created ");
   }
@@ -1391,7 +1394,7 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
   {
     for (int i = 0; i < 4; i++) {
       if (vm[i] == null)
-        getLogWriter().fine("VM is null" + vm[i]);
+        LogWriterUtils.getLogWriter().fine("VM is null" + vm[i]);
       vm[i].invoke(calculateMemoryOfPartitionRegion(i, i + 1));
     }
   }
@@ -1427,7 +1430,7 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
         while (sizeItr.hasNext()) {
           assertEquals(sizeItr.next(), objSize);
         }
-        getLogWriter().info("Size of partition region on each node is equal");
+        LogWriterUtils.getLogWriter().info("Size of partition region on each node is equal");
       }
     };
     vm[0].invoke(testTotalMemory);
@@ -1505,7 +1508,7 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
     /** testing whether exception occurred */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
@@ -1530,9 +1533,9 @@ public class PartitionedRegionBucketCreationDistributionDUnitTest extends
             .getRegion(Region.SEPARATOR + regionName);
         for (int i = 0; i < MAX_SIZE * 2; i++) {
           pr.put(key + i, Obj);
-          getLogWriter().info("MAXSIZE : " + i);
+          LogWriterUtils.getLogWriter().info("MAXSIZE : " + i);
         }
-        getLogWriter().info("Put successfully done for vm" + key);
+        LogWriterUtils.getLogWriter().info("Put successfully done for vm" + key);
       }
     };
     return putForLocalMaxMemory;
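
The converted hunks above repeat the same shape many times: ThreadUtils.join each AsyncInvocation, then Assert.fail if any of them recorded an exception. A sketch of a private helper that would fold that into one call; the helper name is hypothetical, and it uses only the ThreadUtils, Assert and AsyncInvocation calls already present in this file:

    // Sketch: join all async invocations, then surface the first failure.
    private static void joinAndCheck(AsyncInvocation[] async, long timeoutMillis) {
      for (int count = 0; count < async.length; count++) {
        ThreadUtils.join(async[count], timeoutMillis);
      }
      for (int count = 0; count < async.length; count++) {
        if (async[count].exceptionOccurred()) {
          Assert.fail("exception during " + count, async[count].getException());
        }
      }
    }

    // Call sites above would then reduce to: joinAndCheck(async, 30 * 1000);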

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionCacheCloseDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionCacheCloseDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionCacheCloseDUnitTest.java
index d458947..8fd6f4e 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionCacheCloseDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionCacheCloseDUnitTest.java
@@ -27,10 +27,13 @@ import com.gemstone.gemfire.cache.PartitionAttributesFactory;
 import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Test to verify the meta-data cleanUp done at the time of cache close Op. This
@@ -99,7 +102,7 @@ public class PartitionedRegionCacheCloseDUnitTest extends
             key = new Integer(k);
             pr.put(key, rName + k);
           }
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .info("VM0 Done put successfully for PR = " + rName + j);
         }
       }
@@ -120,16 +123,16 @@ public class PartitionedRegionCacheCloseDUnitTest extends
             key = new Integer(k);
             pr.put(key, rName + k);
           }
-          getLogWriter()
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter()
               .info("VM1 Done put successfully for PR = " + rName + j);
         }
       }
     });
-    DistributedTestCase.join(async0, 30 * 1000, getLogWriter());
-    DistributedTestCase.join(async1, 30 * 1000, getLogWriter());
+    ThreadUtils.join(async0, 30 * 1000);
+    ThreadUtils.join(async1, 30 * 1000);
 
    if(async0.exceptionOccurred()) {
-     fail("Exception during async0", async0.getException());
+     Assert.fail("Exception during async0", async0.getException());
    }
    
     // Here we would close cache on one of the vms.
@@ -200,7 +203,7 @@ public class PartitionedRegionCacheCloseDUnitTest extends
         for (int j = 0; j < MAX_REGIONS; j++) {
           final String regionName = "#" + rName + j;
 
-          waitForCriterion(new WaitCriterion() {
+          Wait.waitForCriterion(new WaitCriterion() {
             
             private Set<Node> nodes;
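
Several hunks in this file (and in PartitionedRegionDUnitTestCase further down) spell out the fully qualified com.gemstone.gemfire.test.dunit.LogWriterUtils at each call site. Presumably an import would keep those lines as short as they were before the conversion:

    import com.gemstone.gemfire.test.dunit.LogWriterUtils;
    ...
    LogWriterUtils.getLogWriter().info("VM0 Done put successfully for PR = " + rName + j);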
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionCreationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionCreationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionCreationDUnitTest.java
index a81570b..78c70dc 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionCreationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionCreationDUnitTest.java
@@ -34,10 +34,13 @@ import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 @SuppressWarnings("serial")
@@ -65,7 +68,7 @@ public class PartitionedRegionCreationDUnitTest extends
    */
   public void testSequentialCreation() throws Exception
   {
-    getLogWriter().info("*****CREATION TEST ACK STARTED*****");
+    LogWriterUtils.getLogWriter().info("*****CREATION TEST ACK STARTED*****");
     final String name = getUniqueName();
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
@@ -88,7 +91,7 @@ public class PartitionedRegionCreationDUnitTest extends
     vm1.invoke(getCacheSerializableRunnableForPRValidate(name));
     vm2.invoke(getCacheSerializableRunnableForPRValidate(name));
     vm3.invoke(getCacheSerializableRunnableForPRValidate(name));
-    getLogWriter().info("*****CREATION TEST ACK ENDED*****");
+    LogWriterUtils.getLogWriter().info("*****CREATION TEST ACK ENDED*****");
   }
 
   /**
@@ -101,7 +104,7 @@ public class PartitionedRegionCreationDUnitTest extends
   // 2/8/06
   public void testConcurrentCreation() throws Throwable
   {
-    getLogWriter().info("*****CREATION TEST NO_ACK STARTED*****");
+    LogWriterUtils.getLogWriter().info("*****CREATION TEST NO_ACK STARTED*****");
     final String name = getUniqueName();
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
@@ -121,12 +124,12 @@ public class PartitionedRegionCreationDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
 
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("exception during " + count, async[count].getException());
+        Assert.fail("exception during " + count, async[count].getException());
       }
     }
     
@@ -135,7 +138,7 @@ public class PartitionedRegionCreationDUnitTest extends
     vm1.invoke(getCacheSerializableRunnableForPRValidate(name));
     vm2.invoke(getCacheSerializableRunnableForPRValidate(name));
     vm3.invoke(getCacheSerializableRunnableForPRValidate(name));
-    getLogWriter().info("*****CREATION TEST NO_ACK ENDED*****");
+    LogWriterUtils.getLogWriter().info("*****CREATION TEST NO_ACK ENDED*****");
   }
 
   /**
@@ -251,13 +254,13 @@ public class PartitionedRegionCreationDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
     th.join(30 * 1000);
 
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("exception during " + count, async[count].getException());
+        Assert.fail("exception during " + count, async[count].getException());
       }
     }
 
@@ -424,7 +427,7 @@ public class PartitionedRegionCreationDUnitTest extends
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
-    getLogWriter().info("*****INITIALIZATION TEST STARTED*****");
+    LogWriterUtils.getLogWriter().info("*****INITIALIZATION TEST STARTED*****");
     int AsyncInvocationArrSize = 8;
     AsyncInvocation[] async = new AsyncInvocation[AsyncInvocationArrSize];
     async[0] = vm0.invokeAsync(getCacheSerializableRunnableForPRCreate(name,
@@ -438,12 +441,12 @@ public class PartitionedRegionCreationDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < 4; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
 
     for (int count = 0; count < 4; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("exception during " + count, async[count].getException());
+        Assert.fail("exception during " + count, async[count].getException());
       }
     }
     
@@ -454,15 +457,15 @@ public class PartitionedRegionCreationDUnitTest extends
     
     /** main thread is waiting for the other threads to complete */
     for (int count = 4; count < AsyncInvocationArrSize; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
   
     for (int count = 4; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("exception during " + count, async[count].getException());
+        Assert.fail("exception during " + count, async[count].getException());
       }
     }
-    getLogWriter().info("*****INITIALIZATION TEST ENDED*****");
+    LogWriterUtils.getLogWriter().info("*****INITIALIZATION TEST ENDED*****");
   }
 
   /**
@@ -480,7 +483,7 @@ public class PartitionedRegionCreationDUnitTest extends
     VM vm1 = host.getVM(1);
     VM vm2 = host.getVM(2);
     VM vm3 = host.getVM(3);
-    getLogWriter().info("*****REGISTRATION TEST STARTED*****");
+    LogWriterUtils.getLogWriter().info("*****REGISTRATION TEST STARTED*****");
     int AsyncInvocationArrSize = 8;
     AsyncInvocation[] async = new AsyncInvocation[AsyncInvocationArrSize];
     async[0] = vm0.invokeAsync(getCacheSerializableRunnableForPRCreate(name,
@@ -494,12 +497,12 @@ public class PartitionedRegionCreationDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < 4; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
 
     for (int count = 0; count < 4; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("exception during " + count, async[count].getException());
+        Assert.fail("exception during " + count, async[count].getException());
       }
     }
     
@@ -514,15 +517,15 @@ public class PartitionedRegionCreationDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 4; count < AsyncInvocationArrSize; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
   
     for (int count = 4; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("exception during " + count, async[count].getException());
+        Assert.fail("exception during " + count, async[count].getException());
       }
     }
-    getLogWriter().info("*****REGISTRATION TEST ENDED*****");
+    LogWriterUtils.getLogWriter().info("*****REGISTRATION TEST ENDED*****");
   }
   
   /**
@@ -532,7 +535,7 @@ public class PartitionedRegionCreationDUnitTest extends
    */
   public void testPartitionRegionPersistenceConflicts() throws Throwable
   {
-    addExpectedException("IllegalStateException");
+    IgnoredException.addIgnoredException("IllegalStateException");
     final String name = getUniqueName();
     // Cache cache = getCache();
     Host host = Host.getHost(0);
@@ -540,13 +543,13 @@ public class PartitionedRegionCreationDUnitTest extends
     VM dataStore1 = host.getVM(1);
     VM accessor0 = host.getVM(2);
     VM accessor1 = host.getVM(3);
-    getLogWriter().info("*****PERSISTENCE CONFLICTS TEST STARTED*****");
+    LogWriterUtils.getLogWriter().info("*****PERSISTENCE CONFLICTS TEST STARTED*****");
     accessor0.invoke(getCacheSerializableRunnableForPRPersistence(name, 0, false, false));
     accessor1.invoke(getCacheSerializableRunnableForPRPersistence(name, 0, true, true));
     dataStore0.invoke(getCacheSerializableRunnableForPRPersistence(name, 100, true, false));
     dataStore1.invoke(getCacheSerializableRunnableForPRPersistence(name, 100, false, true));
 
-     getLogWriter().info("*****PERSISTENCE CONFLICTS TEST ENDED*****");
+     LogWriterUtils.getLogWriter().info("*****PERSISTENCE CONFLICTS TEST ENDED*****");
   }
 
   /**
@@ -626,7 +629,7 @@ public class PartitionedRegionCreationDUnitTest extends
                 + name + " configs do not exists in  region - "
                 + root.getName());
         }
-        getLogWriter().info(" PartitionedRegionCreationTest PartionedRegionRegistrationTest() Successfully Complete ..  ");
+        LogWriterUtils.getLogWriter().info(" PartitionedRegionCreationTest PartionedRegionRegistrationTest() Successfully Complete ..  ");
       }
     };
     return (CacheSerializableRunnable)registerPrRegion;
@@ -692,11 +695,11 @@ public class PartitionedRegionCreationDUnitTest extends
             getCache().getLogger().warning(
                 "Creation caught IllegalStateException", ex);
             if (exceptionType.equals("GLOBAL"))
-              getLogWriter().info("PartitionedRegionCreationDUnitTest:testPartitionedRegionCreationExceptions()  Got a Correct exception for scope = GLOBAL");
+              LogWriterUtils.getLogWriter().info("PartitionedRegionCreationDUnitTest:testPartitionedRegionCreationExceptions()  Got a Correct exception for scope = GLOBAL");
             if (exceptionType.equals("REDUNDANCY"))
-              getLogWriter().info("PartitionedRegionCreationDUnitTest:testPartitionedRegionCreationExceptions()  Got a Correct exception for 0 > redundancy  > 3  ");
+              LogWriterUtils.getLogWriter().info("PartitionedRegionCreationDUnitTest:testPartitionedRegionCreationExceptions()  Got a Correct exception for 0 > redundancy  > 3  ");
             if (exceptionType.equals("DIFFREG"))
-              getLogWriter().info("PartitionedRegionCreationDUnitTest:testPartitionedRegionCreationExceptions()  Got a Correct exception for regions with diff scope ");
+              LogWriterUtils.getLogWriter().info("PartitionedRegionCreationDUnitTest:testPartitionedRegionCreationExceptions()  Got a Correct exception for regions with diff scope ");
           }
           assertNotNull("Partitioned Region " + regionName + " not in cache",
               cache.getRegion(regionName));
@@ -730,11 +733,11 @@ public class PartitionedRegionCreationDUnitTest extends
               getCache().getLogger().warning(
                   "Creation caught IllegalStateException", ex);
               if (exceptionType.equals("GLOBAL"))
-                getLogWriter().info("PartitionedRegionCreationDUnitTest:testPartitionedRegionCreationExceptions()  Got a Correct exception for scope = GLOBAL");
+                LogWriterUtils.getLogWriter().info("PartitionedRegionCreationDUnitTest:testPartitionedRegionCreationExceptions()  Got a Correct exception for scope = GLOBAL");
               if (exceptionType.equals("REDUNDANCY"))
-                getLogWriter().info("PartitionedRegionCreationDUnitTest:testPartitionedRegionCreationExceptions()  Got a Correct exception for 0 > redundancy  > 3  ");
+                LogWriterUtils.getLogWriter().info("PartitionedRegionCreationDUnitTest:testPartitionedRegionCreationExceptions()  Got a Correct exception for 0 > redundancy  > 3  ");
               if (exceptionType.equals("DIFFREG"))
-                getLogWriter().info("PartitionedRegionCreationDUnitTest:testPartitionedRegionCreationExceptions()  Got a Correct exception for regions with diff scope ");
+                LogWriterUtils.getLogWriter().info("PartitionedRegionCreationDUnitTest:testPartitionedRegionCreationExceptions()  Got a Correct exception for regions with diff scope ");
             }
             assertNotNull("Partitioned Region " + rName + " not in cache",
                 cache.getRegion(rName));
@@ -867,7 +870,7 @@ public class PartitionedRegionCreationDUnitTest extends
       RegionAttributes regionAttribs = attr.create();
       PartitionedRegion accessor = (PartitionedRegion)cache.createRegion(
           "PR1", regionAttribs);
-      getLogWriter().info("Region created in VM1.");
+      LogWriterUtils.getLogWriter().info("Region created in VM1.");
       assertEquals(accessor.getTotalNumberOfBuckets(),
           PartitionAttributesFactory.GLOBAL_MAX_BUCKETS_DEFAULT);
       try {
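
The hunks above register an ignored IllegalStateException for the whole test via IgnoredException.addIgnoredException("IllegalStateException"). If the exception is only expected during one phase, the scope can be narrowed, assuming the handle returned by addIgnoredException exposes a remove() method (that method is not shown in this diff):

    // Sketch: limit the ignored exception to the steps that actually produce it.
    IgnoredException ignored =
        IgnoredException.addIgnoredException("IllegalStateException");
    try {
      // steps expected to log IllegalStateException go here
    } finally {
      ignored.remove();
    }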

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDUnitTestCase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDUnitTestCase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDUnitTestCase.java
index 498d835..cd6e980 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDUnitTestCase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDUnitTestCase.java
@@ -31,6 +31,7 @@ import com.gemstone.gemfire.internal.logging.InternalLogWriter;
 import com.gemstone.gemfire.internal.logging.LogWriterImpl;
 import com.gemstone.gemfire.internal.logging.PureLogWriter;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.standalone.DUnitLauncher;
 
@@ -91,15 +92,16 @@ public class PartitionedRegionDUnitTestCase extends CacheTestCase
    * Tear down a PartitionedRegionTestCase by cleaning up the existing cache (mainly
    * because we want to destroy any existing PartitionedRegions)
    */
-  public void tearDown2() throws Exception
-  {
-    try {
-      closeCache();
-      invokeInEveryVM(CacheTestCase.class, "closeCache");
-    } finally {
-      super.tearDown2();
-    }
+  @Override
+  protected final void preTearDownCacheTestCase() throws Exception {
+    preTearDownPartitionedRegionDUnitTest();
+    closeCache();
+    Invoke.invokeInEveryVM(CacheTestCase.class, "closeCache");
   }
+  
+  protected void preTearDownPartitionedRegionDUnitTest() throws Exception {
+  }
+  
   public static void caseSetUp() {
     DUnitLauncher.launchIfNeeded();
     // this makes sure we don't have any connection left over from previous tests
@@ -197,10 +199,10 @@ public class PartitionedRegionDUnitTestCase extends CacheTestCase
                 prPrefix + i,
                 PartitionedRegionTestHelper.createRegionAttrsForPR(redundancy,
                     localmaxMemory, recoveryDelay));
-            getLogWriter().info("Created Region  new  --- " + prPrefix + i);
+            com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Created Region  new  --- " + prPrefix + i);
           } catch (RegionExistsException ignore) {}
         }
-        getLogWriter().info("getCreateMultiplePRregion() - Partition Regions Successfully Completed ");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("getCreateMultiplePRregion() - Partition Regions Successfully Completed ");
       }
     };
   }
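
The hunk above replaces the old tearDown2() override with a final preTearDownCacheTestCase() that first delegates to an empty preTearDownPartitionedRegionDUnitTest() hook and then closes the caches. A sketch of how a subclass plugs into the new lifecycle (class name and body are illustrative; the constructor follows the usual dunit String-name pattern):

    public class MyPartitionedRegionDUnitTest extends PartitionedRegionDUnitTestCase {

      public MyPartitionedRegionDUnitTest(String name) {
        super(name);
      }

      @Override
      protected void preTearDownPartitionedRegionDUnitTest() throws Exception {
        // test-specific cleanup; runs before closeCache() in the base class
      }
    }

PartitionedRegionDelayedRecoveryDUnitTest below shows the complementary case: cleanup that must run after the cache-level teardown moves into postTearDownCacheTestCase().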

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDelayedRecoveryDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDelayedRecoveryDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDelayedRecoveryDUnitTest.java
index 1d89967..337e6ce 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDelayedRecoveryDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDelayedRecoveryDUnitTest.java
@@ -26,7 +26,9 @@ import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.internal.cache.control.InternalResourceManager;
 import com.gemstone.gemfire.internal.cache.control.InternalResourceManager.ResourceObserverAdapter;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
@@ -42,11 +44,9 @@ public class PartitionedRegionDelayedRecoveryDUnitTest extends CacheTestCase {
     super(name);
   }
   
-  
   @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-    invokeInEveryVM(new SerializableRunnable() {
+  protected final void postTearDownCacheTestCase() throws Exception {
+    Invoke.invokeInEveryVM(new SerializableRunnable() {
       public void run() {
         InternalResourceManager.setResourceObserver(null);
       }
@@ -54,7 +54,6 @@ public class PartitionedRegionDelayedRecoveryDUnitTest extends CacheTestCase {
     InternalResourceManager.setResourceObserver(null);
   }
 
-
   public void testNoRecovery() throws Exception {
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
@@ -156,7 +155,7 @@ public class PartitionedRegionDelayedRecoveryDUnitTest extends CacheTestCase {
             fail("Redundancy recovery did not happen within 60 seconds");
           }
         } catch (InterruptedException e) {
-          fail("interrupted", e);
+          Assert.fail("interrupted", e);
         } finally {
           InternalResourceManager.setResourceObserver(null);
         }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDestroyDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDestroyDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDestroyDUnitTest.java
index 4733fff..e5a95cd 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDestroyDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionDestroyDUnitTest.java
@@ -28,11 +28,14 @@ import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.cache.RegionDestroyedException;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 /**
  * This test aims to test the destroyRegion functionality.
@@ -77,7 +80,7 @@ public class PartitionedRegionDestroyDUnitTest extends
           cache.createRegion(PR_PREFIX + i,
               createRegionAttrsForPR(0, 200));
         }
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "Successfully created " + MAX_REGIONS + " PartitionedRegions.");
       }
     };
@@ -156,7 +159,7 @@ public class PartitionedRegionDestroyDUnitTest extends
           }
         }
         catch (RegionDestroyedException e) {
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "RegionDestroyedException occured for Region = " + PR_PREFIX + j);
         }
         getCache().getLogger().info("<ExpectedException action=remove>" + 
@@ -164,14 +167,14 @@ public class PartitionedRegionDestroyDUnitTest extends
       }
     });
 
-    DistributedTestCase.join(async1, 30 * 1000, getLogWriter());
+    ThreadUtils.join(async1, 30 * 1000);
     if(async1.exceptionOccurred()) {
-      fail("async1 failed", async1.getException());
+      Assert.fail("async1 failed", async1.getException());
     }
     final String expectedExceptions = "com.gemstone.gemfire.distributed.internal.ReplyException"; 
     addExceptionTag(expectedExceptions);
     
-    pause(1000); // give async a chance to grab the regions...
+    Wait.pause(1000); // give async a chance to grab the regions...
     
     vm0.invoke(new CacheSerializableRunnable("destroyPRRegions") {
 
@@ -225,18 +228,18 @@ public class PartitionedRegionDestroyDUnitTest extends
 
         // Assert that all PartitionedRegions are gone
         assertEquals(0, rootRegion.size());
-        getLogWriter().info("allPartitionedRegions size() =" + rootRegion.size());
+        LogWriterUtils.getLogWriter().info("allPartitionedRegions size() =" + rootRegion.size());
         assertEquals("ThePrIdToPR Map size is:"+PartitionedRegion.prIdToPR.size()+" instead of 0", MAX_REGIONS, PartitionedRegion.prIdToPR.size());
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "PartitionedRegion.prIdToPR.size() ="
                 + PartitionedRegion.prIdToPR.size());
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "# of Subregions of root Region after destroy call = "
                 + rootRegion.subregions(false).size());
         Iterator itr = (rootRegion.subregions(false)).iterator();
         while (itr.hasNext()) {
           Region rg = (Region)itr.next();
-          getLogWriter().info("Root Region SubRegionName = " + rg.getName());
+          LogWriterUtils.getLogWriter().info("Root Region SubRegionName = " + rg.getName());
 //          assertEquals("REGION NAME FOUND:"+rg.getName(),-1, rg.getName().indexOf(
 //              PartitionedRegionHelper.BUCKET_2_NODE_TABLE_PREFIX));
           assertEquals("regionFound that should be gone!:"+rg.getName(),-1, rg.getName().indexOf(

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionEntryCountDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionEntryCountDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionEntryCountDUnitTest.java
index dbaa433..985656d 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionEntryCountDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionEntryCountDUnitTest.java
@@ -23,6 +23,7 @@ import com.gemstone.gemfire.cache.EvictionAttributes;
 import com.gemstone.gemfire.cache.PartitionAttributesFactory;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.cache30.CacheTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
@@ -85,7 +86,7 @@ public class PartitionedRegionEntryCountDUnitTest extends CacheTestCase {
           assertNotNull(pr);
         }
         catch (final CacheException ex) {
-          fail("While creating Partitioned region", ex);
+          Assert.fail("While creating Partitioned region", ex);
         }
       }
     };
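
The only change in this file is the switch from the inherited fail(String, Throwable) to the dunit Assert.fail(String, Throwable). JUnit's own fail(String) takes no cause, so this overload is what keeps the original exception attached to the failure. A small sketch of the same idea as a reusable helper (helper name and Runnable parameter are illustrative):

    // Sketch: run a step and, on failure, keep the underlying exception as the cause.
    private static void runOrFail(String description, Runnable step) {
      try {
        step.run();
      } catch (RuntimeException ex) {
        Assert.fail(description, ex);  // attaches ex; plain fail(description) would drop it
      }
    }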

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionEvictionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionEvictionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionEvictionDUnitTest.java
index 5fe0b45..0319171 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionEvictionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionEvictionDUnitTest.java
@@ -46,11 +46,13 @@ import com.gemstone.gemfire.internal.cache.control.HeapMemoryMonitor;
 import com.gemstone.gemfire.internal.cache.control.InternalResourceManager.ResourceType;
 import com.gemstone.gemfire.internal.cache.lru.HeapEvictor;
 import com.gemstone.gemfire.internal.cache.lru.HeapLRUCapacityController;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 public class PartitionedRegionEvictionDUnitTest extends CacheTestCase {
   public PartitionedRegionEvictionDUnitTest(final String name) {
@@ -160,7 +162,7 @@ public class PartitionedRegionEvictionDUnitTest extends CacheTestCase {
               return excuse;
             }
           };
-          DistributedTestCase.waitForCriterion(wc, 60000, 1000, true);
+          Wait.waitForCriterion(wc, 60000, 1000, true);
             
           int entriesEvicted = 0;
           
@@ -298,7 +300,7 @@ public class PartitionedRegionEvictionDUnitTest extends CacheTestCase {
               return excuse;
             }
           };
-          DistributedTestCase.waitForCriterion(wc, 60000, 1000, true);
+          Wait.waitForCriterion(wc, 60000, 1000, true);
           
           entriesEvicted = ((AbstractLRURegionMap)pr.entries)._getLruList().stats()
               .getEvictions();
@@ -362,7 +364,7 @@ public class PartitionedRegionEvictionDUnitTest extends CacheTestCase {
           assertNotNull(pr);
         }
         catch (final CacheException ex) {
-          fail("While creating Partitioned region", ex);
+          Assert.fail("While creating Partitioned region", ex);
         }
       }
     };
@@ -543,7 +545,7 @@ public class PartitionedRegionEvictionDUnitTest extends CacheTestCase {
           assertNotNull(pr);
         }
         catch (final CacheException ex) {
-          fail("While creating Partitioned region", ex);
+          Assert.fail("While creating Partitioned region", ex);
         }
       }
     };
@@ -661,7 +663,7 @@ public class PartitionedRegionEvictionDUnitTest extends CacheTestCase {
           assertNotNull(pr);
         }
         catch (final CacheException ex) {
-          fail("While creating Partitioned region", ex);
+          Assert.fail("While creating Partitioned region", ex);
         }
       }
     };
@@ -1723,7 +1725,7 @@ public class PartitionedRegionEvictionDUnitTest extends CacheTestCase {
           assertNotNull(pr);
         }
         catch (final CacheException ex) {
-          fail("While creating Partitioned region", ex);
+          Assert.fail("While creating Partitioned region", ex);
         }
       }
     };

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHADUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHADUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHADUnitTest.java
index a787a6f..3f4edf1 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHADUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHADUnitTest.java
@@ -36,11 +36,13 @@ import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.internal.cache.control.InternalResourceManager;
 import com.gemstone.gemfire.internal.cache.control.InternalResourceManager.ResourceObserver;
 import com.gemstone.gemfire.internal.cache.control.InternalResourceManager.ResourceObserverAdapter;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -102,7 +104,7 @@ public class PartitionedRegionHADUnitTest extends PartitionedRegionDUnitTestCase
           }
           assertNotNull(partitionedregion);
         } catch (InterruptedException e) {
-          fail("interrupted",e);
+          Assert.fail("interrupted",e);
         } finally {
           InternalResourceManager.setResourceObserver(null);
         }
@@ -219,7 +221,7 @@ public class PartitionedRegionHADUnitTest extends PartitionedRegionDUnitTestCase
             fail("recovery didn't happen in 60 seconds");
           }
         } catch (InterruptedException e) {
-          fail("recovery wait interrupted", e);
+          Assert.fail("recovery wait interrupted", e);
         } finally {
           InternalResourceManager.setResourceObserver(null);
         }
@@ -246,14 +248,14 @@ public class PartitionedRegionHADUnitTest extends PartitionedRegionDUnitTestCase
         public void run2() throws CacheException {
           getCache().getLogger().info("<ExpectedException action=add>" + 
               expectedExceptions + "</ExpectedException>");
-          getLogWriter().info("<ExpectedException action=add>" + 
+          LogWriterUtils.getLogWriter().info("<ExpectedException action=add>" + 
                   expectedExceptions + "</ExpectedException>");
         }
       };
     SerializableRunnable removeExpectedExceptions = 
       new CacheSerializableRunnable("removeExpectedExceptions") {
         public void run2() throws CacheException {
-          getLogWriter().info("<ExpectedException action=remove>" + 
+          LogWriterUtils.getLogWriter().info("<ExpectedException action=remove>" + 
                     expectedExceptions + "</ExpectedException>");	
           getCache().getLogger().info("<ExpectedException action=remove>" + 
               expectedExceptions + "</ExpectedException>");
@@ -271,7 +273,7 @@ public class PartitionedRegionHADUnitTest extends PartitionedRegionDUnitTestCase
           for (int k = 0; k < 10; k++) {
             pr.put(j + PR_PREFIX + k, PR_PREFIX + k);
           }
-          getLogWriter().info("VM0 Done put successfully for PR = " + PR_PREFIX
+          LogWriterUtils.getLogWriter().info("VM0 Done put successfully for PR = " + PR_PREFIX
               + j);
         }
       }
@@ -288,7 +290,7 @@ public class PartitionedRegionHADUnitTest extends PartitionedRegionDUnitTestCase
           for (int k = 10; k < 20; k++) {
             pr.put(j + PR_PREFIX + k, PR_PREFIX + k);
           }
-          getLogWriter().info("VM1 Done put successfully for PR = " + PR_PREFIX
+          LogWriterUtils.getLogWriter().info("VM1 Done put successfully for PR = " + PR_PREFIX
               + j);
         }
       }
@@ -297,7 +299,7 @@ public class PartitionedRegionHADUnitTest extends PartitionedRegionDUnitTestCase
     // dataStore1.invoke(addExpectedExceptions);
     AsyncInvocation async0 = dataStore0.invokeAsync(dataStore0Puts);
     // AsyncInvocation  async1 = dataStore1.invokeAsync(dataStore1Puts);
-    DistributedTestCase.join(async0, 30 * 1000, getLogWriter());
+    ThreadUtils.join(async0, 30 * 1000);
     // async1.join();
     dataStore0.invoke(removeExpectedExceptions);
     // dataStore1.invoke(removeExpectedExceptions);
@@ -317,11 +319,11 @@ public class PartitionedRegionHADUnitTest extends PartitionedRegionDUnitTestCase
     
     async0 = dataStore0.invokeAsync(dataStore0Puts);
     // async1 = dataStore1.invokeAsync(dataStore1Puts);
-    DistributedTestCase.join(async0, 30 * 1000, getLogWriter());
+    ThreadUtils.join(async0, 30 * 1000);
     // async1.join();
     
     if (async0.exceptionOccurred()) {
-      fail("async0 failed", async0.getException());
+      Assert.fail("async0 failed", async0.getException());
     }
     // assertFalse(async1.exceptionOccurred());
     
@@ -378,7 +380,7 @@ public class PartitionedRegionHADUnitTest extends PartitionedRegionDUnitTestCase
       // This accessor should NOT have picked up any buckets.
       assertFalse(vm3LBRsize != 0);
       int vm2B2Nsize = ((Integer)dataStore2.invoke(validateBucketsOnNode)).intValue();
-      getLogWriter().info("vm2B2Nsize = " + vm2B2Nsize);
+      LogWriterUtils.getLogWriter().info("vm2B2Nsize = " + vm2B2Nsize);
       assertEquals(vm2B2Nsize, vm2LBRsize);
     }
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHAFailureAndRecoveryDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHAFailureAndRecoveryDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHAFailureAndRecoveryDUnitTest.java
index 726423d..69bebdf 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHAFailureAndRecoveryDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionHAFailureAndRecoveryDUnitTest.java
@@ -32,11 +32,14 @@ import com.gemstone.gemfire.cache30.CertifiableTestCacheListener;
 import com.gemstone.gemfire.distributed.DistributedMember;
 import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
 import com.gemstone.gemfire.internal.NanoTimer;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -87,7 +90,7 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest extends
     final int redundancy = 1;
     createPartitionRegionAsynch("testMetaDataCleanupOnSinglePRNodeFail_",
         startIndexForRegion, endIndexForRegion, localMaxMemory, redundancy, -1);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testMetaDataCleanupOnSinglePRNodeFail() - PartitionedRegion's created at all VM nodes");
     
@@ -97,7 +100,7 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest extends
     // disconnect vm0.
     DistributedMember dsMember = (DistributedMember)vmArr[0].invoke(this, "disconnectMethod");
 
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "testMetaDataCleanupOnSinglePRNodeFail() - VM = " + dsMember
             + " disconnected from the distributed system ");
     
@@ -105,7 +108,7 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest extends
     vmArr[1].invoke(validateNodeFailMetaDataCleanUp(dsMember));
     vmArr[2].invoke(validateNodeFailMetaDataCleanUp(dsMember));
     vmArr[3].invoke(validateNodeFailMetaDataCleanUp(dsMember));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testMetaDataCleanupOnSinglePRNodeFail() - Validation of Failed node config metadata complete");
 
@@ -114,11 +117,11 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest extends
     vmArr[2].invoke(validateNodeFailbucket2NodeCleanUp(dsMember));
     vmArr[3].invoke(validateNodeFailbucket2NodeCleanUp(dsMember));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testMetaDataCleanupOnSinglePRNodeFail() - Validation of Failed node bucket2Node Region metadata complete");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testMetaDataCleanupOnSinglePRNodeFail() Completed Successfuly ..........");
   }
@@ -133,7 +136,7 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest extends
         Cache c = getCache();
         Region rootReg = PartitionedRegionHelper.getPRRoot(c);
 //        Region allPRs = PartitionedRegionHelper.getPRConfigRegion(rootReg, c);
-        rootReg.getAttributesMutator().addCacheListener(new CertifiableTestCacheListener(getLogWriter()));
+        rootReg.getAttributesMutator().addCacheListener(new CertifiableTestCacheListener(LogWriterUtils.getLogWriter()));
       }
     };
   
@@ -196,7 +199,7 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest extends
     final int redundancy = 1;
     createPartitionRegionAsynch("testMetaDataCleanupOnMultiplePRNodeFail_",
         startIndexForRegion, endIndexForRegion, localMaxMemory, redundancy, -1);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testMetaDataCleanupOnMultiplePRNodeFail() - PartitionedRegion's created at all VM nodes");
     
@@ -205,7 +208,7 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest extends
     // disconnect vm0
     DistributedMember dsMember = (DistributedMember)vmArr[0].invoke(this, "disconnectMethod");
 
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "testMetaDataCleanupOnMultiplePRNodeFail() - VM = " + dsMember
             + " disconnected from the distributed system ");
 
@@ -228,7 +231,7 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest extends
     //  disconnect vm1
     DistributedMember dsMember2 = (DistributedMember)vmArr[1].invoke(this, "disconnectMethod");
 
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "testMetaDataCleanupOnMultiplePRNodeFail() - VM = " + dsMember2
             + " disconnected from the distributed system ");
 
@@ -251,18 +254,18 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest extends
     vmArr[2].invoke(validateNodeFailMetaDataCleanUp(dsMember2));
     vmArr[3].invoke(validateNodeFailMetaDataCleanUp(dsMember2));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testMetaDataCleanupOnMultiplePRNodeFail() - Validation of Failed nodes config metadata complete");
 
     vmArr[2].invoke(validateNodeFailbucket2NodeCleanUp(dsMember2));
     vmArr[3].invoke(validateNodeFailbucket2NodeCleanUp(dsMember2));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testMetaDataCleanupOnMultiplePRNodeFail() - Validation of Failed nodes bucket2Node Region metadata complete");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testMetaDataCleanupOnMultiplePRNodeFail() Completed Successfuly ..........");
   }
@@ -290,8 +293,8 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest extends
         assertEquals(2, cls.length);
         CertifiableTestCacheListener ctcl = (CertifiableTestCacheListener) cls[1];
         
-        getLogWriter().info("Listener update (" + ctcl.updates.size() + "): " + ctcl.updates) ;
-        getLogWriter().info("Listener destroy: (" + ctcl.destroys.size() + "): " + ctcl.destroys) ;
+        LogWriterUtils.getLogWriter().info("Listener update (" + ctcl.updates.size() + "): " + ctcl.updates) ;
+        LogWriterUtils.getLogWriter().info("Listener destroy: (" + ctcl.destroys.size() + "): " + ctcl.destroys) ;
 
         Iterator itrator = rootReg.keySet().iterator();
         for (Iterator itr = itrator; itr.hasNext();) {
@@ -378,7 +381,7 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest extends
     DistributedMember dsMember = ((InternalDistributedSystem)getCache()
         .getDistributedSystem()).getDistributionManager().getId();
     getCache().getDistributedSystem().disconnect();
-    getLogWriter().info("disconnectMethod() completed ..");
+    LogWriterUtils.getLogWriter().info("disconnectMethod() completed ..");
     return dsMember;
   }
   
@@ -396,12 +399,12 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest extends
           redundancy, localMaxMemory, recoveryDelay));
     }
     for (int count2 = 0; count2 < async.length; count2++) {
-        DistributedTestCase.join(async[count2], 30 * 1000, getLogWriter());
+        ThreadUtils.join(async[count2], 30 * 1000);
      }
     
     for (int count2 = 0; count2 < async.length; count2++) {
       if (async[count2].exceptionOccurred()) {
-        fail("exception during " + count2, async[count2].getException());
+        Assert.fail("exception during " + count2, async[count2].getException());
       }
     }  
   }
@@ -446,17 +449,17 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest extends
         assertEquals(bucketOwners.size(), redundantCopies + 1);
         DistributedMember bucketOwner = (DistributedMember) bucketOwners.iterator().next();
         assertNotNull(bucketOwner);
-        getLogWriter().info("Selected distributed member " + bucketOwner + " to disconnect because it hosts bucketId " + bucketId);
+        LogWriterUtils.getLogWriter().info("Selected distributed member " + bucketOwner + " to disconnect because it hosts bucketId " + bucketId);
         return bucketOwner;
       }
     });
     assertNotNull(bucketHost);
     
     // Disconnect the selected host 
-    Map stillHasDS = invokeInEveryVM(new SerializableCallable("Disconnect provided bucketHost") {
+    Map stillHasDS = Invoke.invokeInEveryVM(new SerializableCallable("Disconnect provided bucketHost") {
       public Object call() throws Exception {
         if (getSystem().getDistributedMember().equals(bucketHost)) {
-          getLogWriter().info("Disconnecting distributed member " + getSystem().getDistributedMember());
+          LogWriterUtils.getLogWriter().info("Disconnecting distributed member " + getSystem().getDistributedMember());
           disconnectFromDS();
           return Boolean.FALSE;
         }
@@ -491,7 +494,7 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest extends
                   TimeUnit.MILLISECONDS.sleep(250);
                 }
                 catch (InterruptedException e) {
-                  fail("Interrupted, ah!", e);
+                  Assert.fail("Interrupted, ah!", e);
                 }
               }
             }
@@ -519,7 +522,7 @@ public class PartitionedRegionHAFailureAndRecoveryDUnitTest extends
                     assertEquals(pr.getRedundantCopies() + 1, owners.size());
                     break; // retry loop
                   } catch (ForceReattemptException retryIt) {
-                    getLogWriter().info("Need to retry validation for bucket in PR " + pr, retryIt);
+                    LogWriterUtils.getLogWriter().info("Need to retry validation for bucket in PR " + pr, retryIt);
                   }
                 } while (true); // retry loop
               } // bucketId loop

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionLocalMaxMemoryDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionLocalMaxMemoryDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionLocalMaxMemoryDUnitTest.java
index 4bbdbe4..28e1bfb 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionLocalMaxMemoryDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionLocalMaxMemoryDUnitTest.java
@@ -34,6 +34,7 @@ import com.gemstone.gemfire.cache.util.ObjectSizer;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.internal.cache.lru.Sizeable;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -177,7 +178,7 @@ public class PartitionedRegionLocalMaxMemoryDUnitTest extends
             i++;
           }
           assertEquals(1, pr.getDataStore().localBucket2RegionMap.size());
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
           "putObjectInPartitionRegion() - Put operation done successfully");
         }
         else {
@@ -190,7 +191,7 @@ public class PartitionedRegionLocalMaxMemoryDUnitTest extends
             fail("Bucket gets created even if no memory is available");
           }
           catch (PartitionedRegionStorageException e) {
-            getLogWriter()
+            LogWriterUtils.getLogWriter()
             .info(
             "putObjectInPartitionRegion()- got correct PartitionedRegionStorageException while creating bucket when no memory is available");
           }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionLocalMaxMemoryOffHeapDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionLocalMaxMemoryOffHeapDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionLocalMaxMemoryOffHeapDUnitTest.java
index a9b7619..8508587 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionLocalMaxMemoryOffHeapDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionLocalMaxMemoryOffHeapDUnitTest.java
@@ -22,6 +22,7 @@ import com.gemstone.gemfire.cache.AttributesFactory;
 import com.gemstone.gemfire.cache.EvictionAttributes;
 import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 
 /**
@@ -38,7 +39,7 @@ public class PartitionedRegionLocalMaxMemoryOffHeapDUnitTest extends Partitioned
   }
   
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownPartitionedRegionDUnitTest() throws Exception {
     SerializableRunnable checkOrphans = new SerializableRunnable() {
 
       @Override
@@ -48,12 +49,8 @@ public class PartitionedRegionLocalMaxMemoryOffHeapDUnitTest extends Partitioned
         }
       }
     };
-    invokeInEveryVM(checkOrphans);
-    try {
-      checkOrphans.run();
-    } finally {
-      super.tearDown2();
-    }
+    Invoke.invokeInEveryVM(checkOrphans);
+    checkOrphans.run();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionMultipleDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionMultipleDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionMultipleDUnitTest.java
index 6ace0a5..22d1fd7 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionMultipleDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionMultipleDUnitTest.java
@@ -20,9 +20,11 @@ import com.gemstone.gemfire.cache.Cache;
 import com.gemstone.gemfire.cache.CacheException;
 import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -93,22 +95,22 @@ public class PartitionedRegionMultipleDUnitTest extends
     /** creationg and performing put(),get() operations on Partition Region */
     createMultiplePartitionRegion(vm0, vm1, vm2, vm3, startIndexForRegion,
         endIndexForRegion);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionedRegionPutAndGet() - Partition Regions Successfully Created ");
     validateMultiplePartitionedRegions(vm0, vm1, vm2, vm3, startIndexForRegion,
         endIndexForRegion);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionedRegionPutAndGet() - Partition Regions Successfully Validated ");
     putInMultiplePartitionedRegion(vm0, vm1, vm2, vm3, startIndexForRegion,
         endIndexForRegion);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionedRegionPutAndGet() - Put() Operation done Successfully in Partition Regions ");
     getInMultiplePartitionedRegion(vm0, vm1, vm2, vm3, startIndexForRegion,
         endIndexForRegion);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionedRegionPutAndGet() - Partition Regions Successfully Validated ");
   }
@@ -148,38 +150,38 @@ public class PartitionedRegionMultipleDUnitTest extends
      */
     createMultiplePartitionRegion(vm0, vm1, vm2, vm3, startIndexForRegion,
         endIndexForRegion);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionedRegionDestroyKeys() - Partition Regions Successfully Created ");
     validateMultiplePartitionedRegions(vm0, vm1, vm2, vm3, startIndexForRegion,
         endIndexForRegion);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionedRegionDestroyKeys() - Partition Regions Successfully Validated ");
     putInMultiplePartitionedRegion(vm0, vm1, vm2, vm3, startIndexForRegion,
         endIndexForRegion);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionedRegionDestroyKeys() - Put() Operation done Successfully in Partition Regions ");
     destroyInMultiplePartitionedRegion(vm0, vm1, vm2, vm3, startIndexForRegion,
         endIndexForRegion);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionedRegionDestroyKeys() - Destroy(Key) Operation done Successfully in Partition Regions ");
     getDestroyedEntryInMultiplePartitionedRegion(vm0, vm1, vm2, vm3,
         startIndexForRegion, endIndexForRegion, afterPutFlag);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionedRegionDestroyKeys() - Get() Operation after destoy keys done Successfully in Partition Regions ");
     putDestroyedEntryInMultiplePartitionedRegion(vm0, vm1, vm2, vm3,
         startIndexForRegion, endIndexForRegion);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionedRegionDestroyKeys() - Put() Operation after destroy keys done Successfully in Partition Regions ");
     afterPutFlag = 1;
     getDestroyedEntryInMultiplePartitionedRegion(vm0, vm1, vm2, vm3,
         startIndexForRegion, endIndexForRegion, afterPutFlag);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionedRegionDestroyKeys() - Get() Operation after Put() done Successfully in Partition Regions ");
   }
@@ -210,22 +212,22 @@ public class PartitionedRegionMultipleDUnitTest extends
     /** creating Partition Regions and testing for the APIs contains() */
     createMultiplePartitionRegion(vm0, vm1, vm2, vm3, startIndexForRegion,
         endIndexForRegion);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionedRegionDestroyAndContainsAPI() - Partition Regions Successfully Created ");
     validateMultiplePartitionedRegions(vm0, vm1, vm2, vm3, startIndexForRegion,
         endIndexForRegion);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionedRegionDestroyAndContainsAPI() - Partition Regions Successfully Validated ");
     putInMultiplePartitionedRegion(vm0, vm1, vm2, vm3, startIndexForRegion,
         endIndexForRegion);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionedRegionDestroyAndContainsAPI() - Put() Operation done Successfully in Partition Regions ");
     destroyInMultiplePartitionedRegion(vm0, vm1, vm2, vm3, startIndexForRegion,
         endIndexForRegion);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionedRegionDestroyAndContainsAPI() - Destroy(Key) Operation done Successfully in Partition Regions ");
     async[0] = vm0.invokeAsync(validateContainsAPIForPartitionRegion(
@@ -238,16 +240,16 @@ public class PartitionedRegionMultipleDUnitTest extends
         startIndexForRegion, endIndexForRegion));
 
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-        DistributedTestCase.join(async[count], 120 * 1000, getLogWriter());
+        ThreadUtils.join(async[count], 120 * 1000);
     }
     
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("exception during " + count, async[count].getException());
+        Assert.fail("exception during " + count, async[count].getException());
       }
    }
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPartitionedRegionDestroyAndContainsAPI() - Validation of Contains APIs done Successfully in Partition Regions ");
   }
@@ -298,12 +300,12 @@ public class PartitionedRegionMultipleDUnitTest extends
         startIndexForRegion, endIndexForRegion));
 
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-        DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+        ThreadUtils.join(async[count], 30 * 1000);
     }
     
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("exception during " + count, async[count].getException());
+        Assert.fail("exception during " + count, async[count].getException());
       }
     }
   }
@@ -334,12 +336,12 @@ public class PartitionedRegionMultipleDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-        DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+        ThreadUtils.join(async[count], 30 * 1000);
     }
     
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("exception during " + count, async[count].getException());
+        Assert.fail("exception during " + count, async[count].getException());
       }
     }
   }
@@ -367,12 +369,12 @@ public class PartitionedRegionMultipleDUnitTest extends
         endIndexForRegion));
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) { 
-        DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+        ThreadUtils.join(async[count], 30 * 1000);
     }
      
     for (int count = 0; count < AsyncInvocationArrSize; count++) { 
       if (async[count].exceptionOccurred()) {
-        fail("Failed due to exception: "+ async[count].getException(),
+        Assert.fail("Failed due to exception: "+ async[count].getException(),
             async[count].getException());
       }
     }  
@@ -406,12 +408,12 @@ public class PartitionedRegionMultipleDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-        DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+        ThreadUtils.join(async[count], 30 * 1000);
     }
 
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("exception during " + count, async[count].getException());
+        Assert.fail("exception during " + count, async[count].getException());
       }
    }   
   }
@@ -464,7 +466,7 @@ public class PartitionedRegionMultipleDUnitTest extends
             }
           }
 
-          getLogWriter()
+          LogWriterUtils.getLogWriter()
               .info(
                   "validateContainsAPIForPartitionRegion() - Get() Validations done Successfully in Partition Region "
                       + pr.getName());
@@ -479,7 +481,7 @@ public class PartitionedRegionMultipleDUnitTest extends
             }
           }
 
-          getLogWriter()
+          LogWriterUtils.getLogWriter()
               .info(
                   "validateContainsAPIForPartitionRegion() - containsKey() Validations done Successfully in Partition Region "
                       + pr.getName());
@@ -494,7 +496,7 @@ public class PartitionedRegionMultipleDUnitTest extends
               assertTrue(conKey);
             }
           }
-          getLogWriter()
+          LogWriterUtils.getLogWriter()
               .info(
                   "validateContainsAPIForPartitionRegion() - containsValueForKey() Validations done Successfully in Partition Region "
                       + pr.getName());
@@ -508,7 +510,7 @@ public class PartitionedRegionMultipleDUnitTest extends
               assertTrue(conKey);
             }
           }
-          getLogWriter()
+          LogWriterUtils.getLogWriter()
               .info(
                   "validateContainsAPIForPartitionRegion() - containsValue() Validations done Successfully in Partition Region "
                       + pr.getName());
@@ -543,9 +545,9 @@ public class PartitionedRegionMultipleDUnitTest extends
         startIndexForRegion, endIndexForRegion, afterPutFlag));
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-        DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+        ThreadUtils.join(async[count], 30 * 1000);
         if (async[count].exceptionOccurred()) {
-          fail("exception during " + count, async[count].getException());
+          Assert.fail("exception during " + count, async[count].getException());
         }
     }
     
@@ -586,12 +588,12 @@ public class PartitionedRegionMultipleDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-        DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+        ThreadUtils.join(async[count], 30 * 1000);
     }
     
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("exception during " + count, async[count].getException());
+        Assert.fail("exception during " + count, async[count].getException());
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionOffHeapEvictionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionOffHeapEvictionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionOffHeapEvictionDUnitTest.java
index 042e1f7..002a5f6 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionOffHeapEvictionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionOffHeapEvictionDUnitTest.java
@@ -23,6 +23,7 @@ import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.cache.control.InternalResourceManager.ResourceType;
 import com.gemstone.gemfire.internal.cache.control.OffHeapMemoryMonitor;
 import com.gemstone.gemfire.internal.cache.lru.HeapEvictor;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 
 public class PartitionedRegionOffHeapEvictionDUnitTest extends
@@ -33,7 +34,7 @@ public class PartitionedRegionOffHeapEvictionDUnitTest extends
   }  
   
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
     SerializableRunnable checkOrphans = new SerializableRunnable() {
 
       @Override
@@ -43,12 +44,8 @@ public class PartitionedRegionOffHeapEvictionDUnitTest extends
         }
       }
     };
-    invokeInEveryVM(checkOrphans);
-    try {
-      checkOrphans.run();
-    } finally {
-      super.tearDown2();
-    }
+    Invoke.invokeInEveryVM(checkOrphans);
+    checkOrphans.run();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionPRIDDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionPRIDDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionPRIDDUnitTest.java
index 496026d..f35b39a 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionPRIDDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionPRIDDUnitTest.java
@@ -23,9 +23,11 @@ import java.util.*;
 import com.gemstone.gemfire.cache.Cache;
 import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache30.*;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -83,7 +85,7 @@ public class PartitionedRegionPRIDDUnitTest extends
     // Create 1/2 * MAX_REGIONS regions in VM 0,1,2 with scope D_ACK.
     createPartitionRegion(vmList, startIndexForRegion, endIndexForRegion,
         localMaxMemory, redundancy, prPrefix);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPRIDGenerationInMultiplePartitionRegion() - Partition regions on 3 nodes successfully created");
 
@@ -99,7 +101,7 @@ public class PartitionedRegionPRIDDUnitTest extends
     // VM 3 contains regions from id MAX_REGIONS to 2*MAX_REGIONS only.
     createPartitionRegion(vmList, startIndexForRegion, endIndexForRegion,
         localMaxMemory, pr2_redundancy, prPrefix);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "testPRIDGenerationInMultiplePartitionRegion() - Partition regions on 4 nodes successfully created");
     // validating PRID generation for multiple partition regions    
@@ -116,12 +118,12 @@ public class PartitionedRegionPRIDDUnitTest extends
 
     /** main thread is waiting for the other threads to complete */
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
-      DistributedTestCase.join(async[count], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[count], 30 * 1000);
     }
     
     for (int count = 0; count < AsyncInvocationArrSize; count++) {
       if (async[count].exceptionOccurred()) {
-        fail("VM " + count 
+        Assert.fail("VM " + count 
             + " encountered this exception during async invocation", 
             async[count].getException());
       }
@@ -206,10 +208,10 @@ public class PartitionedRegionPRIDDUnitTest extends
         if (prIdPRSet.size() != PartitionedRegion.prIdToPR.size())
           fail("Duplicate PRID are generated in prIdToPR");
 
-        getLogWriter().info("Size of allPartition region : " + prIdSet.size());
-        getLogWriter()
+        LogWriterUtils.getLogWriter().info("Size of allPartition region : " + prIdSet.size());
+        LogWriterUtils.getLogWriter()
             .info("Size of prIdToPR region     : " + prIdPRSet.size());
-        getLogWriter().info("PRID generated successfully");
+        LogWriterUtils.getLogWriter().info("PRID generated successfully");
       }
     };
     return validatePRID;
@@ -233,12 +235,12 @@ public class PartitionedRegionPRIDDUnitTest extends
       numNodes++;
     }
     for (int i = 0; i < numNodes; i++) {
-      DistributedTestCase.join(async[i], 30 * 1000, getLogWriter());
+      ThreadUtils.join(async[i], 30 * 1000);
     }
     
     for (int i = 0; i < numNodes; i++) {
       if (async[i].exceptionOccurred()) {
-        fail("VM " + i 
+        Assert.fail("VM " + i 
             + " encountered this exception during async invocation", 
             async[i].getException());
       }



[56/62] [abbrv] incubator-geode git commit: GEODE-948: gemfire-modules-assembly should not publish a jar artifact

Posted by je...@apache.org.
GEODE-948: gemfire-modules-assembly should not publish a jar artifact


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/8af28584
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/8af28584
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/8af28584

Branch: refs/heads/feature/GEODE-17
Commit: 8af2858470bc7c7b866a865195a4ff95d43189d2
Parents: 9fff1eb
Author: Jens Deppe <jd...@pivotal.io>
Authored: Tue Feb 9 08:39:18 2016 -0800
Committer: Jens Deppe <jd...@pivotal.io>
Committed: Tue Feb 9 16:17:13 2016 -0800

----------------------------------------------------------------------
 extensions/gemfire-modules-assembly/build.gradle |  7 +++++++
 gemfire-assembly/build.gradle                    |  9 +--------
 gradle/utilities.gradle                          | 10 ++++++++++
 3 files changed, 18 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/8af28584/extensions/gemfire-modules-assembly/build.gradle
----------------------------------------------------------------------
diff --git a/extensions/gemfire-modules-assembly/build.gradle b/extensions/gemfire-modules-assembly/build.gradle
index 270e167..b20ddff 100644
--- a/extensions/gemfire-modules-assembly/build.gradle
+++ b/extensions/gemfire-modules-assembly/build.gradle
@@ -31,6 +31,13 @@ dependencies {
 }
 
 jar.enabled = false
+extraArchive {
+  sources = false
+  javadoc = false
+  tests = false
+}
+
+disableMavenPublishing()
 
 def getJarArtifact(module) {
   project(module).configurations.archives.artifacts.findAll {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/8af28584/gemfire-assembly/build.gradle
----------------------------------------------------------------------
diff --git a/gemfire-assembly/build.gradle b/gemfire-assembly/build.gradle
index c77d909..72e97c5 100755
--- a/gemfire-assembly/build.gradle
+++ b/gemfire-assembly/build.gradle
@@ -26,14 +26,7 @@ extraArchive {
   tests = false
 }
 
-// We are not publishing anything from this project into maven and using two distributions seems
-// to break the nexus plugin.  So we delete the nexus tasks but we have to replace the upload task
-// with an empty task of the same name...argh.
-afterEvaluate {
-  tasks.remove(install)
-  tasks.remove(uploadArchives)
-  task uploadArchives << { }
-}
+disableMavenPublishing()
 
 // Gradle doesn't automatically remove the jar artifact even though we disabled it
 // this causes publishing to fail.  So we nuke all the disabled artifacts from all configurations.

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/8af28584/gradle/utilities.gradle
----------------------------------------------------------------------
diff --git a/gradle/utilities.gradle b/gradle/utilities.gradle
index 18aef20..6a6df04 100644
--- a/gradle/utilities.gradle
+++ b/gradle/utilities.gradle
@@ -27,5 +27,15 @@ allprojects {
       def parts = project.name.split("/")
       return parts[parts.length - 1];
     }
+
+    disableMavenPublishing = {
+      // Use this closure when a project should not publish anything to maven. It appears that the nexus
+      // upload task still needs to exist, which is why an empty one is created.
+      afterEvaluate {
+        tasks.remove(install)
+        tasks.remove(uploadArchives)
+        task uploadArchives << { }
+      }
+    }
   }
 }
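
As a usage note on the new helper: a subproject that produces only a distribution can now combine the existing artifact switches with disableMavenPublishing() instead of hand-removing the nexus tasks. The snippet below is an illustrative build.gradle fragment for a hypothetical subproject, not taken from the commit.

// build.gradle of a hypothetical non-publishing subproject
jar.enabled = false        // no primary jar artifact

extraArchive {             // skip the sources/javadoc/tests jars as well
  sources = false
  javadoc = false
  tests = false
}

// Removes the install and uploadArchives tasks and replaces uploadArchives
// with an empty task, as defined in gradle/utilities.gradle above.
disableMavenPublishing()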


[42/62] [abbrv] [partial] incubator-geode git commit: GEODE-773: Extract static methods from DistributedTestCase

Posted by je...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml90DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml90DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml90DUnitTest.java
index a3df9e9..21e6c61 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml90DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXml90DUnitTest.java
@@ -29,6 +29,7 @@ import com.gemstone.gemfire.internal.cache.xmlcache.CacheXml;
 import com.gemstone.gemfire.internal.cache.xmlcache.RegionAttributesCreation;
 import com.gemstone.gemfire.internal.cache.xmlcache.ResourceManagerCreation;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
 
 
 public class CacheXml90DUnitTest extends CacheXml81DUnitTest {
@@ -90,7 +91,7 @@ public class CacheXml90DUnitTest extends CacheXml81DUnitTest {
     assertNotNull(regionBefore);
     assertEquals(true, regionBefore.getAttributes().getOffHeap());
 
-    ExpectedException expectedException = CacheTestCase.addExpectedException(LocalizedStrings.
+    IgnoredException expectedException = IgnoredException.addIgnoredException(LocalizedStrings.
         LocalRegion_THE_REGION_0_WAS_CONFIGURED_TO_USE_OFF_HEAP_MEMORY_BUT_OFF_HEAP_NOT_CONFIGURED.toLocalizedString("/"+regionName));
     try {
       testXml(cache);
@@ -124,7 +125,7 @@ public class CacheXml90DUnitTest extends CacheXml81DUnitTest {
     assertNotNull(subRegionBefore);
     assertEquals(true, subRegionBefore.getAttributes().getOffHeap());
 
-    ExpectedException expectedException = CacheTestCase.addExpectedException(LocalizedStrings.
+    IgnoredException expectedException = IgnoredException.addIgnoredException(LocalizedStrings.
         LocalRegion_THE_REGION_0_WAS_CONFIGURED_TO_USE_OFF_HEAP_MEMORY_BUT_OFF_HEAP_NOT_CONFIGURED.toLocalizedString("/"+rootRegionName+"/"+subRegionName));
     try {
       testXml(cache);
@@ -181,7 +182,7 @@ public class CacheXml90DUnitTest extends CacheXml81DUnitTest {
       rmc.setEvictionOffHeapPercentage(high);
       rmc.setCriticalOffHeapPercentage(low);
       cache.setResourceManagerCreation(rmc);
-      ExpectedException expectedException = CacheTestCase.addExpectedException(LocalizedStrings.MemoryMonitor_EVICTION_PERCENTAGE_LTE_CRITICAL_PERCENTAGE.toLocalizedString());
+      IgnoredException expectedException = IgnoredException.addIgnoredException(LocalizedStrings.MemoryMonitor_EVICTION_PERCENTAGE_LTE_CRITICAL_PERCENTAGE.toLocalizedString());
       try {
         testXml(cache);
         assertTrue(false);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXmlTestCase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXmlTestCase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXmlTestCase.java
index 555f75a..dd127cb 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXmlTestCase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CacheXmlTestCase.java
@@ -29,6 +29,7 @@ import com.gemstone.gemfire.internal.cache.xmlcache.CacheCreation;
 import com.gemstone.gemfire.internal.cache.xmlcache.CacheXml;
 import com.gemstone.gemfire.internal.cache.xmlcache.CacheXmlGenerator;
 import com.gemstone.gemfire.internal.cache.xmlcache.ClientCacheCreation;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.util.test.TestUtil;
 
 public class CacheXmlTestCase extends CacheTestCase {
@@ -48,9 +49,13 @@ public class CacheXmlTestCase extends CacheTestCase {
     disconnectAllFromDS();
   }
 
-  public void tearDown2() throws Exception {
+  @Override
+  protected final void preTearDownCacheTestCase() throws Exception {
     this.xmlFile = null;    
-    super.tearDown2();
+  }
+  
+  @Override
+  protected final void postTearDownCacheTestCase() throws Exception {
     disconnectAllFromDS();
   }
 
@@ -123,7 +128,7 @@ public class CacheXmlTestCase extends CacheTestCase {
 
     } catch (IOException ex) {
       String s = "While generating XML";
-      fail(s, ex);
+      Assert.fail(s, ex);
     }
 
     setXmlFile(file);
@@ -146,7 +151,7 @@ public class CacheXmlTestCase extends CacheTestCase {
           useSchema, version);
       CacheXmlGenerator.generate(cache, new PrintWriter(sw, true),
           useSchema, version);
-      fail(sw.toString(), re);
+      Assert.fail(sw.toString(), re);
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CertifiableTestCacheListener.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CertifiableTestCacheListener.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CertifiableTestCacheListener.java
index c1f53c7..65dd6a2 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CertifiableTestCacheListener.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/CertifiableTestCacheListener.java
@@ -26,8 +26,8 @@ import java.util.*;
 import com.gemstone.gemfire.LogWriter;
 import com.gemstone.gemfire.cache.EntryEvent;
 import com.gemstone.gemfire.internal.cache.xmlcache.Declarable2;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 public class CertifiableTestCacheListener extends TestCacheListener implements Declarable2 {
   final public Set destroys = Collections.synchronizedSet(new HashSet());
@@ -100,7 +100,7 @@ public class CertifiableTestCacheListener extends TestCacheListener implements D
         return "Waiting for key creation: " + key;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, MAX_TIME, 200, true);
+    Wait.waitForCriterion(ev, MAX_TIME, 200, true);
     return true;
   }
   
@@ -113,7 +113,7 @@ public class CertifiableTestCacheListener extends TestCacheListener implements D
         return "Waiting for key destroy: " + key;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, MAX_TIME, 200, true);
+    Wait.waitForCriterion(ev, MAX_TIME, 200, true);
     return true;
   }
   
@@ -126,7 +126,7 @@ public class CertifiableTestCacheListener extends TestCacheListener implements D
         return "Waiting for key invalidate: " + key;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, MAX_TIME, 200, true);
+    Wait.waitForCriterion(ev, MAX_TIME, 200, true);
     return true;
   }
   
@@ -139,7 +139,7 @@ public class CertifiableTestCacheListener extends TestCacheListener implements D
         return "Waiting for key update: " + key;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, MAX_TIME, 200, true);
+    Wait.waitForCriterion(ev, MAX_TIME, 200, true);
     return true;
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ClearMultiVmCallBkDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ClearMultiVmCallBkDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ClearMultiVmCallBkDUnitTest.java
index 849768a..1d5b621 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ClearMultiVmCallBkDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ClearMultiVmCallBkDUnitTest.java
@@ -37,6 +37,7 @@ import com.gemstone.gemfire.cache.util.CacheListenerAdapter;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -68,16 +69,16 @@ public class ClearMultiVmCallBkDUnitTest extends DistributedTestCase{
       VM vm1 = host.getVM(1);
       vm0.invoke(ClearMultiVmCallBkDUnitTest.class, "createCache");
       vm1.invoke(ClearMultiVmCallBkDUnitTest.class, "createCache");
-      getLogWriter().fine("Cache created in successfully");
+      LogWriterUtils.getLogWriter().fine("Cache created in successfully");
     }
     
-    public void tearDown2(){
-        Host host = Host.getHost(0);
-        VM vm0 = host.getVM(0);
-        VM vm1 = host.getVM(1);
-        vm0.invoke(ClearMultiVmCallBkDUnitTest.class, "closeCache");
-        vm1.invoke(ClearMultiVmCallBkDUnitTest.class, "closeCache");
-        
+    @Override
+    protected final void preTearDown() throws Exception {
+      Host host = Host.getHost(0);
+      VM vm0 = host.getVM(0);
+      VM vm1 = host.getVM(1);
+      vm0.invoke(ClearMultiVmCallBkDUnitTest.class, "closeCache");
+      vm1.invoke(ClearMultiVmCallBkDUnitTest.class, "closeCache");
     }
     
     public static void createCache(){
@@ -131,10 +132,10 @@ public class ClearMultiVmCallBkDUnitTest extends DistributedTestCase{
             vm0.invoke(ClearMultiVmCallBkDUnitTest.class, "putMethod", objArr);
             
         }
-        getLogWriter().fine("Did all puts successfully");
+        LogWriterUtils.getLogWriter().fine("Did all puts successfully");
         
         vm0.invoke(ClearMultiVmCallBkDUnitTest.class,"clearMethod");
-        getLogWriter().fine("Did clear successfully");
+        LogWriterUtils.getLogWriter().fine("Did clear successfully");
         
         while(afterClear){
         }       
@@ -157,10 +158,10 @@ public class ClearMultiVmCallBkDUnitTest extends DistributedTestCase{
             vm0.invoke(ClearMultiVmCallBkDUnitTest.class, "putMethod", objArr);
             vm1.invoke(ClearMultiVmCallBkDUnitTest.class, "getMethod", objArr);
         }
-        getLogWriter().fine("Did all puts successfully");
+        LogWriterUtils.getLogWriter().fine("Did all puts successfully");
         //vm0.invoke(ClearMultiVmCallBkDUnitTest.class,"putMethod");
         vm1.invoke(ClearMultiVmCallBkDUnitTest.class,"clearMethod");
-        getLogWriter().fine("Did clear successfully");
+        LogWriterUtils.getLogWriter().fine("Did clear successfully");
         
         while(afterClear){
         }       
@@ -230,7 +231,7 @@ public class ClearMultiVmCallBkDUnitTest extends DistributedTestCase{
     static class ListenerCallBk extends CacheListenerAdapter {
   
         public void afterRegionClear(RegionEvent event){
-            getLogWriter().fine("In afterClear:: CacheListener Callback");
+            LogWriterUtils.getLogWriter().fine("In afterClear:: CacheListener Callback");
             try {
                 int i = 7;
                 region.put(""+i, "inAfterClear");

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ClearMultiVmDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ClearMultiVmDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ClearMultiVmDUnitTest.java
index 5a87127..8553fe6 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ClearMultiVmDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ClearMultiVmDUnitTest.java
@@ -36,10 +36,13 @@ import com.gemstone.gemfire.cache.RegionDestroyedException;
 import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.cache.UnsupportedOperationInTransactionException;
 import com.gemstone.gemfire.distributed.DistributedSystem;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -72,15 +75,15 @@ public class ClearMultiVmDUnitTest extends DistributedTestCase{
       vm1.invoke(ClearMultiVmDUnitTest.class, "createCache");
     }
     
-    public void tearDown2(){
-        Host host = Host.getHost(0);
-        VM vm0 = host.getVM(0);
-        VM vm1 = host.getVM(1);
-        vm0.invoke(ClearMultiVmDUnitTest.class, "closeCache");
-        vm1.invoke(ClearMultiVmDUnitTest.class, "closeCache");
-        cache = null;
-        invokeInEveryVM(new SerializableRunnable() { public void run() { cache = null; } });
-        
+    @Override
+    protected final void preTearDown() throws Exception {
+      Host host = Host.getHost(0);
+      VM vm0 = host.getVM(0);
+      VM vm1 = host.getVM(1);
+      vm0.invoke(ClearMultiVmDUnitTest.class, "closeCache");
+      vm1.invoke(ClearMultiVmDUnitTest.class, "closeCache");
+      cache = null;
+      Invoke.invokeInEveryVM(new SerializableRunnable() { public void run() { cache = null; } });
     }
     
     public static void createCache(){
@@ -211,15 +214,15 @@ public class ClearMultiVmDUnitTest extends DistributedTestCase{
         
         AsyncInvocation as1 = vm0.invokeAsync(ClearMultiVmDUnitTest.class, "firstVM");
         AsyncInvocation as2 = vm1.invokeAsync(ClearMultiVmDUnitTest.class, "secondVM");
-        DistributedTestCase.join(as1, 30 * 1000, getLogWriter());
-        DistributedTestCase.join(as2, 30 * 1000, getLogWriter());
+        ThreadUtils.join(as1, 30 * 1000);
+        ThreadUtils.join(as2, 30 * 1000);
         
         if(as1.exceptionOccurred()){
-          fail("as1 failed", as1.getException());
+          Assert.fail("as1 failed", as1.getException());
         }
         
         if(as2.exceptionOccurred()){
-          fail("as2 failed", as2.getException());
+          Assert.fail("as2 failed", as2.getException());
         }
         
         int j = vm0.invokeInt(ClearMultiVmDUnitTest.class, "sizeMethod");
@@ -327,9 +330,9 @@ public class ClearMultiVmDUnitTest extends DistributedTestCase{
             }
         });
         
-        DistributedTestCase.join(async1, 30 * 1000, getLogWriter());
+        ThreadUtils.join(async1, 30 * 1000);
         if(async1.exceptionOccurred()){
-          fail("async1 failed", async1.getException());
+          Assert.fail("async1 failed", async1.getException());
         }
         
         SerializableRunnable validate = new

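A minimal sketch of the async pattern the ClearMultiVmDUnitTest hunks above migrate to: ThreadUtils.join(...) without a LogWriter argument, plus the two-argument Assert.fail(...) for remote failures. The test class and the static worker methods are hypothetical; only the dunit utility calls are taken from the hunks above.

    import com.gemstone.gemfire.test.dunit.Assert;
    import com.gemstone.gemfire.test.dunit.AsyncInvocation;
    import com.gemstone.gemfire.test.dunit.DistributedTestCase;
    import com.gemstone.gemfire.test.dunit.Host;
    import com.gemstone.gemfire.test.dunit.ThreadUtils;
    import com.gemstone.gemfire.test.dunit.VM;

    public class ExampleAsyncDUnitTest extends DistributedTestCase {

      public ExampleAsyncDUnitTest(String name) {
        super(name);
      }

      public void testTwoVMsInParallel() throws Exception {
        Host host = Host.getHost(0);
        VM vm0 = host.getVM(0);
        VM vm1 = host.getVM(1);

        // run hypothetical static workers concurrently in two VMs
        AsyncInvocation as1 = vm0.invokeAsync(ExampleAsyncDUnitTest.class, "firstVM");
        AsyncInvocation as2 = vm1.invokeAsync(ExampleAsyncDUnitTest.class, "secondVM");

        // join with a 30s timeout; the replaced variant also took a LogWriter
        ThreadUtils.join(as1, 30 * 1000);
        ThreadUtils.join(as2, 30 * 1000);

        // surface any exception thrown in the remote VMs
        if (as1.exceptionOccurred()) {
          Assert.fail("firstVM failed", as1.getException());
        }
        if (as2.exceptionOccurred()) {
          Assert.fail("secondVM failed", as2.getException());
        }
      }

      public static void firstVM() { /* hypothetical worker */ }

      public static void secondVM() { /* hypothetical worker */ }
    }
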
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ClientMembershipDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ClientMembershipDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ClientMembershipDUnitTest.java
index 9251a56..dbbaaa8 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ClientMembershipDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ClientMembershipDUnitTest.java
@@ -54,11 +54,14 @@ import com.gemstone.gemfire.internal.logging.InternalLogWriter;
 import com.gemstone.gemfire.management.membership.ClientMembership;
 import com.gemstone.gemfire.management.membership.ClientMembershipEvent;
 import com.gemstone.gemfire.management.membership.ClientMembershipListener;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Tests the ClientMembership API including ClientMembershipListener.
@@ -83,8 +86,8 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     super.setUp();
   }
   
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  @Override
+  protected final void postTearDownCacheTestCase() throws Exception {
     InternalClientMembership.unregisterAllListeners();
   }
 
@@ -104,7 +107,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 60 * 1000, 200, true);
   }
   
   protected int getAcceptsInProgress() {
@@ -119,9 +122,9 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
       never arrives. 
    */
   public void testConnectionTimeout() throws Exception {
-    addExpectedException("failed accepting client connection");
+    IgnoredException.addIgnoredException("failed accepting client connection");
     final Host host = Host.getHost(0);
-    final String hostName = getServerHostName(host);
+    final String hostName = NetworkUtils.getServerHostName(host);
     final VM vm0 = host.getVM(0);
     System.setProperty(AcceptorImpl.ACCEPT_TIMEOUT_PROPERTY_NAME, "1000");
     try {
@@ -132,7 +135,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
       SerializableRunnable createMeanSocket = new CacheSerializableRunnable("Connect to server with socket") {
         public void run2() throws CacheException {
           getCache(); // create a cache so we have stats
-          getLogWriter().info("connecting to cache server with socket");
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("connecting to cache server with socket");
           try {
             InetAddress addr = InetAddress.getByName(hostName);
             meanSocket = new Socket(addr, port);
@@ -144,7 +147,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
       };
       SerializableRunnable closeMeanSocket = new CacheSerializableRunnable("close mean socket") {
         public void run2() throws CacheException {
-          getLogWriter().info("closing mean socket");
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("closing mean socket");
           try {
             meanSocket.close();
           }
@@ -155,28 +158,28 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
 
       assertEquals(0, getAcceptsInProgress());
       
-      getLogWriter().info("creating mean socket");
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("creating mean socket");
       vm0.invoke(createMeanSocket);
       try {
-        getLogWriter().info("waiting to see it connect on server");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("waiting to see it connect on server");
         waitForAcceptsInProgressToBe(1);
       } finally {
-        getLogWriter().info("closing mean socket");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("closing mean socket");
         vm0.invoke(closeMeanSocket);
       }
-      getLogWriter().info("waiting to see accept to go away on server");
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("waiting to see accept to go away on server");
       waitForAcceptsInProgressToBe(0);
 
       // now try it without a close. Server should timeout the mean connect
-      getLogWriter().info("creating mean socket 2");
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("creating mean socket 2");
       vm0.invoke(createMeanSocket);
       try {
-        getLogWriter().info("waiting to see it connect on server 2");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("waiting to see it connect on server 2");
         waitForAcceptsInProgressToBe(1);
-        getLogWriter().info("waiting to see accept to go away on server without us closing");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("waiting to see accept to go away on server without us closing");
         waitForAcceptsInProgressToBe(0);
       } finally {
-        getLogWriter().info("closing mean socket 2");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("closing mean socket 2");
         vm0.invoke(closeMeanSocket);
       }
 
@@ -734,7 +737,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
    */
   public void testClientMembershipEventsInClient() throws Exception {
     getSystem();
-    addExpectedException("IOException");
+    IgnoredException.addIgnoredException("IOException");
     final boolean[] fired = new boolean[3];
     final DistributedMember[] member = new DistributedMember[3];
     final String[] memberId = new String[3];
@@ -743,7 +746,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     // create and register ClientMembershipListener in controller vm...
     ClientMembershipListener listener = new ClientMembershipListener() {
       public synchronized void memberJoined(ClientMembershipEvent event) {
-        getLogWriter().info("[testClientMembershipEventsInClient] memberJoined: " + event);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testClientMembershipEventsInClient] memberJoined: " + event);
         fired[JOINED] = true;
         member[JOINED] = event.getMember();
         memberId[JOINED] = event.getMemberId();
@@ -751,11 +754,11 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
         notifyAll();
       }
       public synchronized void memberLeft(ClientMembershipEvent event) {
-        getLogWriter().info("[testClientMembershipEventsInClient] memberLeft: " + event);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testClientMembershipEventsInClient] memberLeft: " + event);
 //        fail("Please update testClientMembershipEventsInClient to handle memberLeft for BridgeServer.");
       }
       public synchronized void memberCrashed(ClientMembershipEvent event) {
-        getLogWriter().info("[testClientMembershipEventsInClient] memberCrashed: " + event);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testClientMembershipEventsInClient] memberCrashed: " + event);
         fired[CRASHED] = true;
         member[CRASHED] = event.getMember();
         memberId[CRASHED] = event.getMemberId();
@@ -773,7 +776,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     vm0.invoke(new CacheSerializableRunnable("Create BridgeServer") {
       public void run2() throws CacheException {
         try {
-          getLogWriter().info("[testClientMembershipEventsInClient] Create BridgeServer");
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testClientMembershipEventsInClient] Create BridgeServer");
           getSystem();
           AttributesFactory factory = new AttributesFactory();
           factory.setScope(Scope.LOCAL);
@@ -800,9 +803,9 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     String serverMemberId = (String) vm0.invoke(ClientMembershipDUnitTest.class,
                                                 "getMemberId");
 
-    getLogWriter().info("[testClientMembershipEventsInClient] ports[0]=" + ports[0]);
-    getLogWriter().info("[testClientMembershipEventsInClient] serverMember=" + serverMember);
-    getLogWriter().info("[testClientMembershipEventsInClient] serverMemberId=" + serverMemberId);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testClientMembershipEventsInClient] ports[0]=" + ports[0]);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testClientMembershipEventsInClient] serverMember=" + serverMember);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testClientMembershipEventsInClient] serverMemberId=" + serverMemberId);
 
     assertFalse(fired[JOINED]);
     assertNull(member[JOINED]);
@@ -818,7 +821,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     assertFalse(isClient[CRASHED]);
     
     // sanity check...
-    getLogWriter().info("[testClientMembershipEventsInClient] sanity check");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testClientMembershipEventsInClient] sanity check");
     DistributedMember test = new TestDistributedMember("test");
     InternalClientMembership.notifyJoined(test, SERVER);
     synchronized(listener) {
@@ -842,7 +845,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     resetArraysForTesting(fired, member, memberId, isClient);
     
     // create bridge client in controller vm...
-    getLogWriter().info("[testClientMembershipEventsInClient] create bridge client");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testClientMembershipEventsInClient] create bridge client");
     Properties config = new Properties();
     config.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
     config.setProperty(DistributionConfig.LOCATORS_NAME, "");
@@ -852,12 +855,12 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
       getCache();
       AttributesFactory factory = new AttributesFactory();
       factory.setScope(Scope.LOCAL);
-      ClientServerTestCase.configureConnectionPool(factory, getServerHostName(Host.getHost(0)), ports, true, -1, -1, null);
+      ClientServerTestCase.configureConnectionPool(factory, NetworkUtils.getServerHostName(Host.getHost(0)), ports, true, -1, -1, null);
       createRegion(name, factory.create());
       assertNotNull(getRootRegion().getSubregion(name));
     }
     catch (CacheException ex) {
-      fail("While creating Region on Edge", ex);
+      Assert.fail("While creating Region on Edge", ex);
     }
     synchronized(listener) {
       if (!fired[JOINED] && !fired[CRASHED]) {
@@ -865,7 +868,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
       }
     }
     
-    getLogWriter().info("[testClientMembershipEventsInClient] assert client detected server join");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testClientMembershipEventsInClient] assert client detected server join");
     
     // first check the getCurrentServers() result
     ClientCache clientCache = (ClientCache)getCache();
@@ -894,7 +897,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
 
     vm0.invoke(new SerializableRunnable("Stop BridgeServer") {
       public void run() {
-        getLogWriter().info("[testClientMembershipEventsInClient] Stop BridgeServer");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testClientMembershipEventsInClient] Stop BridgeServer");
         stopBridgeServers(getCache());
       }
     });
@@ -904,7 +907,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
       }
     }
     
-    getLogWriter().info("[testClientMembershipEventsInClient] assert client detected server departure");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testClientMembershipEventsInClient] assert client detected server departure");
     assertFalse(fired[JOINED]);
     assertNull(member[JOINED]);
     assertNull(memberId[JOINED]);
@@ -925,7 +928,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     vm0.invoke(new CacheSerializableRunnable("Recreate BridgeServer") {
       public void run2() throws CacheException {
         try {
-          getLogWriter().info("[testClientMembershipEventsInClient] restarting BridgeServer");
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testClientMembershipEventsInClient] restarting BridgeServer");
           startBridgeServer(ports[0]);
         }
         catch(IOException e) {
@@ -940,7 +943,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
       }
     }
     
-    getLogWriter().info("[testClientMembershipEventsInClient] assert client detected server recovery");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testClientMembershipEventsInClient] assert client detected server recovery");
     assertTrue(fired[JOINED]);
     assertNotNull(member[JOINED]);
     assertNotNull(memberId[JOINED]);
@@ -969,7 +972,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     // create and register ClientMembershipListener in controller vm...
     ClientMembershipListener listener = new ClientMembershipListener() {
       public synchronized void memberJoined(ClientMembershipEvent event) {
-        getLogWriter().info("[testClientMembershipEventsInServer] memberJoined: " + event);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testClientMembershipEventsInServer] memberJoined: " + event);
         fired[JOINED] = true;
         member[JOINED] = event.getMember();
         memberId[JOINED] = event.getMemberId();
@@ -978,7 +981,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
         assertFalse(fired[LEFT] || fired[CRASHED]);
       }
       public synchronized void memberLeft(ClientMembershipEvent event) {
-        getLogWriter().info("[testClientMembershipEventsInServer] memberLeft: " + event);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testClientMembershipEventsInServer] memberLeft: " + event);
         fired[LEFT] = true;
         member[LEFT] = event.getMember();
         memberId[LEFT] = event.getMemberId();
@@ -987,7 +990,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
         assertFalse(fired[JOINED] || fired[CRASHED]);
       }
       public synchronized void memberCrashed(ClientMembershipEvent event) {
-        getLogWriter().info("[testClientMembershipEventsInServer] memberCrashed: " + event);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testClientMembershipEventsInServer] memberCrashed: " + event);
         fired[CRASHED] = true;
         member[CRASHED] = event.getMember();
         memberId[CRASHED] = event.getMemberId();
@@ -1003,7 +1006,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     final int[] ports = new int[1];
 
     // create BridgeServer in controller vm...
-    getLogWriter().info("[testClientMembershipEventsInServer] Create BridgeServer");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testClientMembershipEventsInServer] Create BridgeServer");
     getSystem();
     AttributesFactory factory = new AttributesFactory();
     factory.setScope(Scope.LOCAL);
@@ -1016,9 +1019,9 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     String serverMemberId = getMemberId();
     DistributedMember serverMember = getDistributedMember();
 
-    getLogWriter().info("[testClientMembershipEventsInServer] ports[0]=" + ports[0]);
-    getLogWriter().info("[testClientMembershipEventsInServer] serverMemberId=" + serverMemberId);
-    getLogWriter().info("[testClientMembershipEventsInServer] serverMember=" + serverMember);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testClientMembershipEventsInServer] ports[0]=" + ports[0]);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testClientMembershipEventsInServer] serverMemberId=" + serverMemberId);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testClientMembershipEventsInServer] serverMember=" + serverMember);
 
     assertFalse(fired[JOINED]);
     assertNull(member[JOINED]);
@@ -1034,7 +1037,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     assertFalse(isClient[CRASHED]);
     
     // sanity check...
-    getLogWriter().info("[testClientMembershipEventsInServer] sanity check");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testClientMembershipEventsInServer] sanity check");
     DistributedMember test = new TestDistributedMember("test");
     InternalClientMembership.notifyJoined(test, CLIENT);
     synchronized(listener) {
@@ -1060,14 +1063,14 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     SerializableRunnable createConnectionPool =
     new CacheSerializableRunnable("Create connectionPool") {
       public void run2() throws CacheException {
-        getLogWriter().info("[testClientMembershipEventsInServer] create bridge client");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testClientMembershipEventsInServer] create bridge client");
         Properties config = new Properties();
         config.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
         config.setProperty(DistributionConfig.LOCATORS_NAME, "");
         getSystem(config);
         AttributesFactory factory = new AttributesFactory();
         factory.setScope(Scope.LOCAL);
-        ClientServerTestCase.configureConnectionPool(factory, getServerHostName(host), ports, true, -1, 2, null);
+        ClientServerTestCase.configureConnectionPool(factory, NetworkUtils.getServerHostName(host), ports, true, -1, 2, null);
         createRegion(name, factory.create());
         assertNotNull(getRootRegion().getSubregion(name));
       }
@@ -1086,7 +1089,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
       }
     }
     
-    getLogWriter().info("[testClientMembershipEventsInServer] assert server detected client join");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testClientMembershipEventsInServer] assert server detected client join");
     assertTrue(fired[JOINED]);
     assertEquals(member[JOINED] + " should equal " + clientMember,
       clientMember, member[JOINED]);
@@ -1107,7 +1110,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     
     vm0.invoke(new SerializableRunnable("Stop bridge client") {
       public void run() {
-        getLogWriter().info("[testClientMembershipEventsInServer] Stop bridge client");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testClientMembershipEventsInServer] Stop bridge client");
         getRootRegion().getSubregion(name).close();
         Map m = PoolManager.getAll();
         Iterator mit = m.values().iterator();
@@ -1124,7 +1127,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
       }
     }
     
-    getLogWriter().info("[testClientMembershipEventsInServer] assert server detected client left");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testClientMembershipEventsInServer] assert server detected client left");
     assertFalse(fired[JOINED]);
     assertNull(member[JOINED]);
     assertNull(memberId[JOINED]);
@@ -1150,7 +1153,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
       }
     }
     
-    getLogWriter().info("[testClientMembershipEventsInServer] assert server detected client re-join");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testClientMembershipEventsInServer] assert server detected client re-join");
     assertTrue(fired[JOINED]);
     assertEquals(clientMember, member[JOINED]);
     assertEquals(clientMemberId, memberId[JOINED]);
@@ -1171,7 +1174,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     try {
       vm0.invoke(new SerializableRunnable("Stop bridge client") {
         public void run() {
-          getLogWriter().info("[testClientMembershipEventsInServer] Stop bridge client");
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testClientMembershipEventsInServer] Stop bridge client");
           getRootRegion().getSubregion(name).close();
           Map m = PoolManager.getAll();
           Iterator mit = m.values().iterator();
@@ -1188,7 +1191,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
         }
       }
       
-      getLogWriter().info("[testClientMembershipEventsInServer] assert server detected client crashed");
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testClientMembershipEventsInServer] assert server detected client crashed");
       assertFalse(fired[JOINED]);
       assertNull(member[JOINED]);
       assertNull(memberId[JOINED]);
@@ -1217,7 +1220,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
    * timeout.
    */
   private void pauseForClientToJoin() {
-    pause(2000);
+    Wait.pause(2000);
   }
   
   /** 
@@ -1308,10 +1311,10 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     final String name = this.getUniqueName();
     final int[] ports = new int[1];
     
-    addExpectedException("ConnectException");
+    IgnoredException.addIgnoredException("ConnectException");
 
     // create BridgeServer in controller vm...
-    getLogWriter().info("[testGetConnectedClients] Create BridgeServer");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testGetConnectedClients] Create BridgeServer");
     getSystem();
     AttributesFactory factory = new AttributesFactory();
     factory.setScope(Scope.LOCAL);
@@ -1323,14 +1326,14 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     assertTrue(ports[0] != 0);
     String serverMemberId = getMemberId();
 
-    getLogWriter().info("[testGetConnectedClients] ports[0]=" + ports[0]);
-    getLogWriter().info("[testGetConnectedClients] serverMemberId=" + serverMemberId);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testGetConnectedClients] ports[0]=" + ports[0]);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testGetConnectedClients] serverMemberId=" + serverMemberId);
 
     final Host host = Host.getHost(0);
     SerializableRunnable createPool =
     new CacheSerializableRunnable("Create connection pool") {
       public void run2() throws CacheException {
-        getLogWriter().info("[testGetConnectedClients] create bridge client");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testGetConnectedClients] create bridge client");
         Properties config = new Properties();
         config.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
         config.setProperty(DistributionConfig.LOCATORS_NAME, "");
@@ -1340,7 +1343,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
         getSystem(config);
         AttributesFactory factory = new AttributesFactory();
         factory.setScope(Scope.LOCAL);
-        Pool p = ClientServerTestCase.configureConnectionPool(factory, getServerHostName(host), ports, true, -1, -1, null);
+        Pool p = ClientServerTestCase.configureConnectionPool(factory, NetworkUtils.getServerHostName(host), ports, true, -1, -1, null);
         createRegion(name, factory.create());
         assertNotNull(getRootRegion().getSubregion(name));
         assertTrue(p.getServers().size() > 0);
@@ -1376,7 +1379,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
           return true;
         }
       };
-      waitForCriterion(wc, 30000, 100, false);
+      Wait.waitForCriterion(wc, 30000, 100, false);
     }
     
     Map connectedClients = InternalClientMembership.getConnectedClients(false);
@@ -1384,10 +1387,10 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     assertEquals(clientMemberIds.size(), connectedClients.size());
     for (Iterator iter = connectedClients.keySet().iterator(); iter.hasNext();) {
       String connectedClient = (String)iter.next();
-      getLogWriter().info("[testGetConnectedClients] checking for client " + connectedClient);
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testGetConnectedClients] checking for client " + connectedClient);
       assertTrue(clientMemberIds.contains(connectedClient));
       Object[] result = (Object[])connectedClients.get(connectedClient);
-      getLogWriter().info("[testGetConnectedClients] result: " + 
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testGetConnectedClients] result: " + 
                           (result==null? "none"
                               : String.valueOf(result[0])+"; connections="+result[1]));
     }
@@ -1408,7 +1411,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
       vm.invoke(new CacheSerializableRunnable("Create bridge server") {
         public void run2() throws CacheException {
           // create BridgeServer in controller vm...
-          getLogWriter().info("[testGetConnectedServers] Create BridgeServer");
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testGetConnectedServers] Create BridgeServer");
           getSystem();
           AttributesFactory factory = new AttributesFactory();
           factory.setScope(Scope.LOCAL);
@@ -1421,15 +1424,15 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
             testGetConnectedServers_port = startBridgeServer(0);
           }
           catch (IOException e) {
-            getLogWriter().error("startBridgeServer threw IOException", e);
+            com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().error("startBridgeServer threw IOException", e);
             fail("startBridgeServer threw IOException " + e.getMessage());
           }
           
           assertTrue(testGetConnectedServers_port != 0);
       
-          getLogWriter().info("[testGetConnectedServers] port=" + 
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testGetConnectedServers] port=" + 
             ports[whichVM]);
-          getLogWriter().info("[testGetConnectedServers] serverMemberId=" + 
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testGetConnectedServers] serverMemberId=" + 
             getDistributedMember());
         }
       });
@@ -1438,7 +1441,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
       assertTrue(ports[whichVM] != 0);
     }
     
-    getLogWriter().info("[testGetConnectedServers] create bridge client");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testGetConnectedServers] create bridge client");
     Properties config = new Properties();
     config.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
     config.setProperty(DistributionConfig.LOCATORS_NAME, "");
@@ -1449,10 +1452,10 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     factory.setScope(Scope.LOCAL);
 
     for (int i = 0; i < ports.length; i++) {
-      getLogWriter().info("[testGetConnectedServers] creating connectionpool for " + 
-        getServerHostName(host) + " " + ports[i]);
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testGetConnectedServers] creating connectionpool for " + 
+        NetworkUtils.getServerHostName(host) + " " + ports[i]);
       int[] thisServerPorts = new int[] { ports[i] };
-      ClientServerTestCase.configureConnectionPoolWithName(factory, getServerHostName(host), thisServerPorts, false, -1, -1, null,"pooly"+i);
+      ClientServerTestCase.configureConnectionPoolWithName(factory, NetworkUtils.getServerHostName(host), thisServerPorts, false, -1, -1, null,"pooly"+i);
       Region region = createRegion(name+"_"+i, factory.create());
       assertNotNull(getRootRegion().getSubregion(name+"_"+i));
       region.get("KEY-1");
@@ -1478,7 +1481,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
           return true;
         }
       };
-      waitForCriterion(wc, 60000, 100, false);
+      Wait.waitForCriterion(wc, 60000, 100, false);
     }
 
     {
@@ -1491,7 +1494,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     assertEquals(host.getVMCount(), connectedServers.size());
     for (Iterator iter = connectedServers.keySet().iterator(); iter.hasNext();) {
       String connectedServer = (String) iter.next();
-      getLogWriter().info("[testGetConnectedServers]  value for connectedServer: " + 
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testGetConnectedServers]  value for connectedServer: " + 
                           connectedServers.get(connectedServer));
     }
   }
@@ -1516,7 +1519,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
       vm.invoke(new CacheSerializableRunnable("Create bridge server") {
         public void run2() throws CacheException {
           // create BridgeServer in controller vm...
-          getLogWriter().info("[testGetNotifiedClients] Create BridgeServer");
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testGetNotifiedClients] Create BridgeServer");
           getSystem();
           AttributesFactory factory = new AttributesFactory();
           Region region = createRegion(name, factory.create());
@@ -1528,15 +1531,15 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
             testGetNotifiedClients_port = startBridgeServer(0);
           }
           catch (IOException e) {
-            getLogWriter().error("startBridgeServer threw IOException", e);
+            com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().error("startBridgeServer threw IOException", e);
             fail("startBridgeServer threw IOException " + e.getMessage());
           }
           
           assertTrue(testGetNotifiedClients_port != 0);
       
-          getLogWriter().info("[testGetNotifiedClients] port=" + 
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testGetNotifiedClients] port=" + 
             ports[whichVM]);
-          getLogWriter().info("[testGetNotifiedClients] serverMemberId=" + 
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testGetNotifiedClients] serverMemberId=" + 
             getMemberId());
         }
       });
@@ -1545,7 +1548,7 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
       assertTrue(ports[whichVM] != 0);
     }
     
-    getLogWriter().info("[testGetNotifiedClients] create bridge client");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testGetNotifiedClients] create bridge client");
     Properties config = new Properties();
     config.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
     config.setProperty(DistributionConfig.LOCATORS_NAME, "");
@@ -1555,8 +1558,8 @@ public class ClientMembershipDUnitTest extends ClientServerTestCase {
     AttributesFactory factory = new AttributesFactory();
     factory.setScope(Scope.LOCAL);
 
-    getLogWriter().info("[testGetNotifiedClients] creating connection pool");
-    ClientServerTestCase.configureConnectionPool(factory, getServerHostName(host), ports, true, -1, -1, null);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testGetNotifiedClients] creating connection pool");
+    ClientServerTestCase.configureConnectionPool(factory, NetworkUtils.getServerHostName(host), ports, true, -1, -1, null);
     Region region = createRegion(name, factory.create());
     assertNotNull(getRootRegion().getSubregion(name));
     region.registerInterest("KEY-1");

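The ClientMembershipDUnitTest hunks above move waitForCriterion and pause onto the Wait class, with WaitCriterion now a top-level type. A minimal self-contained sketch of that polling idiom follows, assuming a hypothetical connectedClients() helper as the condition being polled; only the Wait/WaitCriterion calls are taken from the diff.

    import com.gemstone.gemfire.test.dunit.Wait;
    import com.gemstone.gemfire.test.dunit.WaitCriterion;

    public class WaitForClientsExample {

      // hypothetical stand-in for whatever state a test polls (stats, queue sizes, ...)
      static int connectedClients() {
        return 1;
      }

      static void awaitAtLeastOneClient() {
        WaitCriterion ev = new WaitCriterion() {
          public boolean done() {
            return connectedClients() > 0;
          }
          public String description() {
            return "waiting for at least one connected client";
          }
        };
        // 60s timeout, 200ms poll interval; the final flag controls failure on timeout
        Wait.waitForCriterion(ev, 60 * 1000, 200, true);
      }
    }

As in the hunks above, passing true for the final argument fails the test if the criterion never holds, while false simply returns after the timeout.
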
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ClientRegisterInterestDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ClientRegisterInterestDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ClientRegisterInterestDUnitTest.java
index 958b863..a734a10 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ClientRegisterInterestDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ClientRegisterInterestDUnitTest.java
@@ -25,9 +25,12 @@ import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.cache.client.internal.PoolImpl;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.cache.client.SubscriptionNotEnabledException;
 
 /**
@@ -42,8 +45,8 @@ public class ClientRegisterInterestDUnitTest extends ClientServerTestCase {
     super(name);
   }
   
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  @Override
+  protected final void postTearDownCacheTestCase() throws Exception {
     disconnectAllFromDS(); // cleans up bridge server and client and lonerDS
   }
   
@@ -60,7 +63,7 @@ public class ClientRegisterInterestDUnitTest extends ClientServerTestCase {
     final VM vm = Host.getHost(0).getVM(whichVM);
     vm.invoke(new CacheSerializableRunnable("Create bridge server") {
       public void run2() throws CacheException {
-        getLogWriter().info("[testBug35381] Create BridgeServer");
+        LogWriterUtils.getLogWriter().info("[testBug35381] Create BridgeServer");
         getSystem();
         AttributesFactory factory = new AttributesFactory();
         factory.setScope(Scope.LOCAL);
@@ -73,21 +76,21 @@ public class ClientRegisterInterestDUnitTest extends ClientServerTestCase {
           bridgeServerPort = startBridgeServer(0);
         }
         catch (IOException e) {
-          getLogWriter().error("startBridgeServer threw IOException", e);
+          LogWriterUtils.getLogWriter().error("startBridgeServer threw IOException", e);
           fail("startBridgeServer threw IOException " + e.getMessage());
         }
         
         assertTrue(bridgeServerPort != 0);
     
-        getLogWriter().info("[testBug35381] port=" + bridgeServerPort);
-        getLogWriter().info("[testBug35381] serverMemberId=" + getMemberId());
+        LogWriterUtils.getLogWriter().info("[testBug35381] port=" + bridgeServerPort);
+        LogWriterUtils.getLogWriter().info("[testBug35381] serverMemberId=" + getMemberId());
       }
     });
     ports[whichVM] = vm.invokeInt(ClientRegisterInterestDUnitTest.class, 
                                   "getBridgeServerPort");
     assertTrue(ports[whichVM] != 0);
     
-    getLogWriter().info("[testBug35381] create bridge client");
+    LogWriterUtils.getLogWriter().info("[testBug35381] create bridge client");
     Properties config = new Properties();
     config.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
     config.setProperty(DistributionConfig.LOCATORS_NAME, "");
@@ -97,9 +100,9 @@ public class ClientRegisterInterestDUnitTest extends ClientServerTestCase {
     AttributesFactory factory = new AttributesFactory();
     factory.setScope(Scope.LOCAL);
 
-    getLogWriter().info("[testBug35381] creating connection pool");
+    LogWriterUtils.getLogWriter().info("[testBug35381] creating connection pool");
     boolean establishCallbackConnection = false; // SOURCE OF BUG 35381
-    ClientServerTestCase.configureConnectionPool(factory, getServerHostName(host), ports, establishCallbackConnection, -1, -1, null);
+    ClientServerTestCase.configureConnectionPool(factory, NetworkUtils.getServerHostName(host), ports, establishCallbackConnection, -1, -1, null);
     Region region = createRegion(name, factory.create());
     assertNotNull(getRootRegion().getSubregion(name));
     try {
@@ -145,7 +148,7 @@ public class ClientRegisterInterestDUnitTest extends ClientServerTestCase {
     final VM firstServerVM = Host.getHost(0).getVM(firstServerIdx);
     firstServerVM.invoke(new CacheSerializableRunnable("Create first bridge server") {
       public void run2() throws CacheException {
-        getLogWriter().info("[testRegisterInterestFailover] Create first bridge server");
+        LogWriterUtils.getLogWriter().info("[testRegisterInterestFailover] Create first bridge server");
         getSystem();
         AttributesFactory factory = new AttributesFactory();
         factory.setScope(Scope.LOCAL);
@@ -160,15 +163,15 @@ public class ClientRegisterInterestDUnitTest extends ClientServerTestCase {
           bridgeServerPort = startBridgeServer(0);
         }
         catch (IOException e) {
-          getLogWriter().error("startBridgeServer threw IOException", e);
+          LogWriterUtils.getLogWriter().error("startBridgeServer threw IOException", e);
           fail("startBridgeServer threw IOException " + e.getMessage());
         }
         
         assertTrue(bridgeServerPort != 0);
     
-        getLogWriter().info("[testRegisterInterestFailover] " +
+        LogWriterUtils.getLogWriter().info("[testRegisterInterestFailover] " +
           "firstServer port=" + bridgeServerPort);
-        getLogWriter().info("[testRegisterInterestFailover] " +
+        LogWriterUtils.getLogWriter().info("[testRegisterInterestFailover] " +
           "firstServer memberId=" + getMemberId());
       }
     });
@@ -178,7 +181,7 @@ public class ClientRegisterInterestDUnitTest extends ClientServerTestCase {
     final VM secondServerVM = Host.getHost(0).getVM(secondServerIdx);
     secondServerVM.invoke(new CacheSerializableRunnable("Create second bridge server") {
       public void run2() throws CacheException {
-        getLogWriter().info("[testRegisterInterestFailover] Create second bridge server");
+        LogWriterUtils.getLogWriter().info("[testRegisterInterestFailover] Create second bridge server");
         getSystem();
         AttributesFactory factory = new AttributesFactory();
         factory.setScope(Scope.LOCAL);
@@ -191,15 +194,15 @@ public class ClientRegisterInterestDUnitTest extends ClientServerTestCase {
           bridgeServerPort = startBridgeServer(0);
         }
         catch (IOException e) {
-          getLogWriter().error("startBridgeServer threw IOException", e);
+          LogWriterUtils.getLogWriter().error("startBridgeServer threw IOException", e);
           fail("startBridgeServer threw IOException " + e.getMessage());
         }
         
         assertTrue(bridgeServerPort != 0);
     
-        getLogWriter().info("[testRegisterInterestFailover] " +
+        LogWriterUtils.getLogWriter().info("[testRegisterInterestFailover] " +
           "secondServer port=" + bridgeServerPort);
-        getLogWriter().info("[testRegisterInterestFailover] " +
+        LogWriterUtils.getLogWriter().info("[testRegisterInterestFailover] " +
           "secondServer memberId=" + getMemberId());
       }
     });
@@ -221,7 +224,7 @@ public class ClientRegisterInterestDUnitTest extends ClientServerTestCase {
     });
     
     // create the bridge client
-    getLogWriter().info("[testBug35654] create bridge client");
+    LogWriterUtils.getLogWriter().info("[testBug35654] create bridge client");
     Properties config = new Properties();
     config.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
     config.setProperty(DistributionConfig.LOCATORS_NAME, "");
@@ -231,9 +234,9 @@ public class ClientRegisterInterestDUnitTest extends ClientServerTestCase {
     AttributesFactory factory = new AttributesFactory();
     factory.setScope(Scope.LOCAL);
 
-    getLogWriter().info("[testRegisterInterestFailover] creating connection pool");
+    LogWriterUtils.getLogWriter().info("[testRegisterInterestFailover] creating connection pool");
     boolean establishCallbackConnection = true;
-    final PoolImpl p = (PoolImpl)ClientServerTestCase.configureConnectionPool(factory, getServerHostName(host), ports, establishCallbackConnection, -1, -1, null);
+    final PoolImpl p = (PoolImpl)ClientServerTestCase.configureConnectionPool(factory, NetworkUtils.getServerHostName(host), ports, establishCallbackConnection, -1, -1, null);
 
     final Region region1 = createRootRegion(regionName1, factory.create());
     final Region region2 = createRootRegion(regionName2, factory.create());
@@ -264,7 +267,7 @@ public class ClientRegisterInterestDUnitTest extends ClientServerTestCase {
         return "primary port remained invalid";
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 10 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 10 * 1000, 200, true);
     assertEquals(ports[firstServerIdx], p.getPrimaryPort()); 
     
     // assert initial values
@@ -296,7 +299,7 @@ public class ClientRegisterInterestDUnitTest extends ClientServerTestCase {
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 10 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 10 * 1000, 200, true);
     assertEquals("VAL-1-1", region1.get(key1));
     assertEquals("VAL-1-1", region2.get(key2));
     assertEquals("VAL-1-1", region3.get(key3));
@@ -308,7 +311,7 @@ public class ClientRegisterInterestDUnitTest extends ClientServerTestCase {
           startBridgeServer(ports[secondServerIdx]);
         }
         catch (IOException e) {
-          getLogWriter().error("startBridgeServer threw IOException", e);
+          LogWriterUtils.getLogWriter().error("startBridgeServer threw IOException", e);
           fail("startBridgeServer threw IOException " + e.getMessage());
         }
       }
@@ -329,7 +332,7 @@ public class ClientRegisterInterestDUnitTest extends ClientServerTestCase {
         return "primary port never became " + ports[secondServerIdx];
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 100 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 100 * 1000, 200, true);
     
     try {
       assertEquals(null, region2.get(key2));
@@ -377,7 +380,7 @@ public class ClientRegisterInterestDUnitTest extends ClientServerTestCase {
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 100 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 100 * 1000, 200, true);
     assertEquals("VAL-2-2", region1.get(key1));
     assertEquals("VAL-0",   region2.get(key2));
     assertEquals("VAL-2-2", region3.get(key3));
@@ -413,7 +416,7 @@ public class ClientRegisterInterestDUnitTest extends ClientServerTestCase {
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 100 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 100 * 1000, 200, true);
     assertEquals("VAL-2-3", region1.get(key1));
     assertEquals("VAL-2-2", region2.get(key2));
     assertEquals("VAL-2-3", region3.get(key3));

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ClientServerCCEDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ClientServerCCEDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ClientServerCCEDUnitTest.java
index 181fea7..fb319b6 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ClientServerCCEDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ClientServerCCEDUnitTest.java
@@ -44,10 +44,15 @@ import com.gemstone.gemfire.internal.cache.TombstoneService;
 import com.gemstone.gemfire.internal.cache.ha.HARegionQueue;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheClientNotifier;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheClientProxy;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * concurrency-control tests for client/server
@@ -63,7 +68,8 @@ public class ClientServerCCEDUnitTest extends CacheTestCase {
     HARegionQueue.setMessageSyncInterval(5);
   }
   
-  public void tearDown2() {
+  @Override
+  protected final void preTearDownCacheTestCase() {
     disconnectAllFromDS();
     HARegionQueue.setMessageSyncInterval(HARegionQueue.DEFAULT_MESSAGE_SYNC_INTERVAL);
   }
@@ -150,19 +156,19 @@ public class ClientServerCCEDUnitTest extends CacheTestCase {
     createEntries(vm0);
     destroyEntries(vm0);
     
-    getLogWriter().info("***************** register interest on all keys");
+    LogWriterUtils.getLogWriter().info("***************** register interest on all keys");
     createClientRegion(vm2, name, port, true);
     registerInterest(vm2);
     ensureAllTombstonesPresent(vm2);
     
-    getLogWriter().info("***************** clear cache and register interest on one key, Object0");
+    LogWriterUtils.getLogWriter().info("***************** clear cache and register interest on one key, Object0");
     clearLocalCache(vm2);
     registerInterestOneKey(vm2, "Object0");
     List<String> keys = new ArrayList(1);
     keys.add("Object0");
     ensureAllTombstonesPresent(vm2, keys);
 
-    getLogWriter().info("***************** clear cache and register interest on four keys");
+    LogWriterUtils.getLogWriter().info("***************** clear cache and register interest on four keys");
     clearLocalCache(vm2);
     keys = new ArrayList(4);
     for (int i=0; i<4; i++) {
@@ -171,12 +177,12 @@ public class ClientServerCCEDUnitTest extends CacheTestCase {
     registerInterest(vm2, keys);
     ensureAllTombstonesPresent(vm2, keys);
 
-    getLogWriter().info("***************** clear cache and register interest with regex on four keys");
+    LogWriterUtils.getLogWriter().info("***************** clear cache and register interest with regex on four keys");
     clearLocalCache(vm2);
     registerInterestRegex(vm2, "Object[0-3]");
     ensureAllTombstonesPresent(vm2, keys);
 
-    getLogWriter().info("***************** fetch entries with getAll()");
+    LogWriterUtils.getLogWriter().info("***************** fetch entries with getAll()");
     clearLocalCache(vm2);
     getAll(vm2);
     ensureAllTombstonesPresent(vm2);
@@ -203,19 +209,19 @@ public class ClientServerCCEDUnitTest extends CacheTestCase {
     createEntries(vm0);
     invalidateEntries(vm0);
     
-    getLogWriter().info("***************** register interest on all keys");
+    LogWriterUtils.getLogWriter().info("***************** register interest on all keys");
     createClientRegion(vm2, name, port, true);
     registerInterest(vm2);
     ensureAllInvalidsPresent(vm2);
     
-    getLogWriter().info("***************** clear cache and register interest on one key, Object0");
+    LogWriterUtils.getLogWriter().info("***************** clear cache and register interest on one key, Object0");
     clearLocalCache(vm2);
     registerInterestOneKey(vm2, "Object0");
     List<String> keys = new ArrayList(1);
     keys.add("Object0");
     ensureAllInvalidsPresent(vm2, keys);
 
-    getLogWriter().info("***************** clear cache and register interest on four keys");
+    LogWriterUtils.getLogWriter().info("***************** clear cache and register interest on four keys");
     clearLocalCache(vm2);
     keys = new ArrayList(4);
     for (int i=0; i<4; i++) {
@@ -224,12 +230,12 @@ public class ClientServerCCEDUnitTest extends CacheTestCase {
     registerInterest(vm2, keys);
     ensureAllInvalidsPresent(vm2, keys);
 
-    getLogWriter().info("***************** clear cache and register interest with regex on four keys");
+    LogWriterUtils.getLogWriter().info("***************** clear cache and register interest with regex on four keys");
     clearLocalCache(vm2);
     registerInterestRegex(vm2, "Object[0-3]");
     ensureAllInvalidsPresent(vm2, keys);
 
-    getLogWriter().info("***************** fetch entries with getAll()");
+    LogWriterUtils.getLogWriter().info("***************** fetch entries with getAll()");
     clearLocalCache(vm2);
     getAll(vm2);
     ensureAllInvalidsPresent(vm2);
@@ -383,7 +389,7 @@ public class ClientServerCCEDUnitTest extends CacheTestCase {
       //other bucket might be in vm1
       forceGC(vm1);
     }
-    pause(5000); // better chance that WaitCriteria will succeed 1st time if we pause a bit
+    Wait.pause(5000); // better chance that WaitCriteria will succeed 1st time if we pause a bit
     checkClientReceivedGC(vm2);
     checkClientReceivedGC(vm3);
     checkServerQueuesEmpty(vm0);
@@ -440,7 +446,7 @@ public class ClientServerCCEDUnitTest extends CacheTestCase {
             assertTrue("expected key object_"+i+" to be in the cache but it isn't", TestRegion.containsKey("object_"+i));
           }
         } catch (NullPointerException e) {
-          fail("caught NPE", e);
+          Assert.fail("caught NPE", e);
         }
       }
     });
@@ -476,8 +482,8 @@ public class ClientServerCCEDUnitTest extends CacheTestCase {
           
           @Override
           public boolean done() {
-            getLogWriter().info("tombstone count = " + TestRegion.getTombstoneCount());
-            getLogWriter().info("region size = " + TestRegion.size());
+            LogWriterUtils.getLogWriter().info("tombstone count = " + TestRegion.getTombstoneCount());
+            LogWriterUtils.getLogWriter().info("region size = " + TestRegion.size());
             return TestRegion.getTombstoneCount() == 0 && TestRegion.size() == 0;
           }
           
@@ -486,7 +492,7 @@ public class ClientServerCCEDUnitTest extends CacheTestCase {
             return "waiting for garbage collection to occur";
           }
         };
-        waitForCriterion(wc, 60000, 2000, true);
+        Wait.waitForCriterion(wc, 60000, 2000, true);
         return null;
       }
     });
@@ -512,7 +518,7 @@ public class ClientServerCCEDUnitTest extends CacheTestCase {
 //                  if (first) {
 //                    ((LocalRegion)proxy.getHARegion()).dumpBackingMap();
 //                  }
-                  getLogWriter().info("queue size ("+size+") is still > 0 for " + proxy.getProxyID()); 
+                  LogWriterUtils.getLogWriter().info("queue size ("+size+") is still > 0 for " + proxy.getProxyID()); 
                   return false;
                 }
               }
@@ -520,7 +526,7 @@ public class ClientServerCCEDUnitTest extends CacheTestCase {
             // also ensure that server regions have been cleaned up
             int regionEntryCount = TestRegion.getRegionMap().size();
             if (regionEntryCount > 0) {
-              getLogWriter().info("TestRegion has unexpected entries - all should have been GC'd but we have " + regionEntryCount);
+              LogWriterUtils.getLogWriter().info("TestRegion has unexpected entries - all should have been GC'd but we have " + regionEntryCount);
               TestRegion.dumpBackingMap();
               return false;
             }
@@ -532,7 +538,7 @@ public class ClientServerCCEDUnitTest extends CacheTestCase {
             return "waiting for queue removal messages to clear client queues";
           }
         };
-        waitForCriterion(wc, 60000, 2000, true);
+        Wait.waitForCriterion(wc, 60000, 2000, true);
         return null;
       }
     });
@@ -543,7 +549,7 @@ public class ClientServerCCEDUnitTest extends CacheTestCase {
     vm.invoke(new SerializableCallable("check that GC did not happen") {
       public Object call() throws Exception {
         if (TestRegion.getTombstoneCount() == 0) {
-          getLogWriter().warning("region has no tombstones");
+          LogWriterUtils.getLogWriter().warning("region has no tombstones");
 //          TestRegion.dumpBackingMap();
           throw new AssertionFailedError("expected to find tombstones but region is empty");
         }
@@ -584,9 +590,9 @@ public class ClientServerCCEDUnitTest extends CacheTestCase {
     SerializableCallable createRegion = new SerializableCallable() {
       public Object call() throws Exception {
         ClientCacheFactory cf = new ClientCacheFactory();
-        cf.addPoolServer(getServerHostName(vm.getHost()), port);
+        cf.addPoolServer(NetworkUtils.getServerHostName(vm.getHost()), port);
         cf.setPoolSubscriptionEnabled(true);
-        cf.set("log-level", getDUnitLogLevel());
+        cf.set("log-level", LogWriterUtils.getDUnitLogLevel());
         ClientCache cache = getClientCache(cf);
         ClientRegionFactory crf = cache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY);
         crf.setConcurrencyChecksEnabled(ccEnabled);
@@ -605,14 +611,14 @@ public class ClientServerCCEDUnitTest extends CacheTestCase {
     SerializableCallable createRegion = new SerializableCallable() {
       public Object call() throws Exception {
         ClientCacheFactory cf = new ClientCacheFactory();
-        cf.addPoolServer(getServerHostName(vm.getHost()), port1);
-        cf.addPoolServer(getServerHostName(vm.getHost()), port2);
+        cf.addPoolServer(NetworkUtils.getServerHostName(vm.getHost()), port1);
+        cf.addPoolServer(NetworkUtils.getServerHostName(vm.getHost()), port2);
         cf.setPoolSubscriptionEnabled(true);
         cf.setPoolSubscriptionRedundancy(1);
         // bug #50683 - secondary durable queue retains all GC messages
         cf.set("durable-client-id", ""+vm.getPid());
         cf.set("durable-client-timeout", "" + 200);
-        cf.set("log-level", getDUnitLogLevel());
+        cf.set("log-level", LogWriterUtils.getDUnitLogLevel());
         ClientCache cache = getClientCache(cf);
         ClientRegionFactory crf = cache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY);
         crf.setConcurrencyChecksEnabled(ccEnabled);

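For readers skimming this part of the commit: the hunks above consistently replace methods that tests used to inherit from DistributedTestCase with static calls on the extracted dunit helpers. A minimal sketch of the migrated wait-and-log idiom, using only the class and method names visible in these hunks (Wait, WaitCriterion, LogWriterUtils) and a hypothetical exampleRegion standing in for a test region:

    import com.gemstone.gemfire.test.dunit.LogWriterUtils;
    import com.gemstone.gemfire.test.dunit.Wait;
    import com.gemstone.gemfire.test.dunit.WaitCriterion;

    // Poll until the hypothetical exampleRegion drains, logging through the
    // static LogWriterUtils helper rather than the old inherited getLogWriter().
    WaitCriterion wc = new WaitCriterion() {
      public boolean done() {
        LogWriterUtils.getLogWriter().info("region size = " + exampleRegion.size());
        return exampleRegion.size() == 0;
      }
      public String description() {
        return "waiting for exampleRegion to drain";
      }
    };
    // 60 second timeout, 2 second polling interval, throw on timeout
    Wait.waitForCriterion(wc, 60000, 2000, true);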
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ClientServerTestCase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ClientServerTestCase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ClientServerTestCase.java
index 6a3c5e0..5c2f8a2 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ClientServerTestCase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ClientServerTestCase.java
@@ -35,7 +35,10 @@ import com.gemstone.gemfire.distributed.DistributedMember;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Provides helper methods for testing clients and servers. This
@@ -59,10 +62,14 @@ public class ClientServerTestCase extends CacheTestCase {
   }
   
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
+    preTearDownClientServerTestCase();
     // this makes sure we don't leave anything for the next tests
     disconnectAllFromDS();
   }
+  
+  protected void preTearDownClientServerTestCase() throws Exception {
+  }
 
   public ClientServerTestCase(String name) {
     super(name);
@@ -201,7 +208,7 @@ public class ClientServerTestCase extends CacheTestCase {
       boolean threadLocalCnxs, int lifetimeTimeout, int statisticInterval) {
 
     if(AUTO_LOAD_BALANCE) {
-      pf.addLocator(host,getDUnitLocatorPort());
+      pf.addLocator(host,DistributedTestUtils.getDUnitLocatorPort());
     } else {
       for(int z=0;z<ports.length;z++) {
         pf.addServer(host,ports[z]);
@@ -299,7 +306,7 @@ public class ClientServerTestCase extends CacheTestCase {
     int waitMillis = 10000;
     int interval = 100;
     boolean throwException = true;
-    waitForCriterion(w, waitMillis, interval, throwException);
+    Wait.waitForCriterion(w, waitMillis, interval, throwException);
     return system.getMemberId();
   }
 

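The ClientServerTestCase change above is representative of how the commit replaces the overridable tearDown2() with final pre-teardown hooks. A sketch of what a subclass looks like after the migration, under the assumption that subclasses now override the protected preTearDownClientServerTestCase() hook shown in this hunk (ExampleClientServerDUnitTest and its cleanup helper are hypothetical):

    // Subclasses no longer override tearDown2(); they override the hook that the
    // final preTearDownCacheTestCase() above invokes before disconnectAllFromDS().
    public class ExampleClientServerDUnitTest extends ClientServerTestCase {

      public ExampleClientServerDUnitTest(String name) {
        super(name);
      }

      @Override
      protected void preTearDownClientServerTestCase() throws Exception {
        // test-specific cleanup runs first; the base class then disconnects all VMs
        releaseExampleResources(); // hypothetical helper
      }

      private void releaseExampleResources() {
        // placeholder for per-test cleanup
      }
    }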
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ConcurrentLeaveDuringGIIDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ConcurrentLeaveDuringGIIDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ConcurrentLeaveDuringGIIDUnitTest.java
index 9ec99a2..c2f06c1 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ConcurrentLeaveDuringGIIDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/ConcurrentLeaveDuringGIIDUnitTest.java
@@ -26,9 +26,12 @@ import com.gemstone.gemfire.internal.cache.InitialImageOperation;
 import com.gemstone.gemfire.internal.cache.InitialImageOperation.GIITestHook;
 import com.gemstone.gemfire.internal.cache.InitialImageOperation.GIITestHookType;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.internal.cache.LocalRegion;
 import com.gemstone.gemfire.internal.cache.RegionMap;
 
@@ -101,7 +104,7 @@ public class ConcurrentLeaveDuringGIIDUnitTest extends CacheTestCase {
             return "waiting for GII test hook to be invoked";
           }
         };
-        waitForCriterion(wc, 20000, 500, true);
+        Wait.waitForCriterion(wc, 20000, 500, true);
         return getCache().getDistributedSystem().getDistributedMember();
       }
     };
@@ -128,7 +131,7 @@ public class ConcurrentLeaveDuringGIIDUnitTest extends CacheTestCase {
             return "waiting for region " + regionName + " to contain keyFromX";
           }
         };
-        waitForCriterion(wc, 20000, 1000, true);
+        Wait.waitForCriterion(wc, 20000, 1000, true);
       }
     });
     
@@ -150,11 +153,11 @@ public class ConcurrentLeaveDuringGIIDUnitTest extends CacheTestCase {
             return "waiting for region " + regionName + " to initialize";
           }
         };
-        waitForCriterion(wc, 20000, 1000, true);
+        Wait.waitForCriterion(wc, 20000, 1000, true);
         // ensure that the RVV has recorded the event
         DistributedRegion r = (DistributedRegion)getCache().getRegion(regionName);
         if (!r.getVersionVector().contains(Xid, 1)) {
-          getLogWriter().info("r's version vector is " + r.getVersionVector().fullToString());
+          LogWriterUtils.getLogWriter().info("r's version vector is " + r.getVersionVector().fullToString());
           ((LocalRegion)r).dumpBackingMap();
         }
         assertTrue(r.containsKey("keyFromX"));
@@ -177,7 +180,7 @@ public class ConcurrentLeaveDuringGIIDUnitTest extends CacheTestCase {
           }
         };
         // if the test fails here then a sync from B to A was not performed
-        waitForCriterion(wc, 20000, 500, true);
+        Wait.waitForCriterion(wc, 20000, 500, true);
         // if the test fails here something is odd because the sync was done
         // but the RVV doesn't know about it
         assertTrue(((LocalRegion)r).getVersionVector().contains(Xid, 1));

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DiskRegionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DiskRegionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DiskRegionDUnitTest.java
index 6247ee1..817943a 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DiskRegionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DiskRegionDUnitTest.java
@@ -47,11 +47,13 @@ import com.gemstone.gemfire.internal.cache.DiskRegionStats;
 import com.gemstone.gemfire.internal.cache.LocalRegion;
 import com.gemstone.gemfire.internal.cache.lru.LRUCapacityController;
 import com.gemstone.gemfire.internal.cache.lru.LRUStatistics;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Tests the functionality of cache regions whose contents may be
@@ -141,7 +143,7 @@ public class DiskRegionDUnitTest extends CacheTestCase {
 
     flush(region);
     
-    getLogWriter().info("DEBUG: writes=" + diskStats.getWrites()
+    LogWriterUtils.getLogWriter().info("DEBUG: writes=" + diskStats.getWrites()
         + " reads=" + diskStats.getReads()
         + " evictions=" + lruStats.getEvictions()
         + " total=" + total
@@ -160,7 +162,7 @@ public class DiskRegionDUnitTest extends CacheTestCase {
     assertNotNull(value);
     assertEquals(0, ((int[]) value)[0]);
 
-    getLogWriter().info("DEBUG: writes=" + diskStats.getWrites()
+    LogWriterUtils.getLogWriter().info("DEBUG: writes=" + diskStats.getWrites()
         + " reads=" + diskStats.getReads()
         + " evictions=" + lruStats.getEvictions()
         + " total=" + total
@@ -249,7 +251,7 @@ public class DiskRegionDUnitTest extends CacheTestCase {
               return "waiting for evictions to exceed 6";
             }
           };
-          DistributedTestCase.waitForCriterion(ev, 5 * 1000, 200, true);
+          Wait.waitForCriterion(ev, 5 * 1000, 200, true);
           //DiskRegionStats diskStats = dr.getStats();
           //assertTrue(diskStats.getWrites() > 6);
         }
@@ -372,7 +374,7 @@ public class DiskRegionDUnitTest extends CacheTestCase {
 //          DiskRegion dr = region.getDiskRegion();
           LRUStatistics lruStats = getLRUStats(region);
           for (int i = 0; lruStats.getEvictions() < 10; i++) {
-            getLogWriter().info("Put " + i);
+            LogWriterUtils.getLogWriter().info("Put " + i);
             region.put(new Integer(i), new byte[1]);
           }
 
@@ -433,7 +435,7 @@ public class DiskRegionDUnitTest extends CacheTestCase {
 
     long evictions = lruStats.getEvictions();
 
-    getLogWriter().info("Destroying memory resident entries");
+    LogWriterUtils.getLogWriter().info("Destroying memory resident entries");
     // Destroying each of these guys should have no effect on the disk
     for (int i = total - 1; i >= evictions; i--) {
       region.destroy(new Integer(i));
@@ -444,7 +446,7 @@ public class DiskRegionDUnitTest extends CacheTestCase {
 
 //    long startRemoves = diskStats.getRemoves();
 
-    getLogWriter().info("Destroying disk-resident entries.  evictions=" + evictions);
+    LogWriterUtils.getLogWriter().info("Destroying disk-resident entries.  evictions=" + evictions);
     
     // Destroying each of these guys should cause a removal from disk
     for (int i = ((int) evictions) - 1; i >= 0; i--) {
@@ -456,7 +458,7 @@ public class DiskRegionDUnitTest extends CacheTestCase {
 
     assertEquals(evictions, lruStats.getEvictions());
     
-    getLogWriter().info("keys remaining in region: " + region.keys().size());
+    LogWriterUtils.getLogWriter().info("keys remaining in region: " + region.keys().size());
     assertEquals(0, region.keys().size());
   }
 
@@ -968,7 +970,7 @@ public class DiskRegionDUnitTest extends CacheTestCase {
 //          DiskRegion dr = region.getDiskRegion();
           LRUStatistics lruStats = getLRUStats(region);
           for (int i = 0; lruStats.getEvictions() < 10; i++) {
-            getLogWriter().info("Put " + i);
+            LogWriterUtils.getLogWriter().info("Put " + i);
             region.put(new Integer(i), new byte[1]);
           }
 
@@ -997,7 +999,7 @@ public class DiskRegionDUnitTest extends CacheTestCase {
               return "value for key remains: " + key;
             }
           };
-          DistributedTestCase.waitForCriterion(ev, 500, 200, true);
+          Wait.waitForCriterion(ev, 500, 200, true);
         }
       });
 
@@ -1021,7 +1023,7 @@ public class DiskRegionDUnitTest extends CacheTestCase {
               return "verify update";
             }
           };
-          DistributedTestCase.waitForCriterion(ev, 500, 200, true);
+          Wait.waitForCriterion(ev, 500, 200, true);
         }
       });
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistAckMapMethodsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistAckMapMethodsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistAckMapMethodsDUnitTest.java
index 216cdb8..44f7f06 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistAckMapMethodsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistAckMapMethodsDUnitTest.java
@@ -43,6 +43,8 @@ import com.gemstone.gemfire.cache.util.CacheWriterAdapter;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -78,14 +80,15 @@ public class DistAckMapMethodsDUnitTest extends DistributedTestCase{
       vm1.invoke(DistAckMapMethodsDUnitTest.class, "createCache");
     }
     
-    public void tearDown2(){
-        Host host = Host.getHost(0);
-        VM vm0 = host.getVM(0);
-        VM vm1 = host.getVM(1);
-        vm0.invoke(DistAckMapMethodsDUnitTest.class, "closeCache");
-        vm1.invoke(DistAckMapMethodsDUnitTest.class, "closeCache");
-        cache = null;
-        invokeInEveryVM(new SerializableRunnable() { public void run() { cache = null; } });
+    @Override
+    protected final void preTearDown() throws Exception {
+      Host host = Host.getHost(0);
+      VM vm0 = host.getVM(0);
+      VM vm1 = host.getVM(1);
+      vm0.invoke(DistAckMapMethodsDUnitTest.class, "closeCache");
+      vm1.invoke(DistAckMapMethodsDUnitTest.class, "closeCache");
+      cache = null;
+      Invoke.invokeInEveryVM(new SerializableRunnable() { public void run() { cache = null; } });
     }
     
     public static void createCache(){
@@ -197,8 +200,8 @@ public class DistAckMapMethodsDUnitTest extends DistributedTestCase{
         vm0.invoke(DistAckMapMethodsDUnitTest.class, "putMethod", objArr);
         obj1 = vm1.invoke(DistAckMapMethodsDUnitTest.class, "getMethod", objArr);//to make sure that vm1 region has the entry
         obj2 = vm1.invoke(DistAckMapMethodsDUnitTest.class, "removeMethod", objArr);
-        getLogWriter().fine("111111111"+obj1);
-        getLogWriter().fine("2222222222"+obj2);
+        LogWriterUtils.getLogWriter().fine("111111111"+obj1);
+        LogWriterUtils.getLogWriter().fine("2222222222"+obj2);
         if (obj1 == null)
           fail("region1.getMethod returned null");
         if(!(obj1.equals(obj2))){

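The DistAckMapMethodsDUnitTest hunk above shows the same migration applied directly to a DistributedTestCase subclass: tearDown2() becomes the preTearDown() hook, and the inherited invokeInEveryVM() becomes a static call on Invoke. A condensed sketch of the resulting teardown, omitting the per-VM closeCache invocations shown in the hunk and assuming a static cache field like the one in the test above:

    import com.gemstone.gemfire.test.dunit.Invoke;
    import com.gemstone.gemfire.test.dunit.SerializableRunnable;

    @Override
    protected final void preTearDown() throws Exception {
      cache = null;
      Invoke.invokeInEveryVM(new SerializableRunnable() {
        public void run() {
          cache = null; // clear the static reference in every dunit VM
        }
      });
    }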
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckOverflowRegionCCEOffHeapDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckOverflowRegionCCEOffHeapDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckOverflowRegionCCEOffHeapDUnitTest.java
index 5971d5c..2323b0e 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckOverflowRegionCCEOffHeapDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckOverflowRegionCCEOffHeapDUnitTest.java
@@ -22,6 +22,7 @@ import com.gemstone.gemfire.cache.AttributesFactory;
 import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.cache.OffHeapTestUtil;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 
 /**
@@ -38,7 +39,7 @@ public class DistributedAckOverflowRegionCCEOffHeapDUnitTest extends Distributed
   }
   
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
     SerializableRunnable checkOrphans = new SerializableRunnable() {
 
       @Override
@@ -49,8 +50,7 @@ public class DistributedAckOverflowRegionCCEOffHeapDUnitTest extends Distributed
       }
     };
     checkOrphans.run();
-    invokeInEveryVM(checkOrphans);
-    super.tearDown2();
+    Invoke.invokeInEveryVM(checkOrphans);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckPersistentRegionCCEDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckPersistentRegionCCEDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckPersistentRegionCCEDUnitTest.java
index aec7c8b..aaae52b 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckPersistentRegionCCEDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckPersistentRegionCCEDUnitTest.java
@@ -41,7 +41,6 @@ import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
 
 import java.io.IOException;
 import java.util.Map;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckPersistentRegionCCEOffHeapDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckPersistentRegionCCEOffHeapDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckPersistentRegionCCEOffHeapDUnitTest.java
index 24c386a..52a4313 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckPersistentRegionCCEOffHeapDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache30/DistributedAckPersistentRegionCCEOffHeapDUnitTest.java
@@ -22,6 +22,7 @@ import com.gemstone.gemfire.cache.AttributesFactory;
 import com.gemstone.gemfire.cache.RegionAttributes;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.cache.OffHeapTestUtil;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 
 /**
@@ -38,7 +39,7 @@ public class DistributedAckPersistentRegionCCEOffHeapDUnitTest extends Distribut
   }
 
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
     SerializableRunnable checkOrphans = new SerializableRunnable() {
 
       @Override
@@ -48,12 +49,8 @@ public class DistributedAckPersistentRegionCCEOffHeapDUnitTest extends Distribut
         }
       }
     };
-    invokeInEveryVM(checkOrphans);
-    try {
-      checkOrphans.run();
-    } finally {
-      super.tearDown2();
-    }
+    Invoke.invokeInEveryVM(checkOrphans);
+    checkOrphans.run();
   }
 
   @Override



http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorDUnitTest.java
index de60132..545a0ea 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorDUnitTest.java
@@ -52,10 +52,17 @@ import com.gemstone.gemfire.internal.logging.LocalLogWriter;
 import com.gemstone.gemfire.internal.tcp.Connection;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Tests the ability of the {@link Locator} API to start and stop
@@ -87,21 +94,21 @@ public class LocatorDUnitTest extends DistributedTestCase {
     super.setUp();
     port1 = -1;
     port2 = -1;
-    addExpectedException("Removing shunned member");
+    IgnoredException.addIgnoredException("Removing shunned member");
   }
   
   @Override
-  public void tearDown2() {
+  protected final void preTearDown() throws Exception {
     if (Locator.hasLocator()) {
       Locator.getLocator().stop();
     }
     // delete locator state files so they don't accidentally
     // get used by other tests
     if (port1 > 0) {
-      deleteLocatorStateFile(port1);
+      DistributedTestUtils.deleteLocatorStateFile(port1);
     }
     if (port2 > 0) {
-      deleteLocatorStateFile(port2);
+      DistributedTestUtils.deleteLocatorStateFile(port2);
     }
   }
   
@@ -124,13 +131,13 @@ public class LocatorDUnitTest extends DistributedTestCase {
     VM vm3 = host.getVM(3);
     
     port1 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    deleteLocatorStateFile(port1);
+    DistributedTestUtils.deleteLocatorStateFile(port1);
 
-    final String locators = getServerHostName(host) + "[" + port1 + "]";
+    final String locators = NetworkUtils.getServerHostName(host) + "[" + port1 + "]";
     final Properties properties = new Properties();
     properties.put("mcast-port", "0");
     properties.put("start-locator", locators);
-    properties.put("log-level", getDUnitLogLevel());
+    properties.put("log-level", LogWriterUtils.getDUnitLogLevel());
     properties.put("security-peer-auth-init","com.gemstone.gemfire.distributed.AuthInitializer.create");
     properties.put("security-peer-authenticator","com.gemstone.gemfire.distributed.MyAuthenticator.create");
     properties.put(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "false");
@@ -141,7 +148,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
         DistributionManager.NORMAL_DM_TYPE, system.getDistributedMember().getVmKind());
     
     properties.remove("start-locator");
-    properties.put("log-level", getDUnitLogLevel());
+    properties.put("log-level", LogWriterUtils.getDUnitLogLevel());
     properties.put("locators", locators);
     SerializableRunnable startSystem = new SerializableRunnable("start system") {
       public void run() {
@@ -184,7 +191,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
         public void run() {
           final DistributedLockService service = DistributedLockService.getServiceNamed("test service");
           service.lock("foo3", 0, 0);
-          waitForCriterion(new WaitCriterion() {
+          Wait.waitForCriterion(new WaitCriterion() {
             @Override
             public boolean done() {
               return service.isLockGrantor();
@@ -200,13 +207,13 @@ public class LocatorDUnitTest extends DistributedTestCase {
       });
   
       properties.put("start-locator", locators);
-      properties.put("log-level", getDUnitLogLevel());
+      properties.put("log-level", LogWriterUtils.getDUnitLogLevel());
       system = (InternalDistributedSystem)DistributedSystem.connect(properties);
       System.out.println("done connecting distributed system");
       
       assertEquals("should be the coordinator", system.getDistributedMember(), MembershipManagerHelper.getCoordinator(system));
       NetView view = MembershipManagerHelper.getMembershipManager(system).getView();
-      getLogWriter().info("view after becoming coordinator is " + view);
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("view after becoming coordinator is " + view);
       assertNotSame("should not be the first member in the view ("+view+")", system.getDistributedMember(), view.get(0));
       
       service = DistributedLockService.create("test service", system);
@@ -254,9 +261,9 @@ public class LocatorDUnitTest extends DistributedTestCase {
     this.port1 = port1;
     final int port2 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
     this.port2 = port2; // for cleanup in tearDown2
-    deleteLocatorStateFile(port1);
-    deleteLocatorStateFile(port2);
-    final String host0 = getServerHostName(host); 
+    DistributedTestUtils.deleteLocatorStateFile(port1);
+    DistributedTestUtils.deleteLocatorStateFile(port2);
+    final String host0 = NetworkUtils.getServerHostName(host); 
     final String locators = host0 + "[" + port1 + "]," +
                             host0 + "[" + port2 + "]";
     final Properties properties = new Properties();
@@ -265,7 +272,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
     properties.put("enable-network-partition-detection", "false");
     properties.put("disable-auto-reconnect", "true");
     properties.put("member-timeout", "2000");
-    properties.put("log-level", getDUnitLogLevel());
+    properties.put("log-level", LogWriterUtils.getDUnitLogLevel());
     properties.put(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "false");
 
     SerializableCallable startLocator1 = new SerializableCallable("start locator1") {
@@ -302,14 +309,14 @@ public class LocatorDUnitTest extends DistributedTestCase {
     AsyncInvocation async2 = null;
     try {
       async2 = loc2.invokeAsync(startLocator2);
-      pause(2000);
+      Wait.pause(2000);
       async1 = loc1.invokeAsync(startLocator1);
     } finally {
       try {
         if (async1 != null) {
           async1.join(45000);
           if (async1.isAlive()) {
-            dumpAllStacks();
+            ThreadUtils.dumpAllStacks();
           }
           if (async2 != null) {
             async2.join();
@@ -364,8 +371,8 @@ public class LocatorDUnitTest extends DistributedTestCase {
     VM vm3 = host.getVM(3);
     
     port1 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    deleteLocatorStateFile(port1);
-    final String locators = getServerHostName(host) + "[" + port1 + "]";
+    DistributedTestUtils.deleteLocatorStateFile(port1);
+    final String locators = NetworkUtils.getServerHostName(host) + "[" + port1 + "]";
     final Properties properties = new Properties();
     properties.put("mcast-port", "0");
     properties.put("locators", locators);
@@ -454,7 +461,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, timeout, 200, true);
+    Wait.waitForCriterion(ev, timeout, 200, true);
   }
   
   /**
@@ -475,7 +482,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
    * (which is now the sole remaining member) shuts itself down.
    */
   public void testLeadAndCoordFailure() throws Exception {
-    addExpectedException("Possible loss of quorum due");
+    IgnoredException.addIgnoredException("Possible loss of quorum due");
     disconnectAllFromDS();
     Host host = Host.getHost(0);
     VM vm1 = host.getVM(1);
@@ -487,8 +494,8 @@ public class LocatorDUnitTest extends DistributedTestCase {
     this.port1 = port1;
     final int port2 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
     this.port2 = port2; // for cleanup in tearDown2()
-    deleteLocatorStateFile(port1, port2);
-    final String host0 = getServerHostName(host); 
+    DistributedTestUtils.deleteLocatorStateFile(port1, port2);
+    final String host0 = NetworkUtils.getServerHostName(host); 
     final String locators = host0 + "[" + port1 + "]," +
                             host0 + "[" + port2 + "]";
     final Properties properties = new Properties();
@@ -497,7 +504,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
     properties.put("enable-network-partition-detection", "true");
     properties.put("disable-auto-reconnect", "true");
     properties.put("member-timeout", "2000");
-    properties.put("log-level", getDUnitLogLevel());
+    properties.put("log-level", LogWriterUtils.getDUnitLogLevel());
 //    properties.put("log-level", "fine");
     properties.put(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "false");
     
@@ -515,7 +522,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
             Locator.startLocatorAndDS(port2, lf, properties);
           }
           catch (IOException ios) {
-            fail("Unable to start locator2", ios);
+            com.gemstone.gemfire.test.dunit.Assert.fail("Unable to start locator2", ios);
           }
         }
       });
@@ -546,7 +553,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
       assertEquals(sys.getDistributedMember(), MembershipManagerHelper.getCoordinator(sys));
       
       // crash the second vm and the locator.  Should be okay
-      crashDistributedSystem(vm2);
+      DistributedTestUtils.crashDistributedSystem(vm2);
       locvm.invoke(crashLocator);
       
       assertTrue("Distributed system should not have disconnected",
@@ -559,14 +566,14 @@ public class LocatorDUnitTest extends DistributedTestCase {
   
       // disconnect the first vm and demonstrate that the third vm and the
       // locator notice the failure and exit
-      crashDistributedSystem(vm1);
+      DistributedTestUtils.crashDistributedSystem(vm1);
 
       /* This vm is watching vm1, which is watching vm2 which is watching locvm.
        * It will take 3 * (3 * member-timeout) milliseconds to detect the full
        * failure and eject the lost members from the view.
        */
       
-      getLogWriter().info("waiting for my distributed system to disconnect due to partition detection");
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("waiting for my distributed system to disconnect due to partition detection");
       WaitCriterion ev = new WaitCriterion() {
         public boolean done() {
           return !sys.isConnected();
@@ -575,7 +582,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
           return null;
         }
       };
-      DistributedTestCase.waitForCriterion(ev, 12 * 2000, 200, true);
+      Wait.waitForCriterion(ev, 12 * 2000, 200, true);
       if (sys.isConnected()) {
         fail("Distributed system did not disconnect as expected - network partition detection is broken");
       }
@@ -622,8 +629,8 @@ public class LocatorDUnitTest extends DistributedTestCase {
     this.port1 = port1;
     final int port2 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
     this.port2 = port2;
-    deleteLocatorStateFile(port1, port2);
-    final String host0 = getServerHostName(host);
+    DistributedTestUtils.deleteLocatorStateFile(port1, port2);
+    final String host0 = NetworkUtils.getServerHostName(host);
     final String locators = host0 + "[" + port1 + "],"
           + host0 + "[" + port2 + "]";
     final Properties properties = new Properties();
@@ -649,7 +656,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
             MembershipManagerHelper.inhibitForcedDisconnectLogging(true);
           }
           catch (IOException ios) {
-            fail("Unable to start locator2", ios);
+            com.gemstone.gemfire.test.dunit.Assert.fail("Unable to start locator2", ios);
           }
         }
       });
@@ -683,7 +690,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
       // crash the lead vm. Should be okay
       vm1.invoke(crashSystem);
 
-      pause(4 * 2000); // 4 x the member-timeout
+      Wait.pause(4 * 2000); // 4 x the member-timeout
       
       assertTrue("Distributed system should not have disconnected",
           isSystemConnected());
@@ -699,7 +706,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
       
       if (!Locator.getLocators().isEmpty()) {
         // log this for debugging purposes before throwing assertion error
-        getLogWriter().warning("found locator " + Locator.getLocators().iterator().next());
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().warning("found locator " + Locator.getLocators().iterator().next());
       }
       assertTrue("locator is not stopped", Locator.getLocators().isEmpty());
       
@@ -737,7 +744,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
       try {
         locvm.invoke(stopLocator);
       } catch (Exception e) {
-        getLogWriter().severe("failed to stop locator in vm 3", e);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().severe("failed to stop locator in vm 3", e);
       }
     }
   }
@@ -769,8 +776,8 @@ public class LocatorDUnitTest extends DistributedTestCase {
     this.port1 = port1;
     final int port2 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
     this.port2 = port2;
-    deleteLocatorStateFile(port1, port2);
-    final String host0 = getServerHostName(host);
+    DistributedTestUtils.deleteLocatorStateFile(port1, port2);
+    final String host0 = NetworkUtils.getServerHostName(host);
     final String locators = host0 + "[" + port1 + "]," + host0 + "[" + port2 + "]";
 
     final Properties properties = new Properties();
@@ -779,7 +786,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
     properties.put("enable-network-partition-detection", "true");
     properties.put("disable-auto-reconnect", "true");
     properties.put("member-timeout", "2000");
-    properties.put("log-level", getDUnitLogLevel());
+    properties.put("log-level", LogWriterUtils.getDUnitLogLevel());
     properties.put(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "false");
 
     SerializableRunnable stopLocator = getStopLocatorRunnable();
@@ -796,7 +803,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
             Locator.startLocatorAndDS(port2, lf, properties);
           }
           catch (IOException ios) {
-            fail("Unable to start locator2", ios);
+            com.gemstone.gemfire.test.dunit.Assert.fail("Unable to start locator2", ios);
           }
         }
       });
@@ -847,7 +854,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
 
       vm2.invokeAsync(crashSystem);
 
-      pause(1000); // 4 x the member-timeout
+      Wait.pause(1000); // 4 x the member-timeout
       
       // request member removal for first peer from second peer.
       vm2.invoke(new SerializableRunnable("Request Member Removal") {
@@ -908,8 +915,8 @@ public class LocatorDUnitTest extends DistributedTestCase {
     this.port1 = port1;
     final int port2 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
     this.port2 = port2;
-    deleteLocatorStateFile(port1, port2);
-    final String host0 = getServerHostName(host);
+    DistributedTestUtils.deleteLocatorStateFile(port1, port2);
+    final String host0 = NetworkUtils.getServerHostName(host);
     final String locators = host0 + "[" + port1 + "],"
           + host0 + "[" + port2 + "]";
     final Properties properties = new Properties();
@@ -945,7 +952,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
             Locator.startLocatorAndDS(port2, lf, properties);
           }
           catch (IOException ios) {
-            fail("Unable to start locator1", ios);
+            com.gemstone.gemfire.test.dunit.Assert.fail("Unable to start locator1", ios);
           }
         }
       });
@@ -986,7 +993,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
       
       // crash the lead locator.  Should be okay
       locvm.invoke(crashLocator);
-      pause(10 * 1000);
+      Wait.pause(10 * 1000);
 
       assertTrue("Distributed system should not have disconnected",
           sys.isConnected());
@@ -1000,7 +1007,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
       // disconnect the first vm and demonstrate that the non-lead vm and the
       // locator notice the failure and continue to run
       vm1.invoke(disconnect);
-      pause(10 * 1000);
+      Wait.pause(10 * 1000);
       
       assertTrue("Distributed system should not have disconnected",
           vm2.invokeBoolean(LocatorDUnitTest.class, "isSystemConnected"));
@@ -1029,8 +1036,8 @@ public class LocatorDUnitTest extends DistributedTestCase {
     Host host = Host.getHost(0);
     int port =
       AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    deleteLocatorStateFile(port1);
-    String locators = getServerHostName(host) + "[" + port + "]";
+    DistributedTestUtils.deleteLocatorStateFile(port1);
+    String locators = NetworkUtils.getServerHostName(host) + "[" + port + "]";
     Properties props = new Properties();
     props.setProperty("mcast-port", "0");
     props.setProperty("locators", locators);
@@ -1061,7 +1068,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
     } catch (Exception ex) {
       // if you see this fail, determine if unexpected exception is expected
       // if expected then add in a catch block for it above this catch
-      fail("Failed with unexpected exception", ex);
+      com.gemstone.gemfire.test.dunit.Assert.fail("Failed with unexpected exception", ex);
     }
     finally {
       if (oldValue == null) {
@@ -1092,8 +1099,8 @@ public class LocatorDUnitTest extends DistributedTestCase {
 
     final int port =
       AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    deleteLocatorStateFile(port1);
-    final String locators = getServerHostName(host) + "[" + port + "]";
+    DistributedTestUtils.deleteLocatorStateFile(port1);
+    final String locators = NetworkUtils.getServerHostName(host) + "[" + port + "]";
     final String uniqueName = getUniqueName();
     
     vm0.invoke(new SerializableRunnable("Start locator " + locators) {
@@ -1106,7 +1113,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
 
             Locator.startLocatorAndDS(port, logFile, locProps);
           } catch (IOException ex) {
-            fail("While starting locator on port " + port, ex);
+            com.gemstone.gemfire.test.dunit.Assert.fail("While starting locator on port " + port, ex);
           }
         }
       });
@@ -1132,7 +1139,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
     system = (InternalDistributedSystem)DistributedSystem.connect(props);
     
     final DistributedMember coord = MembershipManagerHelper.getCoordinator(system);
-    getLogWriter().info("coordinator before termination of locator is " + coord);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("coordinator before termination of locator is " + coord);
 
     vm0.invoke(getStopLocatorRunnable());
     
@@ -1146,9 +1153,9 @@ public class LocatorDUnitTest extends DistributedTestCase {
           MembershipManagerHelper.getCoordinator(system);
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 15 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 15 * 1000, 200, true);
     DistributedMember newCoord = MembershipManagerHelper.getCoordinator(system); 
-    getLogWriter().info("coordinator after shutdown of locator was " +
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("coordinator after shutdown of locator was " +
         newCoord);
     if (coord.equals(newCoord)) {
       fail("another member should have become coordinator after the locator was stopped");
@@ -1204,8 +1211,8 @@ public class LocatorDUnitTest extends DistributedTestCase {
 
     final int port =
       AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    deleteLocatorStateFile(port1);
-    final String locators = getServerHostName(host) + "[" + port + "]";
+    DistributedTestUtils.deleteLocatorStateFile(port1);
+    final String locators = NetworkUtils.getServerHostName(host) + "[" + port + "]";
     
     vm0.invoke(getStartSBLocatorRunnable(port, getUniqueName()+"1"));
     try {
@@ -1229,7 +1236,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
     system = (InternalDistributedSystem)getSystem(props);
 
     final DistributedMember coord = MembershipManagerHelper.getCoordinator(system);
-    getLogWriter().info("coordinator before termination of locator is " + coord);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("coordinator before termination of locator is " + coord);
 
     vm0.invoke(getStopLocatorRunnable());
     
@@ -1243,9 +1250,9 @@ public class LocatorDUnitTest extends DistributedTestCase {
           MembershipManagerHelper.getCoordinator(system);
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 15000, 200, true);
+    Wait.waitForCriterion(ev, 15000, 200, true);
     DistributedMember newCoord = MembershipManagerHelper.getCoordinator(system); 
-    getLogWriter().info("coordinator after shutdown of locator was " +
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("coordinator after shutdown of locator was " +
         newCoord);
     if (newCoord == null || coord.equals(newCoord)) {
       fail("another member should have become coordinator after the locator was stopped: "
@@ -1267,7 +1274,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 5000, 200, true);
+    Wait.waitForCriterion(ev, 5000, 200, true);
     
     system.disconnect();
     LogWriter bgexecLogger =
@@ -1337,8 +1344,8 @@ public class LocatorDUnitTest extends DistributedTestCase {
     this.port1 = port1;
     final int port2 = freeTCPPorts[1];
     this.port2 = port2;
-    deleteLocatorStateFile(port1, port2);
-    final String host0 = getServerHostName(host); 
+    DistributedTestUtils.deleteLocatorStateFile(port1, port2);
+    final String host0 = NetworkUtils.getServerHostName(host); 
     final String locators = host0 + "[" + port1 + "]," +
                             host0 + "[" + port2 + "]";
 
@@ -1355,7 +1362,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
         try {
           Locator.startLocatorAndDS(port1, logFile, dsProps);
         } catch (IOException ex) {
-          fail("While starting locator on port " + port1, ex);
+          com.gemstone.gemfire.test.dunit.Assert.fail("While starting locator on port " + port1, ex);
         }
       }
     });
@@ -1370,7 +1377,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
             Locator.startLocatorAndDS(port2, logFile, dsProps);
 
           } catch (IOException ex) {
-            fail("While starting locator on port " + port2, ex);
+            com.gemstone.gemfire.test.dunit.Assert.fail("While starting locator on port " + port2, ex);
           }
         }
       });
@@ -1409,7 +1416,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
             return null;
           }
         };
-        DistributedTestCase.waitForCriterion(ev, 10 * 1000, 200, true);
+        Wait.waitForCriterion(ev, 10 * 1000, 200, true);
 
         // three applications plus
         assertEquals(5, system.getDM().getViewMembers().size());
@@ -1441,7 +1448,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
    */
   public void testMultipleMcastLocators() throws Exception {
     disconnectAllFromDS();
-    addExpectedException("Could not stop  Distribution Locator"); // shutdown timing issue in InternalLocator
+    IgnoredException.addIgnoredException("Could not stop  Distribution Locator"); // shutdown timing issue in InternalLocator
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
@@ -1453,10 +1460,10 @@ public class LocatorDUnitTest extends DistributedTestCase {
     this.port1 = port1;
     final int port2 = freeTCPPorts[1];
     this.port2 = port2;
-    deleteLocatorStateFile(port1, port2);
+    DistributedTestUtils.deleteLocatorStateFile(port1, port2);
     final int mcastport = AvailablePort.getRandomAvailablePort(AvailablePort.MULTICAST);
     
-    final String host0 = getServerHostName(host); 
+    final String host0 = NetworkUtils.getServerHostName(host); 
     final String locators = host0 + "[" + port1 + "]," +
                             host0 + "[" + port2 + "]";
     final String uniqueName = getUniqueName();
@@ -1468,7 +1475,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
             Properties props = new Properties();
             props.setProperty("mcast-port", String.valueOf(mcastport));
             props.setProperty("locators", locators);
-            props.setProperty("log-level", getDUnitLogLevel());
+            props.setProperty("log-level", LogWriterUtils.getDUnitLogLevel());
             props.setProperty("mcast-ttl", "0");
             props.setProperty("enable-network-partition-detection", "true");
             props.setProperty(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "false");
@@ -1476,7 +1483,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
             Locator.startLocatorAndDS(port1, logFile, null, props);
           }
           catch (IOException ex) {
-            fail("While starting locator on port " + port1, ex);
+            com.gemstone.gemfire.test.dunit.Assert.fail("While starting locator on port " + port1, ex);
           }
         }
       });
@@ -1487,14 +1494,14 @@ public class LocatorDUnitTest extends DistributedTestCase {
             Properties props = new Properties();
             props.setProperty("mcast-port", String.valueOf(mcastport));
             props.setProperty("locators", locators);
-            props.setProperty("log-level", getDUnitLogLevel());
+            props.setProperty("log-level", LogWriterUtils.getDUnitLogLevel());
             props.setProperty("mcast-ttl", "0");
             props.setProperty("enable-network-partition-detection", "true");
             props.setProperty(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "false");
             Locator.startLocatorAndDS(port2, logFile, null, props);
           }
           catch (IOException ex) {
-            fail("While starting locator on port " + port2, ex);
+            com.gemstone.gemfire.test.dunit.Assert.fail("While starting locator on port " + port2, ex);
           }
         }
       });
@@ -1505,7 +1512,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
             Properties props = new Properties();
             props.setProperty("mcast-port", String.valueOf(mcastport));
             props.setProperty("locators", locators);
-            props.setProperty("log-level", getDUnitLogLevel());
+            props.setProperty("log-level", LogWriterUtils.getDUnitLogLevel());
             props.setProperty("mcast-ttl", "0");
             props.setProperty("enable-network-partition-detection", "true");
             DistributedSystem.connect(props);
@@ -1518,7 +1525,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
       Properties props = new Properties();
       props.setProperty("mcast-port", String.valueOf(mcastport));
       props.setProperty("locators", locators);
-      props.setProperty("log-level", getDUnitLogLevel());
+      props.setProperty("log-level", LogWriterUtils.getDUnitLogLevel());
       props.setProperty("mcast-ttl", "0");
       props.setProperty("enable-network-partition-detection", "true");
 
@@ -1529,7 +1536,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
             return system.getDM().getViewMembers().size() == 5;
           }
           catch (Exception e) {
-            fail("unexpected exception", e);
+            com.gemstone.gemfire.test.dunit.Assert.fail("unexpected exception", e);
           }
           return false; // NOTREACHED
         }
@@ -1537,7 +1544,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
           return "waiting for 5 members - have " + system.getDM().getViewMembers().size();
         }
       };
-      DistributedTestCase.waitForCriterion(ev, WAIT2_MS, 200, true);
+      Wait.waitForCriterion(ev, WAIT2_MS, 200, true);
       system.disconnect();
 
       SerializableRunnable disconnect =
@@ -1573,12 +1580,12 @@ public class LocatorDUnitTest extends DistributedTestCase {
 
     port1 =
       AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    deleteLocatorStateFile(port1);
+    DistributedTestUtils.deleteLocatorStateFile(port1);
     File logFile = new File("");
     Locator locator = Locator.startLocator(port1, logFile);
     try {
 
-    final String locators = getServerHostName(host) + "[" + port1 + "]";
+    final String locators = NetworkUtils.getServerHostName(host) + "[" + port1 + "]";
 
     Properties props = new Properties();
     props.setProperty("mcast-port", "0");
@@ -1603,8 +1610,8 @@ public class LocatorDUnitTest extends DistributedTestCase {
     
     try {
       port1 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-      deleteLocatorStateFile(port1);
-      final String locators = getServerHostName(host) + "[" + port1 + "]";
+      DistributedTestUtils.deleteLocatorStateFile(port1);
+      final String locators = NetworkUtils.getServerHostName(host) + "[" + port1 + "]";
       final Properties properties = new Properties();
       properties.put("mcast-port", "0");
       properties.put("locators", locators);
@@ -1680,7 +1687,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
     port1 =
       AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
     File logFile1 = new File("");
-    deleteLocatorStateFile(port1);
+    DistributedTestUtils.deleteLocatorStateFile(port1);
     Locator locator1 = Locator.startLocator(port1, logFile1);
     
     try {
@@ -1689,7 +1696,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
       AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
     File logFile2 = new File("");
 
-    deleteLocatorStateFile(port2);
+    DistributedTestUtils.deleteLocatorStateFile(port2);
     
     try {
       Locator locator2 = Locator.startLocator(port2, logFile2);
@@ -1697,7 +1704,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
     } catch (IllegalStateException expected) {
     }
 
-    final String host0 = getServerHostName(host);
+    final String host0 = NetworkUtils.getServerHostName(host);
     final String locators = host0 + "[" + port1 + "]," +
                             host0 + "[" + port2 + "]";
 
@@ -1707,7 +1714,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
             Properties props = new Properties();
             props.setProperty("mcast-port", "0");
             props.setProperty("locators", locators);
-            props.setProperty("log-level", getDUnitLogLevel());
+            props.setProperty("log-level", LogWriterUtils.getDUnitLogLevel());
             DistributedSystem.connect(props);
           }
         };
@@ -1742,7 +1749,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
     disconnectAllFromDS();
     port1 =
       AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    deleteLocatorStateFile(port1);
+    DistributedTestUtils.deleteLocatorStateFile(port1);
     File logFile = new File("");
     File stateFile = new File("locator"+port1+"state.dat");
     VM vm0 = Host.getHost(0).getVM(0);
@@ -1754,7 +1761,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
       stateFile.delete();
     }
 
-    getLogWriter().info("Starting locator");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Starting locator");
     Locator locator = Locator.startLocatorAndDS(port1, logFile, p);
     try {
     
@@ -1766,10 +1773,10 @@ public class LocatorDUnitTest extends DistributedTestCase {
         };
     vm0.invoke(connect);
     
-    getLogWriter().info("Stopping locator");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Stopping locator");
     locator.stop();
     
-    getLogWriter().info("Starting locator");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Starting locator");
     locator = Locator.startLocatorAndDS(port1, logFile, p);
     
     vm0.invoke(new SerializableRunnable("disconnect") {
@@ -1828,10 +1835,10 @@ public class LocatorDUnitTest extends DistributedTestCase {
           System.setProperty("p2p.joinTimeout", "1000");
           Properties locProps = new Properties();
           locProps.put("mcast-port", "0");
-          locProps.put("log-level", getDUnitLogLevel());
+          locProps.put("log-level", LogWriterUtils.getDUnitLogLevel());
           Locator.startLocatorAndDS(port, logFile, locProps);
         } catch (IOException ex) {
-          fail("While starting locator on port " + port, ex);
+          com.gemstone.gemfire.test.dunit.Assert.fail("While starting locator on port " + port, ex);
         }
         finally {
           System.getProperties().remove(InternalLocator.LOCATORS_PREFERRED_AS_COORDINATORS);
@@ -1866,7 +1873,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
       long giveUp = System.currentTimeMillis() + 30000;
       if (cause instanceof ForcedDisconnectException) {
         while (unboundedWait && System.currentTimeMillis() < giveUp) {
-          pause(1000);
+          Wait.pause(1000);
         }
       } else {
         cause.printStackTrace();
@@ -1895,7 +1902,7 @@ public class LocatorDUnitTest extends DistributedTestCase {
     public void quorumLost(Set<InternalDistributedMember> failures,
         List<InternalDistributedMember> remaining) {
       quorumLostInvoked = true;
-      getLogWriter().info("quorumLost invoked in test code");
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("quorumLost invoked in test code");
     }
   }
   

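Two further idioms recur throughout the LocatorDUnitTest hunks above: suspect-string suppression moves from the inherited addExpectedException() to the static IgnoredException.addIgnoredException(), and checked exceptions are converted into failures via the dunit Assert.fail(String, Throwable) overload so the cause is preserved. A fragment illustrating both, using only signatures visible in this diff; the port, logFile, and properties variables are assumed from the surrounding test context:

    import java.io.IOException;

    import com.gemstone.gemfire.distributed.Locator;
    import com.gemstone.gemfire.test.dunit.Assert;
    import com.gemstone.gemfire.test.dunit.IgnoredException;

    // Suppress log-suspect checking for this expected message
    IgnoredException.addIgnoredException("Possible loss of quorum due");
    try {
      Locator.startLocatorAndDS(port, logFile, properties);
    } catch (IOException ios) {
      // attach the cause so its stack trace survives into the test report
      Assert.fail("Unable to start locator", ios);
    }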
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorJUnitTest.java
index c369f19..3c80d9a 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/LocatorJUnitTest.java
@@ -51,7 +51,6 @@ import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.tier.sockets.ClientProxyMembershipID;
 import com.gemstone.gemfire.management.internal.JmxManagerAdvisor.JmxManagerProfile;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 @Category(IntegrationTest.class)

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/SystemAdminDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/SystemAdminDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/SystemAdminDUnitTest.java
index 42d4ed1..64014de 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/SystemAdminDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/SystemAdminDUnitTest.java
@@ -29,7 +29,9 @@ import java.util.Properties;
 import com.gemstone.gemfire.distributed.internal.DistributionManager;
 import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
 import com.gemstone.gemfire.internal.SystemAdmin;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 
 public class SystemAdminDUnitTest extends DistributedTestCase {
 
@@ -43,9 +45,8 @@ public class SystemAdminDUnitTest extends DistributedTestCase {
     disconnect();
   }
   
-  @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     disconnect();
   }
   
@@ -54,7 +55,7 @@ public class SystemAdminDUnitTest extends DistributedTestCase {
     system = null;
     InternalDistributedSystem sys = InternalDistributedSystem.getAnyInstance();
     if (sys != null && sys.isConnected()) {
-      getLogWriter().info("disconnecting(3)");
+      LogWriterUtils.getLogWriter().info("disconnecting(3)");
       sys.disconnect();
     }
   }
@@ -62,7 +63,7 @@ public class SystemAdminDUnitTest extends DistributedTestCase {
   public void testPrintStacks() throws Exception {
 
     // create a gemfire.properties that lets SystemAdmin find the dunit locator
-    Properties p = getAllDistributedSystemProperties(getDistributedSystemProperties());
+    Properties p = DistributedTestUtils.getAllDistributedSystemProperties(getDistributedSystemProperties());
     try {
       
       SystemAdmin.setDistributedSystemProperties(p);

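The SystemAdminDUnitTest hunks also show the lifecycle change: instead of overriding tearDown2() and remembering to call super.tearDown2(), subclasses now override a preTearDown() hook that the base class invokes for them. A minimal sketch under that assumption; the test class is hypothetical, and the one-argument constructor is the usual JUnit 3 pattern in these tests:

    import com.gemstone.gemfire.test.dunit.DistributedTestCase;

    public class ExampleDUnitTest extends DistributedTestCase {

        public ExampleDUnitTest(String name) {
            super(name);
        }

        // Formerly: override tearDown2() and remember to call super.tearDown2().
        // Now the base class owns tearDown and calls this hook first, so there is
        // no super call to forget.
        @Override
        protected final void preTearDown() throws Exception {
            disconnectAllFromDS();
        }
    }
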
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/Bug40751DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/Bug40751DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/Bug40751DUnitTest.java
index cedf650..e98c608 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/Bug40751DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/Bug40751DUnitTest.java
@@ -36,6 +36,7 @@ import com.gemstone.gemfire.cache.SubscriptionAttributes;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.internal.cache.lru.Sizeable;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
@@ -47,11 +48,9 @@ public class Bug40751DUnitTest extends CacheTestCase {
   }
   
   @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  protected final void postTearDownCacheTestCase() throws Exception {
     disconnectAllFromDS();
   }
-	
 	 
   public void testRR() {
     System.setProperty("p2p.nodirectBuffers", "true");
@@ -95,7 +94,7 @@ public class Bug40751DUnitTest extends CacheTestCase {
 
       vm1.invoke(createEmptyRegion);
     } finally {
-      invokeInEveryVM(new SerializableCallable() {
+      Invoke.invokeInEveryVM(new SerializableCallable() {
         public Object call() throws Exception {
           System.getProperties().remove("p2p.oldIO");
           System.getProperties().remove("p2p.nodirectBuffers");

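Bug40751DUnitTest shows the same move for the VM helpers: invokeInEveryVM is no longer inherited but lives on the extracted Invoke class. A small sketch of the call, assuming the dunit Invoke and SerializableCallable types shown in the imports above; the wrapper class and method are hypothetical:

    import com.gemstone.gemfire.test.dunit.Invoke;
    import com.gemstone.gemfire.test.dunit.SerializableCallable;

    public class CleanupSketch {
        // hypothetical helper: clear a test system property in every dunit VM
        static void clearTestProperties() {
            Invoke.invokeInEveryVM(new SerializableCallable() {
                public Object call() throws Exception {
                    System.getProperties().remove("p2p.nodirectBuffers");
                    return null;
                }
            });
        }
    }
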
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/ConsoleDistributionManagerDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/ConsoleDistributionManagerDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/ConsoleDistributionManagerDUnitTest.java
index 6872f32..166d5ac 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/ConsoleDistributionManagerDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/ConsoleDistributionManagerDUnitTest.java
@@ -42,10 +42,13 @@ import com.gemstone.gemfire.internal.admin.GfManagerAgentConfig;
 import com.gemstone.gemfire.internal.admin.GfManagerAgentFactory;
 import com.gemstone.gemfire.internal.admin.StatResource;
 import com.gemstone.gemfire.internal.admin.remote.RemoteTransportConfig;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * This class tests the functionality of the {@linkplain com.gemstone.gemfire.internal.admin internal
@@ -64,13 +67,13 @@ public class ConsoleDistributionManagerDUnitTest
 //  private volatile Alert lastAlert = null;
 
   public void alert(Alert alert) {
-    getLogWriter().info("DEBUG: alert=" + alert);
+    LogWriterUtils.getLogWriter().info("DEBUG: alert=" + alert);
 //    this.lastAlert = alert;
   }
 
   public void setUp() throws Exception {
     boolean finishedSetup = false;
-    addExpectedException("Error occurred while reading system log");
+    IgnoredException.addIgnoredException("Error occurred while reading system log");
     try {
       if (firstTime) {
         disconnectFromDS(); //make sure there's no ldm lying around
@@ -94,7 +97,7 @@ public class ConsoleDistributionManagerDUnitTest
       }
       // create a GfManagerAgent in the master vm.
       this.agent = GfManagerAgentFactory.
-        getManagerAgent(new GfManagerAgentConfig(null, transport, getLogWriter(), Alert.SEVERE, this, null));
+        getManagerAgent(new GfManagerAgentConfig(null, transport, LogWriterUtils.getLogWriter(), Alert.SEVERE, this, null));
       if (!agent.isConnected()) {
         WaitCriterion ev = new WaitCriterion() {
           public boolean done() {
@@ -104,7 +107,7 @@ public class ConsoleDistributionManagerDUnitTest
             return null;
           }
         };
-        DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+        Wait.waitForCriterion(ev, 60 * 1000, 200, true);
       }
       finishedSetup = true;
     } finally {
@@ -119,7 +122,7 @@ public class ConsoleDistributionManagerDUnitTest
         catch (Throwable ignore) {
         }
         try {
-          super.tearDown2();
+          super.preTearDown();
         } 
         catch (VirtualMachineError e) {
           SystemFailure.initiateFailure(e);
@@ -141,21 +144,17 @@ public class ConsoleDistributionManagerDUnitTest
     }
   }
   
-  public void tearDown2() throws Exception {    
-
+  @Override
+  protected final void preTearDownCacheTestCase() throws Exception {
+    this.agent.disconnect();
+  }
+  
+  @Override
+  protected final void postTearDownCacheTestCase() throws Exception {
     try {
-
-      this.agent.disconnect();
-      super.tearDown2();
-
-      // Clean up "admin-only" distribution manager
       disconnectFromDS(); //make sure there's no ldm lying around
-
-    }
-    finally {
-
+    } finally {
       DistributionManager.isDedicatedAdminVM = false;
-
     }
   }
 
@@ -189,7 +188,7 @@ public class ConsoleDistributionManagerDUnitTest
           return null;
         }
       };
-      DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+      Wait.waitForCriterion(ev, 60 * 1000, 200, true);
     }
 
     //final Serializable controllerId = getSystem().getDistributionManager().getId(); //can't do this...
@@ -249,7 +248,7 @@ public class ConsoleDistributionManagerDUnitTest
       
       Region[] roots = apps[i].getRootRegions();
       if (roots.length == 0) {
-        getLogWriter().info("DEBUG: testApplications: apps[" + i + "]=" + apps[i] + " did not have a root region");
+        LogWriterUtils.getLogWriter().info("DEBUG: testApplications: apps[" + i + "]=" + apps[i] + " did not have a root region");
       } else {
         Region root = roots[0];
         assertNotNull(root);
@@ -290,7 +289,7 @@ public class ConsoleDistributionManagerDUnitTest
         assertTrue(!node.isPrimitiveOrString());
         EntryValueNode[] fields = node.getChildren();
         assertNotNull(fields);
-        getLogWriter().warning("The tests use StringBuffers for values which might be implmented differently in jdk 1.5");
+        LogWriterUtils.getLogWriter().warning("The tests use StringBuffers for values which might be implmented differently in jdk 1.5");
        // assertTrue(fields.length > 0);
         
         /// test destruction in the last valid app
@@ -314,7 +313,7 @@ public class ConsoleDistributionManagerDUnitTest
               return "Waited 20 seconds for region " + r.getFullPath() + "to be destroyed.";
             }
           };
-          DistributedTestCase.waitForCriterion(ev, 20 * 1000, 200, true);
+          Wait.waitForCriterion(ev, 20 * 1000, 200, true);
         }
       }
     }
@@ -367,7 +366,7 @@ public class ConsoleDistributionManagerDUnitTest
     region.create(entryName, value);
     
     
-    getLogWriter().info("Put value " + value + " in entry " +
+    LogWriterUtils.getLogWriter().info("Put value " + value + " in entry " +
                         entryName + " in region '" +
                         region.getFullPath() +"'");
     

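The ConsoleDistributionManagerDUnitTest hunks show the most common substitution in this commit: DistributedTestCase.waitForCriterion becomes Wait.waitForCriterion, with WaitCriterion imported from the dunit package directly. A minimal sketch of the polling pattern, assuming those two types; the Probe interface and wrapper class are placeholders for whatever condition a real test checks:

    import com.gemstone.gemfire.test.dunit.Wait;
    import com.gemstone.gemfire.test.dunit.WaitCriterion;

    public class WaitSketch {
        // placeholder for the condition being polled (the real test polls
        // GfManagerAgent.isConnected())
        interface Probe {
            boolean ready();
        }

        // Poll up to 60 s at 200 ms intervals; the final 'true' makes the helper
        // fail the test if the criterion is never met.
        static void waitUntil(final Probe probe) {
            WaitCriterion ev = new WaitCriterion() {
                public boolean done() {
                    return probe.ready();
                }
                public String description() {
                    return "waiting for condition";
                }
            };
            Wait.waitForCriterion(ev, 60 * 1000, 200, true);
        }
    }
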
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/DistributionAdvisorDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/DistributionAdvisorDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/DistributionAdvisorDUnitTest.java
index 98bfbbd..8e238ac 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/DistributionAdvisorDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/DistributionAdvisorDUnitTest.java
@@ -25,6 +25,7 @@ import java.util.Set;
 import com.gemstone.gemfire.CancelCriterion;
 import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedMember;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 
 /**
@@ -43,7 +44,7 @@ public class DistributionAdvisorDUnitTest extends DistributedTestCase {
   public void setUp() throws Exception {
     super.setUp();
     // connect to distributed system in every VM
-    invokeInEveryVM(new SerializableRunnable("DistributionAdvisorDUnitTest: SetUp") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("DistributionAdvisorDUnitTest: SetUp") {
       public void run() {
         getSystem();
       }
@@ -81,9 +82,9 @@ public class DistributionAdvisorDUnitTest extends DistributedTestCase {
                     new DistributionAdvisor.Profile[profileList.size()]);
   }
     
-  public void tearDown2() throws Exception {
+  @Override
+  protected final void preTearDown() throws Exception {
     this.advisor.close();
-    super.tearDown2();
   }
   
     

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/DistributionManagerDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/DistributionManagerDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/DistributionManagerDUnitTest.java
index 3da075f..2164fdc 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/DistributionManagerDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/DistributionManagerDUnitTest.java
@@ -48,8 +48,12 @@ import com.gemstone.gemfire.distributed.internal.membership.gms.mgr.GMSMembershi
 import com.gemstone.gemfire.internal.logging.LogService;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * This class tests the functionality of the {@link
@@ -147,7 +151,7 @@ public class DistributionManagerDUnitTest extends DistributedTestCase {
       mgr = MembershipManagerHelper.getMembershipManager(sys);
       sys.disconnect();
       InternalDistributedMember idm2 = mgr.getLocalMember();
-      getLogWriter().info("original ID=" + idm +
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("original ID=" + idm +
           " and after connecting=" + idm2);
       assertTrue("should not have used a different udp port",
           idm.getPort() == idm2.getPort());
@@ -172,7 +176,7 @@ public class DistributionManagerDUnitTest extends DistributedTestCase {
 
     try {
       InternalDistributedMember mbr = new InternalDistributedMember(
-        DistributedTestCase.getIPLiteral(), 12345);
+        NetworkUtils.getIPLiteral(), 12345);
 
       // first make sure we can't add this as a surprise member (bug #44566)
       
@@ -182,8 +186,8 @@ public class DistributionManagerDUnitTest extends DistributedTestCase {
 
       int oldViewId = mbr.getVmViewId();
       mbr.setVmViewId((int)mgr.getView().getViewId()-1);
-      getLogWriter().info("current membership view is " + mgr.getView());
-      getLogWriter().info("created ID " + mbr + " with view ID " + mbr.getVmViewId());
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("current membership view is " + mgr.getView());
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("created ID " + mbr + " with view ID " + mbr.getVmViewId());
       sys.getLogWriter().info("<ExpectedException action=add>attempt to add old member</ExpectedException>");
       sys.getLogWriter().info("<ExpectedException action=add>Removing shunned GemFire node</ExpectedException>");
       try {
@@ -283,7 +287,7 @@ public class DistributionManagerDUnitTest extends DistributedTestCase {
         public void run() {
           props.setProperty(DistributionConfig.NAME_NAME, "sleeper");
           getSystem(props);
-          addExpectedException("elapsed while waiting for replies");
+          IgnoredException.addIgnoredException("elapsed while waiting for replies");
           RegionFactory rf = new RegionFactory();
           Region r = rf.setScope(Scope.DISTRIBUTED_ACK)
             .setDataPolicy(DataPolicy.REPLICATE)
@@ -397,7 +401,7 @@ public class DistributionManagerDUnitTest extends DistributedTestCase {
    */
   public void testKickOutSickMember() throws Exception {
     disconnectAllFromDS();
-    addExpectedException("10 seconds have elapsed while waiting");
+    IgnoredException.addIgnoredException("10 seconds have elapsed while waiting");
     Host host = Host.getHost(0);
 //    VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
@@ -460,7 +464,7 @@ public class DistributionManagerDUnitTest extends DistributedTestCase {
             }
           };
           // if this fails it means the sick member wasn't kicked out and something is wrong
-          DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+          Wait.waitForCriterion(ev, 60 * 1000, 200, true);
           
           ev = new WaitCriterion() {
             public boolean done() {
@@ -470,7 +474,7 @@ public class DistributionManagerDUnitTest extends DistributedTestCase {
               return null;
             }
           };
-          DistributedTestCase.waitForCriterion(ev, 20 * 1000, 200, false);
+          Wait.waitForCriterion(ev, 20 * 1000, 200, false);
           
           if (!myCache.isClosed()) {
             if (system.isConnected()) {
@@ -492,7 +496,7 @@ public class DistributionManagerDUnitTest extends DistributedTestCase {
               return "vm1's listener should have received afterRegionDestroyed notification";
             }
           };
-          DistributedTestCase.waitForCriterion(wc, 30 * 1000, 1000, true);
+          Wait.waitForCriterion(wc, 30 * 1000, 1000, true);
           
         }
       });
@@ -521,14 +525,14 @@ public class DistributionManagerDUnitTest extends DistributedTestCase {
     try {
       getSystem(props);
     } catch (IllegalArgumentException e) {
-      getLogWriter().info("caught expected exception (1)", e);
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("caught expected exception (1)", e);
     }
     // use an invalid address
     props.setProperty(DistributionConfig.BIND_ADDRESS_NAME, "bruce.schuchardt");
     try {
       getSystem(props);
     } catch (IllegalArgumentException e) {
-      getLogWriter().info("caught expected exception (2_", e);
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("caught expected exception (2_", e);
     }
     // use a valid bind address
     props.setProperty(DistributionConfig.BIND_ADDRESS_NAME, InetAddress.getLocalHost().getCanonicalHostName());
@@ -561,12 +565,12 @@ public class DistributionManagerDUnitTest extends DistributedTestCase {
     t.setDaemon(true);
     t.start();
     
-    pause(2000);
+    Wait.pause(2000);
 
     NetView newView = new NetView(v, v.getViewId()+1);
     ((Manager)mgr).installView(newView);
 
-    pause(2000);
+    Wait.pause(2000);
     
     synchronized(passed) {
       Assert.assertTrue(passed[0]);

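DistributionManagerDUnitTest demonstrates two more extractions: addExpectedException becomes IgnoredException.addIgnoredException, and getIPLiteral moves to NetworkUtils. A short sketch combining the two, assuming those utility classes; the helper method and the placeholder port are hypothetical:

    import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedMember;
    import com.gemstone.gemfire.test.dunit.IgnoredException;
    import com.gemstone.gemfire.test.dunit.NetworkUtils;

    public class SuspectStringSketch {
        // hypothetical helper; 12345 is an arbitrary placeholder port
        static InternalDistributedMember fakeMember() throws Exception {
            // register a suspect string so the post-run log scan does not fail the test
            IgnoredException.addIgnoredException("10 seconds have elapsed while waiting");
            // getIPLiteral() moved from DistributedTestCase to NetworkUtils
            return new InternalDistributedMember(NetworkUtils.getIPLiteral(), 12345);
        }
    }
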
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/ProductUseLogDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/ProductUseLogDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/ProductUseLogDUnitTest.java
index d6f45d1..149d6d8 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/ProductUseLogDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/ProductUseLogDUnitTest.java
@@ -28,6 +28,7 @@ import com.gemstone.gemfire.cache.server.CacheServer;
 import com.gemstone.gemfire.distributed.Locator;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.CacheServerImpl;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
@@ -75,7 +76,7 @@ public class ProductUseLogDUnitTest extends DistributedTestCase {
         try {
           server.start();
         } catch (IOException e) {
-          fail("failed to start server", e);
+          Assert.fail("failed to start server", e);
         }
       }
     });
@@ -100,7 +101,8 @@ public class ProductUseLogDUnitTest extends DistributedTestCase {
     return sb.toString();
   }
 
-  public void tearDown2() {
+  @Override
+  protected final void preTearDown() throws Exception {
     disconnectAllFromDS();
   }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/deadlock/GemFireDeadlockDetectorDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/deadlock/GemFireDeadlockDetectorDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/deadlock/GemFireDeadlockDetectorDUnitTest.java
index efdcb0c..bc3bee6 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/deadlock/GemFireDeadlockDetectorDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/deadlock/GemFireDeadlockDetectorDUnitTest.java
@@ -33,8 +33,11 @@ import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.distributed.DistributedLockService;
 import com.gemstone.gemfire.distributed.LockServiceDestroyedException;
 import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedMember;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
@@ -50,12 +53,12 @@ public class GemFireDeadlockDetectorDUnitTest extends CacheTestCase {
   
   
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
     disconnectAllFromDS();
   }
 
   private void stopStuckThreads() {
-    invokeInEveryVM(new SerializableRunnable() {
+    Invoke.invokeInEveryVM(new SerializableRunnable() {
       
       public void run() {
         for(Thread thread: stuckThreads) {
@@ -65,7 +68,7 @@ public class GemFireDeadlockDetectorDUnitTest extends CacheTestCase {
             thread.join(30000);
             assertTrue(!thread.isAlive());
           } catch (InterruptedException e) {
-            fail("interrupted", e);
+            Assert.fail("interrupted", e);
           }
         }
       }
@@ -114,7 +117,7 @@ public class GemFireDeadlockDetectorDUnitTest extends CacheTestCase {
     Thread.sleep(5000);
     GemFireDeadlockDetector detect = new GemFireDeadlockDetector();
     LinkedList<Dependency> deadlock = detect.find().findCycle();
-    getLogWriter().info("Deadlock=" + DeadlockDetector.prettyFormat(deadlock));
+    LogWriterUtils.getLogWriter().info("Deadlock=" + DeadlockDetector.prettyFormat(deadlock));
     assertEquals(8, deadlock.size());
     stopStuckThreads();
     async1.getResult(30000);
@@ -131,7 +134,7 @@ public class GemFireDeadlockDetectorDUnitTest extends CacheTestCase {
         try {
           Thread.sleep(1000);
         } catch (InterruptedException e) {
-          fail("interrupted", e);
+          Assert.fail("interrupted", e);
         }
         ResultCollector collector = FunctionService.onMember(member).execute(new TestFunction());
         //wait the function to lock the lock on member.
@@ -161,7 +164,7 @@ public class GemFireDeadlockDetectorDUnitTest extends CacheTestCase {
     }
     
     assertTrue(deadlock != null);
-    getLogWriter().info("Deadlock=" + DeadlockDetector.prettyFormat(deadlock));
+    LogWriterUtils.getLogWriter().info("Deadlock=" + DeadlockDetector.prettyFormat(deadlock));
     assertEquals(4, deadlock.size());
     stopStuckThreads();
     disconnectAllFromDS();

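GemFireDeadlockDetectorDUnitTest shows the logging change that runs through the whole commit: getLogWriter() is reached through LogWriterUtils rather than inherited. A minimal sketch, assuming LogWriterUtils.getLogWriter() returns the usual com.gemstone.gemfire.LogWriter; the wrapper class and message text are illustrative only:

    import com.gemstone.gemfire.LogWriter;
    import com.gemstone.gemfire.test.dunit.LogWriterUtils;

    public class LoggingSketch {
        // getLogWriter() is no longer inherited from DistributedTestCase;
        // fetch the dunit log writer through the extracted utility instead
        static void logDeadlock(String formattedDependencies) {
            LogWriter log = LogWriterUtils.getLogWriter();
            log.info("Deadlock=" + formattedDependencies);
            log.warning("deadlock detected; see formatted dependency graph above");
        }
    }
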
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/locks/CollaborationJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/locks/CollaborationJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/locks/CollaborationJUnitTest.java
index 991030e..6985044 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/locks/CollaborationJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/locks/CollaborationJUnitTest.java
@@ -30,8 +30,9 @@ import com.gemstone.gemfire.CancelCriterion;
 import com.gemstone.gemfire.LogWriter;
 import com.gemstone.gemfire.SystemFailure;
 import com.gemstone.gemfire.internal.logging.LocalLogWriter;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 import com.gemstone.gemfire.internal.logging.InternalLogWriter;
 
@@ -104,7 +105,7 @@ public class CollaborationJUnitTest {
         return "waiting for thread";
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 5 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 5 * 1000, 200, true);
     assertTrue(this.collaboration.hasCurrentTopic(threadA));
     
     // thread two blocks until one releeases
@@ -125,7 +126,7 @@ public class CollaborationJUnitTest {
               return "waiting for release";
             }
           };
-          DistributedTestCase.waitForCriterion(ev2, 20 * 1000, 200, true);
+          Wait.waitForCriterion(ev2, 20 * 1000, 200, true);
         }
         finally {
           collaboration.release();
@@ -145,7 +146,7 @@ public class CollaborationJUnitTest {
         return "waiting for thread b";
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 5 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 5 * 1000, 200, true);
     
     // threadA holds topic and threadB is waiting...
     assertTrue(this.collaboration.hasCurrentTopic(threadA));
@@ -153,7 +154,7 @@ public class CollaborationJUnitTest {
 
     // let threadA release so that threadB gets lock
     this.flagTestBlocksUntilRelease = false;
-    DistributedTestCase.join(threadA, 30 * 1000, null);
+    ThreadUtils.join(threadA, 30 * 1000);
     
     // make sure threadB is doing what it's supposed to do...
     ev = new WaitCriterion() {
@@ -166,11 +167,11 @@ public class CollaborationJUnitTest {
         return "threadB";
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 5 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 5 * 1000, 200, true);
     // threadB must have lock now... let threadB release
     assertTrue(this.collaboration.hasCurrentTopic(threadB));
     this.flagTestBlocksUntilRelease = false;
-    DistributedTestCase.join(threadB, 30 * 1000, null);
+    ThreadUtils.join(threadB, 30 * 1000);
 
     // collaboration should be free now    
     assertFalse(this.collaboration.hasCurrentTopic(threadA));
@@ -207,7 +208,7 @@ public class CollaborationJUnitTest {
               return null;
             }
           };
-          DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+          Wait.waitForCriterion(ev, 60 * 1000, 200, true);
         }
         finally {
           collaboration.release();
@@ -225,7 +226,7 @@ public class CollaborationJUnitTest {
         return "wait for ThreadA";
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 30 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 30 * 1000, 200, true);
     assertTrue(this.collaboration.hasCurrentTopic(threadA));
     assertTrue(this.collaboration.isCurrentTopic(topicA));
     
@@ -245,7 +246,7 @@ public class CollaborationJUnitTest {
               return null;
             }
           };
-          DistributedTestCase.waitForCriterion(ev2, 60 * 1000, 200, true);
+          Wait.waitForCriterion(ev2, 60 * 1000, 200, true);
         }
         finally {
           collaboration.release();
@@ -263,7 +264,7 @@ public class CollaborationJUnitTest {
         return "";
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 60 * 1000, 200, true);
     assertTrue(this.collaboration.hasCurrentTopic(threadB));
     
     // thread three blocks for new topic
@@ -284,7 +285,7 @@ public class CollaborationJUnitTest {
               return null;
             }
           };
-          DistributedTestCase.waitForCriterion(ev2, 60 * 1000, 200, true);
+          Wait.waitForCriterion(ev2, 60 * 1000, 200, true);
         }
         finally {
           collaboration.release();
@@ -302,7 +303,7 @@ public class CollaborationJUnitTest {
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 60 * 1000, 200, true);
     assertFalse(this.collaboration.hasCurrentTopic(threadC));
     assertFalse(this.collaboration.isCurrentTopic(topicB));
     
@@ -336,12 +337,12 @@ public class CollaborationJUnitTest {
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 60 * 1000, 200, true);
     assertTrue(this.collaboration.hasCurrentTopic(threadD));
     
     // release threadA
     this.threadAFlag_TestLateComerJoinsIn = false;
-    DistributedTestCase.join(threadA, 30 * 1000, null);
+    ThreadUtils.join(threadA, 30 * 1000);
     assertFalse(this.collaboration.hasCurrentTopic(threadA));
     assertTrue(this.collaboration.hasCurrentTopic(threadB));
     assertFalse(this.collaboration.hasCurrentTopic(threadC));
@@ -351,7 +352,7 @@ public class CollaborationJUnitTest {
     
     // release threadB
     this.threadBFlag_TestLateComerJoinsIn = false;
-    DistributedTestCase.join(threadB, 30 * 1000, null);
+    ThreadUtils.join(threadB, 30 * 1000);
     assertFalse(this.collaboration.hasCurrentTopic(threadB));
     assertFalse(this.collaboration.hasCurrentTopic(threadC));
     assertTrue(this.collaboration.hasCurrentTopic(threadD));
@@ -360,7 +361,7 @@ public class CollaborationJUnitTest {
     
     // release threadD
     this.threadDFlag_TestLateComerJoinsIn = false;
-    DistributedTestCase.join(threadD, 30 * 1000, null);
+    ThreadUtils.join(threadD, 30 * 1000);
     ev = new WaitCriterion() {
       @Override
       public boolean done() {
@@ -371,7 +372,7 @@ public class CollaborationJUnitTest {
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 60 * 1000, 200, true);
     assertTrue(this.collaboration.hasCurrentTopic(threadC));
     assertFalse(this.collaboration.hasCurrentTopic(threadD));
     assertFalse(this.collaboration.isCurrentTopic(topicA));
@@ -379,7 +380,7 @@ public class CollaborationJUnitTest {
     
     // release threadC
     this.threadCFlag_TestLateComerJoinsIn = false;
-    DistributedTestCase.join(threadC, 30 * 1000, null);
+    ThreadUtils.join(threadC, 30 * 1000);
     assertFalse(this.collaboration.hasCurrentTopic(threadC));
     assertFalse(this.collaboration.isCurrentTopic(topicA));
     assertFalse(this.collaboration.isCurrentTopic(topicB));
@@ -420,7 +421,7 @@ public class CollaborationJUnitTest {
                   return "other threads lining up";
                 }
               };
-              DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+              Wait.waitForCriterion(ev, 60 * 1000, 200, true);
               collaboration.release();
               released = true;
             }
@@ -454,7 +455,7 @@ public class CollaborationJUnitTest {
           return "waiting for numThreads * 10";
         }
       };
-      DistributedTestCase.waitForCriterion(ev, 5 * 60 * 1000, 200, true);
+      Wait.waitForCriterion(ev, 5 * 60 * 1000, 200, true);
     }
     finally {
       if (this.runTestFairnessStressfully) {
@@ -463,7 +464,7 @@ public class CollaborationJUnitTest {
     }
     
     for (int t = 0; t < threads.length; t++) {
-      DistributedTestCase.join(threads[t], 30 * 1000, null);
+      ThreadUtils.join(threads[t], 30 * 1000);
     }
     
     // assert that all topics are acquired in order
@@ -552,7 +553,7 @@ public class CollaborationJUnitTest {
               return null;
             }
           };
-          DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+          Wait.waitForCriterion(ev, 60 * 1000, 200, true);
         }
         finally {
           collaboration.release();
@@ -573,12 +574,12 @@ public class CollaborationJUnitTest {
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 60 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 60 * 1000, 200, true);
     
     // after starting thread, hasCurrentTopic(thread) returns true
     assertTrue(this.collaboration.hasCurrentTopic(thread));
     this.flagTestThreadHasCurrentTopic = false;
-    DistributedTestCase.join(thread, 30 * 1000, null);
+    ThreadUtils.join(thread, 30 * 1000);
     
     // after thread finishes, hasCurrentTopic(thread) returns false
     assertTrue(!this.collaboration.hasCurrentTopic(thread));

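CollaborationJUnitTest shows the thread helper moving to ThreadUtils, and in passing the call loses its trailing LogWriter argument. A one-method sketch of the new call, assuming ThreadUtils.join(Thread, long) as used in the hunks above; the wrapper class is hypothetical:

    import com.gemstone.gemfire.test.dunit.ThreadUtils;

    public class JoinSketch {
        // The extracted helper drops the trailing LogWriter parameter:
        // join(thread, timeoutMs) instead of join(thread, timeoutMs, logWriter).
        static void finishWorker(Thread worker) {
            ThreadUtils.join(worker, 30 * 1000);  // waits up to 30 s for the worker
        }
    }
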
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/MembershipManagerHelper.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/MembershipManagerHelper.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/MembershipManagerHelper.java
index cda4e29..e44d761 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/MembershipManagerHelper.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/membership/gms/MembershipManagerHelper.java
@@ -26,8 +26,8 @@ import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedM
 import com.gemstone.gemfire.distributed.internal.membership.MembershipManager;
 import com.gemstone.gemfire.distributed.internal.membership.gms.interfaces.Manager;
 import com.gemstone.gemfire.distributed.internal.membership.gms.mgr.GMSMembershipManager;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * This helper class provides access to membership manager information that
@@ -149,7 +149,7 @@ public class MembershipManagerHelper
         return assMsg;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, timeout, 200, true);
+    Wait.waitForCriterion(ev, timeout, 200, true);
   }
   
   public static void crashDistributedSystem(final DistributedSystem msys) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/tcpserver/TcpServerBackwardCompatDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/tcpserver/TcpServerBackwardCompatDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/tcpserver/TcpServerBackwardCompatDUnitTest.java
index a6cc444..a66367b 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/tcpserver/TcpServerBackwardCompatDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/distributed/internal/tcpserver/TcpServerBackwardCompatDUnitTest.java
@@ -37,7 +37,10 @@ import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.Version;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.test.junit.categories.DistributedTest;
 
 /**
@@ -62,7 +65,7 @@ public class TcpServerBackwardCompatDUnitTest extends DistributedTestCase {
   public void setUp() throws Exception {
     super.setUp();
     disconnectAllFromDS();
-    invokeInEveryVM(new CacheSerializableRunnable("Set TcpServer.isTesting true") {
+    Invoke.invokeInEveryVM(new CacheSerializableRunnable("Set TcpServer.isTesting true") {
       
       @Override
       public void run2() throws CacheException {
@@ -72,15 +75,14 @@ public class TcpServerBackwardCompatDUnitTest extends DistributedTestCase {
   }
 
   @Override
-  public void tearDown2() throws Exception {
-    invokeInEveryVM(new CacheSerializableRunnable("Set TcpServer.isTesting true") {
+  protected final void preTearDown() throws Exception {
+    Invoke.invokeInEveryVM(new CacheSerializableRunnable("Set TcpServer.isTesting true") {
       
       @Override
       public void run2() throws CacheException {
         TcpServer.isTesting = false;
       }
     });
-    super.tearDown2();
   }
 
   /**
@@ -128,7 +130,7 @@ public class TcpServerBackwardCompatDUnitTest extends DistributedTestCase {
           
           Locator.startLocatorAndDS(port0, logFile0, props);
         } catch (IOException e) {
-          fail("Locator1 start failed with Gossip Version: " + TcpServer.GOSSIPVERSION + "!", e);
+          com.gemstone.gemfire.test.dunit.Assert.fail("Locator1 start failed with Gossip Version: " + TcpServer.GOSSIPVERSION + "!", e);
         }
       }
     });
@@ -185,7 +187,7 @@ public class TcpServerBackwardCompatDUnitTest extends DistributedTestCase {
             }
           };
           
-          DistributedTestCase.waitForCriterion(ev, 1000, 200, true);
+          Wait.waitForCriterion(ev, 1000, 200, true);
           fail("this test must be fixed to work with the jgroups replacement");
           // TODO
 //          Vector members = client.getMembers("mygroup1", new IpAddress(InetAddress.getLocalHost(), port0), true, 5000);
@@ -194,7 +196,7 @@ public class TcpServerBackwardCompatDUnitTest extends DistributedTestCase {
 //          Assert.assertTrue(members.contains(new IpAddress(InetAddress.getLocalHost(), port1)));
 
         } catch (IOException e) {
-          fail("Locator1 start failed with Gossip Version: " + TcpServer.GOSSIPVERSION + "!", e);
+          com.gemstone.gemfire.test.dunit.Assert.fail("Locator1 start failed with Gossip Version: " + TcpServer.GOSSIPVERSION + "!", e);
         }
       }
     });
@@ -247,7 +249,7 @@ public class TcpServerBackwardCompatDUnitTest extends DistributedTestCase {
 //          }
 
         } catch (IOException e) {
-          fail("Locator0 start failed with Gossip Version: " + TcpServer.GOSSIPVERSION + "!", e);
+          com.gemstone.gemfire.test.dunit.Assert.fail("Locator0 start failed with Gossip Version: " + TcpServer.GOSSIPVERSION + "!", e);
         }
       }
     });

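In TcpServerBackwardCompatDUnitTest the replacement fail calls are written with the fully qualified class name, presumably because the test still uses the plain JUnit fail(String) elsewhere and JUnit 3 offers no fail(String, Throwable) overload. A sketch of that coexistence, assuming a JUnit 3 style test; the class name and the startSomething() method are placeholders:

    import java.io.IOException;

    import junit.framework.TestCase;

    public class FullyQualifiedFailSketch extends TestCase {
        public void testStartSomething() {
            try {
                startSomething();
            } catch (IOException e) {
                // the dunit overload records the cause; it is referenced by its
                // fully qualified name to keep it distinct from the inherited
                // JUnit fail(String)
                com.gemstone.gemfire.test.dunit.Assert.fail("start failed", e);
            }
        }

        // placeholder for whatever operation the real test exercises
        private void startSomething() throws IOException {
        }
    }
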

[14/62] [abbrv] [partial] incubator-geode git commit: GEODE-773: Extract static methods from DistributedTestCase

Posted by je...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/security/ClientAuthenticationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/security/ClientAuthenticationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/security/ClientAuthenticationDUnitTest.java
index ccdf782..911454a 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/security/ClientAuthenticationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/security/ClientAuthenticationDUnitTest.java
@@ -32,7 +32,10 @@ import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 import security.DummyCredentialGenerator;
 
@@ -81,7 +84,7 @@ public class ClientAuthenticationDUnitTest extends DistributedTestCase {
     client1 = host.getVM(2);
     client2 = host.getVM(3);
     
-    addExpectedException("Connection refused: connect");
+    IgnoredException.addIgnoredException("Connection refused: connect");
 
     server1.invoke(SecurityTestUtil.class, "registerExpectedExceptions",
         new Object[] { serverExpectedExceptions });
@@ -212,11 +215,11 @@ public class ClientAuthenticationDUnitTest extends DistributedTestCase {
       String authenticator = gen.getAuthenticator();
       String authInit = gen.getAuthInit();
 
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testValidCredentials: Using scheme: " + gen.classCode());
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testValidCredentials: Using authenticator: " + authenticator);
-      getLogWriter().info("testValidCredentials: Using authinit: " + authInit);
+      LogWriterUtils.getLogWriter().info("testValidCredentials: Using authinit: " + authInit);
 
       // Start the servers
       Integer locPort1 = SecurityTestUtil.getLocatorPort();
@@ -234,12 +237,12 @@ public class ClientAuthenticationDUnitTest extends DistributedTestCase {
       // Start the clients with valid credentials
       Properties credentials1 = gen.getValidCredentials(1);
       Properties javaProps1 = gen.getJavaProperties();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testValidCredentials: For first client credentials: " + credentials1
               + " : " + javaProps1);
       Properties credentials2 = gen.getValidCredentials(2);
       Properties javaProps2 = gen.getJavaProperties();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testValidCredentials: For second client credentials: "
               + credentials2 + " : " + javaProps2);
       client1.invoke(ClientAuthenticationDUnitTest.class, "createCacheClient",
@@ -278,11 +281,11 @@ public class ClientAuthenticationDUnitTest extends DistributedTestCase {
       String authenticator = gen.getAuthenticator();
       String authInit = gen.getAuthInit();
 
-      getLogWriter()
+      LogWriterUtils.getLogWriter()
           .info("testNoCredentials: Using scheme: " + gen.classCode());
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testNoCredentials: Using authenticator: " + authenticator);
-      getLogWriter().info("testNoCredentials: Using authinit: " + authInit);
+      LogWriterUtils.getLogWriter().info("testNoCredentials: Using authinit: " + authInit);
 
       // Start the servers
       Integer locPort1 = SecurityTestUtil.getLocatorPort();
@@ -300,7 +303,7 @@ public class ClientAuthenticationDUnitTest extends DistributedTestCase {
       // Start first client with valid credentials
       Properties credentials1 = gen.getValidCredentials(1);
       Properties javaProps1 = gen.getJavaProperties();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testNoCredentials: For first client credentials: " + credentials1
               + " : " + javaProps1);
       client1.invoke(ClientAuthenticationDUnitTest.class, "createCacheClient",
@@ -343,11 +346,11 @@ public class ClientAuthenticationDUnitTest extends DistributedTestCase {
       String authenticator = gen.getAuthenticator();
       String authInit = gen.getAuthInit();
 
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testInvalidCredentials: Using scheme: " + gen.classCode());
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testInvalidCredentials: Using authenticator: " + authenticator);
-      getLogWriter()
+      LogWriterUtils.getLogWriter()
           .info("testInvalidCredentials: Using authinit: " + authInit);
 
       // Start the servers
@@ -366,7 +369,7 @@ public class ClientAuthenticationDUnitTest extends DistributedTestCase {
       // Start first client with valid credentials
       Properties credentials1 = gen.getValidCredentials(1);
       Properties javaProps1 = gen.getJavaProperties();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testInvalidCredentials: For first client credentials: "
               + credentials1 + " : " + javaProps1);
       client1.invoke(ClientAuthenticationDUnitTest.class, "createCacheClient",
@@ -382,7 +385,7 @@ public class ClientAuthenticationDUnitTest extends DistributedTestCase {
       // exception
       Properties credentials2 = gen.getInvalidCredentials(1);
       Properties javaProps2 = gen.getJavaProperties();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testInvalidCredentials: For second client credentials: "
               + credentials2 + " : " + javaProps2);
       client2.invoke(ClientAuthenticationDUnitTest.class, "createCacheClient",
@@ -401,9 +404,9 @@ public class ClientAuthenticationDUnitTest extends DistributedTestCase {
       Properties javaProps = gen.getJavaProperties();
       String authenticator = gen.getAuthenticator();
 
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testInvalidAuthInit: Using scheme: " + gen.classCode());
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testInvalidAuthInit: Using authenticator: " + authenticator);
 
       // Start the server
@@ -416,7 +419,7 @@ public class ClientAuthenticationDUnitTest extends DistributedTestCase {
 
       Properties credentials = gen.getValidCredentials(1);
       javaProps = gen.getJavaProperties();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testInvalidAuthInit: For first client credentials: " + credentials
               + " : " + javaProps);
       client1.invoke(ClientAuthenticationDUnitTest.class, "createCacheClient",
@@ -437,9 +440,9 @@ public class ClientAuthenticationDUnitTest extends DistributedTestCase {
       String authenticator = gen.getAuthenticator();
 
 
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testNoAuthInitWithCredentials: Using scheme: " + gen.classCode());
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testNoAuthInitWithCredentials: Using authenticator: "
               + authenticator);
 
@@ -459,12 +462,12 @@ public class ClientAuthenticationDUnitTest extends DistributedTestCase {
       // Start the clients with valid credentials
       Properties credentials1 = gen.getValidCredentials(1);
       Properties javaProps1 = gen.getJavaProperties();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testNoAuthInitWithCredentials: For first client credentials: "
               + credentials1 + " : " + javaProps1);
       Properties credentials2 = gen.getValidCredentials(2);
       Properties javaProps2 = gen.getJavaProperties();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testNoAuthInitWithCredentials: For second client credentials: "
               + credentials2 + " : " + javaProps2);
       client1.invoke(ClientAuthenticationDUnitTest.class, "createCacheClient",
@@ -495,9 +498,9 @@ public class ClientAuthenticationDUnitTest extends DistributedTestCase {
       Properties javaProps = gen.getJavaProperties();
       String authInit = gen.getAuthInit();
 
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testInvalidAuthenticator: Using scheme: " + gen.classCode());
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testInvalidAuthenticator: Using authinit: " + authInit);
 
       // Start the server with invalid authenticator
@@ -511,7 +514,7 @@ public class ClientAuthenticationDUnitTest extends DistributedTestCase {
       // Trying to create the region on client should throw a security exception
       Properties credentials = gen.getValidCredentials(1);
       javaProps = gen.getJavaProperties();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testInvalidAuthenticator: For first client credentials: "
               + credentials + " : " + javaProps);
       client1.invoke(ClientAuthenticationDUnitTest.class, "createCacheClient",
@@ -523,7 +526,7 @@ public class ClientAuthenticationDUnitTest extends DistributedTestCase {
       // Also test with invalid credentials
       credentials = gen.getInvalidCredentials(1);
       javaProps = gen.getJavaProperties();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testInvalidAuthenticator: For first client credentials: "
               + credentials + " : " + javaProps);
       client1.invoke(ClientAuthenticationDUnitTest.class, "createCacheClient",
@@ -543,10 +546,10 @@ public class ClientAuthenticationDUnitTest extends DistributedTestCase {
       String authenticator = gen.getAuthenticator();
       String authInit = gen.getAuthInit();
 
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testNoAuthenticatorWithCredentials: Using scheme: "
               + gen.classCode());
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testNoAuthenticatorWithCredentials: Using authinit: " + authInit);
 
       // Start the servers with no authenticator
@@ -564,12 +567,12 @@ public class ClientAuthenticationDUnitTest extends DistributedTestCase {
       // valid/invalid credentials when none are required on the server side
       Properties credentials1 = gen.getValidCredentials(3);
       Properties javaProps1 = gen.getJavaProperties();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testNoAuthenticatorWithCredentials: For first client credentials: "
               + credentials1 + " : " + javaProps1);
       Properties credentials2 = gen.getInvalidCredentials(5);
       Properties javaProps2 = gen.getJavaProperties();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testNoAuthenticatorWithCredentials: For second client credentials: "
               + credentials2 + " : " + javaProps2);
       client1.invoke(ClientAuthenticationDUnitTest.class, "createCacheClient",
@@ -599,11 +602,11 @@ public class ClientAuthenticationDUnitTest extends DistributedTestCase {
       String authenticator = gen.getAuthenticator();
       String authInit = gen.getAuthInit();
 
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testCredentialsWithFailover: Using scheme: " + gen.classCode());
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testCredentialsWithFailover: Using authenticator: " + authenticator);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testCredentialsWithFailover: Using authinit: " + authInit);
 
       // Start the first server
@@ -622,12 +625,12 @@ public class ClientAuthenticationDUnitTest extends DistributedTestCase {
       // Start the clients with valid credentials
       Properties credentials1 = gen.getValidCredentials(5);
       Properties javaProps1 = gen.getJavaProperties();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testCredentialsWithFailover: For first client credentials: "
               + credentials1 + " : " + javaProps1);
       Properties credentials2 = gen.getValidCredentials(6);
       Properties javaProps2 = gen.getJavaProperties();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testCredentialsWithFailover: For second client credentials: "
               + credentials2 + " : " + javaProps2);
       client1.invoke(ClientAuthenticationDUnitTest.class, "createCacheClient",
@@ -680,7 +683,7 @@ public class ClientAuthenticationDUnitTest extends DistributedTestCase {
       // Verify that the creation of region throws security exception
       credentials1 = gen.getInvalidCredentials(7);
       javaProps1 = gen.getJavaProperties();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testCredentialsWithFailover: For first client invalid credentials: "
               + credentials1 + " : " + javaProps1);
       client1.invoke(ClientAuthenticationDUnitTest.class, "createCacheClient",
@@ -708,12 +711,12 @@ public class ClientAuthenticationDUnitTest extends DistributedTestCase {
       String authenticator = gen.getAuthenticator();
       String authInit = gen.getAuthInit();
 
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testCredentialsForNotifications: Using scheme: " + gen.classCode());
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testCredentialsForNotifications: Using authenticator: "
               + authenticator);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testCredentialsForNotifications: Using authinit: " + authInit);
 
       // Start the first server
@@ -732,12 +735,12 @@ public class ClientAuthenticationDUnitTest extends DistributedTestCase {
       // Start the clients with valid credentials
       Properties credentials1 = gen.getValidCredentials(3);
       Properties javaProps1 = gen.getJavaProperties();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testCredentialsForNotifications: For first client credentials: "
               + credentials1 + " : " + javaProps1);
       Properties credentials2 = gen.getValidCredentials(4);
       Properties javaProps2 = gen.getJavaProperties();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testCredentialsForNotifications: For second client credentials: "
               + credentials2 + " : " + javaProps2);
       client1.invoke(ClientAuthenticationDUnitTest.class, "createCacheClient",
@@ -768,7 +771,7 @@ public class ClientAuthenticationDUnitTest extends DistributedTestCase {
       server1.invoke(SecurityTestUtil.class, "closeCache");
 
       // Wait for failover to complete
-      pause(500);
+      Wait.pause(500);
 
       // Perform some create/update operations from client1
       client1.invoke(SecurityTestUtil.class, "doNPuts",
@@ -803,7 +806,7 @@ public class ClientAuthenticationDUnitTest extends DistributedTestCase {
       // Verify that the creation of region throws security exception
       credentials2 = gen.getInvalidCredentials(3);
       javaProps2 = gen.getJavaProperties();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testCredentialsForNotifications: For second client invalid credentials: "
               + credentials2 + " : " + javaProps2);
       client2.invoke(ClientAuthenticationDUnitTest.class, "createCacheClient",
@@ -843,7 +846,7 @@ public class ClientAuthenticationDUnitTest extends DistributedTestCase {
                 new Integer(SecurityTestUtil.AUTHFAIL_EXCEPTION) });
       }
       else {
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "testCredentialsForNotifications: Skipping invalid authenticator for scheme ["
                 + gen.classCode() + "] which has no authInit");
       }
@@ -874,7 +877,7 @@ public class ClientAuthenticationDUnitTest extends DistributedTestCase {
                 new Integer(SecurityTestUtil.AUTHREQ_EXCEPTION) });
       }
       else {
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "testCredentialsForNotifications: Skipping null authInit for scheme ["
                 + gen.classCode() + "] which has no authInit");
       }
@@ -934,7 +937,7 @@ public class ClientAuthenticationDUnitTest extends DistributedTestCase {
             new Object[] { new Integer(4) });
       }
       else {
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "testCredentialsForNotifications: Skipping scheme ["
                 + gen.classCode() + "] which has no authenticator");
       }
@@ -953,9 +956,7 @@ public class ClientAuthenticationDUnitTest extends DistributedTestCase {
   //////////////////////////////////////////////////////////////////////////////
   
   @Override
-  public void tearDown2() throws Exception {
-
-    super.tearDown2();
+  protected final void preTearDown() throws Exception {
     // close the clients first
     client1.invoke(SecurityTestUtil.class, "closeCache");
     client2.invoke(SecurityTestUtil.class, "closeCache");
@@ -963,5 +964,4 @@ public class ClientAuthenticationDUnitTest extends DistributedTestCase {
     server1.invoke(SecurityTestUtil.class, "closeCache");
     server2.invoke(SecurityTestUtil.class, "closeCache");
   }
-
 }

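ClientAuthenticationDUnitTest keeps a fixed pause where the test waits for client failover, now spelled Wait.pause. A minimal sketch, assuming the extracted Wait class; the wrapper method is hypothetical, and a WaitCriterion poll would usually be the better choice when there is something concrete to check:

    import com.gemstone.gemfire.test.dunit.Wait;

    public class FailoverPauseSketch {
        // pause(ms) is a fixed sleep, now a static on Wait rather than an
        // inherited method; kept here for spots like "Wait for failover to
        // complete" in the hunks above.
        static void settleAfterFailover() {
            Wait.pause(500);
        }
    }
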
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/security/ClientAuthorizationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/security/ClientAuthorizationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/security/ClientAuthorizationDUnitTest.java
index c434a2a..2774e35 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/security/ClientAuthorizationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/security/ClientAuthorizationDUnitTest.java
@@ -34,6 +34,8 @@ import security.XmlAuthzCredentialGenerator;
 import com.gemstone.gemfire.cache.operations.OperationContext.OperationCode;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 import templates.security.UserPasswordAuthInit;
@@ -111,7 +113,7 @@ public class ClientAuthorizationDUnitTest extends ClientAuthorizationTestBase {
           fail("executeRIOpBlock: Unknown client number " + clientNum);
           break;
       }
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "executeRIOpBlock: performing operation number ["
               + currentOp.getOpNum() + "]: " + currentOp);
       if ((opFlags & OpFlags.USE_OLDCONN) == 0) {
@@ -159,7 +161,7 @@ public class ClientAuthorizationDUnitTest extends ClientAuthorizationTestBase {
                 extraAuthzProps });
         // Start the client with valid credentials but allowed or disallowed to
         // perform an operation
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "executeRIOpBlock: For client" + clientNum + credentialsTypeStr
                 + " credentials: " + opCredentials);
         if (useThisVM) {
@@ -212,10 +214,10 @@ public class ClientAuthorizationDUnitTest extends ClientAuthorizationTestBase {
       String authInit = cGen.getAuthInit();
       String accessor = gen.getAuthorizationCallback();
 
-      getLogWriter().info("testAllowPutsGets: Using authinit: " + authInit);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info("testAllowPutsGets: Using authinit: " + authInit);
+      LogWriterUtils.getLogWriter().info(
           "testAllowPutsGets: Using authenticator: " + authenticator);
-      getLogWriter().info("testAllowPutsGets: Using accessor: " + accessor);
+      LogWriterUtils.getLogWriter().info("testAllowPutsGets: Using accessor: " + accessor);
 
       // Start servers with all required properties
       Properties serverProps = buildProperties(authenticator, accessor, false,
@@ -232,7 +234,7 @@ public class ClientAuthorizationDUnitTest extends ClientAuthorizationTestBase {
           new OperationCode[] { OperationCode.PUT },
           new String[] { regionName }, 1);
       javaProps = cGen.getJavaProperties();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testAllowPutsGets: For first client credentials: "
               + createCredentials);
       client1.invoke(ClientAuthenticationDUnitTest.class, "createCacheClient",
@@ -244,7 +246,7 @@ public class ClientAuthorizationDUnitTest extends ClientAuthorizationTestBase {
           new OperationCode[] { OperationCode.GET },
           new String[] { regionName }, 2);
       javaProps = cGen.getJavaProperties();
-      getLogWriter()
+      LogWriterUtils.getLogWriter()
           .info(
               "testAllowPutsGets: For second client credentials: "
                   + getCredentials);
@@ -272,10 +274,10 @@ public class ClientAuthorizationDUnitTest extends ClientAuthorizationTestBase {
       String authInit = cGen.getAuthInit();
       String accessor = gen.getAuthorizationCallback();
 
-      getLogWriter().info("testDisallowPutsGets: Using authinit: " + authInit);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info("testDisallowPutsGets: Using authinit: " + authInit);
+      LogWriterUtils.getLogWriter().info(
           "testDisallowPutsGets: Using authenticator: " + authenticator);
-      getLogWriter().info("testDisallowPutsGets: Using accessor: " + accessor);
+      LogWriterUtils.getLogWriter().info("testDisallowPutsGets: Using accessor: " + accessor);
 
       // Check that we indeed can obtain valid credentials not allowed to do
       // gets
@@ -283,7 +285,7 @@ public class ClientAuthorizationDUnitTest extends ClientAuthorizationTestBase {
           new OperationCode[] { OperationCode.PUT },
           new String[] { regionName }, 1);
       Properties createJavaProps = cGen.getJavaProperties();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testDisallowPutsGets: For first client credentials: "
               + createCredentials);
       Properties getCredentials = gen.getDisallowedCredentials(
@@ -291,7 +293,7 @@ public class ClientAuthorizationDUnitTest extends ClientAuthorizationTestBase {
           new String[] { regionName }, 2);
       Properties getJavaProps = cGen.getJavaProperties();
 
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testDisallowPutsGets: For second client disallowed GET credentials: "
               + getCredentials);
 
@@ -328,7 +330,7 @@ public class ClientAuthorizationDUnitTest extends ClientAuthorizationTestBase {
           new OperationCode[] { OperationCode.GET },
           new String[] { regionName }, 5);
       getJavaProps = cGen.getJavaProperties();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testDisallowPutsGets: For second client with GET credentials: "
               + getCredentials);
       client2.invoke(ClientAuthenticationDUnitTest.class, "createCacheClient",
@@ -354,8 +356,8 @@ public class ClientAuthorizationDUnitTest extends ClientAuthorizationTestBase {
       String authInit = cGen.getAuthInit();
       String accessor = gen.getAuthorizationCallback();
 
-      getLogWriter().info("testInvalidAccessor: Using authinit: " + authInit);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info("testInvalidAccessor: Using authinit: " + authInit);
+      LogWriterUtils.getLogWriter().info(
           "testInvalidAccessor: Using authenticator: " + authenticator);
 
       // Start server1 with invalid accessor
@@ -372,14 +374,14 @@ public class ClientAuthorizationDUnitTest extends ClientAuthorizationTestBase {
           new OperationCode[] { OperationCode.PUT },
           new String[] { regionName }, 3);
       Properties createJavaProps = cGen.getJavaProperties();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testInvalidAccessor: For first client CREATE credentials: "
               + createCredentials);
       Properties getCredentials = gen.getAllowedCredentials(
           new OperationCode[] { OperationCode.GET },
           new String[] { regionName }, 7);
       Properties getJavaProps = cGen.getJavaProperties();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testInvalidAccessor: For second client GET credentials: "
               + getCredentials);
       client1.invoke(ClientAuthenticationDUnitTest.class, "createCacheClient",
@@ -396,7 +398,7 @@ public class ClientAuthorizationDUnitTest extends ClientAuthorizationTestBase {
           new Integer(1), new Integer(SecurityTestUtil.AUTHFAIL_EXCEPTION) });
 
       // Now start server2 that has valid accessor
-      getLogWriter().info("testInvalidAccessor: Using accessor: " + accessor);
+      LogWriterUtils.getLogWriter().info("testInvalidAccessor: Using accessor: " + accessor);
       serverProps = buildProperties(authenticator, accessor, false,
           extraAuthProps, extraAuthzProps);
       server2.invoke(ClientAuthorizationTestBase.class, "createCacheServer",
@@ -431,11 +433,11 @@ public class ClientAuthorizationDUnitTest extends ClientAuthorizationTestBase {
       String authInit = cGen.getAuthInit();
       String accessor = gen.getAuthorizationCallback();
 
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testPutsGetsWithFailover: Using authinit: " + authInit);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testPutsGetsWithFailover: Using authenticator: " + authenticator);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testPutsGetsWithFailover: Using accessor: " + accessor);
 
       // Start servers with all required properties
@@ -454,7 +456,7 @@ public class ClientAuthorizationDUnitTest extends ClientAuthorizationTestBase {
           new OperationCode[] { OperationCode.PUT },
           new String[] { regionName }, 1);
       Properties createJavaProps = cGen.getJavaProperties();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testPutsGetsWithFailover: For first client credentials: "
               + createCredentials);
       client1.invoke(ClientAuthenticationDUnitTest.class, "createCacheClient",
@@ -466,7 +468,7 @@ public class ClientAuthorizationDUnitTest extends ClientAuthorizationTestBase {
           new OperationCode[] { OperationCode.GET },
           new String[] { regionName }, 5);
       Properties getJavaProps = cGen.getJavaProperties();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testPutsGetsWithFailover: For second client credentials: "
               + getCredentials);
       client2.invoke(ClientAuthenticationDUnitTest.class, "createCacheClient",
@@ -499,7 +501,7 @@ public class ClientAuthorizationDUnitTest extends ClientAuthorizationTestBase {
           new String[] { regionName }, 9);
       getJavaProps = cGen.getJavaProperties();
 
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testPutsGetsWithFailover: For second client disallowed GET credentials: "
               + noGetCredentials);
 
@@ -624,10 +626,10 @@ public class ClientAuthorizationDUnitTest extends ClientAuthorizationTestBase {
     String authInit = cGen.getAuthInit();
     String accessor = gen.getAuthorizationCallback();
 
-    getLogWriter().info("testAllOpsWithFailover: Using authinit: " + authInit);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info("testAllOpsWithFailover: Using authinit: " + authInit);
+    LogWriterUtils.getLogWriter().info(
         "testAllOpsWithFailover: Using authenticator: " + authenticator);
-    getLogWriter().info("testAllOpsWithFailover: Using accessor: " + accessor);
+    LogWriterUtils.getLogWriter().info("testAllOpsWithFailover: Using accessor: " + accessor);
 
     // Start servers with all required properties
     Properties serverProps = buildProperties(authenticator, accessor, false,
@@ -679,7 +681,7 @@ public class ClientAuthorizationDUnitTest extends ClientAuthorizationTestBase {
 
   
   public void testAllOpsWithFailover() {
-    addExpectedException("Read timed out");
+    IgnoredException.addIgnoredException("Read timed out");
 
     OperationWithAction[] allOps = {
         // Test CREATE and verify with a GET
@@ -783,9 +785,7 @@ public class ClientAuthorizationDUnitTest extends ClientAuthorizationTestBase {
   // End Region: Tests
 
   @Override
-  public void tearDown2() throws Exception {
-
-    super.tearDown2();
+  protected final void preTearDown() throws Exception {
     // close the clients first
     client1.invoke(SecurityTestUtil.class, "closeCache");
     client2.invoke(SecurityTestUtil.class, "closeCache");
@@ -794,5 +794,4 @@ public class ClientAuthorizationDUnitTest extends ClientAuthorizationTestBase {
     server1.invoke(SecurityTestUtil.class, "closeCache");
     server2.invoke(SecurityTestUtil.class, "closeCache");
   }
-
 }
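
The hunks above replace every getLogWriter() call these security tests used to inherit from DistributedTestCase with the extracted LogWriterUtils helper. A minimal sketch of the new call site, assuming the gemfire-core DUnit test classes are on the classpath (the class and message below are illustrative, not part of the commit):

    import com.gemstone.gemfire.test.dunit.LogWriterUtils;

    final class LogWriterUsageSketch {
      // LogWriterUtils.getLogWriter() stands in for the getLogWriter() that was
      // previously inherited from DistributedTestCase.
      static void logCredentials(String testName, Object credentials) {
        LogWriterUtils.getLogWriter().info(
            testName + ": using credentials: " + credentials);
      }
    }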

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/security/ClientAuthorizationTestBase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/security/ClientAuthorizationTestBase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/security/ClientAuthorizationTestBase.java
index 4796203..665867f 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/security/ClientAuthorizationTestBase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/security/ClientAuthorizationTestBase.java
@@ -59,8 +59,12 @@ import com.gemstone.gemfire.internal.AvailablePort.Keeper;
 import com.gemstone.gemfire.internal.cache.AbstractRegionEntry;
 import com.gemstone.gemfire.internal.cache.LocalRegion;
 import com.gemstone.gemfire.internal.util.Callable;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 import security.DummyCredentialGenerator;
 import security.XmlAuthzCredentialGenerator;
@@ -263,7 +267,7 @@ public class ClientAuthorizationTestBase extends DistributedTestCase {
       policy = InterestResultPolicy.NONE;
     }
     final int numOps = indices.length;
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Got doOp for op: " + op.toString() + ", numOps: " + numOps
             + ", indices: " + indicesToString(indices) + ", expect: " + expectedResult);
     boolean exceptionOccured = false;
@@ -303,7 +307,7 @@ public class ClientAuthorizationTestBase extends DistributedTestCase {
               // server
               if ((flags & OpFlags.CHECK_NOKEY) > 0) {
                 AbstractRegionEntry entry = (AbstractRegionEntry)((LocalRegion)region).getRegionEntry(searchKey);
-                getLogWriter().info(""+keyNum+": key is " + searchKey + " and entry is " + entry);
+                LogWriterUtils.getLogWriter().info(""+keyNum+": key is " + searchKey + " and entry is " + entry);
                 assertFalse(region.containsKey(searchKey));
               }
               else {
@@ -561,7 +565,7 @@ public class ClientAuthorizationTestBase extends DistributedTestCase {
                 return null;
               }
             };
-            DistributedTestCase.waitForCriterion(ev, 3 * 1000, 200, true);
+            Wait.waitForCriterion(ev, 3 * 1000, 200, true);
             if ((flags & OpFlags.CHECK_FAIL) > 0) {
               assertEquals(0, listener.getNumUpdates());
             }
@@ -644,7 +648,7 @@ public class ClientAuthorizationTestBase extends DistributedTestCase {
               }
               catch (RegionDestroyedException ex) {
                 // harmless to ignore this
-                getLogWriter().info(
+                LogWriterUtils.getLogWriter().info(
                     "doOp: sub-region " + region.getFullPath()
                         + " already destroyed");
                 operationOmitted = true;
@@ -671,20 +675,20 @@ public class ClientAuthorizationTestBase extends DistributedTestCase {
             || ex instanceof QueryInvocationTargetException || ex instanceof CqException)
             && (expectedResult.intValue() == SecurityTestUtil.NOTAUTHZ_EXCEPTION)
             && (ex.getCause() instanceof NotAuthorizedException)) {
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "doOp: Got expected NotAuthorizedException when doing operation ["
                   + op + "] with flags " + OpFlags.description(flags) 
                   + ": " + ex.getCause());
           continue;
         }
         else if (expectedResult.intValue() == SecurityTestUtil.OTHER_EXCEPTION) {
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "doOp: Got expected exception when doing operation: "
                   + ex.toString());
           continue;
         }
         else {
-          fail("doOp: Got unexpected exception when doing operation. Policy = " 
+          Assert.fail("doOp: Got unexpected exception when doing operation. Policy = " 
               + policy + " flags = " + OpFlags.description(flags), ex);
         }
       }
@@ -724,7 +728,7 @@ public class ClientAuthorizationTestBase extends DistributedTestCase {
           fail("executeOpBlock: Unknown client number " + clientNum);
           break;
       }
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "executeOpBlock: performing operation number ["
               + currentOp.getOpNum() + "]: " + currentOp);
       if ((opFlags & OpFlags.USE_OLDCONN) == 0) {
@@ -760,7 +764,7 @@ public class ClientAuthorizationTestBase extends DistributedTestCase {
                 extraAuthzProps });
         // Start the client with valid credentials but allowed or disallowed to
         // perform an operation
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "executeOpBlock: For client" + clientNum + credentialsTypeStr
                 + " credentials: " + opCredentials);
         boolean setupDynamicRegionFactory = (opFlags & OpFlags.ENABLE_DRF) > 0;
@@ -844,9 +848,9 @@ public class ClientAuthorizationTestBase extends DistributedTestCase {
       String accessor = gen.getAuthorizationCallback();
       TestAuthzCredentialGenerator tgen = new TestAuthzCredentialGenerator(gen);
 
-      getLogWriter().info(testName + ": Using authinit: " + authInit);
-      getLogWriter().info(testName + ": Using authenticator: " + authenticator);
-      getLogWriter().info(testName + ": Using accessor: " + accessor);
+      LogWriterUtils.getLogWriter().info(testName + ": Using authinit: " + authInit);
+      LogWriterUtils.getLogWriter().info(testName + ": Using authenticator: " + authenticator);
+      LogWriterUtils.getLogWriter().info(testName + ": Using accessor: " + accessor);
 
       // Start servers with all required properties
       Properties serverProps = buildProperties(authenticator, accessor, false,
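
The waitForCriterion call in the hunk above now goes through the extracted Wait utility with the same arguments (criterion, timeout in ms, polling interval in ms, throwOnTimeout). A compilable sketch of the pattern, assuming the DUnit WaitCriterion interface exposes the usual done()/description() pair, which is not shown in this diff and should be checked against the source:

    import java.util.concurrent.atomic.AtomicInteger;

    import com.gemstone.gemfire.test.dunit.Wait;
    import com.gemstone.gemfire.test.dunit.WaitCriterion;

    final class WaitUsageSketch {
      static void waitForUpdates(final AtomicInteger updates, final int expected) {
        WaitCriterion ev = new WaitCriterion() {
          // done()/description() are assumed from the DUnit WaitCriterion contract
          public boolean done() {
            return updates.get() >= expected;
          }
          public String description() {
            return "expected " + expected + " updates, saw " + updates.get();
          }
        };
        // same signature as the replacement above: 3 s timeout, 200 ms polling
        Wait.waitForCriterion(ev, 3 * 1000, 200, true);
      }
    }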

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/security/ClientMultiUserAuthzDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/security/ClientMultiUserAuthzDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/security/ClientMultiUserAuthzDUnitTest.java
index 9120a1f..f175d98 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/security/ClientMultiUserAuthzDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/security/ClientMultiUserAuthzDUnitTest.java
@@ -35,6 +35,7 @@ import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.execute.PRClientServerTestBase;
 import com.gemstone.gemfire.internal.cache.functions.TestFunction;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 
 public class ClientMultiUserAuthzDUnitTest extends ClientAuthorizationTestBase {
 
@@ -75,10 +76,10 @@ public class ClientMultiUserAuthzDUnitTest extends ClientAuthorizationTestBase {
       String authInit = cGen.getAuthInit();
       String accessor = gen.getAuthorizationCallback();
 
-      getLogWriter().info("testOps1: Using authinit: " + authInit);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info("testOps1: Using authinit: " + authInit);
+      LogWriterUtils.getLogWriter().info(
           "testOps1: Using authenticator: " + authenticator);
-      getLogWriter().info("testOps1: Using accessor: " + accessor);
+      LogWriterUtils.getLogWriter().info("testOps1: Using accessor: " + accessor);
 
       // Start servers with all required properties
       Properties serverProps = buildProperties(authenticator, accessor, false,
@@ -149,21 +150,21 @@ public class ClientMultiUserAuthzDUnitTest extends ClientAuthorizationTestBase {
         gen.getDisallowedCredentials(new OperationCode[] {client1OpCodes[1]},
             new String[] {regionName}, 1)};
     if (client1Credentials[0] == null || client1Credentials[0].size() == 0) {
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testOps1: Unable to obtain valid credentials with "
               + client1OpCodes[0].toString()
               + " permission; skipping this combination.");
       return false;
     }
     if (client1Credentials[1] == null || client1Credentials[1].size() == 0) {
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testOps1: Unable to obtain valid credentials with no "
               + client1OpCodes[0].toString()
               + " permission; skipping this combination.");
       return false;
     }
     javaProps = cGen.getJavaProperties();
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "testOps1: For first client credentials: " + client1Credentials[0]
             + "\n" + client1Credentials[1]);
     client1.invoke(SecurityTestUtil.class, "createCacheClientForMultiUserMode",
@@ -178,21 +179,21 @@ public class ClientMultiUserAuthzDUnitTest extends ClientAuthorizationTestBase {
         gen.getDisallowedCredentials(client2OpCodes,
             new String[] {regionName}, 9)};
     if (client2Credentials[0] == null || client2Credentials[0].size() == 0) {
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testOps1: Unable to obtain valid credentials with "
               + client2OpCodes[0].toString()
               + " permission; skipping this combination.");
       return false;
     }
     if (client2Credentials[1] == null || client2Credentials[1].size() == 0) {
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testOps1: Unable to obtain valid credentials with no "
               + client2OpCodes[0].toString()
               + " permission; skipping this combination.");
       return false;
     }
     javaProps = cGen.getJavaProperties();
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "testOps1: For second client credentials: " + client2Credentials[0]
             + "\n" + client2Credentials[1]);
     if (bothClientsInMultiuserMode) {
@@ -354,9 +355,9 @@ public class ClientMultiUserAuthzDUnitTest extends ClientAuthorizationTestBase {
       String authInit = cGen.getAuthInit();
       String accessor = gen.getAuthorizationCallback();
 
-      getLogWriter().info("testOps2: Using authinit: " + authInit);
-      getLogWriter().info("testOps2: Using authenticator: " + authenticator);
-      getLogWriter().info("testOps2: Using accessor: " + accessor);
+      LogWriterUtils.getLogWriter().info("testOps2: Using authinit: " + authInit);
+      LogWriterUtils.getLogWriter().info("testOps2: Using authenticator: " + authenticator);
+      LogWriterUtils.getLogWriter().info("testOps2: Using accessor: " + accessor);
 
       // Start servers with all required properties
       Properties serverProps = buildProperties(authenticator, accessor, false,
@@ -383,7 +384,7 @@ public class ClientMultiUserAuthzDUnitTest extends ClientAuthorizationTestBase {
       };
 
       javaProps = cGen.getJavaProperties();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testOps2: For first client credentials: " + client1Credentials[0]
               + "\n" + client1Credentials[1]);
       client1.invoke(SecurityTestUtil.class,
@@ -400,7 +401,7 @@ public class ClientMultiUserAuthzDUnitTest extends ClientAuthorizationTestBase {
               new String[] {regionName}, 9)};
 
       javaProps = cGen.getJavaProperties();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testOps2: For second client credentials: " + client2Credentials[0]
               + "\n" + client2Credentials[1]);
       client2.invoke(SecurityTestUtil.class,
@@ -484,10 +485,10 @@ public class ClientMultiUserAuthzDUnitTest extends ClientAuthorizationTestBase {
       String authInit = cGen.getAuthInit();
       String accessor = gen.getAuthorizationCallback();
 
-      getLogWriter().info("testOpsWithClientsInDifferentModes: Using authinit: " + authInit);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info("testOpsWithClientsInDifferentModes: Using authinit: " + authInit);
+      LogWriterUtils.getLogWriter().info(
           "testOpsWithClientsInDifferentModes: Using authenticator: " + authenticator);
-      getLogWriter().info("testOpsWithClientsInDifferentModes: Using accessor: " + accessor);
+      LogWriterUtils.getLogWriter().info("testOpsWithClientsInDifferentModes: Using accessor: " + accessor);
 
       // Start servers with all required properties
       Properties serverProps = buildProperties(authenticator, accessor, false,
@@ -521,9 +522,8 @@ public class ClientMultiUserAuthzDUnitTest extends ClientAuthorizationTestBase {
 
   // End Region: Tests
 
-  public void tearDown2() throws Exception {
-
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     // close the clients first
     client1.invoke(SecurityTestUtil.class, "closeCache");
     client2.invoke(SecurityTestUtil.class, "closeCache");
@@ -532,5 +532,4 @@ public class ClientMultiUserAuthzDUnitTest extends ClientAuthorizationTestBase {
     server1.invoke(SecurityTestUtil.class, "closeCache");
     server2.invoke(SecurityTestUtil.class, "closeCache");
   }
-
 }
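
As in the other security tests in this commit, the tearDown2() override (which had to remember to call super.tearDown2() itself) gives way to the preTearDown() hook, so DistributedTestCase keeps control of the overall teardown sequence. A sketch of the resulting shape, reusing the client/server VM fields these tests already declare (constructors and field setup omitted):

    @Override
    protected final void preTearDown() throws Exception {
      // close the clients first, then the servers; DistributedTestCase runs the
      // rest of its teardown after this hook returns
      client1.invoke(SecurityTestUtil.class, "closeCache");
      client2.invoke(SecurityTestUtil.class, "closeCache");
      server1.invoke(SecurityTestUtil.class, "closeCache");
      server2.invoke(SecurityTestUtil.class, "closeCache");
    }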

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/security/DeltaClientAuthorizationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/security/DeltaClientAuthorizationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/security/DeltaClientAuthorizationDUnitTest.java
index 42fe897..2b44631 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/security/DeltaClientAuthorizationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/security/DeltaClientAuthorizationDUnitTest.java
@@ -32,7 +32,9 @@ import com.gemstone.gemfire.cache.client.NoAvailableServersException;
 import com.gemstone.gemfire.cache.client.ServerConnectivityException;
 import com.gemstone.gemfire.cache.operations.OperationContext.OperationCode;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionLocalMaxMemoryDUnitTest.TestObject1;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 
 /**
  * @since 6.1
@@ -113,8 +115,8 @@ public class DeltaClientAuthorizationDUnitTest extends
     SecurityTestUtil.registerExpectedExceptions(clientExpectedExceptions);
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     // close the clients first
     client1.invoke(SecurityTestUtil.class, "closeCache");
     client2.invoke(SecurityTestUtil.class, "closeCache");
@@ -134,10 +136,10 @@ public class DeltaClientAuthorizationDUnitTest extends
       String authInit = cGen.getAuthInit();
       String accessor = gen.getAuthorizationCallback();
 
-      getLogWriter().info("testAllowPutsGets: Using authinit: " + authInit);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info("testAllowPutsGets: Using authinit: " + authInit);
+      LogWriterUtils.getLogWriter().info(
           "testAllowPutsGets: Using authenticator: " + authenticator);
-      getLogWriter().info("testAllowPutsGets: Using accessor: " + accessor);
+      LogWriterUtils.getLogWriter().info("testAllowPutsGets: Using accessor: " + accessor);
 
       // Start servers with all required properties
       Properties serverProps = buildProperties(authenticator, accessor, false,
@@ -154,7 +156,7 @@ public class DeltaClientAuthorizationDUnitTest extends
           new OperationCode[] { OperationCode.PUT },
           new String[] { regionName }, 1);
       javaProps = cGen.getJavaProperties();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testAllowPutsGets: For first client credentials: "
               + createCredentials);
       client1.invoke(ClientAuthenticationDUnitTest.class, "createCacheClient",
@@ -166,7 +168,7 @@ public class DeltaClientAuthorizationDUnitTest extends
           new OperationCode[] { OperationCode.GET },
           new String[] { regionName }, 2);
       javaProps = cGen.getJavaProperties();
-      getLogWriter()
+      LogWriterUtils.getLogWriter()
           .info(
               "testAllowPutsGets: For second client credentials: "
                   + getCredentials);
@@ -196,10 +198,10 @@ public class DeltaClientAuthorizationDUnitTest extends
     }
     catch (Exception ex) {
       if (expectedResult.intValue() == SecurityTestUtil.OTHER_EXCEPTION) {
-        getLogWriter().info("Got expected exception when doing puts: " + ex);
+        LogWriterUtils.getLogWriter().info("Got expected exception when doing puts: " + ex);
       }
       else {
-        fail("Got unexpected exception when doing puts", ex);
+        Assert.fail("Got unexpected exception when doing puts", ex);
       }
     }
     for (int index = 0; index < num.intValue(); ++index) {
@@ -214,50 +216,50 @@ public class DeltaClientAuthorizationDUnitTest extends
       }
       catch (NoAvailableServersException ex) {
         if (expectedResult.intValue() == SecurityTestUtil.NO_AVAILABLE_SERVERS) {
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "Got expected NoAvailableServers when doing puts: "
                   + ex.getCause());
           continue;
         }
         else {
-          fail("Got unexpected exception when doing puts", ex);
+          Assert.fail("Got unexpected exception when doing puts", ex);
         }
       }
       catch (ServerConnectivityException ex) {
         if ((expectedResult.intValue() == SecurityTestUtil.NOTAUTHZ_EXCEPTION)
             && (ex.getCause() instanceof NotAuthorizedException)) {
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "Got expected NotAuthorizedException when doing puts: "
                   + ex.getCause());
           continue;
         }
         if ((expectedResult.intValue() == SecurityTestUtil.AUTHREQ_EXCEPTION)
             && (ex.getCause() instanceof AuthenticationRequiredException)) {
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "Got expected AuthenticationRequiredException when doing puts: "
                   + ex.getCause());
           continue;
         }
         if ((expectedResult.intValue() == SecurityTestUtil.AUTHFAIL_EXCEPTION)
             && (ex.getCause() instanceof AuthenticationFailedException)) {
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "Got expected AuthenticationFailedException when doing puts: "
                   + ex.getCause());
           continue;
         }
         else if (expectedResult.intValue() == SecurityTestUtil.OTHER_EXCEPTION) {
-          getLogWriter().info("Got expected exception when doing puts: " + ex);
+          LogWriterUtils.getLogWriter().info("Got expected exception when doing puts: " + ex);
         }
         else {
-          fail("Got unexpected exception when doing puts", ex);
+          Assert.fail("Got unexpected exception when doing puts", ex);
         }
       }
       catch (Exception ex) {
         if (expectedResult.intValue() == SecurityTestUtil.OTHER_EXCEPTION) {
-          getLogWriter().info("Got expected exception when doing puts: " + ex);
+          LogWriterUtils.getLogWriter().info("Got expected exception when doing puts: " + ex);
         }
         else {
-          fail("Got unexpected exception when doing puts", ex);
+          Assert.fail("Got unexpected exception when doing puts", ex);
         }
       }
     }
@@ -274,10 +276,10 @@ public class DeltaClientAuthorizationDUnitTest extends
     }
     catch (Exception ex) {
       if (expectedResult.intValue() == SecurityTestUtil.OTHER_EXCEPTION) {
-        getLogWriter().info("Got expected exception when doing gets: " + ex);
+        LogWriterUtils.getLogWriter().info("Got expected exception when doing gets: " + ex);
       }
       else {
-        fail("Got unexpected exception when doing gets", ex);
+        Assert.fail("Got unexpected exception when doing gets", ex);
       }
     }
     for (int index = 0; index < num.intValue(); ++index) {
@@ -295,36 +297,36 @@ public class DeltaClientAuthorizationDUnitTest extends
       }
       catch(NoAvailableServersException ex) {
         if(expectedResult.intValue() == SecurityTestUtil.NO_AVAILABLE_SERVERS) {
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "Got expected NoAvailableServers when doing puts: "
               + ex.getCause());
           continue;
         }
         else {
-          fail("Got unexpected exception when doing puts", ex);
+          Assert.fail("Got unexpected exception when doing puts", ex);
         }
       }
       catch (ServerConnectivityException ex) {
         if ((expectedResult.intValue() == SecurityTestUtil.NOTAUTHZ_EXCEPTION)
             && (ex.getCause() instanceof NotAuthorizedException)) {
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "Got expected NotAuthorizedException when doing gets: "
                   + ex.getCause());
           continue;
         }
         else if (expectedResult.intValue() == SecurityTestUtil.OTHER_EXCEPTION) {
-          getLogWriter().info("Got expected exception when doing gets: " + ex);
+          LogWriterUtils.getLogWriter().info("Got expected exception when doing gets: " + ex);
         }
         else {
-          fail("Got unexpected exception when doing gets", ex);
+          Assert.fail("Got unexpected exception when doing gets", ex);
         }
       }
       catch (Exception ex) {
         if (expectedResult.intValue() == SecurityTestUtil.OTHER_EXCEPTION) {
-          getLogWriter().info("Got expected exception when doing gets: " + ex);
+          LogWriterUtils.getLogWriter().info("Got expected exception when doing gets: " + ex);
         }
         else {
-          fail("Got unexpected exception when doing gets", ex);
+          Assert.fail("Got unexpected exception when doing gets", ex);
         }
       }
       assertNotNull(value);
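
The two-argument fail(message, cause) used by this test now lives on the DUnit Assert class, while the plain one-argument JUnit fail(message) calls are left untouched by these hunks. A small sketch of the distinction with the exception handling reduced to the essentials (the map stands in for the region; names are illustrative):

    import java.util.Map;

    import com.gemstone.gemfire.test.dunit.Assert;

    final class FailureHandlingSketch {
      static void putOrFail(Map<String, String> region, String key, String value) {
        try {
          region.put(key, value);
        } catch (RuntimeException ex) {
          // Assert.fail(String, Throwable) keeps the cause attached to the failure,
          // which the single-argument JUnit fail(String) cannot do
          Assert.fail("Got unexpected exception when doing puts", ex);
        }
      }
    }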

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/security/DeltaClientPostAuthorizationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/security/DeltaClientPostAuthorizationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/security/DeltaClientPostAuthorizationDUnitTest.java
index 94b603e..fe3cec6 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/security/DeltaClientPostAuthorizationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/security/DeltaClientPostAuthorizationDUnitTest.java
@@ -40,7 +40,10 @@ import com.gemstone.gemfire.cache.query.CqException;
 import com.gemstone.gemfire.cache.query.QueryInvocationTargetException;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.util.Callable;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -74,9 +77,8 @@ public class DeltaClientPostAuthorizationDUnitTest extends
     SecurityTestUtil.registerExpectedExceptions(clientExpectedExceptions);
   }
 
-  public void tearDown2() throws Exception {
-
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     // close the clients first
     client1.invoke(SecurityTestUtil.class, "closeCache");
     client2.invoke(SecurityTestUtil.class, "closeCache");
@@ -87,8 +89,8 @@ public class DeltaClientPostAuthorizationDUnitTest extends
   }
 
   public void testPutPostOpNotifications() throws Exception {
-    addExpectedException("Unexpected IOException");
-    addExpectedException("SocketException");
+    IgnoredException.addIgnoredException("Unexpected IOException");
+    IgnoredException.addIgnoredException("SocketException");
 
     OperationWithAction[] allOps = {
         // Test CREATE and verify with a GET
@@ -134,11 +136,11 @@ public class DeltaClientPostAuthorizationDUnitTest extends
       String accessor = gen.getAuthorizationCallback();
       TestAuthzCredentialGenerator tgen = new TestAuthzCredentialGenerator(gen);
 
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testAllOpsNotifications: Using authinit: " + authInit);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testAllOpsNotifications: Using authenticator: " + authenticator);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "testAllOpsNotifications: Using accessor: " + accessor);
 
       // Start servers with all required properties
@@ -217,7 +219,7 @@ public class DeltaClientPostAuthorizationDUnitTest extends
           fail("executeOpBlock: Unknown client number " + clientNum);
           break;
       }
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "executeOpBlock: performing operation number ["
               + currentOp.getOpNum() + "]: " + currentOp);
       if ((opFlags & OpFlags.USE_OLDCONN) == 0) {
@@ -253,7 +255,7 @@ public class DeltaClientPostAuthorizationDUnitTest extends
                 extraAuthzProps });
         // Start the client with valid credentials but allowed or disallowed to
         // perform an operation
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "executeOpBlock: For client" + clientNum + credentialsTypeStr
                 + " credentials: " + opCredentials);
         boolean setupDynamicRegionFactory = (opFlags & OpFlags.ENABLE_DRF) > 0;
@@ -374,7 +376,7 @@ public class DeltaClientPostAuthorizationDUnitTest extends
       policy = InterestResultPolicy.NONE;
     }
     final int numOps = indices.length;
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Got doOp for op: " + op.toString() + ", numOps: " + numOps
             + ", indices: " + indicesToString(indices) + ", expect: " + expectedResult);
     boolean exceptionOccured = false;
@@ -512,20 +514,20 @@ public class DeltaClientPostAuthorizationDUnitTest extends
             || ex instanceof QueryInvocationTargetException || ex instanceof CqException)
             && (expectedResult.intValue() == SecurityTestUtil.NOTAUTHZ_EXCEPTION)
             && (ex.getCause() instanceof NotAuthorizedException)) {
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "doOp: Got expected NotAuthorizedException when doing operation ["
                   + op + "] with flags " + OpFlags.description(flags) 
                   + ": " + ex.getCause());
           continue;
         }
         else if (expectedResult.intValue() == SecurityTestUtil.OTHER_EXCEPTION) {
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "doOp: Got expected exception when doing operation: "
                   + ex.toString());
           continue;
         }
         else {
-          fail("doOp: Got unexpected exception when doing operation. Policy = " 
+          Assert.fail("doOp: Got unexpected exception when doing operation. Policy = " 
               + policy + " flags = " + OpFlags.description(flags), ex);
         }
       }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/security/P2PAuthenticationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/security/P2PAuthenticationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/security/P2PAuthenticationDUnitTest.java
index 5ba7044..07bd7c7 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/security/P2PAuthenticationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/security/P2PAuthenticationDUnitTest.java
@@ -40,7 +40,11 @@ import com.gemstone.gemfire.distributed.internal.membership.gms.MembershipManage
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 /**
  * Tests peer to peer authentication in Gemfire
@@ -94,7 +98,7 @@ public class P2PAuthenticationDUnitTest extends DistributedTestCase {
     Properties props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME, "26753");
     props.setProperty(DistributionConfig.LOCATORS_NAME, 
-                      DistributedTestCase.getIPLiteral() + "[" + port + "]");
+                      NetworkUtils.getIPLiteral() + "[" + port + "]");
     props.setProperty(DistributionConfig.SECURITY_PEER_AUTH_INIT_NAME,
         "templates.security.UserPasswordAuthInit.create");
     props.setProperty(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "false");
@@ -111,7 +115,7 @@ public class P2PAuthenticationDUnitTest extends DistributedTestCase {
     props = new Properties();
     props.setProperty(DistributionConfig.MCAST_PORT_NAME, "26753");
     props.setProperty(DistributionConfig.LOCATORS_NAME, 
-                      DistributedTestCase.getIPLiteral() +"[" + port + "]");
+                      NetworkUtils.getIPLiteral() +"[" + port + "]");
     props.setProperty(DistributionConfig.SECURITY_PEER_AUTHENTICATOR_NAME,
         "templates.security.LdapUserAuthenticator.create");
     props.setProperty(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "false");
@@ -162,7 +166,7 @@ public class P2PAuthenticationDUnitTest extends DistributedTestCase {
     }
     String authInit = " Incorrect_AuthInitialize";
     int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String locators = DistributedTestCase.getIPLiteral() + "[" + port + "]";
+    final String locators = NetworkUtils.getIPLiteral() + "[" + port + "]";
     props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
     props.setProperty(DistributionConfig.LOCATORS_NAME, locators);
     setProperty(props, DistributionConfig.SECURITY_PEER_AUTH_INIT_NAME,
@@ -173,7 +177,7 @@ public class P2PAuthenticationDUnitTest extends DistributedTestCase {
             getUniqueName(), new Integer(port), props, javaProps,
             expectedExceptions});
 
-    LogWriter dsLogger = createLogWriter(props);
+    LogWriter dsLogger = LogWriterUtils.createLogWriter(props);
     SecurityTestUtil.addExpectedExceptions(expectedExceptions, dsLogger);
     try {
       new SecurityTestUtil("tmp").createSystem(props, null);
@@ -200,7 +204,7 @@ public class P2PAuthenticationDUnitTest extends DistributedTestCase {
       props = new Properties();
     }
     int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String locators = DistributedTestCase.getIPLiteral() +"["+port+"]";
+    final String locators = NetworkUtils.getIPLiteral() +"["+port+"]";
     props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
     props.setProperty(DistributionConfig.LOCATORS_NAME, locators);
     setProperty(props, DistributionConfig.SECURITY_PEER_AUTH_INIT_NAME,
@@ -211,7 +215,7 @@ public class P2PAuthenticationDUnitTest extends DistributedTestCase {
             getUniqueName(), new Integer(port), props, javaProps,
             expectedExceptions });
 
-    LogWriter dsLogger = createLogWriter(props);
+    LogWriter dsLogger = LogWriterUtils.createLogWriter(props);
     SecurityTestUtil.addExpectedExceptions(expectedExceptions, dsLogger);
     try {
       new SecurityTestUtil("tmp").createSystem(props, javaProps);
@@ -240,7 +244,7 @@ public class P2PAuthenticationDUnitTest extends DistributedTestCase {
       props = new Properties();
     }
     int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String locators = DistributedTestCase.getIPLiteral() +"["+port+"]";
+    final String locators = NetworkUtils.getIPLiteral() +"["+port+"]";
     props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
     props.setProperty(DistributionConfig.LOCATORS_NAME, locators);
     setProperty(props, DistributionConfig.SECURITY_PEER_AUTH_INIT_NAME,
@@ -251,7 +255,7 @@ public class P2PAuthenticationDUnitTest extends DistributedTestCase {
             getUniqueName(), new Integer(port), props, javaProps,
             expectedExceptions });
 
-    LogWriter dsLogger = createLogWriter(props);
+    LogWriter dsLogger = LogWriterUtils.createLogWriter(props);
     SecurityTestUtil.addExpectedExceptions(expectedExceptions, dsLogger);
     try {
       new SecurityTestUtil("tmp").createSystem(props, null);
@@ -278,7 +282,7 @@ public class P2PAuthenticationDUnitTest extends DistributedTestCase {
       props = new Properties();
     }
     int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String locators = DistributedTestCase.getIPLiteral() +"["+port+"]";
+    final String locators = NetworkUtils.getIPLiteral() +"["+port+"]";
     props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
     props.setProperty(DistributionConfig.LOCATORS_NAME, locators);
     setProperty(props, DistributionConfig.SECURITY_PEER_AUTH_INIT_NAME,
@@ -306,7 +310,7 @@ public class P2PAuthenticationDUnitTest extends DistributedTestCase {
       throws Exception {
 
     disconnectAllFromDS();
-    addExpectedException("Authentication failed");
+    IgnoredException.addIgnoredException("Authentication failed");
 
     CredentialGenerator gen = new DummyCredentialGenerator();
     Properties props = gen.getSystemProperties();
@@ -316,7 +320,7 @@ public class P2PAuthenticationDUnitTest extends DistributedTestCase {
       props = new Properties();
     }
     int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String locators = DistributedTestCase.getIPLiteral() +"["+port+"]";
+    final String locators = NetworkUtils.getIPLiteral() +"["+port+"]";
     props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
     props.setProperty(DistributionConfig.LOCATORS_NAME, locators);
     setProperty(props, DistributionConfig.SECURITY_PEER_AUTH_INIT_NAME,
@@ -336,7 +340,7 @@ public class P2PAuthenticationDUnitTest extends DistributedTestCase {
       javaProps = gen.getJavaProperties();
       props.putAll(credentials);
 
-      LogWriter dsLogger = createLogWriter(props);
+      LogWriter dsLogger = LogWriterUtils.createLogWriter(props);
       SecurityTestUtil.addExpectedExceptions(expectedExceptions, dsLogger);
       try {
         new SecurityTestUtil("tmp").createSystem(props, javaProps);
@@ -398,7 +402,7 @@ public class P2PAuthenticationDUnitTest extends DistributedTestCase {
     // Start the locator with the LDAP authenticator
     Properties props = new Properties();
     int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String locators = DistributedTestCase.getIPLiteral() +"["+port+"]";
+    final String locators = NetworkUtils.getIPLiteral() +"["+port+"]";
     setProperty(props, DistributionConfig.SECURITY_PEER_AUTH_INIT_NAME,
         authInit);
     setProperty(props, DistributionConfig.SECURITY_PEER_AUTHENTICATOR_NAME,
@@ -446,7 +450,7 @@ public class P2PAuthenticationDUnitTest extends DistributedTestCase {
         props, javaProps });
 
     // wait for view propagation
-    pause(2000);
+    Wait.pause(2000);
     // Verify the number of members on all peers and locator
     locatorVM.invoke(P2PAuthenticationDUnitTest.class, "verifyMembers",
         new Object[] { new Integer(4) });
@@ -458,7 +462,7 @@ public class P2PAuthenticationDUnitTest extends DistributedTestCase {
 
     // Disconnect the first peer and check again
     disconnectFromDS();
-    pause(2000);
+    Wait.pause(2000);
     locatorVM.invoke(P2PAuthenticationDUnitTest.class, "verifyMembers",
         new Object[] { new Integer(3) });
     peer2.invoke(P2PAuthenticationDUnitTest.class, "verifyMembers",
@@ -468,7 +472,7 @@ public class P2PAuthenticationDUnitTest extends DistributedTestCase {
 
     // Disconnect the second peer and check again
     peer2.invoke(DistributedTestCase.class, "disconnectFromDS");
-    pause(2000);
+    Wait.pause(2000);
     locatorVM.invoke(P2PAuthenticationDUnitTest.class, "verifyMembers",
         new Object[] { new Integer(2) });
     peer3.invoke(P2PAuthenticationDUnitTest.class, "verifyMembers",
@@ -476,7 +480,7 @@ public class P2PAuthenticationDUnitTest extends DistributedTestCase {
 
     // Same for last peer
     peer3.invoke(DistributedTestCase.class, "disconnectFromDS");
-    pause(2000);
+    Wait.pause(2000);
     locatorVM.invoke(P2PAuthenticationDUnitTest.class, "verifyMembers",
         new Object[] { new Integer(1) });
 
@@ -512,7 +516,7 @@ public class P2PAuthenticationDUnitTest extends DistributedTestCase {
     // Start the locator with the Dummy authenticator
     Properties props = new Properties();
     int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String locators = DistributedTestCase.getIPLiteral() +"["+port+"]";
+    final String locators = NetworkUtils.getIPLiteral() +"["+port+"]";
     setProperty(props, DistributionConfig.SECURITY_PEER_AUTH_INIT_NAME,
         authInit);
     setProperty(props, DistributionConfig.SECURITY_PEER_AUTHENTICATOR_NAME,
@@ -548,7 +552,7 @@ public class P2PAuthenticationDUnitTest extends DistributedTestCase {
     props.putAll(credentials);
     props.putAll(extraProps);
 
-    LogWriter dsLogger = createLogWriter(props);
+    LogWriter dsLogger = LogWriterUtils.createLogWriter(props);
     SecurityTestUtil.addExpectedExceptions(
         new String[] { IllegalArgumentException.class.getName() }, dsLogger);
     try {
@@ -580,7 +584,7 @@ public class P2PAuthenticationDUnitTest extends DistributedTestCase {
         props, javaProps });
 
     // wait for view propagation
-    pause(2000);
+    Wait.pause(2000);
     // Verify the number of members on all peers and locator
     locatorVM.invoke(P2PAuthenticationDUnitTest.class, "verifyMembers",
         new Object[] { new Integer(4) });
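
In P2PAuthenticationDUnitTest the locator string is now built with NetworkUtils.getIPLiteral() instead of the static on DistributedTestCase, and the view-propagation waits go through Wait.pause(). A condensed sketch of that setup, assuming the same gemfire-core classes (the helper class is illustrative):

    import java.util.Properties;

    import com.gemstone.gemfire.distributed.internal.DistributionConfig;
    import com.gemstone.gemfire.internal.AvailablePort;
    import com.gemstone.gemfire.test.dunit.NetworkUtils;
    import com.gemstone.gemfire.test.dunit.Wait;

    final class LocatorConfigSketch {
      static Properties locatorProperties() {
        int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
        String locators = NetworkUtils.getIPLiteral() + "[" + port + "]";
        Properties props = new Properties();
        props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
        props.setProperty(DistributionConfig.LOCATORS_NAME, locators);
        return props;
      }

      static void waitForViewPropagation() {
        // fixed sleep the test uses while the membership view settles
        Wait.pause(2000);
      }
    }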


[35/62] [abbrv] [partial] incubator-geode git commit: GEODE-773: Extract static methods from DistributedTestCase

Posted by je...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClientServerGetAllDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClientServerGetAllDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClientServerGetAllDUnitTest.java
index b34b115..f7f633c 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClientServerGetAllDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClientServerGetAllDUnitTest.java
@@ -40,8 +40,12 @@ import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.internal.offheap.SimpleMemoryAllocatorImpl;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -57,8 +61,7 @@ import com.gemstone.gemfire.test.dunit.VM;
   }
 
   @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  protected final void postTearDownCacheTestCase() throws Exception {
     disconnectAllFromDS();
   }
 
@@ -69,7 +72,7 @@ import com.gemstone.gemfire.test.dunit.VM;
     final String regionName = getUniqueName();
     final int mcastPort = 0; /* loner is ok for this test*/ //AvailablePort.getRandomAvailablePort(AvailablePort.JGROUPS);
     final int serverPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String serverHost = getServerHostName(server.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server.getHost());
 
     createBridgeServer(server, regionName, serverPort, false, false);
 
@@ -120,7 +123,7 @@ import com.gemstone.gemfire.test.dunit.VM;
     final VM client = host.getVM(1);
     final String regionName = getUniqueName();
     final int serverPort = AvailablePortHelper.getRandomAvailableTCPPort();
-    final String serverHost = getServerHostName(server.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server.getHost());
 
     createBridgeServer(server, regionName, serverPort, false, false, true/*offheap*/);
 
@@ -172,7 +175,7 @@ import com.gemstone.gemfire.test.dunit.VM;
     final VM client = host.getVM(1);
     final String regionName = getUniqueName();
     final int serverPort = AvailablePortHelper.getRandomAvailableTCPPort();
-    final String serverHost = getServerHostName(server.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server.getHost());
 
     createBridgeServer(server, regionName, serverPort, false, false, true/*offheap*/);
 
@@ -284,7 +287,7 @@ import com.gemstone.gemfire.test.dunit.VM;
     final String regionName = getUniqueName();
     final int mcastPort = 0; /* loner is ok for this test*/ //AvailablePort.getRandomAvailablePort(AvailablePort.JGROUPS);
     final int serverPort = AvailablePortHelper.getRandomAvailableTCPPort();
-    final String serverHost = getServerHostName(server.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server.getHost());
 
     createBridgeServer(server, regionName, serverPort, false, false);
 
@@ -395,7 +398,7 @@ import com.gemstone.gemfire.test.dunit.VM;
     final VM client = host.getVM(1);
     final String regionName = getUniqueName();
     final int serverPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String serverHost = getServerHostName(server.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server.getHost());
 
     createBridgeServer(server, regionName, serverPort, false, true);
 
@@ -457,7 +460,7 @@ import com.gemstone.gemfire.test.dunit.VM;
     int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(2);
     final int server1Port = ports[0];
     final int server2Port = ports[1];
-    final String serverHost = getServerHostName(server1.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server1.getHost());
 
     createBridgeServer(server1, regionName, server1Port, true, false);
 
@@ -522,7 +525,7 @@ import com.gemstone.gemfire.test.dunit.VM;
     final VM client = host.getVM(1);
     final String regionName = getUniqueName();
     final int serverPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String serverHost = getServerHostName(server.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server.getHost());
 
     createBridgeServer(server, regionName, serverPort, false, false);
 
@@ -575,7 +578,7 @@ import com.gemstone.gemfire.test.dunit.VM;
     });
 
     // client may see "server unreachable" exceptions after this
-    addExpectedException("Server unreachable", client);
+    IgnoredException.addIgnoredException("Server unreachable", client);
     stopBridgeServer(server);
   }
   
@@ -585,7 +588,7 @@ import com.gemstone.gemfire.test.dunit.VM;
     final VM client = host.getVM(1);
     final String regionName = getUniqueName();
     final int serverPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String serverHost = getServerHostName(server.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server.getHost());
     final int numLocalValues = 101;
     
     createBridgeServerWithoutLoader(server, regionName, serverPort, false);
@@ -666,7 +669,7 @@ import com.gemstone.gemfire.test.dunit.VM;
       public void run2() throws CacheException {
         // Create DS
         Properties config = new Properties();
-        config.setProperty("locators", "localhost["+getDUnitLocatorPort()+"]");
+        config.setProperty("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
         if (offheap) {
           config.setProperty(DistributionConfig.OFF_HEAP_MEMORY_SIZE_NAME, "350m");
         }
@@ -701,7 +704,7 @@ import com.gemstone.gemfire.test.dunit.VM;
           bridge.setMaxThreads(offheap ? 16 : getMaxThreads());
           bridge.start();
         } catch (Exception e) {
-          fail("While starting CacheServer", e);
+          Assert.fail("While starting CacheServer", e);
         }
       }
     });
@@ -730,7 +733,7 @@ import com.gemstone.gemfire.test.dunit.VM;
       public void run2() throws CacheException {
         // Create DS
         Properties config = new Properties();
-        config.setProperty("locators", "localhost["+getDUnitLocatorPort()+"]");
+        config.setProperty("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
         getSystem(config);
 
         // Create Region
@@ -750,7 +753,7 @@ import com.gemstone.gemfire.test.dunit.VM;
           startBridgeServer(serverPort);
           System.out.println("Started bridger server ");
         } catch (Exception e) {
-          fail("While starting CacheServer", e);
+          Assert.fail("While starting CacheServer", e);
         }
       }
     });

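The hunks above follow one mechanical pattern: helpers these DUnit tests used to inherit from DistributedTestCase (getServerHostName, getDUnitLocatorPort, fail-with-cause) are now called through dedicated static utility classes. A minimal sketch of the new call shape, assuming only the com.gemstone.gemfire.test.dunit classes this patch imports; the sketch class and method names are illustrative, not part of the commit:

    import java.util.Properties;

    import com.gemstone.gemfire.test.dunit.Assert;
    import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
    import com.gemstone.gemfire.test.dunit.Host;
    import com.gemstone.gemfire.test.dunit.NetworkUtils;

    public class StaticUtilUsageSketch {

      // old: final String serverHost = getServerHostName(server.getHost());
      String serverHostOf(Host host) {
        return NetworkUtils.getServerHostName(host);
      }

      // old: config.setProperty("locators", "localhost[" + getDUnitLocatorPort() + "]");
      void pointAtDUnitLocator(Properties config) {
        config.setProperty("locators",
            "localhost[" + DistributedTestUtils.getDUnitLocatorPort() + "]");
      }

      // old: fail("While starting CacheServer", e);  -- the two-arg fail keeps the cause
      void failWithCause(Exception e) {
        Assert.fail("While starting CacheServer", e);
      }
    }
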
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClientServerInvalidAndDestroyedEntryDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClientServerInvalidAndDestroyedEntryDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClientServerInvalidAndDestroyedEntryDUnitTest.java
index a733753..e5ec301 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClientServerInvalidAndDestroyedEntryDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClientServerInvalidAndDestroyedEntryDUnitTest.java
@@ -39,7 +39,9 @@ import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.internal.cache.tier.InterestType;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableCallableIF;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
@@ -141,7 +143,7 @@ public class ClientServerInvalidAndDestroyedEntryDUnitTest extends CacheTestCase
             server.start();
           }
           catch (IOException e) {
-            fail("Failed to start server ", e);
+            Assert.fail("Failed to start server ", e);
           }
         }
         if (usePR) {
@@ -190,10 +192,10 @@ public class ClientServerInvalidAndDestroyedEntryDUnitTest extends CacheTestCase
         myRegion.invalidate(key2);
       }
     });
-    getLogWriter().info("creating client cache");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("creating client cache");
     ClientCache c = new ClientCacheFactory()
                     .addPoolServer("localhost", serverPort)
-                    .set(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel())
+                    .set(DistributionConfig.LOG_LEVEL_NAME, LogWriterUtils.getDUnitLogLevel())
                     .create();
     Region myRegion = c.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY).create(regionName);;
     if (useTX) {
@@ -204,7 +206,7 @@ public class ClientServerInvalidAndDestroyedEntryDUnitTest extends CacheTestCase
     assertNotNull(myRegion.get(notAffectedKey));
     
     // get of an invalid entry should return null and create the entry in an invalid state
-    getLogWriter().info("getting "+key1+" - should reach this cache and be INVALID");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("getting "+key1+" - should reach this cache and be INVALID");
     assertNull(myRegion.get(key1));
     assertTrue(myRegion.containsKey(key1));
     
@@ -261,7 +263,7 @@ public class ClientServerInvalidAndDestroyedEntryDUnitTest extends CacheTestCase
     // test that a listener is not invoked when there is already an invalidated
     // entry in the client cache
     UpdateListener listener = new UpdateListener();
-    listener.log = getLogWriter();
+    listener.log = com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter();
     myRegion.getAttributesMutator().addCacheListener(listener);
     myRegion.get(key1);
     assertEquals("expected no cache listener invocations",
@@ -308,10 +310,10 @@ public class ClientServerInvalidAndDestroyedEntryDUnitTest extends CacheTestCase
         myRegion.destroy(key2);
       }
     });
-    getLogWriter().info("creating client cache");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("creating client cache");
     ClientCache c = new ClientCacheFactory()
                     .addPoolServer("localhost", serverPort)
-                    .set(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel())
+                    .set(DistributionConfig.LOG_LEVEL_NAME, LogWriterUtils.getDUnitLogLevel())
                     .create();
     Region myRegion = c.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY).create(regionName);;
     if (useTX) {
@@ -320,7 +322,7 @@ public class ClientServerInvalidAndDestroyedEntryDUnitTest extends CacheTestCase
     // get of a valid entry should work
     assertNotNull(myRegion.get(notAffectedKey));
     // get of an invalid entry should return null and create the entry in an invalid state
-    getLogWriter().info("getting "+key1+" - should reach this cache and be a TOMBSTONE");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("getting "+key1+" - should reach this cache and be a TOMBSTONE");
     assertNull(myRegion.get(key1));
     assertFalse(myRegion.containsKey(key1));
     RegionEntry entry;
@@ -373,7 +375,7 @@ public class ClientServerInvalidAndDestroyedEntryDUnitTest extends CacheTestCase
     keys.add(notAffectedKey); keys.add(key1); keys.add(key2);
     Map result = myRegion.getAll(keys);
     
-    getLogWriter().info("result of getAll = " + result);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("result of getAll = " + result);
     assertNotNull(result.get(notAffectedKey));
     assertNull(result.get(key1));
     assertNull(result.get(key2));
@@ -430,10 +432,10 @@ public class ClientServerInvalidAndDestroyedEntryDUnitTest extends CacheTestCase
         }
       }
     });
-    getLogWriter().info("creating client cache");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("creating client cache");
     ClientCache c = new ClientCacheFactory()
                     .addPoolServer("localhost", serverPort)
-                    .set(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel())
+                    .set(DistributionConfig.LOG_LEVEL_NAME, LogWriterUtils.getDUnitLogLevel())
                     .setPoolSubscriptionEnabled(true)
                     .create();
     
@@ -456,7 +458,7 @@ public class ClientServerInvalidAndDestroyedEntryDUnitTest extends CacheTestCase
           BucketRegion bucket = ((PartitionedRegion)myRegion).getBucketRegion(key10);
           if (bucket != null) {
             event.setRegion(bucket);
-            getLogWriter().info("performing local destroy in " + bucket + " ccEnabled="+bucket.concurrencyChecksEnabled + " rvv="+bucket.getVersionVector());
+            com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("performing local destroy in " + bucket + " ccEnabled="+bucket.concurrencyChecksEnabled + " rvv="+bucket.getVersionVector());
             bucket.concurrencyChecksEnabled = false; // turn off cc so entry is removed
             bucket.mapDestroy(event, false, false, null);
             bucket.concurrencyChecksEnabled = true;

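Besides the Assert.fail change, the only other substitution in this file is the logging helpers: getLogWriter() and getDUnitLogLevel() now live on LogWriterUtils. A short sketch of the new usage, assuming the dunit LogWriterUtils class imported above; the wrapper method and its port parameter are illustrative only:

    import com.gemstone.gemfire.cache.client.ClientCache;
    import com.gemstone.gemfire.cache.client.ClientCacheFactory;
    import com.gemstone.gemfire.test.dunit.LogWriterUtils;

    public class LogWriterUtilsSketch {

      ClientCache createClient(int serverPort) {
        // old: getLogWriter().info("creating client cache");
        LogWriterUtils.getLogWriter().info("creating client cache");
        return new ClientCacheFactory()
            .addPoolServer("localhost", serverPort)
            // old: .set(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel())
            .set("log-level", LogWriterUtils.getDUnitLogLevel())
            .create();
      }
    }
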
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClientServerTransactionCCEDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClientServerTransactionCCEDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClientServerTransactionCCEDUnitTest.java
index 0ac6f67..6f76fc8 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClientServerTransactionCCEDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClientServerTransactionCCEDUnitTest.java
@@ -24,6 +24,8 @@ import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.partition.PartitionRegionHelper;
 import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedMember;
 import com.gemstone.gemfire.internal.cache.versions.VersionTag;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -37,10 +39,10 @@ public class ClientServerTransactionCCEDUnitTest extends
   
   public void setUp() throws Exception {
     super.setUp();
-    addExpectedException("Connection reset");
-    addExpectedException("SocketTimeoutException");
-    addExpectedException("ServerConnectivityException");
-    addExpectedException("Socket Closed");
+    IgnoredException.addIgnoredException("Connection reset");
+    IgnoredException.addIgnoredException("SocketTimeoutException");
+    IgnoredException.addIgnoredException("ServerConnectivityException");
+    IgnoredException.addIgnoredException("Socket Closed");
 
   }
   /**
@@ -97,7 +99,7 @@ public class ClientServerTransactionCCEDUnitTest extends
     for (Object key : clientTags.keySet()) {
       VersionTag serverTag = serverTags.get(key);
       serverTag.setMemberID(serverId);
-      getLogWriter().fine("SWAP:key:"+key+" clientVersion:"+clientTags.get(key)+" serverVersion:"+serverTag);
+      LogWriterUtils.getLogWriter().fine("SWAP:key:"+key+" clientVersion:"+clientTags.get(key)+" serverVersion:"+serverTag);
       assertEquals(clientTags.get(key), serverTags.get(key));
       serverTags.remove(key);
     }

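The setUp hunk above shows the renamed expected-exception API: addExpectedException becomes the static IgnoredException.addIgnoredException, which registers a suspect string with the dunit log scanner and returns a handle. A hedged sketch of both the fire-and-forget and the scoped form, assuming the IgnoredException API as it is used in this patch (its remove() call appears in a later file); everything else here is illustrative:

    import com.gemstone.gemfire.test.dunit.IgnoredException;

    public class IgnoredExceptionSketch {

      public void setUp() throws Exception {
        // fire-and-forget: ignore these suspect strings for the rest of the test
        IgnoredException.addIgnoredException("Connection reset");
        IgnoredException.addIgnoredException("Socket Closed");
      }

      void scopedIgnore(Runnable noisyWork) {
        IgnoredException ie = IgnoredException.addIgnoredException("ForcedDisconnect");
        try {
          noisyWork.run(); // phase expected to log the suspect string
        } finally {
          ie.remove();     // stop ignoring once the noisy phase is over
        }
      }
    }
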
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClientServerTransactionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClientServerTransactionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClientServerTransactionDUnitTest.java
index b5a475e..add43a0 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClientServerTransactionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ClientServerTransactionDUnitTest.java
@@ -16,8 +16,9 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
+import static com.gemstone.gemfire.test.dunit.LogWriterUtils.*;
+
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -31,16 +32,38 @@ import java.util.concurrent.TimeUnit;
 import javax.naming.Context;
 import javax.transaction.UserTransaction;
 
-import com.gemstone.gemfire.LogWriter;
-import com.gemstone.gemfire.cache.*;
-import org.junit.Ignore;
-
+import com.gemstone.gemfire.cache.AttributesFactory;
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.CacheFactory;
+import com.gemstone.gemfire.cache.CacheListener;
+import com.gemstone.gemfire.cache.CacheLoader;
+import com.gemstone.gemfire.cache.CacheLoaderException;
+import com.gemstone.gemfire.cache.CacheTransactionManager;
+import com.gemstone.gemfire.cache.CacheWriterException;
+import com.gemstone.gemfire.cache.CommitConflictException;
+import com.gemstone.gemfire.cache.DataPolicy;
+import com.gemstone.gemfire.cache.EntryEvent;
+import com.gemstone.gemfire.cache.InterestPolicy;
+import com.gemstone.gemfire.cache.LoaderHelper;
+import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.Region.Entry;
+import com.gemstone.gemfire.cache.RegionFactory;
+import com.gemstone.gemfire.cache.RegionShortcut;
+import com.gemstone.gemfire.cache.Scope;
+import com.gemstone.gemfire.cache.SubscriptionAttributes;
+import com.gemstone.gemfire.cache.TransactionDataNodeHasDepartedException;
+import com.gemstone.gemfire.cache.TransactionDataNotColocatedException;
+import com.gemstone.gemfire.cache.TransactionEvent;
+import com.gemstone.gemfire.cache.TransactionException;
+import com.gemstone.gemfire.cache.TransactionId;
+import com.gemstone.gemfire.cache.TransactionInDoubtException;
+import com.gemstone.gemfire.cache.TransactionWriter;
+import com.gemstone.gemfire.cache.TransactionWriterException;
+import com.gemstone.gemfire.cache.UnsupportedOperationInTransactionException;
 import com.gemstone.gemfire.cache.client.ClientCache;
 import com.gemstone.gemfire.cache.client.ClientCacheFactory;
 import com.gemstone.gemfire.cache.client.ClientRegionFactory;
 import com.gemstone.gemfire.cache.client.ClientRegionShortcut;
-import com.gemstone.gemfire.cache.client.Pool;
 import com.gemstone.gemfire.cache.client.PoolFactory;
 import com.gemstone.gemfire.cache.client.PoolManager;
 import com.gemstone.gemfire.cache.execute.Execution;
@@ -65,11 +88,13 @@ import com.gemstone.gemfire.internal.cache.execute.data.OrderId;
 import com.gemstone.gemfire.internal.cache.execute.util.CommitFunction;
 import com.gemstone.gemfire.internal.cache.execute.util.RollbackFunction;
 import com.gemstone.gemfire.internal.cache.tx.ClientTXStateStub;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Tests the basic client-server transaction functionality
@@ -88,7 +113,7 @@ public class ClientServerTransactionDUnitTest extends RemoteTransactionDUnitTest
   
   public void setUp() throws Exception {
     super.setUp();
-    addExpectedException("java.net.SocketException");
+    IgnoredException.addIgnoredException("java.net.SocketException");
   }
 
   private Integer createRegionsAndStartServer(VM vm, boolean accessor) {
@@ -338,10 +363,10 @@ public class ClientServerTransactionDUnitTest extends RemoteTransactionDUnitTest
       public Object call() throws Exception {
         final TXManagerImpl txmgr = getGemfireCache().getTxManager();
         try {
-          waitForCriterion(new WaitCriterion() {
+          Wait.waitForCriterion(new WaitCriterion() {
             public boolean done() {
               Set states = txmgr.getTransactionsForClient((InternalDistributedMember)myId);
-              getLogWriter().info("found " + states.size() + " tx states for " + myId);
+              com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("found " + states.size() + " tx states for " + myId);
               return states.isEmpty();
             }
             public String description() {
@@ -398,17 +423,17 @@ public class ClientServerTransactionDUnitTest extends RemoteTransactionDUnitTest
         assertEquals(initSize, pr.size());
         assertEquals(initSize, r.size());
 
-        getLogWriter().info("Looking up transaction manager");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Looking up transaction manager");
         TXManagerImpl mgr = (TXManagerImpl) getCache().getCacheTransactionManager();
         Context ctx = getCache().getJNDIContext();
         UserTransaction utx = (UserTransaction)ctx.lookup("java:/UserTransaction");
-        getLogWriter().info("starting transaction");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("starting transaction");
         if (useJTA) {
           utx.begin();
         } else {
           mgr.begin();
         }
-        getLogWriter().info("done starting transaction");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("done starting transaction");
         for (int i = 0; i < MAX_ENTRIES; i++) {
           CustId custId = new CustId(i);
           Customer cust = new Customer("name"+suffix+i, "address"+suffix+i);
@@ -420,10 +445,10 @@ public class ClientServerTransactionDUnitTest extends RemoteTransactionDUnitTest
           Customer cust = new Customer("name"+suffix+i, "address"+suffix+i);
           assertEquals(cust, r.get(custId));
           assertEquals(cust, pr.get(custId));
-          getLogWriter().info("SWAP:get:"+r.get(custId));
-          getLogWriter().info("SWAP:get:"+pr.get(custId));
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("SWAP:get:"+r.get(custId));
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("SWAP:get:"+pr.get(custId));
         }
-        getLogWriter().info("suspending transaction");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("suspending transaction");
         if (!useJTA) {
           TXStateProxy tx = mgr.internalSuspend();
           if (prePopulateData) {
@@ -438,7 +463,7 @@ public class ClientServerTransactionDUnitTest extends RemoteTransactionDUnitTest
             assertNull(r.get(new CustId(i)));
             assertNull(pr.get(new CustId(i)));
           }
-          getLogWriter().info("resuming transaction");
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("resuming transaction");
           mgr.resume(tx);
         }
         assertEquals(
@@ -447,13 +472,13 @@ public class ClientServerTransactionDUnitTest extends RemoteTransactionDUnitTest
         assertEquals(
             "pr sized should be " + MAX_ENTRIES + " but it is:" + pr.size(),
             MAX_ENTRIES, pr.size());
-        getLogWriter().info("committing transaction");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("committing transaction");
         if (useJTA) {
           utx.commit();
         } else {
           getCache().getCacheTransactionManager().commit();
         }
-        getLogWriter().info("done committing transaction");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("done committing transaction");
         assertEquals(
             "r sized should be " + MAX_ENTRIES + " but it is:" + r.size(),
             MAX_ENTRIES, r.size());
@@ -548,7 +573,7 @@ public class ClientServerTransactionDUnitTest extends RemoteTransactionDUnitTest
             return "waiting for hosted tx in progress to terminate";
           }
         };
-        waitForCriterion(w, 10000, 200, true);
+        Wait.waitForCriterion(w, 10000, 200, true);
         return null;
       }
     });
@@ -683,7 +708,7 @@ public class ClientServerTransactionDUnitTest extends RemoteTransactionDUnitTest
         Order order = (orderRegion.getAll(keys)).get(orderId);
         assertNotNull(order);
         mgr.rollback();
-        getLogWriter().info("entry for " + orderId + " = " + orderRegion.getEntry(orderId));
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("entry for " + orderId + " = " + orderRegion.getEntry(orderId));
         assertNull(orderRegion.getEntry(orderId));
         return null;
       }
@@ -970,7 +995,7 @@ public void testClientCommitAndDataStoreGetsEvent() throws Exception {
 //	        Region<OrderId, Order> orderRegion = getCache().getRegion(ORDER);
 //	        Region<CustId,Customer> refRegion = getCache().getRegion(D_REFERENCE);
 	        final ClientListener cl = (ClientListener) custRegion.getAttributes().getCacheListeners()[0];
-	        waitForCriterion(new WaitCriterion() {
+	        Wait.waitForCriterion(new WaitCriterion() {
                   
                   @Override
                   public boolean done() {
@@ -1405,7 +1430,7 @@ public void testClientCommitAndDataStoreGetsEvent() throws Exception {
     VM server2 = host.getVM(1);
     VM client = host.getVM(2);
     
-    addExpectedException("java.net.SocketException");
+    IgnoredException.addIgnoredException("java.net.SocketException");
     
     final int port1 = createRegionsAndStartServer(server1, true);
     final int port2 = createRegionsAndStartServer(server2, false);
@@ -2665,7 +2690,7 @@ public void testClientCommitAndDataStoreGetsEvent() throws Exception {
     final int port2 = createRegionsAndStartServer(server2, false);
     final int port = createRegionsAndStartServer(server1, true);
     
-    addExpectedException("ClassCastException");
+    IgnoredException.addIgnoredException("ClassCastException");
     SerializableRunnable suspectStrings = new SerializableRunnable("suspect string") {
       public void run() {
         InternalDistributedSystem.getLoggerI18n().convertToLogWriter().info(
@@ -2699,7 +2724,7 @@ public void testClientCommitAndDataStoreGetsEvent() throws Exception {
               );
 
           Region cust = getCache().getRegion(CUSTOMER);
-          getLogWriter().fine("SWAP:doing first get from client");
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().fine("SWAP:doing first get from client");
           assertNull(cust.get(new CustId(0)));
           assertNull(cust.get(new CustId(1)));
           ArrayList args = new ArrayList();
@@ -2762,8 +2787,8 @@ public void testClientCommitAndDataStoreGetsEvent() throws Exception {
     doFunctionWithFailureWork(false);
   }
   private void doFunctionWithFailureWork(final boolean commit) {
-    addExpectedException("TransactionDataNodeHasDepartedException");
-    addExpectedException("ClassCastException");
+    IgnoredException.addIgnoredException("TransactionDataNodeHasDepartedException");
+    IgnoredException.addIgnoredException("ClassCastException");
     Host host = Host.getHost(0);
     VM server1 = host.getVM(0);
     VM server2 = host.getVM(1);
@@ -2946,7 +2971,7 @@ public void testClientCommitAndDataStoreGetsEvent() throws Exception {
         };
         // tx should timeout after 1 ms but to deal with loaded machines and thread
         // scheduling latency wait for 10 seconds before reporting an error.
-        DistributedTestCase.waitForCriterion(waitForTxTimeout, 10 * 1000, 10, true);
+        Wait.waitForCriterion(waitForTxTimeout, 10 * 1000, 10, true);
         try {
           mgr.resume(txId);
           fail("expected exception not thrown");
@@ -3353,7 +3378,7 @@ public void testClientCommitAndDataStoreGetsEvent() throws Exception {
                 return "expected:"+keys+" found:"+clientListener.keys;
               }
             };
-            DistributedTestCase.waitForCriterion(wc, 30*1000, 500, true);
+            Wait.waitForCriterion(wc, 30*1000, 500, true);
           }
         }
         assertTrue(foundListener);

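Both waitForCriterion variants in the hunks above (the inherited instance call and DistributedTestCase.waitForCriterion) converge on Wait.waitForCriterion with the same four arguments. A minimal sketch, assuming the Wait and WaitCriterion classes this file imports; the condition polled here is illustrative:

    import java.util.Set;

    import com.gemstone.gemfire.test.dunit.Wait;
    import com.gemstone.gemfire.test.dunit.WaitCriterion;

    public class WaitForCriterionSketch {

      void awaitDrained(final Set<?> txStates) {
        WaitCriterion wc = new WaitCriterion() {
          public boolean done() {
            return txStates.isEmpty();   // condition polled until it holds
          }
          public String description() {
            return "waiting for " + txStates.size() + " tx states to drain";
          }
        };
        // poll every 200 ms; after 10 s the final 'true' makes the wait fail the test
        Wait.waitForCriterion(wc, 10 * 1000, 200, true);
      }
    }
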
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentDestroySubRegionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentDestroySubRegionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentDestroySubRegionDUnitTest.java
index 0611f63..2a7c0b5 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentDestroySubRegionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentDestroySubRegionDUnitTest.java
@@ -23,6 +23,7 @@ import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.RegionDestroyedException;
 import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.cache30.CacheTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
@@ -101,7 +102,7 @@ public class ConcurrentDestroySubRegionDUnitTest extends CacheTestCase {
         vm1.invoke(createChild);
       } catch(Exception e) {
         if(!(e.getCause() instanceof RegionDestroyedException)) {
-          fail("Wrong exception", e);
+          Assert.fail("Wrong exception", e);
         }
         RegionDestroyedException rde = (RegionDestroyedException) e.getCause();
         assertEquals("Error on loop " + i, "/region", rde.getRegionFullPath());
@@ -160,7 +161,7 @@ public class ConcurrentDestroySubRegionDUnitTest extends CacheTestCase {
         vm1.invoke(createChild);
       } catch(Exception e) {
         if(!(e.getCause() instanceof RegionDestroyedException)) {
-          fail("Wrong exception", e);
+          Assert.fail("Wrong exception", e);
         }
         RegionDestroyedException rde = (RegionDestroyedException) e.getCause();
         assertEquals("Error on loop " + i, "/region", rde.getRegionFullPath());

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentMapOpsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentMapOpsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentMapOpsDUnitTest.java
index df0ea7f..44b89de 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentMapOpsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentMapOpsDUnitTest.java
@@ -49,11 +49,14 @@ import com.gemstone.gemfire.distributed.DistributedMember;
 import com.gemstone.gemfire.distributed.internal.membership.MembershipManager;
 import com.gemstone.gemfire.distributed.internal.membership.gms.MembershipManagerHelper;
 import com.gemstone.gemfire.internal.AvailablePort;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * tests for the concurrentMapOperations. there are more tests in ClientServerMiscDUnitTest
@@ -145,7 +148,7 @@ public class ConcurrentMapOpsDUnitTest extends CacheTestCase {
           ccf.addPoolServer("localhost", port2);
         }
         ccf.setPoolSubscriptionEnabled(true);
-        ccf.set("log-level", getDUnitLogLevel());
+        ccf.set("log-level", LogWriterUtils.getDUnitLogLevel());
         ClientCache cCache = getClientCache(ccf);
         ClientRegionFactory<Integer, String> crf = cCache
             .createClientRegionFactory(isEmpty ? ClientRegionShortcut.PROXY
@@ -275,7 +278,7 @@ public class ConcurrentMapOpsDUnitTest extends CacheTestCase {
                 return "Client expected to get "+MAX_ENTRIES+" creates, but got "+initialCreatesListener.numCreates.get();
               }
             };
-            DistributedTestCase.waitForCriterion(wc, 30*1000, 500, true);
+            Wait.waitForCriterion(wc, 30*1000, 500, true);
           }
         }
         if (!listenerFound) {
@@ -409,7 +412,7 @@ public class ConcurrentMapOpsDUnitTest extends CacheTestCase {
             return "timeout "+e;
           }
         };
-        DistributedTestCase.waitForCriterion(wc, 30000, 1000, true);
+        Wait.waitForCriterion(wc, 30000, 1000, true);
         return null;
       }
     });
@@ -457,7 +460,7 @@ public class ConcurrentMapOpsDUnitTest extends CacheTestCase {
             return "timeout "+e.getMessage();
           }
         };
-        DistributedTestCase.waitForCriterion(wc, 30000, 1000, true);
+        Wait.waitForCriterion(wc, 30000, 1000, true);
         return null;
       }
     });
@@ -957,7 +960,7 @@ public class ConcurrentMapOpsDUnitTest extends CacheTestCase {
             return r.containsKey(key);
           }
         };
-        waitForCriterion(w, 10000, 200, true);
+        Wait.waitForCriterion(w, 10000, 200, true);
         assertTrue(r.containsKeyOnServer(key));
         boolean result = r.remove(key, null);
 //        if (!result) {
@@ -1046,7 +1049,7 @@ public class ConcurrentMapOpsDUnitTest extends CacheTestCase {
     final int port2 = createRegionsAndStartServer(server2, true);
     final String regionName = usePR? PR_REG_NAME : REP_REG_NAME;
 
-    addExpectedException("java.net.SocketException");
+    IgnoredException.addIgnoredException("java.net.SocketException");
     
     createClientRegion(client, port1, false, port2);
     
@@ -1059,11 +1062,11 @@ public class ConcurrentMapOpsDUnitTest extends CacheTestCase {
     final DistributedMember server1ID = (DistributedMember)server1.invoke(getID);
     final DistributedMember server2ID = (DistributedMember)server2.invoke(getID);
     
-    Set<ExpectedException> exceptions = new HashSet<ExpectedException>();
-    exceptions.add(addExpectedException("Membership: requesting removal", server1));
-    exceptions.add(addExpectedException("Membership: requesting removal", server2));
-    exceptions.add(addExpectedException("ForcedDisconnect", server1));
-    exceptions.add(addExpectedException("ForcedDisconnect", server2));
+    Set<IgnoredException> exceptions = new HashSet<IgnoredException>();
+    exceptions.add(IgnoredException.addIgnoredException("Membership: requesting removal", server1));
+    exceptions.add(IgnoredException.addIgnoredException("Membership: requesting removal", server2));
+    exceptions.add(IgnoredException.addIgnoredException("ForcedDisconnect", server1));
+    exceptions.add(IgnoredException.addIgnoredException("ForcedDisconnect", server2));
     
     try {
 
@@ -1164,7 +1167,7 @@ public class ConcurrentMapOpsDUnitTest extends CacheTestCase {
       });
     } finally {
       disconnectAllFromDS();
-      for (ExpectedException ex: exceptions) {
+      for (IgnoredException ex: exceptions) {
         ex.remove();
       }
     }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentRegionOperationsJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentRegionOperationsJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentRegionOperationsJUnitTest.java
index 55ce901..833ef11 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentRegionOperationsJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentRegionOperationsJUnitTest.java
@@ -39,7 +39,7 @@ import org.junit.experimental.categories.Category;
 import com.gemstone.gemfire.cache.AttributesFactory;
 import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.Scope;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
@@ -444,8 +444,8 @@ public class ConcurrentRegionOperationsJUnitTest extends DiskRegionTestingBase
     });
     t1.start();
     t2.start();
-    DistributedTestCase.join(t1, 30 * 1000, null);
-    DistributedTestCase.join(t2, 30 * 1000, null);
+    ThreadUtils.join(t1, 30 * 1000);
+    ThreadUtils.join(t2, 30 * 1000);
     assertTrue(!failure);
 
   }
@@ -540,19 +540,19 @@ public class ConcurrentRegionOperationsJUnitTest extends DiskRegionTestingBase
       this.timeToStop.set(true);
     }
     for (int i = 0; i < numberOfPutsThreads; i++) {
-      DistributedTestCase.join(putThreads[i], 10*1000, null);
+      ThreadUtils.join(putThreads[i], 10*1000);
     }
     for (int i = 0; i < numberOfGetsThreads; i++) {
-      DistributedTestCase.join(getThreads[i], 10*1000, null);
+      ThreadUtils.join(getThreads[i], 10*1000);
     }
     for (int i = 0; i < numberOfDestroysThreads; i++) {
-      DistributedTestCase.join(destroyThreads[i], 10*1000, null);
+      ThreadUtils.join(destroyThreads[i], 10*1000);
     }
     for (int i = 0; i < numberOfClearThreads; i++) {
-      DistributedTestCase.join(clearThreads[i], 10*1000, null);
+      ThreadUtils.join(clearThreads[i], 10*1000);
     }
     for (int i = 0; i < numberOfForceRollThreads; i++) {
-      DistributedTestCase.join(forceRollThreads[i], 10*1000, null);
+      ThreadUtils.join(forceRollThreads[i], 10*1000);
     }
 
     if (this.validate) {
@@ -824,7 +824,7 @@ public class ConcurrentRegionOperationsJUnitTest extends DiskRegionTestingBase
         );
 
     region.clear();
-    DistributedTestCase.join(th, 20 * 1000, null);
+    ThreadUtils.join(th, 20 * 1000);
     LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
     DiskStoreImpl.DEBUG_DELAY_JOINING_WITH_COMPACTOR = 500;
     CacheObserverHolder.setInstance(old);

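The join hunks above are a one-for-one substitution: DistributedTestCase.join(thread, timeoutMs, logWriter) becomes ThreadUtils.join(thread, timeoutMs), dropping the trailing LogWriter argument. A small sketch, assuming ThreadUtils as imported above; the worker Runnable is illustrative:

    import com.gemstone.gemfire.test.dunit.ThreadUtils;

    public class ThreadJoinSketch {

      void runAndJoin(Runnable work) {
        Thread worker = new Thread(work, "sketch-worker");
        worker.start();
        // old: DistributedTestCase.join(worker, 30 * 1000, null);
        ThreadUtils.join(worker, 30 * 1000);  // waits up to 30 s for the worker to finish
      }
    }
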
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentRollingAndRegionOperationsJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentRollingAndRegionOperationsJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentRollingAndRegionOperationsJUnitTest.java
index 36a0448..b95fa4e 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentRollingAndRegionOperationsJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentRollingAndRegionOperationsJUnitTest.java
@@ -41,7 +41,7 @@ import com.gemstone.gemfire.internal.cache.DiskRegionProperties;
 import com.gemstone.gemfire.internal.cache.DiskRegionTestingBase;
 import com.gemstone.gemfire.internal.cache.LocalRegion;
 import com.gemstone.gemfire.internal.cache.RegionEntry;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
@@ -866,7 +866,7 @@ public class ConcurrentRollingAndRegionOperationsJUnitTest extends
     for (int i = 0; i < threads.size(); ++i) {
       Thread th = (Thread)threads.get(i);
       if (th != null) {
-        DistributedTestCase.join(th, 30 * 1000, null);
+        ThreadUtils.join(th, 30 * 1000);
       }
     }
     assertTrue(

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConnectDisconnectDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConnectDisconnectDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConnectDisconnectDUnitTest.java
index f101acb..7a29e65 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConnectDisconnectDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ConnectDisconnectDUnitTest.java
@@ -39,10 +39,12 @@ import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 
 /** A test of 46438 - missing response to an update attributes message */
 public class ConnectDisconnectDUnitTest extends CacheTestCase {
@@ -52,7 +54,7 @@ public class ConnectDisconnectDUnitTest extends CacheTestCase {
   }
 
 
-  private ExpectedException ex;
+  private IgnoredException ex;
 
   public ConnectDisconnectDUnitTest(String name) {
     super(name);
@@ -74,7 +76,7 @@ public class ConnectDisconnectDUnitTest extends CacheTestCase {
 //     setLocatorPorts(ports);
 
     for(int i = 0; i < 20; i++) {
-      getLogWriter().info("Test run: " + i);
+      LogWriterUtils.getLogWriter().info("Test run: " + i);
       runOnce();
       tearDown();
       setUp();
@@ -88,7 +90,7 @@ public class ConnectDisconnectDUnitTest extends CacheTestCase {
   static int[] locatorPorts;
   
   public void setLocatorPorts(int[] ports) {
-    deleteLocatorStateFile(ports);
+    DistributedTestUtils.deleteLocatorStateFile(ports);
     String locators = "";
     for (int i=0; i<ports.length; i++) {
       if (i > 0) {
@@ -109,13 +111,12 @@ public class ConnectDisconnectDUnitTest extends CacheTestCase {
     locatorPorts = ports;
   }
   
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  @Override
+  protected final void postTearDownCacheTestCase() throws Exception {
     if (locatorPorts != null) {
-      deleteLocatorStateFile(locatorPorts);
+      DistributedTestUtils.deleteLocatorStateFile(locatorPorts);
     }
   }
-  
 
   /**
    * This test creates 4 vms and starts a cache in each VM. If that doesn't hang, it destroys the DS in all

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaPropagationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaPropagationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaPropagationDUnitTest.java
index 43ed3da..9a50146 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaPropagationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaPropagationDUnitTest.java
@@ -63,6 +63,8 @@ import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * @since 6.1
@@ -163,8 +165,8 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
     DeltaPropagationDUnitTest.resetAll();
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     DeltaPropagationDUnitTest.closeCache();
     VM2.invoke(DeltaPropagationDUnitTest.class, "closeCache");
     VM3.invoke(DeltaPropagationDUnitTest.class, "closeCache");
@@ -657,7 +659,7 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
       primary = (((PoolImpl)pool).getPrimaryPort() == PORT1) ? VM0
           : ((((PoolImpl)pool).getPrimaryPort() == PORT2) ? VM1 : VM2);
   
-      getLogWriter().info("waiting for client to receive last_key");
+      com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("waiting for client to receive last_key");
       waitForLastKey();
   
       long fromDeltasOnClient = DeltaTestImpl.getFromDeltaInvokations()
@@ -835,7 +837,7 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
       }
     }
     catch (Exception e) {
-      fail("failed in doLocalOp()", e);
+      com.gemstone.gemfire.test.dunit.Assert.fail("failed in doLocalOp()", e);
     }
   }
 
@@ -866,7 +868,7 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
         }
       };
     }
-    DistributedTestCase.waitForCriterion(wc, 5 * 1000, 100, true);
+    Wait.waitForCriterion(wc, 5 * 1000, 100, true);
   }
 
   public static void assertValue(String rName, String key, Object expected) {
@@ -878,7 +880,7 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
           + expected, expected.equals(value));
     }
     catch (Exception e) {
-      fail("failed in assertValue()", e);
+      com.gemstone.gemfire.test.dunit.Assert.fail("failed in assertValue()", e);
     }
   }
 
@@ -897,7 +899,7 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
         return "HA Overflow did not occure.";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 10 * 1000, 100, true);
+    Wait.waitForCriterion(wc, 10 * 1000, 100, true);
   }
 
   public static void waitForLastKey() {
@@ -910,7 +912,7 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
         return "Last key NOT received.";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 10 * 1000, 100, true);
+    Wait.waitForCriterion(wc, 10 * 1000, 100, true);
   }
 
   public static void prepareDeltas() {
@@ -1015,7 +1017,7 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
       r.put(LAST_KEY, "");
     }
     catch (Exception ex) {
-      fail("failed in createDelta()", ex);
+      com.gemstone.gemfire.test.dunit.Assert.fail("failed in createDelta()", ex);
     }
   }
 
@@ -1032,7 +1034,7 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
       r.create(DELTA_KEY, deltaPut[0]);
     }
     catch (Exception ex) {
-      fail("failed in createDelta()", ex);
+      com.gemstone.gemfire.test.dunit.Assert.fail("failed in createDelta()", ex);
     }
   }
 
@@ -1054,7 +1056,7 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
       r.put(LAST_KEY, "");
     }
     catch (Exception ex) {
-      fail("failed in updateDelta()", ex);
+      com.gemstone.gemfire.test.dunit.Assert.fail("failed in updateDelta()", ex);
     }
   }
 
@@ -1069,7 +1071,7 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
       r.create(LAST_KEY, "");
     }
     catch (Exception ex) {
-      fail("failed in createDeltas()", ex);
+      com.gemstone.gemfire.test.dunit.Assert.fail("failed in createDeltas()", ex);
     }
   }
 
@@ -1081,7 +1083,7 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
       r.create("KEY-A", "I push the delta out to disk :)");
     }
     catch (Exception ex) {
-      fail("failed in createAnEntry()", ex);
+      com.gemstone.gemfire.test.dunit.Assert.fail("failed in createAnEntry()", ex);
     }
   }
 
@@ -1093,7 +1095,7 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
       r.invalidate(DELTA_KEY);
     }
     catch (Exception ex) {
-      fail("failed in invalidateDelta()", ex);
+      com.gemstone.gemfire.test.dunit.Assert.fail("failed in invalidateDelta()", ex);
     }
   }
 
@@ -1476,7 +1478,7 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
       r.registerInterest("ALL_KEYS");
     }
     catch (Exception ex) {
-      fail("failed in registerInterestListAll", ex);
+      com.gemstone.gemfire.test.dunit.Assert.fail("failed in registerInterestListAll", ex);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaPropagationStatsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaPropagationStatsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaPropagationStatsDUnitTest.java
index d3f99bd..8c249be 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaPropagationStatsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaPropagationStatsDUnitTest.java
@@ -44,7 +44,10 @@ import com.gemstone.gemfire.internal.cache.tier.sockets.CacheClientProxy;
 import com.gemstone.gemfire.internal.tcp.ConnectionTable;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * @author ashetkar
@@ -91,8 +94,8 @@ public class DeltaPropagationStatsDUnitTest extends DistributedTestCase {
     vm3 = host.getVM(3);
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     lastKeyReceived = false;
     vm0.invoke(DeltaPropagationStatsDUnitTest.class, "resetLastKeyReceived");
     vm1.invoke(DeltaPropagationStatsDUnitTest.class, "resetLastKeyReceived");
@@ -129,7 +132,7 @@ public class DeltaPropagationStatsDUnitTest extends DistributedTestCase {
     int port = (Integer)vm0.invoke(DeltaPropagationStatsDUnitTest.class,
         "createServerCache", args);
 
-    createClientCache(getServerHostName(vm0.getHost()), port);
+    createClientCache(NetworkUtils.getServerHostName(vm0.getHost()), port);
 
     vm0.invoke(DeltaPropagationStatsDUnitTest.class, "putCleanDelta",
         new Object[] {Integer.valueOf(numOfKeys), Long.valueOf(updates)});
@@ -157,7 +160,7 @@ public class DeltaPropagationStatsDUnitTest extends DistributedTestCase {
     int port = (Integer)vm0.invoke(DeltaPropagationStatsDUnitTest.class,
         "createServerCache", args);
 
-    createClientCache(getServerHostName(vm0.getHost()), port);
+    createClientCache(NetworkUtils.getServerHostName(vm0.getHost()), port);
 
     vm0.invoke(DeltaPropagationStatsDUnitTest.class,
         "putErrorDeltaForReceiver", new Object[] {Integer.valueOf(numOfKeys),
@@ -278,7 +281,7 @@ public class DeltaPropagationStatsDUnitTest extends DistributedTestCase {
         Scope.DISTRIBUTED_ACK, Boolean.TRUE};
     Integer port = (Integer)vm0.invoke(DeltaPropagationStatsDUnitTest.class,
         "createServerCache", args);
-    createClientCache(getServerHostName(vm0.getHost()), port);
+    createClientCache(NetworkUtils.getServerHostName(vm0.getHost()), port);
 
     putCleanDelta(numOfKeys, updates);
     putLastKey();
@@ -314,7 +317,7 @@ public class DeltaPropagationStatsDUnitTest extends DistributedTestCase {
         Scope.DISTRIBUTED_ACK, Boolean.TRUE};
     Integer port = (Integer)vm0.invoke(DeltaPropagationStatsDUnitTest.class,
         "createServerCache", args);
-    createClientCache(getServerHostName(vm0.getHost()), port);
+    createClientCache(NetworkUtils.getServerHostName(vm0.getHost()), port);
 
     putErrorDeltaForReceiver(numOfKeys, updates, errors);
     putErrorDeltaForSender(numOfKeys, updates, errors2, Boolean.FALSE);
@@ -350,7 +353,7 @@ public class DeltaPropagationStatsDUnitTest extends DistributedTestCase {
         return "Last key NOT received.";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 15 * 1000, 100, true);
+    Wait.waitForCriterion(wc, 15 * 1000, 100, true);
   }
 
   public static void putCleanDelta(Integer keys, Long updates) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaSizingDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaSizingDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaSizingDUnitTest.java
index 5da448a..4de9daf 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaSizingDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaSizingDUnitTest.java
@@ -31,6 +31,7 @@ import com.gemstone.gemfire.cache.server.CacheServer;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
@@ -109,8 +110,8 @@ public class DeltaSizingDUnitTest extends CacheTestCase {
           int port2) {
         AttributesFactory<Integer, TestDelta> attr = new AttributesFactory<Integer, TestDelta>();
         PoolFactory pf = PoolManager.createFactory();
-        pf.addServer(getServerHostName(host), port1);
-        pf.addServer(getServerHostName(host), port2);
+        pf.addServer(NetworkUtils.getServerHostName(host), port1);
+        pf.addServer(NetworkUtils.getServerHostName(host), port2);
         pf.create("pool");
         attr.setCloningEnabled(clone);
         attr.setDataPolicy(DataPolicy.EMPTY);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegByteArrayDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegByteArrayDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegByteArrayDUnitTest.java
index 49b47c8..f42c020 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegByteArrayDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegByteArrayDUnitTest.java
@@ -31,6 +31,7 @@ import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -88,10 +89,10 @@ public class DiskRegByteArrayDUnitTest extends CacheTestCase {
       vm1.invoke(DiskRegByteArrayDUnitTest.class, "createCacheForVM1");
      }
     
-    public void tearDown2() throws Exception {
-      super.tearDown2();
+    @Override
+    protected final void postTearDownCacheTestCase() throws Exception {
       cache = null;
-      invokeInEveryVM(new SerializableRunnable() { public void run() { cache = null; } });
+      Invoke.invokeInEveryVM(new SerializableRunnable() { public void run() { cache = null; } });
     }
 
     /* public void tearDown(){

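Several files in this patch (ConnectDisconnectDUnitTest, DeltaPropagationDUnitTest, DeltaPropagationStatsDUnitTest, and this one) replace tearDown2 overrides with the framework's named hooks: preTearDown on DistributedTestCase subclasses and postTearDownCacheTestCase on CacheTestCase subclasses, with no super call needed. A sketch of the CacheTestCase shape, modeled on the DiskRegByteArrayDUnitTest hunk above and assuming only what those hunks show; the class name and static field are illustrative:

    import com.gemstone.gemfire.cache.Cache;
    import com.gemstone.gemfire.cache30.CacheTestCase;
    import com.gemstone.gemfire.test.dunit.Invoke;
    import com.gemstone.gemfire.test.dunit.SerializableRunnable;

    public class TearDownHookSketchDUnitTest extends CacheTestCase {

      static Cache cache; // static test state that must be cleared in every VM

      public TearDownHookSketchDUnitTest(String name) {
        super(name);
      }

      // old: public void tearDown2() { super.tearDown2(); cache = null; ... }
      @Override
      protected final void postTearDownCacheTestCase() throws Exception {
        cache = null;
        Invoke.invokeInEveryVM(new SerializableRunnable() {
          public void run() {
            cache = null;
          }
        });
      }
    }
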
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionClearJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionClearJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionClearJUnitTest.java
index d1e0791..7e58329 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionClearJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionClearJUnitTest.java
@@ -44,7 +44,7 @@ import com.gemstone.gemfire.cache.RegionEvent;
 import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.cache.versions.VersionStamp;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 // @TODO: use DiskRegionTestingBase and DiskRegionHelperFactory
@@ -184,7 +184,7 @@ public class DiskRegionClearJUnitTest {
         fail("timed out counter="+counter);
       }
     }
-    DistributedTestCase.join(thread, 10 * 60 * 1000, null);
+    ThreadUtils.join(thread, 10 * 60 * 1000);
     Assert.assertTrue(counter == 3);
     if(!cleared)
       fail("clear not done although puts have been done");    

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionJUnitTest.java
index 36044f7..94e6ec6 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionJUnitTest.java
@@ -47,8 +47,9 @@ import com.gemstone.gemfire.cache.util.CacheListenerAdapter;
 import com.gemstone.gemfire.internal.cache.lru.LRUStatistics;
 import com.gemstone.gemfire.internal.cache.lru.NewLRUClockHand;
 import com.gemstone.gemfire.internal.cache.persistence.UninterruptibleFileChannel;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
@@ -411,11 +412,11 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
     }
 
     long startTime = System.currentTimeMillis();
-    DistributedTestCase.join(thread1, 20 * 1000, null);
-    DistributedTestCase.join(thread2, 20 * 1000, null);
-    DistributedTestCase.join(thread3, 20 * 1000, null);
-    DistributedTestCase.join(thread4, 20 * 1000, null);
-    DistributedTestCase.join(thread5, 20 * 1000, null);
+    ThreadUtils.join(thread1, 20 * 1000);
+    ThreadUtils.join(thread2, 20 * 1000);
+    ThreadUtils.join(thread3, 20 * 1000);
+    ThreadUtils.join(thread4, 20 * 1000);
+    ThreadUtils.join(thread5, 20 * 1000);
     
     long interval = System.currentTimeMillis() - startTime;
     if (interval > 100000) {
@@ -521,11 +522,11 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
 
     long startTime = System.currentTimeMillis();
     finished = true;
-    DistributedTestCase.join(thread1, 5 * 60 * 1000, null);
-    DistributedTestCase.join(thread2, 5 * 60 * 1000, null);
-    DistributedTestCase.join(thread3, 5 * 60 * 1000, null);
-    DistributedTestCase.join(thread4, 5 * 60 * 1000, null);
-    DistributedTestCase.join(thread5, 5 * 60 * 1000, null);
+    ThreadUtils.join(thread1, 5 * 60 * 1000);
+    ThreadUtils.join(thread2, 5 * 60 * 1000);
+    ThreadUtils.join(thread3, 5 * 60 * 1000);
+    ThreadUtils.join(thread4, 5 * 60 * 1000);
+    ThreadUtils.join(thread5, 5 * 60 * 1000);
     long interval = System.currentTimeMillis() - startTime;
     if (interval > 100000) {
       fail(" Test took too long in going to join, it should have exited before 100000 ms");
@@ -720,7 +721,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
     oplogs = dsi.testHookGetAllOverflowOplogs();
     int retryCount = 20;
     while (oplogs.size() > 1 && retryCount > 0) {
-      DistributedTestCase.staticPause(100);
+      Wait.pause(100);
       oplogs = dsi.testHookGetAllOverflowOplogs();
       retryCount--;
     }
@@ -1152,7 +1153,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
               public void beforeGoingToCompact()
               {
                 try {
-                  DistributedTestCase.join(t1, 60 * 1000, null);
+                  ThreadUtils.join(t1, 60 * 1000);
                 }
                 catch (Exception ignore) {
                   logWriter.error("Exception occured", ignore);
@@ -1192,7 +1193,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
     });
     testThread.start();
    // region.clear();
-    DistributedTestCase.join(testThread, 40 * 1000, null);
+    ThreadUtils.join(testThread, 40 * 1000);
     assertFalse(failureCause, testFailed);
     assertFalse("Expected situation of max directory size violation happening and available space less than zero did not happen  ", exceptionOccured); // CC jade1d failure
 
@@ -1732,7 +1733,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
     region.create("key1", "value1");
     try {
       cache.getLogger().info("waiting for clear to finish");
-      DistributedTestCase.join(th, 30 * 1000, null);
+      ThreadUtils.join(th, 30 * 1000);
     }
     catch (Exception ie) {
       DiskRegionJUnitTest.this.exceptionOccured = true;
@@ -1782,7 +1783,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
           public void beforeDiskClear()
           {
             th.start();
-            DistributedTestCase.staticPause(7 * 1000);
+            Wait.pause(7 * 1000);
             System.out.println("FIXME: this thread does not terminate--EVER!");
 //            try {
 //              DistributedTestCase.join(th, 7 * 1000, null);
@@ -1795,7 +1796,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
         });
     try {
       region.clear();
-      DistributedTestCase.join(th, 30 * 1000, null);
+      ThreadUtils.join(th, 30 * 1000);
       assertFalse(this.failureCause, this.exceptionOccured);
       //We expect 1 entry to exist, because the clear was triggered before
       //the update
@@ -1842,7 +1843,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
          	LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
             th.start();
             System.out.println("FIXME: this thread (2) does not terminate--EVER!");
-            DistributedTestCase.staticPause(10 * 1000);
+            Wait.pause(10 * 1000);
 //            try {	
 //              DistributedTestCase.join(th, 10 * 1000, null);
 //            }
@@ -1854,7 +1855,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
         });
     try {
       region.clear();
-      DistributedTestCase.join(th, 30 * 1000, null);
+      ThreadUtils.join(th, 30 * 1000);
       assertFalse(this.failureCause, this.exceptionOccured);
       //We expect 1 entry to exist, because the clear was triggered before
       //the update
@@ -2265,7 +2266,7 @@ public class DiskRegionJUnitTest extends DiskRegionTestingBase
         fail("async flusher thread did not terminate");
       }
 
-      DistributedTestCase.waitForCriterion(new WaitCriterion() {
+      Wait.waitForCriterion(new WaitCriterion() {
         @Override
         public boolean done() {
           return cache.isClosed();

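For readers following the renames in this file, here is a minimal, self-contained sketch of the relocated helpers in use. The two-argument ThreadUtils.join and Wait.pause calls mirror the hunks above; the trailing arguments to Wait.waitForCriterion and the WaitCriterion.description() method are assumptions (the hunk ends before them), carried over from the old DistributedTestCase style. The class and variable names below are illustrative only.

import java.util.concurrent.atomic.AtomicBoolean;

import com.gemstone.gemfire.test.dunit.ThreadUtils;
import com.gemstone.gemfire.test.dunit.Wait;
import com.gemstone.gemfire.test.dunit.WaitCriterion;

public class WaitHelpersSketch {

  public static void awaitWorker() {
    final AtomicBoolean finished = new AtomicBoolean(false);
    Thread worker = new Thread(new Runnable() {
      @Override
      public void run() {
        Wait.pause(100);        // formerly DistributedTestCase.staticPause(100)
        finished.set(true);
      }
    });
    worker.start();

    // formerly DistributedTestCase.join(worker, 20 * 1000, null)
    ThreadUtils.join(worker, 20 * 1000);

    // formerly DistributedTestCase.waitForCriterion(...)
    Wait.waitForCriterion(new WaitCriterion() {
      @Override
      public boolean done() {
        return finished.get();
      }
      @Override
      public String description() {
        return "worker never finished";
      }
    }, 30 * 1000, 200, true); // timeout ms, poll interval ms, throwOnTimeout (assumed order)
  }
}
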
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DistrbutedRegionProfileOffHeapDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DistrbutedRegionProfileOffHeapDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DistrbutedRegionProfileOffHeapDUnitTest.java
index 1c2f921..6ce3ffd 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DistrbutedRegionProfileOffHeapDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DistrbutedRegionProfileOffHeapDUnitTest.java
@@ -28,6 +28,8 @@ import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 
 public class DistrbutedRegionProfileOffHeapDUnitTest extends CacheTestCase {
@@ -38,7 +40,7 @@ public class DistrbutedRegionProfileOffHeapDUnitTest extends CacheTestCase {
   }
 
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
     SerializableRunnable checkOrphans = new SerializableRunnable() {
 
       @Override
@@ -48,12 +50,8 @@ public class DistrbutedRegionProfileOffHeapDUnitTest extends CacheTestCase {
         }
       }
     };
-    invokeInEveryVM(checkOrphans);
-    try {
-      checkOrphans.run();
-    } finally {
-      super.tearDown2();
-    }
+    Invoke.invokeInEveryVM(checkOrphans);
+    checkOrphans.run();
   }
 
   /**
@@ -62,7 +60,7 @@ public class DistrbutedRegionProfileOffHeapDUnitTest extends CacheTestCase {
    * cause an exception and the region will not be created.
    */
   public void testPartitionedRegionProfileWithConflict() throws Exception {
-    final String regionName = getTestName() + "Region";
+    final String regionName = getTestMethodName() + "Region";
 
     Host.getHost(0).getVM(0).invoke(new CacheSerializableRunnable("createRegionNoException") {
       private static final long serialVersionUID = 1L;
@@ -96,7 +94,7 @@ public class DistrbutedRegionProfileOffHeapDUnitTest extends CacheTestCase {
         Region region = null;
 
         try {
-          addExpectedException("IllegalStateException");
+          IgnoredException.addIgnoredException("IllegalStateException");
           region = regionFactory.create(regionName);
           fail("Expected exception upon creation with invalid off-heap state");
         } catch (IllegalStateException expected) {
@@ -117,8 +115,8 @@ public class DistrbutedRegionProfileOffHeapDUnitTest extends CacheTestCase {
    * created.
    */
   public void testPartitionedRegionProfileWithoutConflict() throws Exception {
-    final String offHeapRegionName = getTestName() + "OffHeapRegion";
-    final String onHeapRegionName = getTestName() + "OnHeapRegion";
+    final String offHeapRegionName = getTestMethodName() + "OffHeapRegion";
+    final String onHeapRegionName = getTestMethodName() + "OnHeapRegion";
 
     for (int vmId = 0; vmId <= 1; vmId++) {
       Host.getHost(0).getVM(vmId).invoke(new CacheSerializableRunnable("createRegionNoException") {
@@ -155,7 +153,7 @@ public class DistrbutedRegionProfileOffHeapDUnitTest extends CacheTestCase {
    * exception.
    */
   public void testPartitionedRegionProfileWithAccessor() throws Exception {
-    final String regionName = getTestName() + "Region";
+    final String regionName = getTestMethodName() + "Region";
 
     // Create a region using off-heap
     Host.getHost(0).getVM(0).invoke(new CacheSerializableRunnable("createRegionNoException") {
@@ -208,7 +206,7 @@ public class DistrbutedRegionProfileOffHeapDUnitTest extends CacheTestCase {
    * storage and the other being a proxy will not cause an exception.
    */
   public void testPartitionedRegionProfileWithProxy() throws Exception {
-    final String regionName = getTestName() + "Region";
+    final String regionName = getTestMethodName() + "Region";
 
     // Create a region using off-heap
     Host.getHost(0).getVM(0).invoke(new CacheSerializableRunnable("createRegionNoException") {

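The next sketch illustrates the teardown pattern this file now uses: a CacheTestCase subclass overrides preTearDownCacheTestCase() instead of tearDown2(), and the former inherited statics are reached through Invoke, IgnoredException, and getTestMethodName(). The constructor form, the assumption that the hook runs before the framework's own cache teardown, and every name in the sketch are illustrative rather than taken from the commit.

import com.gemstone.gemfire.cache30.CacheTestCase;
import com.gemstone.gemfire.test.dunit.IgnoredException;
import com.gemstone.gemfire.test.dunit.Invoke;
import com.gemstone.gemfire.test.dunit.SerializableRunnable;

public class TearDownHookSketchDUnitTest extends CacheTestCase {

  public TearDownHookSketchDUnitTest(String name) {
    super(name);
  }

  @Override
  protected final void preTearDownCacheTestCase() throws Exception {
    // replaces the old tearDown2() override; there is no super.tearDown2() to chain to
    Invoke.invokeInEveryVM(new SerializableRunnable("check for leaked resources") {
      @Override
      public void run() {
        // per-VM verification goes here
      }
    });
  }

  public void testSomething() throws Exception {
    // region names now come from the helper rather than the removed testName field
    final String regionName = getTestMethodName() + "Region";
    IgnoredException.addIgnoredException("IllegalStateException");
    // ... create regionName and exercise it ...
  }
}
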
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DistributedCacheTestCase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DistributedCacheTestCase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DistributedCacheTestCase.java
index e6057f5..4c46a55 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DistributedCacheTestCase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/DistributedCacheTestCase.java
@@ -33,6 +33,7 @@ import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
 import com.gemstone.gemfire.internal.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -104,7 +105,8 @@ public abstract class DistributedCacheTestCase
   /**
    * Closes the cache in this VM and each remote VM
    */
-  public void tearDown2() throws Exception {
+  @Override
+  protected final void preTearDown() throws Exception {
     StringBuffer problems = new StringBuffer();
 
     if (cache != null) {
@@ -137,8 +139,6 @@ public abstract class DistributedCacheTestCase
 
     assertEquals("Problems while tearing down", 
                  "", problems.toString().trim());
-
-    super.tearDown2();
   }
 
   /**
@@ -229,7 +229,7 @@ public abstract class DistributedCacheTestCase
 
     Region newRegion =
       root.createSubregion(name, factory.create());
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
       "Created Region '" + newRegion.getFullPath() + "'");
   }
 
@@ -301,7 +301,7 @@ public abstract class DistributedCacheTestCase
                              factory.create());
     sub.create(entryName, null);
 
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
       "Defined Entry named '" + entryName + "' in region '" +
       sub.getFullPath() +"'");
   }
@@ -328,7 +328,7 @@ public abstract class DistributedCacheTestCase
 
     sub.put(entryName, value);
 
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
       "Put value " + value + " in entry " + entryName + " in region '" +
       region.getFullPath() +"'");
   }
@@ -377,7 +377,7 @@ public abstract class DistributedCacheTestCase
 
     sub.put(entryName, value);
 
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
       "Replaced value " + value + "in entry " + entryName + " in region '" +
       region.getFullPath() +"'");
   }
@@ -466,7 +466,7 @@ public abstract class DistributedCacheTestCase
     Host host = Host.getHost(0);
     int vmCount = host.getVMCount();
     for (int i=0; i<vmCount; i++) {
-      getLogWriter().info("Invoking " + methodName + "on VM#" + i);
+      LogWriterUtils.getLogWriter().info("Invoking " + methodName + "on VM#" + i);
       host.getVM(i).invoke(this.getClass(), methodName, args);
     }
   }

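For plain DistributedTestCase subclasses such as this one, the equivalent hook is preTearDown(); a short sketch follows, with the hook semantics (invoked by the base-class teardown, no super call needed) assumed from the override above and the class name invented for illustration.

import com.gemstone.gemfire.test.dunit.DistributedTestCase;
import com.gemstone.gemfire.test.dunit.LogWriterUtils;

public class PreTearDownSketchDUnitTest extends DistributedTestCase {

  public PreTearDownSketchDUnitTest(String name) {
    super(name);
  }

  @Override
  protected final void preTearDown() throws Exception {
    // runs as part of the base-class teardown; replaces the old tearDown2() override
    LogWriterUtils.getLogWriter().info("cleaning up before the framework tears down");
  }
}
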
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EventTrackerDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EventTrackerDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EventTrackerDUnitTest.java
index 19f72ab..09fc882 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EventTrackerDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EventTrackerDUnitTest.java
@@ -40,9 +40,12 @@ import com.gemstone.gemfire.distributed.internal.DistributionMessage;
 import com.gemstone.gemfire.distributed.internal.DistributionMessageObserver;
 import com.gemstone.gemfire.internal.cache.EventTracker.BulkOpHolder;
 import com.gemstone.gemfire.internal.cache.ha.ThreadIdentifier;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 /**
  * Tests <code>EventTracker</code> management.
@@ -75,13 +78,9 @@ public class EventTrackerDUnitTest extends CacheTestCase {
     disconnectAllFromDS();
   }
 
-  public void tearDown2() throws Exception {
-    try {
-      super.tearDown2();
-    }
-    finally {
-      disconnectAllFromDS();
-    }
+  @Override
+  protected final void postTearDownCacheTestCase() throws Exception {
+    disconnectAllFromDS();
   }
 
   /**
@@ -146,7 +145,7 @@ public class EventTrackerDUnitTest extends CacheTestCase {
         try {
           startCacheServer();
         } catch (Exception ex) {
-          fail("While starting CacheServer", ex);
+          Assert.fail("While starting CacheServer", ex);
         }
       }
     });
@@ -162,7 +161,7 @@ public class EventTrackerDUnitTest extends CacheTestCase {
     
     // Create Create Region in the client
     final int port = serverVM.invokeInt(EventTrackerDUnitTest.class, "getCacheServerPort");
-    final String hostName = getServerHostName(host);
+    final String hostName = NetworkUtils.getServerHostName(host);
     clientVM.invoke(new CacheSerializableRunnable("Create client") {
       public void run2() throws CacheException {
         getCache();
@@ -219,7 +218,7 @@ public class EventTrackerDUnitTest extends CacheTestCase {
         try {
           startCacheServer();
         } catch (Exception ex) {
-          fail("While starting CacheServer", ex);
+          Assert.fail("While starting CacheServer", ex);
         }
       }
     });
@@ -235,7 +234,7 @@ public class EventTrackerDUnitTest extends CacheTestCase {
     
     // Create Create Region in the client
     final int port = serverVM.invokeInt(EventTrackerDUnitTest.class, "getCacheServerPort");
-    final String hostName = getServerHostName(host);
+    final String hostName = NetworkUtils.getServerHostName(host);
     clientVM.invoke(new CacheSerializableRunnable("Create client") {
       public void run2() throws CacheException {
         getCache();
@@ -266,7 +265,7 @@ public class EventTrackerDUnitTest extends CacheTestCase {
         
         // Pause for the message tracking timeout
         int waitTime = Integer.parseInt(MESSAGE_TRACKING_TIMEOUT) * 3;
-        pause(waitTime);
+        Wait.pause(waitTime);
     
         // Verify the server no longer contains an entry
         eventState = region.getEventState();
@@ -303,7 +302,7 @@ public class EventTrackerDUnitTest extends CacheTestCase {
         try {
           startCacheServer();
         } catch (Exception ex) {
-          fail("While starting CacheServer", ex);
+          Assert.fail("While starting CacheServer", ex);
         }
       }
     };
@@ -313,7 +312,7 @@ public class EventTrackerDUnitTest extends CacheTestCase {
     
  // Create Create Region in the client
     final int port = vm0.invokeInt(EventTrackerDUnitTest.class, "getCacheServerPort");
-    final String hostName = getServerHostName(host);
+    final String hostName = NetworkUtils.getServerHostName(host);
     vm2.invoke(new CacheSerializableRunnable("Create client") {
       public void run2() throws CacheException {
         getCache();

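A small sketch of the client/server helpers this file now reaches through utility classes: NetworkUtils for the server host name, Wait for pauses, and the two-argument Assert.fail that preserves the cause. Everything other than those calls (the class name, method name, and the 500 ms pause) is illustrative.

import com.gemstone.gemfire.test.dunit.Assert;
import com.gemstone.gemfire.test.dunit.Host;
import com.gemstone.gemfire.test.dunit.NetworkUtils;
import com.gemstone.gemfire.test.dunit.Wait;

public class ClientServerHelpersSketch {

  public static String serverHostOrFail(Host host) {
    try {
      // formerly the unqualified getServerHostName(host) inherited from the test case
      String hostName = NetworkUtils.getServerHostName(host);
      Wait.pause(500); // formerly the inherited pause(500)
      return hostName;
    } catch (Exception ex) {
      // the (String, Throwable) overload of fail() now lives on the dunit Assert class
      Assert.fail("While looking up the server host", ex);
      return null; // not reached; Assert.fail throws
    }
  }
}
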
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionDUnitTest.java
index 33807b7..2d19ba5 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionDUnitTest.java
@@ -26,6 +26,7 @@ import org.junit.experimental.categories.Category;
 import com.gemstone.gemfire.cache.EvictionAlgorithm;
 import com.gemstone.gemfire.internal.cache.lru.HeapEvictor;
 import com.gemstone.gemfire.internal.cache.lru.MemLRUCapacityController;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.junit.categories.DistributedTest;
 
 @Category(DistributedTest.class)
@@ -94,7 +95,7 @@ public class EvictionDUnitTest extends EvictionTestBase {
     createPartitionedRegion(true, EvictionAlgorithm.LRU_ENTRY, "PR1", 4, 1, 1000,maxEnteries);
     
     final PartitionedRegion pr = (PartitionedRegion)cache.getRegion("PR1");
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "PR- " +pr.getEvictionAttributes().getMaximum());
     
     for (int counter = 1; counter <= maxEnteries+extraEntries; counter++) {
@@ -111,7 +112,7 @@ public class EvictionDUnitTest extends EvictionTestBase {
     createPartitionedRegion(true, EvictionAlgorithm.LRU_ENTRY, "PR1", 4, 1, 1000,maxEnteries);
     
     final PartitionedRegion pr = (PartitionedRegion)cache.getRegion("PR1");
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "PR- " +pr.getEvictionAttributes().getMaximum());
     for (int i = 0; i < 3; i++) {
       // assume mod-based hashing for bucket creation
@@ -138,7 +139,7 @@ public class EvictionDUnitTest extends EvictionTestBase {
     createPartitionedRegion(true, EvictionAlgorithm.LRU_ENTRY, "PR1", 5, 1, 1000,maxEnteries);
     
     final PartitionedRegion pr = (PartitionedRegion)cache.getRegion("PR1");
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "PR- " +pr.getEvictionAttributes().getMaximum());
     
     for (int counter = 1; counter <= maxEnteries+extraEntries; counter++) {
@@ -154,7 +155,7 @@ public class EvictionDUnitTest extends EvictionTestBase {
       if (bucketRegion == null) {
         continue;
       }
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "FINAL bucket= " + bucketRegion.getFullPath() + "size= "
               + bucketRegion.size() + "  count= "+bucketRegion.entryCount());
       assertEquals(4,bucketRegion.size());

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionObjectSizerDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionObjectSizerDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionObjectSizerDUnitTest.java
index dbc1f3f..18346b7 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionObjectSizerDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/EvictionObjectSizerDUnitTest.java
@@ -37,6 +37,8 @@ import com.gemstone.gemfire.internal.SharedLibrary;
 import com.gemstone.gemfire.internal.cache.lru.HeapLRUCapacityController;
 import com.gemstone.gemfire.internal.cache.lru.Sizeable;
 import com.gemstone.gemfire.internal.size.ReflectionSingleObjectSizer;
+import com.gemstone.gemfire.test.dunit.Assert;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 
 public class EvictionObjectSizerDUnitTest extends CacheTestCase {
 
@@ -61,11 +63,6 @@ public class EvictionObjectSizerDUnitTest extends CacheTestCase {
     super.setUp();
   }
 
-  @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-  }
-
   /**
    * Without object sizer
    */
@@ -139,7 +136,7 @@ public class EvictionObjectSizerDUnitTest extends CacheTestCase {
       int valueSize = SharedLibrary.getObjectHeaderSize() + 4 /* array length */ + 0 /* bytes */;
       valueSize = (int) ReflectionSingleObjectSizer.roundUpSize(valueSize);
       int entrySize = keySize + valueSize + ((HeapLRUCapacityController)((PartitionedRegion)region).getEvictionController()).getPerEntryOverhead();
-      getLogWriter().info("testObjectSizerForHeapLRU_CustomizedNonSizerObject expected= " + entrySize);
+      LogWriterUtils.getLogWriter().info("testObjectSizerForHeapLRU_CustomizedNonSizerObject expected= " + entrySize);
       assertEquals(entrySize, getSizeOfCustomizedData(1));
     }
 
@@ -153,7 +150,7 @@ public class EvictionObjectSizerDUnitTest extends CacheTestCase {
       int valueSize = SharedLibrary.getObjectHeaderSize() + 4 /* array length */ + 4 /* bytes */;
       valueSize = (int) ReflectionSingleObjectSizer.roundUpSize(valueSize);
       int entrySize = keySize + valueSize + ((HeapLRUCapacityController)((PartitionedRegion)region).getEvictionController()).getPerEntryOverhead();
-      getLogWriter().info("testObjectSizerForHeapLRU_CustomizedNonSizerObject expected= " + entrySize);
+      LogWriterUtils.getLogWriter().info("testObjectSizerForHeapLRU_CustomizedNonSizerObject expected= " + entrySize);
       assertEquals(entrySize, getSizeOfCustomizedData(2));
     }
   }
@@ -171,7 +168,7 @@ public class EvictionObjectSizerDUnitTest extends CacheTestCase {
     // Total Size of entry should be= 71
     putCustomizedData(1, new TestObjectSizerImpl());
     int expected = (0+160+(Sizeable.PER_OBJECT_OVERHEAD*2)+((HeapLRUCapacityController)((PartitionedRegion)region).getEvictionController()).getPerEntryOverhead());
-    getLogWriter().info("testObjectSizerForHeapLRU_CustomizedSizerObject expected= " + expected);
+    LogWriterUtils.getLogWriter().info("testObjectSizerForHeapLRU_CustomizedSizerObject expected= " + expected);
     assertEquals(expected, getSizeOfCustomizedData(1));
     assertEquals(expected, ((PartitionedRegion)region).getEvictionController()
         .getLRUHelper().getStats().getCounter());
@@ -189,7 +186,7 @@ public class EvictionObjectSizerDUnitTest extends CacheTestCase {
     // Total Size of entry should be= 72
     putCustomizedObjects(new TestNonSizerObject("1"), new TestObjectSizerImpl());
     int expected = (1+160+(Sizeable.PER_OBJECT_OVERHEAD*2)+((HeapLRUCapacityController)((PartitionedRegion)region).getEvictionController()).getPerEntryOverhead());
-    getLogWriter().info("testObjectSizerForHeapLRU_CustomizedSizerObjects expected= " + expected);
+    LogWriterUtils.getLogWriter().info("testObjectSizerForHeapLRU_CustomizedSizerObjects expected= " + expected);
     assertEquals(expected, getSizeOfCustomizedObject(new TestNonSizerObject("1")));
     assertEquals(expected, ((PartitionedRegion)region).getEvictionController()
         .getLRUHelper().getStats().getCounter());
@@ -210,12 +207,12 @@ public class EvictionObjectSizerDUnitTest extends CacheTestCase {
       ds = getSystem(props);
       cache = CacheFactory.create(ds);
       assertNotNull(cache);
-      getLogWriter().info("cache= " + cache);
-      getLogWriter().info("cache closed= " + cache.isClosed());
+      LogWriterUtils.getLogWriter().info("cache= " + cache);
+      LogWriterUtils.getLogWriter().info("cache closed= " + cache.isClosed());
       cache.getResourceManager().setEvictionHeapPercentage(50);
     }
     catch (Exception e) {
-      fail("Failed while creating the cache", e);
+      Assert.fail("Failed while creating the cache", e);
     }
   }
 
@@ -264,7 +261,7 @@ public class EvictionObjectSizerDUnitTest extends CacheTestCase {
 
     region = cache.createRegion(regionName, factory.create());
     assertNotNull(region);
-    getLogWriter().info("Partitioned Region created Successfully :" + region);
+    LogWriterUtils.getLogWriter().info("Partitioned Region created Successfully :" + region);
   }
 
   /**
@@ -298,7 +295,7 @@ public class EvictionObjectSizerDUnitTest extends CacheTestCase {
         if (map == null || map.size() == 0) {
           continue;
         }
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "Checking for entry in bucket region: " + bucketRegion);
         for (int counter = 1; counter <= noOfElememts; counter++) {
           assertEquals(entrySize, ((AbstractLRURegionEntry)map


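The following file's hunks also show addIgnoredException returning a handle (as addExpectedException returned an ExpectedException) that the test removes once the noisy phase is over. A minimal sketch of that pattern; the wrapping class and method are illustrative and the suppressed operation is left as a placeholder.

import com.gemstone.gemfire.cache.client.ServerConnectivityException;
import com.gemstone.gemfire.test.dunit.IgnoredException;

public class IgnoredExceptionSketch {

  public static void runWithSuppressedLogs(Runnable noisyWork) {
    // addIgnoredException returns a handle, so the suppression can be scoped to one phase
    final IgnoredException suppressed =
        IgnoredException.addIgnoredException(ServerConnectivityException.class.getName());
    try {
      noisyWork.run(); // the phase expected to log ServerConnectivityException
    } finally {
      suppressed.remove(); // stop suppressing once the noisy phase is over
    }
  }
}
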
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/UniversalMembershipListenerAdapterDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/UniversalMembershipListenerAdapterDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/UniversalMembershipListenerAdapterDUnitTest.java
index ba642a1..c25a1db 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/UniversalMembershipListenerAdapterDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/UniversalMembershipListenerAdapterDUnitTest.java
@@ -49,9 +49,12 @@ import com.gemstone.gemfire.management.membership.ClientMembershipListener;
 import com.gemstone.gemfire.management.membership.MembershipEvent;
 import com.gemstone.gemfire.management.membership.MembershipListener;
 import com.gemstone.gemfire.management.membership.UniversalMembershipListenerAdapter;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 /**
  * Tests the UniversalMembershipListenerAdapter.
@@ -87,8 +90,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
   }
 
   @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  protected final void postTearDownCacheTestCase() throws Exception {
     InternalClientMembership.unregisterAllListeners();
   }
   
@@ -196,7 +198,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
 
     // duplicate join
     InternalClientMembership.notifyJoined(memberA, true);
-    pause(BRIEF_PAUSE_MILLIS);
+    Wait.pause(BRIEF_PAUSE_MILLIS);
     assertFalse(fired[JOINED]);
     assertNull(member[JOINED]);
     assertNull(memberId[JOINED]);
@@ -217,7 +219,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
 
     // duplicate left
     InternalClientMembership.notifyLeft(memberA, true);
-    pause(BRIEF_PAUSE_MILLIS);
+    Wait.pause(BRIEF_PAUSE_MILLIS);
     assertFalse(fired[LEFT]);
     assertNull(member[LEFT]);
     assertNull(memberId[LEFT]);
@@ -376,7 +378,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
     final int[] ports = new int[1];
 
     // create BridgeServer in controller vm...
-    getLogWriter().info("[testLonerClientEventsInServer] Create BridgeServer");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testLonerClientEventsInServer] Create BridgeServer");
     getSystem();
     AttributesFactory factory = new AttributesFactory();
     factory.setScope(Scope.LOCAL);
@@ -390,9 +392,9 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
     final DistributedMember serverMember = getDistributedMember();
     final Properties serverProperties = getSystem().getProperties();
 
-    getLogWriter().info("[testLonerClientEventsInServer] ports[0]=" + ports[0]);
-    getLogWriter().info("[testLonerClientEventsInServer] serverMemberId=" + serverMemberId);
-    getLogWriter().info("[testLonerClientEventsInServer] serverMember=" + serverMember);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testLonerClientEventsInServer] ports[0]=" + ports[0]);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testLonerClientEventsInServer] serverMemberId=" + serverMemberId);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testLonerClientEventsInServer] serverMember=" + serverMember);
 
     // register the bridge listener
     ClientMembership.registerClientMembershipListener(bridgeListener);
@@ -411,14 +413,14 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
     new CacheSerializableRunnable("Create bridge client") {
       @Override
       public void run2() throws CacheException {
-        getLogWriter().info("[testLonerClientEventsInServer] create bridge client");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testLonerClientEventsInServer] create bridge client");
         Properties config = new Properties();
         config.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
         config.setProperty(DistributionConfig.LOCATORS_NAME, "");
         getSystem(config);
         AttributesFactory factory = new AttributesFactory();
         factory.setScope(Scope.LOCAL);
-        ClientServerTestCase.configureConnectionPool(factory, getServerHostName(host), ports, false, -1, -1, null);
+        ClientServerTestCase.configureConnectionPool(factory, NetworkUtils.getServerHostName(host), ports, false, -1, -1, null);
         createRegion(name, factory.create());
         assertNotNull(getRootRegion().getSubregion(name));
       }
@@ -444,7 +446,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
       }
     }
     
-    getLogWriter().info("[testLonerClientEventsInServer] assert server detected client join");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testLonerClientEventsInServer] assert server detected client join");
     assertTrue(firedBridge[JOINED]);
     assertEquals(clientMember, memberBridge[JOINED]);
     //as of 6.1 the id can change when a bridge is created or a connection pool is created
@@ -490,7 +492,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
 
     vm0.invoke(new SerializableRunnable("Wait for client to fully connect") {
       public void run() {
-        getLogWriter().info("[testLonerClientEventsInServer] wait for client to fully connect");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testLonerClientEventsInServer] wait for client to fully connect");
         final String pl =
           getRootRegion().getSubregion(name).getAttributes().getPoolName();
         PoolImpl pi = (PoolImpl)PoolManager.find(pl);
@@ -500,7 +502,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
     
     vm0.invoke(new SerializableRunnable("Close bridge client region") {
       public void run() {
-        getLogWriter().info("[testLonerClientEventsInServer] close bridge client region");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testLonerClientEventsInServer] close bridge client region");
         getRootRegion().getSubregion(name).close();
         PoolManager.close();
       }
@@ -517,7 +519,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
       }
     }
     
-    getLogWriter().info("[testLonerClientEventsInServer] assert server detected client left");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testLonerClientEventsInServer] assert server detected client left");
 
     assertFalse(firedBridge[JOINED]);
     assertNull(memberIdBridge[JOINED]);
@@ -579,7 +581,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
       }
     }
     
-    getLogWriter().info("[testLonerClientEventsInServer] assert server detected client re-join");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testLonerClientEventsInServer] assert server detected client re-join");
     assertTrue(firedBridge[JOINED]);
     assertEquals(clientMember, memberBridge[JOINED]);
     assertEquals(clientMemberId, memberIdBridge[JOINED]);
@@ -624,7 +626,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
     
     vm0.invoke(new SerializableRunnable("Wait for client to fully connect") {
       public void run() {
-        getLogWriter().info("[testLonerClientEventsInServer] wait for client to fully connect");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testLonerClientEventsInServer] wait for client to fully connect");
         final String pl =
           getRootRegion().getSubregion(name).getAttributes().getPoolName();
         PoolImpl pi = (PoolImpl)PoolManager.find(pl);
@@ -636,7 +638,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
     try {
       vm0.invoke(new SerializableRunnable("Stop bridge client") {
         public void run() {
-          getLogWriter().info("[testLonerClientEventsInServer] Stop bridge client");
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testLonerClientEventsInServer] Stop bridge client");
           getRootRegion().getSubregion(name).close();
           PoolManager.close();
         }
@@ -653,7 +655,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
         }
       }
       
-    getLogWriter().info("[testLonerClientEventsInServer] assert server detected client crashed");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testLonerClientEventsInServer] assert server detected client crashed");
     assertFalse(firedBridge[JOINED]);
     assertNull(memberIdBridge[JOINED]);
     assertNull(memberBridge[JOINED]);
@@ -760,7 +762,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
       new UniversalMembershipListenerAdapter() {
       @Override
       public synchronized void memberJoined(MembershipEvent event) {
-        getLogWriter().info("[doTestSystemClientEventsInServer] memberJoined >" + event.getMemberId() + "<");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[doTestSystemClientEventsInServer] memberJoined >" + event.getMemberId() + "<");
         firedAdapterDuplicate[JOINED] = firedAdapter[JOINED];
         firedAdapter[JOINED] = true;
         memberAdapter[JOINED] = event.getDistributedMember();
@@ -773,7 +775,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
       }
       @Override
       public synchronized void memberLeft(MembershipEvent event) {
-        getLogWriter().info("[doTestSystemClientEventsInServer] memberLeft >" + event.getMemberId() + "<");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[doTestSystemClientEventsInServer] memberLeft >" + event.getMemberId() + "<");
         firedAdapterDuplicate[LEFT] = firedAdapter[LEFT];
         firedAdapter[LEFT] = true;
         memberAdapter[LEFT] = event.getDistributedMember();
@@ -786,7 +788,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
       }
       @Override
       public synchronized void memberCrashed(MembershipEvent event) {
-        getLogWriter().info("[doTestSystemClientEventsInServer] memberCrashed >" + event.getMemberId() + "<");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[doTestSystemClientEventsInServer] memberCrashed >" + event.getMemberId() + "<");
         firedAdapterDuplicate[CRASHED] = firedAdapter[CRASHED];
         firedAdapter[CRASHED] = true;
         memberAdapter[CRASHED] = event.getDistributedMember();
@@ -832,7 +834,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
     final int[] ports = new int[1];
 
     // create BridgeServer in controller vm...
-    getLogWriter().info("[doTestSystemClientEventsInServer] Create BridgeServer");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[doTestSystemClientEventsInServer] Create BridgeServer");
     getSystem();
     AttributesFactory factory = new AttributesFactory();
     factory.setScope(Scope.LOCAL);
@@ -852,9 +854,9 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
     serverProperties.remove(DistributionConfig.SSL_PROTOCOLS_NAME);
     serverProperties.remove(DistributionConfig.SSL_REQUIRE_AUTHENTICATION_NAME);
 
-    getLogWriter().info("[doTestSystemClientEventsInServer] ports[0]=" + ports[0]);
-    getLogWriter().info("[doTestSystemClientEventsInServer] serverMemberId=" + serverMemberId);
-    getLogWriter().info("[doTestSystemClientEventsInServer] serverMember=" + serverMember);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[doTestSystemClientEventsInServer] ports[0]=" + ports[0]);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[doTestSystemClientEventsInServer] serverMemberId=" + serverMemberId);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[doTestSystemClientEventsInServer] serverMember=" + serverMember);
 
     // register the bridge listener
     ClientMembership.registerClientMembershipListener(bridgeListener);
@@ -873,12 +875,12 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
     new CacheSerializableRunnable("Create bridge client") {
       @Override
       public void run2() throws CacheException {
-        getLogWriter().info("[doTestSystemClientEventsInServer] create system bridge client");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[doTestSystemClientEventsInServer] create system bridge client");
         assertTrue(getSystem(serverProperties).isConnected());
         assertFalse(getCache().isClosed());
         AttributesFactory factory = new AttributesFactory();
         factory.setScope(Scope.LOCAL);
-        ClientServerTestCase.configureConnectionPool(factory, getServerHostName(host), ports, false, -1, -1, null);
+        ClientServerTestCase.configureConnectionPool(factory, NetworkUtils.getServerHostName(host), ports, false, -1, -1, null);
         createRegion(name, factory.create());
         assertNotNull(getRootRegion().getSubregion(name));
       }
@@ -908,7 +910,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
       }
     }
     
-    getLogWriter().info("[doTestSystemClientEventsInServer] assert server detected client join");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[doTestSystemClientEventsInServer] assert server detected client join");
     assertFalse(firedSystemDuplicate);
     assertFalse(firedAdapterDuplicate);
     assertFalse(firedBridgeDuplicate);
@@ -957,7 +959,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
 
     vm0.invoke(new SerializableRunnable("Wait for client to fully connect") {
       public void run() {
-        getLogWriter().info("[doTestSystemClientEventsInServer] wait for client to fully connect");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[doTestSystemClientEventsInServer] wait for client to fully connect");
         final String pl =
           getRootRegion().getSubregion(name).getAttributes().getPoolName();
         PoolImpl pi = (PoolImpl)PoolManager.find(pl);
@@ -968,7 +970,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
     // close bridge client region
     vm0.invoke(new SerializableRunnable("Close bridge client region") {
       public void run() {
-        getLogWriter().info("[doTestSystemClientEventsInServer] close bridge client region");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[doTestSystemClientEventsInServer] close bridge client region");
         getRootRegion().getSubregion(name).close();
         PoolManager.close();
       }
@@ -985,7 +987,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
       }
     }
     
-    getLogWriter().info("[doTestSystemClientEventsInServer] assert server detected client left");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[doTestSystemClientEventsInServer] assert server detected client left");
     assertFalse(firedSystemDuplicate);
     assertFalse(firedAdapterDuplicate);
     assertFalse(firedBridgeDuplicate);
@@ -1050,7 +1052,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
       }
     }
     
-    getLogWriter().info("[doTestSystemClientEventsInServer] assert server detected client re-join");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[doTestSystemClientEventsInServer] assert server detected client re-join");
     assertFalse(firedSystemDuplicate);
     assertFalse(firedAdapterDuplicate);
     assertFalse(firedBridgeDuplicate);
@@ -1099,7 +1101,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
     
     vm0.invoke(new SerializableRunnable("Wait for client to fully connect") {
       public void run() {
-        getLogWriter().info("[doTestSystemClientEventsInServer] wait for client to fully connect");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[doTestSystemClientEventsInServer] wait for client to fully connect");
         final String pl =
           getRootRegion().getSubregion(name).getAttributes().getPoolName();
         PoolImpl pi = (PoolImpl)PoolManager.find(pl);
@@ -1110,7 +1112,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
     // have bridge client disconnect from system
     vm0.invoke(new SerializableRunnable("Disconnect bridge client") {
       public void run() {
-        getLogWriter().info("[doTestSystemClientEventsInServer] disconnect bridge client");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[doTestSystemClientEventsInServer] disconnect bridge client");
         closeCache();
         disconnectFromDS();
       }
@@ -1132,7 +1134,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
       }
     }
     
-    getLogWriter().info("[doTestSystemClientEventsInServer] assert server detected client left");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[doTestSystemClientEventsInServer] assert server detected client left");
     assertFalse(firedSystemDuplicate);
     assertFalse(firedAdapterDuplicate);
     assertFalse(firedBridgeDuplicate);
@@ -1202,7 +1204,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
       }
     }
     
-    getLogWriter().info("[doTestSystemClientEventsInServer] assert server detected client re-join");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[doTestSystemClientEventsInServer] assert server detected client re-join");
     assertFalse(firedSystemDuplicate);
     assertFalse(firedAdapterDuplicate);
     assertFalse(firedBridgeDuplicate);
@@ -1251,7 +1253,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
     
     vm0.invoke(new SerializableRunnable("Wait for client to fully connect") {
       public void run() {
-        getLogWriter().info("[doTestSystemClientEventsInServer] wait for client to fully connect");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[doTestSystemClientEventsInServer] wait for client to fully connect");
         final String pl =
           getRootRegion().getSubregion(name).getAttributes().getPoolName();
         PoolImpl pi = (PoolImpl)PoolManager.find(pl);
@@ -1264,7 +1266,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
     try {
       vm0.invoke(new SerializableRunnable("Close bridge client region") {
         public void run() {
-          getLogWriter().info("[doTestSystemClientEventsInServer] close bridge client region");
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[doTestSystemClientEventsInServer] close bridge client region");
           getRootRegion().getSubregion(name).close();
           PoolManager.close();
         }
@@ -1281,7 +1283,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
         }
       }
       
-    getLogWriter().info("[doTestSystemClientEventsInServer] assert server detected client crashed");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[doTestSystemClientEventsInServer] assert server detected client crashed");
     assertFalse(firedSystemDuplicate);
     assertFalse(firedAdapterDuplicate);
     assertFalse(firedBridgeDuplicate);
@@ -1335,11 +1337,11 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
    * Note: This probably won't work if the pool has more than one Endpoint.
    */
   protected void waitForClientToFullyConnect(final PoolImpl pool) {
-    getLogWriter().info("[waitForClientToFullyConnect]");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[waitForClientToFullyConnect]");
     final long failMillis = System.currentTimeMillis() + JOIN_FAIL_MILLIS;
     boolean fullyConnected = false;
     while (!fullyConnected) {
-      pause(100);
+      Wait.pause(100);
       fullyConnected = pool.getConnectionCount() >= pool.getMinConnections();
       assertTrue("Client failed to create "
                  + pool.getMinConnections()
@@ -1348,7 +1350,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
                  + " connections were created.",
                  System.currentTimeMillis() < failMillis);
     }
-    getLogWriter().info("[waitForClientToFullyConnect] fullyConnected=" + fullyConnected);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[waitForClientToFullyConnect] fullyConnected=" + fullyConnected);
   }
   
   /**
@@ -1459,7 +1461,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
       new UniversalMembershipListenerAdapter() {
       @Override
       public synchronized void memberJoined(MembershipEvent event) {
-        getLogWriter().info("[testServerEventsInSystemClient] memberJoined >" + event.getMemberId() + "<");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testServerEventsInSystemClient] memberJoined >" + event.getMemberId() + "<");
         firedAdapterDuplicate[JOINED] = firedAdapter[JOINED];
         firedAdapter[JOINED] = true;
         memberAdapter[JOINED] = event.getDistributedMember();
@@ -1472,7 +1474,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
       }
       @Override
       public synchronized void memberLeft(MembershipEvent event) {
-        getLogWriter().info("[testServerEventsInSystemClient] memberLeft >" + event.getMemberId() + "<");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testServerEventsInSystemClient] memberLeft >" + event.getMemberId() + "<");
         firedAdapterDuplicate[LEFT] = firedAdapter[LEFT];
         firedAdapter[LEFT] = true;
         memberAdapter[LEFT] = event.getDistributedMember();
@@ -1485,7 +1487,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
       }
       @Override
       public synchronized void memberCrashed(MembershipEvent event) {
-        getLogWriter().info("[testServerEventsInSystemClient] memberCrashed >" + event.getMemberId() + "<");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testServerEventsInSystemClient] memberCrashed >" + event.getMemberId() + "<");
         firedAdapterDuplicate[CRASHED] = firedAdapter[CRASHED];
         firedAdapter[CRASHED] = true;
         memberAdapter[CRASHED] = event.getDistributedMember();
@@ -1533,7 +1535,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
     assertTrue(ports[0] != 0);
         
  // create BridgeServer in controller vm...
-    getLogWriter().info("[doTestSystemClientEventsInServer] Create BridgeServer");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[doTestSystemClientEventsInServer] Create BridgeServer");
     getSystem();
     AttributesFactory factory = new AttributesFactory();
     factory.setScope(Scope.LOCAL);
@@ -1552,9 +1554,9 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
     serverProperties.remove(DistributionConfig.SSL_PROTOCOLS_NAME);
     serverProperties.remove(DistributionConfig.SSL_REQUIRE_AUTHENTICATION_NAME);
     
-    getLogWriter().info("[testServerEventsInPeerSystem] ports[0]=" + ports[0]);
-    getLogWriter().info("[testServerEventsInPeerSystem] serverMemberId=" + serverMemberId);
-    getLogWriter().info("[testServerEventsInPeerSystem] serverMember=" + serverMember);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testServerEventsInPeerSystem] ports[0]=" + ports[0]);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testServerEventsInPeerSystem] serverMemberId=" + serverMemberId);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testServerEventsInPeerSystem] serverMember=" + serverMember);
     
     GemFireCacheImpl cache = GemFireCacheImpl.getExisting();
     assertNotNull(cache);
@@ -1570,7 +1572,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
     new CacheSerializableRunnable("Create Peer Cache") {
       @Override
       public void run2() throws CacheException {
-        getLogWriter().info("[testServerEventsInPeerSystem] Create Peer cache");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testServerEventsInPeerSystem] Create Peer cache");
         getSystem(serverProperties);
         AttributesFactory factory = new AttributesFactory();
         factory.setScope(Scope.LOCAL);
@@ -1587,8 +1589,8 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
     DistributedMember peerMember = (DistributedMember) vm0.invoke(
       UniversalMembershipListenerAdapterDUnitTest.class, "getDistributedMember");
 
-    getLogWriter().info("[testServerEventsInPeerSystem] peerMemberId=" + peerMemberId);
-    getLogWriter().info("[testServerEventsInPeerSystem] peerMember=" + peerMember);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testServerEventsInPeerSystem] peerMemberId=" + peerMemberId);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testServerEventsInPeerSystem] peerMember=" + peerMember);
 
     
 
@@ -1603,7 +1605,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
       }
     }
     
-    getLogWriter().info("[testServerEventsInPeerSystem] assert server detected peer join");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testServerEventsInPeerSystem] assert server detected peer join");
     assertFalse(firedSystemDuplicate);
     // TODO: sometimes get adapter duplicate since memberId isn't endpoint
     // initial impl uses Endpoint.toString() for memberId of server; final
@@ -1646,12 +1648,12 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
           new LocalLogWriter(InternalLogWriter.ALL_LEVEL, System.out);
     bgexecLogger.info("<ExpectedException action=add>" + 
         "java.io.IOException" + "</ExpectedException>");
-    final ExpectedException ex = addExpectedException(
+    final IgnoredException ex = IgnoredException.addIgnoredException(
         ServerConnectivityException.class.getName());
     try {
       vm0.invoke(new SerializableRunnable("Disconnect Peer server") {
         public void run() {
-          getLogWriter().info("[testServerEventsInPeerSystem] disconnect peer server");
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testServerEventsInPeerSystem] disconnect peer server");
           closeCache();
           disconnectFromDS();
         }
@@ -1674,7 +1676,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
       ex.remove();
     }
 
-    getLogWriter().info("[testServerEventsInPeerSystem] assert server detected peer crashed");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testServerEventsInPeerSystem] assert server detected peer crashed");
     assertFalse(firedSystemDuplicate);
     // TODO: sometimes get adapter duplicate since memberId isn't endpoint
     // initial impl uses Endpoint.toString() for memberId of server; final
@@ -1755,7 +1757,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
       new UniversalMembershipListenerAdapter() {
       @Override
       public synchronized void memberJoined(MembershipEvent event) {
-        getLogWriter().info("[testServerEventsInLonerClient] memberJoined >" + event.getMemberId() + "<");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testServerEventsInLonerClient] memberJoined >" + event.getMemberId() + "<");
         firedAdapterDuplicate[JOINED] = firedAdapter[JOINED];
         firedAdapter[JOINED] = true;
         memberAdapter[JOINED] = event.getDistributedMember();
@@ -1768,7 +1770,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
       }
       @Override
       public synchronized void memberLeft(MembershipEvent event) {
-        getLogWriter().info("[testServerEventsInLonerClient] memberLeft >" + event.getMemberId() + "<");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testServerEventsInLonerClient] memberLeft >" + event.getMemberId() + "<");
         firedAdapterDuplicate[LEFT] = firedAdapter[LEFT];
         firedAdapter[LEFT] = true;
         memberAdapter[LEFT] = event.getDistributedMember();
@@ -1781,7 +1783,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
       }
       @Override
       public synchronized void memberCrashed(MembershipEvent event) {
-        getLogWriter().info("[testServerEventsInLonerClient] memberCrashed >" + event.getMemberId() + "<");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testServerEventsInLonerClient] memberCrashed >" + event.getMemberId() + "<");
         firedAdapterDuplicate[CRASHED] = firedAdapter[CRASHED];
         firedAdapter[CRASHED] = true;
         memberAdapter[CRASHED] = event.getDistributedMember();
@@ -1828,13 +1830,13 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
       { AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET) };
     assertTrue(ports[0] != 0);
 
-    getLogWriter().info("[testServerEventsInLonerClient] create loner bridge client");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testServerEventsInLonerClient] create loner bridge client");
     Properties config = new Properties();
     config.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
     config.setProperty(DistributionConfig.LOCATORS_NAME, "");
     getSystem(config);
         
-    getLogWriter().info("[testServerEventsInLonerClient] create system bridge client");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testServerEventsInLonerClient] create system bridge client");
     getSystem();
 
     // register the bridge listener
@@ -1850,7 +1852,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
     new CacheSerializableRunnable("Create BridgeServer") {
       @Override
       public void run2() throws CacheException {
-        getLogWriter().info("[testServerEventsInLonerClient] Create BridgeServer");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testServerEventsInLonerClient] Create BridgeServer");
         getSystem();
         AttributesFactory factory = new AttributesFactory();
         factory.setScope(Scope.LOCAL);
@@ -1861,7 +1863,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
           testServerEventsInLonerClient_port = startBridgeServer(ports[0]);
         }
         catch (IOException e) {
-          getLogWriter().error(e);
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().error(e);
           fail(e.getMessage());
         }
       }
@@ -1878,14 +1880,14 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
     DistributedMember serverMember = (DistributedMember) vm0.invoke(
       UniversalMembershipListenerAdapterDUnitTest.class, "getDistributedMember");
 
-    getLogWriter().info("[testServerEventsInLonerClient] ports[0]=" + ports[0]);
-    getLogWriter().info("[testServerEventsInLonerClient] serverMemberId=" + serverMemberId);
-    getLogWriter().info("[testServerEventsInLonerClient] serverMember=" + serverMember);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testServerEventsInLonerClient] ports[0]=" + ports[0]);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testServerEventsInLonerClient] serverMemberId=" + serverMemberId);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testServerEventsInLonerClient] serverMember=" + serverMember);
 
     // create region which connects to bridge server
     AttributesFactory factory = new AttributesFactory();
     factory.setScope(Scope.LOCAL);
-    ClientServerTestCase.configureConnectionPool(factory, getServerHostName(host), ports, false, -1, -1, null);
+    ClientServerTestCase.configureConnectionPool(factory, NetworkUtils.getServerHostName(host), ports, false, -1, -1, null);
     createRegion(name, factory.create());
     assertNotNull(getRootRegion().getSubregion(name));
 
@@ -1900,7 +1902,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
       }
     }
     
-    getLogWriter().info("[testServerEventsInLonerClient] assert client detected server join");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testServerEventsInLonerClient] assert client detected server join");
     // TODO: sometimes get adapter duplicate since memberId isn't endpoint KIRK
     // initial impl uses Endpoint.toString() for memberId of server; final
     // impl should have server send its real memberId to client via HandShake
@@ -1940,7 +1942,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
     assertFalse(isClientAdapter[CRASHED]);
     resetArraysForTesting(firedAdapter, memberAdapter, memberIdAdapter, isClientAdapter);
 
-    getLogWriter().info("[testServerEventsInLonerClient] wait for client to fully connect");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testServerEventsInLonerClient] wait for client to fully connect");
     final String pl =
       getRootRegion().getSubregion(name).getAttributes().getPoolName();
     PoolImpl pi = (PoolImpl)PoolManager.find(pl);
@@ -1969,7 +1971,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
     try {
       vm0.invoke(new SerializableRunnable("Disconnect bridge server") {
         public void run() {
-          getLogWriter().info("[testServerEventsInLonerClient] disconnect bridge server");
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testServerEventsInLonerClient] disconnect bridge server");
           closeCache();
         }
       });
@@ -1993,7 +1995,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
       lw.info(removeExpected2);
     }
     
-    getLogWriter().info("[testServerEventsInLonerClient] assert client detected server crashed");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testServerEventsInLonerClient] assert client detected server crashed");
     // TODO: sometimes get adapter duplicate since memberId isn't endpoint KIRK
     // initial impl uses Endpoint.toString() for memberId of server; final
     // impl should have server send its real memberId to client via HandShake
@@ -2049,9 +2051,9 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
     serverMember = (DistributedMember) vm0.invoke(
       UniversalMembershipListenerAdapterDUnitTest.class, "getDistributedMember");
 
-    getLogWriter().info("[testServerEventsInLonerClient] ports[0]=" + ports[0]);
-    getLogWriter().info("[testServerEventsInLonerClient] serverMemberId=" + serverMemberId);
-    getLogWriter().info("[testServerEventsInLonerClient] serverMember=" + serverMember);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testServerEventsInLonerClient] ports[0]=" + ports[0]);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testServerEventsInLonerClient] serverMemberId=" + serverMemberId);
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testServerEventsInLonerClient] serverMember=" + serverMember);
                                                 
     synchronized(adapter) {
       if (!firedAdapter[JOINED]) {
@@ -2064,7 +2066,7 @@ public class UniversalMembershipListenerAdapterDUnitTest extends ClientServerTes
       }
     }
     
-    getLogWriter().info("[testServerEventsInLonerClient] assert client detected server re-join");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("[testServerEventsInLonerClient] assert client detected server re-join");
     // TODO: sometimes get adapter duplicate since memberId isn't endpoint KIRK
     // initial impl uses Endpoint.toString() for memberId of server; final
     // impl should have server send its real memberId to client via HandShake
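
The hunks above replace the inherited getLogWriter() and getServerHostName() calls with the static LogWriterUtils and NetworkUtils helpers. A minimal sketch of the resulting call pattern; the class and method names below are illustrative, not part of the commit:

import com.gemstone.gemfire.test.dunit.Host;
import com.gemstone.gemfire.test.dunit.LogWriterUtils;
import com.gemstone.gemfire.test.dunit.NetworkUtils;

// Illustrative helper class, not part of the commit.
public class DUnitLoggingSketch {
  public void logServerHost() {
    Host host = Host.getHost(0);
    // NetworkUtils.getServerHostName(host) replaces the old inherited getServerHostName(host)
    String serverHost = NetworkUtils.getServerHostName(host);
    // LogWriterUtils.getLogWriter() replaces the old inherited getLogWriter()
    LogWriterUtils.getLogWriter().info("[sketch] bridge server host=" + serverHost);
  }
}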

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/bean/stats/DistributedSystemStatsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/bean/stats/DistributedSystemStatsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/bean/stats/DistributedSystemStatsDUnitTest.java
index b7b3a70..1de93ee 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/bean/stats/DistributedSystemStatsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/bean/stats/DistributedSystemStatsDUnitTest.java
@@ -29,6 +29,7 @@ import com.gemstone.gemfire.management.MemberMXBean;
 import com.gemstone.gemfire.management.internal.SystemManagementService;
 import com.gemstone.gemfire.management.internal.beans.MemberMBean;
 import com.gemstone.gemfire.management.internal.beans.MemberMBeanBridge;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -47,10 +48,6 @@ public class DistributedSystemStatsDUnitTest extends ManagementTestBase{
     super.setUp();
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-  }
-  
   public void testDistributedSystemStats() throws Exception {
     initManagement(true);
 
@@ -99,9 +96,9 @@ public class DistributedSystemStatsDUnitTest extends ManagementTestBase{
             MemberMXBean memberBean = service.getMBeanProxy(memberMBeanName, MemberMXBean.class);
             waitForRefresh(2, memberMBeanName);
           } catch (NullPointerException e) {
-            fail("FAILED WITH EXCEPION", e);
+            Assert.fail("FAILED WITH EXCEPION", e);
           } catch (Exception e) {
-            fail("FAILED WITH EXCEPION", e);
+            Assert.fail("FAILED WITH EXCEPION", e);
           }
         }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/CliUtilDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/CliUtilDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/CliUtilDUnitTest.java
index 5c68915..192b458 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/CliUtilDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/CliUtilDUnitTest.java
@@ -37,11 +37,14 @@ import com.gemstone.gemfire.management.DistributedRegionMXBean;
 import com.gemstone.gemfire.management.ManagementService;
 import com.gemstone.gemfire.management.RegionMXBean;
 import com.gemstone.gemfire.management.internal.cli.result.CommandResultException;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * 
@@ -73,9 +76,9 @@ public class CliUtilDUnitTest extends CacheTestCase {
   
   private static final long serialVersionUID = 1L;
   
-  public void tearDown2() throws Exception {
+  @Override
+  protected final void preTearDownCacheTestCase() throws Exception {
     destroySetup();
-    super.tearDown2();
   }
   
   
@@ -162,22 +165,22 @@ public class CliUtilDUnitTest extends CacheTestCase {
             checkBean(REGION_MEMBER1_GROUP2,1) &&
             checkBean(REGION_MEMBER2_GROUP2,1) ;                    
         if(!flag){
-          getLogWriter().info("Still probing for mbeans");
+          LogWriterUtils.getLogWriter().info("Still probing for mbeans");
           return false; 
         }
         else{
-          getLogWriter().info("All distributed region mbeans are federated to manager.");
+          LogWriterUtils.getLogWriter().info("All distributed region mbeans are federated to manager.");
           return true;
         }
       }
       private boolean checkBean(String string, int memberCount) {
         DistributedRegionMXBean bean2 = service.getDistributedRegionMXBean(Region.SEPARATOR+string);        
-        getLogWriter().info("DistributedRegionMXBean for region=" + string + " is " + bean2);
+        LogWriterUtils.getLogWriter().info("DistributedRegionMXBean for region=" + string + " is " + bean2);
         if(bean2==null)
           return false;
         else{
           int members = bean2.getMemberCount();
-          getLogWriter().info("DistributedRegionMXBean for region=" + string + " is aggregated for " + memberCount + " expected count=" + memberCount);
+          LogWriterUtils.getLogWriter().info("DistributedRegionMXBean for region=" + string + " is aggregated for " + memberCount + " expected count=" + memberCount);
           if(members<memberCount){            
             return false;
           }
@@ -192,8 +195,8 @@ public class CliUtilDUnitTest extends CacheTestCase {
       }
     };
     
-    DistributedTestCase.waitForCriterion(waitForMaangerMBean, 120000, 2000, true);  
-    getLogWriter().info("Manager federation is complete");
+    Wait.waitForCriterion(waitForMaangerMBean, 120000, 2000, true);  
+    LogWriterUtils.getLogWriter().info("Manager federation is complete");
   }
   
   private void registerFunction() {    
@@ -209,7 +212,7 @@ public class CliUtilDUnitTest extends CacheTestCase {
     assertNotNull(service.getMemberMXBean());
     RegionMXBean bean = service.getLocalRegionMBean(Region.SEPARATOR+regionName); 
     assertNotNull(bean);
-    getLogWriter().info("Created region=" + regionName + " Bean=" + bean);
+    LogWriterUtils.getLogWriter().info("Created region=" + regionName + " Bean=" + bean);
     return region;
   }
   
@@ -221,7 +224,7 @@ public class CliUtilDUnitTest extends CacheTestCase {
     localProps.setProperty(DistributionConfig.JMX_MANAGER_START_NAME, "false");    
     int jmxPort = AvailablePortHelper.getRandomAvailableTCPPort();
     localProps.setProperty(DistributionConfig.JMX_MANAGER_PORT_NAME, ""+jmxPort);
-    getLogWriter().info("Set jmx-port="+ jmxPort);
+    LogWriterUtils.getLogWriter().info("Set jmx-port="+ jmxPort);
     getSystem(localProps);
     getCache();
     final ManagementService service = ManagementService.getManagementService(getCache());
@@ -240,7 +243,7 @@ public class CliUtilDUnitTest extends CacheTestCase {
     
     final VM vm1 = Host.getHost(0).getVM(0);
     
-    getLogWriter().info("testFor - findAllMatchingMembers");
+    LogWriterUtils.getLogWriter().info("testFor - findAllMatchingMembers");
     vm1.invoke(new SerializableRunnable() {      
       @Override
       public void run() {
@@ -256,7 +259,7 @@ public class CliUtilDUnitTest extends CacheTestCase {
       }
     });
     
-    getLogWriter().info("testFor - getDistributedMemberByNameOrId");
+    LogWriterUtils.getLogWriter().info("testFor - getDistributedMemberByNameOrId");
     vm1.invoke(new SerializableRunnable() {      
       @Override
       public void run() {
@@ -264,7 +267,7 @@ public class CliUtilDUnitTest extends CacheTestCase {
       }     
     });
     
-    getLogWriter().info("testFor - executeFunction");
+    LogWriterUtils.getLogWriter().info("testFor - executeFunction");
     vm1.invoke(new SerializableRunnable() {      
       @Override
       public void run() {
@@ -272,7 +275,7 @@ public class CliUtilDUnitTest extends CacheTestCase {
       }     
     });
     
-    getLogWriter().info("testFor - getRegionAssociatedMembers");
+    LogWriterUtils.getLogWriter().info("testFor - getRegionAssociatedMembers");
     vm1.invoke(new SerializableRunnable() {      
       @Override
       public void run() {
@@ -314,7 +317,7 @@ public class CliUtilDUnitTest extends CacheTestCase {
       assertEquals(true,containsMember(set,MEMBER_2_GROUP2));
       
     } catch (CommandResultException e) {     
-      fail("CliUtil failed with exception",e);
+      Assert.fail("CliUtil failed with exception",e);
     }
   }
   
@@ -355,7 +358,7 @@ public class CliUtilDUnitTest extends CacheTestCase {
       assertEquals("executeOnGroup", region1.get(MEMBER_1_GROUP1));
       assertEquals("executeOnGroup", region1.get(MEMBER_2_GROUP1));
     } catch (CommandResultException e) {
-      fail("Error during querying members",e);
+      Assert.fail("Error during querying members",e);
     }        
   }
   
@@ -418,7 +421,7 @@ public class CliUtilDUnitTest extends CacheTestCase {
       Region region = cache.getRegion(COMMON_REGION);
       String id = cache.getDistributedSystem().getDistributedMember().getName();
       region.put(id, object);
-      getLogWriter().info("Completed executeFunction on member : " + id);
+      LogWriterUtils.getLogWriter().info("Completed executeFunction on member : " + id);
       context.getResultSender().lastResult(true);
     }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/CliCommandTestBase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/CliCommandTestBase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/CliCommandTestBase.java
index 4d8f8ff..664e7a6 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/CliCommandTestBase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/CliCommandTestBase.java
@@ -29,6 +29,7 @@ import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
 import com.gemstone.gemfire.management.internal.cli.shell.Gfsh;
 import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 
@@ -80,9 +81,12 @@ public class CliCommandTestBase extends CacheTestCase {
   }
 
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
+    preTearDownCliCommandTestBase();
     destroyDefaultSetup();
-    super.tearDown2();
+  }
+  
+  protected void preTearDownCliCommandTestBase() throws Exception {
   }
 
   /**
@@ -544,18 +548,18 @@ public class CliCommandTestBase extends CacheTestCase {
   }
 
   protected void info(String string) {
-    getLogWriter().info(string);
+    LogWriterUtils.getLogWriter().info(string);
   }
 
   protected void debug(String string) {
-    getLogWriter().fine(string);
+    LogWriterUtils.getLogWriter().fine(string);
   }
 
   protected void error(String string) {
-    getLogWriter().error(string);
+    LogWriterUtils.getLogWriter().error(string);
   }
 
   protected void error(String string, Throwable e) {
-    getLogWriter().error(string, e);
+    LogWriterUtils.getLogWriter().error(string, e);
   }
 }
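
This base-class change converts the tearDown2() override into the template-method hooks used throughout the rest of the commit: CliCommandTestBase implements the final preTearDownCacheTestCase(), runs the subclass hook first, and then calls destroyDefaultSetup(). A minimal sketch of a subclass using the hook; the test class name is illustrative:

import com.gemstone.gemfire.management.internal.cli.commands.CliCommandTestBase;

// Illustrative subclass, not part of the commit.
public class ExampleCommandsDUnitTest extends CliCommandTestBase {

  public ExampleCommandsDUnitTest(String name) {
    super(name);
  }

  @Override
  protected void preTearDownCliCommandTestBase() throws Exception {
    // per-test cleanup runs here, before the base class calls destroyDefaultSetup()
  }
}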

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ConfigCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ConfigCommandsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ConfigCommandsDUnitTest.java
index 37f7520..dca0f78 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ConfigCommandsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ConfigCommandsDUnitTest.java
@@ -35,11 +35,15 @@ import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
 import com.gemstone.gemfire.management.internal.cli.remote.CommandProcessor;
 import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
 import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 import org.apache.commons.io.FileUtils;
 
@@ -79,20 +83,20 @@ public class ConfigCommandsDUnitTest extends CliCommandTestBase {
     super(name);
   }
 
-  public void tearDown2() throws Exception {
+  @Override
+  protected void preTearDownCliCommandTestBase() throws Exception {
     deleteTestFiles();
-    invokeInEveryVM(new SerializableRunnable() {
+    Invoke.invokeInEveryVM(new SerializableRunnable() {
 
       @Override
       public void run() {
         try {
           deleteTestFiles();
         } catch (IOException e) {
-          fail("error", e);
+          Assert.fail("error", e);
         }
       }
     });
-    super.tearDown2();
   }
 
   public void testDescribeConfig() throws ClassNotFoundException, IOException {
@@ -121,10 +125,10 @@ public class ConfigCommandsDUnitTest extends CliCommandTestBase {
     RuntimeMXBean runtimeBean = ManagementFactory.getRuntimeMXBean();
     List<String> jvmArgs = runtimeBean.getInputArguments();
 
-    getLogWriter().info("#SB Actual JVM Args : ");
+    LogWriterUtils.getLogWriter().info("#SB Actual JVM Args : ");
 
     for (String jvmArg : jvmArgs) {
-      getLogWriter().info("#SB JVM " + jvmArg);
+      LogWriterUtils.getLogWriter().info("#SB JVM " + jvmArg);
     }
 
     InternalDistributedSystem system = (InternalDistributedSystem) cache.getDistributedSystem();
@@ -138,7 +142,7 @@ public class ConfigCommandsDUnitTest extends CliCommandTestBase {
     CommandResult cmdResult = executeCommand(command);
 
     String resultStr = commandResultToString(cmdResult);
-    getLogWriter().info("#SB Hiding the defaults\n" + resultStr);
+    LogWriterUtils.getLogWriter().info("#SB Hiding the defaults\n" + resultStr);
 
     assertEquals(true, cmdResult.getStatus().equals(Status.OK));
     assertEquals(true, resultStr.contains("G1"));
@@ -148,7 +152,7 @@ public class ConfigCommandsDUnitTest extends CliCommandTestBase {
 
     cmdResult = executeCommand(command + " --" + CliStrings.DESCRIBE_CONFIG__HIDE__DEFAULTS + "=false");
     resultStr = commandResultToString(cmdResult);
-    getLogWriter().info("#SB No hiding of defaults\n" + resultStr);
+    LogWriterUtils.getLogWriter().info("#SB No hiding of defaults\n" + resultStr);
 
     assertEquals(true, cmdResult.getStatus().equals(Status.OK));
     assertEquals(true, resultStr.contains("is-server"));
@@ -250,7 +254,7 @@ public class ConfigCommandsDUnitTest extends CliCommandTestBase {
       FileReader reader = new FileReader(shellConfigFile);
       reader.read(fileContents);
     } catch (Exception ex) {
-      fail("Unable to read file contents for comparison", ex);
+      Assert.fail("Unable to read file contents for comparison", ex);
     }
 
     assertEquals(configToMatch, new String(fileContents));
@@ -277,8 +281,8 @@ public class ConfigCommandsDUnitTest extends CliCommandTestBase {
     csb.addOption(CliStrings.ALTER_RUNTIME_CONFIG__LOG__DISK__SPACE__LIMIT, "10");
     CommandResult cmdResult = executeCommand(csb.getCommandString());
     String resultString = commandResultToString(cmdResult);
-    getLogWriter().info("Result\n");
-    getLogWriter().info(resultString);
+    LogWriterUtils.getLogWriter().info("Result\n");
+    LogWriterUtils.getLogWriter().info(resultString);
     assertEquals(true, cmdResult.getStatus().equals(Status.OK));
     assertEquals(LogWriterImpl.INFO_LEVEL, config.getLogLevel());
     assertEquals(50, config.getLogFileSizeLimit());
@@ -316,8 +320,8 @@ public class ConfigCommandsDUnitTest extends CliCommandTestBase {
     CommandStringBuilder csb = new CommandStringBuilder(CliStrings.ALTER_RUNTIME_CONFIG);
     CommandResult cmdResult = executeCommand(csb.getCommandString());
     String resultAsString = commandResultToString(cmdResult);
-    getLogWriter().info("#SB Result\n");
-    getLogWriter().info(resultAsString);
+    LogWriterUtils.getLogWriter().info("#SB Result\n");
+    LogWriterUtils.getLogWriter().info(resultAsString);
     assertEquals(true, cmdResult.getStatus().equals(Status.ERROR));
     assertTrue(resultAsString.contains(CliStrings.ALTER_RUNTIME_CONFIG__RELEVANT__OPTION__MESSAGE));
 
@@ -325,8 +329,8 @@ public class ConfigCommandsDUnitTest extends CliCommandTestBase {
     csb.addOption(CliStrings.ALTER_RUNTIME_CONFIG__LOG__DISK__SPACE__LIMIT, "2000000000");
     cmdResult = executeCommand(csb.getCommandString());
     resultAsString = commandResultToString(cmdResult);
-    getLogWriter().info("#SB Result\n");
-    getLogWriter().info(resultAsString);
+    LogWriterUtils.getLogWriter().info("#SB Result\n");
+    LogWriterUtils.getLogWriter().info(resultAsString);
     assertEquals(true, cmdResult.getStatus().equals(Status.ERROR));
 
   }
@@ -361,8 +365,8 @@ public class ConfigCommandsDUnitTest extends CliCommandTestBase {
     csb.addOption(CliStrings.ALTER_RUNTIME_CONFIG__LOG__DISK__SPACE__LIMIT, "10");
     CommandResult cmdResult = executeCommand(csb.getCommandString());
     String resultString = commandResultToString(cmdResult);
-    getLogWriter().info("#SB Result\n");
-    getLogWriter().info(resultString);
+    LogWriterUtils.getLogWriter().info("#SB Result\n");
+    LogWriterUtils.getLogWriter().info(resultString);
     assertEquals(true, cmdResult.getStatus().equals(Status.OK));
     assertEquals(LogWriterImpl.INFO_LEVEL, config.getLogLevel());
     assertEquals(50, config.getLogFileSizeLimit());
@@ -415,7 +419,7 @@ public class ConfigCommandsDUnitTest extends CliCommandTestBase {
           final InternalLocator locator = (InternalLocator) Locator.startLocatorAndDS(locatorPort, locatorLogFile, null,
               locatorProps);
 
-          DistributedTestCase.WaitCriterion wc = new DistributedTestCase.WaitCriterion() {
+          WaitCriterion wc = new WaitCriterion() {
             @Override
             public boolean done() {
               return locator.isSharedConfigurationRunning();
@@ -426,7 +430,7 @@ public class ConfigCommandsDUnitTest extends CliCommandTestBase {
               return "Waiting for shared configuration to be started";
             }
           };
-          DistributedTestCase.waitForCriterion(wc, 5000, 500, true);
+          Wait.waitForCriterion(wc, 5000, 500, true);
         } catch (IOException ioex) {
           fail("Unable to create a locator with a shared configuration");
         }
@@ -477,7 +481,7 @@ public class ConfigCommandsDUnitTest extends CliCommandTestBase {
           gemfireProperties = sharedConfig.getConfiguration(groupName).getGemfireProperties();
           assertEquals("fine", gemfireProperties.get(DistributionConfig.LOG_LEVEL_NAME));
         } catch (Exception e) {
-          fail("Error occurred in cluster configuration service", e);
+          Assert.fail("Error occurred in cluster configuration service", e);
         }
       }
     });
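
The WaitCriterion/waitForCriterion pair moves from nested members of DistributedTestCase to the top-level WaitCriterion type and the static Wait helper, as in the locator hunks above. A minimal sketch of the polling idiom; the 5000/500 millisecond timings mirror the hunks, while the field and method names are illustrative:

import java.util.concurrent.atomic.AtomicBoolean;

import com.gemstone.gemfire.test.dunit.Wait;
import com.gemstone.gemfire.test.dunit.WaitCriterion;

// Illustrative class, not part of the commit.
public class WaitForConditionSketch {
  private final AtomicBoolean sharedConfigRunning = new AtomicBoolean(false);

  public void waitForSharedConfiguration() {
    WaitCriterion wc = new WaitCriterion() {
      @Override
      public boolean done() {
        // polled until true or until the timeout expires
        return sharedConfigRunning.get();
      }
      @Override
      public String description() {
        return "Waiting for shared configuration to be started";
      }
    };
    // 5000 ms timeout, 500 ms polling interval, throw on timeout
    Wait.waitForCriterion(wc, 5000, 500, true);
  }
}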

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/CreateAlterDestroyRegionCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/CreateAlterDestroyRegionCommandsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/CreateAlterDestroyRegionCommandsDUnitTest.java
index 9c0fa21..9f6b141 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/CreateAlterDestroyRegionCommandsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/CreateAlterDestroyRegionCommandsDUnitTest.java
@@ -40,11 +40,14 @@ import com.gemstone.gemfire.management.internal.ManagementConstants;
 import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
 import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
 import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 import javax.management.MBeanServer;
 import javax.management.MalformedObjectNameException;
@@ -221,7 +224,7 @@ public class CreateAlterDestroyRegionCommandsDUnitTest extends CliCommandTestBas
     Host.getHost(0).getVM(0).invoke(new SerializableRunnable() {
       @Override
       public void run() {
-        DistributedTestCase.WaitCriterion wc = new DistributedTestCase.WaitCriterion() {
+        WaitCriterion wc = new WaitCriterion() {
           @Override
           public boolean done() {
             try {
@@ -231,7 +234,7 @@ public class CreateAlterDestroyRegionCommandsDUnitTest extends CliCommandTestBas
               ObjectName queryExpON = new ObjectName(queryExp);
               return !(mbeanServer.queryNames(null, queryExpON).isEmpty());
             } catch (MalformedObjectNameException mone) {
-              getLogWriter().error(mone);
+              LogWriterUtils.getLogWriter().error(mone);
               fail(mone.getMessage());
               return false;
             }
@@ -243,42 +246,42 @@ public class CreateAlterDestroyRegionCommandsDUnitTest extends CliCommandTestBas
           }
         };
 
-        DistributedTestCase.waitForCriterion(wc, 5000, 500, true);
+        Wait.waitForCriterion(wc, 5000, 500, true);
       }
     });
 
     // Test failure when region not found
     String command = "destroy region --name=DOESNOTEXIST";
-    getLogWriter().info("testDestroyRegion command=" + command);
+    LogWriterUtils.getLogWriter().info("testDestroyRegion command=" + command);
     CommandResult cmdResult = executeCommand(command);
     String strr = commandResultToString(cmdResult);
-    getLogWriter().info("testDestroyRegion strr=" + strr);
+    LogWriterUtils.getLogWriter().info("testDestroyRegion strr=" + strr);
     assertTrue(stringContainsLine(strr, "Could not find.*\"DOESNOTEXIST\".*"));
     assertEquals(Result.Status.ERROR, cmdResult.getStatus());
 
     // Test unable to destroy with co-location
     command = "destroy region --name=/Customer";
-    getLogWriter().info("testDestroyRegion command=" + command);
+    LogWriterUtils.getLogWriter().info("testDestroyRegion command=" + command);
     cmdResult = executeCommand(command);
     strr = commandResultToString(cmdResult);
-    getLogWriter().info("testDestroyRegion strr=" + strr);
+    LogWriterUtils.getLogWriter().info("testDestroyRegion strr=" + strr);
     assertEquals(Result.Status.ERROR, cmdResult.getStatus());
 
     // Test success
     command = "destroy region --name=/Order";
-    getLogWriter().info("testDestroyRegion command=" + command);
+    LogWriterUtils.getLogWriter().info("testDestroyRegion command=" + command);
     cmdResult = executeCommand(command);
     strr = commandResultToString(cmdResult);
     assertTrue(stringContainsLine(strr, ".*Order.*destroyed successfully.*"));
-    getLogWriter().info("testDestroyRegion strr=" + strr);
+    LogWriterUtils.getLogWriter().info("testDestroyRegion strr=" + strr);
     assertEquals(Result.Status.OK, cmdResult.getStatus());
 
     command = "destroy region --name=/Customer";
-    getLogWriter().info("testDestroyRegion command=" + command);
+    LogWriterUtils.getLogWriter().info("testDestroyRegion command=" + command);
     cmdResult = executeCommand(command);
     strr = commandResultToString(cmdResult);
     assertTrue(stringContainsLine(strr, ".*Customer.*destroyed successfully.*"));
-    getLogWriter().info("testDestroyRegion strr=" + strr);
+    LogWriterUtils.getLogWriter().info("testDestroyRegion strr=" + strr);
     assertEquals(Result.Status.OK, cmdResult.getStatus());
   }
 
@@ -287,19 +290,19 @@ public class CreateAlterDestroyRegionCommandsDUnitTest extends CliCommandTestBas
     createDefaultSetup(null);
     String command = CliStrings.CREATE_REGION + " --" + CliStrings.CREATE_REGION__REGION + "=" + this.region46391 + " --" + CliStrings.CREATE_REGION__REGIONSHORTCUT + "=REPLICATE";
 
-    getLogWriter().info("testCreateRegion46391 create region command=" + command);
+    LogWriterUtils.getLogWriter().info("testCreateRegion46391 create region command=" + command);
 
     CommandResult cmdResult = executeCommand(command);
     assertEquals(Result.Status.OK, cmdResult.getStatus());
 
     command = CliStrings.PUT + " --" + CliStrings.PUT__KEY + "=k1" + " --" + CliStrings.PUT__VALUE + "=k1" + " --" + CliStrings.PUT__REGIONNAME + "=" + this.region46391;
 
-    getLogWriter().info("testCreateRegion46391 put command=" + command);
+    LogWriterUtils.getLogWriter().info("testCreateRegion46391 put command=" + command);
 
     CommandResult cmdResult2 = executeCommand(command);
     assertEquals(Result.Status.OK, cmdResult2.getStatus());
 
-    getLogWriter().info("testCreateRegion46391  cmdResult2=" + commandResultToString(cmdResult2));
+    LogWriterUtils.getLogWriter().info("testCreateRegion46391  cmdResult2=" + commandResultToString(cmdResult2));
     String str1 = "Result      : true";
     String str2 = "Key         : k1";
     String str3 = "Key Class   : java.lang.String";
@@ -751,7 +754,7 @@ public class CreateAlterDestroyRegionCommandsDUnitTest extends CliCommandTestBas
           final InternalLocator locator = (InternalLocator) Locator.startLocatorAndDS(locatorPort, locatorLogFile, null,
               locatorProps);
 
-          DistributedTestCase.WaitCriterion wc = new DistributedTestCase.WaitCriterion() {
+          WaitCriterion wc = new WaitCriterion() {
             @Override
             public boolean done() {
               return locator.isSharedConfigurationRunning();
@@ -762,7 +765,7 @@ public class CreateAlterDestroyRegionCommandsDUnitTest extends CliCommandTestBas
               return "Waiting for shared configuration to be started";
             }
           };
-          DistributedTestCase.waitForCriterion(wc, 5000, 500, true);
+          Wait.waitForCriterion(wc, 5000, 500, true);
         } catch (IOException ioex) {
           fail("Unable to create a locator with a shared configuration");
         }
@@ -802,7 +805,7 @@ public class CreateAlterDestroyRegionCommandsDUnitTest extends CliCommandTestBas
     Host.getHost(0).getVM(0).invoke(new SerializableRunnable() {
       @Override
       public void run() {
-        DistributedTestCase.WaitCriterion wc = new DistributedTestCase.WaitCriterion() {
+        WaitCriterion wc = new WaitCriterion() {
           @Override
           public boolean done() {
             try {
@@ -812,7 +815,7 @@ public class CreateAlterDestroyRegionCommandsDUnitTest extends CliCommandTestBas
               ObjectName queryExpON = new ObjectName(queryExp);
               return !(mbeanServer.queryNames(null, queryExpON).isEmpty());
             } catch (MalformedObjectNameException mone) {
-              getLogWriter().error(mone);
+              LogWriterUtils.getLogWriter().error(mone);
               fail(mone.getMessage());
               return false;
             }
@@ -824,7 +827,7 @@ public class CreateAlterDestroyRegionCommandsDUnitTest extends CliCommandTestBas
           }
         };
 
-        DistributedTestCase.waitForCriterion(wc, 5000, 500, true);
+        Wait.waitForCriterion(wc, 5000, 500, true);
       }
     });
 
@@ -836,7 +839,7 @@ public class CreateAlterDestroyRegionCommandsDUnitTest extends CliCommandTestBas
         try {
           assertTrue(sharedConfig.getConfiguration(groupName).getCacheXmlContent().contains(regionName));
         } catch (Exception e) {
-          fail("Error in cluster configuration service", e);
+          Assert.fail("Error in cluster configuration service", e);
         }
       }
     });
@@ -937,7 +940,7 @@ public class CreateAlterDestroyRegionCommandsDUnitTest extends CliCommandTestBas
           final InternalLocator locator = (InternalLocator) Locator.startLocatorAndDS(locatorPort, locatorLogFile, null,
               locatorProps);
 
-          DistributedTestCase.WaitCriterion wc = new DistributedTestCase.WaitCriterion() {
+          WaitCriterion wc = new WaitCriterion() {
             @Override
             public boolean done() {
               return locator.isSharedConfigurationRunning();
@@ -948,7 +951,7 @@ public class CreateAlterDestroyRegionCommandsDUnitTest extends CliCommandTestBas
               return "Waiting for shared configuration to be started";
             }
           };
-          DistributedTestCase.waitForCriterion(wc, 5000, 500, true);
+          Wait.waitForCriterion(wc, 5000, 500, true);
         } catch (IOException ioex) {
           fail("Unable to create a locator with a shared configuration");
         }
@@ -988,7 +991,7 @@ public class CreateAlterDestroyRegionCommandsDUnitTest extends CliCommandTestBas
     Host.getHost(0).getVM(0).invoke(new SerializableRunnable() {
       @Override
       public void run() {
-        DistributedTestCase.WaitCriterion wc = new DistributedTestCase.WaitCriterion() {
+        WaitCriterion wc = new WaitCriterion() {
           @Override
           public boolean done() {
             try {
@@ -998,7 +1001,7 @@ public class CreateAlterDestroyRegionCommandsDUnitTest extends CliCommandTestBas
               ObjectName queryExpON = new ObjectName(queryExp);
               return !(mbeanServer.queryNames(null, queryExpON).isEmpty());
             } catch (MalformedObjectNameException mone) {
-              getLogWriter().error(mone);
+              LogWriterUtils.getLogWriter().error(mone);
               fail(mone.getMessage());
               return false;
             }
@@ -1010,7 +1013,7 @@ public class CreateAlterDestroyRegionCommandsDUnitTest extends CliCommandTestBas
           }
         };
 
-        DistributedTestCase.waitForCriterion(wc, 5000, 500, true);
+        Wait.waitForCriterion(wc, 5000, 500, true);
       }
     });
 
@@ -1031,7 +1034,7 @@ public class CreateAlterDestroyRegionCommandsDUnitTest extends CliCommandTestBas
     commandStringBuilder = new CommandStringBuilder(CliStrings.DESTROY_REGION);
     commandStringBuilder.addOption(CliStrings.DESTROY_REGION__REGION, regionName);
     cmdResult = executeCommand(commandStringBuilder.toString());
-    getLogWriter().info("#SB" + commandResultToString(cmdResult));
+    LogWriterUtils.getLogWriter().info("#SB" + commandResultToString(cmdResult));
     assertEquals(Result.Status.OK, cmdResult.getStatus());
 
     // Make sure the region was removed from the shared config
@@ -1075,7 +1078,7 @@ public class CreateAlterDestroyRegionCommandsDUnitTest extends CliCommandTestBas
   }
 
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCliCommandTestBase() throws Exception {
     for (String path : this.filesToBeDeleted) {
       try {
         final File fileToDelete = new File(path);
@@ -1084,11 +1087,10 @@ public class CreateAlterDestroyRegionCommandsDUnitTest extends CliCommandTestBas
           executeCommand("undeploy --jar=" + fileToDelete.getName());
         }
       } catch (IOException e) {
-        getLogWriter().error("Unable to delete file", e);
+        LogWriterUtils.getLogWriter().error("Unable to delete file", e);
       }
     }
     this.filesToBeDeleted.clear();
-    super.tearDown2();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/DeployCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/DeployCommandsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/DeployCommandsDUnitTest.java
index 2e2a2d5..21e44b2 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/DeployCommandsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/DeployCommandsDUnitTest.java
@@ -29,10 +29,12 @@ import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
 import com.gemstone.gemfire.management.internal.cli.remote.CommandExecutionContext;
 import com.gemstone.gemfire.management.internal.cli.remote.CommandProcessor;
 import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 import java.io.File;
 import java.io.FilenameFilter;
@@ -76,7 +78,7 @@ public class DeployCommandsDUnitTest extends CliCommandTestBase {
 
   @SuppressWarnings("serial")
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCliCommandTestBase() throws Exception {
     Host.getHost(0).getVM(1).invoke(new SerializableRunnable() {
       public void run() {
         DistributionManager.isDedicatedAdminVM = false;
@@ -89,8 +91,6 @@ public class DeployCommandsDUnitTest extends CliCommandTestBase {
       }
     });
     deleteSavedJarFiles();
-
-    super.tearDown2();
   }
 
   @SuppressWarnings("serial")
@@ -346,7 +346,7 @@ public class DeployCommandsDUnitTest extends CliCommandTestBase {
           final InternalLocator locator = (InternalLocator) Locator.startLocatorAndDS(locatorPort, locatorLogFile, null,
               locatorProps);
 
-          DistributedTestCase.WaitCriterion wc = new DistributedTestCase.WaitCriterion() {
+          WaitCriterion wc = new WaitCriterion() {
             @Override
             public boolean done() {
               return locator.isSharedConfigurationRunning();
@@ -357,7 +357,7 @@ public class DeployCommandsDUnitTest extends CliCommandTestBase {
               return "Waiting for shared configuration to be started";
             }
           };
-          DistributedTestCase.waitForCriterion(wc, 5000, 500, true);
+          Wait.waitForCriterion(wc, 5000, 500, true);
         } catch (IOException ioex) {
           fail("Unable to create a locator with a shared configuration");
         }
@@ -412,7 +412,7 @@ public class DeployCommandsDUnitTest extends CliCommandTestBase {
         try {
           assertTrue(sharedConfig.getConfiguration(groupName).getJarNames().contains("DeployCommandsDUnit1.jar"));
         } catch (Exception e) {
-          fail("Error occurred in cluster configuration service", e);
+          Assert.fail("Error occurred in cluster configuration service", e);
         }
       }
     });
@@ -445,7 +445,7 @@ public class DeployCommandsDUnitTest extends CliCommandTestBase {
         try {
           assertFalse(sharedConfig.getConfiguration(groupName).getJarNames().contains("DeployCommandsDUnit1.jar"));
         } catch (Exception e) {
-          fail("Error occurred in cluster configuration service", e);
+          Assert.fail("Error occurred in cluster configuration service", e);
         }
       }
     });
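
Several hunks above replace the two-argument fail(message, cause) that these tests previously inherited from their DUnit base class with the static dunit Assert.fail, which keeps the original exception attached to the failure. A minimal sketch, with an illustrative method name:

import com.gemstone.gemfire.test.dunit.Assert;

// Illustrative class, not part of the commit.
public class FailWithCauseSketch {
  public void verifySharedConfiguration() {
    try {
      // placeholder for the operation under test
      throw new Exception("simulated failure");
    } catch (Exception e) {
      // dunit Assert.fail(String, Throwable) preserves the cause;
      // JUnit's inherited fail(String) would drop it
      Assert.fail("Error occurred in cluster configuration service", e);
    }
  }
}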

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/DiskStoreCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/DiskStoreCommandsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/DiskStoreCommandsDUnitTest.java
index b32a3a0..826f128 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/DiskStoreCommandsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/DiskStoreCommandsDUnitTest.java
@@ -46,11 +46,14 @@ import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
 import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
 import com.gemstone.gemfire.management.internal.cli.shell.Gfsh;
 import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 import org.junit.Test;
 
@@ -198,7 +201,7 @@ public class DiskStoreCommandsDUnitTest extends CliCommandTestBase {
             return "Waiting for another persistent member to come online";
           }
         };
-        waitForCriterion(waitCriterion, 70000, 100, true);
+        Wait.waitForCriterion(waitCriterion, 70000, 100, true);
       }
     });
 
@@ -369,16 +372,16 @@ public class DiskStoreCommandsDUnitTest extends CliCommandTestBase {
       }
     });
     String command = "validate offline-disk-store --name=" + diskStoreName1 + " --disk-dirs=" + diskStoreDir.getAbsolutePath();
-    getLogWriter().info("testValidateDiskStore command: " + command);
+    LogWriterUtils.getLogWriter().info("testValidateDiskStore command: " + command);
     CommandResult cmdResult = executeCommand(command);
     if (cmdResult != null) {
       String stringResult = commandResultToString(cmdResult);
-      getLogWriter().info("testValidateDiskStore cmdResult is stringResult " + stringResult);
+      LogWriterUtils.getLogWriter().info("testValidateDiskStore cmdResult is stringResult " + stringResult);
       assertEquals(Result.Status.OK, cmdResult.getStatus());
       assertTrue(stringResult.contains("Total number of region entries in this disk store is"));
 
     } else {
-      getLogWriter().info("testValidateDiskStore cmdResult is null");
+      LogWriterUtils.getLogWriter().info("testValidateDiskStore cmdResult is null");
       fail("Did not get CommandResult in testValidateDiskStore");
     }
   }
@@ -423,7 +426,7 @@ public class DiskStoreCommandsDUnitTest extends CliCommandTestBase {
       }
     });
     String command = "export offline-disk-store --name=" + diskStoreName1 + " --disk-dirs=" + diskStoreDir.getAbsolutePath() + " --dir=" + exportDir;
-    getLogWriter().info("testExportDiskStore command" + command);
+    LogWriterUtils.getLogWriter().info("testExportDiskStore command" + command);
     CommandResult cmdResult = executeCommand(command);
     if (cmdResult != null) {
       assertEquals(Result.Status.OK, cmdResult.getStatus());
@@ -432,7 +435,7 @@ public class DiskStoreCommandsDUnitTest extends CliCommandTestBase {
       SnapshotTestUtil.checkSnapshotEntries(exportDir, entries, diskStoreName1, region2);
 
     } else {
-      getLogWriter().info("testExportOfflineDiskStore cmdResult is null");
+      LogWriterUtils.getLogWriter().info("testExportOfflineDiskStore cmdResult is null");
       fail("Did not get CommandResult in testExportOfflineDiskStore");
     }
   }
@@ -462,7 +465,7 @@ public class DiskStoreCommandsDUnitTest extends CliCommandTestBase {
           final InternalLocator locator = (InternalLocator) Locator.startLocatorAndDS(locatorPort, locatorLogFile, null,
               locatorProps);
 
-          DistributedTestCase.WaitCriterion wc = new DistributedTestCase.WaitCriterion() {
+          WaitCriterion wc = new WaitCriterion() {
             @Override
             public boolean done() {
               return locator.isSharedConfigurationRunning();
@@ -473,7 +476,7 @@ public class DiskStoreCommandsDUnitTest extends CliCommandTestBase {
               return "Waiting for shared configuration to be started";
             }
           };
-          DistributedTestCase.waitForCriterion(wc, 5000, 500, true);
+          Wait.waitForCriterion(wc, 5000, 500, true);
         } catch (IOException ioex) {
           fail("Unable to create a locator with a shared configuration");
         }
@@ -522,7 +525,7 @@ public class DiskStoreCommandsDUnitTest extends CliCommandTestBase {
           xmlFromConfig = sharedConfig.getConfiguration(groupName).getCacheXmlContent();
           assertTrue(xmlFromConfig.contains(diskStoreName));
         } catch (Exception e) {
-          fail("Error occurred in cluster configuration service", e);
+          Assert.fail("Error occurred in cluster configuration service", e);
         }
       }
     });
@@ -573,7 +576,7 @@ public class DiskStoreCommandsDUnitTest extends CliCommandTestBase {
           xmlFromConfig = sharedConfig.getConfiguration(groupName).getCacheXmlContent();
           assertFalse(xmlFromConfig.contains(diskStoreName));
         } catch (Exception e) {
-          fail("Error occurred in cluster configuration service", e);
+          Assert.fail("Error occurred in cluster configuration service", e);
         }
       }
     });
@@ -664,7 +667,7 @@ public class DiskStoreCommandsDUnitTest extends CliCommandTestBase {
 
     CommandResult cmdResult = executeCommand(commandString);
     String resultString = commandResultToString(cmdResult);
-    getLogWriter().info("#SB command output : \n" + resultString);
+    LogWriterUtils.getLogWriter().info("#SB command output : \n" + resultString);
     assertEquals(true, Result.Status.OK.equals(cmdResult.getStatus()));
     assertEquals(true, resultString.contains("concurrencyLevel=5"));
     assertEquals(true, resultString.contains("lruAction=local-destroy"));
@@ -695,7 +698,7 @@ public class DiskStoreCommandsDUnitTest extends CliCommandTestBase {
 
     cmdResult = executeCommand(commandString);
     resultString = commandResultToString(cmdResult);
-    getLogWriter().info("command output : \n" + resultString);
+    LogWriterUtils.getLogWriter().info("command output : \n" + resultString);
     assertEquals(true, Result.Status.OK.equals(cmdResult.getStatus()));
 
     Object postDestroyValue = vm1.invoke(new SerializableCallable() {
@@ -722,7 +725,7 @@ public class DiskStoreCommandsDUnitTest extends CliCommandTestBase {
 
     cmdResult = executeCommand(commandString);
     resultString = commandResultToString(cmdResult);
-    getLogWriter().info("Alter DiskStore with wrong remove option  : \n" + resultString);
+    LogWriterUtils.getLogWriter().info("Alter DiskStore with wrong remove option  : \n" + resultString);
     assertEquals(true, Result.Status.ERROR.equals(cmdResult.getStatus()));
 
     filesToBeDeleted.add(diskDirName);
@@ -789,7 +792,7 @@ public class DiskStoreCommandsDUnitTest extends CliCommandTestBase {
 
     CommandResult cmdResult = executeCommand(commandString);
     String resultAsString = commandResultToString(cmdResult);
-    getLogWriter().info("Result from full backup : \n" + resultAsString);
+    LogWriterUtils.getLogWriter().info("Result from full backup : \n" + resultAsString);
     assertEquals(Result.Status.OK, cmdResult.getStatus());
     assertEquals(true, resultAsString.contains("Manager"));
     assertEquals(true, resultAsString.contains(vm1Name));
@@ -819,7 +822,7 @@ public class DiskStoreCommandsDUnitTest extends CliCommandTestBase {
 
     cmdResult = executeCommand(csb.toString());
     resultAsString = commandResultToString(cmdResult);
-    getLogWriter().info("Result from incremental backup : \n" + resultAsString);
+    LogWriterUtils.getLogWriter().info("Result from incremental backup : \n" + resultAsString);
     assertEquals(Result.Status.OK, cmdResult.getStatus());
 
     assertEquals(true, resultAsString.contains("Manager"));
@@ -1141,15 +1144,14 @@ public class DiskStoreCommandsDUnitTest extends CliCommandTestBase {
   }
 
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCliCommandTestBase() throws Exception {
     for (String path : this.filesToBeDeleted) {
       try {
         FileUtil.delete(new File(path));
       } catch (IOException e) {
-        getLogWriter().error("Unable to delete file", e);
+        LogWriterUtils.getLogWriter().error("Unable to delete file", e);
       }
     }
     this.filesToBeDeleted.clear();
-    super.tearDown2();
   }
 }


[18/62] [abbrv] [partial] incubator-geode git commit: GEODE-773: Extract static methods from DistributedTestCase

Posted by je...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/offheap/OffHeapRegionBase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/offheap/OffHeapRegionBase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/offheap/OffHeapRegionBase.java
index 1c3a33a..b515959 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/offheap/OffHeapRegionBase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/offheap/OffHeapRegionBase.java
@@ -44,7 +44,7 @@ import com.gemstone.gemfire.internal.offheap.annotations.Retained;
 import com.gemstone.gemfire.pdx.PdxReader;
 import com.gemstone.gemfire.pdx.PdxSerializable;
 import com.gemstone.gemfire.pdx.PdxWriter;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Basic test of regions that use off heap storage.
@@ -146,7 +146,7 @@ public abstract class OffHeapRegionBase {
           return "Waiting for disconnect to complete";
         }
       };
-      com.gemstone.gemfire.test.dunit.DistributedTestCase.waitForCriterion(waitForDisconnect, 10*1000, 100, true);
+      com.gemstone.gemfire.test.dunit.Wait.waitForCriterion(waitForDisconnect, 10*1000, 100, true);
 
       assertTrue(gfc.isClosed());
     } finally {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/offheap/OutOfOffHeapMemoryDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/offheap/OutOfOffHeapMemoryDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/offheap/OutOfOffHeapMemoryDUnitTest.java
index cd20ad1..25de4ea 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/offheap/OutOfOffHeapMemoryDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/offheap/OutOfOffHeapMemoryDUnitTest.java
@@ -40,7 +40,11 @@ import com.gemstone.gemfire.internal.cache.OffHeapTestUtil;
 import com.gemstone.gemfire.internal.logging.LogService;
 import com.gemstone.gemfire.internal.util.StopWatch;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 
 import static com.jayway.awaitility.Awaitility.with;
@@ -67,11 +71,11 @@ public class OutOfOffHeapMemoryDUnitTest extends CacheTestCase {
   public void setUp() throws Exception {
     disconnectAllFromDS();
     super.setUp();
-    addExpectedException(OutOfOffHeapMemoryException.class.getSimpleName());
+    IgnoredException.addIgnoredException(OutOfOffHeapMemoryException.class.getSimpleName());
   }
 
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
     final SerializableRunnable checkOrphans = new SerializableRunnable() {
       @Override
       public void run() {
@@ -80,13 +84,8 @@ public class OutOfOffHeapMemoryDUnitTest extends CacheTestCase {
         }
       }
     };
-    invokeInEveryVM(checkOrphans);
-    try {
-      checkOrphans.run();
-    } finally {
-      invokeInEveryVM(getClass(), "cleanup");
-      super.tearDown2();
-    }
+    Invoke.invokeInEveryVM(checkOrphans);
+    checkOrphans.run();
   }
 
   @SuppressWarnings("unused") // invoked by reflection from tearDown2()
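
Expected-exception registration moves to the static IgnoredException helper, as in the setUp() hunk above, so that log lines matching the registered string are not flagged as suspect and do not fail the run. A minimal sketch; the exception name is the one used above, passed as a plain string:

import com.gemstone.gemfire.test.dunit.IgnoredException;

// Illustrative class, not part of the commit.
public class IgnoredExceptionSketch {
  public void registerExpectedFailure() {
    // equivalent to addIgnoredException(OutOfOffHeapMemoryException.class.getSimpleName())
    IgnoredException.addIgnoredException("OutOfOffHeapMemoryException");
  }
}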

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/process/LocalProcessLauncherDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/process/LocalProcessLauncherDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/process/LocalProcessLauncherDUnitTest.java
index 1a27437..cb05740 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/process/LocalProcessLauncherDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/process/LocalProcessLauncherDUnitTest.java
@@ -49,10 +49,6 @@ public class LocalProcessLauncherDUnitTest extends DistributedTestCase {
     new File(getClass().getSimpleName()).mkdir();
   }
   
-  @Override
-  public void tearDown2() throws Exception {
-  }
-  
   public void testExistingPidFileThrows() throws Exception {
     final File pidFile = new File(getClass().getSimpleName() 
         + File.separator + "testExistingPidFileThrows.pid");

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/statistics/StatisticsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/statistics/StatisticsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/statistics/StatisticsDUnitTest.java
index 9f35f05..7126c46 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/statistics/StatisticsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/statistics/StatisticsDUnitTest.java
@@ -60,10 +60,14 @@ import com.gemstone.gemfire.internal.StatArchiveReader.StatSpec;
 import com.gemstone.gemfire.internal.StatSamplerStats;
 import com.gemstone.gemfire.internal.StatisticsTypeFactoryImpl;
 import com.gemstone.gemfire.internal.StatArchiveReader.StatValue;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Integration tests for Statistics. VM0 performs puts and VM1 receives
@@ -127,8 +131,8 @@ public class StatisticsDUnitTest extends CacheTestCase {
   }
 
   @Override
-  public void tearDown2() throws Exception {
-    invokeInEveryVM(getClass(), "cleanup");
+  protected final void preTearDownCacheTestCase() throws Exception {
+    Invoke.invokeInEveryVM(getClass(), "cleanup");
     disconnectAllFromDS(); // because this test enabled stat sampling!
   }
   
@@ -173,7 +177,7 @@ public class StatisticsDUnitTest extends CacheTestCase {
               return "sampler.getSampleCollector() is still null!";
             }
           };
-          waitForCriterion(waitForSampleCollector, 4*1000, 10, true);
+          Wait.waitForCriterion(waitForSampleCollector, 4*1000, 10, true);
   
           final SampleCollector sampleCollector = sampler.getSampleCollector();
           assertNotNull(sampleCollector);
@@ -230,7 +234,7 @@ public class StatisticsDUnitTest extends CacheTestCase {
             return "sampler.getSampleCollector() is still null!";
           }
         };
-        waitForCriterion(waitForSampleCollector, 2*1000, 10, true);
+        Wait.waitForCriterion(waitForSampleCollector, 2*1000, 10, true);
 
         final SampleCollector sampleCollector = sampler.getSampleCollector();
         assertNotNull(sampleCollector);
@@ -284,7 +288,7 @@ public class StatisticsDUnitTest extends CacheTestCase {
                 return rml.members + " should contain " + subMember;
               }
             };
-            waitForCriterion(wc, 4*1000, 10, true);
+            Wait.waitForCriterion(wc, 4*1000, 10, true);
             
             // publish lots of puts cycling through the NUM_KEYS
             assertEquals(0, statistics.getPuts());
@@ -331,14 +335,14 @@ public class StatisticsDUnitTest extends CacheTestCase {
                 return "Waiting for " + StatSamplerStats.SAMPLE_COUNT + " >= " + initialSampleCount + 2;
               }
             };
-            waitForCriterion(wc, 4*1000, 10, true);
+            Wait.waitForCriterion(wc, 4*1000, 10, true);
           }
         });
       }
       for (int pubThread = 0; pubThread < publishers.length; pubThread++) {
         publishers[pubThread].join();
         if (publishers[pubThread].exceptionOccurred()) {
-          fail("Test failed", publishers[pubThread].getException());
+          Assert.fail("Test failed", publishers[pubThread].getException());
         }
       }
     }
@@ -361,7 +365,7 @@ public class StatisticsDUnitTest extends CacheTestCase {
             return "Waiting for " + StatSamplerStats.SAMPLE_COUNT + " >= " + initialSampleCount + 2;
           }
         };
-        waitForCriterion(wc, 4*1000, 10, true);
+        Wait.waitForCriterion(wc, 4*1000, 10, true);
         
         // now post total updateEvents to static
         final PubSubStats statistics = subStatsRef.get();

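Note: the recurring change in this file swaps the inherited waitForCriterion(...) for the static Wait.waitForCriterion(...). A minimal sketch of the relocated call follows, assuming a hypothetical conditionUnderTest() predicate; only the Wait and WaitCriterion signatures visible in the hunks above are relied on.

    import com.gemstone.gemfire.test.dunit.Wait;
    import com.gemstone.gemfire.test.dunit.WaitCriterion;

    // inside a dunit test method
    WaitCriterion wc = new WaitCriterion() {
      public boolean done() {
        return conditionUnderTest();               // hypothetical predicate, for illustration only
      }
      public String description() {
        return "waiting for the condition under test";
      }
    };
    Wait.waitForCriterion(wc, 4 * 1000, 10, true); // max wait ms, poll interval ms, throw on timeout
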
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/statistics/ValueMonitorJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/statistics/ValueMonitorJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/statistics/ValueMonitorJUnitTest.java
index cc76ce2..1d1a738 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/statistics/ValueMonitorJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/statistics/ValueMonitorJUnitTest.java
@@ -39,8 +39,8 @@ import com.gemstone.gemfire.StatisticsType;
 import com.gemstone.gemfire.internal.NanoTimer;
 import com.gemstone.gemfire.internal.StatisticsManager;
 import com.gemstone.gemfire.internal.statistics.StatisticsNotification.Type;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 import junit.framework.TestCase;
@@ -369,6 +369,6 @@ public class ValueMonitorJUnitTest {
         return "waiting for notification";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, ms, interval, throwOnTimeout);
+    Wait.waitForCriterion(wc, ms, interval, throwOnTimeout);
   }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/CacheManagementDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/CacheManagementDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/CacheManagementDUnitTest.java
index 2698e2a..320e20a 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/CacheManagementDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/CacheManagementDUnitTest.java
@@ -46,9 +46,11 @@ import com.gemstone.gemfire.management.internal.ManagementConstants;
 import com.gemstone.gemfire.management.internal.SystemManagementService;
 import com.gemstone.gemfire.management.internal.NotificationHub.NotificationHubListener;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 import static com.jayway.awaitility.Awaitility.*;
 import static org.hamcrest.Matchers.*;
@@ -99,12 +101,6 @@ public class CacheManagementDUnitTest extends ManagementTestBase {
 
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-  }
-  
-  
-
   public void testGemFireConfigData() throws Exception {
      initManagement(false);
    
@@ -142,7 +138,7 @@ public class CacheManagementDUnitTest extends ManagementTestBase {
       String log = (String) vm.invoke(CacheManagementDUnitTest.class,
           "fetchLog");
       assertNotNull(log);
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "<ExpectedString> Log Of Member is " + log.toString()
               + "</ExpectedString> ");
 
@@ -658,7 +654,7 @@ public class CacheManagementDUnitTest extends ManagementTestBase {
         .getMemberMXBean();
     JVMMetrics metrics = bean.showJVMMetrics();
 
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "<ExpectedString> JVMMetrics is " + metrics.toString()
             + "</ExpectedString> ");
 
@@ -669,7 +665,7 @@ public class CacheManagementDUnitTest extends ManagementTestBase {
         .getMemberMXBean();
     OSMetrics metrics = bean.showOSMetrics();
 
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "<ExpectedString> OSMetrics is " + metrics.toString()
             + "</ExpectedString> ");
 
@@ -683,7 +679,7 @@ public class CacheManagementDUnitTest extends ManagementTestBase {
   }
 
   public static void assertExpectedMembers(int expectedMemberCount) {
-    DistributedTestCase.waitForCriterion(new WaitCriterion() {
+    Wait.waitForCriterion(new WaitCriterion() {
       public String description() {
         return "Waiting all nodes to shutDown";
       }
@@ -710,14 +706,14 @@ public class CacheManagementDUnitTest extends ManagementTestBase {
       MemberMXBean bean = MBeanUtil.getMemberMbeanProxy(member);
       JVMMetrics metrics = bean.showJVMMetrics();
 
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "<ExpectedString> JVMMetrics is " + metrics.toString()
               + "</ExpectedString> ");
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "<ExpectedString> OSMetrics is " + metrics.toString()
               + "</ExpectedString> ");
       
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "<ExpectedString> Boolean Data Check " +bean.isManager()
               + "</ExpectedString> ");
       
@@ -841,7 +837,7 @@ public class CacheManagementDUnitTest extends ManagementTestBase {
     SystemManagementService service = (SystemManagementService) getManagementService();
     final Map<ObjectName, NotificationHubListener> hubMap = service.getNotificationHub().getListenerObjectMap();
 
-    DistributedTestCase.waitForCriterion(new WaitCriterion() {
+    Wait.waitForCriterion(new WaitCriterion() {
       public String description() {
         return "Waiting for manager to register the listener";
       }
@@ -919,7 +915,7 @@ public class CacheManagementDUnitTest extends ManagementTestBase {
 
       public void run() {
 
-        DistributedTestCase.waitForCriterion(new WaitCriterion() {
+        Wait.waitForCriterion(new WaitCriterion() {
           public String description() {
             return "Waiting for all the RegionCreated notification to reach the manager " + notifList.size();
           }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/ClientHealthStatsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/ClientHealthStatsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/ClientHealthStatsDUnitTest.java
index 7f7c4fd..9d42589 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/ClientHealthStatsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/ClientHealthStatsDUnitTest.java
@@ -48,10 +48,13 @@ import com.gemstone.gemfire.internal.cache.tier.sockets.CacheClientProxy;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Client health stats check
@@ -101,11 +104,11 @@ public class ClientHealthStatsDUnitTest extends DistributedTestCase {
     server = host.getVM(1);
     client = host.getVM(2);
     client2 = host.getVM(3);
-    addExpectedException("Connection reset");
+    IgnoredException.addIgnoredException("Connection reset");
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     reset();
     helper.closeCache(managingNode);
     helper.closeCache(client);
@@ -233,9 +236,9 @@ public class ClientHealthStatsDUnitTest extends DistributedTestCase {
     props.setProperty(DistributionConfig.DURABLE_CLIENT_ID_NAME, "durable-"+clientNum);
     props.setProperty(DistributionConfig.DURABLE_CLIENT_TIMEOUT_NAME, "300000");
 
-    props.setProperty("log-file", testName+"_client_" + clientNum + ".log");
+    props.setProperty("log-file", getTestMethodName()+"_client_" + clientNum + ".log");
     props.setProperty("log-level", "info");
-    props.setProperty("statistic-archive-file", testName+"_client_" + clientNum
+    props.setProperty("statistic-archive-file", getTestMethodName()+"_client_" + clientNum
         + ".gfs");
     props.setProperty("statistic-sampling-enabled", "true");
 
@@ -340,7 +343,7 @@ public class ClientHealthStatsDUnitTest extends DistributedTestCase {
         return "Did not receive last key.";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60*1000, 500, true);
+    Wait.waitForCriterion(wc, 60*1000, 500, true);
   }
 
 
@@ -358,8 +361,8 @@ public class ClientHealthStatsDUnitTest extends DistributedTestCase {
 
       String[] clientIds = bean.getClientIds();
       assertTrue(clientIds.length == 2);
-      getLogWriter().info("<ExpectedString> ClientId-1 of the Server is  " + clientIds[0] + "</ExpectedString> ");
-      getLogWriter().info("<ExpectedString> ClientId-2 of the Server is  " + clientIds[1] + "</ExpectedString> ");
+      LogWriterUtils.getLogWriter().info("<ExpectedString> ClientId-1 of the Server is  " + clientIds[0] + "</ExpectedString> ");
+      LogWriterUtils.getLogWriter().info("<ExpectedString> ClientId-2 of the Server is  " + clientIds[1] + "</ExpectedString> ");
       
       ClientHealthStatus[] clientStatuses = bean.showAllClientStats();
 
@@ -369,15 +372,15 @@ public class ClientHealthStatsDUnitTest extends DistributedTestCase {
       ClientHealthStatus clientStatus2 = bean.showClientStats(clientIds[1]);
       assertNotNull(clientStatus1);
       assertNotNull(clientStatus2);
-      getLogWriter().info("<ExpectedString> ClientStats-1 of the Server is  " + clientStatus1 + "</ExpectedString> ");
-      getLogWriter().info("<ExpectedString> ClientStats-2 of the Server is  " + clientStatus2 + "</ExpectedString> ");
+      LogWriterUtils.getLogWriter().info("<ExpectedString> ClientStats-1 of the Server is  " + clientStatus1 + "</ExpectedString> ");
+      LogWriterUtils.getLogWriter().info("<ExpectedString> ClientStats-2 of the Server is  " + clientStatus2 + "</ExpectedString> ");
 
-      getLogWriter().info("<ExpectedString> clientStatuses " + clientStatuses + "</ExpectedString> ");
+      LogWriterUtils.getLogWriter().info("<ExpectedString> clientStatuses " + clientStatuses + "</ExpectedString> ");
       assertNotNull(clientStatuses);
       
       assertTrue(clientStatuses.length == 2);
       for (ClientHealthStatus status : clientStatuses) {
-        getLogWriter().info("<ExpectedString> ClientStats of the Server is  " + status + "</ExpectedString> ");
+        LogWriterUtils.getLogWriter().info("<ExpectedString> ClientStats of the Server is  " + status + "</ExpectedString> ");
 
       }
 

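Note: the tearDown2() override above becomes a preTearDown() hook. A minimal sketch of the new shape, assuming a hypothetical DistributedTestCase subclass; the cleanup body is illustrative.

    import com.gemstone.gemfire.test.dunit.DistributedTestCase;

    public class ExampleHealthStatsDUnitTest extends DistributedTestCase { // hypothetical test class

      public ExampleHealthStatsDUnitTest(String name) {
        super(name);
      }

      @Override
      protected final void preTearDown() throws Exception {
        // cleanup formerly done in tearDown2(); the framework now invokes
        // this hook before its own tear-down work
        disconnectAllFromDS(); // illustrative cleanup, mirroring the hunks above
      }
    }
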
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/CompositeTypeTestDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/CompositeTypeTestDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/CompositeTypeTestDUnitTest.java
index e7c11a7..49db7d5 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/CompositeTypeTestDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/CompositeTypeTestDUnitTest.java
@@ -26,6 +26,8 @@ import com.gemstone.gemfire.management.internal.ManagementConstants;
 import com.gemstone.gemfire.management.internal.SystemManagementService;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 public class CompositeTypeTestDUnitTest extends ManagementTestBase {
 
@@ -46,11 +48,6 @@ public class CompositeTypeTestDUnitTest extends ManagementTestBase {
     
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-    
-  }
-  
   public void testCompositeTypeGetters() throws Exception{
     
     initManagement(false);
@@ -117,7 +114,7 @@ public class CompositeTypeTestDUnitTest extends ManagementTestBase {
         try {
           final ObjectName objectName = new ObjectName("GemFire:service=custom,type=composite,member="+memberID);
           
-          waitForCriterion(new WaitCriterion() {
+          Wait.waitForCriterion(new WaitCriterion() {
             public String description() {
               return "Waiting for Composite Type MBean";
             }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/DLockManagementDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/DLockManagementDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/DLockManagementDUnitTest.java
index 793526c..2e08d58 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/DLockManagementDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/DLockManagementDUnitTest.java
@@ -29,8 +29,11 @@ import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedM
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.management.internal.MBeanJMXAdapter;
 import com.gemstone.gemfire.management.internal.SystemManagementService;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 public class DLockManagementDUnitTest extends ManagementTestBase {
 
@@ -55,11 +58,6 @@ public class DLockManagementDUnitTest extends ManagementTestBase {
 
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-    
-  }
-
   /**
    * Distributed Lock Service test
    * 
@@ -145,7 +143,7 @@ public class DLockManagementDUnitTest extends ManagementTestBase {
           RegionMXBean bean = null;
           try {
 
-            waitForCriterion(new WaitCriterion() {
+            Wait.waitForCriterion(new WaitCriterion() {
 
               LockServiceMXBean bean = null;
 
@@ -199,7 +197,7 @@ public class DLockManagementDUnitTest extends ManagementTestBase {
 
         assertNotNull(grantor);
 
-        getLogWriter().info("In identifyLockGrantor - grantor is " + grantor);
+        LogWriterUtils.getLogWriter().info("In identifyLockGrantor - grantor is " + grantor);
 
        
 
@@ -305,7 +303,7 @@ public class DLockManagementDUnitTest extends ManagementTestBase {
 
         final String LOCK_OBJECT = "lockObject_" + vm.getPid();
 
-        waitForCriterion(new WaitCriterion() {
+        Wait.waitForCriterion(new WaitCriterion() {
           DistributedLockService service = null;
 
           public String description() {
@@ -337,10 +335,10 @@ public class DLockManagementDUnitTest extends ManagementTestBase {
         assertNotNull(bean);
         String[] listHeldLock = bean.listHeldLocks();
         assertEquals(listHeldLock.length, 1);
-        getLogWriter().info("List Of Lock Object is  " + listHeldLock[0]);
+        LogWriterUtils.getLogWriter().info("List Of Lock Object is  " + listHeldLock[0]);
         Map<String, String> lockThreadMap = bean.listThreadsHoldingLock();
         assertEquals(lockThreadMap.size(), 1);
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "List Of Lock Thread is  " + lockThreadMap.toString());
       }
     };
@@ -373,10 +371,10 @@ public class DLockManagementDUnitTest extends ManagementTestBase {
           assertNotNull(bean);
           String[] listHeldLock = bean.listHeldLocks();
           assertEquals(listHeldLock.length, 1);
-          getLogWriter().info("List Of Lock Object is  " + listHeldLock[0]);
+          LogWriterUtils.getLogWriter().info("List Of Lock Object is  " + listHeldLock[0]);
           Map<String, String> lockThreadMap = bean.listThreadsHoldingLock();
           assertEquals(lockThreadMap.size(), 1);
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "List Of Lock Thread is  " + lockThreadMap.toString());
         }
 
@@ -432,7 +430,7 @@ public class DLockManagementDUnitTest extends ManagementTestBase {
         final ManagementService service = getManagementService();
         if (expectedMembers == 0) {
           try {
-            waitForCriterion(new WaitCriterion() {
+            Wait.waitForCriterion(new WaitCriterion() {
 
               DistributedLockServiceMXBean bean = null;
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/DiskManagementDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/DiskManagementDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/DiskManagementDUnitTest.java
index b9f49ab..b850b54 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/DiskManagementDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/DiskManagementDUnitTest.java
@@ -45,6 +45,8 @@ import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Test cases to cover all test cases which pertains to disk from Management
@@ -90,8 +92,8 @@ public class DiskManagementDUnitTest extends ManagementTestBase {
 
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();    
+  @Override
+  protected final void postTearDownManagementTestBase() throws Exception {
     com.gemstone.gemfire.internal.FileUtil.delete(diskDir);
   }
 
@@ -176,9 +178,9 @@ public class DiskManagementDUnitTest extends ManagementTestBase {
     VM vm1 = getManagedNodeList().get(1);
     VM vm2 = getManagedNodeList().get(2);
     
-    getLogWriter().info("Creating region in VM0");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Creating region in VM0");
     createPersistentRegion(vm0);
-    getLogWriter().info("Creating region in VM1");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Creating region in VM1");
     createPersistentRegion(vm1);
 
     putAnEntry(vm0);
@@ -197,12 +199,12 @@ public class DiskManagementDUnitTest extends ManagementTestBase {
       }
     });
 
-    getLogWriter().info("closing region in vm0");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("closing region in vm0");
     closeRegion(vm0);
 
     updateTheEntry(vm1);
 
-    getLogWriter().info("closing region in vm1");
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("closing region in vm1");
     closeRegion(vm1);
     AsyncInvocation future = createPersistentRegionAsync(vm0);
     waitForBlockedInitialization(vm0);
@@ -217,14 +219,14 @@ public class DiskManagementDUnitTest extends ManagementTestBase {
         DistributedSystemMXBean bean = service.getDistributedSystemMXBean();
         PersistentMemberDetails[] missingDiskStores = bean
         .listMissingDiskStores();
-        getLogWriter().info("waiting members=" + missingDiskStores);
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("waiting members=" + missingDiskStores);
         assertNotNull(missingDiskStores);
         assertEquals(1, missingDiskStores.length);
 
         for (PersistentMemberDetails id : missingDiskStores) {
-          getLogWriter().info("Missing DiskStoreID is =" + id.getDiskStoreId());
-          getLogWriter().info("Missing Host is =" + id.getHost());
-          getLogWriter().info("Missing Directory is =" + id.getDirectory());
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Missing DiskStoreID is =" + id.getDiskStoreId());
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Missing Host is =" + id.getHost());
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Missing Directory is =" + id.getDirectory());
 
           try {
             bean.revokeMissingDiskStores(id.getDiskStoreId());
@@ -377,11 +379,11 @@ public class DiskManagementDUnitTest extends ManagementTestBase {
         Cache cache = getCache();
         Region region = cache.getRegion(REGION_NAME);
         DiskRegion dr = ((LocalRegion) region).getDiskRegion();
-        getLogWriter().info("putting key1");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("putting key1");
         region.put("key1", "value1");
-        getLogWriter().info("putting key2");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("putting key2");
         region.put("key2", "value2");
-        getLogWriter().info("removing key2");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("removing key2");
         region.remove("key2");
         // now that it is compactable the following forceCompaction should
         // go ahead and do a roll and compact it.
@@ -413,7 +415,7 @@ public class DiskManagementDUnitTest extends ManagementTestBase {
 
         assertTrue(compactedDiskStores.length > 0);
         for (int i = 0; i < compactedDiskStores.length; i++) {
-          getLogWriter().info(
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
               "<ExpectedString> Compacted Store " + i + " "
                   + compactedDiskStores[i] + "</ExpectedString> ");
         }
@@ -463,13 +465,13 @@ public class DiskManagementDUnitTest extends ManagementTestBase {
             String[] allDisks = bean.listDiskStores(true);
             assertNotNull(allDisks);
             List<String> listString = Arrays.asList(allDisks);
-            getLogWriter().info(
+            com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
                 "<ExpectedString> Remote All Disk Stores Are  "
                     + listString.toString() + "</ExpectedString> ");
             String[] compactedDiskStores = bean.compactAllDiskStores();
             assertTrue(compactedDiskStores.length > 0);
             for (int i = 0; i < compactedDiskStores.length; i++) {
-              getLogWriter().info(
+              com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
                   "<ExpectedString> Remote Compacted Store " + i + " "
                       + compactedDiskStores[i] + "</ExpectedString> ");
             }
@@ -578,7 +580,7 @@ public class DiskManagementDUnitTest extends ManagementTestBase {
     vm.invoke(new SerializableRunnable() {
 
       public void run() {
-        waitForCriterion(new WaitCriterion() {
+        Wait.waitForCriterion(new WaitCriterion() {
 
           public String description() {
             return "Waiting to blocked waiting for another persistent member to come online";

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/DistributedSystemDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/DistributedSystemDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/DistributedSystemDUnitTest.java
index f234e34..10f628d 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/DistributedSystemDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/DistributedSystemDUnitTest.java
@@ -49,10 +49,14 @@ import com.gemstone.gemfire.management.internal.NotificationHub.NotificationHubL
 import com.gemstone.gemfire.management.internal.SystemManagementService;
 import com.gemstone.gemfire.management.internal.beans.MemberMBean;
 import com.gemstone.gemfire.management.internal.beans.SequenceNumber;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Distributed System tests
@@ -112,11 +116,6 @@ public class DistributedSystemDUnitTest extends ManagementTestBase {
 
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-    
-  }
-
   /**
    * Tests each and every operations that is defined on the MemberMXBean
    * 
@@ -205,7 +204,7 @@ public class DistributedSystemDUnitTest extends ManagementTestBase {
 
         public Object call() throws Exception {
           
-          waitForCriterion(new WaitCriterion() {
+          Wait.waitForCriterion(new WaitCriterion() {
             public String description() {
               return "Waiting for all alert Listener to register with managed node";
             }
@@ -284,7 +283,7 @@ public class DistributedSystemDUnitTest extends ManagementTestBase {
             ManagementService service = getManagementService();
             final DistributedSystemMXBean bean = service.getDistributedSystemMXBean();
             
-            waitForCriterion(new WaitCriterion() {
+            Wait.waitForCriterion(new WaitCriterion() {
               public String description() {
                 return "Waiting for all members to send their initial Data";
               }
@@ -304,7 +303,7 @@ public class DistributedSystemDUnitTest extends ManagementTestBase {
                     null);
                 notificationListenerMap.put(objectName, listener);
               } catch (InstanceNotFoundException e) {
-                getLogWriter().error(e);
+                LogWriterUtils.getLogWriter().error(e);
               }
             }
           }
@@ -359,7 +358,7 @@ public class DistributedSystemDUnitTest extends ManagementTestBase {
 
       public void run() {
 
-        waitForCriterion(new WaitCriterion() {
+        Wait.waitForCriterion(new WaitCriterion() {
           public String description() {
             return "Waiting for all Notifications to reach the Managing Node";
           }
@@ -383,9 +382,9 @@ public class DistributedSystemDUnitTest extends ManagementTestBase {
           try {
             mbeanServer.removeNotificationListener(objectName, listener);
           } catch (ListenerNotFoundException e) {
-            getLogWriter().error(e);
+            LogWriterUtils.getLogWriter().error(e);
           } catch (InstanceNotFoundException e) {
-            getLogWriter().error(e);
+            LogWriterUtils.getLogWriter().error(e);
           }
         }
 
@@ -430,7 +429,7 @@ public class DistributedSystemDUnitTest extends ManagementTestBase {
         ManagementService service = getManagementService();
         final DistributedSystemMXBean bean = service.getDistributedSystemMXBean();
         
-        waitForCriterion(new WaitCriterion() {
+        Wait.waitForCriterion(new WaitCriterion() {
           public String description() {
             return "Waiting for all members to send their initial Data";
           }
@@ -450,7 +449,7 @@ public class DistributedSystemDUnitTest extends ManagementTestBase {
           try {
             mbeanServer.removeNotificationListener(objectName, listener);
           } catch (InstanceNotFoundException e) {
-            getLogWriter().error(e);
+            LogWriterUtils.getLogWriter().error(e);
           } catch (ListenerNotFoundException e) {
             // TODO: apparently there is never a notification listener on any these mbeans at this point 
             // fix this test so it doesn't hit these unexpected exceptions -- getLogWriter().error(e);
@@ -480,7 +479,7 @@ public class DistributedSystemDUnitTest extends ManagementTestBase {
             } catch (ListenerNotFoundException e) {
               // Expected Exception Do nothing
             } catch (InstanceNotFoundException e) {
-              getLogWriter().error(e);
+              LogWriterUtils.getLogWriter().error(e);
             }
           }
         }
@@ -563,7 +562,7 @@ public class DistributedSystemDUnitTest extends ManagementTestBase {
 
         public Object call() throws Exception {
           final AlertNotifListener nt = AlertNotifListener.getInstance();
-          waitForCriterion(new WaitCriterion() {
+          Wait.waitForCriterion(new WaitCriterion() {
             public String description() {
               return "Waiting for all alerts to reach the Managing Node";
             }
@@ -614,7 +613,7 @@ public class DistributedSystemDUnitTest extends ManagementTestBase {
       vm1.invoke(new SerializableCallable("Warning level Alerts") {
 
         public Object call() throws Exception {
-          final ExpectedException warnEx = addExpectedException(WARNING_LEVEL_MESSAGE);
+          final IgnoredException warnEx = IgnoredException.addIgnoredException(WARNING_LEVEL_MESSAGE);
           logger.warn(WARNING_LEVEL_MESSAGE);
           warnEx.remove();
           return null;
@@ -648,7 +647,7 @@ public class DistributedSystemDUnitTest extends ManagementTestBase {
         public Object call() throws Exception {
           // add expected exception strings         
           
-          final ExpectedException severeEx = addExpectedException(SEVERE_LEVEL_MESSAGE);
+          final IgnoredException severeEx = IgnoredException.addIgnoredException(SEVERE_LEVEL_MESSAGE);
           logger.fatal(SEVERE_LEVEL_MESSAGE);
           severeEx.remove();
           return null;
@@ -706,7 +705,7 @@ public class DistributedSystemDUnitTest extends ManagementTestBase {
           final DistributedSystemMXBean bean = service.getDistributedSystemMXBean();
           assertNotNull(service.getDistributedSystemMXBean());
           
-          waitForCriterion(new WaitCriterion() {
+          Wait.waitForCriterion(new WaitCriterion() {
             public String description() {
               return "Waiting All members to intitialize DistributedSystemMBean expect 5 but found " + bean.getMemberCount();
             }
@@ -729,10 +728,10 @@ public class DistributedSystemDUnitTest extends ManagementTestBase {
           Iterator<DistributedMember> memberIt = otherMemberSet.iterator();
           while (memberIt.hasNext()) {
             DistributedMember member = memberIt.next();
-            getLogWriter().info(
+            LogWriterUtils.getLogWriter().info(
                 "JVM Metrics For Member " + member.getId() + ":"
                     + bean.showJVMMetrics(member.getId()));
-            getLogWriter().info(
+            LogWriterUtils.getLogWriter().info(
                 "OS Metrics For Member " + member.getId() + ":"
                     + bean.showOSMetrics(member.getId()));
           }
@@ -778,7 +777,7 @@ public class DistributedSystemDUnitTest extends ManagementTestBase {
           DistributedSystemMXBean bean = service.getDistributedSystemMXBean();
           assertNotNull(service.getDistributedSystemMXBean());
           bean.shutDownAllMembers();
-          staticPause(2000);
+          Wait.pause(2000);
           assertEquals(
               cache.getDistributedSystem().getAllOtherMembers().size(), 1);
           return null;
@@ -805,7 +804,7 @@ public class DistributedSystemDUnitTest extends ManagementTestBase {
           waitForAllMembers(4);
           
           for(int i =0; i< bean.listMemberObjectNames().length ; i++){
-            getLogWriter().info(
+            LogWriterUtils.getLogWriter().info(
                 "ObjectNames Of the Mmeber" + bean.listMemberObjectNames()[i] );
           }
 

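Note: the alert tests above pair IgnoredException.addIgnoredException(...) with remove() in place of the old addExpectedException(...). A minimal sketch of that pairing, assuming a logger field like the one in the test and a placeholder message string.

    import com.gemstone.gemfire.test.dunit.IgnoredException;

    // suppress a log line that the test deliberately provokes
    IgnoredException ignored = IgnoredException.addIgnoredException("test alert message"); // placeholder text
    try {
      logger.warn("test alert message"); // the provoked log line (illustrative)
    } finally {
      ignored.remove(); // stop suppressing once the step completes
    }
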
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/LocatorManagementDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/LocatorManagementDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/LocatorManagementDUnitTest.java
index 4c9aaee..0d2fdbc 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/LocatorManagementDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/LocatorManagementDUnitTest.java
@@ -31,9 +31,13 @@ import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.management.internal.ManagementConstants;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 
 /**
@@ -69,10 +73,9 @@ public class LocatorManagementDUnitTest extends ManagementTestBase {
     locator = managedNode1;
   }
 
-  public void tearDown2() throws Exception {
+  @Override
+  protected final void preTearDownManagementTestBase() throws Exception {
     stopLocator(locator);
-    super.tearDown2();
-
   }
 
   /**
@@ -163,21 +166,21 @@ public class LocatorManagementDUnitTest extends ManagementTestBase {
         props.setProperty(DistributionConfig.MCAST_PORT_NAME,"0");
         
         props.setProperty(DistributionConfig.LOCATORS_NAME, "");
-        props.setProperty(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+        props.setProperty(DistributionConfig.LOG_LEVEL_NAME, LogWriterUtils.getDUnitLogLevel());
 
         InetAddress bindAddr = null;
         try {
           bindAddr = InetAddress.getByName(getServerHostName(vm.getHost()));
         } catch (UnknownHostException uhe) {
-          fail("While resolving bind address ", uhe);
+          Assert.fail("While resolving bind address ", uhe);
         }
 
         try {
-          File logFile = new File(testName + "-locator" + port + ".log");
+          File logFile = new File(getTestMethodName() + "-locator" + port + ".log");
           Locator locator = Locator.startLocatorAndDS(port, logFile, bindAddr,
               props, isPeer, true, null);
         } catch (IOException ex) {
-          fail("While starting locator on port " + port, ex);
+          Assert.fail("While starting locator on port " + port, ex);
         }
 
         assertTrue(InternalLocator.hasLocator());
@@ -225,8 +228,8 @@ public class LocatorManagementDUnitTest extends ManagementTestBase {
         LocatorMXBean bean = service.getLocalLocatorMXBean();
         assertNotNull(bean);
         assertEquals(locPort, bean.getPort());
-        getLogWriter().info("Log of Locator" + bean.viewLog());
-        getLogWriter().info("BindAddress" + bean.getBindAddress());
+        LogWriterUtils.getLogWriter().info("Log of Locator" + bean.viewLog());
+        LogWriterUtils.getLogWriter().info("BindAddress" + bean.getBindAddress());
         assertEquals(isPeer, bean.isPeerLocator());
         return null;
       }
@@ -251,8 +254,8 @@ public class LocatorManagementDUnitTest extends ManagementTestBase {
         LocatorMXBean bean = MBeanUtil.getLocatorMbeanProxy(member);
         assertNotNull(bean);
 
-        getLogWriter().info("Log of Locator" + bean.viewLog());
-        getLogWriter().info("BindAddress" + bean.getBindAddress());
+        LogWriterUtils.getLogWriter().info("Log of Locator" + bean.viewLog());
+        LogWriterUtils.getLogWriter().info("BindAddress" + bean.getBindAddress());
 
         return null;
       }
@@ -278,7 +281,7 @@ public class LocatorManagementDUnitTest extends ManagementTestBase {
         final LocatorMXBean bean = service.getLocalLocatorMXBean();
         assertNotNull(bean);
 
-        waitForCriterion(new WaitCriterion() {
+        Wait.waitForCriterion(new WaitCriterion() {
 
           public String description() {
             return "Waiting for the managers List";
@@ -317,7 +320,7 @@ public class LocatorManagementDUnitTest extends ManagementTestBase {
         final LocatorMXBean bean = service.getLocalLocatorMXBean();
         assertNotNull(bean);
 
-        waitForCriterion(new WaitCriterion() {
+        Wait.waitForCriterion(new WaitCriterion() {
 
           public String description() {
             return "Waiting for the Willing managers List";

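Note: several hunks above replace the inherited two-argument fail(message, cause) with the static Assert.fail from the dunit package, which preserves the underlying exception. A minimal sketch, assuming a hypothetical startLocatorUnderTest(port) helper in place of the real Locator call.

    import java.io.IOException;
    import com.gemstone.gemfire.test.dunit.Assert;

    try {
      startLocatorUnderTest(port); // hypothetical helper, for illustration only
    } catch (IOException ex) {
      // fails the test and keeps the original exception as the cause
      Assert.fail("While starting locator on port " + port, ex);
    }
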
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/MBeanUtil.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/MBeanUtil.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/MBeanUtil.java
index 171204c..6c3c8e7 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/MBeanUtil.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/MBeanUtil.java
@@ -37,7 +37,9 @@ import com.gemstone.gemfire.management.internal.MBeanJMXAdapter;
 import com.gemstone.gemfire.management.internal.ManagementConstants;
 import com.gemstone.gemfire.management.internal.SystemManagementService;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Utility test class to get various proxies
@@ -66,7 +68,7 @@ public class MBeanUtil {
     final SystemManagementService service = (SystemManagementService) ManagementTestBase
         .getManagementService();
     final ObjectName memberMBeanName = service.getMemberMBeanName(member);
-    DistributedTestCase.waitForCriterion(new WaitCriterion() {
+    Wait.waitForCriterion(new WaitCriterion() {
       MemberMXBean bean = null;
 
       public String description() {
@@ -110,7 +112,7 @@ public class MBeanUtil {
     final ObjectName cacheServerMBeanName = service
         .getCacheServerMBeanName(port,member);
 
-    DistributedTestCase.waitForCriterion(new WaitCriterion() {
+    Wait.waitForCriterion(new WaitCriterion() {
       CacheServerMXBean bean = null;
 
       public String description() {
@@ -159,7 +161,7 @@ public class MBeanUtil {
     final ObjectName lockServiceMBeanName = service.getLockServiceMBeanName(
         member, lockServiceName);
 
-    DistributedTestCase.waitForCriterion(new WaitCriterion() {
+    Wait.waitForCriterion(new WaitCriterion() {
       LockServiceMXBean bean = null;
 
       public String description() {
@@ -208,7 +210,7 @@ public class MBeanUtil {
     final ObjectName regionMBeanName = service.getRegionMBeanName(
         member, regionPath);
     
-    DistributedTestCase.waitForCriterion(new WaitCriterion(){ 
+    Wait.waitForCriterion(new WaitCriterion(){ 
       
       RegionMXBean bean = null;
       public String description() {
@@ -258,7 +260,7 @@ public class MBeanUtil {
 
     final ObjectName senderMBeanName = service.getGatewaySenderMBeanName(member, gatwaySenderId);
 
-    DistributedTestCase.waitForCriterion(new WaitCriterion() {
+    Wait.waitForCriterion(new WaitCriterion() {
 
       GatewaySenderMXBean bean = null;
 
@@ -310,7 +312,7 @@ public class MBeanUtil {
     final ObjectName queueMBeanName = service.getAsyncEventQueueMBeanName(
         member, queueId);
 
-    DistributedTestCase.waitForCriterion(new WaitCriterion() {
+    Wait.waitForCriterion(new WaitCriterion() {
 
       AsyncEventQueueMXBean bean = null;
 
@@ -358,7 +360,7 @@ public class MBeanUtil {
 
     final ObjectName receiverMBeanName = service.getGatewayReceiverMBeanName(member);
 
-    DistributedTestCase.waitForCriterion(new WaitCriterion() {
+    Wait.waitForCriterion(new WaitCriterion() {
 
       GatewayReceiverMXBean bean = null;
 
@@ -406,7 +408,7 @@ public class MBeanUtil {
     final ManagementService service = ManagementTestBase
         .getManagementService();
 
-    DistributedTestCase.waitForCriterion(new WaitCriterion() {
+    Wait.waitForCriterion(new WaitCriterion() {
 
       DistributedRegionMXBean bean = null;
 
@@ -444,7 +446,7 @@ public class MBeanUtil {
 
     final ManagementService service = ManagementTestBase
         .getManagementService();
-    DistributedTestCase.waitForCriterion(new WaitCriterion() {
+    Wait.waitForCriterion(new WaitCriterion() {
 
       DistributedLockServiceMXBean bean = null;
 
@@ -483,7 +485,7 @@ public class MBeanUtil {
 
     final ObjectName locatorMBeanName = service.getLocatorMBeanName(member);
 
-    DistributedTestCase.waitForCriterion(new WaitCriterion() {
+    Wait.waitForCriterion(new WaitCriterion() {
 
       LocatorMXBean bean = null;
 
@@ -536,7 +538,7 @@ public class MBeanUtil {
       try {
         propertyName = attributeInfo.getName();
         propertyValue = mbeanServer.getAttribute(objName, propertyName);
-        DistributedTestCase.getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "<ExpectedString> " + propertyName + " = " + propertyValue
                 + "</ExpectedString> ");
       } catch (Exception e) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/ManagementTestBase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/ManagementTestBase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/ManagementTestBase.java
index ecfc698..663abee 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/ManagementTestBase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/ManagementTestBase.java
@@ -42,12 +42,16 @@ import com.gemstone.gemfire.management.internal.LocalManager;
 import com.gemstone.gemfire.management.internal.MBeanJMXAdapter;
 import com.gemstone.gemfire.management.internal.ManagementStrings;
 import com.gemstone.gemfire.management.internal.SystemManagementService;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 public class ManagementTestBase extends DistributedTestCase {
 
@@ -120,16 +124,25 @@ public class ManagementTestBase extends DistributedTestCase {
 
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
+    preTearDownManagementTestBase();
+    
     closeAllCache();
     managementService = null;
 
     mcastPort = 0;
     disconnectAllFromDS();
     props.clear();
+    
+    postTearDownManagementTestBase();
   }
 
+  protected void preTearDownManagementTestBase() throws Exception {
+  }
+
+  protected void postTearDownManagementTestBase() throws Exception {
+  }
 
   public void closeAllCache() throws Exception{
     closeCache(managingNode);
@@ -143,7 +156,7 @@ public class ManagementTestBase extends DistributedTestCase {
    * Enable system property gemfire.disableManagement false in each VM.
    */
   public void enableManagement() {
-    invokeInEveryVM(new SerializableRunnable("Enable Management") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("Enable Management") {
       public void run() {
         System.setProperty(InternalDistributedSystem.DISABLE_MANAGEMENT_PROPERTY, "false");
       }
@@ -155,7 +168,7 @@ public class ManagementTestBase extends DistributedTestCase {
    * Disable system property gemfire.disableManagement true in each VM.
    */
   public void disableManagement() {
-    invokeInEveryVM(new SerializableRunnable("Disable Management") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("Disable Management") {
       public void run() {
         System.setProperty(InternalDistributedSystem.DISABLE_MANAGEMENT_PROPERTY, "true");
       }
@@ -213,7 +226,7 @@ public class ManagementTestBase extends DistributedTestCase {
 
   public Cache createCache(Properties props) {
     System.setProperty("dunitLogPerTest", "true");
-    props.setProperty(DistributionConfig.LOG_FILE_NAME,testName+"-.log");
+    props.setProperty(DistributionConfig.LOG_FILE_NAME,getTestMethodName()+"-.log");
     ds = (new ManagementTestBase("temp")).getSystem(props);
     cache = CacheFactory.create(ds);
     managementService = ManagementService.getManagementService(cache);
@@ -238,7 +251,7 @@ public class ManagementTestBase extends DistributedTestCase {
     }
     props.setProperty(DistributionConfig.ENABLE_TIME_STATISTICS_NAME, "true");
     props.setProperty(DistributionConfig.STATISTIC_SAMPLING_ENABLED_NAME, "true");
-    props.setProperty(DistributionConfig.LOG_FILE_NAME,testName+"-.log");
+    props.setProperty(DistributionConfig.LOG_FILE_NAME,getTestMethodName()+"-.log");
     ds = (new ManagementTestBase("temp")).getSystem(props);
     cache = CacheFactory.create(ds);
     managementService = ManagementService.getManagementService(cache);
@@ -290,7 +303,7 @@ public class ManagementTestBase extends DistributedTestCase {
   protected void waitForProxy(final ObjectName objectName,
       final Class interfaceClass) {
 
-    waitForCriterion(new WaitCriterion() {
+    Wait.waitForCriterion(new WaitCriterion() {
       public String description() {
         return "Waiting for the proxy of " + objectName.getCanonicalName()
             + " to get propagated to Manager";
@@ -458,7 +471,7 @@ public class ManagementTestBase extends DistributedTestCase {
 
 
         } catch (ManagementException e) {
-          fail("failed with ManagementException", e);
+          Assert.fail("failed with ManagementException", e);
         }
       }
     });
@@ -554,7 +567,7 @@ public class ManagementTestBase extends DistributedTestCase {
         RegionFactory rf = cache
             .createRegionFactory(RegionShortcut.LOCAL);
 
-        getLogWriter().info("Creating Local Region");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Creating Local Region");
         rf.create(localRegionName);
 
       }
@@ -577,7 +590,7 @@ public class ManagementTestBase extends DistributedTestCase {
         SystemManagementService service = (SystemManagementService) getManagementService();
         Region region = cache.getRegion(parentRegionPath);
 
-        getLogWriter().info("Creating Sub Region");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Creating Sub Region");
         region.createSubregion(subregionName, region.getAttributes());
 
       }
@@ -620,7 +633,7 @@ public class ManagementTestBase extends DistributedTestCase {
         SystemManagementService service = (SystemManagementService) getManagementService();
 
         RegionFactory rf = cache.createRegionFactory(RegionShortcut.REPLICATE);
-        getLogWriter().info("Creating Dist Region");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Creating Dist Region");
         rf.create(regionName);
 
       }
@@ -642,7 +655,7 @@ public class ManagementTestBase extends DistributedTestCase {
         SystemManagementService service = (SystemManagementService) getManagementService();
         RegionFactory rf = cache
             .createRegionFactory(RegionShortcut.PARTITION_REDUNDANT);
-        getLogWriter().info("Creating Par Region");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Creating Par Region");
         rf.create(partitionRegionName);
 
       }
@@ -661,7 +674,7 @@ public class ManagementTestBase extends DistributedTestCase {
       public void run() {
         GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
 
-        getLogWriter().info("Closing Dist Region");
+        com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("Closing Dist Region");
         Region region = cache.getRegion(regionPath);
         region.close();
 
@@ -677,7 +690,7 @@ public class ManagementTestBase extends DistributedTestCase {
     assertNotNull(service.getDistributedSystemMXBean());
 
 
-    waitForCriterion(new WaitCriterion() {
+    Wait.waitForCriterion(new WaitCriterion() {
       public String description() {
         return "Waiting All members to intimate DistributedSystemMBean";
       }
@@ -685,7 +698,7 @@ public class ManagementTestBase extends DistributedTestCase {
       public boolean done() {
         if (bean.listMemberObjectNames() != null) {
 
-          getLogWriter().info(
+          com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info(
               "Member Length " + bean.listMemberObjectNames().length);
 
         }
@@ -709,7 +722,7 @@ public class ManagementTestBase extends DistributedTestCase {
 
     final long currentTime = System.currentTimeMillis();
 
-    waitForCriterion(new WaitCriterion() {
+    Wait.waitForCriterion(new WaitCriterion() {
       int actualRefreshCount = 0;
       long lastRefreshTime = service.getLastUpdateTime(objectName);
 

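Note: ManagementTestBase now routes tear-down through a final preTearDown() that brackets two empty hooks, preTearDownManagementTestBase() and postTearDownManagementTestBase(). A minimal sketch of a subclass overriding the post hook, mirroring DiskManagementDUnitTest above; the deleted file name is illustrative.

    import java.io.File;
    import com.gemstone.gemfire.internal.FileUtil;

    public class ExampleManagementDUnitTest extends ManagementTestBase { // hypothetical subclass

      public ExampleManagementDUnitTest(String name) {
        super(name);
      }

      @Override
      protected final void postTearDownManagementTestBase() throws Exception {
        // runs after the base class has closed caches, disconnected and cleared props
        FileUtil.delete(new File(getTestMethodName() + "-dir")); // illustrative per-test cleanup
      }
    }
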
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/MemberMBeanAttributesDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/MemberMBeanAttributesDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/MemberMBeanAttributesDUnitTest.java
index 50db569..c3c1c2f 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/MemberMBeanAttributesDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/MemberMBeanAttributesDUnitTest.java
@@ -26,6 +26,7 @@ import com.gemstone.gemfire.internal.NanoTimer;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.process.PidUnavailableException;
 import com.gemstone.gemfire.internal.process.ProcessUtils;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -62,10 +63,6 @@ public class MemberMBeanAttributesDUnitTest extends ManagementTestBase {
 
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-  }
-
   protected void sample(VM vm1) {
     vm1.invoke(new SerializableRunnable("Create Cache") {
       public void run() {
@@ -112,7 +109,7 @@ public class MemberMBeanAttributesDUnitTest extends ManagementTestBase {
       public void run() {
         GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
         RegionFactory rf = cache.createRegionFactory(RegionShortcut.REPLICATE);
-        getLogWriter().info("Creating Dist Region");
+        LogWriterUtils.getLogWriter().info("Creating Dist Region");
         rf.create("testRegion1");
         rf.create("testRegion2");
         rf.create("testRegion3");

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/OffHeapManagementDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/OffHeapManagementDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/OffHeapManagementDUnitTest.java
index 4e2c278..acafada 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/OffHeapManagementDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/OffHeapManagementDUnitTest.java
@@ -44,6 +44,8 @@ import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Tests the off-heap additions to the RegionMXBean and MemberMXBean JMX interfaces.
@@ -151,7 +153,7 @@ public class OffHeapManagementDUnitTest extends CacheTestCase {
   }
 
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
     Host.getHost(0).getVM(0).invoke(new SerializableRunnable() {
       @Override
       public void run() {
@@ -927,7 +929,7 @@ public class OffHeapManagementDUnitTest extends CacheTestCase {
     vm.invoke(new SerializableRunnable() {
       @Override
       public void run() {
-        waitForCriterion(new WaitCriterion() {          
+        Wait.waitForCriterion(new WaitCriterion() {          
           @Override
           public boolean done() {
             return (notificationListener.getNotificationSize() > 0);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/QueryDataDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/QueryDataDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/QueryDataDUnitTest.java
index f55511f..e52594e 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/QueryDataDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/QueryDataDUnitTest.java
@@ -53,9 +53,10 @@ import com.gemstone.gemfire.management.internal.cli.json.TypedJson;
 import com.gemstone.gemfire.pdx.PdxInstance;
 import com.gemstone.gemfire.pdx.PdxInstanceFactory;
 import com.gemstone.gemfire.pdx.internal.PdxInstanceFactoryImpl;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 import org.json.JSONArray;
 import org.json.JSONException;
@@ -148,11 +149,6 @@ public class QueryDataDUnitTest extends ManagementTestBase {
 
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-
-  }
-  
   private void initCommonRegions(){
     createRegionsInNodes();
     fillValuesInRegions();
@@ -171,7 +167,7 @@ public class QueryDataDUnitTest extends ManagementTestBase {
         Region region = cache.getRegion(regionName);
         for (int j = from; j < to; j++)
           region.put(new Integer(j), portfolio[j]);
-        getLogWriter()
+        LogWriterUtils.getLogWriter()
             .info(
                 "PRQueryDUnitHelper#getCacheSerializableRunnableForPRPuts: Inserted Portfolio data on Region "
                     + regionName);
@@ -406,7 +402,7 @@ public class QueryDataDUnitTest extends ManagementTestBase {
               if (jsonString1.contains("result")) {
                 JSONObject jsonObj = new JSONObject(jsonString1);
               } else {
-                getLogWriter().info("Failed Test String" + queriesForRR[i] + " is = " + jsonString1);
+                LogWriterUtils.getLogWriter().info("Failed Test String" + queriesForRR[i] + " is = " + jsonString1);
                 fail("Join on Replicated did not work.");
               }
             }
@@ -785,7 +781,7 @@ public class QueryDataDUnitTest extends ManagementTestBase {
             final DistributedRegionMXBean regionMBean = MBeanUtil.getDistributedRegionMbean("/"
                 + PartitionedRegionName6, 3);
 
-            DistributedTestCase.waitForCriterion(new WaitCriterion() {
+            Wait.waitForCriterion(new WaitCriterion() {
 
               public String description() {
                 return "Waiting for all entries to get reflected at managing node";
@@ -799,19 +795,19 @@ public class QueryDataDUnitTest extends ManagementTestBase {
 
             }, MAX_WAIT, 1000, true);
 
-            getLogWriter().info("member1RealData  is = " + member1RealData);
-            getLogWriter().info("member2RealData  is = " + member2RealData);
-            getLogWriter().info("member3RealData  is = " + member3RealData);
+            LogWriterUtils.getLogWriter().info("member1RealData  is = " + member1RealData);
+            LogWriterUtils.getLogWriter().info("member2RealData  is = " + member2RealData);
+            LogWriterUtils.getLogWriter().info("member3RealData  is = " + member3RealData);
             
             String member1Result = bean.queryData(query, member1.getId(), 0);
-            getLogWriter().info("member1Result " + query + " is = " + member1Result);
+            LogWriterUtils.getLogWriter().info("member1Result " + query + " is = " + member1Result);
 
 
             String member2Result = bean.queryData(query, member2.getId(), 0);
-            getLogWriter().info("member2Result " + query + " is = " + member2Result);
+            LogWriterUtils.getLogWriter().info("member2Result " + query + " is = " + member2Result);
             
             String member3Result = bean.queryData(query, member3.getId(), 0);
-            getLogWriter().info("member3Result " + query + " is = " + member3Result);
+            LogWriterUtils.getLogWriter().info("member3Result " + query + " is = " + member3Result);
             
             for (String val : member1RealData) {
               assertTrue(member1Result.contains(val));

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/RegionManagementDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/RegionManagementDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/RegionManagementDUnitTest.java
index c148029..303188a 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/RegionManagementDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/RegionManagementDUnitTest.java
@@ -49,8 +49,12 @@ import com.gemstone.gemfire.internal.cache.lru.LRUStatistics;
 import com.gemstone.gemfire.internal.cache.partitioned.fixed.SingleHopQuarterPartitionResolver;
 import com.gemstone.gemfire.management.internal.MBeanJMXAdapter;
 import com.gemstone.gemfire.management.internal.SystemManagementService;
+import com.gemstone.gemfire.test.dunit.Assert;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * This class checks and verifies various data and operations exposed through
@@ -107,11 +111,6 @@ public class RegionManagementDUnitTest extends ManagementTestBase {
 
   }
 
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-    
-  }
-
   /**
    * Tests all Region MBean related Management APIs
    * 
@@ -439,7 +438,7 @@ public class RegionManagementDUnitTest extends ManagementTestBase {
             region.put(new Integer(total), array);
           }
           assertTrue(bean.getEntrySize() > 0);
-          getLogWriter().info("DEBUG: EntrySize =" + bean.getEntrySize());
+          LogWriterUtils.getLogWriter().info("DEBUG: EntrySize =" + bean.getEntrySize());
           
 
 
@@ -470,7 +469,7 @@ public class RegionManagementDUnitTest extends ManagementTestBase {
           assertNotNull(bean);
 
           assertTrue(bean.getEntrySize() > 0);
-          getLogWriter().info("DEBUG: EntrySize =" + bean.getEntrySize());
+          LogWriterUtils.getLogWriter().info("DEBUG: EntrySize =" + bean.getEntrySize());
         }
       });
 
@@ -673,14 +672,14 @@ public class RegionManagementDUnitTest extends ManagementTestBase {
     attr.setPartitionAttributes(paf.create());
     fixedPrRegion = cache.createRegion(FIXED_PR_NAME, attr.create());
     assertNotNull(fixedPrRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region " + FIXED_PR_NAME + " created Successfully :"
             + fixedPrRegion.toString());
 
     RegionMXBean bean = service.getLocalRegionMBean(FIXED_PR_PATH);
     RegionAttributes regAttrs = fixedPrRegion.getAttributes();
 
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "FixedPartitionAttribute From GemFire :"
             + regAttrs.getPartitionAttributes().getFixedPartitionAttributes());
 
@@ -697,7 +696,7 @@ public class RegionManagementDUnitTest extends ManagementTestBase {
 
     assertEquals(3, fixedPrData.length);
     for (int i = 0; i < fixedPrData.length; i++) {
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "<ExpectedString> Fixed PR Data is " + fixedPrData[i]
               + "</ExpectedString> ");
     }
@@ -731,7 +730,7 @@ public class RegionManagementDUnitTest extends ManagementTestBase {
           assertNotNull(fixedPrData);
           assertEquals(3, fixedPrData.length);
           for (int i = 0; i < fixedPrData.length; i++) {
-            getLogWriter().info(
+            LogWriterUtils.getLogWriter().info(
                 "<ExpectedString> Remote PR Data is " + fixedPrData[i]
                     + "</ExpectedString> ");
           }
@@ -770,7 +769,7 @@ public class RegionManagementDUnitTest extends ManagementTestBase {
             Set<ObjectName> names = service.queryMBeanNames(member);
             if(names != null){
               for(ObjectName name : names){
-                getLogWriter().info(
+                LogWriterUtils.getLogWriter().info(
                     "<ExpectedString> ObjectNames arr" + name
                         + "</ExpectedString> ");
               }
@@ -779,11 +778,11 @@ public class RegionManagementDUnitTest extends ManagementTestBase {
             mbeanServer.addNotificationListener(memberMBeanName, regionCreate,
                 null, null);
           } catch (NullPointerException e) {
-            fail("FAILED WITH EXCEPION", e);
+            Assert.fail("FAILED WITH EXCEPION", e);
           } catch (InstanceNotFoundException e) {
-            fail("FAILED WITH EXCEPION", e);
+            Assert.fail("FAILED WITH EXCEPION", e);
           } catch (Exception e) {
-            fail("FAILED WITH EXCEPION", e);
+            Assert.fail("FAILED WITH EXCEPION", e);
           }
 
         }
@@ -817,9 +816,9 @@ public class RegionManagementDUnitTest extends ManagementTestBase {
               null, null);
 
         } catch (NullPointerException e) {
-          fail("FAILED WITH EXCEPION", e);
+          Assert.fail("FAILED WITH EXCEPION", e);
         } catch (InstanceNotFoundException e) {
-          fail("FAILED WITH EXCEPION", e);
+          Assert.fail("FAILED WITH EXCEPION", e);
 
         }
 
@@ -844,7 +843,7 @@ public class RegionManagementDUnitTest extends ManagementTestBase {
           RegionMXBean bean = null;
           try {
 
-            waitForCriterion(new WaitCriterion() {
+            Wait.waitForCriterion(new WaitCriterion() {
 
               RegionMXBean bean = null;
 
@@ -905,10 +904,10 @@ public class RegionManagementDUnitTest extends ManagementTestBase {
           EvictionAttributesData evictionData = bean.listEvictionAttributes();
           assertNotNull(membershipData);
           assertNotNull(evictionData);
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "<ExpectedString> Membership Data is "
                   + membershipData.toString() + "</ExpectedString> ");
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "<ExpectedString> Eviction Data is " + membershipData.toString()
                   + "</ExpectedString> ");
  
@@ -942,7 +941,7 @@ public class RegionManagementDUnitTest extends ManagementTestBase {
 
         if (expectedMembers == 0) {
           try {
-            waitForCriterion(new WaitCriterion() {
+            Wait.waitForCriterion(new WaitCriterion() {
 
               RegionMXBean bean = null;
 
@@ -983,15 +982,15 @@ public class RegionManagementDUnitTest extends ManagementTestBase {
 
         // Check Stats related Data
         // Add Mock testing
-        getLogWriter()
+        LogWriterUtils.getLogWriter()
             .info(
                 "<ExpectedString> CacheListenerCallsAvgLatency is "
                     + bean.getCacheListenerCallsAvgLatency()
                     + "</ExpectedString> ");
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "<ExpectedString> CacheWriterCallsAvgLatency is "
                 + bean.getCacheWriterCallsAvgLatency() + "</ExpectedString> ");
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "<ExpectedString> CreatesRate is " + bean.getCreatesRate()
                 + "</ExpectedString> ");
 
@@ -1068,12 +1067,12 @@ public class RegionManagementDUnitTest extends ManagementTestBase {
               .addNotificationListener(memberMBeanName, test, null, null);
         } catch (MalformedObjectNameException e) {
 
-          fail("FAILED WITH EXCEPION", e);
+          Assert.fail("FAILED WITH EXCEPION", e);
         } catch (NullPointerException e) {
-          fail("FAILED WITH EXCEPION", e);
+          Assert.fail("FAILED WITH EXCEPION", e);
 
         } catch (InstanceNotFoundException e) {
-          fail("FAILED WITH EXCEPION", e);
+          Assert.fail("FAILED WITH EXCEPION", e);
 
         }
 
@@ -1092,10 +1091,10 @@ public class RegionManagementDUnitTest extends ManagementTestBase {
         EvictionAttributesData evictionData = bean.listEvictionAttributes();
         assertNotNull(membershipData);
         assertNotNull(evictionData);
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "<ExpectedString> Membership Data is " + membershipData.toString()
                 + "</ExpectedString> ");
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "<ExpectedString> Eviction Data is " + membershipData.toString()
                 + "</ExpectedString> ");
       }
@@ -1143,7 +1142,7 @@ public class RegionManagementDUnitTest extends ManagementTestBase {
         try {
           bean = service.getLocalRegionMBean(REGION_PATH);
         } catch (ManagementException mgtEx) {
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "<ExpectedString> Expected Exception  "
                   + mgtEx.getLocalizedMessage() + "</ExpectedString> ");
         }
@@ -1168,12 +1167,12 @@ public class RegionManagementDUnitTest extends ManagementTestBase {
       public void run() {
         GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
         ManagementService service = getManagementService();
-        getLogWriter().info("Closing Par Region");
+        LogWriterUtils.getLogWriter().info("Closing Par Region");
         RegionMXBean bean = null;
         try {
           bean = service.getLocalRegionMBean(PARTITIONED_REGION_PATH);
         } catch (ManagementException mgtEx) {
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "<ExpectedString> Expected Exception  "
                   + mgtEx.getLocalizedMessage() + "</ExpectedString> ");
         }
@@ -1194,14 +1193,14 @@ public class RegionManagementDUnitTest extends ManagementTestBase {
       public void run() {
         GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
         ManagementService service = getManagementService();
-        getLogWriter().info("Closing Fixed Par Region");
+        LogWriterUtils.getLogWriter().info("Closing Fixed Par Region");
         Region region = cache.getRegion(FIXED_PR_PATH);
         region.close();
         RegionMXBean bean = null;
         try {
           bean = service.getLocalRegionMBean(FIXED_PR_PATH);
         } catch (ManagementException mgtEx) {
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "<ExpectedString> Expected Exception  "
                   + mgtEx.getLocalizedMessage() + "</ExpectedString> ");
         }
@@ -1422,7 +1421,7 @@ public class RegionManagementDUnitTest extends ManagementTestBase {
       Notification rn =  notification;
       assertTrue(rn.getType().equals(JMXNotificationType.REGION_CREATED)
           || rn.getType().equals(JMXNotificationType.REGION_CLOSED));
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "<ExpectedString> Member Level Notifications" + rn.toString()
               + "</ExpectedString> ");
     }
@@ -1441,7 +1440,7 @@ public class RegionManagementDUnitTest extends ManagementTestBase {
     public void handleNotification(Notification notification, Object handback) {
       assertNotNull(notification);
       Notification rn = notification;
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "<ExpectedString> Distributed System Notifications" + rn.toString()
               + "</ExpectedString> ");
     }
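
Besides the logging and waiting changes, this file also routes the two-argument fail(String, Throwable) calls through the static Assert.fail from the dunit package, passing the causing exception along with the failure message (plain JUnit's fail(String) has no Throwable overload). A minimal sketch of that pattern, assuming the dunit Assert class is on the classpath; the MBean listener registration shown is illustrative and not copied from the commit:

import javax.management.InstanceNotFoundException;
import javax.management.MBeanServer;
import javax.management.NotificationListener;
import javax.management.ObjectName;

import com.gemstone.gemfire.test.dunit.Assert;

// Sketch only: failing a dunit test while reporting the underlying cause.
public class FailWithCauseSketch {
  void addListener(MBeanServer mbeanServer, ObjectName memberMBeanName,
      NotificationListener listener) {
    try {
      mbeanServer.addNotificationListener(memberMBeanName, listener, null, null);
    } catch (InstanceNotFoundException e) {
      Assert.fail("FAILED WITH EXCEPTION", e); // reports e together with the message
    }
  }
}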


[45/62] [abbrv] [partial] incubator-geode git commit: GEODE-773: Extract static methods from DistributedTestCase

Posted by je...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRColocatedEquiJoinDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRColocatedEquiJoinDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRColocatedEquiJoinDUnitTest.java
index c812e72..e6b17e7 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRColocatedEquiJoinDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRColocatedEquiJoinDUnitTest.java
@@ -54,7 +54,10 @@ import com.gemstone.gemfire.internal.cache.PartitionedRegion;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionDUnitTestCase;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 /**
  * @author shobhit
@@ -99,40 +102,40 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
 
     // Creting PR's on the participating VM's
     // Creating DataStore node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the DataStore node in the PR");
 
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
         redundancy, Portfolio.class));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the DataStore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
     // Creating Colocated Region DataStore node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the Colocated DataStore node in the PR");
 
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRColocatedCreate(coloName,
         redundancy, name));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the Colocated DataStore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
@@ -158,7 +161,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(coloName, newPortfolio,
         cnt, cntDest));
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Inserted Portfolio data across PR's");
     
@@ -167,40 +170,40 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     // querying the VM for data
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRColocatedDataSetQueryAndCompareResults(name, coloName, localName, coloLocalName));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR's Test ENDED");
   }
 
   public void testNonColocatedPRLocalQuerying() throws Exception
   {
-    addExpectedException("UnsupportedOperationException");
+    IgnoredException.addIgnoredException("UnsupportedOperationException");
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
 
     // Creting PR's on the participating VM's
     // Creating DataStore node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the DataStore node in the PR");
 
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
         redundancy, Portfolio.class));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the DataStore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
     // Creating Colocated Region DataStore node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the Colocated DataStore node in the PR");
 
@@ -224,7 +227,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
           partitionedregion = cache.createRegion(coloName, attr.create());
         }
         catch (IllegalStateException ex) {
-          getLogWriter()
+          LogWriterUtils.getLogWriter()
               .warning(
                   "PRQueryDUnitHelper#getCacheSerializableRunnableForPRCreateWithRedundancy: Creation caught IllegalStateException",
                   ex);
@@ -241,11 +244,11 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
       }
     });
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the Colocated DataStore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
@@ -271,7 +274,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(coloName, newPortfolio,
         cnt, cntDest));
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Inserted Portfolio data across PR's");
     
@@ -357,7 +360,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
 
         } catch (FunctionException e) {
           if (e.getCause() instanceof UnsupportedOperationException) {
-            getLogWriter().info("Query received FunctionException successfully while using QueryService.");
+            LogWriterUtils.getLogWriter().info("Query received FunctionException successfully while using QueryService.");
           } else {
             fail("UnsupportedOperationException must be thrown here");
           }
@@ -365,7 +368,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
       }
     });
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR's Test ENDED");
   }
@@ -382,13 +385,13 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
 
     // Creting PR's on the participating VM's
     // Creating DataStore node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the DataStore node in the PR");
 
@@ -397,16 +400,16 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name, "IdIndex1", "r1.ID", "/"+name+" r1", null));
     //vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name, "IdIndex11", "r1.status", "/"+name+" r1", null));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the DataStore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
     // Creating Colocated Region DataStore node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the Colocated DataStore node in the PR");
 
@@ -415,11 +418,11 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(coloName, "IdIndex2", "r2.id", "/"+coloName+" r2", null));
     //vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(coloName, "IdIndex22", "r2.status", "/"+coloName+" r2", null));
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the Colocated DataStore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
@@ -445,7 +448,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(coloName, newPortfolio,
         cnt, cntDest));
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Inserted Portfolio data across PR's");
     
@@ -454,7 +457,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     // querying the VM for data
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRColocatedDataSetQueryAndCompareResults(name, coloName, localName, coloLocalName));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR's Test ENDED");
   }
@@ -471,13 +474,13 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
 
     // Creting PR's on the participating VM's
     // Creating DataStore node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the DataStore node in the PR");
 
@@ -486,27 +489,27 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name, "IdIndex1", "r1.ID", "/"+name+" r1", null));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name, "IdIndex11", "r1.status", "/"+name+" r1", null));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the DataStore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
     // Creating Colocated Region DataStore node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the Colocated DataStore node in the PR");
 
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRColocatedCreate(coloName,
         redundancy, name));
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the Colocated DataStore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
@@ -532,7 +535,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(coloName, newPortfolio,
         cnt, cntDest));
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Inserted Portfolio data across PR's");
     
@@ -541,7 +544,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     // querying the VM for data
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRColocatedDataSetQueryAndCompareResults(name, coloName, localName, coloLocalName));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR's Test ENDED");
   }
@@ -558,39 +561,39 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
 
     // Creting PR's on the participating VM's
     // Creating DataStore node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the DataStore node in the PR");
 
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
         redundancy, Portfolio.class));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the DataStore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
     // Creating Colocated Region DataStore node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the Colocated DataStore node in the PR");
 
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForLocalRegionCreation(coloName, NewPortfolio.class));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the Colocated DataStore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
@@ -616,7 +619,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(coloName, newPortfolio,
         cnt, cntDest));
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Inserted Portfolio data across PR's");
     
@@ -625,7 +628,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     // querying the VM for data
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRColocatedDataSetQueryAndCompareResults(name, coloName, localName, coloLocalName));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR's Test ENDED");
   }
@@ -642,13 +645,13 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
 
     // Creting PR's on the participating VM's
     // Creating DataStore node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the DataStore node in the PR");
 
@@ -657,16 +660,16 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name, "IdIndex1", "r1.ID", "/"+name+" r1", null));
     //vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name, "IdIndex11", "r1.status", "/"+name+" r1", null));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the DataStore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
     // Creating Colocated Region DataStore node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the Colocated DataStore node in the PR");
 
@@ -675,11 +678,11 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(coloName, "IdIndex2", "r2.id", "/"+coloName+" r2", null));
     //vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(coloName, "IdIndex22", "r2.status", "/"+coloName+" r2", null));
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the Colocated DataStore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
@@ -705,7 +708,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(coloName, newPortfolio,
         cnt, cntDest));
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Inserted Portfolio data across PR's");
     
@@ -714,7 +717,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     // querying the VM for data
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRColocatedDataSetQueryAndCompareResults(name, coloName, localName, coloLocalName));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR's Test ENDED");
   }
@@ -731,13 +734,13 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
 
     // Creting PR's on the participating VM's
     // Creating DataStore node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the DataStore node in the PR");
 
@@ -746,26 +749,26 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name, "IdIndex1", "r1.ID", "/"+name+" r1", null));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name, "IdIndex11", "r1.status", "/"+name+" r1", null));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the DataStore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
     // Creating Colocated Region DataStore node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the Colocated DataStore node in the PR");
 
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForLocalRegionCreation(coloName, NewPortfolio.class));
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the Colocated DataStore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
@@ -791,7 +794,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(coloName, newPortfolio,
         cnt, cntDest));
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Inserted Portfolio data across PR's");
     
@@ -800,7 +803,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     // querying the VM for data
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRColocatedDataSetQueryAndCompareResults(name, coloName, localName, coloLocalName));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR's Test ENDED");
   }
@@ -817,39 +820,39 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
 
     // Creting PR's on the participating VM's
     // Creating DataStore node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the DataStore node in the PR");
 
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(coloName,
         redundancy, NewPortfolio.class));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the DataStore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
     // Creating Colocated Region DataStore node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the Colocated DataStore node in the PR");
 
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForLocalRegionCreation(name, Portfolio.class));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the Colocated DataStore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
@@ -875,7 +878,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(coloName, newPortfolio,
         cnt, cntDest));
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Inserted Portfolio data across PR's");
     
@@ -884,7 +887,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     // querying the VM for data
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRColocatedDataSetQueryAndCompareResults(name, coloName, localName, coloLocalName));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR's Test ENDED");
   }
@@ -901,13 +904,13 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
 
     // Creting PR's on the participating VM's
     // Creating DataStore node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the DataStore node in the PR");
 
@@ -915,16 +918,16 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
         redundancy, NewPortfolio.class));
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(coloName, "IdIndex1", "r2.id", "/"+coloName+" r2", null));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the DataStore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
     // Creating Colocated Region DataStore node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the Colocated DataStore node in the PR");
 
@@ -932,11 +935,11 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name, "IdIndex2", "r1.ID", "/"+name+" r1", null));
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the Colocated DataStore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
@@ -962,7 +965,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(coloName, newPortfolio,
         cnt, cntDest));
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Inserted Portfolio data across PR's");
     
@@ -971,7 +974,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     // querying the VM for data
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRColocatedDataSetQueryAndCompareResults(name, coloName, localName, coloLocalName));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR's Test ENDED");
   }
@@ -988,29 +991,29 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
 
     // Creting PR's on the participating VM's
     // Creating DataStore node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the DataStore node in the PR");
 
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(coloName,
         redundancy, NewPortfolio.class));
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the DataStore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
     // Creating Colocated Region DataStore node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the Colocated DataStore node in the PR");
 
@@ -1018,11 +1021,11 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name, "IdIndex1", "r1.ID", "/"+name+" r1", null));
 
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the Colocated DataStore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
@@ -1048,7 +1051,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(coloName, newPortfolio,
         cnt, cntDest));
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Inserted Portfolio data across PR's");
     
@@ -1057,7 +1060,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     // querying the VM for data
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRColocatedDataSetQueryAndCompareResults(name, coloName, localName, coloLocalName));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR's Test ENDED");
   }
@@ -1075,13 +1078,13 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
 
     // Creting PR's on the participating VM's
     // Creating DataStore node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the DataStore node in the PR");
 
@@ -1089,16 +1092,16 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
         redundancy, Portfolio.class));
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
         redundancy, Portfolio.class));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the DataStore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
     // Creating Colocated Region DataStore node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the Colocated DataStore node in the PR");
 
@@ -1106,11 +1109,11 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
         redundancy, name));
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRColocatedCreate(coloName,
         redundancy, name));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the Colocated DataStore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
@@ -1136,7 +1139,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(coloName, newPortfolio,
         cnt, cntDest));
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Inserted Portfolio data across PR's");
     
@@ -1198,7 +1201,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception",
               e);
         } catch (QueryException e) {
-          getLogWriter()
+          LogWriterUtils.getLogWriter()
               .error(
                   "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught QueryException while querying"
                       + e, e);
@@ -1206,11 +1209,11 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
               "PRQueryDUnitHelper#getCacheSerializableRunnableForPRQueryAndCompareResults: Caught unexpected query exception",
               e);
         } catch (UnsupportedOperationException uso) {
-          getLogWriter().info(uso.getMessage());
+          LogWriterUtils.getLogWriter().info(uso.getMessage());
           if (!uso.getMessage().equalsIgnoreCase(LocalizedStrings.DefaultQuery_A_QUERY_ON_A_PARTITIONED_REGION_0_MAY_NOT_REFERENCE_ANY_OTHER_REGION_1.toLocalizedString(new Object[] {name, "/"+coloName}))) {
             fail("Query did not throw UnsupportedOperationException while using QueryService instead of LocalQueryService");
           } else {
-            getLogWriter().info("Query received UnsupportedOperationException successfully while using QueryService.");
+            LogWriterUtils.getLogWriter().info("Query received UnsupportedOperationException successfully while using QueryService.");
           }
         } finally {
           for (int i = 0; i < expectedExceptions.length; i++) {
@@ -1222,7 +1225,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
       }
     });
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR's Test ENDED");
   }
@@ -1232,13 +1235,13 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
 
     // Creting PR's on the participating VM's
     // Creating DataStore node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the DataStore node in the PR");
 
@@ -1247,16 +1250,16 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name, "IdIndex1", "r1.ID", "/"+name+" r1", null));
     //vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name, "IdIndex11", "r1.status", "/"+name+" r1", null));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the DataStore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
     // Creating Colocated Region DataStore node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the Colocated DataStore node in the PR");
 
@@ -1265,11 +1268,11 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(coloName, "IdIndex2", "r2.id", "/"+coloName+" r2, r2.positions.values pos2", null));
     //vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(coloName, "IdIndex22", "r2.status", "/"+coloName+" r2", null));
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the Colocated DataStore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
@@ -1295,7 +1298,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(coloName, newPortfolio,
         cnt, cntDest));
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Inserted Portfolio data across PR's");
     
@@ -1304,7 +1307,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     // querying the VM for data
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAndRRQueryAndCompareResults(name, coloName, localName, coloLocalName));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR's Test ENDED");
   }
@@ -1315,13 +1318,13 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
 
     // Creting PR's on the participating VM's
     // Creating DataStore node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the DataStore node in the PR");
 
@@ -1330,16 +1333,16 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(coloName, "IdIndex1", "r2.id", "/"+coloName+" r2", null));
     //vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name, "IdIndex11", "r1.status", "/"+name+" r1", null));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the DataStore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
     // Creating Colocated Region DataStore node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the Colocated DataStore node in the PR");
 
@@ -1348,11 +1351,11 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name, "IdIndex2", "r1.ID", "/"+name+" r1, r1.positions.values pos1", null));
     //vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(coloName, "IdIndex22", "r2.status", "/"+coloName+" r2", null));
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the Colocated DataStore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
@@ -1378,7 +1381,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(coloName, newPortfolio,
         cnt, cntDest));
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Inserted Portfolio data across PR's");
     
@@ -1387,7 +1390,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     // querying the VM for data
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForRRAndPRQueryAndCompareResults(name, coloName, localName, coloLocalName));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR's Test ENDED");
   }
@@ -1397,13 +1400,13 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
 
     // Creting PR's on the participating VM's
     // Creating DataStore node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the DataStore node in the PR");
 
@@ -1412,16 +1415,16 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name, "IdIndex1", "r1.ID", "/"+name+" r1", null));
     //vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name, "IdIndex11", "r1.status", "/"+name+" r1", null));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the DataStore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
     // Creating Colocated Region DataStore node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the Colocated DataStore node in the PR");
 
@@ -1430,11 +1433,11 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(coloName, "IdIndex2", "pos2.id", "/"+coloName+" r2, r2.positions.values pos2", null));
     //vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(coloName, "IdIndex22", "r2.status", "/"+coloName+" r2", null));
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the Colocated DataStore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
@@ -1460,7 +1463,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(coloName, newPortfolio,
         cnt, cntDest));
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Inserted Portfolio data across PR's");
     
@@ -1469,7 +1472,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     // querying the VM for data
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAndRRQueryWithCompactAndRangeIndexAndCompareResults(name, coloName, localName, coloLocalName));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR's Test ENDED");
   }
@@ -1479,13 +1482,13 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     Host host = Host.getHost(0);
     VM vm0 = host.getVM(0);
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
 
     // Creting PR's on the participating VM's
     // Creating DataStore node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the DataStore node in the PR");
 
@@ -1494,16 +1497,16 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name, "IdIndex1", "r1.ID", "/"+name+" r1", null));
     //vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(name, "IdIndex11", "r1.status", "/"+name+" r1", null));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the DataStore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
     // Creating Colocated Region DataStore node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the Colocated DataStore node in the PR");
 
@@ -1512,11 +1515,11 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(coloName, "IdIndex2", "r2.id", "/"+coloName+" r2", null));
     //vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRIndexCreate(coloName, "IdIndex22", "r2.status", "/"+coloName+" r2", null));
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the Colocated DataStore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
@@ -1545,19 +1548,19 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(coloName, newPortfolio,
         cnt, cntDest));
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Inserted Portfolio data across PR's");
 
     //Let async index updates be finished.
-    pause(5000);
+    Wait.pause(5000);
 
     // querying the VM for data and comparing the result with query result of
     // local region.
     // querying the VM for data
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAndRRQueryAndCompareResults(name, coloName, localName, coloLocalName));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR's Test ENDED");
   }
@@ -1576,13 +1579,13 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR Test with DACK Started");
 
     // Creting PR's on the participating VM's
     // Creating DataStore node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the DataStore node in the PR");
 
@@ -1590,22 +1593,22 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
         0, Portfolio.class));
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
         0, Portfolio.class));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully created the DataStore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
     // Creating Colocated Region DataStore node on the VM0.
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Creating the Colocated DataStore node in the RR");
 
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForLocalRegionCreation(coloName, NewPortfolio.class));
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Successfully Created PR's across all VM's");
 
@@ -1618,7 +1621,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(coloName, newPortfolio,
         cnt, cntDest));
     
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Inserted Portfolio data across PR's");
     
@@ -1704,7 +1707,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
 
         } catch (FunctionException e) {
           if (e.getCause() instanceof RegionNotFoundException) {
-            getLogWriter().info("Query received FunctionException successfully while using QueryService.");
+            LogWriterUtils.getLogWriter().info("Query received FunctionException successfully while using QueryService.");
           } else {
             fail("RegionNotFoundException must be thrown here");
           }
@@ -1712,7 +1715,7 @@ public class PRColocatedEquiJoinDUnitTest extends PartitionedRegionDUnitTestCase
       }
     });
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQBasicQueryDUnitTest#testPRBasicQuerying: Querying PR's Test ENDED");
   }
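
The hunks above are all the same mechanical substitution from GEODE-773: helper calls that used to be inherited as statics from DistributedTestCase, such as getLogWriter() and pause(...), become explicit calls on the extracted utility classes LogWriterUtils and Wait. A minimal sketch of the pattern follows; the utility classes and call signatures are the ones shown in the hunks, while the class, constructor and test method names are illustrative only and assume the usual JUnit 3 style dunit base class.

    import com.gemstone.gemfire.internal.cache.PartitionedRegionDUnitTestCase;
    import com.gemstone.gemfire.test.dunit.LogWriterUtils;
    import com.gemstone.gemfire.test.dunit.Wait;

    public class ExampleQueryDUnitTest extends PartitionedRegionDUnitTestCase {

      public ExampleQueryDUnitTest(String name) {
        super(name);   // JUnit 3 style constructor used by the dunit framework
      }

      public void testExample() throws Exception {
        // formerly: getLogWriter().info(...)  -- inherited from DistributedTestCase
        LogWriterUtils.getLogWriter().info("ExampleQueryDUnitTest#testExample: started");

        // formerly: pause(5000)  -- now a static on the Wait utility
        Wait.pause(5000);

        LogWriterUtils.getLogWriter().info("ExampleQueryDUnitTest#testExample: ended");
      }
    }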

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRInvalidQueryDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRInvalidQueryDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRInvalidQueryDUnitTest.java
index 2028ff7..aa7f1fe 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRInvalidQueryDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRInvalidQueryDUnitTest.java
@@ -25,6 +25,7 @@ package com.gemstone.gemfire.cache.query.partitioned;
 import com.gemstone.gemfire.cache.query.data.PortfolioData;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionDUnitTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 public class PRInvalidQueryDUnitTest extends PartitionedRegionDUnitTestCase
@@ -63,7 +64,7 @@ public class PRInvalidQueryDUnitTest extends PartitionedRegionDUnitTestCase
 
   public void testPRDAckCreationAndQueryingWithInvalidQuery() throws Exception
   {
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRInvalidQueryDUnitTest#testPRDAckCreationAndQueryingWithInvalidQuery: Querying PR Test with Expected InvalidQueryException*****");
     Host host = Host.getHost(0);
@@ -76,17 +77,17 @@ public class PRInvalidQueryDUnitTest extends PartitionedRegionDUnitTestCase
     // Creting PR's on the participating VM's
 
     // Creating Accessor node on the VM
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRInvalidQueryDUnitTest#testPRDAckCreationAndQueryingWithInvalidQuery: Creating the Accessor node in the PR");
     vm0.invoke(prq.getCacheSerializableRunnableForPRAccessorCreate(name,
         redundancy));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRInvalidQueryDUnitTest#testPRDAckCreationAndQueryingWithInvalidQuery: Successfully created the Accessor node in the PR");
 
     // Creating the Datastores Nodes in the VM's
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRInvalidQueryDUnitTest#testPRDAckCreationAndQueryingWithInvalidQuery: Creating the Datastore node in the PR");
     vm1.invoke(prq.getCacheSerializableRunnableForPRCreate(name,
@@ -95,11 +96,11 @@ public class PRInvalidQueryDUnitTest extends PartitionedRegionDUnitTestCase
         redundancy));
     vm3.invoke(prq.getCacheSerializableRunnableForPRCreate(name,
         redundancy));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRInvalidQueryDUnitTest#testPRDAckCreationAndQueryingWithInvalidQuery: Successfully Created the Datastore node in the PR");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRInvalidQueryDUnitTest#testPRDAckCreationAndQueryingWithInvalidQuery: Successfully Created PR's across all VM's");
 
@@ -117,7 +118,7 @@ public class PRInvalidQueryDUnitTest extends PartitionedRegionDUnitTestCase
         + (2 * step), i + (3 * step)));
     vm3.invoke(prq.getCacheSerializableRunnableForPRPuts(name, portfolio, i
         + (3 * step), cntDest));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRInvalidQueryDUnitTest#testPRDAckCreationAndQueryingWithInvalidQuery: Successfully Inserted Portfolio data across PR's");
 
@@ -125,7 +126,7 @@ public class PRInvalidQueryDUnitTest extends PartitionedRegionDUnitTestCase
     // querying the VM for data
     vm0.invoke(prq.getCacheSerializableRunnableForPRInvalidQuery(name,
         invalidQuery));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRInvalidQueryDUnitTest#testPRDAckCreationAndQueryingWithInvalidQuery: *****Querying PR's Test with Expected Invalid Query Exception *****");
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryCacheCloseDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryCacheCloseDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryCacheCloseDUnitTest.java
index dd12b21..6c6c36f 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryCacheCloseDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryCacheCloseDUnitTest.java
@@ -34,10 +34,13 @@ import java.util.Random;
 import com.gemstone.gemfire.cache.query.data.PortfolioData;
 import com.gemstone.gemfire.internal.cache.ForceReattemptException;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionDUnitTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 public class PRQueryCacheCloseDUnitTest extends PartitionedRegionDUnitTestCase
 {
@@ -88,7 +91,7 @@ public class PRQueryCacheCloseDUnitTest extends PartitionedRegionDUnitTestCase
   public void testPRWithCacheCloseInOneDatastoreWithDelay() throws Exception
   {
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithDelay: Querying PR Test with cache Close PR operation*****");
     Host host = Host.getHost(0);
@@ -103,33 +106,33 @@ public class PRQueryCacheCloseDUnitTest extends PartitionedRegionDUnitTestCase
     // Creting PR's on the participating VM's
 
     // Creting Accessor PR's on the participating VM's
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithDelay: Creating Accessor node on VM0");
     accessor.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
         redundancy));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithDelay: Successfully Created Accessor node on VM0");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithDelay: Creating PR's across all VM1 , VM2");
     datastore1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
         redundancy));
     datastore2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
         redundancy));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithDelay: Successfully Created PR on VM1 , VM2");
 
     // creating a local region on one of the JVM's
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithDelay: Creating Local Region on VM0");
     accessor.invoke(PRQHelp
         .getCacheSerializableRunnableForLocalRegionCreation(localName));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithDelay: Successfully Created Local Region on VM0");
 
@@ -139,45 +142,45 @@ public class PRQueryCacheCloseDUnitTest extends PartitionedRegionDUnitTestCase
     final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
 
     // Putting the data into the accessor node
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithDelay: Inserting Portfolio data through the accessor node");
     accessor.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithDelay: Successfully Inserted Portfolio data through the accessor node");
 
     // Putting the same data in the local region created
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithDelay: Inserting Portfolio data on local node  VM0 for result Set Comparison");
     accessor.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName,
         portfolio, cnt, cntDest));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithDelay: Successfully Inserted Portfolio data on local node  VM0 for result Set Comparison");
 
     Random random = new Random();
     AsyncInvocation async0;
     // querying the VM for data
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithDelay: Querying on VM0 both on PR Region & local ,also  Comparing the Results sets from both");
     async0 = accessor
         .invokeAsync(PRQHelp.getCacheSerializableRunnableForPRQueryAndCompareResults(
             name, localName));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithDelay: Calling for cache close on either of the Datastores VM1 , VM2 at random and then recreating the cache, with a predefined Delay ");
     for (int j = 0; j < queryTestCycle; j++) {
       int k = (random.nextInt(vmList.size()));
       ((VM)(vmList.get(k))).invoke(PRQHelp.getCacheSerializableRunnableForCacheClose(
           name, redundancy));
-      pause(threadSleepTime);
+      Wait.pause(threadSleepTime);
     }
-    DistributedTestCase.join(async0, 5 * 60 * 1000, getLogWriter());
+    ThreadUtils.join(async0, 5 * 60 * 1000);
 
     if (async0.exceptionOccurred()) {
       // for now, certain exceptions when a region is closed are acceptable
@@ -193,11 +196,11 @@ public class PRQueryCacheCloseDUnitTest extends PartitionedRegionDUnitTestCase
       } while (t != null);
       
       if (!isForceReattempt) {
-        fail("Unexpected exception during query", async0.getException());
+        Assert.fail("Unexpected exception during query", async0.getException());
       }
     }
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithDelay: Querying with PR Operations ENDED*****");
   }
@@ -216,7 +219,7 @@ public class PRQueryCacheCloseDUnitTest extends PartitionedRegionDUnitTestCase
    */
   public void testPRWithCacheCloseInOneDatastoreWithoutDelay() throws Exception
   {
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithoutDelay: Querying PR Test with cache Close PR operation without delay*****");
     Host host = Host.getHost(0);
@@ -229,39 +232,39 @@ public class PRQueryCacheCloseDUnitTest extends PartitionedRegionDUnitTestCase
     vmList.add(vm2);
 
     // Creting PR's on the participating VM's
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithoutDelay: Creating Accessor node on VM0");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRAccessorCreate(name,
         redundancy));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithoutDelay: Successfully Created Accessor node on VM0");
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithoutDelay: Creating PR's across all VM1 , VM2");
     vm1.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
         redundancy));
     vm2.invoke(PRQHelp.getCacheSerializableRunnableForPRCreate(name,
         redundancy));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithoutDelay: Successfully Created PR on VM1 , VM2");
 
     // creating a local region on one of the JVM's
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithoutDelay: Creating Local Region on VM0");
     vm0.invoke(PRQHelp
         .getCacheSerializableRunnableForLocalRegionCreation(localName));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithoutDelay: Successfully Created Local Region on VM0");
 
-    getLogWriter().info("Successfully Created PR's across all VM's");
+    LogWriterUtils.getLogWriter().info("Successfully Created PR's across all VM's");
     // creating a local region on one of the JVM's
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithoutDelay: Successfully Created Local Region on VM0");
 
@@ -270,22 +273,22 @@ public class PRQueryCacheCloseDUnitTest extends PartitionedRegionDUnitTestCase
 
     final PortfolioData[] portfolio = PRQHelp.createPortfolioData(cnt, cntDest);
     // Putting the data into the accessor node
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithoutDelay: Inserting Portfolio data through the accessor node");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(name, portfolio,
         cnt, cntDest));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithoutDelay: Successfully Inserted Portfolio data through the accessor node");
 
     // Putting the same data in the local region created
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithoutDelay: Inserting Portfolio data on local node  VM0 for result Set Comparison");
     vm0.invoke(PRQHelp.getCacheSerializableRunnableForPRPuts(localName,
         portfolio, cnt, cntDest));
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithoutDelay: Successfully Inserted Portfolio data on local node  VM0 for result Set Comparison");
 
@@ -293,14 +296,14 @@ public class PRQueryCacheCloseDUnitTest extends PartitionedRegionDUnitTestCase
 
     AsyncInvocation async0;
     // querying the VM for data
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithoutDelay: Querying on VM0 both on PR Region & local ,also  Comparing the Results sets from both");
     async0 = vm0
         .invokeAsync(PRQHelp.getCacheSerializableRunnableForPRQueryAndCompareResults(
             name, localName));
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithoutDelay: Calling for cache close on either of the Datastores VM1 , VM2 at random and then recreating the cache, with no delay ");
     for (int j = 0; j < queryTestCycle; j++) {
@@ -309,7 +312,7 @@ public class PRQueryCacheCloseDUnitTest extends PartitionedRegionDUnitTestCase
           name, redundancy));
     }
 
-    DistributedTestCase.join(async0, 5 * 60 * 1000, getLogWriter());
+    ThreadUtils.join(async0, 5 * 60 * 1000);
 
     if (async0.exceptionOccurred()) {
       // for now, certain exceptions when a region is closed are acceptable
@@ -325,11 +328,11 @@ public class PRQueryCacheCloseDUnitTest extends PartitionedRegionDUnitTestCase
       } while (t != null);
       
       if (!isForceReattempt) {
-        fail("Unexpected exception during query", async0.getException());
+        Assert.fail("Unexpected exception during query", async0.getException());
       }
     }
 
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info(
             "PRQueryCacheCloseDUnitTest#testPRWithCacheCloseInOneDatastoreWithoutDelay: Querying with PR Operations  without delay ENDED*****");
   }
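
Besides the logging change, this file shows the other two recurring substitutions: DistributedTestCase.join(async, timeout, log) becomes ThreadUtils.join(async, timeout), and the two-argument fail(message, cause) becomes Assert.fail(message, cause) on the dunit Assert class. A small sketch of the join-and-check idiom used above; the helper method and its name are hypothetical, the dunit calls are the ones in the hunks.

    import com.gemstone.gemfire.test.dunit.Assert;
    import com.gemstone.gemfire.test.dunit.AsyncInvocation;
    import com.gemstone.gemfire.test.dunit.ThreadUtils;

    public class AsyncJoinSketch {

      // Waits up to five minutes for an async dunit invocation, then rethrows any failure.
      static void joinAndCheck(AsyncInvocation async0) {
        // formerly: DistributedTestCase.join(async0, 5 * 60 * 1000, getLogWriter());
        ThreadUtils.join(async0, 5 * 60 * 1000);

        if (async0.exceptionOccurred()) {
          // formerly: fail("Unexpected exception during query", async0.getException());
          Assert.fail("Unexpected exception during query", async0.getException());
        }
      }
    }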

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryCacheClosedJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryCacheClosedJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryCacheClosedJUnitTest.java
index 620caa3..da64daf 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryCacheClosedJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/partitioned/PRQueryCacheClosedJUnitTest.java
@@ -34,7 +34,7 @@ import com.gemstone.gemfire.cache.query.RegionNotFoundException;
 import com.gemstone.gemfire.cache.query.SelectResults;
 import com.gemstone.gemfire.cache.query.data.PortfolioData;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionTestHelper;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
@@ -222,8 +222,8 @@ public class PRQueryCacheClosedJUnitTest
       logger
           .info("PRQueryCacheClosedJUnitTest#testQueryOnSingleDataStoreWithCacheClose: Waiting for the Threads to join ");
 
-      DistributedTestCase.join(t1, 30 * 1000, null);
-      DistributedTestCase.join(t2, 30 * 1000, null);
+      ThreadUtils.join(t1, 30 * 1000);
+      ThreadUtils.join(t2, 30 * 1000);
       logger
           .info("PRQueryCacheClosedJUnitTest#testQueryOnSingleDataStoreWithCacheClose: checking for any Unexpected Exception's occured");
 

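ThreadUtils.join is also called with plain java.lang.Thread objects, which is what the JUnit (non-dunit) tests above now use in place of DistributedTestCase.join(thread, ms, null). A compact sketch, with the thread body and names illustrative only:

    import com.gemstone.gemfire.test.dunit.ThreadUtils;

    public class PlainThreadJoinSketch {
      public static void main(String[] args) {
        Thread worker = new Thread(new Runnable() {
          public void run() {
            // test workload would run here
          }
        });
        worker.start();
        // formerly: DistributedTestCase.join(worker, 30 * 1000, null);
        ThreadUtils.join(worker, 30 * 1000);
      }
    }
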

[55/62] [abbrv] incubator-geode git commit: GEODE-946: Source and javadoc artifacts from modules have inconsistent locations

Posted by je...@apache.org.
GEODE-946: Source and javadoc artifacts from modules have inconsistent locations


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/9fff1ebd
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/9fff1ebd
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/9fff1ebd

Branch: refs/heads/feature/GEODE-17
Commit: 9fff1ebdee253017a935f6c46285dc02a9697cb9
Parents: 4664325
Author: Jens Deppe <jd...@pivotal.io>
Authored: Tue Feb 9 08:40:22 2016 -0800
Committer: Jens Deppe <jd...@pivotal.io>
Committed: Tue Feb 9 16:16:16 2016 -0800

----------------------------------------------------------------------
 gradle/java.gradle | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/9fff1ebd/gradle/java.gradle
----------------------------------------------------------------------
diff --git a/gradle/java.gradle b/gradle/java.gradle
index 758420d..06fb766 100644
--- a/gradle/java.gradle
+++ b/gradle/java.gradle
@@ -88,6 +88,14 @@ subprojects {
     }
   }
  
+  // This ensures that javadoc and source jars also have any prefix paths stripped and will
+  // be created as libs/foo-sources.jar instead of libs/extensions/foo-sources.jar for example.
+  tasks.all { task ->
+    if (task instanceof Jar) {
+      baseName = sanitizedName()
+    }
+  }
+
   task jarTest (type: Jar, dependsOn: testClasses) {
     description 'Assembles a jar archive of test classes.'
     from sourceSets.test.output
@@ -125,11 +133,6 @@ subprojects {
     compile 'com.google.code.findbugs:jsr305:' + project.'jsr305.version'
     compile 'javax.enterprise:cdi-api:' + project.'cdi-api.version'
   }
-
-  jar {
-    baseName = sanitizedName()
-  }
-
 }
 
 


[32/62] [abbrv] [partial] incubator-geode git commit: GEODE-773: Extract static methods from DistributedTestCase

Posted by je...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionQueryDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionQueryDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionQueryDUnitTest.java
index b4af7d1..42b34dd 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionQueryDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionQueryDUnitTest.java
@@ -58,7 +58,10 @@ import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.internal.cache.partitioned.QueryMessage;
 import com.gemstone.gemfire.pdx.JSONFormatter;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
@@ -295,7 +298,7 @@ public class PartitionedRegionQueryDUnitTest extends CacheTestCase {
                     9 })), results.asSet());
   
           } catch (Exception e) {
-            fail("Bad query", e);
+            Assert.fail("Bad query", e);
           }
         }
       });
@@ -367,7 +370,7 @@ public class PartitionedRegionQueryDUnitTest extends CacheTestCase {
                     9 })), results.asSet());
   
           } catch (Exception e) {
-            fail("Bad query", e);
+            Assert.fail("Bad query", e);
           }
         }
       });
@@ -438,7 +441,7 @@ public class PartitionedRegionQueryDUnitTest extends CacheTestCase {
                     9 })), results.asSet());
   
           } catch (Exception e) {
-            fail("Bad query", e);
+            Assert.fail("Bad query", e);
           }
         }
       });
@@ -510,7 +513,7 @@ public class PartitionedRegionQueryDUnitTest extends CacheTestCase {
                     9 })), results.asSet());
   
           } catch (Exception e) {
-            fail("Bad query", e);
+            Assert.fail("Bad query", e);
           }
         }
       });
@@ -582,7 +585,7 @@ public class PartitionedRegionQueryDUnitTest extends CacheTestCase {
                     9 })), results.asSet());
   
           } catch (Exception e) {
-            fail("Bad query", e);
+            Assert.fail("Bad query", e);
           }
         }
       });
@@ -655,7 +658,7 @@ public class PartitionedRegionQueryDUnitTest extends CacheTestCase {
                     9 })), results.asSet());
   
           } catch (Exception e) {
-            fail("Bad query", e);
+            Assert.fail("Bad query", e);
           }
         }
       });
@@ -727,7 +730,7 @@ public class PartitionedRegionQueryDUnitTest extends CacheTestCase {
                     9 })), results.asSet());
   
           } catch (Exception e) {
-            fail("Bad query", e);
+            Assert.fail("Bad query", e);
           }
         }
       });
@@ -798,7 +801,7 @@ public class PartitionedRegionQueryDUnitTest extends CacheTestCase {
                     9 })), results.asSet());
   
           } catch (Exception e) {
-            fail("Bad query", e);
+            Assert.fail("Bad query", e);
           }
         }
       });
@@ -941,8 +944,8 @@ public class PartitionedRegionQueryDUnitTest extends CacheTestCase {
       @Override
       public Object call() throws Exception {
         ClientCacheFactory cf = new ClientCacheFactory();
-        cf.addPoolServer(getServerHostName(server1.getHost()), port1);
-        cf.addPoolServer(getServerHostName(server2.getHost()), port2);
+        cf.addPoolServer(NetworkUtils.getServerHostName(server1.getHost()), port1);
+        cf.addPoolServer(NetworkUtils.getServerHostName(server2.getHost()), port2);
         ClientCache cache = getClientCache(cf);
 
         Region region = cache.createClientRegionFactory(
@@ -953,7 +956,7 @@ public class PartitionedRegionQueryDUnitTest extends CacheTestCase {
         try {
           qs = getCache().getQueryService();
         } catch (Exception e) {
-          fail("Failed to get QueryService.", e);
+          Assert.fail("Failed to get QueryService.", e);
         }
 
         for (int i = 0; i < queries.length; i++) {
@@ -962,7 +965,7 @@ public class PartitionedRegionQueryDUnitTest extends CacheTestCase {
             assertTrue("Size of resultset should be greater than 0 for query: "
              + queries[i], sr.size() > 0);
           } catch (Exception e) {
-            fail("Failed executing query ", e);
+            Assert.fail("Failed executing query ", e);
           }
         }
         return null;
@@ -980,12 +983,12 @@ public class PartitionedRegionQueryDUnitTest extends CacheTestCase {
     SerializableRunnable closeCache = new CacheSerializableRunnable(
         "Close Client") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Close Client. ###");
+        LogWriterUtils.getLogWriter().info("### Close Client. ###");
         try {
           closeCache();
           disconnectFromDS();
         } catch (Exception ex) {
-          getLogWriter().info("### Failed to get close client. ###");
+          LogWriterUtils.getLogWriter().info("### Failed to get close client. ###");
         }
       }
     };
@@ -1004,7 +1007,7 @@ public class PartitionedRegionQueryDUnitTest extends CacheTestCase {
           SelectResults results = (SelectResults) query.execute();
           assertEquals(new HashSet(Arrays.asList(new Integer[] { 1, 2, 3 ,4, 5, 6, 7, 8, 9 })), results.asSet());
         } catch (Exception e) {
-          fail("Bad query", e);
+          Assert.fail("Bad query", e);
         }
       }
     });
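
The client-side setup above also picks up NetworkUtils.getServerHostName(...) in place of the inherited getServerHostName(...) when wiring pool servers. A minimal sketch of that wiring; the helper method and its parameters are illustrative, while the ClientCacheFactory and NetworkUtils calls are the ones shown in the hunk.

    import com.gemstone.gemfire.cache.client.ClientCacheFactory;
    import com.gemstone.gemfire.test.dunit.Host;
    import com.gemstone.gemfire.test.dunit.NetworkUtils;

    public class ClientPoolSketch {

      // Points a client cache factory at two dunit server VMs running on the given host.
      static ClientCacheFactory configurePool(Host host, int port1, int port2) {
        ClientCacheFactory cf = new ClientCacheFactory();
        // formerly: getServerHostName(host)  -- inherited from DistributedTestCase
        cf.addPoolServer(NetworkUtils.getServerHostName(host), port1);
        cf.addPoolServer(NetworkUtils.getServerHostName(host), port2);
        return cf;
      }
    }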

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionRedundancyZoneDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionRedundancyZoneDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionRedundancyZoneDUnitTest.java
index 39fcc76..c112539 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionRedundancyZoneDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionRedundancyZoneDUnitTest.java
@@ -36,8 +36,7 @@ import com.gemstone.gemfire.test.dunit.VM;
 public class PartitionedRegionRedundancyZoneDUnitTest extends CacheTestCase {
 
   @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  protected final void postTearDownCacheTestCase() throws Exception {
     // this makes sure we don't leave anything for the next tests
     // Tests that set redundancy zones causes other jvms connected
     // to the ds to have "enforce-unique-hosts" set to true.
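
This hunk shows the second half of the cleanup: instead of overriding tearDown2() and remembering to call super.tearDown2(), tests now override the postTearDownCacheTestCase() hook and the framework drives the teardown sequence itself. A sketch of the new shape; the class name is hypothetical and the CacheTestCase import path is assumed from the surrounding tests.

    import com.gemstone.gemfire.cache30.CacheTestCase;

    public class ExampleZoneDUnitTest extends CacheTestCase {

      public ExampleZoneDUnitTest(String name) {
        super(name);
      }

      // formerly: public void tearDown2() throws Exception { super.tearDown2(); ... }
      @Override
      protected final void postTearDownCacheTestCase() throws Exception {
        // per-test cleanup runs here, after the framework's own cache teardown;
        // no super call is required
      }
    }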

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSerializableObjectJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSerializableObjectJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSerializableObjectJUnitTest.java
index 6244cea..53c219f 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSerializableObjectJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSerializableObjectJUnitTest.java
@@ -35,7 +35,7 @@ import static org.junit.Assert.*;
 
 import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.distributed.DistributedSystem;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 import junit.framework.TestCase;
@@ -82,7 +82,7 @@ public class PartitionedRegionSerializableObjectJUnitTest
 
     for (int i = 0; i < MAX_THREADS; i++) {
       threadArr[i].start();
-      DistributedTestCase.join(threadArr[i], 30 * 1000, null);
+      ThreadUtils.join(threadArr[i], 30 * 1000);
     }
 
     for (int i = 0; i < MAX_THREADS; i++) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopDUnitTest.java
index e5e238e..55fed01 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopDUnitTest.java
@@ -65,10 +65,16 @@ import com.gemstone.gemfire.internal.cache.execute.data.CustId;
 import com.gemstone.gemfire.internal.cache.execute.data.OrderId;
 import com.gemstone.gemfire.internal.cache.execute.data.ShipmentId;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheServerTestUtil;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
 
@@ -104,7 +110,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
 
   public void setUp() throws Exception {
     super.setUp();
-    addExpectedException("Connection refused");
+    IgnoredException.addIgnoredException("Connection refused");
     Host host = Host.getHost(0);
     member0 = host.getVM(0);
     member1 = host.getVM(1);
@@ -112,18 +118,11 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     member3 = host.getVM(3);
   }
 
-  public void tearDown2() throws Exception {
+  @Override
+  protected final void postTearDownCacheTestCase() throws Exception {
     try {
-      /* fixes GEODE-444, really close client cache first by using super.tearDown2();
-      // close the clients first
-      member0.invoke(PartitionedRegionSingleHopDUnitTest.class, "closeCache");
-      member1.invoke(PartitionedRegionSingleHopDUnitTest.class, "closeCache");
-      member2.invoke(PartitionedRegionSingleHopDUnitTest.class, "closeCache");
-      member3.invoke(PartitionedRegionSingleHopDUnitTest.class, "closeCache");
       closeCache();
-      */
-      super.tearDown2();
-
+      
       member0 = null;
       member1 = null;
       member2 = null;
@@ -131,7 +130,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
 
     }
     finally {
-      unregisterAllDataSerializersFromAllVms();
+      DistributedTestUtils.unregisterAllDataSerializersFromAllVms();
     }
   }
 
@@ -186,7 +185,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
       server.start();
     }
     catch (IOException e) {
-      fail("Failed to start server ", e);
+      Assert.fail("Failed to start server ", e);
     }
 
     PartitionAttributesFactory paf = new PartitionAttributesFactory();
@@ -197,7 +196,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     attr.setConcurrencyChecksEnabled(true);
     region = cache.createRegion(PR_NAME, attr.create());
     assertNotNull(region);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region " + PR_NAME + " created Successfully :"
             + region.toString());
 
@@ -211,7 +210,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     attr.setConcurrencyChecksEnabled(true);
     customerRegion = cache.createRegion("CUSTOMER", attr.create());
     assertNotNull(customerRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region CUSTOMER created Successfully :"
             + customerRegion.toString());
 
@@ -225,7 +224,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     attr.setConcurrencyChecksEnabled(true);
     orderRegion = cache.createRegion("ORDER", attr.create());
     assertNotNull(orderRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region ORDER created Successfully :"
             + orderRegion.toString());
 
@@ -239,7 +238,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     attr.setConcurrencyChecksEnabled(true);
     shipmentRegion = cache.createRegion("SHIPMENT", attr.create());
     assertNotNull(shipmentRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region SHIPMENT created Successfully :"
             + shipmentRegion.toString());
     return port;
@@ -438,7 +437,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
         return "expected metadataservice to be called atleast once, but it was not called";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60000, 1000, true);
+    Wait.waitForCriterion(wc, 60000, 1000, true);
 
     cms.satisfyRefreshMetadata_TEST_ONLY(false);
     region.put(new Integer(0), "create0");
@@ -455,7 +454,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
         return "expected no metadata to be refreshed";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60000, 1000, true);
+    Wait.waitForCriterion(wc, 60000, 1000, true);
 
   }
 
@@ -497,7 +496,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
         return "expected metadataservice to be called atleast once, but it was not called";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60000, 1000, true);
+    Wait.waitForCriterion(wc, 60000, 1000, true);
   }
 
   public void test_MetadataServiceCallAccuracy_FromGetOp() {
@@ -539,15 +538,15 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
         return "expected metadataservice to be called atleast once, but it was not called";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60000, 1000, true);
+    Wait.waitForCriterion(wc, 60000, 1000, true);
     printMetadata();
-    pause(5000);
+    Wait.pause(5000);
     cms.satisfyRefreshMetadata_TEST_ONLY(false);
     region.get(new Integer(0));
     region.get(new Integer(1));
     region.get(new Integer(2));
     region.get(new Integer(3));
-    pause(5000);
+    Wait.pause(5000);
     assertFalse(cms.isRefreshMetadataTestOnly());
 
   }
@@ -591,7 +590,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
         return "expected metadataservice to be called atleast once, but it was not called";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60000, 1000, true);
+    Wait.waitForCriterion(wc, 60000, 1000, true);
 
     // kill server
     member0.invoke(PartitionedRegionSingleHopDUnitTest.class, "stopServer");
@@ -604,7 +603,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
 
   public void test_SingleHopWithHAWithLocator() {
     int port3 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String host0 = getServerHostName(member3.getHost());
+    final String host0 = NetworkUtils.getServerHostName(member3.getHost());
     final String locator = host0 + "[" + port3 + "]";
     member3.invoke(PartitionedRegionSingleHopDUnitTest.class,
         "startLocatorInVM", new Object[] { port3 });
@@ -669,7 +668,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     region.get(new Integer(1));
     region.get(new Integer(2));
     region.get(new Integer(3));
-    pause(5000);
+    Wait.pause(5000);
     assertFalse(cms.isRefreshMetadataTestOnly());
     printMetadata();
 
@@ -678,7 +677,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     region.get(new Integer(1));
     region.get(new Integer(2));
     region.get(new Integer(3));
-    pause(5000);
+    Wait.pause(5000);
     assertFalse(cms.isRefreshMetadataTestOnly());
   }
 
@@ -722,7 +721,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     region.put(new Integer(3), "create3");
     final boolean metadataRefreshed_get4 = cms
         .isRefreshMetadataTestOnly();
-    pause(5000);
+    Wait.pause(5000);
     assertFalse(metadataRefreshed_get1 || metadataRefreshed_get2
             || metadataRefreshed_get3 || metadataRefreshed_get4);
 
@@ -732,7 +731,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     region.put(new Integer(1), "create1");
     region.put(new Integer(2), "create2");
     region.put(new Integer(3), "create3");
-    pause(5000);
+    Wait.pause(5000);
     assertFalse(cms.isRefreshMetadataTestOnly());
 
   }
@@ -760,7 +759,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     region.destroy(new Integer(1));
     region.destroy(new Integer(2));
     region.destroy(new Integer(3));
-    pause(5000);
+    Wait.pause(5000);
     assertFalse(cms.isRefreshMetadataTestOnly());
   }
 
@@ -780,7 +779,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     createClient(port0, port1, port2, port3);
     putIntoPartitionedRegions();    
     getFromPartitionedRegions();
-    pause(5000);
+    Wait.pause(5000);
     ClientMetadataService cms = ((GemFireCacheImpl)cache).getClientMetadataService();
     Map<String, ClientPartitionAdvisor> regionMetaData = cms.getClientPRMetadata_TEST_ONLY();    
     assertEquals(4, regionMetaData.size());
@@ -797,7 +796,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     }
     member0.invoke(PartitionedRegionSingleHopDUnitTest.class, "stopServer");
     member1.invoke(PartitionedRegionSingleHopDUnitTest.class, "stopServer");
-    pause(5000);//make sure that ping detects the dead servers
+    Wait.pause(5000);//make sure that ping detects the dead servers
     getFromPartitionedRegions();
     verifyDeadServer(regionMetaData, customerRegion, port0, port1);
     verifyDeadServer(regionMetaData, region, port0, port1);
@@ -805,7 +804,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
   
   public void testMetadataFetchOnlyThroughFunctions() {
     //Workaround for 52004
-    addExpectedException("InternalFunctionInvocationTargetException");
+    IgnoredException.addIgnoredException("InternalFunctionInvocationTargetException");
     Integer port0 = (Integer)member0.invoke(
         PartitionedRegionSingleHopDUnitTest.class, "createServer",
         new Object[] { 3, 4 });
@@ -832,7 +831,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
         return "expected no metadata to be refreshed";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60000, 1000, true);
+    Wait.waitForCriterion(wc, 60000, 1000, true);
     
     assertEquals(1, regionMetaData.size());
     assertTrue(regionMetaData.containsKey(region.getFullPath()));
@@ -866,7 +865,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
         return "expected no metadata to be refreshed";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60000, 1000, true);
+    Wait.waitForCriterion(wc, 60000, 1000, true);
 //    assertEquals(4/*numBuckets*/, prMetaData.getBucketServerLocationsMap_TEST_ONLY().size());    
   }
 
@@ -896,7 +895,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
         return "expected no metadata to be refreshed";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60000, 1000, true);
+    Wait.waitForCriterion(wc, 60000, 1000, true);
     
     assertTrue(regionMetaData.containsKey(region.getFullPath()));
     
@@ -910,7 +909,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
         return "expected no metadata to be refreshed";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60000, 1000, true);  
+    Wait.waitForCriterion(wc, 60000, 1000, true);  
   }
   
   
@@ -955,7 +954,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
         return "expected no metadata to be refreshed";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60000, 1000, true); 
+    Wait.waitForCriterion(wc, 60000, 1000, true); 
     for (Entry entry : clientMap.entrySet()) {
       assertEquals(4, ((List)entry.getValue()).size());
     }
@@ -990,7 +989,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
         for (Entry entry : clientMap.entrySet()) {
           List list = (List)entry.getValue();
           if(list.size()<4){
-            getLogWriter().info("still waiting for 4 bucket owners in " + entry.getKey() + ": " + list);
+            LogWriterUtils.getLogWriter().info("still waiting for 4 bucket owners in " + entry.getKey() + ": " + list);
             finished = false;
             break;
           }
@@ -1001,7 +1000,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
         return "bucket copies are not created";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60000, 400, true);
+    Wait.waitForCriterion(wc, 60000, 400, true);
     cms = ((GemFireCacheImpl)cache).getClientMetadataService();
     cms.getClientPRMetadata((LocalRegion)region);
     
@@ -1020,7 +1019,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
         return "expected no metadata to be refreshed";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60000, 1000, true); 
+    Wait.waitForCriterion(wc, 60000, 1000, true); 
     for (Entry entry : clientMap.entrySet()) {
       assertEquals(4, ((List)entry.getValue()).size());
     }
@@ -1077,12 +1076,12 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
         return "expected no metadata to be refreshed";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60000, 1000, true); 
+    Wait.waitForCriterion(wc, 60000, 1000, true); 
     for (Entry entry : clientMap.entrySet()) {
       assertEquals(2, ((List)entry.getValue()).size());
     }
     final Map<Integer, List<BucketServerLocation66>> fclientMap = clientMap;
-    waitForCriterion(new WaitCriterion() {
+    Wait.waitForCriterion(new WaitCriterion() {
 
       public boolean done() {
         try {
@@ -1091,7 +1090,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
           member2.invoke(PartitionedRegionSingleHopDUnitTest.class, "verifyMetadata", new Object[]{fclientMap});
           member3.invoke(PartitionedRegionSingleHopDUnitTest.class, "verifyMetadata", new Object[]{fclientMap});
         } catch (Exception e) {
-          getLogWriter().info("verification failed", e);
+          LogWriterUtils.getLogWriter().info("verification failed", e);
           return false;
         }
         return true;
@@ -1129,7 +1128,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
         return "expected metadata is ready";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60000, 1000, true);
+    Wait.waitForCriterion(wc, 60000, 1000, true);
     
     assertEquals(1, regionMetaData.size());
     assertTrue(regionMetaData.containsKey(region.getFullPath()));
@@ -1163,7 +1162,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
         return "expected no metadata to be refreshed";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60000, 1000, true); 
+    Wait.waitForCriterion(wc, 60000, 1000, true); 
     for (Entry entry : clientMap.entrySet()) {
       assertEquals(2, ((List)entry.getValue()).size());
     }
@@ -1213,7 +1212,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
             + bucketId + " size : " + size + " the list is " + globalList;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60000, 400, true);
+    Wait.waitForCriterion(wc, 60000, 400, true);
   }
 
   //TODO This is failing in WAN_Dev_Dec11 branch after downmerge from trunk revision 34709
@@ -1247,7 +1246,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     member1.invoke(PartitionedRegionSingleHopDUnitTest.class, "closeCache");
     member2.invoke(PartitionedRegionSingleHopDUnitTest.class, "closeCache");
     member3.invoke(PartitionedRegionSingleHopDUnitTest.class, "closeCache");
-    pause(1000); //let client detect that servers are dead through ping
+    Wait.pause(1000); //let client detect that servers are dead through ping
     AsyncInvocation m3 = member3.invokeAsync(PartitionedRegionSingleHopDUnitTest.class, "createPersistentPrsAndServerOnPort",new Object[] { 3, 4,port3 });
     AsyncInvocation m2 = member2.invokeAsync(PartitionedRegionSingleHopDUnitTest.class, "createPersistentPrsAndServerOnPort",new Object[] { 3, 4,port2 });
     AsyncInvocation m1 = member1.invokeAsync(PartitionedRegionSingleHopDUnitTest.class, "createPersistentPrsAndServerOnPort",new Object[] { 3, 4,port1 });
@@ -1333,7 +1332,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
             + pr.getDataStore().getAllLocalBuckets().size();
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60000, 400, true);
+    Wait.waitForCriterion(wc, 60000, 400, true);
   }
   
   public static void waitForBucketsCreation(){
@@ -1349,7 +1348,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
         return "bucket copies are not created";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60000, 400, true);
+    Wait.waitForCriterion(wc, 60000, 400, true);
   }
   
   private void verifyDeadServer(Map<String, ClientPartitionAdvisor> regionMetaData, Region region, int port0, int port1) {
@@ -1404,7 +1403,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
       server.start();
     }
     catch (IOException e) {
-      fail("Failed to start server ", e);
+      Assert.fail("Failed to start server ", e);
     }
 
     PartitionAttributesFactory paf = new PartitionAttributesFactory();
@@ -1415,7 +1414,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     region = cache.createRegion(PR_NAME, attr.create());
 
     assertNotNull(region);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region " + PR_NAME + " created Successfully :"
             + region.toString());
 
@@ -1429,7 +1428,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     attr.setConcurrencyChecksEnabled(true);
     customerRegion = cache.createRegion("CUSTOMER", attr.create());
     assertNotNull(customerRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region CUSTOMER created Successfully :"
             + customerRegion.toString());
 
@@ -1442,7 +1441,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     attr.setConcurrencyChecksEnabled(true);
     orderRegion = cache.createRegion("ORDER", attr.create());
     assertNotNull(orderRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region ORDER created Successfully :"
             + orderRegion.toString());
 
@@ -1455,7 +1454,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     attr.setConcurrencyChecksEnabled(true);
     shipmentRegion = cache.createRegion("SHIPMENT", attr.create());
     assertNotNull(shipmentRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region SHIPMENT created Successfully :"
             + shipmentRegion.toString());
     replicatedRegion = cache.createRegion("rr", new AttributesFactory().create());
@@ -1474,7 +1473,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
       server.start();
     }
     catch (IOException e) {
-      fail("Failed to start server ", e);
+      Assert.fail("Failed to start server ", e);
     }
 
     PartitionAttributesFactory paf = new PartitionAttributesFactory();
@@ -1485,7 +1484,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     attr.setConcurrencyChecksEnabled(true);
     region = cache.createRegion(PR_NAME, attr.create());
     assertNotNull(region);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region " + PR_NAME + " created Successfully :"
             + region.toString());
 
@@ -1499,7 +1498,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     attr.setConcurrencyChecksEnabled(true);
     customerRegion = cache.createRegion("CUSTOMER", attr.create());
     assertNotNull(customerRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region CUSTOMER created Successfully :"
             + customerRegion.toString());
 
@@ -1513,7 +1512,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     attr.setConcurrencyChecksEnabled(true);
     orderRegion = cache.createRegion("ORDER", attr.create());
     assertNotNull(orderRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region ORDER created Successfully :"
             + orderRegion.toString());
 
@@ -1527,7 +1526,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     attr.setConcurrencyChecksEnabled(true);
     shipmentRegion = cache.createRegion("SHIPMENT", attr.create());
     assertNotNull(shipmentRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region SHIPMENT created Successfully :"
             + shipmentRegion.toString());    
     
@@ -1555,7 +1554,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
 //    attr.setConcurrencyChecksEnabled(true);
     region = cache.createRegion(PR_NAME, attr.create());
     assertNotNull(region);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region " + PR_NAME + " created Successfully :"
             + region.toString());
 
@@ -1571,7 +1570,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
 //    attr.setConcurrencyChecksEnabled(true);
     customerRegion = cache.createRegion("CUSTOMER", attr.create());
     assertNotNull(customerRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region CUSTOMER created Successfully :"
             + customerRegion.toString());
 
@@ -1587,7 +1586,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
 //    attr.setConcurrencyChecksEnabled(true);
     orderRegion = cache.createRegion("ORDER", attr.create());
     assertNotNull(orderRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region ORDER created Successfully :"
             + orderRegion.toString());
 
@@ -1603,7 +1602,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
 //    attr.setConcurrencyChecksEnabled(true);
     shipmentRegion = cache.createRegion("SHIPMENT", attr.create());
     assertNotNull(shipmentRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region SHIPMENT created Successfully :"
             + shipmentRegion.toString());    
     
@@ -1616,7 +1615,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
       server.start();
     }
     catch (IOException e) {
-      fail("Failed to start server ", e);
+      Assert.fail("Failed to start server ", e);
     }
     return port;
   }
@@ -1639,7 +1638,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
 //    attr.setConcurrencyChecksEnabled(true);
     region = cache.createRegion(PR_NAME, attr.create());
     assertNotNull(region);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region " + PR_NAME + " created Successfully :"
             + region.toString());
 
@@ -1655,7 +1654,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
 //    attr.setConcurrencyChecksEnabled(true);
     customerRegion = cache.createRegion("CUSTOMER", attr.create());
     assertNotNull(customerRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region CUSTOMER created Successfully :"
             + customerRegion.toString());
 
@@ -1671,7 +1670,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
 //    attr.setConcurrencyChecksEnabled(true);
     orderRegion = cache.createRegion("ORDER", attr.create());
     assertNotNull(orderRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region ORDER created Successfully :"
             + orderRegion.toString());
 
@@ -1687,7 +1686,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
 //    attr.setConcurrencyChecksEnabled(true);
     shipmentRegion = cache.createRegion("SHIPMENT", attr.create());
     assertNotNull(shipmentRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region SHIPMENT created Successfully :"
             + shipmentRegion.toString());    
     
@@ -1699,7 +1698,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
       server.start();
     }
     catch (IOException e) {
-      fail("Failed to start server ", e);
+      Assert.fail("Failed to start server ", e);
     }
     return port;
   }
@@ -1714,7 +1713,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
       server.start();
     }
     catch (IOException e) {
-      fail("Failed to start server ", e);
+      Assert.fail("Failed to start server ", e);
     }
 
     PartitionAttributesFactory paf = new PartitionAttributesFactory();
@@ -1725,7 +1724,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     attr.setConcurrencyChecksEnabled(true);
     region = cache.createRegion(PR_NAME, attr.create());
     assertNotNull(region);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region " + PR_NAME + " created Successfully :"
             + region.toString());
 
@@ -1739,7 +1738,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     attr.setConcurrencyChecksEnabled(true);
     customerRegion = cache.createRegion("CUSTOMER", attr.create());
     assertNotNull(customerRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region CUSTOMER created Successfully :"
             + customerRegion.toString());
 
@@ -1753,7 +1752,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     attr.setConcurrencyChecksEnabled(true);
     orderRegion = cache.createRegion("ORDER", attr.create());
     assertNotNull(orderRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region ORDER created Successfully :"
             + orderRegion.toString());
 
@@ -1767,7 +1766,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     attr.setConcurrencyChecksEnabled(true);
     shipmentRegion = cache.createRegion("SHIPMENT", attr.create());
     assertNotNull(shipmentRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region SHIPMENT created Successfully :"
             + shipmentRegion.toString());    
     
@@ -1786,7 +1785,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
       server.start();
     }
     catch (IOException e) {
-      fail("Failed to start server ", e);
+      Assert.fail("Failed to start server ", e);
     }
   }
   
@@ -1803,7 +1802,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     attr.setConcurrencyChecksEnabled(true);
     region = cache.createRegion(PR_NAME, attr.create());
     assertNotNull(region);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region " + PR_NAME + " created Successfully :"
             + region.toString());
 
@@ -1816,7 +1815,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     attr.setConcurrencyChecksEnabled(true);
     customerRegion = cache.createRegion("CUSTOMER", attr.create());
     assertNotNull(customerRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region CUSTOMER created Successfully :"
             + customerRegion.toString());
 
@@ -1829,7 +1828,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     attr.setConcurrencyChecksEnabled(true);
     orderRegion = cache.createRegion("ORDER", attr.create());
     assertNotNull(orderRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region ORDER created Successfully :"
             + orderRegion.toString());
 
@@ -1842,7 +1841,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     attr.setConcurrencyChecksEnabled(true);
     shipmentRegion = cache.createRegion("SHIPMENT", attr.create());
     assertNotNull(shipmentRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region SHIPMENT created Successfully :"
             + shipmentRegion.toString());
     replicatedRegion = cache.createRegion("rr", new AttributesFactory().create());
@@ -1958,7 +1957,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     RegionAttributes attrs = factory.create();
     region = cache.createRegion(PR_NAME, attrs);
     assertNotNull(region);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Distributed Region " + PR_NAME + " created Successfully :"
             + region.toString());
 
@@ -1969,7 +1968,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     attrs = factory.create();
     customerRegion = cache.createRegion("CUSTOMER", attrs);
     assertNotNull(customerRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Distributed Region CUSTOMER created Successfully :"
             + customerRegion.toString());
 
@@ -1980,7 +1979,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     attrs = factory.create();
     orderRegion = cache.createRegion("ORDER", attrs);
     assertNotNull(orderRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Distributed Region ORDER created Successfully :"
             + orderRegion.toString());
 
@@ -1991,7 +1990,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
     attrs = factory.create();
     shipmentRegion = cache.createRegion("SHIPMENT", attrs);
     assertNotNull(shipmentRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Distributed Region SHIPMENT created Successfully :"
             + shipmentRegion.toString());
     factory = new AttributesFactory();
@@ -2252,7 +2251,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
         return "expected PRAdvisor to be ready";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60000, 1000, true);
+    Wait.waitForCriterion(wc, 60000, 1000, true);
     
     assertEquals(4, regionMetaData.size());
     assertTrue(regionMetaData.containsKey(region.getFullPath()));
@@ -2266,7 +2265,7 @@ public class PartitionedRegionSingleHopDUnitTest extends CacheTestCase {
         return "expected no metadata to be refreshed";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60000, 1000, true);
+    Wait.waitForCriterion(wc, 60000, 1000, true);
   }
 }
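(A minimal, illustrative sketch of the wait-for-condition idiom the hunks above converge on, using only the Wait and WaitCriterion helpers referenced in this patch. The class name, the metadata map, and the expected size are placeholders and are not taken from the commit; the 60000/1000/true arguments mirror the calls shown above.)

import java.util.Map;

import com.gemstone.gemfire.test.dunit.Wait;
import com.gemstone.gemfire.test.dunit.WaitCriterion;

public class WaitIdiomSketch {
  // Poll until the client metadata map reaches the expected size, as the refactored tests do.
  static void waitForMetadata(final Map<?, ?> regionMetaData, final int expectedSize) {
    WaitCriterion wc = new WaitCriterion() {
      public boolean done() {
        return regionMetaData.size() == expectedSize;
      }
      public String description() {
        return "expected metadata for " + expectedSize + " regions";
      }
    };
    // 60 s timeout, 1 s polling interval, throw on timeout.
    Wait.waitForCriterion(wc, 60000, 1000, true);
  }
}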
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopWithServerGroupDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopWithServerGroupDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopWithServerGroupDUnitTest.java
index ee256a9..87738b8 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopWithServerGroupDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSingleHopWithServerGroupDUnitTest.java
@@ -41,10 +41,17 @@ import com.gemstone.gemfire.internal.cache.execute.data.CustId;
 import com.gemstone.gemfire.internal.cache.execute.data.OrderId;
 import com.gemstone.gemfire.internal.cache.execute.data.ShipmentId;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheServerTestUtil;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 import java.io.File;
 import java.io.IOException;
@@ -115,26 +122,27 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     member1 = host.getVM(1);
     member2 = host.getVM(2);
     member3 = host.getVM(3);
-    addExpectedException("java.net.SocketException");
+    IgnoredException.addIgnoredException("java.net.SocketException");
   }
   
-  public void tearDown2() throws Exception {
+  @Override
+  protected final void preTearDownCacheTestCase() throws Exception {
+    // close the clients first
+    member0.invoke(PartitionedRegionSingleHopWithServerGroupDUnitTest.class, "closeCache");
+    member1.invoke(PartitionedRegionSingleHopWithServerGroupDUnitTest.class, "closeCache");
+    member2.invoke(PartitionedRegionSingleHopWithServerGroupDUnitTest.class, "closeCache");
+    member3.invoke(PartitionedRegionSingleHopWithServerGroupDUnitTest.class, "closeCache");
+    closeCache();
+  }
+  
+  @Override
+  protected final void postTearDownCacheTestCase() throws Exception {
     try {
-
-      // close the clients first
-      member0.invoke(PartitionedRegionSingleHopWithServerGroupDUnitTest.class, "closeCache");
-      member1.invoke(PartitionedRegionSingleHopWithServerGroupDUnitTest.class, "closeCache");
-      member2.invoke(PartitionedRegionSingleHopWithServerGroupDUnitTest.class, "closeCache");
-      member3.invoke(PartitionedRegionSingleHopWithServerGroupDUnitTest.class, "closeCache");
-      closeCache();
-
-      super.tearDown2();
-
       member0 = null;
       member1 = null;
       member2 = null;
       member3 = null;
-      invokeInEveryVM(new SerializableRunnable() { public void run() {
+      Invoke.invokeInEveryVM(new SerializableRunnable() { public void run() {
         cache = null;
         orderRegion = null;
         orderRegion2 = null;
@@ -149,7 +157,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
 
     }
     finally {
-      unregisterAllDataSerializersFromAllVms();
+      DistributedTestUtils.unregisterAllDataSerializersFromAllVms();
     }
   }
 
@@ -169,7 +177,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
 
   public void test_SingleHopWith2ServerGroup() {
     int port3 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String host0 = getServerHostName(member3.getHost());
+    final String host0 = NetworkUtils.getServerHostName(member3.getHost());
     final String locator = host0 + "[" + port3 + "]";
     member3.invoke(PartitionedRegionSingleHopWithServerGroupDUnitTest.class,
         "startLocatorInVM", new Object[] { port3 });
@@ -203,7 +211,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
 
   public void test_SingleHopWith2ServerGroup2() {
     int port3 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String host0 = getServerHostName(member3.getHost());
+    final String host0 = NetworkUtils.getServerHostName(member3.getHost());
     final String locator = host0 + "[" + port3 + "]";
     member3.invoke(PartitionedRegionSingleHopWithServerGroupDUnitTest.class,
         "startLocatorInVM", new Object[] { port3 });
@@ -237,7 +245,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
   
   public void test_SingleHopWith2ServerGroup2WithoutSystemProperty() {
     int port3 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String host0 = getServerHostName(member3.getHost());
+    final String host0 = NetworkUtils.getServerHostName(member3.getHost());
     final String locator = host0 + "[" + port3 + "]";
     member3.invoke(PartitionedRegionSingleHopWithServerGroupDUnitTest.class,
         "startLocatorInVM", new Object[] { port3 });
@@ -266,7 +274,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
 
   public void test_SingleHopWithServerGroupAccessor() {
     int port3 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String host0 = getServerHostName(member3.getHost());
+    final String host0 = NetworkUtils.getServerHostName(member3.getHost());
     final String locator = host0 + "[" + port3 + "]";
     member3.invoke(PartitionedRegionSingleHopWithServerGroupDUnitTest.class,
         "startLocatorInVM", new Object[] { port3 });
@@ -299,7 +307,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
   
   public void test_SingleHopWithServerGroupOneServerInTwoGroups() {
     int port3 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String host0 = getServerHostName(member3.getHost());
+    final String host0 = NetworkUtils.getServerHostName(member3.getHost());
     final String locator = host0 + "[" + port3 + "]";
     member3.invoke(PartitionedRegionSingleHopWithServerGroupDUnitTest.class,
         "startLocatorInVM", new Object[] { port3 });
@@ -338,7 +346,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
   
   public void test_SingleHopWithServerGroupWithOneDefaultServer() {
     int port3 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String host0 = getServerHostName(member3.getHost());
+    final String host0 = NetworkUtils.getServerHostName(member3.getHost());
     final String locator = host0 + "[" + port3 + "]";
     member3.invoke(PartitionedRegionSingleHopWithServerGroupDUnitTest.class,
         "startLocatorInVM", new Object[] { port3 });
@@ -371,7 +379,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
   
   public void test_SingleHopWithServerGroupClientServerGroupNull() {
     int port3 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String host0 = getServerHostName(member3.getHost());
+    final String host0 = NetworkUtils.getServerHostName(member3.getHost());
     final String locator = host0 + "[" + port3 + "]";
     member3.invoke(PartitionedRegionSingleHopWithServerGroupDUnitTest.class,
         "startLocatorInVM", new Object[] { port3 });
@@ -404,7 +412,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
   
   public void test_SingleHopWithServerGroupTwoClientServerGroup() {
     int port3 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String host0 = getServerHostName(member3.getHost());
+    final String host0 = NetworkUtils.getServerHostName(member3.getHost());
     final String locator = host0 + "[" + port3 + "]";
     member3.invoke(PartitionedRegionSingleHopWithServerGroupDUnitTest.class,
         "startLocatorInVM", new Object[] { port3 });
@@ -451,7 +459,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
   
   public void test_SingleHopWithServerGroupTwoClientServerGroup2() {
     int port3 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String host0 = getServerHostName(member3.getHost());
+    final String host0 = NetworkUtils.getServerHostName(member3.getHost());
     final String locator = host0 + "[" + port3 + "]";
     member3.invoke(PartitionedRegionSingleHopWithServerGroupDUnitTest.class,
         "startLocatorInVM", new Object[] { port3 });
@@ -495,7 +503,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
   
   public void test_SingleHopWithServerGroupTwoClientOneWithOneWithoutServerGroup() {
     int port3 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String host0 = getServerHostName(member3.getHost());
+    final String host0 = NetworkUtils.getServerHostName(member3.getHost());
     final String locator = host0 + "[" + port3 + "]";
     member3.invoke(PartitionedRegionSingleHopWithServerGroupDUnitTest.class,
         "startLocatorInVM", new Object[] { port3 });
@@ -534,7 +542,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
   
   public void test_SingleHopWithServerGroup2ClientInOneVMServerGroup() {
     int port3 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String host0 = getServerHostName(member3.getHost());
+    final String host0 = NetworkUtils.getServerHostName(member3.getHost());
     final String locator = host0 + "[" + port3 + "]";
     member3.invoke(PartitionedRegionSingleHopWithServerGroupDUnitTest.class,
         "startLocatorInVM", new Object[] { port3 });
@@ -578,7 +586,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
 
   public void test_SingleHopWithServerGroupColocatedRegionsInDifferentGroup() {
     int port3 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
-    final String host0 = getServerHostName(member3.getHost());
+    final String host0 = NetworkUtils.getServerHostName(member3.getHost());
     final String locator = host0 + "[" + port3 + "]";
     member3.invoke(PartitionedRegionSingleHopWithServerGroupDUnitTest.class,
         "startLocatorInVM", new Object[] { port3 });
@@ -626,7 +634,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
       }
     };
     
-    DistributedTestCase.waitForCriterion(wc, 60000, 1000, true);
+    Wait.waitForCriterion(wc, 60000, 1000, true);
     
     if (numRegions != 0) {
       assertTrue(regionMetaData.containsKey(region.getFullPath()));
@@ -667,7 +675,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
       }
     };
 
-    DistributedTestCase.waitForCriterion(wc, 120000, 1000, true);
+    Wait.waitForCriterion(wc, 120000, 1000, true);
     
     assertTrue(regionMetaData.containsKey(region.getFullPath()));
     ClientPartitionAdvisor prMetaData = regionMetaData.get(region
@@ -747,7 +755,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
       }
     };
     
-    DistributedTestCase.waitForCriterion(wc, 120000, 1000, true);
+    Wait.waitForCriterion(wc, 120000, 1000, true);
     
     if (numRegions != 0) {
       assertTrue(regionMetaData.containsKey(region.getFullPath()));
@@ -809,7 +817,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
       server.start();
     }
     catch (IOException e) {
-      fail("Failed to start server ", e);
+      Assert.fail("Failed to start server ", e);
     }
 
     PartitionAttributesFactory paf = new PartitionAttributesFactory();
@@ -819,7 +827,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     attr.setPartitionAttributes(paf.create());
     region = cache.createRegion(PR_NAME, attr.create());
     assertNotNull(region);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region " + PR_NAME + " created Successfully :"
             + region.toString());
 
@@ -832,7 +840,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     attr.setPartitionAttributes(paf.create());
     customerRegion = cache.createRegion("CUSTOMER", attr.create());
     assertNotNull(customerRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region CUSTOMER created Successfully :"
             + customerRegion.toString());
 
@@ -845,7 +853,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     attr.setPartitionAttributes(paf.create());
     orderRegion = cache.createRegion("ORDER", attr.create());
     assertNotNull(orderRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region ORDER created Successfully :"
             + orderRegion.toString());
 
@@ -858,7 +866,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     attr.setPartitionAttributes(paf.create());
     shipmentRegion = cache.createRegion("SHIPMENT", attr.create());
     assertNotNull(shipmentRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region SHIPMENT created Successfully :"
             + shipmentRegion.toString());
     return port;
@@ -895,7 +903,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
       server.start();
     }
     catch (IOException e) {
-      fail("Failed to start server ", e);
+      Assert.fail("Failed to start server ", e);
     }
 
     PartitionAttributesFactory paf = new PartitionAttributesFactory();
@@ -905,7 +913,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     attr.setPartitionAttributes(paf.create());
     region = cache.createRegion(PR_NAME, attr.create());
     assertNotNull(region);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region " + PR_NAME + " created Successfully :"
             + region.toString());
 
@@ -918,7 +926,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     attr.setPartitionAttributes(paf.create());
     customerRegion = cache.createRegion("CUSTOMER", attr.create());
     assertNotNull(customerRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region CUSTOMER created Successfully :"
             + customerRegion.toString());
 
@@ -931,7 +939,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     attr.setPartitionAttributes(paf.create());
     orderRegion = cache.createRegion("ORDER", attr.create());
     assertNotNull(orderRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region ORDER created Successfully :"
             + orderRegion.toString());
 
@@ -944,7 +952,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     attr.setPartitionAttributes(paf.create());
     shipmentRegion = cache.createRegion("SHIPMENT", attr.create());
     assertNotNull(shipmentRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region SHIPMENT created Successfully :"
             + shipmentRegion.toString());
     return port;
@@ -981,7 +989,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
       server.start();
     }
     catch (IOException e) {
-      fail("Failed to start server ", e);
+      Assert.fail("Failed to start server ", e);
     }
 
     PartitionAttributesFactory paf = new PartitionAttributesFactory();
@@ -991,7 +999,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     attr.setPartitionAttributes(paf.create());
     region = cache.createRegion(PR_NAME, attr.create());
     assertNotNull(region);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region " + PR_NAME + " created Successfully :"
             + region.toString());
 
@@ -1004,7 +1012,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     attr.setPartitionAttributes(paf.create());
     customerRegion = cache.createRegion("CUSTOMER", attr.create());
     assertNotNull(customerRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region CUSTOMER created Successfully :"
             + customerRegion.toString());
 
@@ -1017,7 +1025,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     attr.setPartitionAttributes(paf.create());
     orderRegion = cache.createRegion("ORDER", attr.create());
     assertNotNull(orderRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region ORDER created Successfully :"
             + orderRegion.toString());
 
@@ -1030,7 +1038,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     attr.setPartitionAttributes(paf.create());
     shipmentRegion = cache.createRegion("SHIPMENT", attr.create());
     assertNotNull(shipmentRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region SHIPMENT created Successfully :"
             + shipmentRegion.toString());
     
@@ -1043,7 +1051,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     attr.setPartitionAttributes(paf.create());
     region2 = cache.createRegion(PR_NAME2, attr.create());
     assertNotNull(region2);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region " + PR_NAME2 + " created Successfully :"
             + region2.toString());
 
@@ -1056,7 +1064,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     attr.setPartitionAttributes(paf.create());
     customerRegion2 = cache.createRegion(CUSTOMER2, attr.create());
     assertNotNull(customerRegion2);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region CUSTOMER2 created Successfully :"
             + customerRegion2.toString());
 
@@ -1069,7 +1077,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     attr.setPartitionAttributes(paf.create());
     orderRegion2 = cache.createRegion(ORDER2, attr.create());
     assertNotNull(orderRegion2);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region ORDER2 created Successfully :"
             + orderRegion2.toString());
 
@@ -1082,7 +1090,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     attr.setPartitionAttributes(paf.create());
     shipmentRegion2 = cache.createRegion(SHIPMENT2, attr.create());
     assertNotNull(shipmentRegion2);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region SHIPMENT2 created Successfully :"
             + shipmentRegion2.toString());
     
@@ -1185,7 +1193,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     RegionAttributes attrs = factory.create();
     region = cache.createRegion(PR_NAME, attrs);
     assertNotNull(region);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Distributed Region " + PR_NAME + " created Successfully :"
             + region.toString());
 
@@ -1194,7 +1202,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     attrs = factory.create();
     customerRegion = cache.createRegion("CUSTOMER", attrs);
     assertNotNull(customerRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Distributed Region CUSTOMER created Successfully :"
             + customerRegion.toString());
 
@@ -1203,7 +1211,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     attrs = factory.create();
     orderRegion = cache.createRegion("ORDER", attrs);
     assertNotNull(orderRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Distributed Region ORDER created Successfully :"
             + orderRegion.toString());
 
@@ -1212,7 +1220,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     attrs = factory.create();
     shipmentRegion = cache.createRegion("SHIPMENT", attrs);
     assertNotNull(shipmentRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Distributed Region SHIPMENT created Successfully :"
             + shipmentRegion.toString());
   }
@@ -1224,7 +1232,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     RegionAttributes attrs = factory.create();
     region = cache.createRegion(PR_NAME, attrs);
     assertNotNull(region);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Distributed Region " + PR_NAME + " created Successfully :"
             + region.toString());
 
@@ -1233,7 +1241,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     attrs = factory.create();
     customerRegion = cache.createRegion("CUSTOMER", attrs);
     assertNotNull(customerRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Distributed Region CUSTOMER created Successfully :"
             + customerRegion.toString());
 
@@ -1242,7 +1250,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     attrs = factory.create();
     orderRegion = cache.createRegion("ORDER", attrs);
     assertNotNull(orderRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Distributed Region ORDER created Successfully :"
             + orderRegion.toString());
 
@@ -1251,7 +1259,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     attrs = factory.create();
     shipmentRegion = cache.createRegion("SHIPMENT", attrs);
     assertNotNull(shipmentRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Distributed Region SHIPMENT created Successfully :"
             + shipmentRegion.toString());
     
@@ -1262,7 +1270,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     attrs = factory.create();
     region2 = cache.createRegion(PR_NAME2, attrs);
     assertNotNull(region2);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Distributed Region " + PR_NAME2 + " created Successfully :"
             + region2.toString());
 
@@ -1271,7 +1279,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     attrs = factory.create();
     customerRegion2 = cache.createRegion(CUSTOMER2, attrs);
     assertNotNull(customerRegion2);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Distributed Region CUSTOMER2 created Successfully :"
             + customerRegion2.toString());
 
@@ -1280,7 +1288,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     attrs = factory.create();
     orderRegion2 = cache.createRegion(ORDER2, attrs);
     assertNotNull(orderRegion2);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Distributed Region ORDER2 created Successfully :"
             + orderRegion2.toString());
 
@@ -1289,7 +1297,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     attrs = factory.create();
     shipmentRegion2 = cache.createRegion(SHIPMENT2, attrs);
     assertNotNull(shipmentRegion2);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Distributed Region SHIPMENT2 created Successfully :"
             + shipmentRegion2.toString());
   }
@@ -1301,7 +1309,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     RegionAttributes attrs = factory.create();
     region = cache.createRegion(PR_NAME, attrs);
     assertNotNull(region);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Distributed Region " + PR_NAME + " created Successfully :"
             + region.toString());
 
@@ -1310,7 +1318,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     attrs = factory.create();
     customerRegion = cache.createRegion("CUSTOMER", attrs);
     assertNotNull(customerRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Distributed Region CUSTOMER created Successfully :"
             + customerRegion.toString());
 
@@ -1319,7 +1327,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     attrs = factory.create();
     orderRegion = cache.createRegion("ORDER", attrs);
     assertNotNull(orderRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Distributed Region ORDER created Successfully :"
             + orderRegion.toString());
 
@@ -1328,7 +1336,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     attrs = factory.create();
     shipmentRegion = cache.createRegion("SHIPMENT", attrs);
     assertNotNull(shipmentRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Distributed Region SHIPMENT created Successfully :"
             + shipmentRegion.toString());
     
@@ -1350,7 +1358,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
       server.start();
     }
     catch (IOException e) {
-      fail("Failed to start server ", e);
+      Assert.fail("Failed to start server ", e);
     }
 
     PartitionAttributesFactory paf = new PartitionAttributesFactory();
@@ -1360,7 +1368,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     region = cache.createRegion(PR_NAME, attr.create());
 
     assertNotNull(region);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region " + PR_NAME + " created Successfully :"
             + region.toString());
 
@@ -1373,7 +1381,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     attr.setPartitionAttributes(paf.create());
     customerRegion = cache.createRegion("CUSTOMER", attr.create());
     assertNotNull(customerRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region CUSTOMER created Successfully :"
             + customerRegion.toString());
 
@@ -1385,7 +1393,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     attr.setPartitionAttributes(paf.create());
     orderRegion = cache.createRegion("ORDER", attr.create());
     assertNotNull(orderRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region ORDER created Successfully :"
             + orderRegion.toString());
 
@@ -1397,7 +1405,7 @@ public class PartitionedRegionSingleHopWithServerGroupDUnitTest extends CacheTes
     attr.setPartitionAttributes(paf.create());
     shipmentRegion = cache.createRegion("SHIPMENT", attr.create());
     assertNotNull(shipmentRegion);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region SHIPMENT created Successfully :"
             + shipmentRegion.toString());
     return port;
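(For orientation on the other helpers appearing throughout this file's hunks, a small sketch, not part of the patch, combining IgnoredException, Assert.fail with a cause, and LogWriterUtils. The method name, the CacheServer parameter, and the log message are assumptions made for the example; the helper calls themselves are the ones shown above.)

import java.io.IOException;

import com.gemstone.gemfire.cache.server.CacheServer;
import com.gemstone.gemfire.test.dunit.Assert;
import com.gemstone.gemfire.test.dunit.IgnoredException;
import com.gemstone.gemfire.test.dunit.LogWriterUtils;

public class HelperUsageSketch {
  static void startServerOrFail(CacheServer server) {
    // Register the same ignored exception the test's setUp registers.
    IgnoredException.addIgnoredException("java.net.SocketException");
    try {
      server.start();
    } catch (IOException e) {
      // Unlike JUnit's fail(String), the dunit Assert.fail keeps the original cause.
      Assert.fail("Failed to start server ", e);
    }
    LogWriterUtils.getLogWriter().info("Cache server started on port " + server.getPort());
  }
}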

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSizeDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSizeDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSizeDUnitTest.java
index c4c9796..cda653a 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSizeDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionSizeDUnitTest.java
@@ -31,11 +31,13 @@ import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.OSProcess;
 import com.gemstone.gemfire.internal.logging.InternalLogWriter;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -118,7 +120,7 @@ public class PartitionedRegionSizeDUnitTest extends
       public void run2()
       {
         Cache cache = getCache();
-        final int oldLevel = setLogLevel(getLogWriter(), InternalLogWriter.WARNING_LEVEL);
+        final int oldLevel = setLogLevel(LogWriterUtils.getLogWriter(), InternalLogWriter.WARNING_LEVEL);
         for (int j = 0; j < MAX_REGIONS; j++) {
           Region pr = cache.getRegion(Region.SEPARATOR + PR_PREFIX
               + "DistAckSync" + j);
@@ -128,7 +130,7 @@ public class PartitionedRegionSizeDUnitTest extends
             pr.put(key, value);
           }
         }
-        setLogLevel(getLogWriter(), oldLevel);
+        setLogLevel(LogWriterUtils.getLogWriter(), oldLevel);
 
       }
     });
@@ -220,7 +222,7 @@ public class PartitionedRegionSizeDUnitTest extends
       public void run2()
       {
         Cache cache = getCache();
-        final int oldLevel = setLogLevel(getLogWriter(), InternalLogWriter.WARNING_LEVEL);
+        final int oldLevel = setLogLevel(LogWriterUtils.getLogWriter(), InternalLogWriter.WARNING_LEVEL);
         for (int j = 0; j < MAX_REGIONS; j++) {
           Region pr = cache.getRegion(Region.SEPARATOR + PR_PREFIX
               + "DistAckASync" + j);
@@ -230,14 +232,14 @@ public class PartitionedRegionSizeDUnitTest extends
             pr.put(key, value);
           }
         }
-        setLogLevel(getLogWriter(), oldLevel);
+        setLogLevel(LogWriterUtils.getLogWriter(), oldLevel);
       }
     });
 
-    DistributedTestCase.join(async0, 30 * 1000, getLogWriter());
+    ThreadUtils.join(async0, 30 * 1000);
 
 	if (async0.exceptionOccurred()) {
-          fail("Exception during async0", async0.getException());
+          Assert.fail("Exception during async0", async0.getException());
 	}
 				   
     
@@ -324,7 +326,7 @@ public class PartitionedRegionSizeDUnitTest extends
       public void run2()
       {
         Cache cache = getCache();
-        final int oldLevel = setLogLevel(getLogWriter(), InternalLogWriter.WARNING_LEVEL);
+        final int oldLevel = setLogLevel(LogWriterUtils.getLogWriter(), InternalLogWriter.WARNING_LEVEL);
         for (int j = 0; j < MAX_REGIONS; j++) {
           Region pr = cache.getRegion(Region.SEPARATOR + PR_PREFIX
               + "DistAckSyncChangingVMCount" + j);
@@ -334,7 +336,7 @@ public class PartitionedRegionSizeDUnitTest extends
             pr.put(key, value);
           }
         }
-        setLogLevel(getLogWriter(), oldLevel);
+        setLogLevel(LogWriterUtils.getLogWriter(), oldLevel);
       }
     });
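(The PartitionedRegionSizeDUnitTest hunks also show the replacement for DistributedTestCase.join; a short sketch of that pattern follows. The helper method name and the failure message are illustrative; the ThreadUtils.join timeout and the exceptionOccurred/getException checks mirror the hunk above.)

import com.gemstone.gemfire.test.dunit.Assert;
import com.gemstone.gemfire.test.dunit.AsyncInvocation;
import com.gemstone.gemfire.test.dunit.ThreadUtils;

public class AsyncJoinSketch {
  static void joinAndCheck(AsyncInvocation async) {
    // The old DistributedTestCase.join(async, timeout, logWriter) drops its log writer argument here.
    ThreadUtils.join(async, 30 * 1000);
    if (async.exceptionOccurred()) {
      Assert.fail("Exception during async invocation", async.getException());
    }
  }
}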
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionStatsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionStatsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionStatsDUnitTest.java
index fe2a104..2a9a040 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionStatsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionStatsDUnitTest.java
@@ -30,9 +30,12 @@ import com.gemstone.gemfire.cache.control.RebalanceOperation;
 import com.gemstone.gemfire.cache.control.RebalanceResults;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.distributed.DistributedSystem;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * @author tapshank, Created on Jan 19, 2006
@@ -358,7 +361,7 @@ public class PartitionedRegionStatsDUnitTest extends
         try {
           RebalanceResults results = op.getResults();
         } catch (Exception e) {
-          fail("ex", e);
+          Assert.fail("ex", e);
         }
       }
     };
@@ -406,7 +409,7 @@ public class PartitionedRegionStatsDUnitTest extends
           RebalanceResults results = op.getResults();
         }
         catch (Exception e) {
-          fail("ex", e);
+          Assert.fail("ex", e);
         }
       }
     };
@@ -517,7 +520,7 @@ public class PartitionedRegionStatsDUnitTest extends
         Cache cache = getCache();
         PartitionedRegion region = (PartitionedRegion) cache.getRegion("region1");
         final PartitionedRegionStats stats = region.getPrStats();
-        waitForCriterion(new WaitCriterion() {
+        Wait.waitForCriterion(new WaitCriterion() {
           
           @Override
           public boolean done() {
@@ -541,7 +544,7 @@ public class PartitionedRegionStatsDUnitTest extends
         try {
           tombstoneService.forceBatchExpirationForTests(1);
         } catch (InterruptedException e) {
-          fail("interrupted", e);
+          Assert.fail("interrupted", e);
         }
       }
     });

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionTestUtilsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionTestUtilsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionTestUtilsDUnitTest.java
index f43c8b8..0690a6d 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionTestUtilsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/PartitionedRegionTestUtilsDUnitTest.java
@@ -36,7 +36,9 @@ import com.gemstone.gemfire.cache.PartitionAttributesFactory;
 import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.distributed.DistributedMember;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -90,16 +92,16 @@ public class PartitionedRegionTestUtilsDUnitTest extends
         GsRandom rand = new GsRandom(123);
         // Assert that its empty
         for(int i=0; i<5; i++) {
-          getLogWriter().info("Invocation " + i + " of getSomeKeys");
+          LogWriterUtils.getLogWriter().info("Invocation " + i + " of getSomeKeys");
           try {
             Set s = null;
             s = pr.getSomeKeys(rand);
             assertNotNull(s);
             assertTrue(s.isEmpty());
           } catch (ClassNotFoundException cnfe) {
-            fail("GetSomeKeys failed with ClassNotFoundException", cnfe);
+            Assert.fail("GetSomeKeys failed with ClassNotFoundException", cnfe);
           } catch (IOException ioe) {
-            fail("GetSomeKeys failed with IOException", ioe);
+            Assert.fail("GetSomeKeys failed with IOException", ioe);
           }
         }
         
@@ -110,26 +112,26 @@ public class PartitionedRegionTestUtilsDUnitTest extends
         
         // Assert not empty and has value in an accepable range
         for(int i=0; i<5; i++) {
-          getLogWriter().info("Invocation " + i + " of getSomeKeys");
+          LogWriterUtils.getLogWriter().info("Invocation " + i + " of getSomeKeys");
           try {
             Set s = null;
             s = pr.getSomeKeys(rand);
             assertNotNull(s);
             assertFalse(s.isEmpty());
             Integer val;
-            getLogWriter().info("Invocation " + i + " got " + s.size() + " keys");
+            LogWriterUtils.getLogWriter().info("Invocation " + i + " got " + s.size() + " keys");
             for (Iterator it = s.iterator(); it.hasNext(); ) {
               Object key = it.next();
-              getLogWriter().info("Key: " + key);
+              LogWriterUtils.getLogWriter().info("Key: " + key);
               val = (Integer) pr.get(key);
               assertNotNull(val);
               assertTrue(val.intValue() >= 0);
               assertTrue(val.intValue() < MAXKEYS); 
             }
           } catch (ClassNotFoundException cnfe) {
-            fail("GetSomeKeys failed with ClassNotFoundException", cnfe);
+            Assert.fail("GetSomeKeys failed with ClassNotFoundException", cnfe);
           } catch (IOException ioe) {
-            fail("GetSomeKeys failed with IOException", ioe);
+            Assert.fail("GetSomeKeys failed with IOException", ioe);
           }
         }
       }
@@ -511,7 +513,7 @@ public class PartitionedRegionTestUtilsDUnitTest extends
               assertEquals(0, p.getBucketOwnersForValidation(i).size());
             }
           } catch (ForceReattemptException noGood) {
-            fail("Unexpected force retry", noGood);
+            Assert.fail("Unexpected force retry", noGood);
           }
         }
       }
@@ -560,7 +562,7 @@ public class PartitionedRegionTestUtilsDUnitTest extends
               assertEquals(1, primCount);
             }
           } catch (ForceReattemptException noGood) {
-            fail("Unexpected force retry", noGood);
+            Assert.fail("Unexpected force retry", noGood);
           }
         }
       }


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRColocationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRColocationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRColocationDUnitTest.java
index 63b2334..5ce60f3 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRColocationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRColocationDUnitTest.java
@@ -60,11 +60,16 @@ import com.gemstone.gemfire.internal.cache.execute.data.OrderId;
 import com.gemstone.gemfire.internal.cache.execute.data.Shipment;
 import com.gemstone.gemfire.internal.cache.execute.data.ShipmentId;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 /**
  * This is the test for the custom and colocated partitioning of
  * PartitionedRegion
@@ -712,14 +717,14 @@ public class PRColocationDUnitTest extends CacheTestCase {
         assertNotNull(basicGetCache());
         Region pr = basicGetCache().createRegion(partitionedRegionName, attr.create());
         assertNotNull(pr);
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "Partitioned Region " + partitionedRegionName
                 + " created Successfully :" + pr.toString());
       }
     });
 
     // add expected exception string
-    final ExpectedException ex = addExpectedException(
+    final IgnoredException ex = IgnoredException.addIgnoredException(
         "Colocated regions should have accessors at the same node", dataStore1);
     dataStore1.invoke(new CacheSerializableRunnable(
         "Colocated PR with Accessor on different nodes") {
@@ -749,7 +754,7 @@ public class PRColocationDUnitTest extends CacheTestCase {
         }
         catch (Exception Expected) {
           Expected.printStackTrace();
-          getLogWriter().info("Expected Message : " + Expected.getMessage());
+          LogWriterUtils.getLogWriter().info("Expected Message : " + Expected.getMessage());
           assertTrue(Expected.getMessage().startsWith(
               "Colocated regions should have accessors at the same node"));
         }
@@ -784,14 +789,14 @@ public class PRColocationDUnitTest extends CacheTestCase {
         assertNotNull(basicGetCache());
         Region pr = basicGetCache().createRegion(partitionedRegionName, attr.create());
         assertNotNull(pr);
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "Partitioned Region " + partitionedRegionName
                 + " created Successfully :" + pr.toString());
       }
     });
 
     // add expected exception string
-    final ExpectedException ex = addExpectedException(
+    final IgnoredException ex = IgnoredException.addIgnoredException(
         "Colocated regions should have accessors at the same node", dataStore1);
     dataStore1.invoke(new CacheSerializableRunnable(
         "Colocated PR with accessor on different nodes") {
@@ -820,7 +825,7 @@ public class PRColocationDUnitTest extends CacheTestCase {
               + "should have accessors at the same node");
         }
         catch (Exception Expected) {
-          getLogWriter().info("Expected Message : " + Expected.getMessage());
+          LogWriterUtils.getLogWriter().info("Expected Message : " + Expected.getMessage());
           assertTrue(Expected.getMessage().startsWith(
               "Colocated regions should have accessors at the same node"));
         }
@@ -855,7 +860,7 @@ public class PRColocationDUnitTest extends CacheTestCase {
         assertNotNull(basicGetCache());
         Region pr = basicGetCache().createRegion(partitionedRegionName, attr.create());
         assertNotNull(pr);
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "Partitioned Region " + partitionedRegionName
                 + " created Successfully :" + pr.toString());
       }
@@ -883,14 +888,14 @@ public class PRColocationDUnitTest extends CacheTestCase {
         assertNotNull(basicGetCache());
         Region pr = basicGetCache().createRegion(partitionedRegionName, attr.create());
         assertNotNull(pr);
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "Partitioned Region " + partitionedRegionName
                 + " created Successfully :" + pr.toString());
       }
     });
 
     // add expected exception string
-    final ExpectedException ex = addExpectedException("Cannot create buckets",
+    final IgnoredException ex = IgnoredException.addIgnoredException("Cannot create buckets",
         dataStore2);
     dataStore2.invoke(new CacheSerializableRunnable(
         "Colocated PR with PR on different node") {
@@ -922,7 +927,7 @@ public class PRColocationDUnitTest extends CacheTestCase {
               + "as colocated regions are not configured to be at the same nodes.");
         }
         catch (Exception Expected) {
-          getLogWriter().info("Expected Message : " + Expected.getMessage());
+          LogWriterUtils.getLogWriter().info("Expected Message : " + Expected.getMessage());
           assertTrue(Expected.getMessage().contains("Cannot create buckets, as "
               + "colocated regions are not configured to be at the same nodes."));
         }
@@ -958,9 +963,9 @@ public class PRColocationDUnitTest extends CacheTestCase {
         }
         catch (Exception NotExpected) {
           NotExpected.printStackTrace();
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "Unexpected Exception Message : " + NotExpected.getMessage());
-          fail("Unpexpected Exception" , NotExpected);
+          Assert.fail("Unpexpected Exception" , NotExpected);
         }
       }
     });
@@ -999,7 +1004,7 @@ public class PRColocationDUnitTest extends CacheTestCase {
     // add expected exception string
     final String expectedExMessage =
       "Any Region in colocation chain cannot be destroyed locally.";
-    final ExpectedException ex = addExpectedException(expectedExMessage,
+    final IgnoredException ex = IgnoredException.addIgnoredException(expectedExMessage,
         dataStore1);
     dataStore1.invoke(new CacheSerializableRunnable(
         "PR with Local destroy") {
@@ -1013,7 +1018,7 @@ public class PRColocationDUnitTest extends CacheTestCase {
               + expectedExMessage);
         }
         catch (Exception Expected) {
-          getLogWriter().info("Expected Messageee : " + Expected.getMessage());
+          LogWriterUtils.getLogWriter().info("Expected Messageee : " + Expected.getMessage());
           assertTrue(Expected.getMessage().contains(expectedExMessage));
         }
       }
@@ -1031,7 +1036,7 @@ public class PRColocationDUnitTest extends CacheTestCase {
               + expectedExMessage);
         }
         catch (Exception Expected) {
-          getLogWriter().info("Expected Messageee : " + Expected.getMessage());
+          LogWriterUtils.getLogWriter().info("Expected Messageee : " + Expected.getMessage());
           assertTrue(Expected.getMessage().contains(expectedExMessage));
         }
       }
@@ -1077,7 +1082,7 @@ public class PRColocationDUnitTest extends CacheTestCase {
     // add expected exception string
     final String expectedExMessage = "colocation chain cannot be destroyed, "
         + "unless all its children";
-    final ExpectedException ex = addExpectedException(expectedExMessage,
+    final IgnoredException ex = IgnoredException.addIgnoredException(expectedExMessage,
         dataStore1);
     dataStore1.invoke(new CacheSerializableRunnable(
         "PR with destroy") {
@@ -1091,7 +1096,7 @@ public class PRColocationDUnitTest extends CacheTestCase {
               + expectedExMessage);
         }
         catch (IllegalStateException expected) {
-          getLogWriter().info("Got message: " + expected.getMessage());
+          LogWriterUtils.getLogWriter().info("Got message: " + expected.getMessage());
           assertTrue(expected.getMessage().contains(expectedExMessage));
         }
       }
@@ -1109,7 +1114,7 @@ public class PRColocationDUnitTest extends CacheTestCase {
         }
         catch (Exception unexpected) {
           unexpected.printStackTrace();
-          getLogWriter().info("Unexpected Message: " + unexpected.getMessage());
+          LogWriterUtils.getLogWriter().info("Unexpected Message: " + unexpected.getMessage());
           fail("Could not destroy the child region.");
         }
       }
@@ -1126,7 +1131,7 @@ public class PRColocationDUnitTest extends CacheTestCase {
         }
         catch (Exception unexpected) {
           unexpected.printStackTrace();
-          getLogWriter().info("Unexpected Message: " + unexpected.getMessage());
+          LogWriterUtils.getLogWriter().info("Unexpected Message: " + unexpected.getMessage());
           fail("Could not destroy the parent region.");
         }
       }
@@ -1221,7 +1226,7 @@ public class PRColocationDUnitTest extends CacheTestCase {
               DummyKeyBasedRoutingResolver dummy = new DummyKeyBasedRoutingResolver(1);
               prForCustomer.put(dummy, new Integer(100));
               assertEquals(prForCustomer.get(dummy), new Integer(100));
-              getLogWriter().info(
+              LogWriterUtils.getLogWriter().info(
                   "Key :" + dummy.dummyID + " Value :"
                       + prForCustomer.get(dummy));
 
@@ -1230,14 +1235,14 @@ public class PRColocationDUnitTest extends CacheTestCase {
               assertNotNull(prForOrder);
               prForOrder.put(dummy, new Integer(200));
               assertEquals(prForOrder.get(dummy), new Integer(200));
-              getLogWriter().info(
+              LogWriterUtils.getLogWriter().info(
                   "Key :" + dummy.dummyID + " Value :" + prForOrder.get(dummy));
               return null;
             }
           });
     } catch (Exception unexpected) {
       unexpected.printStackTrace();
-      getLogWriter().info("Unexpected Message: " + unexpected.getMessage());
+      LogWriterUtils.getLogWriter().info("Unexpected Message: " + unexpected.getMessage());
       fail("Test failed");
     }
   }
@@ -1513,7 +1518,7 @@ public class PRColocationDUnitTest extends CacheTestCase {
       @Override
       public void run2() throws CacheException {
         getCache();
-        addExpectedException("redundancy should be same as the redundancy");
+        IgnoredException.addIgnoredException("redundancy should be same as the redundancy");
         createPR(rName, red1, Integer.valueOf(100), Integer.valueOf(3), null, Boolean.FALSE, Boolean.FALSE);
         try {
           createPR(rName+"colo", red0, Integer.valueOf(100), Integer.valueOf(3), rName, Boolean.FALSE, Boolean.FALSE);
@@ -1753,7 +1758,7 @@ public class PRColocationDUnitTest extends CacheTestCase {
         assertEquals(2, region.getDataStore().getAllLocalPrimaryBucketIds().size());
       }
     };
-    pause(5000);
+    Wait.pause(5000);
     dataStore1.invoke(checkForBuckets_ForOrder);
     
     dataStore2.invoke(PRColocationDUnitTest.class, "createPR", attributeObjects1);
@@ -1883,7 +1888,7 @@ public class PRColocationDUnitTest extends CacheTestCase {
       throw async2.getException();
     }
     
-    pause(5000);
+    Wait.pause(5000);
     SerializableRunnable checkForBuckets_ForOrder = new SerializableRunnable("check for buckets") {
       public void run() {
         PartitionedRegion region = (PartitionedRegion) basicGetCache().getRegion(OrderPartitionedRegionName);
@@ -2067,13 +2072,13 @@ public class PRColocationDUnitTest extends CacheTestCase {
       }
       Iterator primaryBucketIterator = primaryBucketListForCustomer.iterator();
       while (primaryBucketIterator.hasNext()) {
-        getLogWriter().info("Primary Bucket : " + primaryBucketIterator.next());
+        LogWriterUtils.getLogWriter().info("Primary Bucket : " + primaryBucketIterator.next());
 
       }
       Iterator SecondaryBucketIterator = secondaryBucketListForCustomer
           .iterator();
       while (SecondaryBucketIterator.hasNext()) {
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "Secondary Bucket : " + SecondaryBucketIterator.next());
       }
     }
@@ -2114,12 +2119,12 @@ public class PRColocationDUnitTest extends CacheTestCase {
       }
       Iterator primaryBucketIterator = primaryBucketListForOrder.iterator();
       while (primaryBucketIterator.hasNext()) {
-        getLogWriter().info("Primary Bucket : " + primaryBucketIterator.next());
+        LogWriterUtils.getLogWriter().info("Primary Bucket : " + primaryBucketIterator.next());
 
       }
       Iterator SecondaryBucketIterator = secondaryBucketListForOrder.iterator();
       while (SecondaryBucketIterator.hasNext()) {
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "Secondary Bucket : " + SecondaryBucketIterator.next());
       }
     }
@@ -2160,13 +2165,13 @@ public class PRColocationDUnitTest extends CacheTestCase {
       }
       Iterator primaryBucketIterator = primaryBucketListForShipment.iterator();
       while (primaryBucketIterator.hasNext()) {
-        getLogWriter().info("Primary Bucket : " + primaryBucketIterator.next());
+        LogWriterUtils.getLogWriter().info("Primary Bucket : " + primaryBucketIterator.next());
 
       }
       Iterator SecondaryBucketIterator = secondaryBucketListForShipment
           .iterator();
       while (SecondaryBucketIterator.hasNext()) {
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "Secondary Bucket : " + SecondaryBucketIterator.next());
       }
     }
@@ -2206,10 +2211,10 @@ public class PRColocationDUnitTest extends CacheTestCase {
     HashMap localBucket2RegionMap = (HashMap)customerPartitionedregion
         .getDataStore().getSizeLocally();
     int customerBucketSize = localBucket2RegionMap.size();
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Size of the " + customerPartitionedRegionName + " in this VM :- "
             + localBucket2RegionMap.size());
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Size of primary buckets the " + customerPartitionedRegionName + " in this VM :- "
             + customerPartitionedregion.getDataStore().getNumberOfPrimaryBucketsManaged());
     Set customerEntrySet = localBucket2RegionMap.entrySet();
@@ -2219,7 +2224,7 @@ public class PRColocationDUnitTest extends CacheTestCase {
       Map.Entry me = (Map.Entry)customerIterator.next();
       Integer size = (Integer)me.getValue();
       assertEquals(1, size.intValue());
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "Size of the Bucket " + me.getKey() + ": - " + size.toString());
     }
     
@@ -2227,10 +2232,10 @@ public class PRColocationDUnitTest extends CacheTestCase {
     localBucket2RegionMap = (HashMap)orderPartitionedregion.getDataStore()
         .getSizeLocally();
     int orderBucketSize = localBucket2RegionMap.size();
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Size of the " + orderPartitionedRegionName + " in this VM :- "
             + localBucket2RegionMap.size());
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Size of primary buckets the " + orderPartitionedRegionName + " in this VM :- "
             + orderPartitionedregion.getDataStore().getNumberOfPrimaryBucketsManaged());
     
@@ -2241,16 +2246,16 @@ public class PRColocationDUnitTest extends CacheTestCase {
       Map.Entry me = (Map.Entry)orderIterator.next();
       Integer size = (Integer)me.getValue();
       assertEquals(10, size.intValue());
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "Size of the Bucket " + me.getKey() + ": - " + size.toString());
     }
     localBucket2RegionMap = (HashMap)shipmentPartitionedregion.getDataStore()
         .getSizeLocally();
     int shipmentBucketSize = localBucket2RegionMap.size();
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Size of the " + shipmentPartitionedRegionName + " in this VM :- "
             + localBucket2RegionMap.size());
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Size of primary buckets the " + shipmentPartitionedRegionName + " in this VM :- "
             + shipmentPartitionedregion.getDataStore().getNumberOfPrimaryBucketsManaged());
     Set shipmentEntrySet = localBucket2RegionMap.entrySet();
@@ -2260,7 +2265,7 @@ public class PRColocationDUnitTest extends CacheTestCase {
       Map.Entry me = (Map.Entry)shipmentIterator.next();
       Integer size = (Integer)me.getValue();
       assertEquals(100, size.intValue());
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "Size of the Bucket " + me.getKey() + ": - " + size.toString());
     }
     
@@ -2302,7 +2307,7 @@ public class PRColocationDUnitTest extends CacheTestCase {
           + partitionedRegionName);
     }
     catch (Exception e) {
-      fail(
+      Assert.fail(
           "validateBeforePutCustomerPartitionedRegion : Failed while getting the region from cache",
           e);
     }
@@ -2328,7 +2333,7 @@ public class PRColocationDUnitTest extends CacheTestCase {
           .getRegion(Region.SEPARATOR + shipmentPartitionedRegionName);
     }
     catch (Exception e) {
-      fail(
+      Assert.fail(
           "validateAfterPutPartitionedRegion : failed while getting the region",
           e);
     }
@@ -2358,7 +2363,7 @@ public class PRColocationDUnitTest extends CacheTestCase {
           // assertNotNull(orderPartitionedregion.get(orderId));
 
           if (custId.equals(orderId.getCustId())) {
-            getLogWriter().info(
+            LogWriterUtils.getLogWriter().info(
                 orderId + "belongs to node " + idmForCustomer + " "
                     + idmForOrder);
             assertEquals(idmForCustomer, idmForOrder);
@@ -2370,7 +2375,7 @@ public class PRColocationDUnitTest extends CacheTestCase {
             ShipmentId shipmentId = (ShipmentId)shipmentIterator.next();
             // assertNotNull(shipmentPartitionedregion.get(shipmentId));
             if (orderId.equals(shipmentId.getOrderId())) {
-              getLogWriter().info(
+              LogWriterUtils.getLogWriter().info(
                   shipmentId + "belongs to node " + idmForOrder + " "
                       + idmForShipment);
             }
@@ -2401,7 +2406,7 @@ public class PRColocationDUnitTest extends CacheTestCase {
         assertEquals(customer, pr.get(custid));
       }
       catch (Exception e) {
-        fail("putInPartitionedRegion : failed while doing put operation in "
+        Assert.fail("putInPartitionedRegion : failed while doing put operation in "
             + pr.getFullPath(), e);
       }
     }
@@ -2415,7 +2420,7 @@ public class PRColocationDUnitTest extends CacheTestCase {
     try {
       partitionedregion.close();
     } catch (Exception e) {
-      fail(
+      Assert.fail(
           "closeRegion : failed to close region : " + partitionedregion,
           e);
     }
@@ -2451,11 +2456,11 @@ public class PRColocationDUnitTest extends CacheTestCase {
         assertEquals(customer,partitionedregion.get(custid));
       }
       catch (Exception e) {
-        fail(
+        Assert.fail(
             "putCustomerPartitionedRegion : failed while doing put operation in CustomerPartitionedRegion ",
             e);
       }
-      getLogWriter().info("Customer :- { " + custid + " : " + customer + " }");
+      LogWriterUtils.getLogWriter().info("Customer :- { " + custid + " : " + customer + " }");
     }
   }
 
@@ -2477,11 +2482,11 @@ public class PRColocationDUnitTest extends CacheTestCase {
 
         }
         catch (Exception e) {
-          fail(
+          Assert.fail(
               "putOrderPartitionedRegion : failed while doing put operation in OrderPartitionedRegion ",
               e);
         }
-        getLogWriter().info("Order :- { " + orderId + " : " + order + " }");
+        LogWriterUtils.getLogWriter().info("Order :- { " + orderId + " : " + order + " }");
       }
     }
   }
@@ -2504,11 +2509,11 @@ public class PRColocationDUnitTest extends CacheTestCase {
 
         }
         catch (Exception e) {
-          fail(
+          Assert.fail(
               "putOrderPartitionedRegion : failed while doing put operation in OrderPartitionedRegion ",
               e);
         }
-        getLogWriter().info("Order :- { " + orderId + " : " + order + " }");
+        LogWriterUtils.getLogWriter().info("Order :- { " + orderId + " : " + order + " }");
       }
     }
   }
@@ -2533,11 +2538,11 @@ public class PRColocationDUnitTest extends CacheTestCase {
             assertEquals(shipment,partitionedregion.get(shipmentId));
           }
           catch (Exception e) {
-            fail(
+            Assert.fail(
                 "putShipmentPartitionedRegion : failed while doing put operation in ShipmentPartitionedRegion ",
                 e);
           }
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "Shipment :- { " + shipmentId + " : " + shipment + " }");
         }
       }
@@ -2571,12 +2576,12 @@ public class PRColocationDUnitTest extends CacheTestCase {
     Region pr = basicGetCache().getRegion(partitionedRegionName);
     assertNotNull(pr);
     try {
-      getLogWriter().info("Destroying Partitioned Region " + partitionedRegionName);
+      LogWriterUtils.getLogWriter().info("Destroying Partitioned Region " + partitionedRegionName);
       pr.destroyRegion();
       fail("Did not get the expected ISE");
     } catch (Exception e) {
       if (!(e instanceof IllegalStateException)) {
-        fail("Expected IllegalStateException, but it's not.", e);
+        Assert.fail("Expected IllegalStateException, but it's not.", e);
       }
     }
   }
@@ -2602,7 +2607,7 @@ public class PRColocationDUnitTest extends CacheTestCase {
     assertNotNull(basicGetCache());
     Region pr = basicGetCache().createRegion(partitionedRegionName, attr.create());
     assertNotNull(pr);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned Region " + partitionedRegionName
             + " created Successfully :" + pr.toString());
   }
@@ -2623,7 +2628,7 @@ public class PRColocationDUnitTest extends CacheTestCase {
     attr.setPartitionAttributes(prAttr);
     Region pr = root.createSubregion(partitionedRegionName, attr.create());
     assertNotNull(pr);
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Partitioned sub region " + pr.getName()
             + " created Successfully :" + pr.toString());
     if(localMaxMemory == 0){
@@ -2636,9 +2641,8 @@ public class PRColocationDUnitTest extends CacheTestCase {
   }
 
   @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-    invokeInEveryVM(new SerializableRunnable() {
+  protected final void postTearDownCacheTestCase() throws Exception {
+    Invoke.invokeInEveryVM(new SerializableRunnable() {
       public void run() {
         InternalResourceManager.setResourceObserver(null);
       }

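A note on the recurring substitutions in PRColocationDUnitTest above: helpers formerly inherited from DistributedTestCase are now reached through the extracted static classes, with addExpectedException becoming IgnoredException.addIgnoredException, getLogWriter becoming LogWriterUtils.getLogWriter, and the two-argument fail becoming Assert.fail. A minimal sketch of the new idiom follows, with the VM parameter and messages taken from the hunks above; the helper name and the Runnable parameter are purely illustrative and not part of the patch.

    import com.gemstone.gemfire.test.dunit.Assert;
    import com.gemstone.gemfire.test.dunit.IgnoredException;
    import com.gemstone.gemfire.test.dunit.LogWriterUtils;
    import com.gemstone.gemfire.test.dunit.VM;

    // Sketch only; createRegionExpectingFailure is a hypothetical helper, not part of the patch.
    void createRegionExpectingFailure(VM dataStore1, Runnable createStep) {
      // addExpectedException(..) -> IgnoredException.addIgnoredException(..), same message, same target VM.
      final IgnoredException ex = IgnoredException.addIgnoredException(
          "Colocated regions should have accessors at the same node", dataStore1);
      try {
        createStep.run();
        throw new AssertionError("Expected the colocation check to reject this region");
      } catch (IllegalStateException expected) {
        // getLogWriter() -> LogWriterUtils.getLogWriter()
        LogWriterUtils.getLogWriter().info("Expected Message : " + expected.getMessage());
      } catch (RuntimeException unexpected) {
        // fail(String, Throwable) -> Assert.fail(String, Throwable) on the dunit Assert class.
        Assert.fail("Unexpected failure from the create step", unexpected);
      }
    }
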
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRCustomPartitioningDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRCustomPartitioningDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRCustomPartitioningDUnitTest.java
index 302edd7..981db10 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRCustomPartitioningDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRCustomPartitioningDUnitTest.java
@@ -51,7 +51,9 @@ import com.gemstone.gemfire.internal.cache.PartitionedRegion;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionDUnitTestCase;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionDataStore.BucketVisitor;
 import com.gemstone.gemfire.internal.cache.xmlcache.Declarable2;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -153,12 +155,12 @@ public class PRCustomPartitioningDUnitTest extends
     for (int b = 0; b < numBucks; b++) {
       if (par.getBucketKeys(b).contains(key)) {
         foundIt = true;
-        getLogWriter().info("Key " + key + " found in bucket " + b);
+        LogWriterUtils.getLogWriter().info("Key " + key + " found in bucket " + b);
         break;
       }
     }
     if (!foundIt) {
-      getLogWriter().severe("Key " + key + " not found in any bucket");      
+      LogWriterUtils.getLogWriter().severe("Key " + key + " not found in any bucket");      
     }
     return foundIt;
   }
@@ -204,7 +206,7 @@ public class PRCustomPartitioningDUnitTest extends
               ppr.dumpAllBuckets(false);
             }
             catch (ReplyException re) {
-              fail("dumpAllBuckets", re);
+              Assert.fail("dumpAllBuckets", re);
             }
           }
         });
@@ -240,7 +242,7 @@ public class PRCustomPartitioningDUnitTest extends
               ppr.dumpAllBuckets(false);
             }
             catch (ReplyException re) {
-              fail("dumpAllBuckets", re);
+              Assert.fail("dumpAllBuckets", re);
             }
           }
         });
@@ -275,7 +277,7 @@ public class PRCustomPartitioningDUnitTest extends
               ppr.dumpAllBuckets(false);
             }
             catch (ReplyException re) {
-              fail("dumpAllBuckets", re);
+              Assert.fail("dumpAllBuckets", re);
             }
           }
         });
@@ -310,7 +312,7 @@ public class PRCustomPartitioningDUnitTest extends
               ppr.dumpAllBuckets(false);
             }
             catch (ReplyException re) {
-              fail("dumpAllBuckets", re);
+              Assert.fail("dumpAllBuckets", re);
             }
           }
         });

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRFunctionExecutionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRFunctionExecutionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRFunctionExecutionDUnitTest.java
index 359143f..0c4b7a9 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRFunctionExecutionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRFunctionExecutionDUnitTest.java
@@ -66,12 +66,17 @@ import com.gemstone.gemfire.internal.cache.execute.data.Order;
 import com.gemstone.gemfire.internal.cache.execute.data.OrderId;
 import com.gemstone.gemfire.internal.cache.functions.TestFunction;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 public class PRFunctionExecutionDUnitTest extends
     PartitionedRegionDUnitTestCase {
@@ -316,7 +321,7 @@ public class PRFunctionExecutionDUnitTest extends
             }
             catch (Throwable e) {
               e.printStackTrace();
-              fail("This is not expected Exception", e);
+              Assert.fail("This is not expected Exception", e);
             }
             return Boolean.TRUE;
           }
@@ -390,7 +395,7 @@ public class PRFunctionExecutionDUnitTest extends
         }
         catch (Throwable e) {
           e.printStackTrace();
-          fail("This is not expected Exception", e);
+          Assert.fail("This is not expected Exception", e);
         }
         return Boolean.TRUE;
       }
@@ -750,7 +755,7 @@ public class PRFunctionExecutionDUnitTest extends
   }
   
   public void testLocalMultiKeyExecution_BucketMoved() throws Exception {
-    addExpectedException("BucketMovedException");
+    IgnoredException.addIgnoredException("BucketMovedException");
     final String rName = getUniqueName();
     Host host = Host.getHost(0);
     final VM datastore0 = host.getVM(0);
@@ -893,7 +898,7 @@ public class PRFunctionExecutionDUnitTest extends
         }
         catch (Throwable e) {
           e.printStackTrace();
-          fail("This is not expected Exception", e);
+          Assert.fail("This is not expected Exception", e);
         }
         return Boolean.TRUE;
       }
@@ -980,7 +985,7 @@ public class PRFunctionExecutionDUnitTest extends
             return excuse;
           }
         };
-        DistributedTestCase.waitForCriterion(wc, 3000, 200, false);
+        Wait.waitForCriterion(wc, 3000, 200, false);
         long endTime = System.currentTimeMillis();
         getCache().getLogger().fine(
             "Time wait for Cache Close = " + (endTime - startTime));
@@ -990,10 +995,10 @@ public class PRFunctionExecutionDUnitTest extends
     });
     assertEquals(Boolean.TRUE, o);
 
-    DistributedTestCase.join(async[0], 60 * 1000, getLogWriter());
+    ThreadUtils.join(async[0], 60 * 1000);
 
     if (async[0].getException() != null) {
-      fail("UnExpected Exception Occured : ", async[0].getException());
+      Assert.fail("UnExpected Exception Occured : ", async[0].getException());
     }
     List l = (List)async[0].getReturnValue();
     assertEquals(2, l.size());
@@ -1078,7 +1083,7 @@ public class PRFunctionExecutionDUnitTest extends
             return excuse;
           }
         };
-        DistributedTestCase.waitForCriterion(wc, 3000, 200, false);
+        Wait.waitForCriterion(wc, 3000, 200, false);
         long endTime = System.currentTimeMillis();
         getCache().getLogger().fine(
             "Time wait for Cache Close = " + (endTime - startTime));
@@ -1088,10 +1093,10 @@ public class PRFunctionExecutionDUnitTest extends
     });
     assertEquals(Boolean.TRUE, o);
 
-    DistributedTestCase.join(async[0], 60 * 1000, getLogWriter());
+    ThreadUtils.join(async[0], 60 * 1000);
 
     if (async[0].getException() != null) {
-      fail("UnExpected Exception Occured : ", async[0].getException());
+      Assert.fail("UnExpected Exception Occured : ", async[0].getException());
     }
     List l = (List)async[0].getReturnValue();
     assertEquals(2, l.size());
@@ -1585,7 +1590,7 @@ public class PRFunctionExecutionDUnitTest extends
         }
         catch (Exception expected) {
           // No data should cause exec to throw
-          getLogWriter().warning("Exception Occured : "+ expected.getMessage());
+          LogWriterUtils.getLogWriter().warning("Exception Occured : "+ expected.getMessage());
           // boolean expectedStr = expected.getMessage().startsWith("No target
           // node was found for routingKey");
           // assertTrue("Unexpected exception: " + expected, expectedStr);
@@ -2242,7 +2247,7 @@ public class PRFunctionExecutionDUnitTest extends
         ResultCollector rc1 = dataSet.withArgs(Boolean.TRUE)
             .execute(function.getId());
         List l = ((List)rc1.getResult());
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "PRFunctionExecutionDUnitTest#testExecutionOnAllNodes_byName : Result size :"
                 + l.size() + " Result : " + l);
         assertEquals(4, l.size());
@@ -2427,7 +2432,7 @@ public class PRFunctionExecutionDUnitTest extends
               }
             });
         List l = ((List)rc1.getResult());
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "PRFunctionExecutionDUnitTest#testExecutionOnAllNodes_byName : Result size :"
                 + l.size() + " Result : " + l);
         assertEquals(4, l.size());
@@ -2532,7 +2537,7 @@ public class PRFunctionExecutionDUnitTest extends
               }
             });
         List l = ((List)rc1.getResult());
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "PRFunctionExecutionDUnitTest#testExecutionOnAllNodes_byName : Result size :"
                 + l.size() + " Result : " + l);
         assertEquals(4, l.size());
@@ -2603,11 +2608,11 @@ public class PRFunctionExecutionDUnitTest extends
             testKeys.add(custid);
           }
           catch (Exception e) {
-            fail(
+            Assert.fail(
                 "putCustomerPartitionedRegion : failed while doing put operation in CustomerPartitionedRegion ",
                 e);
           }
-          getLogWriter().fine("Customer :- { " + custid + " : " + customer + " }");
+          LogWriterUtils.getLogWriter().fine("Customer :- { " + custid + " : " + customer + " }");
         }
         
         Function function = new TestFunction(true,TestFunction.TEST_FUNCTION3);
@@ -2714,11 +2719,11 @@ public class PRFunctionExecutionDUnitTest extends
               testKeys.add(custid);
           }
           catch (Exception e) {
-            fail(
+            Assert.fail(
                 "putCustomerPartitionedRegion : failed while doing put operation in CustomerPartitionedRegion ",
                 e);
           }
-          getLogWriter().fine("Customer :- { " + custid + " : " + customer + " }");
+          LogWriterUtils.getLogWriter().fine("Customer :- { " + custid + " : " + customer + " }");
         }
 
         PartitionedRegion partitionedregion = (PartitionedRegion)getCache().getRegion(rName2);
@@ -2736,11 +2741,11 @@ public class PRFunctionExecutionDUnitTest extends
               
             }
             catch (Exception e) {
-              fail(
+              Assert.fail(
                   "putOrderPartitionedRegion : failed while doing put operation in OrderPartitionedRegion ",
                   e);
             }
-            getLogWriter().fine("Order :- { " + orderId + " : " + order + " }");
+            LogWriterUtils.getLogWriter().fine("Order :- { " + orderId + " : " + order + " }");
           }
         }
                 
@@ -3049,9 +3054,9 @@ public class PRFunctionExecutionDUnitTest extends
       ds.disconnect();
     }
     catch (Exception e) {
-      getLogWriter().info("Exception Occured : " + e.getMessage());
+      LogWriterUtils.getLogWriter().info("Exception Occured : " + e.getMessage());
       e.printStackTrace();
-      fail("Test failed", e);
+      Assert.fail("Test failed", e);
     }
   }
 

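The async-invocation handling in PRFunctionExecutionDUnitTest follows the same extraction: DistributedTestCase.join(async, timeout, getLogWriter()) becomes ThreadUtils.join(async, timeout) with the log-writer argument dropped, and the two-argument fail on the async exception becomes Assert.fail. A minimal sketch, assuming only the calls visible in the hunks above; the helper name is illustrative.

    import com.gemstone.gemfire.test.dunit.Assert;
    import com.gemstone.gemfire.test.dunit.AsyncInvocation;
    import com.gemstone.gemfire.test.dunit.ThreadUtils;

    // Sketch only; awaitAsync is a hypothetical helper wrapping the pattern above.
    void awaitAsync(AsyncInvocation async) {
      // ThreadUtils.join takes just the invocation and a timeout in milliseconds.
      ThreadUtils.join(async, 60 * 1000);
      if (async.getException() != null) {
        // Surface the asynchronous failure through the dunit Assert helper.
        Assert.fail("Unexpected exception in async invocation", async.getException());
      }
    }
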
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRFunctionExecutionTimeOutDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRFunctionExecutionTimeOutDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRFunctionExecutionTimeOutDUnitTest.java
index 6ded986..169ad3a 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRFunctionExecutionTimeOutDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRFunctionExecutionTimeOutDUnitTest.java
@@ -41,6 +41,7 @@ import com.gemstone.gemfire.internal.cache.PartitionedRegionTestHelper;
 import com.gemstone.gemfire.internal.cache.functions.TestFunction;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -666,7 +667,7 @@ public class PRFunctionExecutionTimeOutDUnitTest extends
    * @throws Exception
    */
   public void testLocalMultiKeyExecution_byName() throws Exception {
-    addExpectedException("BucketMovedException");
+    IgnoredException.addIgnoredException("BucketMovedException");
     final String rName = getUniqueName();
     Host host = Host.getHost(0);
     VM localOnly = host.getVM(3);
@@ -797,7 +798,7 @@ public class PRFunctionExecutionTimeOutDUnitTest extends
     final VM datastore1 = host.getVM(1);
     final VM datastore2 = host.getVM(2);
     final VM datastore3 = host.getVM(3);
-    addExpectedException("BucketMovedException");
+    IgnoredException.addIgnoredException("BucketMovedException");
     getCache();
     SerializableCallable dataStoreCreate = new SerializableCallable(
         "Create PR with Function Factory") {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRFunctionExecutionWithResultSenderDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRFunctionExecutionWithResultSenderDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRFunctionExecutionWithResultSenderDUnitTest.java
index df36f8f..458ac5d 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRFunctionExecutionWithResultSenderDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRFunctionExecutionWithResultSenderDUnitTest.java
@@ -48,6 +48,7 @@ import com.gemstone.gemfire.internal.cache.PartitionedRegionDUnitTestCase;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionTestHelper;
 import com.gemstone.gemfire.internal.cache.functions.TestFunction;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -584,7 +585,7 @@ public class PRFunctionExecutionWithResultSenderDUnitTest extends
         ResultCollector rc1 = dataSet.withArgs(Boolean.TRUE).execute(
             function.getId());
         List l = ((List)rc1.getResult());
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "PRFunctionExecutionDUnitTest#testExecutionOnAllNodes_byName : Result size :"
                 + l.size() + " Result : " + l);
         assertEquals(4, l.size());

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRPerformanceTestDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRPerformanceTestDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRPerformanceTestDUnitTest.java
index 8f99059..cd35f50 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRPerformanceTestDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRPerformanceTestDUnitTest.java
@@ -52,7 +52,9 @@ import com.gemstone.gemfire.distributed.DistributedMember;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.cache.PartitionedRegion;
 import com.gemstone.gemfire.internal.cache.PartitionedRegionDUnitTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -157,7 +159,7 @@ public class PRPerformanceTestDUnitTest extends
       }
     }
     if (!foundIt) {
-      getLogWriter().severe("Key " + key + " not found in any bucket");      
+      LogWriterUtils.getLogWriter().severe("Key " + key + " not found in any bucket");      
     }
     return foundIt;
   }
@@ -251,8 +253,8 @@ public class PRPerformanceTestDUnitTest extends
             list = (ArrayList)rc.getResult();
           }
           catch (Exception ex) {
-            getLogWriter().info("Exception Occured :" + ex.getMessage());
-            fail("Test failed",ex);
+            LogWriterUtils.getLogWriter().info("Exception Occured :" + ex.getMessage());
+            Assert.fail("Test failed",ex);
           }
           Object val = list.get(0);
           assertNotNull(val);
@@ -277,7 +279,7 @@ public class PRPerformanceTestDUnitTest extends
         }
         
         t.stop();        
-        getLogWriter().info("Time taken to iterate over " + vals.size()+ " no. of keys: " + t.getTimeInMs() + " ms");
+        LogWriterUtils.getLogWriter().info("Time taken to iterate over " + vals.size()+ " no. of keys: " + t.getTimeInMs() + " ms");
                 
         // Call the execute method for each key and see if this takes more time
 
@@ -304,7 +306,7 @@ public class PRPerformanceTestDUnitTest extends
         }
         t.stop();
         assertEquals(vals.size(),listOfKeys.size());            
-        getLogWriter().info("Time taken to iterate over " + vals.size()+ " no. of keys using FunctionExecution: " + t.getTimeInMs() + " ms");
+        LogWriterUtils.getLogWriter().info("Time taken to iterate over " + vals.size()+ " no. of keys using FunctionExecution: " + t.getTimeInMs() + " ms");
         
       }
     });

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRTransactionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRTransactionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRTransactionDUnitTest.java
index 75a9f20..b253dc0 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRTransactionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/PRTransactionDUnitTest.java
@@ -43,6 +43,9 @@ import com.gemstone.gemfire.internal.cache.execute.data.Order;
 import com.gemstone.gemfire.internal.cache.execute.data.OrderId;
 import com.gemstone.gemfire.internal.cache.execute.data.Shipment;
 import com.gemstone.gemfire.internal.cache.execute.data.ShipmentId;
+import com.gemstone.gemfire.test.dunit.Assert;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 
 /**
@@ -152,7 +155,7 @@ public class PRTransactionDUnitTest extends PRColocationDUnitTest {
         filter.clear();
         args.clear();
         args.add(new Integer(VERIFY_NON_COLOCATION));
-        getLogWriter().info("VERIFY_NON_COLOCATION");
+        LogWriterUtils.getLogWriter().info("VERIFY_NON_COLOCATION");
         args.add(custId);
         args.add(newCus);
         args.add(orderId);
@@ -164,7 +167,7 @@ public class PRTransactionDUnitTest extends PRColocationDUnitTest {
           fail("Expected exception was not thrown");
         }
         catch (FunctionException fe) {
-          getLogWriter().info("Caught Expected exception");
+          LogWriterUtils.getLogWriter().info("Caught Expected exception");
           if(fe.getCause() instanceof TransactionDataNotColocatedException) {
           }
           else {
@@ -175,7 +178,7 @@ public class PRTransactionDUnitTest extends PRColocationDUnitTest {
         }
         // verify that the transaction modifications are applied        
         args.set(0, new Integer(VERIFY_TX));
-        getLogWriter().info("VERIFY_TX");        
+        LogWriterUtils.getLogWriter().info("VERIFY_TX");        
         orderpr.put(orderId, order);
         assertNotNull(orderpr.get(orderId));
         e.withFilter(filter).withArgs(args).execute(txFunction.getId())
@@ -191,17 +194,17 @@ public class PRTransactionDUnitTest extends PRColocationDUnitTest {
             .getResult();
         // verify that the transaction is rolled back        
         args.set(0, new Integer(VERIFY_ROLLBACK));
-        getLogWriter().info("VERIFY_ROLLBACK");        
+        LogWriterUtils.getLogWriter().info("VERIFY_ROLLBACK");        
         e.withFilter(filter).withArgs(args).execute(txFunction.getId())
             .getResult();
         // verify destroy
         args.set(0, new Integer(VERIFY_DESTROY));
-        getLogWriter().info("VERIFY_DESTROY");
+        LogWriterUtils.getLogWriter().info("VERIFY_DESTROY");
         e.withFilter(filter).withArgs(args).execute(txFunction.getId())
             .getResult();
         // verify invalidate
         args.set(0, new Integer(VERIFY_INVALIDATE));
-        getLogWriter().info("VERIFY_INVALIDATE");
+        LogWriterUtils.getLogWriter().info("VERIFY_INVALIDATE");
         e.withFilter(filter).withArgs(args).execute(txFunction.getId())
             .getResult();
         return Boolean.TRUE;
@@ -389,7 +392,7 @@ public class PRTransactionDUnitTest extends PRColocationDUnitTest {
           .getRegion(Region.SEPARATOR + OrderPartitionedRegionName);
     }
     catch (Exception e) {
-      fail(
+      Assert.fail(
           "validateAfterPutPartitionedRegion : failed while getting the region",
           e);
     }
@@ -400,7 +403,7 @@ public class PRTransactionDUnitTest extends PRColocationDUnitTest {
     orderPartitionedregion.getDataStore().dumpEntries(false);
     Iterator custIterator = customerPartitionedregion.getDataStore()
         .getEntries().iterator();
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Found " + customerPartitionedregion.getDataStore().getEntries().size()
             + " Customer entries in the partition");
     Region.Entry custEntry = null;
@@ -410,7 +413,7 @@ public class PRTransactionDUnitTest extends PRColocationDUnitTest {
       Customer cust = (Customer)custEntry.getValue();
       Iterator orderIterator = orderPartitionedregion.getDataStore()
           .getEntries().iterator();
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           "Found " + orderPartitionedregion.getDataStore().getEntries().size()
               + " Order entries in the partition");
       int orderPerCustomer = 0;
@@ -465,7 +468,7 @@ public class PRTransactionDUnitTest extends PRColocationDUnitTest {
         filter.clear();
         args.clear();
         args.add(new Integer(VERIFY_LISTENER_CALLBACK));
-        getLogWriter().info("VERIFY_LISTENER_CALLBACK");
+        LogWriterUtils.getLogWriter().info("VERIFY_LISTENER_CALLBACK");
         args.add(custId);
         args.add(newCus);
         args.add(orderId);
@@ -513,7 +516,7 @@ public class PRTransactionDUnitTest extends PRColocationDUnitTest {
         filter.clear();
         args.clear();
         args.add(new Integer(VERIFY_REP_READ));
-        getLogWriter().info("VERIFY_REP_READ");
+        LogWriterUtils.getLogWriter().info("VERIFY_REP_READ");
         args.add(custId);
         args.add(newCus);
         args.add(orderId);
@@ -558,7 +561,7 @@ public class PRTransactionDUnitTest extends PRColocationDUnitTest {
         Execution e = FunctionService.onRegion(customerPR);
         // for each customer, update order and shipment
         for (int iterations = 1; iterations <= totalIterations; iterations++) {
-          getLogWriter().info("running perfFunction");
+          LogWriterUtils.getLogWriter().info("running perfFunction");
           long startTime = 0;
           ArrayList args = new ArrayList();
           CustId custId = new CustId(iterations % 10);
@@ -599,7 +602,7 @@ public class PRTransactionDUnitTest extends PRColocationDUnitTest {
         Execution e = FunctionService.onRegion(customerPR);
         // for each customer, update order and shipment
         for (int iterations = 1; iterations <= totalIterations; iterations++) {
-          getLogWriter().info("Running perfFunction");
+          LogWriterUtils.getLogWriter().info("Running perfFunction");
           long startTime = 0;
           ArrayList args = new ArrayList();
           CustId custId = new CustId(iterations % 10);
@@ -635,7 +638,7 @@ public class PRTransactionDUnitTest extends PRColocationDUnitTest {
     double diff = (perfTime.longValue() - perfTxTime.longValue()) * 1.0;
     double percentDiff = (diff / perfTime.longValue()) * 100;
 
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         (totalIterations - warmupIterations) + " iterations of function took:"
             + +perfTime.longValue() + " Nanos, and transaction function took:"
             + perfTxTime.longValue() + " Nanos, difference :" + diff
@@ -677,12 +680,9 @@ public class PRTransactionDUnitTest extends PRColocationDUnitTest {
   public void testColocatedPRWithPROnDifferentNode1() throws Throwable {
   }
   
-  public void tearDown2() throws Exception {
-    try {
-      invokeInEveryVM(verifyNoTxState);
-    } finally {
-      super.tearDown2();
-    }
+  @Override
+  protected final void preTearDownCacheTestCase() throws Exception {
+    Invoke.invokeInEveryVM(verifyNoTxState);
   }
 
   SerializableCallable verifyNoTxState = new SerializableCallable() {
@@ -715,11 +715,11 @@ public class PRTransactionDUnitTest extends PRColocationDUnitTest {
         }
         catch (Exception e) {
           // mgr.rollback();
-          fail(" failed while doing put operation in CacheListener ", e);
+          Assert.fail(" failed while doing put operation in CacheListener ", e);
         }
       }
       mgr.commit();
-      getLogWriter().info("COMMIT completed");
+      LogWriterUtils.getLogWriter().info("COMMIT completed");
     }
   }
 

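The teardown change in PRTransactionDUnitTest (and in PRColocationDUnitTest earlier) swaps the tearDown2 override for the CacheTestCase template hooks, so the manual super.tearDown2() call disappears and cross-VM cleanup goes through Invoke. A minimal sketch of the shape, with the runnable body left as a placeholder; the ordering of the hooks relative to cache teardown is inferred from the naming alone.

    import com.gemstone.gemfire.test.dunit.Invoke;
    import com.gemstone.gemfire.test.dunit.SerializableRunnable;

    // Sketch only; lives in a subclass of CacheTestCase, as the tests above do.
    @Override
    protected final void preTearDownCacheTestCase() throws Exception {
      // Per-VM verification or cleanup that used to sit in tearDown2().
      Invoke.invokeInEveryVM(new SerializableRunnable() {
        public void run() {
          // ... verify or reset per-VM state ...
        }
      });
    }
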
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/SingleHopGetAllPutAllDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/SingleHopGetAllPutAllDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/SingleHopGetAllPutAllDUnitTest.java
index 5ed07ab..33e22f4 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/SingleHopGetAllPutAllDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/execute/SingleHopGetAllPutAllDUnitTest.java
@@ -31,7 +31,9 @@ import com.gemstone.gemfire.cache.client.internal.ClientPartitionAdvisor;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.LocalRegion;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 public class SingleHopGetAllPutAllDUnitTest extends PRClientServerTestBase{
 
@@ -91,12 +93,12 @@ public class SingleHopGetAllPutAllDUnitTest extends PRClientServerTestBase{
       // check if the function was routed to pruned nodes
       Map resultMap = region.getAll(testKeyList);
       assertTrue(resultMap.equals(origVals));
-      pause(2000);
+      Wait.pause(2000);
       Map secondResultMap = region.getAll(testKeyList);
       assertTrue(secondResultMap.equals(origVals));
     }
     catch (Exception e) {
-      fail("Test failed after the getAll operation", e);
+      Assert.fail("Test failed after the getAll operation", e);
     }
   }
   
@@ -119,7 +121,7 @@ public class SingleHopGetAllPutAllDUnitTest extends PRClientServerTestBase{
             + regionMetaData.size();
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 5000, 200, true);
+    Wait.waitForCriterion(wc, 5000, 200, true);
     assertTrue(regionMetaData.containsKey(region.getFullPath()));
     final ClientPartitionAdvisor prMetaData = regionMetaData.get(region.getFullPath());
     wc = new WaitCriterion() {
@@ -133,7 +135,7 @@ public class SingleHopGetAllPutAllDUnitTest extends PRClientServerTestBase{
             + prMetaData.getBucketServerLocationsMap_TEST_ONLY().size();
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 5000, 200, true);
+    Wait.waitForCriterion(wc, 5000, 200, true);
     for (Entry entry : prMetaData.getBucketServerLocationsMap_TEST_ONLY()
         .entrySet()) {
       assertEquals(2, ((List)entry.getValue()).size());
@@ -167,7 +169,7 @@ public class SingleHopGetAllPutAllDUnitTest extends PRClientServerTestBase{
       region.putAll(keysValuesMap);
       // check the listener
       // check how the function was executed
-      pause(2000);
+      Wait.pause(2000);
       region.putAll(keysValuesMap);
       
       // check if the client meta-data is in synch
@@ -176,7 +178,7 @@ public class SingleHopGetAllPutAllDUnitTest extends PRClientServerTestBase{
       // check if the function was routed to pruned nodes
       Map<String, String> resultMap = region.getAll(testKeysList);
       assertTrue(resultMap.equals(keysValuesMap));
-      pause(2000);
+      Wait.pause(2000);
       Map<String, String> secondResultMap = region.getAll(testKeysList);
       assertTrue(secondResultMap.equals(keysValuesMap));
 
@@ -187,17 +189,12 @@ public class SingleHopGetAllPutAllDUnitTest extends PRClientServerTestBase{
         noValueMap.put(key, null);
       }
       assertEquals(noValueMap, region.getAll(testKeysList));
-      pause(2000); // Why does this test keep pausing for 2 seconds and then do the exact same thing?
+      Wait.pause(2000); // Why does this test keep pausing for 2 seconds and then do the exact same thing?
       region.removeAll(testKeysList);
       assertEquals(noValueMap, region.getAll(testKeysList));
     }
     catch (Exception e) {
-      fail("Test failed after the putAll operation", e);
+      Assert.fail("Test failed after the putAll operation", e);
     }
   }
-  
-  @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-  }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/functions/DistributedRegionFunction.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/functions/DistributedRegionFunction.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/functions/DistributedRegionFunction.java
index 030980d..b8343ee 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/functions/DistributedRegionFunction.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/functions/DistributedRegionFunction.java
@@ -24,8 +24,8 @@ import com.gemstone.gemfire.cache.execute.FunctionContext;
 import com.gemstone.gemfire.cache.execute.RegionFunctionContext;
 import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
 import com.gemstone.gemfire.internal.Assert;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 @SuppressWarnings("serial")
 public class DistributedRegionFunction extends FunctionAdapter {
@@ -65,7 +65,7 @@ public class DistributedRegionFunction extends FunctionAdapter {
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 12000, 500, false);
+      Wait.waitForCriterion(wc, 12000, 500, false);
     }
     long endTime = System.currentTimeMillis();
 

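DistributedRegionFunction now imports WaitCriterion as a top-level dunit type rather than the old nested DistributedTestCase.WaitCriterion, paired with Wait.waitForCriterion. A minimal sketch of the polling idiom; the done()/description() method names, the flag-based predicate, and the meaning of the trailing boolean (whether a timeout fails the test) are assumptions read off the existing usages rather than stated in this patch.

    import java.util.concurrent.atomic.AtomicBoolean;

    import com.gemstone.gemfire.test.dunit.Wait;
    import com.gemstone.gemfire.test.dunit.WaitCriterion;

    // Sketch only; waitForFlag is a hypothetical helper illustrating the idiom.
    void waitForFlag(final AtomicBoolean flag) {
      WaitCriterion wc = new WaitCriterion() {
        public boolean done() {
          return flag.get();               // condition being polled
        }
        public String description() {
          return "flag was never set";     // reported if the wait gives up
        }
      };
      // 12 s timeout, 500 ms polling interval, do not fail the test on timeout (assumed semantics).
      Wait.waitForCriterion(wc, 12000, 500, false);
    }
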
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/functions/TestFunction.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/functions/TestFunction.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/functions/TestFunction.java
index 95a9da9..e789973 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/functions/TestFunction.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/functions/TestFunction.java
@@ -41,8 +41,8 @@ import com.gemstone.gemfire.internal.cache.execute.InternalFunctionInvocationTar
 import com.gemstone.gemfire.internal.cache.execute.MyFunctionExecutionException;
 import com.gemstone.gemfire.internal.cache.execute.RegionFunctionContextImpl;
 import com.gemstone.gemfire.internal.cache.xmlcache.Declarable2;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 import java.io.Serializable;
 import java.util.ArrayList;
@@ -797,7 +797,7 @@ public class TestFunction extends FunctionAdapter implements Declarable2 {
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 15000, 1000, false);
+    Wait.waitForCriterion(wc, 15000, 1000, false);
     if (context.getArguments() instanceof Boolean) {
       context.getResultSender().lastResult((Serializable) context.getArguments());
     }
@@ -911,7 +911,7 @@ public class TestFunction extends FunctionAdapter implements Declarable2 {
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 10000, 500, false);
+    Wait.waitForCriterion(wc, 10000, 500, false);
     rcontext.getResultSender().lastResult((Serializable) rcontext.getArguments());
   }
   

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/BlockingHARQAddOperationJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/BlockingHARQAddOperationJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/BlockingHARQAddOperationJUnitTest.java
index 04f7f60..f299391 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/BlockingHARQAddOperationJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/BlockingHARQAddOperationJUnitTest.java
@@ -31,7 +31,8 @@ import org.junit.experimental.categories.Category;
 import com.gemstone.gemfire.cache.CacheException;
 import com.gemstone.gemfire.internal.cache.Conflatable;
 import com.gemstone.gemfire.internal.cache.EventID;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
+import com.gemstone.gemfire.test.dunit.Wait;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 /**
@@ -152,7 +153,7 @@ public class BlockingHARQAddOperationJUnitTest extends
     };
 
     takeThread.start();
-    DistributedTestCase.staticPause(20 * 1000);
+    Wait.pause(20 * 1000);
     if (!takeThread.isAlive()) {
       fail("take() thread died ");
     }
@@ -160,7 +161,7 @@ public class BlockingHARQAddOperationJUnitTest extends
     ConflatableObject c1 = new ConflatableObject(KEY1, VALUE1, id1,
         conflationEnabled, "region1");
     rq.put(c1);
-    DistributedTestCase.join(takeThread, 20 * 1000, null);
+    ThreadUtils.join(takeThread, 20 * 1000);
     assertEquals(1, takenObjects.size());
     Conflatable obj = (Conflatable)takenObjects.get(0);
     assertNotNull(obj);
@@ -220,7 +221,7 @@ public class BlockingHARQAddOperationJUnitTest extends
       rq.put(c);
     }
     for (int i = 0; i < totalTakeThreads; i++) {
-      DistributedTestCase.join(takeThreads[i], 20 * 1000, null);
+      ThreadUtils.join(takeThreads[i], 20 * 1000);
     }
 
     assertEquals(totalTakeThreads, takenObjects.size());

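This file also shows the two thread helpers moving out of the old base class: the three-argument DistributedTestCase.join(thread, ms, logWriter) becomes ThreadUtils.join(thread, ms), and staticPause becomes Wait.pause. A small sketch of the replacement calls, with hypothetical names (runAndJoin, work, worker) and the signatures taken from the hunks above:

    import com.gemstone.gemfire.test.dunit.ThreadUtils;
    import com.gemstone.gemfire.test.dunit.Wait;

    public class JoinMigrationSketch {
      static void runAndJoin(Runnable work) {
        Thread worker = new Thread(work, "worker");
        worker.start();

        // Wait.pause replaces the old DistributedTestCase.staticPause helper.
        Wait.pause(1000);

        // Join with a 20 second bound; the LogWriter argument of the old
        // DistributedTestCase.join overload is gone.
        ThreadUtils.join(worker, 20 * 1000);
      }
    }
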
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/BlockingHARegionJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/BlockingHARegionJUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/BlockingHARegionJUnitTest.java
index e7b5314..9e79239 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/BlockingHARegionJUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/BlockingHARegionJUnitTest.java
@@ -32,8 +32,9 @@ import com.gemstone.gemfire.cache.CacheFactory;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.cache.EventID;
 import com.gemstone.gemfire.internal.AvailablePort;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase.WaitCriterion;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 @Category(IntegrationTest.class)
@@ -76,8 +77,8 @@ public class BlockingHARegionJUnitTest
       thread1.start();
       thread2.start();
 
-      DistributedTestCase.join(thread1, 30 * 1000, null);
-      DistributedTestCase.join(thread2, 30 * 1000, null);
+      ThreadUtils.join(thread1, 30 * 1000);
+      ThreadUtils.join(thread2, 30 * 1000);
 
       if (exceptionOccured) {
         fail(" Test failed due to " + exceptionString);
@@ -124,7 +125,7 @@ public class BlockingHARegionJUnitTest
           return null;
         }
       };
-      DistributedTestCase.waitForCriterion(ev, 1000, 200, true);
+      Wait.waitForCriterion(ev, 1000, 200, true);
       assertTrue(thread1.isAlive()); //thread should still be alive (in wait state)
       
       Thread thread2 = new DoTake(hrq,1);
@@ -138,7 +139,7 @@ public class BlockingHARegionJUnitTest
         }
       };
       //sleep. take will proceed and so will sleeping put
-      DistributedTestCase.waitForCriterion(ev, 3 * 1000, 200, true);
+      Wait.waitForCriterion(ev, 3 * 1000, 200, true);
 
       // thread should have died since put should have proceeded
       ev = new WaitCriterion() {
@@ -149,10 +150,10 @@ public class BlockingHARegionJUnitTest
           return "thread1 still alive";
         }
       };
-      DistributedTestCase.waitForCriterion(ev, 30 * 1000, 1000, true);
+      Wait.waitForCriterion(ev, 30 * 1000, 1000, true);
       
-      DistributedTestCase.join(thread1, 30 * 1000, null); // for completeness
-      DistributedTestCase.join(thread2, 30 * 1000, null);
+      ThreadUtils.join(thread1, 30 * 1000); // for completeness
+      ThreadUtils.join(thread2, 30 * 1000);
       if (exceptionOccured) {
         fail(" Test failed due to " + exceptionString);
       }
@@ -204,7 +205,7 @@ public class BlockingHARegionJUnitTest
           return null;
         }
       };
-      DistributedTestCase.waitForCriterion(ev, 30 * 1000, 200, true);
+      Wait.waitForCriterion(ev, 30 * 1000, 200, true);
       
       assertTrue(thread1.isAlive());
       assertTrue(thread2.isAlive());
@@ -225,11 +226,11 @@ public class BlockingHARegionJUnitTest
       
       Thread.sleep(2000);
       
-      DistributedTestCase.join(thread1, 5 * 60 * 1000, null);
-      DistributedTestCase.join(thread2, 5 * 60 * 1000, null);
-      DistributedTestCase.join(thread3, 5 * 60 * 1000, null);
-      DistributedTestCase.join(thread4, 5 * 60 * 1000, null);
-      DistributedTestCase.join(thread5, 5 * 60 * 1000, null);
+      ThreadUtils.join(thread1, 5 * 60 * 1000);
+      ThreadUtils.join(thread2, 5 * 60 * 1000);
+      ThreadUtils.join(thread3, 5 * 60 * 1000);
+      ThreadUtils.join(thread4, 5 * 60 * 1000);
+      ThreadUtils.join(thread5, 5 * 60 * 1000);
       
       cache.close();
     }
@@ -282,11 +283,11 @@ public class BlockingHARegionJUnitTest
       thread9.start();
       thread10.start();
       
-      DistributedTestCase.join(thread6, 30 * 1000, null);
-      DistributedTestCase.join(thread7, 30 * 1000, null);
-      DistributedTestCase.join(thread8, 30 * 1000, null);
-      DistributedTestCase.join(thread9, 30 * 1000, null);
-      DistributedTestCase.join(thread10, 30 * 1000, null);
+      ThreadUtils.join(thread6, 30 * 1000);
+      ThreadUtils.join(thread7, 30 * 1000);
+      ThreadUtils.join(thread8, 30 * 1000);
+      ThreadUtils.join(thread9, 30 * 1000);
+      ThreadUtils.join(thread10, 30 * 1000);
       
       WaitCriterion ev = new WaitCriterion() {
         public boolean done() {
@@ -296,7 +297,7 @@ public class BlockingHARegionJUnitTest
           return null;
         }
       };
-      DistributedTestCase.waitForCriterion(ev, 30 * 1000, 200, true);
+      Wait.waitForCriterion(ev, 30 * 1000, 200, true);
       
       assertTrue(thread1.isAlive());
       assertTrue(thread2.isAlive());
@@ -319,11 +320,11 @@ public class BlockingHARegionJUnitTest
       Thread.sleep(2000);
       
       
-      DistributedTestCase.join(thread1, 30 * 1000, null);
-      DistributedTestCase.join(thread2, 30 * 1000, null);
-      DistributedTestCase.join(thread3, 30 * 1000, null);
-      DistributedTestCase.join(thread4, 30 * 1000, null);
-      DistributedTestCase.join(thread5, 30 * 1000, null);
+      ThreadUtils.join(thread1, 30 * 1000);
+      ThreadUtils.join(thread2, 30 * 1000);
+      ThreadUtils.join(thread3, 30 * 1000);
+      ThreadUtils.join(thread4, 30 * 1000);
+      ThreadUtils.join(thread5, 30 * 1000);
       
       cache.close();
     }
@@ -377,7 +378,7 @@ public class BlockingHARegionJUnitTest
         }
       };
       t1.start();
-      DistributedTestCase.join(t1, 20 * 1000, null);
+      ThreadUtils.join(t1, 20 * 1000);
       if (exceptionOccured) {
         fail(" Test failed due to " + exceptionString);
       }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/Bug36853EventsExpiryDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/Bug36853EventsExpiryDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/Bug36853EventsExpiryDUnitTest.java
index 2c319cd..09e0fbf 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/Bug36853EventsExpiryDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/Bug36853EventsExpiryDUnitTest.java
@@ -36,6 +36,9 @@ import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.tier.sockets.ConflationDUnitTest;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -111,7 +114,7 @@ public class Bug36853EventsExpiryDUnitTest extends CacheTestCase
         "createServerCache")).intValue();
 
     client.invoke(Bug36853EventsExpiryDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(host), new Integer(PORT2) });
+        new Object[] { NetworkUtils.getServerHostName(host), new Integer(PORT2) });
 
   }
 
@@ -177,11 +180,11 @@ public class Bug36853EventsExpiryDUnitTest extends CacheTestCase
       public void afterCreate(EntryEvent event)
       {
         String key = (String)event.getKey();
-        getLogWriter().info("client2 : afterCreate : key =" + key);
+        LogWriterUtils.getLogWriter().info("client2 : afterCreate : key =" + key);
         if (key.equals(LAST_KEY)) {
 
           synchronized (Bug36853EventsExpiryDUnitTest.class) {
-            getLogWriter().info(
+            LogWriterUtils.getLogWriter().info(
                 "Notifying client2 to proceed for validation");
             proceedForValidation = true;
             Bug36853EventsExpiryDUnitTest.class.notify();
@@ -239,8 +242,8 @@ public class Bug36853EventsExpiryDUnitTest extends CacheTestCase
    */
   public void testEventsExpiryBug() throws Exception
   {
-    addExpectedException("Unexpected IOException");
-    addExpectedException("Connection reset");
+    IgnoredException.addIgnoredException("Unexpected IOException");
+    IgnoredException.addIgnoredException("Connection reset");
     server.invoke(Bug36853EventsExpiryDUnitTest.class, "generateEvents");
     client.invoke(Bug36853EventsExpiryDUnitTest.class,
         "validateEventCountAtClient");
@@ -256,7 +259,7 @@ public class Bug36853EventsExpiryDUnitTest extends CacheTestCase
       synchronized (Bug36853EventsExpiryDUnitTest.class) {
         if (!proceedForValidation)
           try {
-            getLogWriter().info(
+            LogWriterUtils.getLogWriter().info(
                 "Client2 going in wait before starting validation");
             Bug36853EventsExpiryDUnitTest.class.wait(5000);
           }
@@ -265,13 +268,13 @@ public class Bug36853EventsExpiryDUnitTest extends CacheTestCase
           }
       }
     }
-    getLogWriter().info("Starting validation on client2");
+    LogWriterUtils.getLogWriter().info("Starting validation on client2");
     Assert.assertEquals(
         "Puts recieved by client not equal to the puts done at server.",
         TOTAL_PUTS, putsRecievedByClient);
-    getLogWriter()
+    LogWriterUtils.getLogWriter()
         .info("putsRecievedByClient = " + putsRecievedByClient);
-    getLogWriter().info("Validation complete on client2");
+    LogWriterUtils.getLogWriter().info("Validation complete on client2");
 
   }
 
@@ -291,7 +294,8 @@ public class Bug36853EventsExpiryDUnitTest extends CacheTestCase
    * @throws Exception -
    *           thrown if any problem occurs in closing client and server caches.
    */
-  public void tearDown2() throws Exception
+  @Override
+  protected final void preTearDownCacheTestCase() throws Exception
   {
     // close client
     client.invoke(Bug36853EventsExpiryDUnitTest.class, "closeCache");

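Two other recurring changes appear in this file: the public tearDown2() hook is replaced by an overridden preTearDownCacheTestCase() (preTearDown() for plain DistributedTestCase subclasses), presumably invoked by the base class before its own cleanup, and expected-exception suppression moves to IgnoredException. A hedged sketch of the resulting test shape, with the class name and test body hypothetical:

    import com.gemstone.gemfire.cache30.CacheTestCase;
    import com.gemstone.gemfire.test.dunit.IgnoredException;

    public class TearDownMigrationSketch extends CacheTestCase {

      public TearDownMigrationSketch(String name) {
        super(name);
      }

      public void testSomethingNoisy() {
        // Suppression is now requested through IgnoredException rather than
        // the inherited addExpectedException helper.
        IgnoredException.addIgnoredException("Connection reset");
        // ... test body ...
      }

      // Test-specific cleanup moves from tearDown2() into this hook.
      @Override
      protected final void preTearDownCacheTestCase() throws Exception {
        // close the client and server caches created by the test here
      }
    }
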
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/Bug48571DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/Bug48571DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/Bug48571DUnitTest.java
index 6278d9e..66f1a11 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/Bug48571DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/Bug48571DUnitTest.java
@@ -39,8 +39,12 @@ import com.gemstone.gemfire.internal.cache.tier.sockets.CacheClientNotifier;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheClientProxy;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 public class Bug48571DUnitTest extends DistributedTestCase {
 
@@ -65,7 +69,8 @@ public class Bug48571DUnitTest extends DistributedTestCase {
     client = host.getVM(1);
   }
   
-  public void tearDown2() throws Exception {
+  @Override
+  protected final void preTearDown() throws Exception {
     reset();
     server.invoke(Bug48571DUnitTest.class, "reset");
     client.invoke(Bug48571DUnitTest.class, "reset");
@@ -83,7 +88,7 @@ public class Bug48571DUnitTest extends DistributedTestCase {
   }
 
   public void testStatsMatchWithSize() throws Exception {
-    addExpectedException("Unexpected IOException||Connection reset");
+    IgnoredException.addIgnoredException("Unexpected IOException||Connection reset");
     // start a server
     int port = (Integer) server.invoke(Bug48571DUnitTest.class, "createServerCache");
     // create durable client, with durable RI
@@ -105,7 +110,7 @@ public class Bug48571DUnitTest extends DistributedTestCase {
 
   public static int createServerCache() throws Exception {
     Properties props = new Properties();
-    props.setProperty("locators", "localhost["+getDUnitLocatorPort()+"]");
+    props.setProperty("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
     props.setProperty("log-file", "server_" + OSProcess.getId() + ".log");
     props.setProperty("log-level", "info");
     props.setProperty("statistic-archive-file", "server_" + OSProcess.getId()
@@ -239,7 +244,7 @@ public class Bug48571DUnitTest extends DistributedTestCase {
         return "Did not receive last key.";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60*1000, 500, true);
+    Wait.waitForCriterion(wc, 60*1000, 500, true);
   }
 
 

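The locator lookup in this file moves from the inherited getDUnitLocatorPort() to the extracted DistributedTestUtils helper. A minimal sketch of building the distributed-system properties that way, with serverProperties() a hypothetical helper name:

    import java.util.Properties;

    import com.gemstone.gemfire.test.dunit.DistributedTestUtils;

    public class LocatorPropsSketch {
      // Build the properties for a server that should join the dunit locator,
      // using the static DistributedTestUtils.getDUnitLocatorPort() call shown
      // in the hunk above.
      static Properties serverProperties() {
        Properties props = new Properties();
        props.setProperty("locators",
            "localhost[" + DistributedTestUtils.getDUnitLocatorPort() + "]");
        props.setProperty("log-level", "info");
        return props;
      }
    }
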
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/Bug48879DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/Bug48879DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/Bug48879DUnitTest.java
index 0457ce1..c7ff13a 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/Bug48879DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/Bug48879DUnitTest.java
@@ -67,7 +67,8 @@ public class Bug48879DUnitTest extends DistributedTestCase {
     createClientCache(host, new Integer[] {port0, port1}, Boolean.TRUE);
   }
 
-  public void tearDown2() throws Exception {
+  @Override
+  protected final void preTearDown() throws Exception {
     closeCache();
 
     vm0.invoke(Bug48879DUnitTest.class, "closeCache");

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/EventIdOptimizationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/EventIdOptimizationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/EventIdOptimizationDUnitTest.java
index fe1edb0..086b956 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/EventIdOptimizationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/EventIdOptimizationDUnitTest.java
@@ -43,6 +43,8 @@ import com.gemstone.gemfire.internal.cache.EntryEventImpl;
 import com.gemstone.gemfire.internal.cache.EventID;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 import com.gemstone.gemfire.cache.client.internal.ServerRegionProxy;
 import com.gemstone.gemfire.cache.client.internal.Connection;
@@ -173,9 +175,9 @@ public class EventIdOptimizationDUnitTest extends DistributedTestCase
         "createServerCache")).intValue();
 
     client1.invoke(EventIdOptimizationDUnitTest.class, "createClientCache1",
-        new Object[] { getServerHostName(host), new Integer(PORT1) });
+        new Object[] { NetworkUtils.getServerHostName(host), new Integer(PORT1) });
     client2.invoke(EventIdOptimizationDUnitTest.class, "createClientCache2",
-        new Object[] { getServerHostName(host), new Integer(PORT2) });
+        new Object[] { NetworkUtils.getServerHostName(host), new Integer(PORT2) });
 
   }
 
@@ -452,7 +454,7 @@ public class EventIdOptimizationDUnitTest extends DistributedTestCase
       synchronized (EventIdOptimizationDUnitTest.class) {
         if (!proceedForValidation)
           try {
-            getLogWriter().info(
+            LogWriterUtils.getLogWriter().info(
                 "Client2 going in wait before starting validation");
             EventIdOptimizationDUnitTest.class.wait();
           }
@@ -461,12 +463,12 @@ public class EventIdOptimizationDUnitTest extends DistributedTestCase
           }
       }
     }
-    getLogWriter().info("Starting validation on client2");
+    LogWriterUtils.getLogWriter().info("Starting validation on client2");
     if (validationFailed) {
       fail("\n The following eventIds recieved by client2 were not present in the eventId array sent by client1 \n"
           + failureMsg);
     }
-    getLogWriter().info("Validation complete on client2, goin to unregister listeners");
+    LogWriterUtils.getLogWriter().info("Validation complete on client2, goin to unregister listeners");
     
     Region region = cache.getRegion(Region.SEPARATOR + REGION_NAME);
     if (region != null && !region.isDestroyed()) {
@@ -490,7 +492,7 @@ public class EventIdOptimizationDUnitTest extends DistributedTestCase
       }
     }
     
-    getLogWriter().info("Test completed, Unregistered the listeners");
+    LogWriterUtils.getLogWriter().info("Test completed, Unregistered the listeners");
   }
 
   /**
@@ -508,15 +510,14 @@ public class EventIdOptimizationDUnitTest extends DistributedTestCase
   /**
    * Closes the caches on clients and servers
    */
-  public void tearDown2() throws Exception
-  {
+  @Override
+  protected final void preTearDown() throws Exception {
     // close client
     client1.invoke(EventIdOptimizationDUnitTest.class, "closeCache");
     client2.invoke(EventIdOptimizationDUnitTest.class, "closeCache");
     // close server
     server1.invoke(EventIdOptimizationDUnitTest.class, "closeCache");
     server2.invoke(EventIdOptimizationDUnitTest.class, "closeCache");
-
   }
 
   /**
@@ -570,7 +571,7 @@ public class EventIdOptimizationDUnitTest extends DistributedTestCase
         && (eventIdAtClient2.getSequenceID() == eventIdForLastKey
             .getSequenceID())) {
       synchronized (EventIdOptimizationDUnitTest.class) {
-        getLogWriter().info("Notifying client2 to proceed for validation");
+        LogWriterUtils.getLogWriter().info("Notifying client2 to proceed for validation");
         proceedForValidation = true;
         EventIdOptimizationDUnitTest.class.notify();
       }

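This file illustrates the other two extractions that recur throughout the commit: getServerHostName moves to NetworkUtils and getLogWriter to LogWriterUtils. A sketch of connecting a client VM with the new helpers, assuming a per-test createClientCache method like the ones these dunit tests already define (the class and method names here are illustrative only):

    import com.gemstone.gemfire.test.dunit.Host;
    import com.gemstone.gemfire.test.dunit.LogWriterUtils;
    import com.gemstone.gemfire.test.dunit.NetworkUtils;
    import com.gemstone.gemfire.test.dunit.VM;

    public class ClientConnectSketch {
      // Connect a client VM to a server started earlier on the same host.
      static void connectClient(VM client, Host host, int serverPort) {
        String hostName = NetworkUtils.getServerHostName(host);
        LogWriterUtils.getLogWriter().info(
            "connecting client to " + hostName + ":" + serverPort);
        client.invoke(ClientConnectSketch.class, "createClientCache",
            new Object[] { hostName, new Integer(serverPort) });
      }

      // Stand-in for the per-test cache setup invoked reflectively above.
      public static void createClientCache(String host, Integer port) {
        // per-test client cache creation would go here
      }
    }
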
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/FailoverDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/FailoverDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/FailoverDUnitTest.java
index fa07821..1119313 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/FailoverDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/FailoverDUnitTest.java
@@ -39,9 +39,13 @@ import com.gemstone.gemfire.internal.cache.ClientServerObserverAdapter;
 import com.gemstone.gemfire.internal.cache.ClientServerObserverHolder;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheServerTestUtil;
 import com.gemstone.gemfire.internal.cache.tier.sockets.ConflationDUnitTest;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.cache.client.PoolManager;
 import com.gemstone.gemfire.cache.client.internal.PoolImpl;
 
@@ -86,7 +90,7 @@ public class FailoverDUnitTest extends DistributedTestCase
     PORT2 =  ((Integer)vm1.invoke(FailoverDUnitTest.class, "createServerCache" )).intValue();
 
     CacheServerTestUtil.disableShufflingOfEndpoints();
-    createClientCache(getServerHostName(host), new Integer(PORT1),new Integer(PORT2));
+    createClientCache(NetworkUtils.getServerHostName(host), new Integer(PORT1),new Integer(PORT2));
     { // calculate the primary vm
       waitForPrimaryAndBackups(1);
       PoolImpl pool = (PoolImpl)PoolManager.find("FailoverPool");
@@ -188,7 +192,7 @@ public class FailoverDUnitTest extends DistributedTestCase
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 20 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 20 * 1000, 200, true);
     assertNotNull(pool.getPrimary());
     assertTrue("backups="+pool.getRedundants() + " expected=" + numBackups,
                pool.getRedundants().size() >= numBackups);
@@ -206,7 +210,7 @@ public class FailoverDUnitTest extends DistributedTestCase
       r.registerInterest("key-5");
     }
     catch (Exception ex) {
-      fail("failed while registering keys k1 to k5", ex);
+      Assert.fail("failed while registering keys k1 to k5", ex);
     }
   }
 
@@ -224,7 +228,7 @@ public class FailoverDUnitTest extends DistributedTestCase
       r.create("key-5", "key-5");
     }
     catch (Exception ex) {
-      fail("failed while createEntries()", ex);
+      Assert.fail("failed while createEntries()", ex);
     }
   }
 
@@ -255,7 +259,7 @@ public class FailoverDUnitTest extends DistributedTestCase
 
     }
     catch (Exception ex) {
-      fail("failed while r.put()", ex);
+      Assert.fail("failed while r.put()", ex);
     }
   }
 
@@ -271,7 +275,7 @@ public class FailoverDUnitTest extends DistributedTestCase
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 20 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 20 * 1000, 200, true);
 
     assertEquals("value-1", r.getEntry("key-1").getValue());
     assertEquals("value-2", r.getEntry("key-2").getValue());
@@ -298,7 +302,7 @@ public class FailoverDUnitTest extends DistributedTestCase
 
     }
     catch (Exception ex) {
-      fail("failed while r.putDuringFailover()", ex);
+      Assert.fail("failed while r.putDuringFailover()", ex);
     }
   }
 
@@ -314,15 +318,14 @@ public class FailoverDUnitTest extends DistributedTestCase
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 20 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 20 * 1000, 200, true);
     assertEquals("value-5", r.getEntry("key-5").getValue());
     assertEquals("value-4", r.getEntry("key-4").getValue());
   }
 
 
-  public void tearDown2() throws Exception
-  {
-	super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     // close the clients first
     closeCache();
     // then close the servers

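The remaining pattern in this file is the replacement of the inherited two-argument fail(String, Throwable) with the dunit Assert helper, which preserves the original cause in the failure. A short sketch under that assumption, with putSafely a hypothetical wrapper:

    import com.gemstone.gemfire.cache.Region;
    import com.gemstone.gemfire.test.dunit.Assert;

    public class AssertFailSketch {
      // Wrap a cache operation and convert any exception into a test failure
      // that keeps the original cause, using Assert.fail(String, Throwable)
      // as in the updated catch blocks above.
      static void putSafely(Region<String, String> region, String key, String value) {
        try {
          region.put(key, value);
        }
        catch (Exception ex) {
          Assert.fail("failed while r.put()", ex);
        }
      }
    }
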
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HABugInPutDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HABugInPutDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HABugInPutDUnitTest.java
index c356816..02cd880 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HABugInPutDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/ha/HABugInPutDUnitTest.java
@@ -36,6 +36,7 @@ import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.CacheServerImpl;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -96,22 +97,20 @@ public class HABugInPutDUnitTest extends DistributedTestCase
         .intValue();
 
     client1.invoke(HABugInPutDUnitTest.class, "createClientCache", new Object[] {
-        getServerHostName(host), new Integer(PORT1), new Integer(PORT2) });
+        NetworkUtils.getServerHostName(host), new Integer(PORT1), new Integer(PORT2) });
     client2.invoke(HABugInPutDUnitTest.class, "createClientCache", new Object[] {
-        getServerHostName(host), new Integer(PORT1), new Integer(PORT2) });
+        NetworkUtils.getServerHostName(host), new Integer(PORT1), new Integer(PORT2) });
     //Boolean.getBoolean("")
 
   }
 
-  public void tearDown2() throws Exception
-  {
-	super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     client1.invoke(HABugInPutDUnitTest.class, "closeCache");
     client2.invoke(HABugInPutDUnitTest.class, "closeCache");
     // close server
     server1.invoke(HABugInPutDUnitTest.class, "closeCache");
     server2.invoke(HABugInPutDUnitTest.class, "closeCache");
-
   }
 
   public static void closeCache()


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/HAInterestTestCase.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/HAInterestTestCase.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/HAInterestTestCase.java
index a80b21c..d5b6252 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/HAInterestTestCase.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/HAInterestTestCase.java
@@ -41,7 +41,11 @@ import com.gemstone.gemfire.internal.cache.LocalRegion;
 import com.gemstone.gemfire.internal.cache.tier.InterestType;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 
 import java.io.IOException;
@@ -108,11 +112,11 @@ public class HAInterestTestCase extends DistributedTestCase {
     PORT2 = ((Integer) server2.invoke(HAInterestTestCase.class, "createServerCache")).intValue();
     PORT3 = ((Integer) server3.invoke(HAInterestTestCase.class, "createServerCache")).intValue();
     exceptionOccured = false;
-    addExpectedException("java.net.ConnectException: Connection refused: connect");
+    IgnoredException.addIgnoredException("java.net.ConnectException: Connection refused: connect");
   }
 
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDown() throws Exception {
     // close the clients first
     closeCache();
 
@@ -175,7 +179,7 @@ public class HAInterestTestCase extends DistributedTestCase {
         return "waiting for primary";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
+    Wait.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
 
     int primaryPort = pool.getPrimaryPort();
     assertTrue(primaryPort != -1);
@@ -256,7 +260,7 @@ public class HAInterestTestCase extends DistributedTestCase {
         return "waiting for client_k1 refresh from server";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
+    Wait.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
 
     wc = new WaitCriterion() {
       @Override
@@ -272,7 +276,7 @@ public class HAInterestTestCase extends DistributedTestCase {
         return "waiting for client_k2 refresh from server";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
+    Wait.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
   }
 
   public static void verifyDeadAndLiveServers(final int expectedDeadServers, final int expectedLiveServers) {
@@ -286,7 +290,7 @@ public class HAInterestTestCase extends DistributedTestCase {
         return "waiting for pool.getConnectedServerCount() == expectedLiveServer";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
+    Wait.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
   }
 
   public static void putK1andK2() {
@@ -309,7 +313,7 @@ public class HAInterestTestCase extends DistributedTestCase {
           };
           t.start();
           try {
-            DistributedTestCase.join(t, 30 * 1000, getLogWriter());
+            ThreadUtils.join(t, 30 * 1000);
           } catch (Exception ignore) {
             exceptionOccured = true;
           }
@@ -422,7 +426,7 @@ public class HAInterestTestCase extends DistributedTestCase {
         return "waiting for cache.getCacheServers().size() == 1";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
+    Wait.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
 
     CacheServerImpl bs = (CacheServerImpl) cache.getCacheServers().iterator().next();
     assertNotNull(bs);
@@ -440,7 +444,7 @@ public class HAInterestTestCase extends DistributedTestCase {
         return "waiting for ccn.getClientProxies().size() > 0";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
+    Wait.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
 
     wc = new WaitCriterion() {
       Iterator iter_prox;
@@ -462,7 +466,7 @@ public class HAInterestTestCase extends DistributedTestCase {
         return "waiting for CacheClientProxy _messageDispatcher to be alive";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
+    Wait.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
   }
 
   public static void verifyDispatcherIsNotAlive() {
@@ -476,7 +480,7 @@ public class HAInterestTestCase extends DistributedTestCase {
         return "cache.getCacheServers().size() == 1";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
+    Wait.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
 
     CacheServerImpl bs = (CacheServerImpl) cache.getCacheServers().iterator().next();
     assertNotNull(bs);
@@ -494,7 +498,7 @@ public class HAInterestTestCase extends DistributedTestCase {
         return "waiting for ccn.getClientProxies().size() > 0";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
+    Wait.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
 
     Iterator iter_prox = ccn.getClientProxies().iterator();
     if (iter_prox.hasNext()) {
@@ -590,7 +594,7 @@ public class HAInterestTestCase extends DistributedTestCase {
         return "connected server count never became 3";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
+    Wait.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
 
     // close primaryEP
     getPrimaryVM().invoke(HAInterestTestCase.class, "stopServer");
@@ -620,7 +624,7 @@ public class HAInterestTestCase extends DistributedTestCase {
         return "connected server count never became 3";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
+    Wait.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
 
     // close primaryEP
     getPrimaryVM().invoke(HAInterestTestCase.class, "stopServer");
@@ -644,7 +648,7 @@ public class HAInterestTestCase extends DistributedTestCase {
         return "connected server count never became 3";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
+    Wait.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
 
     // close primaryEP
     VM backup = getBackupVM();
@@ -681,7 +685,7 @@ public class HAInterestTestCase extends DistributedTestCase {
         return "Never got three connected servers";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
+    Wait.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
 
     // close secondary EP
     VM result = getBackupVM();
@@ -717,7 +721,7 @@ public class HAInterestTestCase extends DistributedTestCase {
         return "connected server count never became 3";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
+    Wait.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
 
     // close secondary EP
     VM result = getBackupVM();
@@ -767,7 +771,7 @@ public class HAInterestTestCase extends DistributedTestCase {
         return "waiting for cache.getCacheServers().size() == 1";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
+    Wait.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
 
     CacheServerImpl bs = (CacheServerImpl) cache.getCacheServers().iterator().next();
     assertNotNull(bs);
@@ -785,7 +789,7 @@ public class HAInterestTestCase extends DistributedTestCase {
         return "waiting for ccn.getClientProxies().size() > 0";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
+    Wait.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
 
     Iterator iter_prox = ccn.getClientProxies().iterator();
 
@@ -805,7 +809,7 @@ public class HAInterestTestCase extends DistributedTestCase {
           return "waiting for keys of interest to include 2 keys";
         }
       };
-      DistributedTestCase.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
+      Wait.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
 
       Set keysMap = (Set) ccp.cils[RegisterInterestTracker.interestListIndex].getProfile(Region.SEPARATOR + REGION_NAME)
           .getKeysOfInterestFor(ccp.getProxyID());
@@ -827,7 +831,7 @@ public class HAInterestTestCase extends DistributedTestCase {
         return "waiting for cache.getCacheServers().size() == 1";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
+    Wait.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
 
     CacheServerImpl bs = (CacheServerImpl) cache.getCacheServers().iterator().next();
     assertNotNull(bs);
@@ -845,7 +849,7 @@ public class HAInterestTestCase extends DistributedTestCase {
         return "waiting for ccn.getClientProxies().size() > 0";
       }
     };
-    DistributedTestCase.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
+    Wait.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
 
     Iterator iter_prox = ccn.getClientProxies().iterator();
     if (iter_prox.hasNext()) {
@@ -864,7 +868,7 @@ public class HAInterestTestCase extends DistributedTestCase {
           return "waiting for keys of interest to not be null";
         }
       };
-      DistributedTestCase.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
+      Wait.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
 
       Set keysMap = (Set) ccp.cils[RegisterInterestTracker.interestListIndex]
           .getProfile(Region.SEPARATOR + REGION_NAME)

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/HAStartupAndFailoverDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/HAStartupAndFailoverDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/HAStartupAndFailoverDUnitTest.java
index 54ef4c0..01c595c 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/HAStartupAndFailoverDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/HAStartupAndFailoverDUnitTest.java
@@ -38,9 +38,14 @@ import com.gemstone.gemfire.distributed.internal.ServerLocation;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.ClientServerObserverAdapter;
 import com.gemstone.gemfire.internal.cache.ClientServerObserverHolder;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.internal.cache.CacheServerImpl;
 
 /**
@@ -81,8 +86,8 @@ public class HAStartupAndFailoverDUnitTest extends DistributedTestCase
     server2 = host.getVM(1);
     server3 = host.getVM(2);
     
-    addExpectedException("java.io.IOException");
-    addExpectedException("SocketException");
+    IgnoredException.addIgnoredException("java.io.IOException");
+    IgnoredException.addIgnoredException("SocketException");
 
     // start servers first
     PORT1 =  ((Integer) server1.invoke(HAStartupAndFailoverDUnitTest.class, "createServerCache"));
@@ -98,7 +103,7 @@ public class HAStartupAndFailoverDUnitTest extends DistributedTestCase
     public void testPrimaryFailover() throws Exception
     {
 
-      createClientCache(this.getName(), getServerHostName(server1.getHost()));
+      createClientCache(this.getName(), NetworkUtils.getServerHostName(server1.getHost()));
       // primary
       server1.invoke(HAStartupAndFailoverDUnitTest.class, "verifyDispatcherIsAlive");
 
@@ -146,7 +151,7 @@ public class HAStartupAndFailoverDUnitTest extends DistributedTestCase
     public void testExceptionWhileMakingPrimary()throws Exception
     {
 
-      createClientCacheWithIncorrectPrimary(this.getName(), getServerHostName(server1.getHost()));
+      createClientCacheWithIncorrectPrimary(this.getName(), NetworkUtils.getServerHostName(server1.getHost()));
       // failed primary due to incorect host name of the server
 
       // new primary
@@ -175,7 +180,7 @@ public class HAStartupAndFailoverDUnitTest extends DistributedTestCase
     public void testTwoPrimaryFailedOneAfterTheAnother() throws Exception
     {
 
-      createClientCacheWithLargeRetryInterval(this.getName(), getServerHostName(server1.getHost()));
+      createClientCacheWithLargeRetryInterval(this.getName(), NetworkUtils.getServerHostName(server1.getHost()));
       // primary
       server1.invoke(HAStartupAndFailoverDUnitTest.class, "verifyDispatcherIsAlive");
 
@@ -202,7 +207,7 @@ public class HAStartupAndFailoverDUnitTest extends DistributedTestCase
      */
     public void testPrimaryShouldBeNullAndEPListShouldBeEmptyWhenAllServersAreDead() throws Exception
     {
-      createClientCache(this.getName(), getServerHostName(server1.getHost()));
+      createClientCache(this.getName(), NetworkUtils.getServerHostName(server1.getHost()));
       verifyPrimaryShouldNotBeNullAndEPListShouldNotBeEmpty();
       server1.invoke(HAStartupAndFailoverDUnitTest.class, "stopServer");
       server2.invoke(HAStartupAndFailoverDUnitTest.class, "stopServer");
@@ -216,7 +221,7 @@ public class HAStartupAndFailoverDUnitTest extends DistributedTestCase
      */
     public void testCacheClientUpdatersInitiatesFailoverOnPrimaryFailure() throws Exception
     {
-      createClientCacheWithLargeRetryInterval(this.getName(), getServerHostName(server1.getHost()));
+      createClientCacheWithLargeRetryInterval(this.getName(), NetworkUtils.getServerHostName(server1.getHost()));
       server1.invoke(HAStartupAndFailoverDUnitTest.class, "verifyDispatcherIsAlive");
       server2.invoke(HAStartupAndFailoverDUnitTest.class, "verifyDispatcherIsNotAlive");
       server3.invoke(HAStartupAndFailoverDUnitTest.class, "verifyDispatcherIsNotAlive");
@@ -234,7 +239,7 @@ public class HAStartupAndFailoverDUnitTest extends DistributedTestCase
      */
     public void testCacheClientUpdaterInitiatesFailoverOnSecondaryFailure() throws Exception
     {
-      createClientCacheWithLargeRetryInterval(this.getName(), getServerHostName(server1.getHost()));
+      createClientCacheWithLargeRetryInterval(this.getName(), NetworkUtils.getServerHostName(server1.getHost()));
       server1.invoke(HAStartupAndFailoverDUnitTest.class, "verifyDispatcherIsAlive");
       server2.invoke(HAStartupAndFailoverDUnitTest.class, "verifyDispatcherIsNotAlive");
       server3.invoke(HAStartupAndFailoverDUnitTest.class, "verifyDispatcherIsNotAlive");
@@ -253,7 +258,7 @@ public class HAStartupAndFailoverDUnitTest extends DistributedTestCase
     public void testCacheClientUpdaterInitiatesFailoverOnBothPrimaryAndSecondaryFailure() throws Exception
     {
 
-      createClientCacheWithLargeRetryInterval(this.getName(), getServerHostName(server1.getHost()));
+      createClientCacheWithLargeRetryInterval(this.getName(), NetworkUtils.getServerHostName(server1.getHost()));
       server1.invoke(HAStartupAndFailoverDUnitTest.class, "verifyDispatcherIsAlive");
       server2.invoke(HAStartupAndFailoverDUnitTest.class, "verifyDispatcherIsNotAlive");
       server3.invoke(HAStartupAndFailoverDUnitTest.class, "verifyDispatcherIsNotAlive");
@@ -271,7 +276,7 @@ public class HAStartupAndFailoverDUnitTest extends DistributedTestCase
     public void testCacheClientUpdaterInitiatesFailoverOnBothPrimaryAndSecondaryFailureWithServerMonitors() throws Exception
     {
 
-      createClientCache(this.getName(), getServerHostName(server1.getHost()));
+      createClientCache(this.getName(), NetworkUtils.getServerHostName(server1.getHost()));
       server1.invoke(HAStartupAndFailoverDUnitTest.class, "verifyDispatcherIsAlive");
       server2.invoke(HAStartupAndFailoverDUnitTest.class, "verifyDispatcherIsNotAlive");
       server3.invoke(HAStartupAndFailoverDUnitTest.class, "verifyDispatcherIsNotAlive");
@@ -290,7 +295,7 @@ public class HAStartupAndFailoverDUnitTest extends DistributedTestCase
       // create a client with large retry interval for server monitors and no client updater thread
       // so that only cache operation can detect a server failure and should initiate failover
       createClientCacheWithLargeRetryIntervalAndWithoutCallbackConnection(this.getName()
-          , getServerHostName(server1.getHost()));
+          , NetworkUtils.getServerHostName(server1.getHost()));
       server2.invoke(HAStartupAndFailoverDUnitTest.class, "stopServer");
       put();
       verifyDeadAndLiveServers(1,2);
@@ -305,7 +310,7 @@ public class HAStartupAndFailoverDUnitTest extends DistributedTestCase
         r1.put("key-3", "server-value-3");
       }
       catch (Exception ex) {
-        fail("failed while r.put()", ex);
+        Assert.fail("failed while r.put()", ex);
       }
     }
 
@@ -321,7 +326,7 @@ public class HAStartupAndFailoverDUnitTest extends DistributedTestCase
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+    Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
 //     while (proxy.getDeadServers().size() != expectedDeadServers) { // wait until condition is met
 //       assertTrue("Waited over " + maxWaitTime + "for dead servers to become : " + expectedDeadServers ,
 //           //" This issue can occur on Solaris as DSM thread get stuck in connectForServer() call, and hence not recovering any newly started server. This may be beacuase of tcp_ip_abort_cinterval kernal level property on solaris which has 3 minutes as a default value ",
@@ -393,7 +398,7 @@ public class HAStartupAndFailoverDUnitTest extends DistributedTestCase
       assertNotNull(" Primary endpoint should not be null", pool.getPrimaryName());
       assertTrue("Endpoint List should not be Empty as all server are live",pool.getConnectedServerCount() > 0);
       }catch(Exception e){
-        fail("failed while verifyPrimaryShouldNotBeNullAndEPListShouldNotBeEmpty()", e);
+        Assert.fail("failed while verifyPrimaryShouldNotBeNullAndEPListShouldNotBeEmpty()", e);
       }
     }
 
@@ -404,7 +409,7 @@ public class HAStartupAndFailoverDUnitTest extends DistributedTestCase
       } catch (NoSubscriptionServersAvailableException e) {
         // pass
       } catch(Exception e){
-        fail("failed while verifyPrimaryShouldBeNullAndEPListShouldBeEmpty()", e);
+        Assert.fail("failed while verifyPrimaryShouldBeNullAndEPListShouldBeEmpty()", e);
       }
     }
 
@@ -462,7 +467,7 @@ public class HAStartupAndFailoverDUnitTest extends DistributedTestCase
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
       
       CacheServerImpl bs = (CacheServerImpl)cache.getCacheServers()
           .iterator().next();
@@ -479,7 +484,7 @@ public class HAStartupAndFailoverDUnitTest extends DistributedTestCase
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
       
       Collection<CacheClientProxy> proxies = ccn.getClientProxies();
       Iterator<CacheClientProxy> iter_prox = proxies.iterator();
@@ -495,7 +500,7 @@ public class HAStartupAndFailoverDUnitTest extends DistributedTestCase
             return excuse;
           }
         };
-        DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+        Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
         
         // assertTrue("Dispatcher on primary should be alive",
         // proxy._messageDispatcher.isAlive());
@@ -522,7 +527,7 @@ public class HAStartupAndFailoverDUnitTest extends DistributedTestCase
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
       
       CacheServerImpl bs = (CacheServerImpl)c.getCacheServers().iterator()
           .next();
@@ -539,7 +544,7 @@ public class HAStartupAndFailoverDUnitTest extends DistributedTestCase
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
       
       Iterator iter_prox = ccn.getClientProxies().iterator();
       if (iter_prox.hasNext()) {
@@ -712,9 +717,8 @@ public class HAStartupAndFailoverDUnitTest extends DistributedTestCase
   }
 
 
-  public void tearDown2() throws Exception
-  {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     // close the clients first
     closeCache();
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InstantiatorPropagationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InstantiatorPropagationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InstantiatorPropagationDUnitTest.java
index 17ef1cc..c6f71e2 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InstantiatorPropagationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InstantiatorPropagationDUnitTest.java
@@ -16,6 +16,8 @@
  */
 package com.gemstone.gemfire.internal.cache.tier.sockets;
 
+import static com.gemstone.gemfire.test.dunit.DistributedTestUtils.*;
+
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
@@ -45,9 +47,14 @@ import com.gemstone.gemfire.internal.cache.CacheServerImpl;
 import com.gemstone.gemfire.internal.cache.ClientServerObserverAdapter;
 import com.gemstone.gemfire.internal.cache.ClientServerObserverHolder;
 import com.gemstone.gemfire.internal.cache.EventID;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
   private static Cache cache = null;
@@ -144,8 +151,7 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
   }
 
   @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  protected final void preTearDown() throws Exception {
     // close the clients first
     closeCache();
     client1.invoke(InstantiatorPropagationDUnitTest.class, "closeCache");
@@ -163,7 +169,7 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
   }
   
   public static void unregisterInstantiatorsInAllVMs() {
-    invokeInEveryVM(DistributedTestCase.class, "unregisterInstantiatorsInThisVM");
+    Invoke.invokeInEveryVM(()->unregisterInstantiatorsInThisVM());
   }
 
   public static void verifyInstantiators(final int numOfInstantiators) {
@@ -180,7 +186,7 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
           + " instantiators=" + java.util.Arrays.toString(InternalInstantiator.getInstantiators());
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+    Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
   }
 
   public static void registerTestObject1() throws Exception {
@@ -192,7 +198,7 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
       obj.init(0);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in TestObject1", e);
+      Assert.fail("Test failed due to exception in TestObject1", e);
     }
   }
 
@@ -204,7 +210,7 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
       obj.init(0);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in TestObject2", e);
+      Assert.fail("Test failed due to exception in TestObject2", e);
     }
   }
 
@@ -216,7 +222,7 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
       obj.init(0);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in TestObject3", e);
+      Assert.fail("Test failed due to exception in TestObject3", e);
     }
   }
 
@@ -228,7 +234,7 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
       obj.init(0);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in TestObject4", e);
+      Assert.fail("Test failed due to exception in TestObject4", e);
     }
   }
 
@@ -240,7 +246,7 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
       obj.init(0);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in TestObject5", e);
+      Assert.fail("Test failed due to exception in TestObject5", e);
     }
   }
 
@@ -252,7 +258,7 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
       obj.init(0);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in TestObject6", e);
+      Assert.fail("Test failed due to exception in TestObject6", e);
     }
   }
 
@@ -264,7 +270,7 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
       obj.init(0);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in TestObject7", e);
+      Assert.fail("Test failed due to exception in TestObject7", e);
     }
   }
 
@@ -276,7 +282,7 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
       obj.init(0);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in TestObject8", e);
+      Assert.fail("Test failed due to exception in TestObject8", e);
     }
   }
 
@@ -288,7 +294,7 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
       obj.init(0);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in TestObject9", e);
+      Assert.fail("Test failed due to exception in TestObject9", e);
     }
   }
 
@@ -300,7 +306,7 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
       obj.init(0);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in TestObject10", e);
+      Assert.fail("Test failed due to exception in TestObject10", e);
     }
   }
 
@@ -312,7 +318,7 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
       obj.init(0);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in TestObject11", e);
+      Assert.fail("Test failed due to exception in TestObject11", e);
     }
   }
 
@@ -324,7 +330,7 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
       obj.init(0);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in TestObject11", e);
+      Assert.fail("Test failed due to exception in TestObject11", e);
     }
   }
 
@@ -336,7 +342,7 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
       obj.init(0);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in TestObject13", e);
+      Assert.fail("Test failed due to exception in TestObject13", e);
     }
   }
 
@@ -348,7 +354,7 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
       obj.init(0);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in TestObject14", e);
+      Assert.fail("Test failed due to exception in TestObject14", e);
     }
   }
 
@@ -360,7 +366,7 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
       obj.init(0);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in TestObject15", e);
+      Assert.fail("Test failed due to exception in TestObject15", e);
     }
   }
 
@@ -372,7 +378,7 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
       obj.init(0);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in TestObject16", e);
+      Assert.fail("Test failed due to exception in TestObject16", e);
     }
   }
 
@@ -384,7 +390,7 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
       obj.init(0);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in TestObject17", e);
+      Assert.fail("Test failed due to exception in TestObject17", e);
     }
   }
 
@@ -396,7 +402,7 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
       obj.init(0);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in TestObject18", e);
+      Assert.fail("Test failed due to exception in TestObject18", e);
     }
   }
   
@@ -408,7 +414,7 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
       obj.init(0);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in TestObject19", e);
+      Assert.fail("Test failed due to exception in TestObject19", e);
     }
   }
 
@@ -420,7 +426,7 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
       obj.init(0);
     }
     catch (Exception e) {
-      fail("Test failed due to exception in TestObject20", e);
+      Assert.fail("Test failed due to exception in TestObject20", e);
     }
   }
 
@@ -463,7 +469,7 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
 
     unregisterInstantiatorsInAllVMs();
     
-    pause(3000);
+    Wait.pause(3000);
 
     server1.invoke(InstantiatorPropagationDUnitTest.class,
         "registerTestObject1");
@@ -475,11 +481,11 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
 
     client1
         .invoke(InstantiatorPropagationDUnitTest.class, "createClientCache",
-            new Object[] { getServerHostName(server1.getHost()),
+            new Object[] { NetworkUtils.getServerHostName(server1.getHost()),
                 new Integer(PORT1) });
 
     // // wait for client2 to come online
-    pause(3000);
+    Wait.pause(3000);
     //
     client1.invoke(InstantiatorPropagationDUnitTest.class,
         "verifyInstantiators", new Object[] { new Integer(2) });
@@ -512,7 +518,7 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
       }
     });
     //
-    pause(3000);
+    Wait.pause(3000);
     // Run getAll
     client1.invoke(new CacheSerializableRunnable("Get entry from client") {
       public void run2() throws CacheException {
@@ -536,26 +542,26 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
     PORT1 = initServerCache(server1);
     PORT2 = initServerCache(server2);
 
-    pause(3000);
+    Wait.pause(3000);
 
     client1
         .invoke(InstantiatorPropagationDUnitTest.class, "createClientCache",
-            new Object[] { getServerHostName(server1.getHost()),
+            new Object[] { NetworkUtils.getServerHostName(server1.getHost()),
                 new Integer(PORT1) });
     client2
         .invoke(InstantiatorPropagationDUnitTest.class, "createClientCache",
-            new Object[] { getServerHostName(server1.getHost()),
+            new Object[] { NetworkUtils.getServerHostName(server1.getHost()),
                 new Integer(PORT2) });
 
     unregisterInstantiatorsInAllVMs();
 
     // wait for client2 to come online
-    pause(2000);
+    Wait.pause(2000);
 
 
     client1.invoke(InstantiatorPropagationDUnitTest.class,
         "registerTestObject3");
-    pause(4000);
+    Wait.pause(4000);
 
     client1.invoke(InstantiatorPropagationDUnitTest.class,
         "verifyInstantiators", new Object[] { new Integer(1) });
@@ -584,21 +590,21 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
 
     client1
         .invoke(InstantiatorPropagationDUnitTest.class, "createClientCache",
-            new Object[] { getServerHostName(server1.getHost()),
+            new Object[] { NetworkUtils.getServerHostName(server1.getHost()),
                 new Integer(PORT1) });
     client2
         .invoke(InstantiatorPropagationDUnitTest.class, "createClientCache",
-            new Object[] { getServerHostName(server1.getHost()),
+            new Object[] { NetworkUtils.getServerHostName(server1.getHost()),
                 new Integer(PORT2) });
 
     unregisterInstantiatorsInAllVMs();
 
     // wait for client2 to come online
-    pause(2000);
+    Wait.pause(2000);
 
     client1.invoke(InstantiatorPropagationDUnitTest.class,
         "registerTestObject4");
-    pause(4000);
+    Wait.pause(4000);
 
     server1.invoke(InstantiatorPropagationDUnitTest.class, "stopServer");
 
@@ -639,25 +645,25 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
 
     client1
         .invoke(InstantiatorPropagationDUnitTest.class, "createClientCache",
-            new Object[] { getServerHostName(server1.getHost()),
+            new Object[] { NetworkUtils.getServerHostName(server1.getHost()),
                 new Integer(PORT1) });
     client2
         .invoke(InstantiatorPropagationDUnitTest.class, "createClientCache",
-            new Object[] { getServerHostName(server1.getHost()),
+            new Object[] { NetworkUtils.getServerHostName(server1.getHost()),
                 new Integer(PORT2) });
 
     unregisterInstantiatorsInAllVMs();
 
     // wait for client2 to come online
-    pause(2000);
+    Wait.pause(2000);
 
     client1.invoke(InstantiatorPropagationDUnitTest.class,
         "registerTestObject10");
-    pause(4000);
+    Wait.pause(4000);
 
     server1.invoke(InstantiatorPropagationDUnitTest.class,
         "registerTestObject11");
-    pause(4000);
+    Wait.pause(4000);
 
     server2.invoke(InstantiatorPropagationDUnitTest.class,
         "verifyInstantiators", new Object[] { new Integer(2) });
@@ -683,11 +689,11 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
     PORT2 = initServerCache(server2);
     client1
         .invoke(InstantiatorPropagationDUnitTest.class, "createClientCache",
-            new Object[] { getServerHostName(server1.getHost()),
+            new Object[] { NetworkUtils.getServerHostName(server1.getHost()),
                 new Integer(PORT1) });
     client2
         .invoke(InstantiatorPropagationDUnitTest.class, "createClientCache",
-            new Object[] { getServerHostName(server1.getHost()),
+            new Object[] { NetworkUtils.getServerHostName(server1.getHost()),
                 new Integer(PORT2) });
 
     unregisterInstantiatorsInAllVMs();
@@ -751,21 +757,21 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
 
     client1
         .invoke(InstantiatorPropagationDUnitTest.class, "createClientCache",
-            new Object[] { getServerHostName(server1.getHost()),
+            new Object[] { NetworkUtils.getServerHostName(server1.getHost()),
                 new Integer(PORT1) });
     client2
         .invoke(InstantiatorPropagationDUnitTest.class, "createClientCache",
-            new Object[] { getServerHostName(server1.getHost()),
+            new Object[] { NetworkUtils.getServerHostName(server1.getHost()),
                 new Integer(PORT1) });
-    createClientCache(getServerHostName(server2.getHost()), new Integer(PORT2));
+    createClientCache(NetworkUtils.getServerHostName(server2.getHost()), new Integer(PORT2));
     unregisterInstantiatorsInAllVMs();
 
     // wait for client2 to come online
-    pause(2000);
+    Wait.pause(2000);
 
     client1.invoke(InstantiatorPropagationDUnitTest.class,
         "registerTestObject12");
-    pause(4000);
+    Wait.pause(4000);
 
     client1.invoke(InstantiatorPropagationDUnitTest.class,
         "verifyInstantiators", new Object[] { new Integer(1) });
@@ -810,20 +816,20 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
     PORT1 = initServerCache(server1, 1);
     PORT2 = initServerCache(server2, 2);
 
-    createClientCache_EventId(getServerHostName(server1.getHost()), new Integer(PORT1));
+    createClientCache_EventId(NetworkUtils.getServerHostName(server1.getHost()), new Integer(PORT1));
 
     unregisterInstantiatorsInAllVMs();
     
     client2.invoke(InstantiatorPropagationDUnitTest.class,
         "createClientCache_EventId", new Object[] {
-            getServerHostName(server1.getHost()), new Integer(PORT2) });
+            NetworkUtils.getServerHostName(server1.getHost()), new Integer(PORT2) });
     setClientServerObserver1();
     client2.invoke(InstantiatorPropagationDUnitTest.class,
         "setClientServerObserver2");
 
     registerTestObject19();
 
-    pause(10000);
+    Wait.pause(10000);
 
     Boolean pass = (Boolean)client2.invoke(
         InstantiatorPropagationDUnitTest.class, "verifyResult");
@@ -841,17 +847,17 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
   
       unregisterInstantiatorsInAllVMs();
 
-      pause(3000);
+      Wait.pause(3000);
   
-      createClientCache(getServerHostName(server1.getHost()),
+      createClientCache(NetworkUtils.getServerHostName(server1.getHost()),
           new Integer(PORT1));
   
       client2
           .invoke(InstantiatorPropagationDUnitTest.class, "createClientCache",
-              new Object[] {getServerHostName(server2.getHost()),
+              new Object[] {NetworkUtils.getServerHostName(server2.getHost()),
                   new Integer(PORT2)});
   
-      pause(3000);
+      Wait.pause(3000);
       unregisterInstantiatorsInAllVMs();
   
       assertTestObject20NotLoaded();
@@ -860,7 +866,7 @@ public class InstantiatorPropagationDUnitTest extends DistributedTestCase {
       client2.invoke(InstantiatorPropagationDUnitTest.class, "assertTestObject20NotLoaded");
   
       registerTestObject20();
-      pause(5000);
+      Wait.pause(5000);
       assertTestObject20Loaded();
       server1.invoke(InstantiatorPropagationDUnitTest.class, "assertTestObject20Loaded");
       //server2.invoke(InstantiatorPropagationDUnitTest.class, "assertTestObject20Loaded"); // classes are not initialized after loading in p2p path

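The hunks above are mechanical applications of the GEODE-773 extraction: calls that used to be inherited from DistributedTestCase -- fail(String, Throwable), pause(int), and getServerHostName(Host) -- become static calls on the extracted dunit utility classes Assert, Wait, and NetworkUtils. A minimal sketch of the migrated call sites follows; the class name, method, and message are hypothetical, and only the imports and the three static calls are taken from the diff itself.

    import com.gemstone.gemfire.test.dunit.Assert;
    import com.gemstone.gemfire.test.dunit.Host;
    import com.gemstone.gemfire.test.dunit.NetworkUtils;
    import com.gemstone.gemfire.test.dunit.Wait;

    // Hypothetical call sites illustrating the before/after of this commit.
    public class MigrationSketch {

      public static void example() {
        // Before: pause(3000);  -- inherited from DistributedTestCase
        Wait.pause(3000);

        // Before: getServerHostName(Host.getHost(0));
        String hostName = NetworkUtils.getServerHostName(Host.getHost(0));

        try {
          connectTo(hostName);
        } catch (Exception e) {
          // Before: fail("connect failed", e);  -- two-arg fail carries the cause
          Assert.fail("connect failed", e);
        }
      }

      // Placeholder for real test logic; not part of the commit.
      private static void connectTo(String hostName) throws Exception {
      }
    }
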
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestListDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestListDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestListDUnitTest.java
index 8bc4e42..06f599f 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestListDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestListDUnitTest.java
@@ -47,10 +47,16 @@ import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.CacheServerImpl;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.cache.client.*;
 import com.gemstone.gemfire.cache.NoSubscriptionServersAvailableException;
 
@@ -131,7 +137,7 @@ public class InterestListDUnitTest extends DistributedTestCase
   public void setUp() throws Exception {
     super.setUp();
     disconnectAllFromDS();
-    pause(10000);
+    Wait.pause(10000);
     final Host host = Host.getHost(0);
     vm0 = host.getVM(0);
     vm1 = host.getVM(1);
@@ -161,9 +167,9 @@ public class InterestListDUnitTest extends DistributedTestCase
     {
 
       vm1.invoke(InterestListDUnitTest.class, "createClientCache",new Object[] {
-        getServerHostName(vm0.getHost()), new Integer(PORT1)});
+        NetworkUtils.getServerHostName(vm0.getHost()), new Integer(PORT1)});
       vm2.invoke(InterestListDUnitTest.class, "createClientCache",new Object[] {
-        getServerHostName(vm0.getHost()), new Integer(PORT1)});
+        NetworkUtils.getServerHostName(vm0.getHost()), new Integer(PORT1)});
 
       vm1.invoke(InterestListDUnitTest.class, "createEntriesK1andK2");
       vm2.invoke(InterestListDUnitTest.class, "createEntriesK1andK2");
@@ -174,11 +180,11 @@ public class InterestListDUnitTest extends DistributedTestCase
           new Object[] { key2 });
 
       vm1.invoke(InterestListDUnitTest.class, "put", new Object[] { "vm1" });
-      pause(10000);
+      Wait.pause(10000);
       vm2.invoke(InterestListDUnitTest.class, "validateEntriesK1andK2",
           new Object[] { "vm2" });
       vm2.invoke(InterestListDUnitTest.class, "put", new Object[] { "vm2" });
-      pause(10000);
+      Wait.pause(10000);
       vm1.invoke(InterestListDUnitTest.class, "validateEntriesK1andK2",
           new Object[] { "vm1" });
 
@@ -188,11 +194,11 @@ public class InterestListDUnitTest extends DistributedTestCase
           new Object[] { key2 });
 
       vm1.invoke(InterestListDUnitTest.class, "putAgain", new Object[] { "vm1" });
-      pause(10000);
+      Wait.pause(10000);
       vm2.invoke(InterestListDUnitTest.class, "validateEntriesAgain",
           new Object[] { "vm2" });
       vm2.invoke(InterestListDUnitTest.class, "putAgain", new Object[] { "vm2" });
-      pause(10000);
+      Wait.pause(10000);
       vm1.invoke(InterestListDUnitTest.class, "validateEntriesAgain",
           new Object[] { "vm1" });
     }
@@ -226,9 +232,9 @@ public class InterestListDUnitTest extends DistributedTestCase
 
     // Initialization
     vm1.invoke(InterestListDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(Host.getHost(0)), new Integer(PORT1)});
+        new Object[] { NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT1)});
     vm2.invoke(InterestListDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(Host.getHost(0)), new Integer(PORT1)});
+        new Object[] { NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT1)});
 
     vm1.invoke(InterestListDUnitTest.class, "createEntriesK1andK2");
     vm2.invoke(InterestListDUnitTest.class, "createEntriesK1andK2");
@@ -236,7 +242,7 @@ public class InterestListDUnitTest extends DistributedTestCase
     // STEP 1
     vm2.invoke(InterestListDUnitTest.class, "putSingleEntry",
         new Object[] { key2, "vm2" });
-    pause(2000);
+    Wait.pause(2000);
     vm1.invoke(InterestListDUnitTest.class, "validateSingleEntry",
         new Object[] {key2, key2_originalValue});
 
@@ -254,7 +260,7 @@ public class InterestListDUnitTest extends DistributedTestCase
     // STEP 3
     vm1.invoke(InterestListDUnitTest.class, "putSingleEntry",
         new Object[] { key1, "vm1" });
-    pause(2000);
+    Wait.pause(2000);
     vm2.invoke(InterestListDUnitTest.class, "validateSingleEntry",
         new Object[] {key1, key1_originalValue}); // still unchanged
     vm2.invoke(InterestListDUnitTest.class, "registerKey",
@@ -269,7 +275,7 @@ public class InterestListDUnitTest extends DistributedTestCase
         new Object[] { key1 });
     vm1.invoke(InterestListDUnitTest.class, "putSingleEntry",
         new Object[] { key1, key1_originalValue });
-    pause(2000);
+    Wait.pause(2000);
     vm2.invoke(InterestListDUnitTest.class, "validateSingleEntry",
         new Object[] {key1, "vm1"}); // update lost
   }
@@ -284,9 +290,9 @@ public class InterestListDUnitTest extends DistributedTestCase
   {
 
     vm1.invoke(InterestListDUnitTest.class, "createClientCache",new Object[] {
-      getServerHostName(vm0.getHost()), new Integer(PORT1)});
+      NetworkUtils.getServerHostName(vm0.getHost()), new Integer(PORT1)});
     vm2.invoke(InterestListDUnitTest.class, "createClientCache",new Object[] {
-      getServerHostName(vm0.getHost()), new Integer(PORT1)});
+      NetworkUtils.getServerHostName(vm0.getHost()), new Integer(PORT1)});
 
     vm1.invoke(InterestListDUnitTest.class, "createEntriesK1andK2");
     vm2.invoke(InterestListDUnitTest.class, "createEntriesK1andK2");
@@ -294,7 +300,7 @@ public class InterestListDUnitTest extends DistributedTestCase
     vm2.invoke(InterestListDUnitTest.class, "registerALL_KEYS");
 
     vm1.invoke(InterestListDUnitTest.class, "put_ALL_KEYS");
-    pause(10000);
+    Wait.pause(10000);
     vm2.invoke(InterestListDUnitTest.class, "validate_ALL_KEYS");
 
   }
@@ -311,17 +317,17 @@ public class InterestListDUnitTest extends DistributedTestCase
   {
     // directly put on server
     vm0.invoke(InterestListDUnitTest.class, "multiple_put");
-    pause(1000);
+    Wait.pause(1000);
     // create clients to connect to that server
     vm1.invoke(InterestListDUnitTest.class, "createClientCache",new Object[] {
-      getServerHostName(vm0.getHost()), new Integer(PORT1)});
+      NetworkUtils.getServerHostName(vm0.getHost()), new Integer(PORT1)});
     vm2.invoke(InterestListDUnitTest.class, "createClientCache",new Object[] {
-      getServerHostName(vm0.getHost()), new Integer(PORT1)});
+      NetworkUtils.getServerHostName(vm0.getHost()), new Integer(PORT1)});
 
     // register interest
     vm1.invoke(InterestListDUnitTest.class, "registerKeys");
     vm2.invoke(InterestListDUnitTest.class, "registerKeysAgain");
-    pause(10000);
+    Wait.pause(10000);
     // verify the values for registered keys
     vm1.invoke(InterestListDUnitTest.class,
         "validateRegionEntriesFromInterestListInVm1");
@@ -352,10 +358,10 @@ public class InterestListDUnitTest extends DistributedTestCase
 
       DistributedMember c1 = (DistributedMember)vm1
         .invoke(InterestListDUnitTest.class, "createClientCache",new Object[] {
-          getServerHostName(vm0.getHost()), PORT1});
+          NetworkUtils.getServerHostName(vm0.getHost()), PORT1});
       DistributedMember c2 = (DistributedMember)vm2
         .invoke(InterestListDUnitTest.class, "createClientCache",new Object[] {
-          getServerHostName(vm0.getHost()), PORT1});
+          NetworkUtils.getServerHostName(vm0.getHost()), PORT1});
 
       vm1.invoke(InterestListDUnitTest.class, "createEntriesK1andK2");
       vm2.invoke(InterestListDUnitTest.class, "createEntriesK1andK2");
@@ -413,7 +419,7 @@ public class InterestListDUnitTest extends DistributedTestCase
         server = addCacheServer();
         port2 = server.getPort();
       } catch (Exception ex) {
-        fail("Cache creation threw an exception", ex);
+        Assert.fail("Cache creation threw an exception", ex);
       }
 
       addRegisterInterestListener();
@@ -422,17 +428,17 @@ public class InterestListDUnitTest extends DistributedTestCase
       // servers are set up, now do the clients
       DistributedMember c1 = (DistributedMember)vm1
       .invoke(InterestListDUnitTest.class, "createClientCache",new Object[] {
-        getServerHostName(vm0.getHost()), PORT1, port2});
+        NetworkUtils.getServerHostName(vm0.getHost()), PORT1, port2});
       DistributedMember c2 = (DistributedMember)vm2
       .invoke(InterestListDUnitTest.class, "createClientCache",new Object[] {
-        getServerHostName(vm0.getHost()), PORT1, port2});
+        NetworkUtils.getServerHostName(vm0.getHost()), PORT1, port2});
 
       vm1.invoke(InterestListDUnitTest.class, "createEntriesK1andK2");
       vm2.invoke(InterestListDUnitTest.class, "createEntriesK1andK2");
 
       // interest registration from clients should cause listeners to be invoked
       // in both servers
-      getLogWriter().info("test phase 1");
+      LogWriterUtils.getLogWriter().info("test phase 1");
       vm1.invoke(InterestListDUnitTest.class, "registerKey",
           new Object[] { key1 });
       vm2.invoke(InterestListDUnitTest.class, "registerKey",
@@ -446,7 +452,7 @@ public class InterestListDUnitTest extends DistributedTestCase
           new Object[]{ two, zero });
 
       // unregistration from clients should invoke listeners on both servers
-      getLogWriter().info("test phase 2");
+      LogWriterUtils.getLogWriter().info("test phase 2");
       vm1.invoke(InterestListDUnitTest.class, "unregisterKey",
           new Object[] { key1 });
       vm2.invoke(InterestListDUnitTest.class, "unregisterKey",
@@ -457,7 +463,7 @@ public class InterestListDUnitTest extends DistributedTestCase
           new Object[]{ zero, two });
 
       // now the primary server for eache client will register and unregister
-      getLogWriter().info("test phase 3");
+      LogWriterUtils.getLogWriter().info("test phase 3");
       registerKeyForClient(c1, key1);
       vm0.invoke(InterestListDUnitTest.class, "registerKeyForClient",
           new Object[] { c1, key1 });
@@ -469,7 +475,7 @@ public class InterestListDUnitTest extends DistributedTestCase
       vm0.invoke(InterestListDUnitTest.class, "verifyCountsAndClear",
           new Object[]{ two, zero });
 
-      getLogWriter().info("test phase 4");
+      LogWriterUtils.getLogWriter().info("test phase 4");
       unregisterKeyForClient(c1, key1);
       vm0.invoke(InterestListDUnitTest.class, "unregisterKeyForClient",
           new Object[] { c1, key1 });
@@ -490,7 +496,7 @@ public class InterestListDUnitTest extends DistributedTestCase
 
     // Register interest in key1.
     vm1.invoke(InterestListDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(vm0.getHost()), new Integer(PORT1) });
+        new Object[] { NetworkUtils.getServerHostName(vm0.getHost()), new Integer(PORT1) });
     vm1.invoke(InterestListDUnitTest.class, "registerKey",
         new Object[] { key1 });
 
@@ -529,7 +535,7 @@ public class InterestListDUnitTest extends DistributedTestCase
 
     // Create client cache
     vm1.invoke(InterestListDUnitTest.class, "createClientCache",new Object[] {
-      getServerHostName(vm0.getHost()), port1, port2});
+      NetworkUtils.getServerHostName(vm0.getHost()), port1, port2});
     
     // Register interest in all keys
     vm1.invoke(InterestListDUnitTest.class, "registerALL_KEYS");
@@ -657,7 +663,7 @@ public class InterestListDUnitTest extends DistributedTestCase
           return "waiting for queues to drain for " + fproxy.getProxyID();
         }
       };
-      DistributedTestCase.waitForCriterion(ev, 5 * 10 * 1000, 200, true);
+      Wait.waitForCriterion(ev, 5 * 10 * 1000, 200, true);
     }
   }
 
@@ -703,7 +709,7 @@ public class InterestListDUnitTest extends DistributedTestCase
         return "waiting for " + fCacheListener.getExpectedCreates() + " create events";
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 5 * 10 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 5 * 10 * 1000, 200, true);
   }
   
   private static void confirmNoCacheListenerUpdates() throws Exception {
@@ -742,7 +748,7 @@ public class InterestListDUnitTest extends DistributedTestCase
       assertEquals(r.getEntry(key2).getValue(), key2_originalValue);
     }
     catch (Exception ex) {
-      fail("failed while createEntries()", ex);
+      Assert.fail("failed while createEntries()", ex);
     }
   }
 
@@ -759,7 +765,7 @@ public class InterestListDUnitTest extends DistributedTestCase
       registerKeyOnly(key);
     }
     catch (Exception ex) {
-      fail("failed while registering key(" + key + ")", ex);
+      Assert.fail("failed while registering key(" + key + ")", ex);
     }
   }
 
@@ -777,7 +783,7 @@ public class InterestListDUnitTest extends DistributedTestCase
           InterestResultPolicy.KEYS_VALUES, false);
       }
     } catch (Exception ex) {
-      fail("failed while registering key(" + key + ")", ex);
+      Assert.fail("failed while registering key(" + key + ")", ex);
     }
   }
 
@@ -789,7 +795,7 @@ public class InterestListDUnitTest extends DistributedTestCase
     }
     catch (NoSubscriptionServersAvailableException ex) {
       // expected an exception
-      getLogWriter().info("Got expected exception in registerKey: ");
+      LogWriterUtils.getLogWriter().info("Got expected exception in registerKey: ");
     }
   }
 
@@ -801,7 +807,7 @@ public class InterestListDUnitTest extends DistributedTestCase
       r.registerInterest("ALL_KEYS");
     }
     catch (Exception ex) {
-      fail("failed while registering ALL_KEYS", ex);
+      Assert.fail("failed while registering ALL_KEYS", ex);
     }
   }
 
@@ -817,7 +823,7 @@ public class InterestListDUnitTest extends DistributedTestCase
       assertEquals(r.getEntry(key2).getValue(), "vm1-key-2");
     }
     catch (Exception ex) {
-      fail("failed while put_ALL_KEY()", ex);
+      Assert.fail("failed while put_ALL_KEY()", ex);
     }
   }
 
@@ -830,7 +836,7 @@ public class InterestListDUnitTest extends DistributedTestCase
       assertEquals(r.getEntry(key2).getValue(), "vm1-key-2");
     }
     catch (Exception ex) {
-      fail("failed while validate_ALL_KEY", ex);
+      Assert.fail("failed while validate_ALL_KEY", ex);
     }
   }
 
@@ -845,7 +851,7 @@ public class InterestListDUnitTest extends DistributedTestCase
       r.registerInterest(list);
     }
     catch (Exception ex) {
-      fail("failed while registering keys" + list + "", ex);
+      Assert.fail("failed while registering keys" + list + "", ex);
     }
   }
 
@@ -860,7 +866,7 @@ public class InterestListDUnitTest extends DistributedTestCase
       r.registerInterest(list);
     }
     catch (Exception ex) {
-      fail("failed while registering keys" + list + "", ex);
+      Assert.fail("failed while registering keys" + list + "", ex);
     }
   }
 
@@ -877,7 +883,7 @@ public class InterestListDUnitTest extends DistributedTestCase
       unregisterKeyOnly(key);
     }
     catch (Exception ex) {
-      fail("failed while un-registering key(" + key + ")", ex);
+      Assert.fail("failed while un-registering key(" + key + ")", ex);
     }
   }
 
@@ -895,7 +901,7 @@ public class InterestListDUnitTest extends DistributedTestCase
       }
     }
     catch (Exception ex) {
-      fail("failed while un-registering key(" + key + ") for client " + clientId, ex);
+      Assert.fail("failed while un-registering key(" + key + ") for client " + clientId, ex);
     }
   }
 
@@ -934,7 +940,7 @@ public class InterestListDUnitTest extends DistributedTestCase
       assertEquals(r.getEntry(key).getValue(), value);
     }
     catch (Exception ex) {
-      fail("failed while r.put()", ex);
+      Assert.fail("failed while r.put()", ex);
     }
   }
 
@@ -960,7 +966,7 @@ public class InterestListDUnitTest extends DistributedTestCase
 
     }
     catch (Exception ex) {
-      fail("failed while r.put()", ex);
+      Assert.fail("failed while r.put()", ex);
     }
   }
 
@@ -977,7 +983,7 @@ public class InterestListDUnitTest extends DistributedTestCase
 
     }
     catch (Exception ex) {
-      fail("failed while r.put()", ex);
+      Assert.fail("failed while r.put()", ex);
     }
   }
 
@@ -1003,7 +1009,7 @@ public class InterestListDUnitTest extends DistributedTestCase
 
     }
     catch (Exception ex) {
-      fail("failed while r.putAgain()", ex);
+      Assert.fail("failed while r.putAgain()", ex);
     }
   }
 
@@ -1036,7 +1042,7 @@ public class InterestListDUnitTest extends DistributedTestCase
         return "waiting for client to apply events from server";
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 5 * 10 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 5 * 10 * 1000, 200, true);
   }
 
   public static void validateSingleEntry(Object key, String value) {
@@ -1045,7 +1051,7 @@ public class InterestListDUnitTest extends DistributedTestCase
       assertEquals(value, r.getEntry(key).getValue());
     }
     catch (Exception ex) {
-      fail("failed while validateSingleEntry()", ex);
+      Assert.fail("failed while validateSingleEntry()", ex);
     }
   }
 
@@ -1067,19 +1073,19 @@ public class InterestListDUnitTest extends DistributedTestCase
       }
     }
     catch (Exception ex) {
-      fail("failed while r.put()", ex);
+      Assert.fail("failed while r.put()", ex);
     }
   }
 
-  public void tearDown2() throws Exception
-  {
+  @Override
+  protected final void preTearDown() throws Exception {
     // close the clients first
     vm1.invoke(InterestListDUnitTest.class, "closeCache");
     vm2.invoke(InterestListDUnitTest.class, "closeCache");
     // then close the servers
     vm0.invoke(InterestListDUnitTest.class, "closeCache");
     cache = null;
-    invokeInEveryVM(new SerializableRunnable() { public void run() { cache = null; } });
+    Invoke.invokeInEveryVM(new SerializableRunnable() { public void run() { cache = null; } });
   }
 
   public static void closeCache()
@@ -1098,7 +1104,7 @@ public class InterestListDUnitTest extends DistributedTestCase
      * @see com.gemstone.gemfire.cache.InterestRegistrationListener#afterRegisterInterest(com.gemstone.gemfire.cache.InterestRegistrationEvent)
      */
     public void afterRegisterInterest(InterestRegistrationEvent event) {
-      getLogWriter().info("InterestListener.afterRegisterInterest invoked with this event: " + event);
+      LogWriterUtils.getLogWriter().info("InterestListener.afterRegisterInterest invoked with this event: " + event);
       registrationCount++;
     }
 
@@ -1106,7 +1112,7 @@ public class InterestListDUnitTest extends DistributedTestCase
      * @see com.gemstone.gemfire.cache.InterestRegistrationListener#afterUnregisterInterest(com.gemstone.gemfire.cache.InterestRegistrationEvent)
      */
     public void afterUnregisterInterest(InterestRegistrationEvent event) {
-      getLogWriter().info("InterestListener.afterUnregisterInterest invoked with this event: " + event);
+      LogWriterUtils.getLogWriter().info("InterestListener.afterUnregisterInterest invoked with this event: " + event);
       unregistrationCount++;
     }
 

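Two further pieces of the same extraction are visible in the InterestListDUnitTest hunks: the JUnit-3 style tearDown2() override is replaced by the preTearDown() hook, and cross-VM cleanup goes through the static Invoke.invokeInEveryVM(...) instead of the inherited method. A minimal sketch, assuming a hypothetical test class and static field; the hook signature, the Invoke call, and the SerializableRunnable shape come from the diff above.

    import com.gemstone.gemfire.cache.Cache;
    import com.gemstone.gemfire.test.dunit.DistributedTestCase;
    import com.gemstone.gemfire.test.dunit.Invoke;
    import com.gemstone.gemfire.test.dunit.SerializableRunnable;

    // Hypothetical dunit test showing the new tear-down hook.
    public class TearDownSketchDUnitTest extends DistributedTestCase {

      private static Cache cache;

      public TearDownSketchDUnitTest(String name) {
        super(name);
      }

      // Before: public void tearDown2() throws Exception { ... }
      @Override
      protected final void preTearDown() throws Exception {
        cache = null;
        // Before: invokeInEveryVM(...), inherited from DistributedTestCase
        Invoke.invokeInEveryVM(new SerializableRunnable() {
          public void run() {
            cache = null;
          }
        });
      }
    }
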
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestListEndpointDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestListEndpointDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestListEndpointDUnitTest.java
index 1faa051..b8db844 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestListEndpointDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestListEndpointDUnitTest.java
@@ -46,8 +46,12 @@ import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.CacheServerImpl;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  *
@@ -89,7 +93,7 @@ public class InterestListEndpointDUnitTest extends DistributedTestCase
   public void setUp() throws Exception {
     super.setUp();
     disconnectAllFromDS();
-    pause(5000);
+    Wait.pause(5000);
     final Host host = Host.getHost(0);
     server1 = host.getVM(0);
     server2 = host.getVM(1);
@@ -105,9 +109,9 @@ public class InterestListEndpointDUnitTest extends DistributedTestCase
     PORT2 = initServerCache(server2);
 
     // then create client
-    pause(5000);  // [bruce] avoid ConnectException
+    Wait.pause(5000);  // [bruce] avoid ConnectException
     client1.invoke(impl.getClass(), "createClientCache", new Object[] {
-      getServerHostName(server1.getHost()), new Integer(PORT1),new Integer(PORT2)});
+      NetworkUtils.getServerHostName(server1.getHost()), new Integer(PORT1),new Integer(PORT2)});
 
   }
 
@@ -168,7 +172,7 @@ public class InterestListEndpointDUnitTest extends DistributedTestCase
     VM primary = firstIsPrimary? server1 : server2;
 
     primary.invoke(impl.getClass(), "stopILEndpointServer");
-    pause(5000);
+    Wait.pause(5000);
 
     //Since the loadbalancing policy is roundrobin & there are two servers so
     // do two dumb puts, which will ensure that fail over happens from the
@@ -201,7 +205,7 @@ public class InterestListEndpointDUnitTest extends DistributedTestCase
             return null;
           }
         };
-        DistributedTestCase.waitForCriterion(ev, maxWaitTime, 200, true);
+        Wait.waitForCriterion(ev, maxWaitTime, 200, true);
       }
     });
 
@@ -416,7 +420,7 @@ public class InterestListEndpointDUnitTest extends DistributedTestCase
       }
     }
     catch (Exception ex) {
-      fail("failed while verifyIfNotInterestListEndpointAndThenPut()", ex);
+      com.gemstone.gemfire.test.dunit.Assert.fail("failed while verifyIfNotInterestListEndpointAndThenPut()", ex);
     }
   }
 
@@ -428,7 +432,7 @@ public class InterestListEndpointDUnitTest extends DistributedTestCase
       r.registerInterest(k1, InterestResultPolicy.KEYS);
     }
     catch (Exception ex) {
-      fail("failed while region.registerInterest()", ex);
+      com.gemstone.gemfire.test.dunit.Assert.fail("failed while region.registerInterest()", ex);
     }
   }
 
@@ -466,7 +470,7 @@ public class InterestListEndpointDUnitTest extends DistributedTestCase
           return "Test missed a success";
         }
       };
-      DistributedTestCase.waitForCriterion(ev, 20 * 1000, 200, true);
+      Wait.waitForCriterion(ev, 20 * 1000, 200, true);
       
       //yes update
       assertEquals(server_k1, r.getEntry(k1).getValue());
@@ -486,15 +490,14 @@ public class InterestListEndpointDUnitTest extends DistributedTestCase
     }
   }
 
-  public void tearDown2() throws Exception
-  {
+  @Override
+  protected final void preTearDown() throws Exception {
     // Close client cache first, then server caches
     client1.invoke(impl.getClass(), "closeCache");
     server2.invoke(impl.getClass(), "closeCache");
     server1.invoke(impl.getClass(), "closeCache");
     CacheServerTestUtil.resetDisableShufflingOfEndpointsFlag();
     cache = null;
-    invokeInEveryVM(new SerializableRunnable() { public void run() { cache = null; } });
+    Invoke.invokeInEveryVM(new SerializableRunnable() { public void run() { cache = null; } });
   }
-
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestListFailoverDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestListFailoverDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestListFailoverDUnitTest.java
index 1b0ecb1..d5e32bd 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestListFailoverDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestListFailoverDUnitTest.java
@@ -25,9 +25,13 @@ import com.gemstone.gemfire.cache.server.CacheServer;
 import com.gemstone.gemfire.cache.client.*;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.PoolFactoryImpl;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.cache.client.internal.PoolImpl;
 
 /**
@@ -95,9 +99,9 @@ public class InterestListFailoverDUnitTest extends DistributedTestCase
     vm1.invoke(CacheServerTestUtil.class, "disableShufflingOfEndpoints");
     vm2.invoke(CacheServerTestUtil.class, "disableShufflingOfEndpoints");
     vm1.invoke(CacheServerTestUtil.class, "createCacheClient", new Object[] {
-        getClientPool(getServerHostName(host),redundancyLevel), REGION_NAME });
+        getClientPool(NetworkUtils.getServerHostName(host),redundancyLevel), REGION_NAME });
     vm2.invoke(CacheServerTestUtil.class, "createCacheClient", new Object[] {
-        getClientPool(getServerHostName(host),0), REGION_NAME });
+        getClientPool(NetworkUtils.getServerHostName(host),0), REGION_NAME });
   }
 
 /**
@@ -165,7 +169,7 @@ public class InterestListFailoverDUnitTest extends DistributedTestCase
       assertEquals(r.getEntry("key-6").getValue(), "key-6");
     }
     catch (Exception ex) {
-      fail("failed while createEntries()", ex);
+      Assert.fail("failed while createEntries()", ex);
     }
   }
 
@@ -182,7 +186,7 @@ public class InterestListFailoverDUnitTest extends DistributedTestCase
       }
     }
     catch (Exception ex) {
-      fail("failed while createEntries()", ex);
+      Assert.fail("failed while createEntries()", ex);
     }
   }
 
@@ -201,7 +205,7 @@ public class InterestListFailoverDUnitTest extends DistributedTestCase
       return new Integer(p.getPrimaryPort());
     }
     catch (Exception ex) {
-      fail("failed while registering keys k1 to k5", ex);
+      Assert.fail("failed while registering keys k1 to k5", ex);
       return null;
     }
   }
@@ -247,7 +251,7 @@ public class InterestListFailoverDUnitTest extends DistributedTestCase
       assertEquals(r.getEntry("key-6").getValue(), "vm2-key-6" + v);
     }
     catch (Exception ex) {
-      fail("failed while r.put()", ex);
+      Assert.fail("failed while r.put()", ex);
     }
   }
 
@@ -288,17 +292,18 @@ public class InterestListFailoverDUnitTest extends DistributedTestCase
           return excuse;
         }
       };
-      DistributedTestCase.waitForCriterion(wc, 40 * 1000, 1000, true);
+      Wait.waitForCriterion(wc, 40 * 1000, 1000, true);
       
       // Verify that 'key-6' was not invalidated
       assertEquals("key-6", r.getEntry("key-6").getValue());
     }
     catch (Exception ex) {
-      fail("failed while r.put()", ex);
+      Assert.fail("failed while r.put()", ex);
     }
   }
 
-  public void tearDown2() throws Exception {
+  @Override
+  protected final void preTearDown() throws Exception {
     closeAll();
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestListRecoveryDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestListRecoveryDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestListRecoveryDUnitTest.java
index 5359cb8..19e7e16 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestListRecoveryDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestListRecoveryDUnitTest.java
@@ -39,9 +39,13 @@ import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.CacheServerImpl;
 import com.gemstone.gemfire.internal.cache.LocalRegion;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.cache.client.*;
 import com.gemstone.gemfire.cache.client.internal.PoolImpl;
 import com.gemstone.gemfire.cache.client.internal.RegisterInterestTracker;
@@ -87,7 +91,7 @@ public class InterestListRecoveryDUnitTest extends DistributedTestCase
   @Override
   public void setUp() throws Exception {
     disconnectAllFromDS();
-    pause(2000);
+    Wait.pause(2000);
     super.setUp();
     final Host host = Host.getHost(0);
     server1 = host.getVM(0);
@@ -96,10 +100,10 @@ public class InterestListRecoveryDUnitTest extends DistributedTestCase
     PORT1 =  ((Integer)server1.invoke(InterestListRecoveryDUnitTest.class, "createServerCache" )).intValue();
     PORT2 =  ((Integer)server2.invoke(InterestListRecoveryDUnitTest.class, "createServerCache" )).intValue();
 
-    getLogWriter().info("server1 port is " + String.valueOf(PORT1));
-    getLogWriter().info("server2 port is " + String.valueOf(PORT2));
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("server1 port is " + String.valueOf(PORT1));
+    com.gemstone.gemfire.test.dunit.LogWriterUtils.getLogWriter().info("server2 port is " + String.valueOf(PORT2));
 
-    createClientCache(getServerHostName(host), new Integer(PORT1), new Integer(PORT2));
+    createClientCache(NetworkUtils.getServerHostName(host), new Integer(PORT1), new Integer(PORT2));
   }
 
   // this test fails because of bug# 35352 , hence commented the bug is Deferred to: Danube
@@ -109,12 +113,12 @@ public class InterestListRecoveryDUnitTest extends DistributedTestCase
     server1.invoke(InterestListRecoveryDUnitTest.class, "createEntries");
     registerK1toK5();
     setServerUnavailable("localhost"+PORT1);
-    pause(20000);
+    Wait.pause(20000);
     unregisterK1toK3();
     setServerAvailable("localhost"+PORT1);
-    pause(20000);
+    Wait.pause(20000);
     setServerUnavailable("localhost"+PORT2);
-    pause(20000);
+    Wait.pause(20000);
     server1.invoke(InterestListRecoveryDUnitTest.class, "verifyUnregisterK1toK3");
 
   }
@@ -177,7 +181,7 @@ public class InterestListRecoveryDUnitTest extends DistributedTestCase
       assertNotNull(pool);
       return (pool.getPrimaryPort() == PORT1);
     } catch (Exception ex) {
-      fail("while isInterestListRegisteredToServer1", ex);
+      Assert.fail("while isInterestListRegisteredToServer1", ex);
     }
     // never reached
     return false;
@@ -257,7 +261,7 @@ public class InterestListRecoveryDUnitTest extends DistributedTestCase
       }
     }
     catch (Exception ex) {
-      fail("failed while createEntries()", ex);
+      Assert.fail("failed while createEntries()", ex);
     }
   }
 
@@ -270,7 +274,7 @@ public class InterestListRecoveryDUnitTest extends DistributedTestCase
       }
     }
     catch (Exception ex) {
-      fail("failed while registering keys", ex);
+      Assert.fail("failed while registering keys", ex);
     }
   }
 
@@ -283,7 +287,7 @@ public class InterestListRecoveryDUnitTest extends DistributedTestCase
       }
     }
     catch (Exception ex) {
-      fail("failed while un-registering keys", ex);
+      Assert.fail("failed while un-registering keys", ex);
     }
   }
 
@@ -295,7 +299,7 @@ public class InterestListRecoveryDUnitTest extends DistributedTestCase
       //ConnectionProxyImpl.markServerUnavailable(server);
     }
     catch (Exception ex) {
-      fail("while setting server unavailable  "+ server, ex);
+      Assert.fail("while setting server unavailable  "+ server, ex);
     }
   }
   public static void setServerAvailable(String server)
@@ -305,7 +309,7 @@ public class InterestListRecoveryDUnitTest extends DistributedTestCase
       //ConnectionProxyImpl.markServerAvailable(server);
     }
     catch (Exception ex) {
-      fail("while setting server available  "+ server, ex);
+      Assert.fail("while setting server available  "+ server, ex);
     }
   }
 
@@ -333,7 +337,7 @@ public class InterestListRecoveryDUnitTest extends DistributedTestCase
       r1.put(key, "server-"+key);
     }
     catch (Exception ex) {
-      fail("failed while r.put()", ex);
+      Assert.fail("failed while r.put()", ex);
     }
   }
 
@@ -360,7 +364,7 @@ public class InterestListRecoveryDUnitTest extends DistributedTestCase
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 20 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 20 * 1000, 200, true);
   }
   
    public static void verifyRegionToProxyMapForFullRegistration()
@@ -402,7 +406,7 @@ public class InterestListRecoveryDUnitTest extends DistributedTestCase
         return "verifyRegisterK4toK5Retry";
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 20 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 20 * 1000, 200, true);
   }
 
    public static void verifyRegisterK4toK5() {
@@ -441,7 +445,7 @@ public class InterestListRecoveryDUnitTest extends DistributedTestCase
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 20 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 20 * 1000, 200, true);
   }
 
  public static void verifyRegionToProxyMapForNoRegistration()
@@ -477,9 +481,8 @@ public class InterestListRecoveryDUnitTest extends DistributedTestCase
      .getProfile(regionName).getKeysOfInterestFor(proxy.getProxyID());
  }
 
-  @Override
-  public void tearDown2() throws Exception
-  {
+ @Override
+ protected final void preTearDown() throws Exception {
     // close the clients first
     server2.invoke(InterestListRecoveryDUnitTest.class, "closeCache");
     closeCache();
@@ -509,6 +512,6 @@ public class InterestListRecoveryDUnitTest extends DistributedTestCase
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+    Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
   }
 }

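The recovery test also shows the waiting pattern after the extraction: a WaitCriterion is polled through Wait.waitForCriterion(criterion, timeoutMs, pollIntervalMs, throwOnTimeout) rather than through the old DistributedTestCase static. A minimal sketch with a hypothetical helper and region; only the WaitCriterion shape and the Wait call are taken from the hunks.

    import com.gemstone.gemfire.cache.Region;
    import com.gemstone.gemfire.test.dunit.Wait;
    import com.gemstone.gemfire.test.dunit.WaitCriterion;

    public class WaitSketch {

      // Hypothetical helper: block until a region entry reaches the expected value.
      public static void waitForEntry(final Region<String, String> region,
                                      final String key, final String expected) {
        WaitCriterion wc = new WaitCriterion() {
          public boolean done() {
            Region.Entry<String, String> entry = region.getEntry(key);
            return entry != null && expected.equals(entry.getValue());
          }
          public String description() {
            return "waiting for " + key + " to equal " + expected;
          }
        };
        // Before: DistributedTestCase.waitForCriterion(wc, 20 * 1000, 200, true);
        Wait.waitForCriterion(wc, 20 * 1000, 200, true);
      }
    }
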
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestRegrListenerDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestRegrListenerDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestRegrListenerDUnitTest.java
index 090577b..f122fe6 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestRegrListenerDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/InterestRegrListenerDUnitTest.java
@@ -40,7 +40,10 @@ import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * Written to test fix for Bug #47132
@@ -136,7 +139,7 @@ public class InterestRegrListenerDUnitTest extends DistributedTestCase {
   }
   
   private void doExpressInterestOnServer(boolean isDurable) {    
-    getLogWriter().info("Total ClientSessions " + cacheServer.getAllClientSessions().size());    
+    LogWriterUtils.getLogWriter().info("Total ClientSessions " + cacheServer.getAllClientSessions().size());    
     for(ClientSession c : this.cacheServer.getAllClientSessions()) {
       c.registerInterestRegex("/serverRegion", ".*", isDurable);
     }    
@@ -155,7 +158,7 @@ public class InterestRegrListenerDUnitTest extends DistributedTestCase {
           intCount = count.intValue();
         intCount++;
         InterestRegrListenerDUnitTest.this.listnerMap.put(UNREGISTER_INTEREST, intCount);
-        getLogWriter().info("InterestRegistrationListener afterUnregisterInterest  for " 
+        LogWriterUtils.getLogWriter().info("InterestRegistrationListener afterUnregisterInterest  for " 
             + event.getRegionName() + " keys " + event.getKeysOfInterest() + "Count " + intCount + " Client : " + event.getClientSession().toString());   
       }
       
@@ -167,11 +170,11 @@ public class InterestRegrListenerDUnitTest extends DistributedTestCase {
           intCount = count.intValue();
         intCount++;
         InterestRegrListenerDUnitTest.this.listnerMap.put(REGISTER_INTEREST, intCount);
-        getLogWriter().info("InterestRegistrationListener afterRegisterInterest  for " 
+        LogWriterUtils.getLogWriter().info("InterestRegistrationListener afterRegisterInterest  for " 
             + event.getRegionName() + " keys " + event.getKeysOfInterest() + "Count " + intCount + " Client : " + event.getClientSession().toString());
       }
     };
-    getLogWriter().info("Registered InterestRegistationLister");
+    LogWriterUtils.getLogWriter().info("Registered InterestRegistationLister");
     this.cacheServer.registerInterestRegistrationListener(listener);    
   }
   
@@ -189,7 +192,7 @@ public class InterestRegrListenerDUnitTest extends DistributedTestCase {
     ClientRegionFactory<String,String> regionFactory = clientCache.createClientRegionFactory(ClientRegionShortcut.PROXY);
     Region<String, String> region = regionFactory.create("serverRegion");   
     
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "Client Cache is created in this vm connected to cacheServer " + host
             + ":" + port + " durable? " + isDurable + " with VMID=" + vmID + " region " + region.getFullPath() + " regionSize " + region.size());
     assertNotNull(clientCache);
@@ -266,15 +269,15 @@ public class InterestRegrListenerDUnitTest extends DistributedTestCase {
     params[1] = port;
     params[2] = true;
     params[3] = "VM_1";
-    getLogWriter().info("Starting client1 with server endpoint <" + hostName + ">:" + port);
+    LogWriterUtils.getLogWriter().info("Starting client1 with server endpoint <" + hostName + ">:" + port);
     clientVM_1.invoke(InterestRegrListenerDUnitTest.class, "setUpClientVMTask", params);
     
     params[3] = "VM_2";
-    getLogWriter().info("Starting client2 with server endpoint <" + hostName + ">:" + port);
+    LogWriterUtils.getLogWriter().info("Starting client2 with server endpoint <" + hostName + ">:" + port);
     clientVM_2.invoke(InterestRegrListenerDUnitTest.class, "setUpClientVMTask", params);
     
     params[3] = "VM_3";
-    getLogWriter().info("Starting client2 with server endpoint <" + hostName + ">:" + port);
+    LogWriterUtils.getLogWriter().info("Starting client2 with server endpoint <" + hostName + ">:" + port);
     clientVM_3.invoke(InterestRegrListenerDUnitTest.class, "setUpClientVMTask", params);
     
     
@@ -296,15 +299,15 @@ public class InterestRegrListenerDUnitTest extends DistributedTestCase {
     clientVM_3.invoke(InterestRegrListenerDUnitTest.class, "closeClientCacheTask", new Object[]{true});
     Thread.sleep(2);
     Map<String,Integer> listnerMap = (Map<String, Integer>) serverVM.invoke(InterestRegrListenerDUnitTest.class, "getListenerMapTask");
-    getLogWriter().info("Listener Map " + listnerMap);
+    LogWriterUtils.getLogWriter().info("Listener Map " + listnerMap);
     int registerCount = getMapValueForKey(listnerMap,REGISTER_INTEREST);
     int unregisterCount = getMapValueForKey(listnerMap,UNREGISTER_INTEREST);
     assertEquals(3, registerCount);
     assertEquals(0, unregisterCount);
-    getLogWriter().info("Sleeping till durable client queue are expired and unregister event is called on to listener");
+    LogWriterUtils.getLogWriter().info("Sleeping till durable client queue are expired and unregister event is called on to listener");
     Thread.sleep((DURABLE_CLIENT_TIMEOUT+5)*1000);    
     listnerMap = (Map<String, Integer>) serverVM.invoke(InterestRegrListenerDUnitTest.class, "getListenerMapTask");
-    getLogWriter().info("Listener Map after sleeping " + listnerMap);
+    LogWriterUtils.getLogWriter().info("Listener Map after sleeping " + listnerMap);
     registerCount = getMapValueForKey(listnerMap,REGISTER_INTEREST);
     unregisterCount = getMapValueForKey(listnerMap,UNREGISTER_INTEREST);
     assertEquals(3, registerCount);
@@ -331,15 +334,15 @@ public class InterestRegrListenerDUnitTest extends DistributedTestCase {
     params[1] = port;
     params[2] = true;
     params[3] = "VM_1";
-    getLogWriter().info("Starting client1 with server endpoint <" + hostName + ">:" + port);
+    LogWriterUtils.getLogWriter().info("Starting client1 with server endpoint <" + hostName + ">:" + port);
     clientVM_1.invoke(InterestRegrListenerDUnitTest.class, "setUpClientVMTask", params);
     
     params[3] = "VM_2";
-    getLogWriter().info("Starting client2 with server endpoint <" + hostName + ">:" + port);
+    LogWriterUtils.getLogWriter().info("Starting client2 with server endpoint <" + hostName + ">:" + port);
     clientVM_2.invoke(InterestRegrListenerDUnitTest.class, "setUpClientVMTask", params);
     
     params[3] = "VM_3";
-    getLogWriter().info("Starting client2 with server endpoint <" + hostName + ">:" + port);
+    LogWriterUtils.getLogWriter().info("Starting client2 with server endpoint <" + hostName + ">:" + port);
     clientVM_3.invoke(InterestRegrListenerDUnitTest.class, "setUpClientVMTask", params);
     
     
@@ -358,15 +361,15 @@ public class InterestRegrListenerDUnitTest extends DistributedTestCase {
     clientVM_3.invoke(InterestRegrListenerDUnitTest.class, "closeClientCacheTask", new Object[]{true});
     Thread.sleep(2);
     Map<String,Integer> listnerMap = (Map<String, Integer>) serverVM.invoke(InterestRegrListenerDUnitTest.class, "getListenerMapTask");
-    getLogWriter().info("Listener Map " + listnerMap);
+    LogWriterUtils.getLogWriter().info("Listener Map " + listnerMap);
     int registerCount = getMapValueForKey(listnerMap,REGISTER_INTEREST);
     int unregisterCount = getMapValueForKey(listnerMap,UNREGISTER_INTEREST);
     assertEquals(3, registerCount);
     assertEquals(0, unregisterCount);
-    getLogWriter().info("Sleeping till durable client queue are expired and unregister event is called on to listener");
+    LogWriterUtils.getLogWriter().info("Sleeping till durable client queue are expired and unregister event is called on to listener");
     Thread.sleep((DURABLE_CLIENT_TIMEOUT+5)*1000);    
     listnerMap = (Map<String, Integer>) serverVM.invoke(InterestRegrListenerDUnitTest.class, "getListenerMapTask");
-    getLogWriter().info("Listener Map after sleeping " + listnerMap);
+    LogWriterUtils.getLogWriter().info("Listener Map after sleeping " + listnerMap);
     registerCount = getMapValueForKey(listnerMap,REGISTER_INTEREST);
     unregisterCount = getMapValueForKey(listnerMap,UNREGISTER_INTEREST);
     assertEquals(3, registerCount);
@@ -395,15 +398,15 @@ public class InterestRegrListenerDUnitTest extends DistributedTestCase {
     params[1] = port;
     params[2] = true;
     params[3] = "VM_1";
-    getLogWriter().info("Starting client1 with server endpoint <" + hostName + ">:" + port);
+    LogWriterUtils.getLogWriter().info("Starting client1 with server endpoint <" + hostName + ">:" + port);
     clientVM_1.invoke(InterestRegrListenerDUnitTest.class, "setUpClientVMTask", params);
     
     params[3] = "VM_2";
-    getLogWriter().info("Starting client2 with server endpoint <" + hostName + ">:" + port);
+    LogWriterUtils.getLogWriter().info("Starting client2 with server endpoint <" + hostName + ">:" + port);
     clientVM_2.invoke(InterestRegrListenerDUnitTest.class, "setUpClientVMTask", params);
     
     params[3] = "VM_3";
-    getLogWriter().info("Starting client2 with server endpoint <" + hostName + ">:" + port);
+    LogWriterUtils.getLogWriter().info("Starting client2 with server endpoint <" + hostName + ">:" + port);
     clientVM_3.invoke(InterestRegrListenerDUnitTest.class, "setUpClientVMTask", params);
     
     
@@ -431,13 +434,13 @@ public class InterestRegrListenerDUnitTest extends DistributedTestCase {
       @Override
       public boolean done() {
         Map<String,Integer> listnerMap = (Map<String, Integer>) serverVM.invoke(InterestRegrListenerDUnitTest.class, "getListenerMapTask");
-        getLogWriter().info("Listener Map " + listnerMap);
+        LogWriterUtils.getLogWriter().info("Listener Map " + listnerMap);
         registerCount = getMapValueForKey(listnerMap,REGISTER_INTEREST);
         unregisterCount = getMapValueForKey(listnerMap,UNREGISTER_INTEREST);
         if (registerCount == 3 && unregisterCount == 3) {
           return true;
         }
-        getLogWriter().info("Waiting for counts to each reach 3.  Current registerCount="+registerCount+"; unregisterCount="+unregisterCount);
+        LogWriterUtils.getLogWriter().info("Waiting for counts to each reach 3.  Current registerCount="+registerCount+"; unregisterCount="+unregisterCount);
         return false;
       }
       
@@ -447,9 +450,9 @@ public class InterestRegrListenerDUnitTest extends DistributedTestCase {
       }
     };
     
-    waitForCriterion(wc, 20000, 500, true);
+    Wait.waitForCriterion(wc, 20000, 500, true);
     
-    getLogWriter().info("Sleeping till durable client queue are expired and unregister event is called on to listener");
+    LogWriterUtils.getLogWriter().info("Sleeping till durable client queue are expired and unregister event is called on to listener");
     Thread.sleep((DURABLE_CLIENT_TIMEOUT+5)*1000);
     serverVM.invoke(InterestRegrListenerDUnitTest.class, "closeCacheTask");
   }
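
For reference, every file touched by this patch applies the same mechanical migration: helpers that the tests used to inherit from DistributedTestCase become static calls on dedicated dunit utility classes. A minimal before/after sketch, meant to sit inside an existing dunit test method and limited to calls that appear verbatim in the hunks above (the old-style calls are shown as comments):

    import com.gemstone.gemfire.test.dunit.IgnoredException;
    import com.gemstone.gemfire.test.dunit.LogWriterUtils;
    import com.gemstone.gemfire.test.dunit.Wait;

    // Old style, inherited from DistributedTestCase:
    //   getLogWriter().info("starting");
    //   addExpectedException("java.net.SocketException");
    //   pause(2000);

    // New style, static methods on the extracted utility classes:
    LogWriterUtils.getLogWriter().info("starting");
    IgnoredException.addIgnoredException("java.net.SocketException");
    Wait.pause(2000);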


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqDataUsingPoolDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqDataUsingPoolDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqDataUsingPoolDUnitTest.java
index 5feb1b5..82d6279 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqDataUsingPoolDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqDataUsingPoolDUnitTest.java
@@ -57,10 +57,17 @@ import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.internal.cache.PoolFactoryImpl;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheServerTestUtil;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * This class tests the ContinuousQuery mechanism in GemFire.
@@ -81,7 +88,7 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
     // avoid IllegalStateException from HandShake by connecting all vms to
     // system before creating ConnectionPools
     getSystem();
-    invokeInEveryVM(new SerializableRunnable("getSystem") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
       public void run() {
         getSystem();
       }
@@ -107,7 +114,7 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
 
     final int port = server.invokeInt(CqQueryUsingPoolDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
 
     String poolName = "testClientWithFeederAndCQ";
     cqDUnitTest.createPool(client, poolName, host0, port);
@@ -154,7 +161,7 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
     cqDUnitTest.createServer(server1);
     
     final int port1 = server1.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     
     final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(2);
     
@@ -177,7 +184,7 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
       cqDUnitTest.executeCQ(client, "testCQHAWithState_" + i, false, null);
     }
     
-    pause(1 * 1000);
+    Wait.pause(1 * 1000);
     
     int size = 10;
     
@@ -199,7 +206,7 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
     // Close server1.
     // To maintain the redundancy; it will make connection to endpoint-3.
     cqDUnitTest.closeServer(server1);
-    pause(3 * 1000);
+    Wait.pause(3 * 1000);
     
     
     // UPDATE-1.
@@ -217,12 +224,12 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
     //Stop cq.
     cqDUnitTest.stopCQ(client, "testCQHAWithState_0");
     
-    pause(2 * 1000);
+    Wait.pause(2 * 1000);
     
     // UPDATE with stop.
     cqDUnitTest.createServer(server3, ports[1]);
     server3.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    pause(2 * 1000);
+    Wait.pause(2 * 1000);
     
     cqDUnitTest.clearCQListenerEvents(client, "testCQHAWithState_0");
     
@@ -273,7 +280,7 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
     cqDUnitTest.createServer(server, 0, true);
     final int port = server.invokeInt(CqQueryUsingPoolDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
 
     String poolName = "testCQWithDestroysAndInvalidates";
     cqDUnitTest.createPool(client, poolName, host0, port);
@@ -352,7 +359,7 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
     /* Create Server and Client */
     cqDUnitTest.createServer(server);
     final int port = server.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
 
     String poolName1 = "testCQWithMultipleClients1";
     String poolName2 = "testCQWithMultipleClients2";
@@ -428,7 +435,7 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
     cqDUnitTest.createServer(server2, 0, false, MirrorType.KEYS);
         
     final int port1 = server1.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     
     String poolName = "testCQWithLoad";
     cqDUnitTest.createPool(client, poolName, host0, port1);
@@ -439,7 +446,7 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
     cqDUnitTest.createCQ(client, poolName, "testCQWithLoad_0", cqDUnitTest.cqs[0]);  
     cqDUnitTest.executeCQ(client, "testCQWithLoad_0", false, null); 
     
-    pause(1 * 1000);
+    Wait.pause(1 * 1000);
     
     final int size = 10;
     
@@ -483,7 +490,7 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
     final int evictionThreshold = 5;
     server1.invoke(new CacheSerializableRunnable("Create Cache Server") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Create Cache Server. ###");
+        LogWriterUtils.getLogWriter().info("### Create Cache Server. ###");
         AttributesFactory factory = new AttributesFactory();
         factory.setScope(Scope.DISTRIBUTED_ACK);
         factory.setMirrorType(MirrorType.NONE);
@@ -493,16 +500,16 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
         for (int i = 0; i < cqDUnitTest.regions.length; i++) { 
           Region region = createRegion(cqDUnitTest.regions[i], factory.createRegionAttributes());
           // Set CacheListener.
-          region.getAttributesMutator().setCacheListener(new CertifiableTestCacheListener(getLogWriter()));  
+          region.getAttributesMutator().setCacheListener(new CertifiableTestCacheListener(LogWriterUtils.getLogWriter()));  
         } 
-        pause(2000);
+        Wait.pause(2000);
         
         try {
           cqDUnitTest.startBridgeServer(0, true);
         } catch (Exception ex) {
-          fail("While starting CacheServer", ex);
+          Assert.fail("While starting CacheServer", ex);
         }
-        pause(2000);
+        Wait.pause(2000);
 
       }
     });
@@ -510,7 +517,7 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
     cqDUnitTest.createServer(server2, 0, false, MirrorType.NONE);
         
     final int port1 = server1.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
       
     String poolName = "testCQWithEviction";
     cqDUnitTest.createPool(client, poolName, host0, port1);
@@ -529,7 +536,7 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
       // Ignore  expected.
     }
     
-    pause(1 * 1000);
+    Wait.pause(1 * 1000);
     
     final int size = 10;
     
@@ -549,7 +556,7 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
     //  cqDUnitTest.waitForCreated(client, "testCQWithEviction_0", cqDUnitTest.KEY + i);
     //}
         
-    pause(2 * 1000);
+    Wait.pause(2 * 1000);
     
     server1.invoke(new CacheSerializableRunnable("validate destroy") {
       public void run2() throws CacheException {
@@ -591,7 +598,7 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
     cqDUnitTest.createServer(server1, 0, false, MirrorType.KEYS_VALUES);
 
     final int port1 = server1.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String serverHost = getServerHostName(server1.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server1.getHost());
 
 //    final String[] regions = cqDUnitTest.regions;
 //    final int[] serverPorts = new int[] {port1};
@@ -641,7 +648,7 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
     
     cqDUnitTest.createServer(server);
     final int port = server.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     
     String poolName = "testRegionEvents";
     cqDUnitTest.createPool(client, poolName, host0, port);
@@ -659,7 +666,7 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
     // Test for Event on Region Clear.
     server.invoke(new CacheSerializableRunnable("testRegionEvents"){
       public void run2()throws CacheException {
-        getLogWriter().info("### Clearing the region on the server ###");
+        LogWriterUtils.getLogWriter().info("### Clearing the region on the server ###");
         Region region = getCache().getRegion("/root/" + cqDUnitTest.regions[0]);
         for (int i = 1; i <=5; i++) {
           region.put(CqQueryUsingPoolDUnitTest.KEY+i, new Portfolio(i));
@@ -673,7 +680,7 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
     // Test for Event on Region invalidate.
     server.invoke(new CacheSerializableRunnable("testRegionEvents"){
       public void run2()throws CacheException {
-        getLogWriter().info("### Invalidate the region on the server ###");
+        LogWriterUtils.getLogWriter().info("### Invalidate the region on the server ###");
         Region region = getCache().getRegion("/root/" + cqDUnitTest.regions[0]);
         for (int i = 1; i <=5; i++) {
           region.put(CqQueryUsingPoolDUnitTest.KEY+i, new Portfolio(i));
@@ -687,7 +694,7 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
     // Test for Event on Region destroy.
     server.invoke(new CacheSerializableRunnable("testRegionEvents"){
       public void run2()throws CacheException {
-        getLogWriter().info("### Destroying the region on the server ###");
+        LogWriterUtils.getLogWriter().info("### Destroying the region on the server ###");
         Region region = getCache().getRegion("/root/" + cqDUnitTest.regions[1]);
         for (int i = 1; i <=5; i++) {
           region.put(CqQueryUsingPoolDUnitTest.KEY+i, new Portfolio(i));
@@ -697,7 +704,7 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
       }
     });
 
-    pause(1000); // wait for cq to close because of region destroy on server.
+    Wait.pause(1000); // wait for cq to close because of region destroy on server.
     //cqDUnitTest.waitForClose(client,"testRegionEvents_1");
     cqDUnitTest.validateCQCount(client,1);
 
@@ -725,7 +732,7 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
     cqDUnitTest.createServer(server);
     
     final int port = server.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
  
     String poolName = "testEventsDuringQueryExecution";
     cqDUnitTest.createPool(client, poolName, host0, port);
@@ -764,12 +771,12 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
         try {
           cqResults = cq1.executeWithInitialResults();
         } catch (Exception ex){
-          fail("CQ execution failed", ex);
+          Assert.fail("CQ execution failed", ex);
         }
 
         // Check num of events received during executeWithInitialResults.
         final TestHook testHook = CqQueryImpl.testHook;
-        waitForCriterion(new WaitCriterion() {
+        Wait.waitForCriterion(new WaitCriterion() {
 
           @Override
           public boolean done() {
@@ -828,7 +835,7 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
     // Keep updating region (async invocation).
     server.invoke(new CacheSerializableRunnable("Update Region"){
       public void run2()throws CacheException {
-        pause(200);
+        Wait.pause(200);
         client.invoke(new CacheSerializableRunnable("Releasing the latch") {
           public void run2()throws CacheException {
             // Now release the testhook so that CQListener can proceed.
@@ -853,7 +860,7 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
   public void testCqStatInitializationTimingIssue() {
     
     //The async close can cause this exception on the server
-    addExpectedException("java.net.SocketException: Broken pipe");
+    IgnoredException.addIgnoredException("java.net.SocketException: Broken pipe");
     final String regionName = "testCqStatInitializationTimingIssue";
     final String cq1Name = "testCq1";
     final Host host = Host.getHost(0);
@@ -868,11 +875,11 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
     
     // Start a  client
     client.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(client.getHost()), server1Port), regionName});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(client.getHost()), server1Port), regionName});
     
     // Start a pub client
     client2.invoke(CacheServerTestUtil.class, "createCacheClient", 
-        new Object[] {getClientPool(getServerHostName(client2.getHost()), server1Port), regionName});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(client2.getHost()), server1Port), regionName});
     
     //client has thread that invokes new and remove cq over and over
     client.invokeAsync(new CacheSerializableRunnable("Register cq") {
@@ -937,7 +944,7 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
     /* Create Server and Client */
     cqDUnitTest.createServer(server);
     final int port = server.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
   
     final String poolName1 = "pool1";
     final String poolName2 = "pool2";
@@ -952,7 +959,7 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
         try {
           queryService = (PoolManager.find(poolName1)).getQueryService();
         } catch (Exception cqe) {
-          fail("Failed to getCQService.", cqe);
+          Assert.fail("Failed to getCQService.", cqe);
         }
         try {
           CqAttributesFactory cqAf = new CqAttributesFactory();
@@ -987,7 +994,7 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
         try {
           queryService = (PoolManager.find(poolName2)).getQueryService();
         } catch (Exception cqe) {
-          fail("Failed to getCQService.", cqe);
+          Assert.fail("Failed to getCQService.", cqe);
         }
         try {
           CqAttributesFactory cqAf = new CqAttributesFactory();
@@ -1021,7 +1028,7 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
         try {
           queryService = (PoolManager.find(poolName2)).getQueryService();
         } catch (Exception cqe) {
-          fail("Failed to getCQService.", cqe);
+          Assert.fail("Failed to getCQService.", cqe);
         }
         List<String> list;
         try {
@@ -1046,7 +1053,7 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
         try {
           queryService = (PoolManager.find(poolName1)).getQueryService();
         } catch (Exception cqe) {
-          fail("Failed to getCQService.", cqe);
+          Assert.fail("Failed to getCQService.", cqe);
         }
         List<String> list;
         try {
@@ -1076,7 +1083,7 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
     /* Create Server and Client */
     cqDUnitTest.createServer(server);
     final int port = server.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
   
     final String poolName1 = "pool1";
   
@@ -1088,7 +1095,7 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
         try {
           queryService = (PoolManager.find(poolName1)).getQueryService();
         } catch (Exception cqe) {
-          fail("Failed to getCQService.", cqe);
+          Assert.fail("Failed to getCQService.", cqe);
         }
         List<String> list;
         try {
@@ -1124,11 +1131,11 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
     
     // Start client 1
     client1.invoke(CacheServerTestUtil.class, "createClientCache", 
-        new Object[] {getClientPool(getServerHostName(client1.getHost()), server1Port), regionName});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(client1.getHost()), server1Port), regionName});
     
     // Start client 2
     client2.invoke(CacheServerTestUtil.class, "createClientCache", 
-        new Object[] {getClientPool(getServerHostName(client2.getHost()), server1Port), regionName});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(client2.getHost()), server1Port), regionName});
     
     createClient1CqsAndDurableCqs(client1, regionName);
     createClient2CqsAndDurableCqs(client2, regionName);
@@ -1186,11 +1193,11 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
     
     // Start client 1
     client1.invoke(CacheServerTestUtil.class, "createClientCache", 
-        new Object[] {getClientPool(getServerHostName(client1.getHost()), server1Port), regionName, getDurableClientProperties("client1_dc", timeout)});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(client1.getHost()), server1Port), regionName, getDurableClientProperties("client1_dc", timeout)});
     
     // Start client 2
     client2.invoke(CacheServerTestUtil.class, "createClientCache", 
-        new Object[] {getClientPool(getServerHostName(client2.getHost()), server1Port), regionName, getDurableClientProperties("client2_dc", timeout)});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(client2.getHost()), server1Port), regionName, getDurableClientProperties("client2_dc", timeout)});
     
     createClient1CqsAndDurableCqs(client1, regionName);
     createClient2CqsAndDurableCqs(client2, regionName);
@@ -1251,11 +1258,11 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
     
     // Start client 1
     client1.invoke(CacheServerTestUtil.class, "createClientCache", 
-        new Object[] {getClientPool(getServerHostName(client1.getHost()), server1Port), regionName, getDurableClientProperties("client1_dc", timeout)});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(client1.getHost()), server1Port), regionName, getDurableClientProperties("client1_dc", timeout)});
     
     // Start client 2
     client2.invoke(CacheServerTestUtil.class, "createClientCache", 
-        new Object[] {getClientPool(getServerHostName(client2.getHost()), server1Port), regionName, getDurableClientProperties("client2_dc", timeout)});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(client2.getHost()), server1Port), regionName, getDurableClientProperties("client2_dc", timeout)});
     
     //create the test cqs
     createClient1CqsAndDurableCqs(client1, regionName);
@@ -1456,7 +1463,7 @@ public class CqDataUsingPoolDUnitTest extends CacheTestCase {
     });
     
     client.invoke(CacheServerTestUtil.class, "createClientCache", 
-        new Object[] {getClientPool(getServerHostName(client.getHost()), serverPort), regionName, getDurableClientProperties(dcName, durableClientTimeout)});
+        new Object[] {getClientPool(NetworkUtils.getServerHostName(client.getHost()), serverPort), regionName, getDurableClientProperties(dcName, durableClientTimeout)});
   }
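
Many of the hunks in this file only swap a bare pause(...) for Wait.pause(...). Where a test is really waiting for a condition, the same Wait class offers waitForCriterion, which this patch already uses elsewhere; a minimal sketch of that idiom, assuming a Region variable named region is in scope and using a hypothetical key:

    import com.gemstone.gemfire.test.dunit.Wait;
    import com.gemstone.gemfire.test.dunit.WaitCriterion;

    WaitCriterion entryArrived = new WaitCriterion() {
      @Override
      public boolean done() {
        // hypothetical condition: poll until the entry shows up
        return region.containsKey("key-1");
      }
      @Override
      public String description() {
        return "waiting for key-1 to arrive";
      }
    };
    // 20 second timeout, 500 ms polling interval, throw on timeout
    Wait.waitForCriterion(entryArrived, 20000, 500, true);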
   
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqDataUsingPoolOptimizedExecuteDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqDataUsingPoolOptimizedExecuteDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqDataUsingPoolOptimizedExecuteDUnitTest.java
index e157fb0..47b9a89 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqDataUsingPoolOptimizedExecuteDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqDataUsingPoolOptimizedExecuteDUnitTest.java
@@ -18,6 +18,8 @@ package com.gemstone.gemfire.cache.query.cq.dunit;
 
 
 import com.gemstone.gemfire.cache.query.internal.cq.CqServiceImpl;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 
 /**
@@ -31,10 +33,10 @@ public class CqDataUsingPoolOptimizedExecuteDUnitTest extends CqDataUsingPoolDUn
   }
 
   public void setUp() throws Exception {
-    addExpectedException("Read timed out");
-    addExpectedException("java.net.SocketException");
+    IgnoredException.addIgnoredException("Read timed out");
+    IgnoredException.addIgnoredException("java.net.SocketException");
     super.setUp();
-    invokeInEveryVM(new SerializableRunnable("set test hook") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("set test hook") {
       public void run() {
         CqServiceImpl.EXECUTE_QUERY_DURING_INIT = false;
       }
@@ -42,12 +44,11 @@ public class CqDataUsingPoolOptimizedExecuteDUnitTest extends CqDataUsingPoolDUn
   }
   
   @Override
-  public void tearDown2() throws Exception {
-    invokeInEveryVM(new SerializableRunnable("getSystem") {
+  protected final void preTearDownCacheTestCase() throws Exception {
+    Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
       public void run() {
         CqServiceImpl.EXECUTE_QUERY_DURING_INIT = true;
       }
     });
-    super.tearDown2();
   }
 }
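
The subclass above also moves its cleanup from tearDown2 into the new preTearDownCacheTestCase hook. A sketch of the same shape, assuming (this part is an assumption, not shown in the patch) that addIgnoredException returns a handle whose remove() unregisters just that expectation; the class and field names are hypothetical:

    import com.gemstone.gemfire.test.dunit.IgnoredException;

    public class ExampleOptimizedExecuteDUnitTest extends CqDataUsingPoolDUnitTest {
      private IgnoredException readTimedOut;   // hypothetical field

      public ExampleOptimizedExecuteDUnitTest(String name) {
        super(name);
      }

      @Override
      public void setUp() throws Exception {
        readTimedOut = IgnoredException.addIgnoredException("Read timed out");
        super.setUp();
      }

      @Override
      protected final void preTearDownCacheTestCase() throws Exception {
        readTimedOut.remove();   // assumption: handle-based removal
      }
    }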

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqPerfDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqPerfDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqPerfDUnitTest.java
index cdc11f2..a9747f2 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqPerfDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqPerfDUnitTest.java
@@ -35,9 +35,14 @@ import com.gemstone.gemfire.cache.query.internal.cq.CqServiceImpl;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 /**
  * This class tests the ContinuousQuery mechanism in GemFire.
@@ -59,7 +64,7 @@ public class CqPerfDUnitTest extends CacheTestCase {
     // avoid IllegalStateException from HandShake by connecting all vms to
     // system before creating connection pools
     getSystem();
-    invokeInEveryVM(new SerializableRunnable("getSystem") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
       public void run() {
         getSystem();
       }
@@ -82,7 +87,7 @@ public class CqPerfDUnitTest extends CacheTestCase {
     
     final int port = server.invokeInt(CqQueryDUnitTest.class,
     "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     
     // Create client.
     cqDUnitTest.createClient(client, port, host0);
@@ -90,7 +95,7 @@ public class CqPerfDUnitTest extends CacheTestCase {
     
     client.invoke(new CacheSerializableRunnable("Create CQ :" + cqName) {
       public void run2() throws CacheException {
-        getLogWriter().info("### Create CQ. ###" + cqName);
+        LogWriterUtils.getLogWriter().info("### Create CQ. ###" + cqName);
         // Get CQ Service.
         QueryService cqService = null;
         try {
@@ -101,7 +106,7 @@ public class CqPerfDUnitTest extends CacheTestCase {
         }
         // Create CQ Attributes.
         CqAttributesFactory cqf = new CqAttributesFactory();
-        CqListener[] cqListeners = {new CqTimeTestListener(getLogWriter())};
+        CqListener[] cqListeners = {new CqTimeTestListener(LogWriterUtils.getLogWriter())};
         ((CqTimeTestListener)cqListeners[0]).cqName = cqName;
         
         cqf.initCqListeners(cqListeners);
@@ -113,7 +118,7 @@ public class CqPerfDUnitTest extends CacheTestCase {
           assertTrue("newCq() state mismatch", cq1.getState().isStopped());
           cq1.execute();
         } catch (Exception ex){
-          getLogWriter().info("CqService is :" + cqService);
+          LogWriterUtils.getLogWriter().info("CqService is :" + cqService);
           ex.printStackTrace();
           AssertionError err = new AssertionError("Failed to create CQ " + cqName + " . ");
           err.initCause(ex);
@@ -126,14 +131,14 @@ public class CqPerfDUnitTest extends CacheTestCase {
     
     // Create values.
     cqDUnitTest.createValuesWithTime(client, cqDUnitTest.regions[0], size);
-    pause(5000);
+    Wait.pause(5000);
     
     // Update values
     cqDUnitTest.createValuesWithTime(client, cqDUnitTest.regions[0], size);
     
     client.invoke(new CacheSerializableRunnable("Validate CQs") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Validating CQ. ### " + cqName);
+        LogWriterUtils.getLogWriter().info("### Validating CQ. ### " + cqName);
         // Get CQ Service.
         QueryService cqService = null;
         try {          
@@ -167,7 +172,7 @@ public class CqPerfDUnitTest extends CacheTestCase {
       }
     });
     
-    pause( 10 * 60 * 1000);
+    Wait.pause( 10 * 60 * 1000);
     
     // Close.
     cqDUnitTest.closeClient(client);
@@ -187,7 +192,7 @@ public class CqPerfDUnitTest extends CacheTestCase {
     
     cqDUnitTest.createServer(server);
     final int port = server.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     cqDUnitTest.createClient(client, port, host0);
     
 
@@ -213,8 +218,8 @@ public class CqPerfDUnitTest extends CacheTestCase {
         try {
           cqService = ((DefaultQueryService)getCache().getQueryService()).getCqService();
         } catch (Exception ex) {
-          getLogWriter().info("Failed to get the internal CqService.", ex);
-          fail ("Failed to get the internal CqService.", ex);
+          LogWriterUtils.getLogWriter().info("Failed to get the internal CqService.", ex);
+          Assert.fail ("Failed to get the internal CqService.", ex);
         }
 
         Collection<? extends InternalCqQuery> cqs = cqService.getAllCqs();
@@ -244,8 +249,8 @@ public class CqPerfDUnitTest extends CacheTestCase {
         try {
           cqService = ((DefaultQueryService)getCache().getQueryService()).getCqService();
         } catch (Exception ex) {
-          getLogWriter().info("Failed to get the internal CqService.", ex);
-          fail ("Failed to get the internal CqService.", ex);
+          LogWriterUtils.getLogWriter().info("Failed to get the internal CqService.", ex);
+          Assert.fail ("Failed to get the internal CqService.", ex);
         }
         Collection<? extends InternalCqQuery> cqs = cqService.getAllCqs();
         for (InternalCqQuery cq: cqs){
@@ -275,8 +280,8 @@ public class CqPerfDUnitTest extends CacheTestCase {
         try {
           cqService = ((DefaultQueryService)getCache().getQueryService()).getCqService();
         } catch (Exception ex) {
-          getLogWriter().info("Failed to get the internal CqService.", ex);
-          fail ("Failed to get the internal CqService.", ex);
+          LogWriterUtils.getLogWriter().info("Failed to get the internal CqService.", ex);
+          Assert.fail ("Failed to get the internal CqService.", ex);
         }
 
         Collection<? extends InternalCqQuery> cqs = cqService.getAllCqs();
@@ -307,8 +312,8 @@ public class CqPerfDUnitTest extends CacheTestCase {
         try {
           cqService = ((DefaultQueryService)getCache().getQueryService()).getCqService();
         } catch (Exception ex) {
-          getLogWriter().info("Failed to get the internal CqService.", ex);
-          fail ("Failed to get the internal CqService.", ex);
+          LogWriterUtils.getLogWriter().info("Failed to get the internal CqService.", ex);
+          Assert.fail ("Failed to get the internal CqService.", ex);
         }
         Collection<? extends InternalCqQuery> cqs = cqService.getAllCqs();
         for (InternalCqQuery cq: cqs){
@@ -337,8 +342,8 @@ public class CqPerfDUnitTest extends CacheTestCase {
         try {
           cqService = ((DefaultQueryService)getCache().getQueryService()).getCqService();
         } catch (Exception ex) {
-          getLogWriter().info("Failed to get the internal CqService.", ex);
-          fail ("Failed to get the internal CqService.", ex);
+          LogWriterUtils.getLogWriter().info("Failed to get the internal CqService.", ex);
+          Assert.fail ("Failed to get the internal CqService.", ex);
         }
 
         Collection<? extends InternalCqQuery> cqs = cqService.getAllCqs();
@@ -369,8 +374,8 @@ public class CqPerfDUnitTest extends CacheTestCase {
         try {
           cqService = ((DefaultQueryService)getCache().getQueryService()).getCqService();
         } catch (Exception ex) {
-          getLogWriter().info("Failed to get the internal CqService.", ex);
-          fail ("Failed to get the internal CqService.", ex);
+          LogWriterUtils.getLogWriter().info("Failed to get the internal CqService.", ex);
+          Assert.fail ("Failed to get the internal CqService.", ex);
         }
         Collection<? extends InternalCqQuery> cqs = cqService.getAllCqs();
         for (InternalCqQuery cq: cqs){
@@ -407,7 +412,7 @@ public class CqPerfDUnitTest extends CacheTestCase {
     
     cqDUnitTest.createServer(server);
     final int port = server.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     cqDUnitTest.createClient(client, port, host0);
     
     // Create and Execute same kind of CQs.
@@ -522,7 +527,7 @@ public class CqPerfDUnitTest extends CacheTestCase {
     
     cqDUnitTest.createServer(server);
     final int port = server.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
         
     for (int clientIndex=0; clientIndex < 3; clientIndex++){
       cqDUnitTest.createClient(clients[clientIndex], port, host0);
@@ -681,7 +686,7 @@ public class CqPerfDUnitTest extends CacheTestCase {
     VM clients[] = new VM[]{client1, client2};
     
     final int port1 = server1.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     // Create client.
     
     // Create client with redundancyLevel -1
@@ -706,7 +711,7 @@ public class CqPerfDUnitTest extends CacheTestCase {
     validateMatchingCqs(server1, numCQs, cqDUnitTest.cqs[0], 1 * clients.length);
     validateMatchingCqs(server1, numCQs, cqDUnitTest.cqs[1], 1 * clients.length);
     
-    pause(1 * 1000);
+    Wait.pause(1 * 1000);
     
     // CREATE.
     cqDUnitTest.createValues(server1, cqDUnitTest.regions[0], 10);
@@ -716,7 +721,7 @@ public class CqPerfDUnitTest extends CacheTestCase {
       cqDUnitTest.waitForCreated(client1, "testMatchingCQsWithMultipleServers_0", CqQueryDUnitTest.KEY+i);
     }
 
-    pause(1 * 1000);
+    Wait.pause(1 * 1000);
     
     cqDUnitTest.createServer(server2, ports[0]);
     
@@ -725,7 +730,7 @@ public class CqPerfDUnitTest extends CacheTestCase {
     System.out.println("### Port on which server1 running : " + port1 + 
         " Server2 running : " + port2);
 
-    pause(3 * 1000);
+    Wait.pause(3 * 1000);
     
 
     // UPDATE - 1.
@@ -767,7 +772,7 @@ public class CqPerfDUnitTest extends CacheTestCase {
     cqDUnitTest.closeServer(server1);
     
     // Fail over should happen.
-    pause(5 * 1000);
+    Wait.pause(5 * 1000);
     
     validateMatchingCqs(server2, numCQs, cqDUnitTest.cqs[0], 1 * clients.length);
 
@@ -844,7 +849,7 @@ public class CqPerfDUnitTest extends CacheTestCase {
     VM clients[] = new VM[]{client1, client2};
     
     final int port1 = server1.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(1);
     
     cqDUnitTest.createLocalRegion(client1, new int[] {port1, ports[0]}, host0, "-1", cqDUnitTest.regions);
@@ -893,7 +898,7 @@ public class CqPerfDUnitTest extends CacheTestCase {
     
     final int port1 = server1.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
     final int port2 = server2.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
 
     // Create client.
 //    final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(1);
@@ -922,7 +927,7 @@ public class CqPerfDUnitTest extends CacheTestCase {
           false, null);
     }
 
-    pause(1 * 1000);
+    Wait.pause(1 * 1000);
     
     // CREATE.
     int size = 1000;
@@ -949,7 +954,7 @@ public class CqPerfDUnitTest extends CacheTestCase {
           "testPerformanceForMatchingCQs_0", CqQueryDUnitTest.KEY+k);
     }
  
-    pause(1 * 1000);
+    Wait.pause(1 * 1000);
     printCqQueryExecutionTime(server1);
     printCqQueryExecutionTime(server2);
     
@@ -968,8 +973,8 @@ public class CqPerfDUnitTest extends CacheTestCase {
         try {
           cqService = (CqServiceImpl) ((DefaultQueryService)getCache().getQueryService()).getCqService();
         } catch (Exception ex) {
-          getLogWriter().info("Failed to get the internal CqService.", ex);
-          fail ("Failed to get the internal CqService.", ex);
+          LogWriterUtils.getLogWriter().info("Failed to get the internal CqService.", ex);
+          Assert.fail ("Failed to get the internal CqService.", ex);
         }
 
         HashMap matchedCqMap = cqService.getMatchingCqMap();
@@ -994,12 +999,12 @@ public class CqPerfDUnitTest extends CacheTestCase {
         try {
           cqService = (CqServiceImpl) ((DefaultQueryService)getCache().getQueryService()).getCqService();
         } catch (Exception ex) {
-          getLogWriter().info("Failed to get the internal CqService.", ex);
-          fail ("Failed to get the internal CqService.", ex);
+          LogWriterUtils.getLogWriter().info("Failed to get the internal CqService.", ex);
+          Assert.fail ("Failed to get the internal CqService.", ex);
         }
 
         long timeTaken = cqService.getCqServiceVsdStats().getCqQueryExecutionTime();
-        getLogWriter().info("Total Time taken to Execute CQ Query :" + timeTaken);
+        LogWriterUtils.getLogWriter().info("Total Time taken to Execute CQ Query :" + timeTaken);
         //System.out.println("Total Time taken to Execute CQ Query :" + timeTaken);
       }
     });

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqPerfUsingPoolDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqPerfUsingPoolDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqPerfUsingPoolDUnitTest.java
index 059c8a0..3c7e3b5 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqPerfUsingPoolDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqPerfUsingPoolDUnitTest.java
@@ -35,9 +35,14 @@ import com.gemstone.gemfire.cache.query.internal.cq.InternalCqQuery;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 /**
  * This class tests the ContinuousQuery mechanism in GemFire.
@@ -59,7 +64,7 @@ public class CqPerfUsingPoolDUnitTest extends CacheTestCase {
     // avoid IllegalStateException from HandShake by connecting all vms to
     // system before creating connection pools
     getSystem();
-    invokeInEveryVM(new SerializableRunnable("getSystem") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
       public void run() {
         getSystem();
       }
@@ -82,7 +87,7 @@ public class CqPerfUsingPoolDUnitTest extends CacheTestCase {
     
     final int port = server.invokeInt(CqQueryUsingPoolDUnitTest.class,
     "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     
     // Create client.
     cqDUnitTest.createClient(client, port, host0);
@@ -90,7 +95,7 @@ public class CqPerfUsingPoolDUnitTest extends CacheTestCase {
     
     client.invoke(new CacheSerializableRunnable("Create CQ :" + cqName) {
       public void run2() throws CacheException {
-        getLogWriter().info("### Create CQ. ###" + cqName);
+        LogWriterUtils.getLogWriter().info("### Create CQ. ###" + cqName);
         // Get CQ Service.
         QueryService cqService = null;
         try {
@@ -101,7 +106,7 @@ public class CqPerfUsingPoolDUnitTest extends CacheTestCase {
         }
         // Create CQ Attributes.
         CqAttributesFactory cqf = new CqAttributesFactory();
-        CqListener[] cqListeners = {new CqTimeTestListener(getLogWriter())};
+        CqListener[] cqListeners = {new CqTimeTestListener(LogWriterUtils.getLogWriter())};
         ((CqTimeTestListener)cqListeners[0]).cqName = cqName;
         
         cqf.initCqListeners(cqListeners);
@@ -113,7 +118,7 @@ public class CqPerfUsingPoolDUnitTest extends CacheTestCase {
           assertTrue("newCq() state mismatch", cq1.getState().isStopped());
           cq1.execute();
         } catch (Exception ex){
-          getLogWriter().info("CqService is :" + cqService);
+          LogWriterUtils.getLogWriter().info("CqService is :" + cqService);
           ex.printStackTrace();
           AssertionError err = new AssertionError("Failed to create CQ " + cqName + " . ");
           err.initCause(ex);
@@ -126,14 +131,14 @@ public class CqPerfUsingPoolDUnitTest extends CacheTestCase {
     
     // Create values.
     cqDUnitTest.createValuesWithTime(client, cqDUnitTest.regions[0], size);
-    pause(5000);
+    Wait.pause(5000);
     
     // Update values
     cqDUnitTest.createValuesWithTime(client, cqDUnitTest.regions[0], size);
     
     client.invoke(new CacheSerializableRunnable("Validate CQs") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Validating CQ. ### " + cqName);
+        LogWriterUtils.getLogWriter().info("### Validating CQ. ### " + cqName);
         // Get CQ Service.
         QueryService cqService = null;
         try {          
@@ -167,7 +172,7 @@ public class CqPerfUsingPoolDUnitTest extends CacheTestCase {
       }
     });
     
-    pause( 10 * 60 * 1000);
+    Wait.pause( 10 * 60 * 1000);
     
     // Close.
     cqDUnitTest.closeClient(client);
@@ -187,7 +192,7 @@ public class CqPerfUsingPoolDUnitTest extends CacheTestCase {
     
     cqDUnitTest.createServer(server);
     final int port = server.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     // cqDUnitTest.createClient(client, port, host0);
 
     String poolName = "testKeyMaintainance";
@@ -215,8 +220,8 @@ public class CqPerfUsingPoolDUnitTest extends CacheTestCase {
         try {
           cqService = ((DefaultQueryService)getCache().getQueryService()).getCqService();
         } catch (Exception ex) {
-          getLogWriter().info("Failed to get the internal CqService.", ex);
-          fail ("Failed to get the internal CqService.", ex);
+          LogWriterUtils.getLogWriter().info("Failed to get the internal CqService.", ex);
+          Assert.fail ("Failed to get the internal CqService.", ex);
         }
 
         Collection<? extends InternalCqQuery> cqs = cqService.getAllCqs();
@@ -247,8 +252,8 @@ public class CqPerfUsingPoolDUnitTest extends CacheTestCase {
         try {
           cqService = ((DefaultQueryService)getCache().getQueryService()).getCqService();
         } catch (Exception ex) {
-          getLogWriter().info("Failed to get the internal CqService.", ex);
-          fail ("Failed to get the internal CqService.", ex);
+          LogWriterUtils.getLogWriter().info("Failed to get the internal CqService.", ex);
+          Assert.fail ("Failed to get the internal CqService.", ex);
         }
         Collection<? extends InternalCqQuery> cqs = cqService.getAllCqs();
         for (InternalCqQuery cq: cqs){
@@ -278,8 +283,8 @@ public class CqPerfUsingPoolDUnitTest extends CacheTestCase {
         try {
           cqService = ((DefaultQueryService)getCache().getQueryService()).getCqService();
         } catch (Exception ex) {
-          getLogWriter().info("Failed to get the internal CqService.", ex);
-          fail ("Failed to get the internal CqService.", ex);
+          LogWriterUtils.getLogWriter().info("Failed to get the internal CqService.", ex);
+          Assert.fail ("Failed to get the internal CqService.", ex);
         }
 
         Collection<? extends InternalCqQuery> cqs = cqService.getAllCqs();
@@ -311,8 +316,8 @@ public class CqPerfUsingPoolDUnitTest extends CacheTestCase {
         try {
           cqService = ((DefaultQueryService)getCache().getQueryService()).getCqService();
         } catch (Exception ex) {
-          getLogWriter().info("Failed to get the internal CqService.", ex);
-          fail ("Failed to get the internal CqService.", ex);
+          LogWriterUtils.getLogWriter().info("Failed to get the internal CqService.", ex);
+          Assert.fail ("Failed to get the internal CqService.", ex);
         }
         Collection<? extends InternalCqQuery> cqs = cqService.getAllCqs();
         for (InternalCqQuery cq: cqs){
@@ -340,8 +345,8 @@ public class CqPerfUsingPoolDUnitTest extends CacheTestCase {
         try {
           cqService = ((DefaultQueryService)getCache().getQueryService()).getCqService();
         } catch (Exception ex) {
-          getLogWriter().info("Failed to get the internal CqService.", ex);
-          fail ("Failed to get the internal CqService.", ex);
+          LogWriterUtils.getLogWriter().info("Failed to get the internal CqService.", ex);
+          Assert.fail ("Failed to get the internal CqService.", ex);
         }
 
         Collection<? extends InternalCqQuery> cqs = cqService.getAllCqs();
@@ -371,8 +376,8 @@ public class CqPerfUsingPoolDUnitTest extends CacheTestCase {
         try {
           cqService = ((DefaultQueryService)getCache().getQueryService()).getCqService();
         } catch (Exception ex) {
-          getLogWriter().info("Failed to get the internal CqService.", ex);
-          fail ("Failed to get the internal CqService.", ex);
+          LogWriterUtils.getLogWriter().info("Failed to get the internal CqService.", ex);
+          Assert.fail ("Failed to get the internal CqService.", ex);
         }
         Collection<? extends InternalCqQuery> cqs = cqService.getAllCqs();
         for (InternalCqQuery cq: cqs){
@@ -407,7 +412,7 @@ public class CqPerfUsingPoolDUnitTest extends CacheTestCase {
     
     cqDUnitTest.createServer(server);
     final int port = server.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     //cqDUnitTest.createClient(client, port, host0);
     
     String poolName = "testMatchingCqs";
@@ -525,7 +530,7 @@ public class CqPerfUsingPoolDUnitTest extends CacheTestCase {
     
     cqDUnitTest.createServer(server);
     final int port = server.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     String poolName = "testMatchingCQWithMultipleClients";     
     for (int clientIndex=0; clientIndex < 3; clientIndex++){
       String cPoolName = "testMatchingCQWithMultipleClients" + clientIndex;
@@ -688,7 +693,7 @@ public class CqPerfUsingPoolDUnitTest extends CacheTestCase {
     VM clients[] = new VM[]{client1, client2};
     
     final int port1 = server1.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     // Create client.
     
     // Create client with redundancyLevel -1
@@ -718,7 +723,7 @@ public class CqPerfUsingPoolDUnitTest extends CacheTestCase {
     validateMatchingCqs(server1, numCQs, cqDUnitTest.cqs[0], 1 * clients.length);
     validateMatchingCqs(server1, numCQs, cqDUnitTest.cqs[1], 1 * clients.length);
     
-    pause(1 * 1000);
+    Wait.pause(1 * 1000);
     
     // CREATE.
     cqDUnitTest.createValues(server1, cqDUnitTest.regions[0], 10);
@@ -729,7 +734,7 @@ public class CqPerfUsingPoolDUnitTest extends CacheTestCase {
           "testMatchingCQsWithMultipleServers_0", CqQueryUsingPoolDUnitTest.KEY+i);
     }
 
-    pause(1 * 1000);
+    Wait.pause(1 * 1000);
     
     cqDUnitTest.createServer(server2, ports[0]);
     
@@ -738,7 +743,7 @@ public class CqPerfUsingPoolDUnitTest extends CacheTestCase {
     System.out.println("### Port on which server1 running : " + port1 + 
         " Server2 running : " + port2);
 
-    pause(3 * 1000);
+    Wait.pause(3 * 1000);
     
 
     // UPDATE - 1.
@@ -778,7 +783,7 @@ public class CqPerfUsingPoolDUnitTest extends CacheTestCase {
     cqDUnitTest.closeServer(server1);
     
     // Fail over should happen.
-    pause(5 * 1000);
+    Wait.pause(5 * 1000);
     
     validateMatchingCqs(server2, numCQs, cqDUnitTest.cqs[0], 1 * clients.length);
 
@@ -853,7 +858,7 @@ public class CqPerfUsingPoolDUnitTest extends CacheTestCase {
     
     final int port1 = server1.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
     final int port2 = server2.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
 
     // Create client.
 //    final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(1);
@@ -882,7 +887,7 @@ public class CqPerfUsingPoolDUnitTest extends CacheTestCase {
           false, null);
     }
 
-    pause(1 * 1000);
+    Wait.pause(1 * 1000);
     
     // CREATE.
     int size = 1000;
@@ -909,7 +914,7 @@ public class CqPerfUsingPoolDUnitTest extends CacheTestCase {
           "testPerformanceForMatchingCQs_0", CqQueryUsingPoolDUnitTest.KEY+k);
     }
  
-    pause(1 * 1000);
+    Wait.pause(1 * 1000);
     printCqQueryExecutionTime(server1);
     printCqQueryExecutionTime(server2);
     
@@ -928,8 +933,8 @@ public class CqPerfUsingPoolDUnitTest extends CacheTestCase {
         try {
           cqService = (CqServiceImpl) ((DefaultQueryService)getCache().getQueryService()).getCqService();
         } catch (Exception ex) {
-          getLogWriter().info("Failed to get the internal CqService.", ex);
-          fail ("Failed to get the internal CqService.", ex);
+          LogWriterUtils.getLogWriter().info("Failed to get the internal CqService.", ex);
+          Assert.fail ("Failed to get the internal CqService.", ex);
         }
 
         HashMap matchedCqMap = cqService.getMatchingCqMap();
@@ -954,12 +959,12 @@ public class CqPerfUsingPoolDUnitTest extends CacheTestCase {
         try {
           cqService = (CqServiceImpl) ((DefaultQueryService)getCache().getQueryService()).getCqService();
         } catch (Exception ex) {
-          getLogWriter().info("Failed to get the internal CqService.", ex);
-          fail ("Failed to get the internal CqService.", ex);
+          LogWriterUtils.getLogWriter().info("Failed to get the internal CqService.", ex);
+          Assert.fail ("Failed to get the internal CqService.", ex);
         }
 
         long timeTaken = cqService.getCqServiceVsdStats().getCqQueryExecutionTime();
-        getLogWriter().info("Total Time taken to Execute CQ Query :" + timeTaken);
+        LogWriterUtils.getLogWriter().info("Total Time taken to Execute CQ Query :" + timeTaken);
         System.out.println("Total Time taken to Execute CQ Query :" + timeTaken);
       }
     });


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/NetworkUtils.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/NetworkUtils.java b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/NetworkUtils.java
new file mode 100755
index 0000000..d83aecd
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/NetworkUtils.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.test.dunit;
+
+import java.net.UnknownHostException;
+
+import com.gemstone.gemfire.internal.SocketCreator;
+
+/**
+ * <code>NetworkUtils</code> provides static utility methods to perform
+ * network DNS lookups or similar actions.
+ * 
+ * These methods can be used directly: <code>NetworkUtils.getIPLiteral()</code>, 
+ * however, they are intended to be referenced through static import:
+ *
+ * <pre>
+ * import static com.gemstone.gemfire.test.dunit.NetworkUtils.*;
+ *    ...
+ *    String hostName = getIPLiteral();
+ * </pre>
+ *
+ * Extracted from DistributedTestCase.
+ */
+public class NetworkUtils {
+
+  protected NetworkUtils() {
+  }
+  
+  /** 
+   * Get the IP literal name for the current host. Use this instead of  
+   * "localhost" to avoid IPv6 name resolution bugs in the JDK/machine config.
+   * This method honors java.net.preferIPvAddresses
+   * 
+   * @return an IP literal which honors java.net.preferIPvAddresses
+   */
+  public static String getIPLiteral() {
+    try {
+      return SocketCreator.getLocalHost().getHostAddress();
+    } catch (UnknownHostException e) {
+      throw new Error("Problem determining host IP address", e);
+    }
+  }
+
+  /** 
+   * Get the host name to use for a server cache in client/server dunit
+   * testing.
+   * 
+   * @param host the dunit Host to get a machine host name for
+   * @return the host name
+   */
+  public static String getServerHostName(final Host host) {
+    String serverBindAddress = System.getProperty("gemfire.server-bind-address");
+    return serverBindAddress != null ? serverBindAddress : host.getHostName();
+  }
+}
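
Earlier hunks in this patch show the most common call site for this class: resolving the server host before pointing a client pool at a cache server. A minimal sketch of that usage; the pool name and the port lookup are placeholders for whatever helper a given test already uses:

    import com.gemstone.gemfire.test.dunit.Host;
    import com.gemstone.gemfire.test.dunit.NetworkUtils;
    import com.gemstone.gemfire.test.dunit.VM;

    Host host = Host.getHost(0);
    VM server = host.getVM(0);
    VM client = host.getVM(1);

    // Honors gemfire.server-bind-address when that system property is set.
    String serverHost = NetworkUtils.getServerHostName(server.getHost());
    // ... start a cache server on 'server', look up its port, then e.g.:
    // cqDUnitTest.createPool(client, "examplePool", serverHost, port);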

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/RMIException.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/RMIException.java b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/RMIException.java
index 8a555d2..1a5fac4 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/RMIException.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/RMIException.java
@@ -42,8 +42,8 @@ import com.gemstone.gemfire.GemFireException;
  * see hydra.RemoteTestModuleIF
  *
  * @author David Whitlock
- *
  */
+@SuppressWarnings("serial")
 public class RMIException extends GemFireException {
 
   /** SHADOWED FIELD that holds the cause exception (as opposed to the

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/RepeatableRunnable.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/RepeatableRunnable.java b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/RepeatableRunnable.java
index 32e4369..9695c32 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/RepeatableRunnable.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/RepeatableRunnable.java
@@ -20,8 +20,10 @@ package com.gemstone.gemfire.test.dunit;
  * A RepeatableRunnable is an object that implements a method that
  * can be invoked repeatably without causing any side effects.
  *
- * @author  dmonnie
+ * @author dmonnie
+ * @deprecated Please use SerializableRunnable with {@link com.jayway.awaitility.Awaitility} instead.
  */
+@Deprecated
 public interface RepeatableRunnable {
   
   public void runRepeatingIfNecessary(long repeatTimeoutMs);
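
The deprecation note above points at Awaitility as the replacement. A minimal sketch of that suggestion, assuming the com.jayway.awaitility artifact named in the javadoc is on the test classpath, that vm is a dunit VM in scope, and that this runs inside a CacheTestCase-derived test (the region name is hypothetical):

    import static com.jayway.awaitility.Awaitility.await;

    import java.util.concurrent.TimeUnit;

    import com.gemstone.gemfire.test.dunit.SerializableRunnable;

    vm.invoke(new SerializableRunnable("wait for ten entries") {
      @Override
      public void run() {
        // Poll for the condition instead of re-running the whole task.
        await().atMost(30, TimeUnit.SECONDS)
               .until(() -> getCache().getRegion("example").size() == 10);
      }
    });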

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/SerializableCallableIF.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/SerializableCallableIF.java b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/SerializableCallableIF.java
index c3d3ae7..ddeb71e 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/SerializableCallableIF.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/SerializableCallableIF.java
@@ -19,6 +19,8 @@ package com.gemstone.gemfire.test.dunit;
 import java.io.Serializable;
 import java.util.concurrent.Callable;
 
+/**
+ * Interface for {@link SerializableCallable} to enable use with lambdas.
+ */
 public interface SerializableCallableIF<T> extends Serializable, Callable<T> {
-
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/SerializableRunnable.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/SerializableRunnable.java b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/SerializableRunnable.java
index 658924a..353cdc7 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/SerializableRunnable.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/SerializableRunnable.java
@@ -46,8 +46,7 @@ import java.io.Serializable;
  *  }
  * </PRE>
  */
-public abstract class SerializableRunnable
-  implements SerializableRunnableIF {
+public abstract class SerializableRunnable implements SerializableRunnableIF {
 
   private static final long serialVersionUID = 7584289978241650456L;
   

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/SerializableRunnableIF.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/SerializableRunnableIF.java b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/SerializableRunnableIF.java
index 648e4f8..5e5467d 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/SerializableRunnableIF.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/SerializableRunnableIF.java
@@ -18,6 +18,8 @@ package com.gemstone.gemfire.test.dunit;
 
 import java.io.Serializable;
 
+/**
+ * Interface for {@link SerializableRunnable} to enable use with lambdas.
+ */
 public interface SerializableRunnableIF extends Serializable, Runnable {
-
 }
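
Since both *IF interfaces are serializable and functional, VM invocations can be
written as lambdas. A hypothetical sketch (the property name is a placeholder,
and the exact return type of the callable overload may differ):

    import com.gemstone.gemfire.test.dunit.Host;
    import com.gemstone.gemfire.test.dunit.VM;

    public class LambdaInvokeExample {
      public static void example() {
        VM vm = Host.getHost(0).getVM(0);
        // SerializableRunnableIF: a void action shipped to the remote VM
        vm.invoke(() -> { System.setProperty("example.flag", "true"); });
        // SerializableCallableIF: a value-returning call whose result is sent back
        Object value = vm.invoke(() -> System.getProperty("example.flag"));
      }
    }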

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/StoppableWaitCriterion.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/StoppableWaitCriterion.java b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/StoppableWaitCriterion.java
new file mode 100755
index 0000000..b7be9c5
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/StoppableWaitCriterion.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.test.dunit;
+
+/**
+ * Defines an asynchronous wait criterion with an additional method that
+ * allows the wait to stop early, before the timeout expires.
+ *
+ * Extracted from DistributedTestCase.
+ * 
+ * @deprecated Use {@link com.jayway.awaitility.Awaitility} instead.
+ */
+public interface StoppableWaitCriterion extends WaitCriterion {
+  
+  /**
+   * If this method returns true then quit waiting even if we are not done.
+   * This allows a wait to fail early.
+   */
+  public boolean stopWaiting();
+  
+}
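
A hypothetical sketch of a criterion that gives up early when an unrecoverable
condition is detected (the flags below are placeholders):

    import java.util.concurrent.atomic.AtomicBoolean;

    import com.gemstone.gemfire.test.dunit.StoppableWaitCriterion;

    // Waits for "ready", but stops waiting immediately if "failed" is ever set.
    public class StoppableCriterionExample {
      private static final AtomicBoolean ready = new AtomicBoolean();
      private static final AtomicBoolean failed = new AtomicBoolean();

      public static StoppableWaitCriterion readyOrFailed() {
        return new StoppableWaitCriterion() {
          public boolean done() {
            return ready.get();
          }
          public String description() {
            return "ready flag was never set";
          }
          public boolean stopWaiting() {
            return failed.get();
          }
        };
      }
    }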

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/ThreadUtils.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/ThreadUtils.java b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/ThreadUtils.java
new file mode 100755
index 0000000..6ba87ed
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/ThreadUtils.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.test.dunit;
+
+import static org.junit.Assert.fail;
+import static com.gemstone.gemfire.test.dunit.Jitter.*;
+
+import org.apache.logging.log4j.Logger;
+
+import com.gemstone.gemfire.internal.OSProcess;
+import com.gemstone.gemfire.internal.logging.LogService;
+
+/**
+ * <code>ThreadUtils</code> provides static utility methods to perform
+ * thread-related actions such as dumping thread stacks.
+ * 
+ * These methods can be used directly, as in <code>ThreadUtils.dumpAllStacks()</code>; 
+ * however, they are intended to be referenced through static import:
+ *
+ * <pre>
+ * import static com.gemstone.gemfire.test.dunit.ThreadUtils.*;
+ *    ...
+ *    dumpAllStacks();
+ * </pre>
+ *
+ * Extracted from DistributedTestCase.
+ */
+public class ThreadUtils {
+  
+  private static final Logger logger = LogService.getLogger();
+
+  protected ThreadUtils() {
+  }
+  
+  /**
+   * Print stack dumps for all vms.
+   * 
+   * @author bruce
+   * @since 5.0
+   */
+  public static void dumpAllStacks() {
+    for (int h=0; h < Host.getHostCount(); h++) {
+      dumpStack(Host.getHost(h));
+    }
+  }
+  
+  /**
+   * Dump all thread stacks
+   */
+  public static void dumpMyThreads() {
+    OSProcess.printStacks(0, false);
+  }
+
+  /** 
+   * Print a stack dump for this vm.
+   * 
+   * @author bruce
+   * @since 5.0
+   */
+  public static void dumpStack() {
+    OSProcess.printStacks(0, false);
+  }
+
+  /** 
+   * Print stack dumps for all vms on the given host.
+   * 
+   * @author bruce
+   * @since 5.0
+   */
+  public static void dumpStack(final Host host) {
+    for (int v=0; v < host.getVMCount(); v++) {
+      host.getVM(v).invoke(com.gemstone.gemfire.test.dunit.DistributedTestCase.class, "dumpStack");
+    }
+  }
+
+  /** 
+   * Print a stack dump for the given vm.
+   * 
+   * @author bruce
+   * @since 5.0
+   */
+  public static void dumpStack(final VM vm) {
+    vm.invoke(com.gemstone.gemfire.test.dunit.DistributedTestCase.class, "dumpStack");
+  }
+
+  public static void dumpStackTrace(final Thread thread, final StackTraceElement[] stackTrace) {
+    StringBuilder msg = new StringBuilder();
+    msg.append("Thread=<")
+      .append(thread)
+      .append("> stackDump:\n");
+    for (int i=0; i < stackTrace.length; i++) {
+      msg.append("\t")
+        .append(stackTrace[i])
+        .append("\n");
+    }
+    logger.info(msg.toString());
+  }
+
+  /**
+   * Wait for a thread to join.
+   * 
+   * @param thread thread to wait on
+   * @param timeoutMilliseconds maximum time to wait
+   * @throws AssertionError if the thread does not terminate
+   */
+  public static void join(final Thread thread, final long timeoutMilliseconds) {
+    final long tilt = System.currentTimeMillis() + timeoutMilliseconds;
+    final long incrementalWait = jitterInterval(timeoutMilliseconds);
+    final long start = System.currentTimeMillis();
+    for (;;) {
+      // I really do *not* understand why this check is necessary
+      // but it is, at least with JDK 1.6.  According to the source code
+      // and the javadocs, one would think that join() would exit immediately
+      // if the thread is dead.  However, I can tell you from experimentation
+      // that this is not the case. :-(  djp 2008-12-08
+      if (!thread.isAlive()) {
+        break;
+      }
+      try {
+        thread.join(incrementalWait);
+      } catch (InterruptedException e) {
+        fail("interrupted");
+      }
+      if (System.currentTimeMillis() >= tilt) {
+        break;
+      }
+    } // for
+    if (thread.isAlive()) {
+      logger.info("HUNG THREAD");
+      ThreadUtils.dumpStackTrace(thread, thread.getStackTrace());
+      ThreadUtils.dumpMyThreads();
+      thread.interrupt(); // We're in trouble!
+      fail("Thread did not terminate after " + timeoutMilliseconds + " ms: " + thread);
+    }
+    long elapsedMs = (System.currentTimeMillis() - start);
+    if (elapsedMs > 0) {
+      String msg = "Thread " + thread + " took " + elapsedMs + " ms to exit.";
+      logger.info(msg);
+    }
+  }
+}
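
A hypothetical usage sketch of join(): wait a bounded time for a worker thread,
letting the utility dump stacks and fail the test if it hangs (the workload and
the 30 second timeout are illustrative):

    import com.gemstone.gemfire.test.dunit.ThreadUtils;

    public class ThreadUtilsExample {
      public static void runWithTimeout() {
        Thread worker = new Thread(() -> doWork(), "worker");
        worker.start();
        // Fails with stack dumps if the worker is still alive after 30 seconds.
        ThreadUtils.join(worker, 30 * 1000);
      }

      private static void doWork() {
        // placeholder workload
      }
    }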

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/VM.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/VM.java b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/VM.java
index 0d16196..db3e302 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/VM.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/VM.java
@@ -18,9 +18,9 @@ package com.gemstone.gemfire.test.dunit;
 
 import java.io.File;
 import java.io.PrintWriter;
+import java.io.Serializable;
 import java.io.StringWriter;
 import java.rmi.RemoteException;
-import java.util.concurrent.Callable;
 
 import com.gemstone.gemfire.test.dunit.standalone.BounceResult;
 import com.gemstone.gemfire.test.dunit.standalone.RemoteDUnitVMIF;
@@ -31,9 +31,9 @@ import hydra.MethExecutorResult;
  * This class represents a Java Virtual Machine that runs on a host.
  *
  * @author David Whitlock
- *
  */
-public class VM implements java.io.Serializable {
+@SuppressWarnings("serial")
+public class VM implements Serializable {
 
   /** The host on which this VM runs */
   private Host host;
@@ -53,7 +53,7 @@ public class VM implements java.io.Serializable {
    * Creates a new <code>VM</code> that runs on a given host with a
    * given process id.
    */
-  public VM(Host host, int pid, RemoteDUnitVMIF client) {
+  public VM(final Host host, final int pid, final RemoteDUnitVMIF client) {
     this.host = host;
     this.pid = pid;
     this.client = client;
@@ -83,7 +83,7 @@ public class VM implements java.io.Serializable {
    * <code>void</code> return type in this VM.  If the return type of
    * the method is <code>void</code>, <code>null</code> is returned.
    *
-   * @param c
+   * @param targetClass
    *        The class on which to invoke the method
    * @param methodName
    *        The name of the method to invoke
@@ -92,8 +92,8 @@ public class VM implements java.io.Serializable {
    *         An exception occurred on while invoking the method in
    *         this VM
    */
-  public Object invoke(Class c, String methodName) {
-    return invoke(c, methodName, new Object[0]);
+  public Object invoke(final Class targetClass, final String methodName) {
+    return invoke(targetClass, methodName, new Object[0]);
   }
 
   /**
@@ -102,13 +102,13 @@ public class VM implements java.io.Serializable {
    * return type of the method is <code>void</code>, <code>null</code>
    * is returned.
    *
-   * @param c
+   * @param targetClass
    *        The class on which to invoke the method
    * @param methodName
    *        The name of the method to invoke
    */
-  public AsyncInvocation invokeAsync(Class c, String methodName) {
-    return invokeAsync(c, methodName, null);
+  public AsyncInvocation invokeAsync(final Class targetClass, final String methodName) {
+    return invokeAsync(targetClass, methodName, null);
   }
 
   /**
@@ -116,7 +116,7 @@ public class VM implements java.io.Serializable {
    * <code>void</code> return type in this VM.  If the return type of
    * the method is <code>void</code>, <code>null</code> is returned.
    *
-   * @param c
+   * @param targetClass
    *        The class on which to invoke the method
    * @param methodName
    *        The name of the method to invoke
@@ -128,17 +128,17 @@ public class VM implements java.io.Serializable {
    *         An exception occurred on while invoking the method in
    *         this VM
    */
-  public Object invoke(Class c, String methodName, Object[] args) {
+  public Object invoke(Class targetClass, String methodName, Object[] args) {
     if (!this.available) {
       String s = "VM not available: " + this;
-      throw new RMIException(this, c.getName(), methodName,
+      throw new RMIException(this, targetClass.getName(), methodName,
             new IllegalStateException(s));
     }
     MethExecutorResult result = null;
     int retryCount = 120;
     do {
     try {
-      result = this.client.executeMethodOnClass(c.getName(), methodName, args);
+      result = this.client.executeMethodOnClass(targetClass.getName(), methodName, args);
       break; // out of while loop
     } catch( RemoteException e ) {
       boolean isWindows = false;
@@ -157,7 +157,7 @@ public class VM implements java.io.Serializable {
           }
         }
       } else {
-        throw new RMIException(this, c.getName(), methodName, e );
+        throw new RMIException(this, targetClass.getName(), methodName, e );
       }
     }
     } while (true);
@@ -167,7 +167,7 @@ public class VM implements java.io.Serializable {
 
     } else {
       Throwable thr = result.getException();
-      throw new RMIException(this, c.getName(), methodName, thr,
+      throw new RMIException(this, targetClass.getName(), methodName, thr,
                              result.getStackTrace()); 
     }
   }
@@ -177,7 +177,7 @@ public class VM implements java.io.Serializable {
    * <code>void</code> return type in this VM.  If the return type of
    * the method is <code>void</code>, <code>null</code> is returned.
    *
-   * @param c
+   * @param targetClass
    *        The class on which to invoke the method
    * @param methodName
    *        The name of the method to invoke
@@ -185,13 +185,13 @@ public class VM implements java.io.Serializable {
    *        Arguments passed to the method call (must be {@link
    *        java.io.Serializable}). 
    */
-  public AsyncInvocation invokeAsync(final Class c, 
+  public AsyncInvocation invokeAsync(final Class targetClass, 
                                      final String methodName,
                                      final Object[] args) {
     AsyncInvocation ai =
-      new AsyncInvocation(c, methodName, new Runnable() {
+      new AsyncInvocation(targetClass, methodName, new Runnable() {
         public void run() {
-          final Object o = invoke(c, methodName, args);
+          final Object o = invoke(targetClass, methodName, args);
           AsyncInvocation.setReturnValue(o);
         }
       });
@@ -282,12 +282,14 @@ public class VM implements java.io.Serializable {
   }
   
   /**
-   * Invokes the <code>run</code method of a {@link Runnable} in this
+   * Invokes the <code>run</code> method of a {@link Runnable} in this
    * VM.  If the invocation throws AssertionFailedError, and repeatTimeoutMs
    * is >0, the <code>run</code> method is invoked repeatedly until it
    * either succeeds, or repeatTimeoutMs has passed.  The AssertionFailedError
    * is thrown back to the sender of this method if <code>run</code> has not
    * completed successfully before repeatTimeoutMs has passed.
+   * 
+   * @deprecated Please use {@link com.jayway.awaitility.Awaitility} with {@link #invoke(SerializableCallableIF)} instead.
    */
   public void invokeRepeatingIfNecessary(RepeatableRunnable o, long repeatTimeoutMs) {
     invoke(o, "runRepeatingIfNecessary", new Object[] {new Long(repeatTimeoutMs)});
@@ -374,15 +376,15 @@ public class VM implements java.io.Serializable {
   /**
    * Invokes the <code>main</code> method of a given class
    *
-   * @param c
+   * @param targetClass
    *        The class on which to invoke the <code>main</code> method
    * @param args
    *        The "command line" arguments to pass to the
    *        <code>main</code> method
    */
-  public void invokeMain(Class c, String[] args) {
+  public void invokeMain(Class targetClass, String[] args) {
     Object[] stupid = new Object[] { args };
-    invoke(c, "main", stupid);
+    invoke(targetClass, "main", stupid);
   }
 
   /**
@@ -1342,4 +1344,14 @@ public class VM implements java.io.Serializable {
     return DUnitEnv.get().getWorkingDirectory(this.getPid());
   }
 
+  /** Return the total number of VMs on all hosts */
+  public static int getVMCount() {
+    int count = 0;
+    for (int h = 0; h < Host.getHostCount(); h++) {
+      Host host = Host.getHost(h);
+      count += host.getVMCount();
+    }
+    return count;
+  }
+
 }
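
The invokeAsync/exceptionOccurred pattern used elsewhere in this commit can be
sketched as follows (the example class and its remote task are hypothetical):

    import com.gemstone.gemfire.test.dunit.Assert;
    import com.gemstone.gemfire.test.dunit.AsyncInvocation;
    import com.gemstone.gemfire.test.dunit.Host;
    import com.gemstone.gemfire.test.dunit.VM;

    public class VmInvokeAsyncExample {
      public static void runInEveryVM() throws InterruptedException {
        for (int i = 0; i < Host.getHost(0).getVMCount(); i++) {
          VM vm = Host.getHost(0).getVM(i);
          AsyncInvocation async = vm.invokeAsync(VmInvokeAsyncExample.class, "remoteTask");
          async.join();
          if (async.exceptionOccurred()) {
            Assert.fail("remoteTask failed in " + vm, async.getException());
          }
        }
      }

      public static void remoteTask() {
        // placeholder for work executed in the remote VM
      }
    }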

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/Wait.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/Wait.java b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/Wait.java
new file mode 100755
index 0000000..3e218df
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/Wait.java
@@ -0,0 +1,204 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.test.dunit;
+
+import static org.junit.Assert.fail;
+import static com.gemstone.gemfire.test.dunit.Jitter.*;
+
+import org.apache.logging.log4j.Logger;
+
+import com.gemstone.gemfire.internal.cache.LocalRegion;
+import com.gemstone.gemfire.internal.logging.LogService;
+
+/**
+ * <code>Wait</code> provides static utility methods to wait for some
+ * asynchronous action with intermittent polling.
+ * 
+ * These methods can be used directly, as in <code>Wait.waitForCriterion(...)</code>; 
+ * however, they are intended to be referenced through static import:
+ *
+ * <pre>
+ * import static com.gemstone.gemfire.test.dunit.Wait.*;
+ *    ...
+ *    waitForCriterion(...);
+ * </pre>
+ *
+ * Extracted from DistributedTestCase.
+ * 
+ * @deprecated Use {@link com.jayway.awaitility.Awaitility} instead.
+ */
+@Deprecated
+public class Wait {
+  
+  private static final Logger logger = LogService.getLogger();
+
+  protected Wait() {
+  }
+  
+  /**
+   * Pause for a default interval (250 milliseconds).
+   *  
+   * @deprecated Please use {@link com.jayway.awaitility.Awaitility} instead.
+   */
+  public static void pause() {
+    pause(250);
+  }
+
+  /**
+   * Pause for the specified milliseconds. Make sure system clock has advanced
+   * by the specified number of millis before returning.
+   * 
+   * @deprecated Please use {@link com.jayway.awaitility.Awaitility} instead.
+   */
+  public static final void pause(final int milliseconds) {
+    if (milliseconds >= 1000 || logger.isDebugEnabled()) { // check for debug but log at info
+      logger.info("Pausing for {} ms...", milliseconds);
+    }
+    final long target = System.currentTimeMillis() + milliseconds;
+    try {
+      for (;;) {
+        long msLeft = target - System.currentTimeMillis();
+        if (msLeft <= 0) {
+          break;
+        }
+        Thread.sleep(msLeft);
+      }
+    }
+    catch (InterruptedException e) {
+      Assert.fail("interrupted", e);
+    }
+  }
+  
+  /**
+   * Wait until given criterion is met
+   * 
+   * @param waitCriterion criterion to wait on
+   * @param timeoutMillis total time to wait, in milliseconds
+   * @param pollingInterval pause interval between waits
+   * @param throwOnTimeout if false, don't generate an error
+   * @deprecated Please use {@link com.jayway.awaitility.Awaitility} instead.
+   */
+  @Deprecated
+  public static void waitForCriterion(final WaitCriterion waitCriterion, final long timeoutMillis, final long pollingInterval, final boolean throwOnTimeout) {
+    long waitThisTime = jitterInterval(pollingInterval);
+    final long tilt = System.currentTimeMillis() + timeoutMillis;
+    for (;;) {
+      if (waitCriterion.done()) {
+        return; // success
+      }
+      if (waitCriterion instanceof StoppableWaitCriterion) {
+        StoppableWaitCriterion ev2 = (StoppableWaitCriterion)waitCriterion;
+        if (ev2.stopWaiting()) {
+          if (throwOnTimeout) {
+            fail("stopWaiting returned true: " + waitCriterion.description());
+          }
+          return;
+        }
+      }
+
+      // Calculate time left
+      long timeLeft = tilt - System.currentTimeMillis();
+      if (timeLeft <= 0) {
+        if (!throwOnTimeout) {
+          return; // not an error, but we're done
+        }
+        fail("Event never occurred after " + timeoutMillis + " ms: " + waitCriterion.description());
+      }
+      
+      if (waitThisTime > timeLeft) {
+        waitThisTime = timeLeft;
+      }
+      
+      // Wait a little bit
+      Thread.yield();
+      try {
+        Thread.sleep(waitThisTime);
+      } catch (InterruptedException e) {
+        fail("interrupted");
+      }
+    }
+  }
+
+  /**
+   * Blocks until the clock used for expiration moves forward.
+   * 
+   * @param cacheTimeMillisSource region that provides cacheTimeMillis
+   * @return the last time stamp observed
+   * @deprecated Please use {@link com.jayway.awaitility.Awaitility} instead.
+   */
+  public static final long waitForExpiryClockToChange(final LocalRegion cacheTimeMillisSource) {
+    return waitForExpiryClockToChange(cacheTimeMillisSource, cacheTimeMillisSource.cacheTimeMillis());
+  }
+
+  /**
+   * Blocks until the clock used for expiration moves forward.
+   * 
+   * @param cacheTimeMillisSource region that provides cacheTimeMillis
+   * @param baseTime the timestamp that the clock must exceed
+   * @return the last time stamp observed
+   * @deprecated Please use {@link com.jayway.awaitility.Awaitility} instead.
+   */
+  public static final long waitForExpiryClockToChange(final LocalRegion cacheTimeMillisSource, final long baseTime) {
+    long nowTime;
+    do {
+      Thread.yield();
+      nowTime = cacheTimeMillisSource.cacheTimeMillis();
+    } while ((nowTime - baseTime) <= 0L);
+    return nowTime;
+  }
+
+  /**
+   * Wait on a mutex.  This is done in a loop in order to address the 
+   * "spurious wakeup" "feature" in Java.
+   * 
+   * @param waitCriterion condition to test
+   * @param mutex object to lock and wait on
+   * @param milliseconds total amount of time to wait
+   * @param pollingInterval interval to pause for the wait
+   * @param throwOnTimeout if false, no error is thrown.
+   * @deprecated Please use {@link com.jayway.awaitility.Awaitility} instead.
+   */
+  public static void waitMutex(final WaitCriterion waitCriterion, final Object mutex, final long milliseconds, final long pollingInterval, final boolean throwOnTimeout) {
+    final long tilt = System.currentTimeMillis() + milliseconds;
+    long waitThisTime = jitterInterval(pollingInterval);
+    synchronized (mutex) {
+      for (;;) {
+        if (waitCriterion.done()) {
+          break;
+        }
+        
+        long timeLeft = tilt - System.currentTimeMillis();
+        if (timeLeft <= 0) {
+          if (!throwOnTimeout) {
+            return; // not an error, but we're done
+          }
+          fail("Event never occurred after " + milliseconds + " ms: " + waitCriterion.description());
+        }
+        
+        if (waitThisTime > timeLeft) {
+          waitThisTime = timeLeft;
+        }
+        
+        try {
+          mutex.wait(waitThisTime);
+        } catch (InterruptedException e) {
+          fail("interrupted");
+        }
+      } // for
+    } // synchronized
+  }
+}
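
A hypothetical usage sketch of the (already deprecated) criterion API: poll a
flag every 200 ms, failing the test if it has not flipped within 60 seconds:

    import java.util.concurrent.atomic.AtomicBoolean;

    import com.gemstone.gemfire.test.dunit.Wait;
    import com.gemstone.gemfire.test.dunit.WaitCriterion;

    public class WaitExample {
      private static final AtomicBoolean ready = new AtomicBoolean();

      public static void waitUntilReady() {
        WaitCriterion criterion = new WaitCriterion() {
          public boolean done() {
            return ready.get();
          }
          public String description() {
            return "ready flag was never set";
          }
        };
        Wait.waitForCriterion(criterion, 60 * 1000, 200, true);
      }
    }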

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/WaitCriterion.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/WaitCriterion.java b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/WaitCriterion.java
new file mode 100755
index 0000000..7575f8c
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/WaitCriterion.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.test.dunit;
+
+/**
+ * Defines an asynchronous criterion to wait for by invoking a method in 
+ * {@link Wait}.
+ *
+ * Extracted from DistributedTestCase.
+ * 
+ * @deprecated Use {@link com.jayway.awaitility.Awaitility} instead.
+ */
+public interface WaitCriterion {
+  
+  public boolean done();
+  
+  public String description();
+  
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/rules/DistributedDisconnectRule.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/rules/DistributedDisconnectRule.java b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/rules/DistributedDisconnectRule.java
new file mode 100755
index 0000000..125fc06
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/rules/DistributedDisconnectRule.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.test.dunit.rules;
+
+// TODO: import static com.gemstone.gemfire.test.dunit.DistributedTestRule.*;
+
+import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+
+@SuppressWarnings("serial")
+public class DistributedDisconnectRule extends DistributedExternalResource {
+
+  private final boolean disconnectBefore;
+  private final boolean disconnectAfter;
+  private final boolean disconnectBeforeClass;
+  private final boolean disconnectAfterClass;
+  
+  public static Builder builder() {
+    return new Builder();
+  }
+  
+  public DistributedDisconnectRule(final Builder builder) {
+    this(new RemoteInvoker(), builder);
+  }
+   
+  public DistributedDisconnectRule(final RemoteInvoker invoker, final Builder builder) {
+    super(invoker);
+    this.disconnectBeforeClass = builder.disconnectBeforeClass;
+    this.disconnectAfterClass = builder.disconnectAfterClass;
+    this.disconnectBefore = builder.disconnectBefore;
+    this.disconnectAfter = builder.disconnectAfter;
+  }
+
+  @Override
+  protected void before() throws Throwable {
+    if (this.disconnectBefore) {
+      invoker().invokeEverywhere(serializableRunnable());
+    }
+  }
+
+  @Override
+  protected void after() throws Throwable {
+    if (this.disconnectAfter) {
+      invoker().invokeEverywhere(serializableRunnable());
+    }
+  }
+
+  @Override
+  protected void beforeClass() throws Throwable {
+    if (this.disconnectBeforeClass) {
+      invoker().invokeEverywhere(serializableRunnable());
+    }
+  }
+
+  @Override
+  protected void afterClass() throws Throwable {
+    if (this.disconnectAfterClass) {
+      invoker().invokeEverywhere(serializableRunnable());
+    }
+  }
+
+  private static SerializableRunnable serializableRunnable() {
+    return new SerializableRunnable() {
+      @Override
+      public void run() {
+        // TODO: disconnectFromDS();
+      }
+    };
+  }
+  
+  /**
+   * Builds an instance of DistributedDisconnectRule
+   * 
+   * @author Kirk Lund
+   */
+  public static class Builder {
+    private boolean disconnectBeforeClass;
+    private boolean disconnectAfterClass;
+    private boolean disconnectBefore;
+    private boolean disconnectAfter;
+    
+    public Builder() {}
+
+    public Builder disconnectBeforeClass(final boolean disconnectBeforeClass) {
+      this.disconnectBeforeClass = disconnectBeforeClass;
+      return this;
+    }
+    
+    public Builder disconnectBefore(final boolean disconnectBefore) {
+      this.disconnectBefore = disconnectBefore;
+      return this;
+    }
+    
+    public Builder disconnectAfterClass(final boolean disconnectAfterClass) {
+      this.disconnectAfterClass = disconnectAfterClass;
+      return this;
+    }
+    
+    public Builder disconnectAfter(final boolean disconnectAfter) {
+      this.disconnectAfter = disconnectAfter;
+      return this;
+    }
+    
+    public DistributedDisconnectRule build() {
+      return new DistributedDisconnectRule(this);
+    }
+  }
+}
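
A hypothetical sketch of wiring the rule into a JUnit 4 test via the builder
(note that the disconnect action itself is still a TODO in this commit):

    import org.junit.Rule;
    import org.junit.Test;

    import com.gemstone.gemfire.test.dunit.rules.DistributedDisconnectRule;

    public class DistributedDisconnectRuleExample {

      @Rule
      public DistributedDisconnectRule disconnectRule =
          DistributedDisconnectRule.builder().disconnectAfter(true).build();

      @Test
      public void testSomething() {
        // test body that connects to the distributed system
      }
    }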

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/rules/DistributedExternalResource.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/rules/DistributedExternalResource.java b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/rules/DistributedExternalResource.java
new file mode 100755
index 0000000..d3b7319
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/rules/DistributedExternalResource.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.test.dunit.rules;
+
+import com.gemstone.gemfire.test.junit.rules.SerializableExternalResource;
+
+@SuppressWarnings("serial")
+public class DistributedExternalResource extends SerializableExternalResource {
+
+  private final RemoteInvoker invoker;
+
+  public DistributedExternalResource() {
+    this(new RemoteInvoker());
+  }
+   
+  public DistributedExternalResource(final RemoteInvoker invoker) {
+    super();
+    this.invoker = invoker;
+  }
+
+  protected RemoteInvoker invoker() {
+    return this.invoker;
+  }
+  
+  @Override
+  protected void before() throws Throwable {
+    // do nothing
+  }
+
+  @Override
+  protected void after() throws Throwable {
+    // do nothing
+  }
+
+  @Override
+  protected void beforeClass() throws Throwable {
+    // do nothing
+  }
+
+  @Override
+  protected void afterClass() throws Throwable {
+    // do nothing
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/rules/DistributedRestoreSystemProperties.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/rules/DistributedRestoreSystemProperties.java b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/rules/DistributedRestoreSystemProperties.java
new file mode 100755
index 0000000..1711b21
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/rules/DistributedRestoreSystemProperties.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.test.dunit.rules;
+
+import static java.lang.System.getProperties;
+import static java.lang.System.setProperties;
+
+import java.util.Properties;
+
+import org.junit.contrib.java.lang.system.RestoreSystemProperties;
+
+import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.junit.rules.SerializableTestRule;
+
+/**
+ * Distributed version of RestoreSystemProperties which affects all DUnit 
+ * JVMs including the Locator JVM.
+ * 
+ * @author Kirk Lund
+ */
+@SuppressWarnings("serial")
+public class DistributedRestoreSystemProperties extends RestoreSystemProperties implements SerializableTestRule {
+  
+  private static volatile Properties originalProperties;
+
+  private final RemoteInvoker invoker;
+  
+  public DistributedRestoreSystemProperties() {
+   this(new RemoteInvoker());
+  }
+  
+  public DistributedRestoreSystemProperties(final RemoteInvoker invoker) {
+    super();
+    this.invoker = invoker;
+  }
+  
+  @Override
+  protected void before() throws Throwable {
+    super.before();
+    this.invoker.remoteInvokeInEveryVMAndLocator(new SerializableRunnable() {
+      @Override
+      public void run() { 
+        originalProperties = getProperties();
+        setProperties(new Properties(originalProperties));
+      }
+    });
+  }
+
+  @Override
+  protected void after() {
+    super.after();
+    this.invoker.remoteInvokeInEveryVMAndLocator(new SerializableRunnable() {
+      @Override
+      public void run() { 
+        setProperties(originalProperties);
+        originalProperties = null;
+      }
+    });
+  }
+}
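
A hypothetical usage sketch: any system property changed during the test, in the
controller JVM or in a dunit VM, is restored afterwards (the property name is a
placeholder):

    import org.junit.Rule;
    import org.junit.Test;

    import com.gemstone.gemfire.test.dunit.Host;
    import com.gemstone.gemfire.test.dunit.rules.DistributedRestoreSystemProperties;

    public class RestorePropertiesExample {

      @Rule
      public DistributedRestoreSystemProperties restoreProps =
          new DistributedRestoreSystemProperties();

      @Test
      public void testWithTemporaryProperty() {
        System.setProperty("gemfire.example-flag", "true");
        Host.getHost(0).getVM(0).invoke(() -> {
          System.setProperty("gemfire.example-flag", "true");
        });
      }
    }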

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/rules/RemoteInvoker.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/rules/RemoteInvoker.java b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/rules/RemoteInvoker.java
new file mode 100755
index 0000000..e7e523f
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/rules/RemoteInvoker.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.test.dunit.rules;
+
+import static com.gemstone.gemfire.test.dunit.Invoke.invokeInEveryVM;
+import static com.gemstone.gemfire.test.dunit.Invoke.invokeInLocator;
+
+import java.io.Serializable;
+
+import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+
+@SuppressWarnings("serial")
+public class RemoteInvoker implements Serializable {
+
+  public void invokeEverywhere(final SerializableRunnable runnable) {
+    runnable.run();
+    invokeInEveryVM(runnable);
+    invokeInLocator(runnable);
+  }
+
+  public void remoteInvokeInEveryVMAndLocator(final SerializableRunnable runnable) {
+    invokeInEveryVM(runnable);
+    invokeInLocator(runnable);
+  }
+}
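
The difference between the two methods above is only whether the runnable also
runs in the local (test controller) JVM; a hypothetical sketch:

    import com.gemstone.gemfire.test.dunit.SerializableRunnable;
    import com.gemstone.gemfire.test.dunit.rules.RemoteInvoker;

    public class RemoteInvokerExample {
      public static void clearFlagEverywhere() {
        // Runs locally, in every dunit VM, and in the locator JVM.
        new RemoteInvoker().invokeEverywhere(new SerializableRunnable("clear flag") {
          public void run() {
            System.clearProperty("gemfire.example-flag"); // placeholder action
          }
        });
      }
    }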

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/tests/BasicDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/tests/BasicDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/tests/BasicDUnitTest.java
index 18e5f72..f9d033c 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/tests/BasicDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/tests/BasicDUnitTest.java
@@ -18,6 +18,7 @@ package com.gemstone.gemfire.test.dunit.tests;
 
 import java.util.Properties;
 
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
 import com.gemstone.gemfire.test.dunit.DUnitEnv;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
@@ -117,14 +118,14 @@ public class BasicDUnitTest extends DistributedTestCase {
     ai.join();
     // TODO shouldn't we call fail() here?
     if (ai.exceptionOccurred()) {
-      fail("remoteBind failed", ai.getException());
+      Assert.fail("remoteBind failed", ai.getException());
     }
 
     ai = vm.invokeAsync(this.getClass(), "remoteValidateBind",
                         new Object[] {name, value });
     ai.join();
     if (ai.exceptionOccurred()) {
-      fail("remoteValidateBind failed", ai.getException());
+      Assert.fail("remoteValidateBind failed", ai.getException());
     }
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/tests/GetDefaultDiskStoreNameDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/tests/GetDefaultDiskStoreNameDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/tests/GetDefaultDiskStoreNameDUnitTest.java
new file mode 100755
index 0000000..99dcc29
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/tests/GetDefaultDiskStoreNameDUnitTest.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.test.dunit.tests;
+
+import static org.assertj.core.api.Assertions.*;
+
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.junit.categories.DistributedTest;
+
+@SuppressWarnings("serial")
+@Category(DistributedTest.class)
+public class GetDefaultDiskStoreNameDUnitTest extends DistributedTestCase {
+
+  public GetDefaultDiskStoreNameDUnitTest(final String name) {
+    super(name);
+  }
+
+  public void testGetTestMethodName() {
+    String expected = createDefaultDiskStoreName(0, -1, "testGetTestMethodName");
+    assertGetDefaultDiskStoreName(expected);
+  }
+  
+  public void testGetTestMethodNameChanges() {
+    String expected = createDefaultDiskStoreName(0, -1, "testGetTestMethodNameChanges");
+    assertGetDefaultDiskStoreName(expected);
+  }
+  
+  public void testGetTestMethodNameInAllVMs() {
+    String expected = createDefaultDiskStoreName(0, -1, "testGetTestMethodNameInAllVMs");
+    assertGetDefaultDiskStoreName(expected);
+    
+    for (int vmIndex = 0; vmIndex < Host.getHost(0).getVMCount(); vmIndex++) {
+      String expectedInVM = createDefaultDiskStoreName(0, vmIndex, "testGetTestMethodNameInAllVMs");
+      Host.getHost(0).getVM(vmIndex).invoke(()->assertGetDefaultDiskStoreName(expectedInVM));
+    }
+  }
+  
+  private void assertGetDefaultDiskStoreName(final String expected) {
+    assertThat(getDefaultDiskStoreName()).isEqualTo(expected);
+  }
+  
+  private String createDefaultDiskStoreName(final int hostIndex, final int vmIndex, final String methodName) {
+    return "DiskStore-" + hostIndex + "-" + vmIndex + "-" + getClass().getCanonicalName() + "." + methodName;
+  }
+  
+  private String getDefaultDiskStoreName() {
+    return GemFireCacheImpl.DEFAULT_DS_NAME; // TODO: not thread safe
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/tests/GetTestMethodNameDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/tests/GetTestMethodNameDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/tests/GetTestMethodNameDUnitTest.java
new file mode 100755
index 0000000..9bad472
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/test/dunit/tests/GetTestMethodNameDUnitTest.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.test.dunit.tests;
+
+import static org.assertj.core.api.Assertions.*;
+
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.junit.categories.DistributedTest;
+
+@SuppressWarnings("serial")
+@Category(DistributedTest.class)
+public class GetTestMethodNameDUnitTest extends DistributedTestCase {
+
+  public GetTestMethodNameDUnitTest(final String name) {
+    super(name);
+  }
+
+  public void testGetTestMethodName() {
+    assertGetTestMethodName("testGetTestMethodName");
+  }
+  
+  public void testGetTestMethodNameChanges() {
+    assertGetTestMethodName("testGetTestMethodNameChanges");
+  }
+  
+  public void testGetTestMethodNameInAllVMs() {
+    assertGetTestMethodName("testGetTestMethodNameInAllVMs");
+    
+    for (int vmIndex = 0; vmIndex < Host.getHost(0).getVMCount(); vmIndex++) {
+      Host.getHost(0).getVM(vmIndex).invoke(()->assertGetTestMethodName("testGetTestMethodNameInAllVMs"));
+    }
+  }
+  
+  private void assertGetTestMethodName(final String expected) {
+    assertThat(getTestMethodName()).isEqualTo(expected);
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/hydra/MethExecutor.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/hydra/MethExecutor.java b/gemfire-core/src/test/java/hydra/MethExecutor.java
index c38a803..8aaf3dc 100644
--- a/gemfire-core/src/test/java/hydra/MethExecutor.java
+++ b/gemfire-core/src/test/java/hydra/MethExecutor.java
@@ -147,6 +147,7 @@ public class MethExecutor {
       }
       sb.append("] in class ");
       sb.append(c.getName());
+      sb.append(" methods=" + matchingMethods);
       throw new NoSuchMethodException(sb.toString());
     }
     else return (Method) matchingMethods.get(0);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqDataDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqDataDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqDataDUnitTest.java
index c26282c..5850e6f 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqDataDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqDataDUnitTest.java
@@ -38,11 +38,17 @@ import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.cache30.CertifiableTestCacheListener;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 /**
  * This class tests the ContinuousQuery mechanism in GemFire.
@@ -64,7 +70,7 @@ public class CqDataDUnitTest extends CacheTestCase {
     // avoid IllegalStateException from HandShake by connecting all vms to
     // system before creating ConnectionPools
     getSystem();
-    invokeInEveryVM(new SerializableRunnable("getSystem") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
       public void run() {
         getSystem();
       }
@@ -90,7 +96,7 @@ public class CqDataDUnitTest extends CacheTestCase {
 
     final int port = server.invokeInt(CqQueryDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
 
     // Create client.
     cqDUnitTest.createClient(client, port, host0);
@@ -132,11 +138,11 @@ public class CqDataDUnitTest extends CacheTestCase {
     VM client = host.getVM(3);
     
     //Killing servers can cause this message on the client side.
-    addExpectedException("Could not find any server");
+    IgnoredException.addIgnoredException("Could not find any server");
     cqDUnitTest.createServer(server1);
     
     final int port1 = server1.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     
     final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(2);
     
@@ -156,7 +162,7 @@ public class CqDataDUnitTest extends CacheTestCase {
       cqDUnitTest.executeCQ(client, "testCQHAWithState_" + i, false, null);
     }
     
-    pause(1 * 1000);
+    Wait.pause(1 * 1000);
     
     int size = 10;
     
@@ -178,7 +184,7 @@ public class CqDataDUnitTest extends CacheTestCase {
     // Close server1.
     // To maintain the redundancy; it will make connection to endpoint-3.
     cqDUnitTest.closeServer(server1);
-    pause(3 * 1000);
+    Wait.pause(3 * 1000);
     
     
     // UPDATE-1.
@@ -196,12 +202,12 @@ public class CqDataDUnitTest extends CacheTestCase {
     //Stop cq.
     cqDUnitTest.stopCQ(client, "testCQHAWithState_0");
     
-    pause(2 * 1000);
+    Wait.pause(2 * 1000);
     
     // UPDATE with stop.
     cqDUnitTest.createServer(server3, ports[1]);
     server3.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    pause(2 * 1000);
+    Wait.pause(2 * 1000);
     
     cqDUnitTest.clearCQListenerEvents(client, "testCQHAWithState_0");
     
@@ -217,7 +223,7 @@ public class CqDataDUnitTest extends CacheTestCase {
     }
     
     cqDUnitTest.executeCQ(client, "testCQHAWithState_0", false, null);
-    pause(2 * 1000);
+    Wait.pause(2 * 1000);
     
     // Update - 2 
     cqDUnitTest.createValues(server3, cqDUnitTest.regions[0], 10);    
@@ -253,7 +259,7 @@ public class CqDataDUnitTest extends CacheTestCase {
     cqDUnitTest.createServer(server, 0, true);
     final int port = server.invokeInt(CqQueryDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
 
     // Create client.
     cqDUnitTest.createClient(client, port, host0);
@@ -328,7 +334,7 @@ public class CqDataDUnitTest extends CacheTestCase {
     /* Create Server and Client */
     cqDUnitTest.createServer(server);
     final int port = server.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     cqDUnitTest.createClient(client1, port, host0);
     cqDUnitTest.createClient(client2, port, host0);
     
@@ -397,7 +403,7 @@ public class CqDataDUnitTest extends CacheTestCase {
     cqDUnitTest.createServer(server2, 0, false, MirrorType.KEYS);
         
     final int port1 = server1.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
       
     cqDUnitTest.createClient(client, port1, host0);
     
@@ -405,7 +411,7 @@ public class CqDataDUnitTest extends CacheTestCase {
     cqDUnitTest.createCQ(client, "testCQWithLoad_0", cqDUnitTest.cqs[0]);  
     cqDUnitTest.executeCQ(client, "testCQWithLoad_0", false, null); 
     
-    pause(2 * 1000);
+    Wait.pause(2 * 1000);
     
     final int size = 10;
     
@@ -445,7 +451,7 @@ public class CqDataDUnitTest extends CacheTestCase {
     final int evictionThreshold = 1;
     server1.invoke(new CacheSerializableRunnable("Create Cache Server") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Create Cache Server. ###");
+        LogWriterUtils.getLogWriter().info("### Create Cache Server. ###");
         AttributesFactory factory = new AttributesFactory();
         factory.setScope(Scope.DISTRIBUTED_ACK);
         factory.setDataPolicy(DataPolicy.REPLICATE);
@@ -458,22 +464,22 @@ public class CqDataDUnitTest extends CacheTestCase {
         for (int i = 0; i < cqDUnitTest.regions.length; i++) { 
           Region region = createRegion(cqDUnitTest.regions[i], factory.createRegionAttributes());
           // Set CacheListener.
-          region.getAttributesMutator().setCacheListener(new CertifiableTestCacheListener(getLogWriter()));  
+          region.getAttributesMutator().setCacheListener(new CertifiableTestCacheListener(LogWriterUtils.getLogWriter()));  
         } 
-        pause(2000);
+        Wait.pause(2000);
         
         try {
           cqDUnitTest.startBridgeServer(0, true);
         } catch (Exception ex) {
-          fail("While starting CacheServer", ex);
+          Assert.fail("While starting CacheServer", ex);
         }
-        pause(2000);
+        Wait.pause(2000);
 
       }
     });
         
     final int port1 = server1.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
       
     cqDUnitTest.createClient(client, port1, host0);
     
@@ -487,7 +493,7 @@ public class CqDataDUnitTest extends CacheTestCase {
     
     cqDUnitTest.executeCQ(client, "testCQWithEviction_0", false, "CqException"); 
     
-    pause(1 * 1000);
+    Wait.pause(1 * 1000);
 
     // Update VALUES.
     cqDUnitTest.createValues(server1, cqDUnitTest.regions[0], size);
@@ -515,7 +521,7 @@ public class CqDataDUnitTest extends CacheTestCase {
     cqDUnitTest.createServer(server1, 0, false, MirrorType.KEYS_VALUES);
 
     final int port1 = server1.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String serverHost = getServerHostName(server1.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server1.getHost());
 
     final String[] regions = cqDUnitTest.regions;
     final int[] serverPorts = new int[] {port1};
@@ -524,7 +530,7 @@ public class CqDataDUnitTest extends CacheTestCase {
     SerializableRunnable createClientWithPool =
       new CacheSerializableRunnable("createClientWithPool") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Create Client. ###");
+        LogWriterUtils.getLogWriter().info("### Create Client. ###");
         // Initialize CQ Service.
         try {
           getCache().getQueryService();
@@ -538,7 +544,7 @@ public class CqDataDUnitTest extends CacheTestCase {
         ClientServerTestCase.configureConnectionPool(regionFactory, serverHost, serverPorts[0], -1, false, -1, -1, null);
         for (int i=0; i < regions.length; i++) {        
           createRegion(regions[i], regionFactory.create() );
-          getLogWriter().info("### Successfully Created Region on Client :" + regions[i]);
+          LogWriterUtils.getLogWriter().info("### Successfully Created Region on Client :" + regions[i]);
         }
       }
     };
@@ -573,7 +579,7 @@ public class CqDataDUnitTest extends CacheTestCase {
     cqDUnitTest.createServer(server1, 0, false, MirrorType.KEYS_VALUES);
 
     final int port1 = server1.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String serverHost = getServerHostName(server1.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server1.getHost());
 
     final String[] regions = cqDUnitTest.regions;
     final int[] serverPorts = new int[] {port1};
@@ -582,7 +588,7 @@ public class CqDataDUnitTest extends CacheTestCase {
     SerializableRunnable createClientWithPool =
       new CacheSerializableRunnable("createClientWithPool") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Create Client. ###");
+        LogWriterUtils.getLogWriter().info("### Create Client. ###");
         //Region region1 = null;
         // Initialize CQ Service.
         try {
@@ -598,7 +604,7 @@ public class CqDataDUnitTest extends CacheTestCase {
 
         for (int i=0; i < regions.length; i++) {        
           createRegion(regions[i], regionFactory.createRegionAttributes());
-          getLogWriter().info("### Successfully Created Region on Client :" + regions[i]);
+          LogWriterUtils.getLogWriter().info("### Successfully Created Region on Client :" + regions[i]);
         }
       }
     };
@@ -628,7 +634,7 @@ public class CqDataDUnitTest extends CacheTestCase {
     cqDUnitTest.createServer(server1, 0, false, MirrorType.KEYS_VALUES);
 
     final int port1 = server1.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String serverHost = getServerHostName(server1.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server1.getHost());
 
     final String[] regions = cqDUnitTest.regions;
     final int[] serverPorts = new int[] {port1};
@@ -637,7 +643,7 @@ public class CqDataDUnitTest extends CacheTestCase {
     SerializableRunnable createClientWithConnectionPool =
       new CacheSerializableRunnable("createClientWithConnectionPool") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Create Client. ###");
+        LogWriterUtils.getLogWriter().info("### Create Client. ###");
         //Region region1 = null;
         // Initialize CQ Service.
         try {
@@ -652,7 +658,7 @@ public class CqDataDUnitTest extends CacheTestCase {
         ClientServerTestCase.configureConnectionPool(regionFactory, serverHost, serverPorts[0], -1, true, -1, -1, null);
         for (int i=0; i < regions.length; i++) {        
           createRegion(regions[i], regionFactory.createRegionAttributes());
-          getLogWriter().info("### Successfully Created Region on Client :" + regions[i]);
+          LogWriterUtils.getLogWriter().info("### Successfully Created Region on Client :" + regions[i]);
         }
       }
     };
@@ -682,7 +688,7 @@ public class CqDataDUnitTest extends CacheTestCase {
     cqDUnitTest.createServer(server1, 0, false, MirrorType.KEYS_VALUES);
 
     final int port1 = server1.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String serverHost = getServerHostName(server1.getHost());
+    final String serverHost = NetworkUtils.getServerHostName(server1.getHost());
 
     final String[] regions = cqDUnitTest.regions;
     final int[] serverPorts = new int[] {port1};
@@ -691,7 +697,7 @@ public class CqDataDUnitTest extends CacheTestCase {
     SerializableRunnable createClientWithPool =
       new CacheSerializableRunnable("createClientWithPool") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Create Client. ###");
+        LogWriterUtils.getLogWriter().info("### Create Client. ###");
         //Region region1 = null;
         // Initialize CQ Service.
         try {
@@ -708,7 +714,7 @@ public class CqDataDUnitTest extends CacheTestCase {
 
         for (int i=0; i < regions.length; i++) {        
           createRegion(regions[i], regionFactory.createRegionAttributes());
-          getLogWriter().info("### Successfully Created Region on Client :" + regions[i]);
+          LogWriterUtils.getLogWriter().info("### Successfully Created Region on Client :" + regions[i]);
         }
       }
     };
@@ -747,7 +753,7 @@ public class CqDataDUnitTest extends CacheTestCase {
     
     cqDUnitTest.createServer(server);
     final int port = server.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     
     cqDUnitTest.createClient(client, port, host0);
     
@@ -762,7 +768,7 @@ public class CqDataDUnitTest extends CacheTestCase {
     // Test for Event on Region Clear.
     server.invoke(new CacheSerializableRunnable("testRegionEvents"){
       public void run2()throws CacheException {
-        getLogWriter().info("### Clearing the region on the server ###");
+        LogWriterUtils.getLogWriter().info("### Clearing the region on the server ###");
         Region region = getCache().getRegion("/root/" + cqDUnitTest.regions[0]);
         for (int i = 1; i <=5; i++) {
           region.put(CqQueryDUnitTest.KEY+i, new Portfolio(i));
@@ -776,7 +782,7 @@ public class CqDataDUnitTest extends CacheTestCase {
     // Test for Event on Region invalidate.
     server.invoke(new CacheSerializableRunnable("testRegionEvents"){
       public void run2()throws CacheException {
-        getLogWriter().info("### Invalidate the region on the server ###");
+        LogWriterUtils.getLogWriter().info("### Invalidate the region on the server ###");
         Region region = getCache().getRegion("/root/" + cqDUnitTest.regions[0]);
         for (int i = 1; i <=5; i++) {
           region.put(CqQueryDUnitTest.KEY+i, new Portfolio(i));
@@ -790,7 +796,7 @@ public class CqDataDUnitTest extends CacheTestCase {
     // Test for Event on Region destroy.
     server.invoke(new CacheSerializableRunnable("testRegionEvents"){
       public void run2()throws CacheException {
-        getLogWriter().info("### Destroying the region on the server ###");
+        LogWriterUtils.getLogWriter().info("### Destroying the region on the server ###");
         Region region = getCache().getRegion("/root/" + cqDUnitTest.regions[1]);
         for (int i = 1; i <=5; i++) {
           region.put(CqQueryDUnitTest.KEY+i, new Portfolio(i));
@@ -800,7 +806,7 @@ public class CqDataDUnitTest extends CacheTestCase {
       }
     });
 
-    pause(1000); // wait for cq to close because of region destroy on server.
+    Wait.pause(1000); // wait for cq to close because of region destroy on server.
     //cqDUnitTest.waitForClose(client,"testRegionEvents_1");
     cqDUnitTest.validateCQCount(client,1);
 
@@ -828,7 +834,7 @@ public class CqDataDUnitTest extends CacheTestCase {
     final String cqName = "testEventsDuringQueryExecution_0";
     cqDUnitTest.createServer(server);
     final int port = server.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     
     // Initialize Client.
     cqDUnitTest.createClient(client, port, host0);
@@ -937,7 +943,7 @@ public class CqDataDUnitTest extends CacheTestCase {
     });
 
     //wait for 60 seconds for test to complete
-    DistributedTestCase.join(processCqs, 60 * 1000, getLogWriter());
+    ThreadUtils.join(processCqs, 60 * 1000);
     
     // Close.
     cqDUnitTest.closeClient(client);
@@ -968,7 +974,7 @@ public class CqDataDUnitTest extends CacheTestCase {
     cqDUnitTest.createServer(server);
     final int port = server.invokeInt(CqQueryDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
 
     // Initialize Client.
     cqDUnitTest.createClient(client, port, host0);
@@ -1113,7 +1119,7 @@ public class CqDataDUnitTest extends CacheTestCase {
     });
 
     //wait for 60 seconds for test to complete
-    DistributedTestCase.join(processCqs, 60 * 1000, getLogWriter());
+    ThreadUtils.join(processCqs, 60 * 1000);
     // Close.
     cqDUnitTest.closeClient(client);
     cqDUnitTest.closeServer(server);
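
The hunks above replace two helpers that CqDataDUnitTest used to inherit: the unconditional pause and the join on an AsyncInvocation. A minimal sketch of the new call sites, assuming an AsyncInvocation named processCqs as in the diff; the wrapper class and method names are illustrative only:

import com.gemstone.gemfire.test.dunit.AsyncInvocation;
import com.gemstone.gemfire.test.dunit.ThreadUtils;
import com.gemstone.gemfire.test.dunit.Wait;

public class CqJoinSketch {
  // Waits for background CQ processing the way the migrated test now does.
  static void waitForCqProcessing(AsyncInvocation processCqs) {
    // Previously: pause(2 * 1000), inherited from DistributedTestCase.
    Wait.pause(2 * 1000);
    // Previously: DistributedTestCase.join(processCqs, 60 * 1000, getLogWriter());
    // the extracted utility drops the log writer argument.
    ThreadUtils.join(processCqs, 60 * 1000);
  }
}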

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqDataOptimizedExecuteDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqDataOptimizedExecuteDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqDataOptimizedExecuteDUnitTest.java
index 45f61f8..e185af1 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqDataOptimizedExecuteDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqDataOptimizedExecuteDUnitTest.java
@@ -18,6 +18,8 @@ package com.gemstone.gemfire.cache.query.cq.dunit;
 
 
 import com.gemstone.gemfire.cache.query.internal.cq.CqServiceImpl;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 
 /**
@@ -34,8 +36,8 @@ public class CqDataOptimizedExecuteDUnitTest extends CqDataDUnitTest{
     super.setUp();
     //We're seeing this on the server when the client
     //disconnects.
-    addExpectedException("Connection reset");
-    invokeInEveryVM(new SerializableRunnable("getSystem") {
+    IgnoredException.addIgnoredException("Connection reset");
+    Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
       public void run() {
         CqServiceImpl.EXECUTE_QUERY_DURING_INIT = false;
       }
@@ -43,12 +45,11 @@ public class CqDataOptimizedExecuteDUnitTest extends CqDataDUnitTest{
   }
   
   @Override
-  public void tearDown2() throws Exception {
-    invokeInEveryVM(new SerializableRunnable("getSystem") {
+  protected final void preTearDownCacheTestCase() throws Exception {
+    Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
       public void run() {
         CqServiceImpl.EXECUTE_QUERY_DURING_INIT = true;
       }
     });
-    super.tearDown2();
   }
 }
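
The subclass above also shows where lifecycle code moves: expected log noise is registered through IgnoredException, per-VM work runs through Invoke, and cleanup shifts from tearDown2() into the preTearDownCacheTestCase() hook. A rough sketch of that shape, assuming a hypothetical CacheTestCase subclass; the runnable bodies stand in for the CqServiceImpl flag toggled in the real test:

import com.gemstone.gemfire.cache30.CacheTestCase;
import com.gemstone.gemfire.test.dunit.IgnoredException;
import com.gemstone.gemfire.test.dunit.Invoke;
import com.gemstone.gemfire.test.dunit.SerializableRunnable;

public class OptimizedExecuteSketchDUnitTest extends CacheTestCase {

  public OptimizedExecuteSketchDUnitTest(String name) {
    super(name);
  }

  @Override
  public void setUp() throws Exception {
    super.setUp();
    // Log noise expected when the client disconnects.
    IgnoredException.addIgnoredException("Connection reset");
    // Flip a test flag in every dunit VM via the extracted Invoke utility.
    Invoke.invokeInEveryVM(new SerializableRunnable("disable init query") {
      public void run() {
        // e.g. CqServiceImpl.EXECUTE_QUERY_DURING_INIT = false;
      }
    });
  }

  @Override
  protected final void preTearDownCacheTestCase() throws Exception {
    // Runs before the framework's own cache teardown, so no super.tearDown2().
    Invoke.invokeInEveryVM(new SerializableRunnable("restore init query") {
      public void run() {
        // e.g. CqServiceImpl.EXECUTE_QUERY_DURING_INIT = true;
      }
    });
  }
}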


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DestroyEntryPropagationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DestroyEntryPropagationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DestroyEntryPropagationDUnitTest.java
index ba8ee2e..5ba03bd 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DestroyEntryPropagationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DestroyEntryPropagationDUnitTest.java
@@ -38,9 +38,14 @@ import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.EntryEventImpl;
 import com.gemstone.gemfire.internal.cache.EventID;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.cache30.CertifiableTestCacheListener;
 import com.gemstone.gemfire.cache.client.*;
@@ -100,9 +105,9 @@ public class DestroyEntryPropagationDUnitTest extends DistributedTestCase
     PORT2 =  ((Integer)vm1.invoke(DestroyEntryPropagationDUnitTest.class, "createServerCache" )).intValue();
 
     vm2.invoke(DestroyEntryPropagationDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(Host.getHost(0)), new Integer(PORT1),new Integer(PORT2)});
+        new Object[] { NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT1),new Integer(PORT2)});
     vm3.invoke(DestroyEntryPropagationDUnitTest.class, "createClientCache",
-        new Object[] { getServerHostName(Host.getHost(0)), new Integer(PORT1),new Integer(PORT2)});
+        new Object[] { NetworkUtils.getServerHostName(Host.getHost(0)), new Integer(PORT1),new Integer(PORT2)});
 
   }
 
@@ -221,7 +226,7 @@ public class DestroyEntryPropagationDUnitTest extends DistributedTestCase
             return null;
           }
         };
-        DistributedTestCase.waitForCriterion(ev, maxWaitTime, 200, true);
+        Wait.waitForCriterion(ev, maxWaitTime, 200, true);
       }
     });
 
@@ -245,7 +250,7 @@ public class DestroyEntryPropagationDUnitTest extends DistributedTestCase
             return null;
           }
         };
-        DistributedTestCase.waitForCriterion(ev, maxWaitTime, 200, true);
+        Wait.waitForCriterion(ev, maxWaitTime, 200, true);
       }
     });
 
@@ -296,10 +301,10 @@ public class DestroyEntryPropagationDUnitTest extends DistributedTestCase
   {
     try {
       Iterator iter = cache.getCacheServers().iterator();
-      getLogWriter().fine ("Asif: servers running = "+cache.getCacheServers().size());
+      LogWriterUtils.getLogWriter().fine ("Asif: servers running = "+cache.getCacheServers().size());
       if (iter.hasNext()) {
         CacheServer server = (CacheServer)iter.next();
-        getLogWriter().fine("asif : server running on port="+server.getPort()+ " asked to kill server on port="+port);
+        LogWriterUtils.getLogWriter().fine("asif : server running on port="+server.getPort()+ " asked to kill server on port="+port);
          if(port.intValue() == server.getPort()){
          server.stop();
         }
@@ -342,7 +347,7 @@ public class DestroyEntryPropagationDUnitTest extends DistributedTestCase
       assertEquals(r1.getEntry("key2").getValue(), "key-2");
     }
     catch (Exception ex) {
-      fail("failed while createEntries()", ex);
+      Assert.fail("failed while createEntries()", ex);
     }
   }
 
@@ -359,7 +364,7 @@ public class DestroyEntryPropagationDUnitTest extends DistributedTestCase
       r.destroy("key2");
     }
     catch (Exception ex) {
-      fail("failed while destroyEntry()", ex);
+      Assert.fail("failed while destroyEntry()", ex);
     }
   }
 
@@ -372,7 +377,7 @@ public class DestroyEntryPropagationDUnitTest extends DistributedTestCase
       assertNotNull(r.getEntry("key2"));
     }
     catch (Exception ex) {
-      fail("failed while verifyDestroyEntry in C1", ex);
+      Assert.fail("failed while verifyDestroyEntry in C1", ex);
     }
   }
 
@@ -386,7 +391,7 @@ public class DestroyEntryPropagationDUnitTest extends DistributedTestCase
       assertNull(r.getEntry("key2"));
     }
     catch (Exception ex) {
-      fail("failed while verifyDestroyEntry in C1", ex);
+      Assert.fail("failed while verifyDestroyEntry in C1", ex);
     }
   }
 
@@ -400,7 +405,7 @@ public class DestroyEntryPropagationDUnitTest extends DistributedTestCase
       assertNotNull(r.getEntry("key2"));
     }
     catch (Exception ex) {
-      fail("failed while verifyDestroyEntry for key1", ex);
+      Assert.fail("failed while verifyDestroyEntry for key1", ex);
     }
   }
 
@@ -414,7 +419,7 @@ public class DestroyEntryPropagationDUnitTest extends DistributedTestCase
         return "waiting for destroy event for " + key;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 10 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 10 * 1000, 200, true);
     ccl.destroys.remove(key);
   }
 
@@ -447,7 +452,7 @@ public class DestroyEntryPropagationDUnitTest extends DistributedTestCase
     AttributesFactory factory = new AttributesFactory();
     factory.setScope(Scope.DISTRIBUTED_ACK);
     factory.setPoolName(p.getName());
-    factory.setCacheListener(new CertifiableTestCacheListener(getLogWriter()));
+    factory.setCacheListener(new CertifiableTestCacheListener(LogWriterUtils.getLogWriter()));
     RegionAttributes attrs = factory.create();
     cache.createRegion(REGION_NAME, attrs);
 
@@ -459,7 +464,7 @@ public class DestroyEntryPropagationDUnitTest extends DistributedTestCase
     AttributesFactory factory = new AttributesFactory();
     factory.setScope(Scope.DISTRIBUTED_ACK);
     factory.setDataPolicy(DataPolicy.REPLICATE);
-    factory.setCacheListener(new CertifiableTestCacheListener(getLogWriter()));
+    factory.setCacheListener(new CertifiableTestCacheListener(LogWriterUtils.getLogWriter()));
     RegionAttributes attrs = factory.create();
     cache.createRegion(REGION_NAME, attrs);
 
@@ -482,7 +487,7 @@ public class DestroyEntryPropagationDUnitTest extends DistributedTestCase
 
     }
     catch (Exception ex) {
-      fail("failed while registering interest", ex);
+      Assert.fail("failed while registering interest", ex);
     }
   }
 
@@ -494,15 +499,13 @@ public class DestroyEntryPropagationDUnitTest extends DistributedTestCase
     }
   }
 
-  public void tearDown2() throws Exception
-  {
+  @Override
+  protected final void preTearDown() throws Exception {
     //close client
     vm2.invoke(DestroyEntryPropagationDUnitTest.class, "closeCache");
     vm3.invoke(DestroyEntryPropagationDUnitTest.class, "closeCache");
     //close server
     vm0.invoke(DestroyEntryPropagationDUnitTest.class, "closeCache");
     vm1.invoke(DestroyEntryPropagationDUnitTest.class, "closeCache");
-
   }
-
 }
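
This file also migrates the two remaining inherited helpers: polling waits become Wait.waitForCriterion over a WaitCriterion, and failures that carry a cause go through Assert.fail(String, Throwable). A minimal sketch of both patterns, assuming a plain Region and a key; the class and method names are placeholders:

import com.gemstone.gemfire.cache.Region;
import com.gemstone.gemfire.test.dunit.Assert;
import com.gemstone.gemfire.test.dunit.Wait;
import com.gemstone.gemfire.test.dunit.WaitCriterion;

public class WaitAndFailSketch {

  // Polling wait: replaces DistributedTestCase.waitForCriterion(ev, ms, interval, throwOnTimeout).
  static void waitForDestroy(final Region region, final Object key) {
    WaitCriterion ev = new WaitCriterion() {
      public boolean done() {
        return region.getEntry(key) == null;   // checked every 200 ms
      }
      public String description() {
        return "waiting for destroy of " + key;
      }
    };
    Wait.waitForCriterion(ev, 10 * 1000, 200, true);
  }

  // Failure with a cause: replaces the inherited fail(String, Throwable).
  static void destroyQuietly(Region region, Object key) {
    try {
      region.destroy(key);
    } catch (Exception ex) {
      Assert.fail("failed while destroying " + key, ex);
    }
  }
}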

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientBug39997DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientBug39997DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientBug39997DUnitTest.java
index d40563b..fb8fb3e 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientBug39997DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientBug39997DUnitTest.java
@@ -30,9 +30,13 @@ import com.gemstone.gemfire.cache.client.internal.PoolImpl;
 import com.gemstone.gemfire.cache.server.CacheServer;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 public class DurableClientBug39997DUnitTest extends CacheTestCase {
 
@@ -47,7 +51,7 @@ public class DurableClientBug39997DUnitTest extends CacheTestCase {
     VM vm0 = host.getVM(0);
     VM vm1 = host.getVM(1);
 
-    final String hostName = getServerHostName(host);
+    final String hostName = NetworkUtils.getServerHostName(host);
     final int port = AvailablePortHelper.getRandomAvailableTCPPort();
     vm0.invoke(new SerializableRunnable("create cache") {
       public void run() {
@@ -84,7 +88,7 @@ public class DurableClientBug39997DUnitTest extends CacheTestCase {
         try {
           server.start();
         } catch (IOException e) {
-          fail("couldn't start server", e);
+          Assert.fail("couldn't start server", e);
         }
       }
     });
@@ -93,7 +97,7 @@ public class DurableClientBug39997DUnitTest extends CacheTestCase {
       public void run() {
         Cache cache = getCache();
         final Region region = cache.getRegion("region");
-        waitForCriterion(new WaitCriterion() {
+        Wait.waitForCriterion(new WaitCriterion() {
 
           public String description() {
             return "Wait for register interest to succeed";

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientQueueSizeDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientQueueSizeDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientQueueSizeDUnitTest.java
index 3e23ab5..dbe2355 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientQueueSizeDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientQueueSizeDUnitTest.java
@@ -35,7 +35,9 @@ import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -85,11 +87,12 @@ public class DurableClientQueueSizeDUnitTest extends DistributedTestCase {
         "createCacheServer", new Object[] { });
     port1 = (Integer) vm1.invoke(DurableClientQueueSizeDUnitTest.class,
         "createCacheServer", new Object[] { });
-    addExpectedException("java.net.SocketException");
-    addExpectedException("Unexpected IOException");
+    IgnoredException.addIgnoredException("java.net.SocketException");
+    IgnoredException.addIgnoredException("Unexpected IOException");
   }
 
-  public void tearDown2() throws Exception {
+  @Override
+  protected final void preTearDown() throws Exception {
     closeCache();
 
     vm2.invoke(DurableClientQueueSizeDUnitTest.class, "closeCache");
@@ -275,7 +278,7 @@ public class DurableClientQueueSizeDUnitTest extends DistributedTestCase {
   public static Integer createCacheServer(Integer serverPort)
       throws Exception {
     Properties props = new Properties();
-    props.setProperty("locators", "localhost["+getDUnitLocatorPort()+"]");
+    props.setProperty("locators", "localhost["+DistributedTestUtils.getDUnitLocatorPort()+"]");
 //    props.setProperty("log-level", "fine");
 //    props.setProperty("log-file", "server_" + OSProcess.getId() + ".log");
 //    props.setProperty("statistic-archive-file", "server_" + OSProcess.getId()
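
createCacheServer() above now reads the dunit locator port from DistributedTestUtils rather than an inherited accessor. A small sketch of assembling the server's distributed-system properties that way; the property keys mirror the hunk, and the extra mcast-port entry is only illustrative:

import java.util.Properties;

import com.gemstone.gemfire.test.dunit.DistributedTestUtils;

public class LocatorPropsSketch {
  static Properties serverProperties() {
    Properties props = new Properties();
    // Point the server at the dunit locator started by the test framework.
    props.setProperty("locators",
        "localhost[" + DistributedTestUtils.getDUnitLocatorPort() + "]");
    props.setProperty("mcast-port", "0");   // illustrative: no multicast in the test
    return props;
  }
}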

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientReconnectAutoDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientReconnectAutoDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientReconnectAutoDUnitTest.java
index 5e015ea..f9e7d87 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientReconnectAutoDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientReconnectAutoDUnitTest.java
@@ -19,7 +19,9 @@ package com.gemstone.gemfire.internal.cache.tier.sockets;
 import com.gemstone.gemfire.cache.client.PoolFactory;
 import com.gemstone.gemfire.cache.client.PoolManager;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 
 /**
  * @author dsmith
@@ -50,7 +52,7 @@ public class DurableClientReconnectAutoDUnitTest extends
   protected PoolFactory getPoolFactory() {
     Host host = Host.getHost(0);
     PoolFactory factory = PoolManager.createFactory()
-    .addLocator(getServerHostName(host), getDUnitLocatorPort());
+    .addLocator(NetworkUtils.getServerHostName(host), DistributedTestUtils.getDUnitLocatorPort());
     return factory;
   }
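
getPoolFactory() above now derives the locator endpoint from the two extracted utilities. A small sketch of turning that factory into a named client pool; the pool name and the final create() call are illustrative context around the migrated lines:

import com.gemstone.gemfire.cache.client.Pool;
import com.gemstone.gemfire.cache.client.PoolFactory;
import com.gemstone.gemfire.cache.client.PoolManager;
import com.gemstone.gemfire.test.dunit.DistributedTestUtils;
import com.gemstone.gemfire.test.dunit.Host;
import com.gemstone.gemfire.test.dunit.NetworkUtils;

public class LocatorPoolSketch {
  static Pool createClientPool() {
    Host host = Host.getHost(0);
    PoolFactory factory = PoolManager.createFactory()
        .addLocator(NetworkUtils.getServerHostName(host),
                    DistributedTestUtils.getDUnitLocatorPort());
    return factory.create("durable-client-pool");   // illustrative pool name
  }
}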
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientReconnectDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientReconnectDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientReconnectDUnitTest.java
index 309f44a..6ade4bd 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientReconnectDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientReconnectDUnitTest.java
@@ -43,9 +43,15 @@ import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.distributed.internal.ServerLocation;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.CacheServerImpl;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 
 /**      
@@ -104,10 +110,10 @@ public class DurableClientReconnectDUnitTest extends DistributedTestCase
     PORT2 =  ((Integer) server2.invoke(DurableClientReconnectDUnitTest.class, "createServerCache"));
     PORT3 =  ((Integer) server3.invoke(DurableClientReconnectDUnitTest.class, "createServerCache"));
     PORT4 =  ((Integer) server4.invoke(DurableClientReconnectDUnitTest.class, "createServerCache"));
-    SERVER1 = getServerHostName(host)+PORT1;
-    SERVER2 = getServerHostName(host)+PORT2;
-    SERVER3 = getServerHostName(host)+PORT3;
-    SERVER4 = getServerHostName(host)+PORT4;
+    SERVER1 = NetworkUtils.getServerHostName(host)+PORT1;
+    SERVER2 = NetworkUtils.getServerHostName(host)+PORT2;
+    SERVER3 = NetworkUtils.getServerHostName(host)+PORT3;
+    SERVER4 = NetworkUtils.getServerHostName(host)+PORT4;
     
     //CacheServerTestUtil.disableShufflingOfEndpoints();
     System.setProperty("gemfire.bridge.disableShufflingOfEndpoints", "false");
@@ -115,7 +121,7 @@ public class DurableClientReconnectDUnitTest extends DistributedTestCase
   }
   public void testDurableReconnectSingleServer() throws Exception
   {
-    createCacheClientAndConnectToSingleServer(getServerHostName(Host.getHost(0)), 0);
+    createCacheClientAndConnectToSingleServer(NetworkUtils.getServerHostName(Host.getHost(0)), 0);
     List redundantServers = pool.getRedundantNames();    
     String primaryName = pool.getPrimaryName();
     assertTrue(redundantServers.isEmpty());
@@ -123,9 +129,9 @@ public class DurableClientReconnectDUnitTest extends DistributedTestCase
     
     //Wait for server to cleanup client resources
     //temporary fix for bug 38345.
-    pause(2000);
+    Wait.pause(2000);
     
-    createCacheClientAndConnectToSingleServer(getServerHostName(Host.getHost(0)), 0);
+    createCacheClientAndConnectToSingleServer(NetworkUtils.getServerHostName(Host.getHost(0)), 0);
     List redundantServers2 = pool.getRedundantNames();
     String primaryName2 = pool.getPrimaryName();
     assertTrue(redundantServers2.isEmpty());
@@ -133,13 +139,13 @@ public class DurableClientReconnectDUnitTest extends DistributedTestCase
   }
   public void testDurableReconnectSingleServerWithZeroConnPerServer() throws Exception
   {
-    createCacheClientAndConnectToSingleServerWithZeroConnPerServer(getServerHostName(Host.getHost(0)), 0);
+    createCacheClientAndConnectToSingleServerWithZeroConnPerServer(NetworkUtils.getServerHostName(Host.getHost(0)), 0);
     List redundantServers = pool.getRedundantNames();
     String primaryName = pool.getPrimaryName();
     assertTrue(redundantServers.isEmpty());
     closeCache(true);
     
-    createCacheClientAndConnectToSingleServerWithZeroConnPerServer(getServerHostName(Host.getHost(0)), 0);
+    createCacheClientAndConnectToSingleServerWithZeroConnPerServer(NetworkUtils.getServerHostName(Host.getHost(0)), 0);
     List redundantServers2 = pool.getRedundantNames();
     String primaryName2 = pool.getPrimaryName();
     assertTrue(redundantServers2.isEmpty());
@@ -155,7 +161,7 @@ public class DurableClientReconnectDUnitTest extends DistributedTestCase
     
     //Wait for server to cleanup client resources
     //temporary fix for bug 38345.
-    pause(2000);
+    Wait.pause(2000);
     
     createCacheClient(0);
     List redundantServers2 = pool.getRedundantNames();
@@ -182,7 +188,7 @@ public class DurableClientReconnectDUnitTest extends DistributedTestCase
     
     //Wait for server to cleanup client resources
     //temporary fix for bug 38345.
-    pause(2000);
+    Wait.pause(2000);
     
     createCacheClient();
     
@@ -211,7 +217,7 @@ public class DurableClientReconnectDUnitTest extends DistributedTestCase
     
     //Wait for server to cleanup client resources
     //temporary fix for bug 38345.
-    pause(2000);
+    Wait.pause(2000);
     
     createCacheClient();
     
@@ -333,7 +339,7 @@ public class DurableClientReconnectDUnitTest extends DistributedTestCase
     String rServer2 = (String)serverArray[1];
 
     // can see sporadic socket closed exceptions
-    final ExpectedException expectedEx = addExpectedException(
+    final IgnoredException expectedEx = IgnoredException.addIgnoredException(
         SocketException.class.getName());
 
     instance.closeServer(rServer1);    
@@ -376,51 +382,51 @@ public class DurableClientReconnectDUnitTest extends DistributedTestCase
     instance.determineAndVerfiyRedundantServers(redundantServers);
     instance.determineAndVerfiyNonRedundantServers(redundantServers);
     
-    getLogWriter().info("TEST - Durable client initialially has servers " + redundantServers);
+    LogWriterUtils.getLogWriter().info("TEST - Durable client initialially has servers " + redundantServers);
      
-    getLogWriter().info("TEST - Closing durable client for the first time");
+    LogWriterUtils.getLogWriter().info("TEST - Closing durable client for the first time");
     // Stop the durable client
     closeCache(true);
     
-    getLogWriter().info("TEST - Durable client closed for the first time");
+    LogWriterUtils.getLogWriter().info("TEST - Durable client closed for the first time");
     
     //Wait for server to cleanup client resources
     //temporary fix for bug 38345.
-    pause(2000);
+    Wait.pause(2000);
     
-    getLogWriter().info("TEST - Creating the durable client with one fewer servers");
+    LogWriterUtils.getLogWriter().info("TEST - Creating the durable client with one fewer servers");
     //We recreate the durable client, but this
     //Time we won't have it create any queues
     createCacheClient(2, 20, false);
     
     HashSet redundantServers2 = new HashSet(pool.getRedundantNames());
     redundantServers2.add(pool.getPrimaryName());
-    getLogWriter().info("TEST - Durable client created again, now with servers " + redundantServers2);
+    LogWriterUtils.getLogWriter().info("TEST - Durable client created again, now with servers " + redundantServers2);
     Host host = Host.getHost(0);
     //Make sure we create client to server connections to all of the servers 
-    pool.acquireConnection(new ServerLocation(getServerHostName(host), PORT1.intValue()));
-    pool.acquireConnection(new ServerLocation(getServerHostName(host), PORT2.intValue()));
-    pool.acquireConnection(new ServerLocation(getServerHostName(host), PORT3.intValue()));
-    pool.acquireConnection(new ServerLocation(getServerHostName(host), PORT4.intValue()));
+    pool.acquireConnection(new ServerLocation(NetworkUtils.getServerHostName(host), PORT1.intValue()));
+    pool.acquireConnection(new ServerLocation(NetworkUtils.getServerHostName(host), PORT2.intValue()));
+    pool.acquireConnection(new ServerLocation(NetworkUtils.getServerHostName(host), PORT3.intValue()));
+    pool.acquireConnection(new ServerLocation(NetworkUtils.getServerHostName(host), PORT4.intValue()));
     
-    getLogWriter().info("TEST - All pool connections are now aquired");
+    LogWriterUtils.getLogWriter().info("TEST - All pool connections are now aquired");
     
     closeCache(true);
     
-    getLogWriter().info("TEST - closed durable client for the second time");
+    LogWriterUtils.getLogWriter().info("TEST - closed durable client for the second time");
     
   //Wait for server to cleanup client resources
     //temporary fix for bug 38345.
-    pause(2000);
+    Wait.pause(2000);
     
-    getLogWriter().info("TEST - creating durable client for the third time");
+    LogWriterUtils.getLogWriter().info("TEST - creating durable client for the third time");
     //Now we should connect to all of the servers we were originally connected to
     createCacheClient(2, 20);
     
     HashSet redundantServersAfterReconnect = new HashSet(pool.getRedundantNames());
     redundantServersAfterReconnect.add(pool.getPrimaryName());
     
-    getLogWriter().info("TEST - durable client created for the third time, now with servers " + redundantServersAfterReconnect);
+    LogWriterUtils.getLogWriter().info("TEST - durable client created for the third time, now with servers " + redundantServersAfterReconnect);
     
     instance.determineAndVerfiyRedundantServers(redundantServersAfterReconnect);
     instance.determineAndVerfiyNonRedundantServers(redundantServersAfterReconnect);
@@ -428,9 +434,9 @@ public class DurableClientReconnectDUnitTest extends DistributedTestCase
     assertEquals(redundantServers, redundantServersAfterReconnect);
     
     //Now we wait to make sure the durable client expiration task isn't fired.
-    pause(25000);
+    Wait.pause(25000);
     
-    getLogWriter().info("TEST - Finished waiting for durable client expiration task");
+    LogWriterUtils.getLogWriter().info("TEST - Finished waiting for durable client expiration task");
     
     redundantServersAfterReconnect = new HashSet(pool.getRedundantNames());
     redundantServersAfterReconnect.add(pool.getPrimaryName());
@@ -450,7 +456,7 @@ public class DurableClientReconnectDUnitTest extends DistributedTestCase
         assertTrue(redundantServersAfterReconnect.contains(endpointName));
       }      
     }catch (Exception e){
-      fail("test failed due to" , e);
+      Assert.fail("test failed due to" , e);
     }    
   }
   
@@ -458,7 +464,7 @@ public class DurableClientReconnectDUnitTest extends DistributedTestCase
     try{
       checkNumberOfClientProxies(0);
     }catch (Exception e){
-      fail("test failed due to" , e);
+      Assert.fail("test failed due to" , e);
     }    
   }
   
@@ -536,7 +542,7 @@ public class DurableClientReconnectDUnitTest extends DistributedTestCase
     assertEquals("DurableClientReconnectDUnitTest_client", proxy.getDurableId());
 //    assertEquals(60, proxy.getDurableTimeout());
     }catch (Exception e){
-      fail("test failed due to" , e);
+      Assert.fail("test failed due to" , e);
     }    
   }
   
@@ -563,7 +569,7 @@ public class DurableClientReconnectDUnitTest extends DistributedTestCase
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 15 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 15 * 1000, 200, true);
   }
   
   protected static int getNumberOfClientProxies() {
@@ -585,7 +591,7 @@ public class DurableClientReconnectDUnitTest extends DistributedTestCase
     cache = CacheFactory.create(ds);
     assertNotNull(cache);    
   } catch(Exception e){
-    fail("test failed due to " , e ); 
+    Assert.fail("test failed due to " , e ); 
   }
   }
 
@@ -597,10 +603,10 @@ public class DurableClientReconnectDUnitTest extends DistributedTestCase
   protected PoolFactory getPoolFactory() {
     Host host = Host.getHost(0);
     PoolFactory factory = PoolManager.createFactory()
-    .addServer(getServerHostName(host), PORT1.intValue())
-    .addServer(getServerHostName(host), PORT2.intValue())
-    .addServer(getServerHostName(host), PORT3.intValue())
-    .addServer(getServerHostName(host), PORT4.intValue());
+    .addServer(NetworkUtils.getServerHostName(host), PORT1.intValue())
+    .addServer(NetworkUtils.getServerHostName(host), PORT2.intValue())
+    .addServer(NetworkUtils.getServerHostName(host), PORT3.intValue())
+    .addServer(NetworkUtils.getServerHostName(host), PORT4.intValue());
     return factory;
   }
   
@@ -641,7 +647,7 @@ public class DurableClientReconnectDUnitTest extends DistributedTestCase
     cache.readyForEvents();
     
     }catch(Exception e){
-      fail("test failed due to " , e );
+      Assert.fail("test failed due to " , e );
     }
     
   }
@@ -676,7 +682,7 @@ public class DurableClientReconnectDUnitTest extends DistributedTestCase
     cache.readyForEvents();
     
     }catch(Exception e){
-      fail("test failed due to " , e );
+      Assert.fail("test failed due to " , e );
     }    
   }
 
@@ -711,7 +717,7 @@ public class DurableClientReconnectDUnitTest extends DistributedTestCase
       cache.readyForEvents();
       
       }catch(Exception e){
-        fail("test failed due to " , e );
+        Assert.fail("test failed due to " , e );
       }    
   }
 
@@ -726,8 +732,7 @@ public class DurableClientReconnectDUnitTest extends DistributedTestCase
   }
 
   @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  protected final void preTearDown() throws Exception {
     // close the clients first
     closeCache();
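
Two further conventions appear in this file: the IgnoredException returned by addIgnoredException can be kept as a handle (its remove() method, not shown in the hunk, is assumed to drop the suppression just as the old ExpectedException did), and per-test cleanup moves from tearDown2() into the preTearDown() hook, which the framework calls before its own teardown. A rough sketch, assuming a DistributedTestCase subclass; closeClientCaches() is a hypothetical helper:

import java.net.SocketException;

import com.gemstone.gemfire.test.dunit.DistributedTestCase;
import com.gemstone.gemfire.test.dunit.IgnoredException;

public class ReconnectTearDownSketch extends DistributedTestCase {

  public ReconnectTearDownSketch(String name) {
    super(name);
  }

  void restartServerWithExpectedNoise() {
    // Hold the handle so the suppression can be dropped once the noisy phase is over.
    final IgnoredException expectedEx =
        IgnoredException.addIgnoredException(SocketException.class.getName());
    try {
      // ... stop and restart a cache server here ...
    } finally {
      expectedEx.remove();   // assumed to behave like the old ExpectedException.remove()
    }
  }

  @Override
  protected final void preTearDown() throws Exception {
    // Runs before the framework's own teardown; no super.tearDown2() call is needed.
    closeClientCaches();   // hypothetical helper standing in for the VM-by-VM closeCache() calls
  }

  private void closeClientCaches() {
    // placeholder for the real cleanup shown in the diffs
  }
}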
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientStatsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientStatsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientStatsDUnitTest.java
index 08eaa9d..97cb31c 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientStatsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableClientStatsDUnitTest.java
@@ -30,9 +30,12 @@ import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.CacheServerImpl;
 import com.gemstone.gemfire.cache.client.*;
 import com.gemstone.gemfire.internal.cache.PoolFactoryImpl;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 /**
  * 
@@ -77,7 +80,7 @@ public class DurableClientStatsDUnitTest extends DistributedTestCase {
   }
 
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDown() throws Exception {
     // Stop server 1
     this.server1VM.invoke(CacheServerTestUtil.class, "closeCache");
     CacheServerTestUtil.resetDisableShufflingOfEndpointsFlag();
@@ -99,23 +102,23 @@ public class DurableClientStatsDUnitTest extends DistributedTestCase {
 
     startAndCloseNonDurableClientCache(durableClientTimeout);
     startAndCloseNonDurableClientCache(1);      //////// -> Reconnection1
-    pause(1400);        //////// -> Queue Dropped1
+    Wait.pause(1400);        //////// -> Queue Dropped1
     startAndCloseNonDurableClientCache(1);
-    pause(1400);        //////// -> Queue Dropped2
+    Wait.pause(1400);        //////// -> Queue Dropped2
     
     startRegisterAndCloseNonDurableClientCache( durableClientTimeout);
-    pause(500);
+    Wait.pause(500);
 
     this.server1VM.invoke(DurableClientStatsDUnitTest.class, "putValue",
         new Object[] { K1, "Value1" });         //////// -> Enqueue Message1
 
-    pause(500);
+    Wait.pause(500);
     startAndCloseNonDurableClientCache(1);      //////// -> Reconnection2
-    pause(1400);        //////// -> Queue Dropped3
+    Wait.pause(1400);        //////// -> Queue Dropped3
     startAndCloseNonDurableClientCache(1);
-    pause(1400);        //////// -> Queue Dropped4
+    Wait.pause(1400);        //////// -> Queue Dropped4
     startRegisterAndCloseNonDurableClientCache( durableClientTimeout);
-    pause(500);
+    Wait.pause(500);
 
     this.server1VM.invoke(DurableClientStatsDUnitTest.class, "putValue",
         new Object[] { K1, "NewValue1" });      //////// -> Enqueue Message2
@@ -143,23 +146,23 @@ public class DurableClientStatsDUnitTest extends DistributedTestCase {
 
     startAndCloseDurableClientCache(durableClientTimeout);
     startAndCloseDurableClientCache(1);      //////// -> Reconnection1
-    pause(1400);        //////// -> Queue Dropped1
+    Wait.pause(1400);        //////// -> Queue Dropped1
     startAndCloseDurableClientCache(1);
-    pause(1400);        //////// -> Queue Dropped2
+    Wait.pause(1400);        //////// -> Queue Dropped2
     
     startRegisterAndCloseDurableClientCache( durableClientTimeout);
-    pause(500);
+    Wait.pause(500);
 
     this.server1VM.invoke(DurableClientStatsDUnitTest.class, "putValue",
         new Object[] { K1, "Value1" });         //////// -> Enqueue Message1
 
-    pause(500);
+    Wait.pause(500);
     startAndCloseDurableClientCache(1);      //////// -> Reconnection2
-    pause(1400);        //////// -> Queue Dropped3
+    Wait.pause(1400);        //////// -> Queue Dropped3
     startAndCloseDurableClientCache(1);
-    pause(1400);        //////// -> Queue Dropped4
+    Wait.pause(1400);        //////// -> Queue Dropped4
     startRegisterAndCloseDurableClientCache( durableClientTimeout);
-    pause(500);
+    Wait.pause(500);
 
     this.server1VM.invoke(DurableClientStatsDUnitTest.class, "putValue",
         new Object[] { K1, "NewValue1" });      //////// -> Enqueue Message2
@@ -178,7 +181,7 @@ public class DurableClientStatsDUnitTest extends DistributedTestCase {
 
     this.durableClientVM.invoke(CacheServerTestUtil.class, "createCacheClient",
         new Object[] {
-            getClientPool(getServerHostName(durableClientVM.getHost()), PORT1, true, 0),
+            getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), PORT1, true, 0),
             regionName,
             getDurableClientDistributedSystemProperties(durableClientId,
                 durableClientTimeout), Boolean.TRUE });
@@ -203,7 +206,7 @@ public class DurableClientStatsDUnitTest extends DistributedTestCase {
 
     this.durableClientVM.invoke(CacheServerTestUtil.class, "createCacheClient",
         new Object[] {
-            getClientPool(getServerHostName(durableClientVM.getHost()), PORT1, true, 0),
+            getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), PORT1, true, 0),
             regionName,
             getNonDurableClientDistributedSystemProperties(durableClientId,
                 durableClientTimeout), Boolean.TRUE });
@@ -228,7 +231,7 @@ public class DurableClientStatsDUnitTest extends DistributedTestCase {
 
     this.durableClientVM.invoke(CacheServerTestUtil.class, "createCacheClient",
         new Object[] {
-            getClientPool(getServerHostName(durableClientVM.getHost()), PORT1, true, 0),
+            getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), PORT1, true, 0),
             regionName,
             getDurableClientDistributedSystemProperties(durableClientId,
                 durableClientTimeout), Boolean.TRUE });
@@ -252,7 +255,7 @@ public class DurableClientStatsDUnitTest extends DistributedTestCase {
 
     this.durableClientVM.invoke(CacheServerTestUtil.class, "createCacheClient",
         new Object[] {
-            getClientPool(getServerHostName(durableClientVM.getHost()), PORT1, true, 0),
+            getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), PORT1, true, 0),
             regionName,
             getNonDurableClientDistributedSystemProperties(durableClientId,
                 durableClientTimeout), Boolean.TRUE });
@@ -336,7 +339,7 @@ public class DurableClientStatsDUnitTest extends DistributedTestCase {
       region.registerInterest(key, InterestResultPolicy.NONE, isDurable);
     }
     catch (Exception ex) {
-      fail("failed while registering interest in registerKey function", ex);
+      Assert.fail("failed while registering interest in registerKey function", ex);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableRegistrationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableRegistrationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableRegistrationDUnitTest.java
index f2ef6d1..141bb0b 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableRegistrationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableRegistrationDUnitTest.java
@@ -39,9 +39,14 @@ import com.gemstone.gemfire.internal.cache.LocalRegion;
 import com.gemstone.gemfire.cache.client.*;
 import com.gemstone.gemfire.cache.query.data.Portfolio;
 import com.gemstone.gemfire.internal.cache.PoolFactoryImpl;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * 
@@ -124,7 +129,7 @@ public class DurableRegistrationDUnitTest extends DistributedTestCase {
     // seconds
     this.durableClientVM.invoke(CacheServerTestUtil.class, "createCacheClient",
         new Object[] {
-            getClientPool(getServerHostName(durableClientVM.getHost()), PORT1, PORT2, true, 0),
+            getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), PORT1, PORT2, true, 0),
             regionName,
             getClientDistributedSystemProperties(durableClientId,
                 durableClientTimeout), Boolean.TRUE });
@@ -162,7 +167,7 @@ public class DurableRegistrationDUnitTest extends DistributedTestCase {
     this.server2VM.invoke(DurableRegistrationDUnitTest.class, "putValue",
         new Object[] { K4, "Value4" });
 
-    pause(1000);
+    Wait.pause(1000);
     // Step 5: Verify Updates on the Client
 
     assertEquals("Value1", this.server2VM.invoke(
@@ -197,7 +202,7 @@ public class DurableRegistrationDUnitTest extends DistributedTestCase {
     // Step 8: Re-start the Client
     this.durableClientVM
         .invoke(CacheServerTestUtil.class, "createCacheClient",
-            new Object[] { getClientPool(getServerHostName(durableClientVM.getHost()), PORT1, PORT2, true, 0),
+            new Object[] { getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), PORT1, PORT2, true, 0),
                 regionName,
                 getClientDistributedSystemProperties(durableClientId),
                 Boolean.TRUE });
@@ -210,7 +215,7 @@ public class DurableRegistrationDUnitTest extends DistributedTestCase {
       }
     });
 
-    pause(5000);
+    Wait.pause(5000);
 
     assertNull(this.durableClientVM.invoke(DurableRegistrationDUnitTest.class,
         "getValue", new Object[] { K1 }));
@@ -220,7 +225,7 @@ public class DurableRegistrationDUnitTest extends DistributedTestCase {
     this.durableClientVM.invoke(DurableRegistrationDUnitTest.class,
         "registerKey", new Object[] { K1, new Boolean(false) });
 
-    pause(5000);
+    Wait.pause(5000);
     assertNull(this.durableClientVM.invoke(DurableRegistrationDUnitTest.class,
         "getValue", new Object[] { K1 }));
     assertNull(this.durableClientVM.invoke(DurableRegistrationDUnitTest.class,
@@ -235,7 +240,7 @@ public class DurableRegistrationDUnitTest extends DistributedTestCase {
     this.server2VM.invoke(DurableRegistrationDUnitTest.class, "putValue",
         new Object[] { K4, "PingPong_updated_4" });
 
-    pause(5000);
+    Wait.pause(5000);
 
     // Step 9: Verify Updates on the Client
     assertEquals("PingPong_updated_1", this.durableClientVM.invoke(
@@ -276,7 +281,7 @@ public class DurableRegistrationDUnitTest extends DistributedTestCase {
     final int durableClientTimeout = 600;
     this.durableClientVM.invoke(CacheServerTestUtil.class, "createCacheClient",
         new Object[] {
-            getClientPool(getServerHostName(durableClientVM.getHost()), PORT1, PORT2, true, 0),
+            getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), PORT1, PORT2, true, 0),
             regionName,
             getClientDistributedSystemProperties(durableClientId,
                 durableClientTimeout) });
@@ -314,7 +319,7 @@ public class DurableRegistrationDUnitTest extends DistributedTestCase {
     this.server2VM.invoke(DurableRegistrationDUnitTest.class, "putValue",
         new Object[] { K4, "Value4" });
 
-    pause(1000);
+    Wait.pause(1000);
     // Step 5: Verify Updates on the Client
 
     assertEquals("Value1", this.server2VM.invoke(
@@ -349,7 +354,7 @@ public class DurableRegistrationDUnitTest extends DistributedTestCase {
     // Step 8: Re-start the Client
     this.durableClientVM
         .invoke(CacheServerTestUtil.class, "createCacheClient",
-            new Object[] { getClientPool(getServerHostName(durableClientVM.getHost()), PORT1, PORT2, true, 0),
+            new Object[] { getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), PORT1, PORT2, true, 0),
                 regionName,
                 getClientDistributedSystemProperties(durableClientId),
                 Boolean.TRUE });
@@ -381,7 +386,7 @@ public class DurableRegistrationDUnitTest extends DistributedTestCase {
     this.durableClientVM.invoke(DurableRegistrationDUnitTest.class,
         "unregisterKey", new Object[] { K3 });
 
-    pause(5000);
+    Wait.pause(5000);
 
     // Step 12: Modify values on the server for all the Keys
     this.server2VM.invoke(DurableRegistrationDUnitTest.class, "putValue",
@@ -393,7 +398,7 @@ public class DurableRegistrationDUnitTest extends DistributedTestCase {
     this.server2VM.invoke(DurableRegistrationDUnitTest.class, "putValue",
         new Object[] { K4, "PingPong_updated_4" });
 
-    pause(5000);
+    Wait.pause(5000);
 
     // Step 13: Check the values for the ones not unregistered and the
     // Unregistered Keys' Values should be null
@@ -467,7 +472,7 @@ public class DurableRegistrationDUnitTest extends DistributedTestCase {
     final int durableClientTimeout = 600;
     this.durableClientVM.invoke(CacheServerTestUtil.class, "createCacheClient",
         new Object[] {
-            getClientPool(getServerHostName(durableClientVM.getHost()), PORT1, PORT2, true, 1),
+            getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), PORT1, PORT2, true, 1),
             regionName,
             getClientDistributedSystemProperties(durableClientId,
                 durableClientTimeout) });
@@ -496,13 +501,13 @@ public class DurableRegistrationDUnitTest extends DistributedTestCase {
         "createCacheServer", new Object[] { regionName, new Boolean(true),
              PORT2 });
 
-    pause(3000);
+    Wait.pause(3000);
 
     // Check server2 got all the interests registered by the durable client.    
     server2VM.invoke(new CacheSerializableRunnable("Verify Interests.") {
       public void run2() throws CacheException
       {
-        getLogWriter().info("### Verifying interests registered by DurableClient. ###");
+        LogWriterUtils.getLogWriter().info("### Verifying interests registered by DurableClient. ###");
         CacheClientNotifier ccn = CacheClientNotifier.getInstance();
         CacheClientProxy p = null;
         
@@ -510,7 +515,7 @@ public class DurableRegistrationDUnitTest extends DistributedTestCase {
         for (int i=0; i < 60; i++) {
           Iterator ps = ccn.getClientProxies().iterator();
           if (!ps.hasNext()) {
-            pause(1000);
+            Wait.pause(1000);
             continue;
           } else {
             p = (CacheClientProxy)ps.next();
@@ -567,7 +572,7 @@ public class DurableRegistrationDUnitTest extends DistributedTestCase {
     final int durableClientTimeout = 600;
     this.durableClientVM.invoke(CacheServerTestUtil.class, "createCacheClient",
         new Object[] {
-            getClientPool(getServerHostName(durableClientVM.getHost()), PORT1, PORT2, true, 1),
+            getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), PORT1, PORT2, true, 1),
             regionName,
             getClientDistributedSystemProperties(durableClientId,
                 durableClientTimeout) });
@@ -594,12 +599,12 @@ public class DurableRegistrationDUnitTest extends DistributedTestCase {
     this.durableClientVM.invoke(DurableRegistrationDUnitTest.class,
         "closeCache");
 
-    pause(2000);
+    Wait.pause(2000);
 
     //Re-start the Client
     this.durableClientVM
         .invoke(CacheServerTestUtil.class, "createCacheClient",
-            new Object[] { getClientPool(getServerHostName(durableClientVM.getHost()), PORT1, PORT2, true, 1),
+            new Object[] { getClientPool(NetworkUtils.getServerHostName(durableClientVM.getHost()), PORT1, PORT2, true, 1),
                 regionName,
                 getClientDistributedSystemProperties(durableClientId),
                 Boolean.TRUE });
@@ -617,13 +622,13 @@ public class DurableRegistrationDUnitTest extends DistributedTestCase {
         "createCacheServer", new Object[] { regionName, new Boolean(true),
              PORT2 });
 
-    pause(3000);
+    Wait.pause(3000);
 
     // Check server2 got all the interests registered by the durable client.    
     server2VM.invoke(new CacheSerializableRunnable("Verify Interests.") {
       public void run2() throws CacheException
       {
-        getLogWriter().info("### Verifying interests registered by DurableClient. ###");
+        LogWriterUtils.getLogWriter().info("### Verifying interests registered by DurableClient. ###");
         CacheClientNotifier ccn = CacheClientNotifier.getInstance();
         CacheClientProxy p = null;
         
@@ -631,7 +636,7 @@ public class DurableRegistrationDUnitTest extends DistributedTestCase {
         for (int i=0; i < 60; i++) {
           Iterator ps = ccn.getClientProxies().iterator();
           if (!ps.hasNext()) {
-            pause(1000);
+            Wait.pause(1000);
             continue;
           } else {
             p = (CacheClientProxy)ps.next();
@@ -703,7 +708,7 @@ public class DurableRegistrationDUnitTest extends DistributedTestCase {
 
     }
     catch (Exception ex) {
-      fail("failed while registering interest in registerKey function", ex);
+      Assert.fail("failed while registering interest in registerKey function", ex);
     }
   }
 
@@ -736,7 +741,7 @@ public class DurableRegistrationDUnitTest extends DistributedTestCase {
       region.registerInterest(key, InterestResultPolicy.NONE, isDurable);
     }
     catch (Exception ex) {
-      fail("failed while registering interest in registerKey function", ex);
+      Assert.fail("failed while registering interest in registerKey function", ex);
     }
   }
 
@@ -752,7 +757,7 @@ public class DurableRegistrationDUnitTest extends DistributedTestCase {
       region.unregisterInterest(key);
     }
     catch (Exception ex) {
-      fail("failed while registering interest in registerKey function", ex);
+      Assert.fail("failed while registering interest in registerKey function", ex);
     }
   }
 
@@ -801,7 +806,7 @@ public class DurableRegistrationDUnitTest extends DistributedTestCase {
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 15 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 15 * 1000, 200, true);
   }
 
   protected static int getNumberOfClientProxies() {
@@ -858,9 +863,8 @@ public class DurableRegistrationDUnitTest extends DistributedTestCase {
     this.regionName = regionName;
   }
   
-  public void tearDown2() throws Exception
-  {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     CacheServerTestUtil.resetDisableShufflingOfEndpointsFlag();
   }
 }
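
For reference, the tear-down change that repeats across these hunks boils down to the sketch below. It is illustrative only: the closeCache() call stands in for whatever cleanup a given test performs, and the assumption (suggested by the removed super.tearDown2() calls) is that DistributedTestCase now invokes the preTearDown() hook itself as part of its own tear-down.

    // Old pattern: each test overrode tearDown2() and had to chain to super
    public void tearDown2() throws Exception {
      super.tearDown2();
      closeCache();               // test-specific cleanup
    }

    // New pattern: tests override the preTearDown() hook; no super call is made,
    // presumably because the base class runs the hook during its own tear-down
    @Override
    protected final void preTearDown() throws Exception {
      closeCache();               // test-specific cleanup
    }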

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableResponseMatrixDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableResponseMatrixDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableResponseMatrixDUnitTest.java
index bbbdf80..4f1028c 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableResponseMatrixDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/DurableResponseMatrixDUnitTest.java
@@ -24,9 +24,14 @@ import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.cache.ClientServerObserverAdapter;
 import com.gemstone.gemfire.internal.cache.ClientServerObserverHolder;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.cache.client.*;
 import com.gemstone.gemfire.cache.client.internal.PoolImpl;
@@ -66,9 +71,9 @@ public class DurableResponseMatrixDUnitTest extends DistributedTestCase
     // start servers first
     PORT1 = ((Integer)server1.invoke(DurableResponseMatrixDUnitTest.class,
         "createServerCache"));
-    createCacheClient(getServerHostName(server1.getHost()));
+    createCacheClient(NetworkUtils.getServerHostName(server1.getHost()));
     //Disconnecting the client can cause this
-    addExpectedException("Connection reset||Unexpected IOException");
+    IgnoredException.addIgnoredException("Connection reset||Unexpected IOException");
   }
 
   public void testRegisterInterestResponse_NonExistent_Invalid()
@@ -196,7 +201,7 @@ public class DurableResponseMatrixDUnitTest extends DistributedTestCase
         return null;
       }
     };
-    DistributedTestCase.waitForCriterion(ev, 120 * 1000, 200, true);
+    Wait.waitForCriterion(ev, 120 * 1000, 200, true);
   }
   
   public void testNotification_NonExistent_Create() throws Exception
@@ -369,7 +374,7 @@ public class DurableResponseMatrixDUnitTest extends DistributedTestCase
       r.put(key, value);
     }
     catch (Exception e) {
-      fail("test failed due to ", e);
+      Assert.fail("test failed due to ", e);
     }
   }
 
@@ -381,7 +386,7 @@ public class DurableResponseMatrixDUnitTest extends DistributedTestCase
       r.destroy(key);
     }
     catch (Exception e) {
-      fail("test failed due to ", e);
+      Assert.fail("test failed due to ", e);
     }
   }
 
@@ -393,7 +398,7 @@ public class DurableResponseMatrixDUnitTest extends DistributedTestCase
       r.invalidate(key);
     }
     catch (Exception e) {
-      fail("test failed due to ", e);
+      Assert.fail("test failed due to ", e);
     }
   }
 
@@ -406,7 +411,7 @@ public class DurableResponseMatrixDUnitTest extends DistributedTestCase
       r.invalidate(key);
     }
     catch (Exception e) {
-      fail("test failed due to ", e);
+      Assert.fail("test failed due to ", e);
     }
   }
 
@@ -419,7 +424,7 @@ public class DurableResponseMatrixDUnitTest extends DistributedTestCase
       r.localInvalidate(key);
     }
     catch (Exception e) {
-      fail("test failed due to ", e);
+      Assert.fail("test failed due to ", e);
     }
   }
 
@@ -434,7 +439,7 @@ public class DurableResponseMatrixDUnitTest extends DistributedTestCase
       assertNotNull(cache);
     }
     catch (Exception e) {
-      fail("test failed due to ", e);
+      Assert.fail("test failed due to ", e);
     }
   }
 
@@ -467,7 +472,7 @@ public class DurableResponseMatrixDUnitTest extends DistributedTestCase
 
     }
     catch (Exception e) {
-      fail("test failed due to ", e);
+      Assert.fail("test failed due to ", e);
     }
 
   }
@@ -503,9 +508,8 @@ public class DurableResponseMatrixDUnitTest extends DistributedTestCase
     return properties;
   }
 
-  public void tearDown2() throws Exception
-  {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     // close the clients first
     closeCache();
     // then close the servers
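
The waitForCriterion calls that move from DistributedTestCase to Wait in this file keep the same four-argument shape. A minimal usage sketch inside a dunit test method, assuming only the WaitCriterion interface and the Wait.waitForCriterion signature visible in the hunks above; the done() predicate is a hypothetical placeholder:

    import com.gemstone.gemfire.test.dunit.Wait;
    import com.gemstone.gemfire.test.dunit.WaitCriterion;

    WaitCriterion ev = new WaitCriterion() {
      public boolean done() {
        // hypothetical predicate: poll whatever condition the test is waiting on
        return regionHasExpectedValue();
      }
      public String description() {
        return "waiting for the expected value to arrive";
      }
    };
    // poll every 200 ms for up to 120 s; the final 'true' makes a timeout fail the test
    Wait.waitForCriterion(ev, 120 * 1000, 200, true);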

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/EventIDVerificationDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/EventIDVerificationDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/EventIDVerificationDUnitTest.java
index 6cb897e..78e0f41 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/EventIDVerificationDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/EventIDVerificationDUnitTest.java
@@ -42,8 +42,10 @@ import com.gemstone.gemfire.internal.cache.CacheObserverHolder;
 import com.gemstone.gemfire.internal.cache.EntryEventImpl;
 import com.gemstone.gemfire.internal.cache.EventID;
 import com.gemstone.gemfire.internal.cache.RegionEventImpl;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 /**
@@ -97,7 +99,7 @@ public class EventIDVerificationDUnitTest extends DistributedTestCase
 
     //vm2.invoke(EventIDVerificationDUnitTest.class, "createClientCache", new
     // Object[] { new Integer(PORT1),new Integer(PORT2)});
-    createClientCache(getServerHostName(host), new Integer(PORT1), new Integer(PORT2));
+    createClientCache(NetworkUtils.getServerHostName(host), new Integer(PORT1), new Integer(PORT2));
     CacheObserverHolder.setInstance(new CacheObserverAdapter());
 
   }
@@ -388,7 +390,7 @@ public class EventIDVerificationDUnitTest extends DistributedTestCase
       assertEquals(r.getEntry("key-1").getValue(), "key-1");
     }
     catch (Exception ex) {
-      fail("failed while createEntries()", ex);
+      Assert.fail("failed while createEntries()", ex);
     }
   }
 
@@ -404,7 +406,7 @@ public class EventIDVerificationDUnitTest extends DistributedTestCase
 
     }
     catch (Exception ex) {
-      fail("failed while r.put()", ex);
+      Assert.fail("failed while r.put()", ex);
     }
   }
 
@@ -416,7 +418,7 @@ public class EventIDVerificationDUnitTest extends DistributedTestCase
       r.destroy("key-1");
     }
     catch (Exception ex) {
-      fail("test failed due to exception in destroy ", ex);
+      Assert.fail("test failed due to exception in destroy ", ex);
     }
   }
 
@@ -429,7 +431,7 @@ public class EventIDVerificationDUnitTest extends DistributedTestCase
       r.remove("key-1");
     }
     catch (Exception ex) {
-      fail("test failed due to exception in remove ", ex);
+      Assert.fail("test failed due to exception in remove ", ex);
     }
   }
 
@@ -442,7 +444,7 @@ public class EventIDVerificationDUnitTest extends DistributedTestCase
       r.destroyRegion();
     }
     catch (Exception ex) {
-      fail("test failed due to exception in destroyRegion ", ex);
+      Assert.fail("test failed due to exception in destroyRegion ", ex);
     }
   }
 
@@ -454,7 +456,7 @@ public class EventIDVerificationDUnitTest extends DistributedTestCase
       r.clear();
     }
     catch (Exception ex) {
-      fail("test failed due to exception in clearRegion ", ex);
+      Assert.fail("test failed due to exception in clearRegion ", ex);
     }
   }
 
@@ -486,9 +488,8 @@ public class EventIDVerificationDUnitTest extends DistributedTestCase
     assertEquals(eventId, ((RegionEventImpl)event).getEventId());
   }
 
-  public void tearDown2() throws Exception
-  {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     // close the clients first
     closeCache();
     // then close the servers

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/EventIDVerificationInP2PDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/EventIDVerificationInP2PDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/EventIDVerificationInP2PDUnitTest.java
index 4e8b82d..15a709b 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/EventIDVerificationInP2PDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/EventIDVerificationInP2PDUnitTest.java
@@ -31,6 +31,7 @@ import com.gemstone.gemfire.cache.util.CacheListenerAdapter;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.internal.cache.EventID;
 import com.gemstone.gemfire.internal.cache.InternalCacheEvent;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.VM;
@@ -216,7 +217,7 @@ public class EventIDVerificationInP2PDUnitTest extends DistributedTestCase
       assertEquals(r.getEntry("key-1").getValue(), "key-1");
     }
     catch (Exception ex) {
-      fail("failed while createEntries()", ex);
+      Assert.fail("failed while createEntries()", ex);
     }
   }
 
@@ -232,7 +233,7 @@ public class EventIDVerificationInP2PDUnitTest extends DistributedTestCase
 
     }
     catch (Exception ex) {
-      fail("failed while r.put()", ex);
+      Assert.fail("failed while r.put()", ex);
     }
   }
 
@@ -244,7 +245,7 @@ public class EventIDVerificationInP2PDUnitTest extends DistributedTestCase
       r.destroy("key-1");
     }
     catch (Exception ex) {
-      fail("test failed due to exception in destroy ", ex);
+      Assert.fail("test failed due to exception in destroy ", ex);
     }
   }
 
@@ -256,7 +257,7 @@ public class EventIDVerificationInP2PDUnitTest extends DistributedTestCase
       r.destroyRegion();
     }
     catch (Exception ex) {
-      fail("test failed due to exception in destroyRegion ", ex);
+      Assert.fail("test failed due to exception in destroyRegion ", ex);
     }
   }
   
@@ -268,7 +269,7 @@ public class EventIDVerificationInP2PDUnitTest extends DistributedTestCase
       r.invalidateRegion();
     }
     catch (Exception ex) {
-      fail("test failed due to exception in invalidateRegion ", ex);
+      Assert.fail("test failed due to exception in invalidateRegion ", ex);
     }
   }
 
@@ -317,9 +318,8 @@ public class EventIDVerificationInP2PDUnitTest extends DistributedTestCase
     assertTrue(pass.booleanValue());   
   }
 
-  public void tearDown2() throws Exception
-  {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     closeCache();
     vm0.invoke(EventIDVerificationInP2PDUnitTest.class, "closeCache");
   }
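
The fail(..., ex) rewrites in this file all follow one pattern: the helper moves to the dunit Assert class. A short sketch, assuming only the two-argument Assert.fail(String, Throwable) overload these hunks import and call:

    import com.gemstone.gemfire.test.dunit.Assert;

    try {
      region.put("key-1", "key-1");
    } catch (Exception ex) {
      // the two-argument overload attaches the causing exception to the failure,
      // which plain JUnit fail(String) cannot do
      Assert.fail("failed while r.put()", ex);
    }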

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ForceInvalidateEvictionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ForceInvalidateEvictionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ForceInvalidateEvictionDUnitTest.java
index 3d89089..9313ddd 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ForceInvalidateEvictionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ForceInvalidateEvictionDUnitTest.java
@@ -43,9 +43,13 @@ import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.CachedDeserializable;
 import com.gemstone.gemfire.internal.cache.LocalRegion;
 import com.gemstone.gemfire.internal.cache.Token;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * @author dsmith
@@ -258,7 +262,7 @@ public class ForceInvalidateEvictionDUnitTest extends CacheTestCase {
         Region region = cache.getRegion(name);
         final MyListener listener = (MyListener) region.getAttributes().getCacheListeners()[0];
         if(invalidated) {
-          waitForCriterion(new WaitCriterion() {
+          Wait.waitForCriterion(new WaitCriterion() {
 
             public String description() {
               return "Didn't receive invalidate after 30 seconds";
@@ -284,7 +288,7 @@ public class ForceInvalidateEvictionDUnitTest extends CacheTestCase {
         Cache cache = getCache();
         final LocalRegion region = (LocalRegion) cache.getRegion(name);
         
-        waitForCriterion(new WaitCriterion() {
+        Wait.waitForCriterion(new WaitCriterion() {
           
           public boolean done() {
             Object value  = null;
@@ -341,7 +345,7 @@ public class ForceInvalidateEvictionDUnitTest extends CacheTestCase {
         Cache cache = getCache();
         
         PoolFactory pf = PoolManager.createFactory();
-        pf.addServer(getServerHostName(host), port);
+        pf.addServer(NetworkUtils.getServerHostName(host), port);
         pf.setSubscriptionEnabled(true);
         pf.create(name);
         RegionFactory rf = new RegionFactory();
@@ -366,7 +370,7 @@ public class ForceInvalidateEvictionDUnitTest extends CacheTestCase {
         try {
           server.start();
         } catch (IOException e) {
-          fail("IO Exception", e);
+          Assert.fail("IO Exception", e);
         }
       }
     });

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ForceInvalidateOffHeapEvictionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ForceInvalidateOffHeapEvictionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ForceInvalidateOffHeapEvictionDUnitTest.java
index 7a441c6..b9c1893 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ForceInvalidateOffHeapEvictionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/ForceInvalidateOffHeapEvictionDUnitTest.java
@@ -20,6 +20,7 @@ import java.util.Properties;
 
 import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.cache.OffHeapTestUtil;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 
 /**
@@ -35,7 +36,7 @@ public class ForceInvalidateOffHeapEvictionDUnitTest extends
   }
 
   @Override
-  public void tearDown2() throws Exception {
+  protected final void preTearDownCacheTestCase() throws Exception {
     SerializableRunnable checkOrphans = new SerializableRunnable() {
 
       @Override
@@ -45,12 +46,8 @@ public class ForceInvalidateOffHeapEvictionDUnitTest extends
         }
       }
     };
-    invokeInEveryVM(checkOrphans);
-    try {
-      checkOrphans.run();
-    } finally {
-      super.tearDown2();
-    }
+    Invoke.invokeInEveryVM(checkOrphans);
+    checkOrphans.run();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/HABug36738DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/HABug36738DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/HABug36738DUnitTest.java
index 395047a..08175dd 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/HABug36738DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/HABug36738DUnitTest.java
@@ -39,7 +39,9 @@ import com.gemstone.gemfire.internal.cache.tier.sockets.ClientProxyMembershipID;
 import com.gemstone.gemfire.internal.cache.tier.sockets.ClientUpdateMessage;
 import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 /**
  * This is the bugtest for bug no. 36738. When Object of class
@@ -85,18 +87,16 @@ public class HABug36738DUnitTest extends DistributedTestCase
 
   }
 
-  public void tearDown2() throws Exception
-  {
-    super.tearDown2();
+  @Override
+  protected final void preTearDown() throws Exception {
     server1.invoke(HABug36738DUnitTest.class, "closeCache");
     server2.invoke(HABug36738DUnitTest.class, "closeCache");
-
   }
 
   public void testBug36768() throws Exception
   {
     createServer1();
-    pause(10000);
+    Wait.pause(10000);
     server1.invoke(HABug36738DUnitTest.class, "checkRegionQueueSize");
     createServer2();
     server1.invoke(HABug36738DUnitTest.class, "checkRegionQueueSize");
@@ -113,7 +113,7 @@ public class HABug36738DUnitTest extends DistributedTestCase
     while (itr.hasNext()) {
       Object key = itr.next();
       ClientUpdateMessage value = (ClientUpdateMessage)region.get(key);
-      getLogWriter().info("key : " + key + "Value " + value.getValue());
+      LogWriterUtils.getLogWriter().info("key : " + key + "Value " + value.getValue());
 
     }
 
@@ -161,7 +161,7 @@ public class HABug36738DUnitTest extends DistributedTestCase
             new EventID(("memberID" + i).getBytes(), i, i));
 
         haRegion.put(new Long(i), clientMessage);
-        getLogWriter().info("Putting in the message Queue");
+        LogWriterUtils.getLogWriter().info("Putting in the message Queue");
 
       }
     }
@@ -190,7 +190,7 @@ public class HABug36738DUnitTest extends DistributedTestCase
     HARegion region = (HARegion)cache.getRegion(Region.SEPARATOR
         + HAHelper.getRegionQueueName(HAREGION_NAME));
     assertNotNull(region);
-    getLogWriter().info("Size of the Queue : " + region.size());
+    LogWriterUtils.getLogWriter().info("Size of the Queue : " + region.size());
 
   }
 }
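
Logging and pauses in this test now go through the extracted utility classes rather than inherited methods. A minimal sketch, assuming only the LogWriterUtils.getLogWriter() accessor and Wait.pause(int) used throughout these hunks:

    import com.gemstone.gemfire.test.dunit.LogWriterUtils;
    import com.gemstone.gemfire.test.dunit.Wait;

    Wait.pause(10000);   // unconditional pause, as in testBug36768 above
    // same LogWriter API as before; only the way it is obtained changes
    LogWriterUtils.getLogWriter().info("Size of the Queue : " + region.size());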

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/HAInterestPart1DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/HAInterestPart1DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/HAInterestPart1DUnitTest.java
index e82faae..fc9211d 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/HAInterestPart1DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/HAInterestPart1DUnitTest.java
@@ -16,6 +16,8 @@
  */
 package com.gemstone.gemfire.internal.cache.tier.sockets;
 
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
 
 @SuppressWarnings("serial")
@@ -29,7 +31,7 @@ public class HAInterestPart1DUnitTest extends HAInterestTestCase {
    * Tests whether interest is registered or not on both primary and secondaries
    */
   public void testInterestRegistrationOnBothPrimaryAndSecondary() throws Exception {
-    createClientPoolCache(this.getName(), getServerHostName(server1.getHost()));
+    createClientPoolCache(this.getName(), NetworkUtils.getServerHostName(server1.getHost()));
     createEntriesK1andK2();
     server1.invoke(HAInterestTestCase.class, "createEntriesK1andK2");
     server2.invoke(HAInterestTestCase.class, "createEntriesK1andK2");
@@ -46,7 +48,7 @@ public class HAInterestPart1DUnitTest extends HAInterestTestCase {
    * verify their responses
    */
   public void testInterestRegistrationResponseOnBothPrimaryAndSecondary() throws Exception {
-    createClientPoolCache(this.getName(), getServerHostName(server1.getHost()));
+    createClientPoolCache(this.getName(), NetworkUtils.getServerHostName(server1.getHost()));
     createEntriesK1andK2();
     server1.invoke(HAInterestTestCase.class, "createEntriesK1andK2");
     server2.invoke(HAInterestTestCase.class, "createEntriesK1andK2");
@@ -60,7 +62,7 @@ public class HAInterestPart1DUnitTest extends HAInterestTestCase {
    * interest map
    */
   public void testRERegistrationWillNotCreateDuplicateKeysOnServerInterstMaps() throws Exception {
-    createClientPoolCache(this.getName(), getServerHostName(server1.getHost()));
+    createClientPoolCache(this.getName(), NetworkUtils.getServerHostName(server1.getHost()));
     createEntriesK1andK2();
     server1.invoke(HAInterestTestCase.class, "createEntriesK1andK2");
     server2.invoke(HAInterestTestCase.class, "createEntriesK1andK2");
@@ -79,7 +81,7 @@ public class HAInterestPart1DUnitTest extends HAInterestTestCase {
    * registerInterest
    */
   public void testPrimaryFailureInRegisterInterest() throws Exception {
-    createClientPoolCache(this.getName(), getServerHostName(server1.getHost()));
+    createClientPoolCache(this.getName(), NetworkUtils.getServerHostName(server1.getHost()));
     createEntriesK1andK2();
     server1.invoke(HAInterestTestCase.class, "createEntriesK1andK2");
     server2.invoke(HAInterestTestCase.class, "createEntriesK1andK2");
@@ -100,7 +102,7 @@ public class HAInterestPart1DUnitTest extends HAInterestTestCase {
    * list
    */
   public void testSecondaryFailureInRegisterInterest() throws Exception {
-    createClientPoolCache(this.getName(), getServerHostName(server1.getHost()));
+    createClientPoolCache(this.getName(), NetworkUtils.getServerHostName(server1.getHost()));
     createEntriesK1andK2();
     server1.invoke(HAInterestTestCase.class, "createEntriesK1andK2");
     server2.invoke(HAInterestTestCase.class, "createEntriesK1andK2");
@@ -122,7 +124,7 @@ public class HAInterestPart1DUnitTest extends HAInterestTestCase {
    * registration on newly selected primary
    */
   public void testBothPrimaryAndSecondaryFailureInRegisterInterest() throws Exception {
-    createClientPoolCache(this.getName(), getServerHostName(server1.getHost()));
+    createClientPoolCache(this.getName(), NetworkUtils.getServerHostName(server1.getHost()));
     createEntriesK1andK2();
     server1.invoke(HAInterestTestCase.class, "createEntriesK1andK2");
     server2.invoke(HAInterestTestCase.class, "createEntriesK1andK2");
@@ -146,7 +148,7 @@ public class HAInterestPart1DUnitTest extends HAInterestTestCase {
    *
    */
   public void testProbablePrimaryFailureInRegisterInterest() throws Exception {
-    createClientPoolCache(this.getName(), getServerHostName(server1.getHost()));
+    createClientPoolCache(this.getName(), NetworkUtils.getServerHostName(server1.getHost()));
     createEntriesK1andK2();
     server1.invoke(HAInterestTestCase.class, "createEntriesK1andK2");
     server2.invoke(HAInterestTestCase.class, "createEntriesK1andK2");
@@ -166,10 +168,10 @@ public class HAInterestPart1DUnitTest extends HAInterestTestCase {
    * client ( create CCP) as welll as register IL
    */
   public void testInterstRegistrationOnRecoveredEPbyDSM() throws Exception {
-    addExpectedException("SocketException");
-    addExpectedException("Unexpected IOException");
+    IgnoredException.addIgnoredException("SocketException");
+    IgnoredException.addIgnoredException("Unexpected IOException");
 
-    createClientPoolCache(this.getName(), getServerHostName(server1.getHost()));
+    createClientPoolCache(this.getName(), NetworkUtils.getServerHostName(server1.getHost()));
     createEntriesK1andK2();
     registerK1AndK2();
     server1.invoke(HAInterestTestCase.class, "createEntriesK1andK2");

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/HAInterestPart2DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/HAInterestPart2DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/HAInterestPart2DUnitTest.java
index 28bee9f..647069f 100755
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/HAInterestPart2DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/internal/cache/tier/sockets/HAInterestPart2DUnitTest.java
@@ -19,8 +19,11 @@ package com.gemstone.gemfire.internal.cache.tier.sockets;
 import com.gemstone.gemfire.cache.EntryDestroyedException;
 import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.client.ServerConnectivityException;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 @SuppressWarnings({"rawtypes", "serial"})
 public class HAInterestPart2DUnitTest extends HAInterestTestCase {
@@ -34,7 +37,7 @@ public class HAInterestPart2DUnitTest extends HAInterestTestCase {
    * failover should pick new primary
    */
   public void testPrimaryFailureInUNregisterInterest() throws Exception {
-    createClientPoolCache(this.getName(), getServerHostName(server1.getHost()));
+    createClientPoolCache(this.getName(), NetworkUtils.getServerHostName(server1.getHost()));
     createEntriesK1andK2();
     server1.invoke(HAInterestTestCase.class, "createEntriesK1andK2");
     server2.invoke(HAInterestTestCase.class, "createEntriesK1andK2");
@@ -60,7 +63,7 @@ public class HAInterestPart2DUnitTest extends HAInterestTestCase {
    * Ep list
    */
   public void testSecondaryFailureInUNRegisterInterest() throws Exception {
-    createClientPoolCache(this.getName(), getServerHostName(server1.getHost()));
+    createClientPoolCache(this.getName(), NetworkUtils.getServerHostName(server1.getHost()));
     createEntriesK1andK2();
     server1.invoke(HAInterestTestCase.class, "createEntriesK1andK2");
     server2.invoke(HAInterestTestCase.class, "createEntriesK1andK2");
@@ -82,7 +85,7 @@ public class HAInterestPart2DUnitTest extends HAInterestTestCase {
    * detected live server as well
    */
   public void testDSMDetectsServerLiveJustBeforeInterestRegistration() throws Exception {
-    createClientPoolCache(this.getName(), getServerHostName(server1.getHost()));
+    createClientPoolCache(this.getName(), NetworkUtils.getServerHostName(server1.getHost()));
     createEntriesK1andK2();
     server1.invoke(HAInterestTestCase.class, "createEntriesK1andK2");
     server2.invoke(HAInterestTestCase.class, "createEntriesK1andK2");
@@ -108,7 +111,7 @@ public class HAInterestPart2DUnitTest extends HAInterestTestCase {
    * detected live server as well
    */
   public void testDSMDetectsServerLiveJustAfterInterestRegistration() throws Exception {
-    createClientPoolCache(this.getName(), getServerHostName(server1.getHost()));
+    createClientPoolCache(this.getName(), NetworkUtils.getServerHostName(server1.getHost()));
 
     createEntriesK1andK2();
     server1.invoke(HAInterestTestCase.class, "createEntriesK1andK2");
@@ -140,11 +143,11 @@ public class HAInterestPart2DUnitTest extends HAInterestTestCase {
    * is primary
    */
   public void testRefreshEntriesFromPrimaryWhenDSMDetectsServerLive() throws Exception {
-    addExpectedException(ServerConnectivityException.class.getName());
+    IgnoredException.addIgnoredException(ServerConnectivityException.class.getName());
     
     PORT1 = ((Integer) server1.invoke(HAInterestTestCase.class, "createServerCache")).intValue();
     server1.invoke(HAInterestTestCase.class, "createEntriesK1andK2");
-    createClientPoolCacheConnectionToSingleServer(this.getName(), getServerHostName(server1.getHost()));
+    createClientPoolCacheConnectionToSingleServer(this.getName(), NetworkUtils.getServerHostName(server1.getHost()));
     registerK1AndK2();
     verifyRefreshedEntriesFromServer();
 
@@ -200,7 +203,7 @@ public class HAInterestPart2DUnitTest extends HAInterestTestCase {
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
+    Wait.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
   }
 
   /**
@@ -222,7 +225,7 @@ public class HAInterestPart2DUnitTest extends HAInterestTestCase {
     server2.invoke(HAInterestTestCase.class, "createEntriesK1andK2");
     server3.invoke(HAInterestTestCase.class, "createEntriesK1andK2");
 
-    createClientPoolCache(this.getName(), getServerHostName(server1.getHost()));
+    createClientPoolCache(this.getName(), NetworkUtils.getServerHostName(server1.getHost()));
 
     VM backup1 = getBackupVM();
     VM backup2 = getBackupVM(backup1);
@@ -247,7 +250,7 @@ public class HAInterestPart2DUnitTest extends HAInterestTestCase {
   public void testBug35945() throws Exception {
     PORT1 = ((Integer) server1.invoke(HAInterestTestCase.class, "createServerCache")).intValue();
     server1.invoke(HAInterestTestCase.class, "createEntriesK1andK2");
-    createClientPoolCacheConnectionToSingleServer(this.getName(), getServerHostName(server1.getHost()));
+    createClientPoolCacheConnectionToSingleServer(this.getName(), NetworkUtils.getServerHostName(server1.getHost()));
     registerK1AndK2();
     verifyRefreshedEntriesFromServer();
 
@@ -303,7 +306,7 @@ public class HAInterestPart2DUnitTest extends HAInterestTestCase {
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
+    Wait.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
   }
 
   /**
@@ -311,13 +314,13 @@ public class HAInterestPart2DUnitTest extends HAInterestTestCase {
    * new endpoint to register interest
    */
   public void testInterestRecoveryFailure() throws Exception {
-    addExpectedException("Server unreachable");
+    IgnoredException.addIgnoredException("Server unreachable");
     
     PORT1 = ((Integer) server1.invoke(HAInterestTestCase.class, "createServerCache")).intValue();
     server1.invoke(HAInterestTestCase.class, "createEntriesK1andK2");
     PORT2 = ((Integer) server2.invoke(HAInterestTestCase.class, "createServerCache")).intValue();
     server2.invoke(HAInterestTestCase.class, "createEntriesK1andK2");
-    createClientPoolCacheWithSmallRetryInterval(this.getName(), getServerHostName(server1.getHost()));
+    createClientPoolCacheWithSmallRetryInterval(this.getName(), NetworkUtils.getServerHostName(server1.getHost()));
     registerK1AndK2();
     verifyRefreshedEntriesFromServer();
     VM backup = getBackupVM();
@@ -368,6 +371,6 @@ public class HAInterestPart2DUnitTest extends HAInterestTestCase {
         return excuse;
       }
     };
-    DistributedTestCase.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
+    Wait.waitForCriterion(wc, TIMEOUT_MILLIS, INTERVAL_MILLIS, true);
   }
 }
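
The HAInterest tests collect the remaining renames: expected exceptions are registered through IgnoredException and server host names are resolved through NetworkUtils. A combined sketch, assuming only the static methods these hunks import; createClientPoolCache is the tests' own helper and appears here only for context:

    import com.gemstone.gemfire.cache.client.ServerConnectivityException;
    import com.gemstone.gemfire.test.dunit.IgnoredException;
    import com.gemstone.gemfire.test.dunit.NetworkUtils;

    // suppress log noise for failures the test deliberately provokes
    IgnoredException.addIgnoredException("Server unreachable");
    IgnoredException.addIgnoredException(ServerConnectivityException.class.getName());

    // resolve the host name of the server VM instead of using the old inherited helper
    String hostName = NetworkUtils.getServerHostName(server1.getHost());
    createClientPoolCache(this.getName(), hostName);  // test's own helper, for context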


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/FunctionCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/FunctionCommandsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/FunctionCommandsDUnitTest.java
index 1698609..7378ada 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/FunctionCommandsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/FunctionCommandsDUnitTest.java
@@ -31,11 +31,13 @@ import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
 import com.gemstone.gemfire.management.internal.cli.json.GfJsonException;
 import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
 import com.gemstone.gemfire.management.internal.cli.result.TabularResultData;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 import java.util.List;
 import java.util.Properties;
@@ -118,11 +120,11 @@ public class FunctionCommandsDUnitTest extends CliCommandTestBase {
       e.printStackTrace();
     }
     String command = "execute function --id=" + function.getId() + " --region=" + "/" + "RegionOne";
-    getLogWriter().info("testExecuteFunctionWithNoRegionOnManager command : " + command);
+    LogWriterUtils.getLogWriter().info("testExecuteFunctionWithNoRegionOnManager command : " + command);
     CommandResult cmdResult = executeCommand(command);
     if (cmdResult != null) {
       String strCmdResult = commandResultToString(cmdResult);
-      getLogWriter().info("testExecuteFunctionWithNoRegionOnManager stringResult : " + strCmdResult);
+      LogWriterUtils.getLogWriter().info("testExecuteFunctionWithNoRegionOnManager stringResult : " + strCmdResult);
       assertEquals(Result.Status.OK, cmdResult.getStatus());
       assertTrue(strCmdResult.contains("Execution summary"));
     } else {
@@ -150,13 +152,13 @@ public class FunctionCommandsDUnitTest extends CliCommandTestBase {
     });
 
     String command = "execute function --id=" + function.getId() + " --region=" + REGION_NAME;
-    getLogWriter().info("testExecuteFunctionOnRegion command=" + command);
+    LogWriterUtils.getLogWriter().info("testExecuteFunctionOnRegion command=" + command);
     CommandResult cmdResult = executeCommand(command);
     if (cmdResult != null) {
       assertEquals(Result.Status.OK, cmdResult.getStatus());
-      getLogWriter().info("testExecuteFunctionOnRegion cmdResult=" + cmdResult);
+      LogWriterUtils.getLogWriter().info("testExecuteFunctionOnRegion cmdResult=" + cmdResult);
       String stringResult = commandResultToString(cmdResult);
-      getLogWriter().info("testExecuteFunctionOnRegion stringResult=" + stringResult);
+      LogWriterUtils.getLogWriter().info("testExecuteFunctionOnRegion stringResult=" + stringResult);
       assert (stringResult.contains("Execution summary"));
     } else {
       fail("testExecuteFunctionOnRegion did not return CommandResult");
@@ -193,7 +195,7 @@ public class FunctionCommandsDUnitTest extends CliCommandTestBase {
           if (bean == null) {
             return false;
           } else {
-            getLogWriter().info("Probing for checkRegionMBeans testExecuteFunctionOnRegionBug51480 finished");
+            LogWriterUtils.getLogWriter().info("Probing for checkRegionMBeans testExecuteFunctionOnRegionBug51480 finished");
             return true;
           }
         }
@@ -203,7 +205,7 @@ public class FunctionCommandsDUnitTest extends CliCommandTestBase {
           return "Probing for testExecuteFunctionOnRegionBug51480";
         }
       };
-      DistributedTestCase.waitForCriterion(waitForMaangerMBean, 2 * 60 * 1000, 2000, true);
+      Wait.waitForCriterion(waitForMaangerMBean, 2 * 60 * 1000, 2000, true);
       DistributedRegionMXBean bean = ManagementService.getManagementService(getCache()).getDistributedRegionMXBean(
           Region.SEPARATOR + REGION_ONE);
       assertNotNull(bean);
@@ -226,13 +228,13 @@ public class FunctionCommandsDUnitTest extends CliCommandTestBase {
 
     String command = "execute function --id=" + function.getId() + " --region=" + REGION_ONE;
 
-    getLogWriter().info("testExecuteFunctionOnRegionBug51480 command=" + command);
+    LogWriterUtils.getLogWriter().info("testExecuteFunctionOnRegionBug51480 command=" + command);
     CommandResult cmdResult = executeCommand(command);
     if (cmdResult != null) {
-      getLogWriter().info("testExecuteFunctionOnRegionBug51480 cmdResult=" + cmdResult);
+      LogWriterUtils.getLogWriter().info("testExecuteFunctionOnRegionBug51480 cmdResult=" + cmdResult);
       assertEquals(Result.Status.OK, cmdResult.getStatus());
       String stringResult = commandResultToString(cmdResult);
-      getLogWriter().info("testExecuteFunctionOnRegionBug51480 stringResult=" + stringResult);
+      LogWriterUtils.getLogWriter().info("testExecuteFunctionOnRegionBug51480 stringResult=" + stringResult);
       assert (stringResult.contains("Execution summary"));
     } else {
       fail("testExecuteFunctionOnRegionBug51480 did not return CommandResult");
@@ -261,12 +263,12 @@ public class FunctionCommandsDUnitTest extends CliCommandTestBase {
     });
 
     String command = "execute function --id=" + function.getId() + " --member=" + vm1MemberId;
-    getLogWriter().info("testExecuteFunctionOnMember command=" + command);
+    LogWriterUtils.getLogWriter().info("testExecuteFunctionOnMember command=" + command);
     CommandResult cmdResult = executeCommand(command);
     assertEquals(Result.Status.OK, cmdResult.getStatus());
 
     String stringResult = commandResultToString(cmdResult);
-    getLogWriter().info("testExecuteFunctionOnMember stringResult:" + stringResult);
+    LogWriterUtils.getLogWriter().info("testExecuteFunctionOnMember stringResult:" + stringResult);
     assertTrue(stringResult.contains("Execution summary"));
   }
 
@@ -290,13 +292,13 @@ public class FunctionCommandsDUnitTest extends CliCommandTestBase {
       }
     });
     String command = "execute function --id=" + function.getId();
-    getLogWriter().info("testExecuteFunctionOnMembers command=" + command);
+    LogWriterUtils.getLogWriter().info("testExecuteFunctionOnMembers command=" + command);
     CommandResult cmdResult = executeCommand(command);
     if (cmdResult != null) {
       assertEquals(Result.Status.OK, cmdResult.getStatus());
-      getLogWriter().info("testExecuteFunctionOnMembers cmdResult:" + cmdResult);
+      LogWriterUtils.getLogWriter().info("testExecuteFunctionOnMembers cmdResult:" + cmdResult);
       String stringResult = commandResultToString(cmdResult);
-      getLogWriter().info("testExecuteFunctionOnMembers stringResult:" + stringResult);
+      LogWriterUtils.getLogWriter().info("testExecuteFunctionOnMembers stringResult:" + stringResult);
       assertTrue(stringResult.contains("Execution summary"));
     } else {
       fail("testExecuteFunctionOnMembers did not return CommandResult");
@@ -324,13 +326,13 @@ public class FunctionCommandsDUnitTest extends CliCommandTestBase {
 
     String command = "execute function --id=" + function.getId() + " --arguments=arg1,arg2";
 
-    getLogWriter().info("testExecuteFunctionOnMembersWithArgs command=" + command);
+    LogWriterUtils.getLogWriter().info("testExecuteFunctionOnMembersWithArgs command=" + command);
     CommandResult cmdResult = executeCommand(command);
     if (cmdResult != null) {
       assertEquals(Result.Status.OK, cmdResult.getStatus());
-      getLogWriter().info("testExecuteFunctionOnMembersWithArgs cmdResult:" + cmdResult);
+      LogWriterUtils.getLogWriter().info("testExecuteFunctionOnMembersWithArgs cmdResult:" + cmdResult);
       String stringResult = commandResultToString(cmdResult);
-      getLogWriter().info("testExecuteFunctionOnMembersWithArgs stringResult:" + stringResult);
+      LogWriterUtils.getLogWriter().info("testExecuteFunctionOnMembersWithArgs stringResult:" + stringResult);
       assertTrue(stringResult.contains("Execution summary"));
       assertTrue(stringResult.contains("arg1"));
     } else {
@@ -386,13 +388,13 @@ public class FunctionCommandsDUnitTest extends CliCommandTestBase {
     });
 
     String command = "execute function --id=" + TestFunction.TEST_FUNCTION1 + " --groups=Group1,Group2";
-    getLogWriter().info("testExecuteFunctionOnGroups command=" + command);
+    LogWriterUtils.getLogWriter().info("testExecuteFunctionOnGroups command=" + command);
     CommandResult cmdResult = executeCommand(command);
-    getLogWriter().info("testExecuteFunctionOnGroups cmdResult=" + cmdResult);
+    LogWriterUtils.getLogWriter().info("testExecuteFunctionOnGroups cmdResult=" + cmdResult);
     assertEquals(Result.Status.OK, cmdResult.getStatus());
     TabularResultData resultData = (TabularResultData) cmdResult.getResultData();
     List<String> members = resultData.retrieveAllValues("Member ID/Name");
-    getLogWriter().info("testExecuteFunctionOnGroups members=" + members);
+    LogWriterUtils.getLogWriter().info("testExecuteFunctionOnGroups members=" + members);
     assertTrue(members.size() == 2 && members.contains(vm1id) && members.contains(vm2id));
   }
 
@@ -404,12 +406,12 @@ public class FunctionCommandsDUnitTest extends CliCommandTestBase {
     final VM vm1 = Host.getHost(0).getVM(1);
     final String vm1MemberId = (String) vm1.invoke(FunctionCommandsDUnitTest.class, "getMemberId");
     String command = "destroy function --id=" + function.getId() + " --member=" + vm1MemberId;
-    getLogWriter().info("testDestroyOnMember command=" + command);
+    LogWriterUtils.getLogWriter().info("testDestroyOnMember command=" + command);
     CommandResult cmdResult = executeCommand(command);
     if (cmdResult != null) {
       String strCmdResult = commandResultToString(cmdResult);
       assertEquals(Result.Status.OK, cmdResult.getStatus());
-      getLogWriter().info("testDestroyOnMember strCmdResult=" + strCmdResult);
+      LogWriterUtils.getLogWriter().info("testDestroyOnMember strCmdResult=" + strCmdResult);
       assertTrue(strCmdResult.contains("Destroyed TestFunction1 Successfully"));
     } else {
       fail("testDestroyOnMember failed as did not get CommandResult");
@@ -462,14 +464,14 @@ public class FunctionCommandsDUnitTest extends CliCommandTestBase {
     });
 
     String command = "destroy function --id=" + TestFunction.TEST_FUNCTION1 + " --groups=Group1,Group2";
-    getLogWriter().info("testDestroyOnGroups command=" + command);
+    LogWriterUtils.getLogWriter().info("testDestroyOnGroups command=" + command);
     CommandResult cmdResult = executeCommand(command);
-    getLogWriter().info("testDestroyOnGroups cmdResult=" + cmdResult);
+    LogWriterUtils.getLogWriter().info("testDestroyOnGroups cmdResult=" + cmdResult);
     assertEquals(Result.Status.OK, cmdResult.getStatus());
     String content = null;
     try {
       content = cmdResult.getContent().get("message").toString();
-      getLogWriter().info("testDestroyOnGroups content = " + content);
+      LogWriterUtils.getLogWriter().info("testDestroyOnGroups content = " + content);
     } catch (GfJsonException e) {
       fail("testDestroyOnGroups exception=" + e);
     }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/GemfireDataCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/GemfireDataCommandsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/GemfireDataCommandsDUnitTest.java
index 76f51de..d6719d1 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/GemfireDataCommandsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/GemfireDataCommandsDUnitTest.java
@@ -55,11 +55,15 @@ import com.gemstone.gemfire.management.internal.cli.result.CompositeResultData.S
 import com.gemstone.gemfire.management.internal.cli.result.ResultData;
 import com.gemstone.gemfire.management.internal.cli.result.TabularResultData;
 import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.Host;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 import hydra.GsRandom;
 
@@ -140,29 +144,29 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
         RegionFactory regionFactory = cache.createRegionFactory(RegionShortcut.REPLICATE);
         Region dataRegion = regionFactory.create(DATA_REGION_NAME);
         assertNotNull(dataRegion);
-        getLogWriter().info("Created Region " + dataRegion);
+        LogWriterUtils.getLogWriter().info("Created Region " + dataRegion);
 
         dataRegion = dataRegion.createSubregion(DATA_REGION_NAME_CHILD_1, dataRegion.getAttributes());
         assertNotNull(dataRegion);
-        getLogWriter().info("Created Region " + dataRegion);
+        LogWriterUtils.getLogWriter().info("Created Region " + dataRegion);
 
         dataRegion = dataRegion.createSubregion(DATA_REGION_NAME_CHILD_1_2, dataRegion.getAttributes());
         assertNotNull(dataRegion);
-        getLogWriter().info("Created Region " + dataRegion);
+        LogWriterUtils.getLogWriter().info("Created Region " + dataRegion);
 
         dataRegion = regionFactory.create(DATA_REGION_NAME_VM1);
         assertNotNull(dataRegion);
-        getLogWriter().info("Created Region " + dataRegion);
+        LogWriterUtils.getLogWriter().info("Created Region " + dataRegion);
 
         PartitionAttributes partitionAttrs = new PartitionAttributesFactory().setRedundantCopies(2).create();
         RegionFactory<Object, Object> partitionRegionFactory = cache.createRegionFactory(RegionShortcut.PARTITION);
         partitionRegionFactory.setPartitionAttributes(partitionAttrs);
         Region dataParRegion = partitionRegionFactory.create(DATA_PAR_REGION_NAME);
         assertNotNull(dataParRegion);
-        getLogWriter().info("Created Region " + dataParRegion);
+        LogWriterUtils.getLogWriter().info("Created Region " + dataParRegion);
         dataParRegion = partitionRegionFactory.create(DATA_PAR_REGION_NAME_VM1);
         assertNotNull(dataParRegion);
-        getLogWriter().info("Created Region " + dataParRegion);
+        LogWriterUtils.getLogWriter().info("Created Region " + dataParRegion);
 
       }
     });
@@ -173,19 +177,19 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
         RegionFactory regionFactory = cache.createRegionFactory(RegionShortcut.REPLICATE);
         Region dataRegion = regionFactory.create(DATA_REGION_NAME);
         assertNotNull(dataRegion);
-        getLogWriter().info("Created Region " + dataRegion);
+        LogWriterUtils.getLogWriter().info("Created Region " + dataRegion);
 
         dataRegion = dataRegion.createSubregion(DATA_REGION_NAME_CHILD_1, dataRegion.getAttributes());
         assertNotNull(dataRegion);
-        getLogWriter().info("Created Region " + dataRegion);
+        LogWriterUtils.getLogWriter().info("Created Region " + dataRegion);
 
         dataRegion = dataRegion.createSubregion(DATA_REGION_NAME_CHILD_1_2, dataRegion.getAttributes());
         assertNotNull(dataRegion);
-        getLogWriter().info("Created Region " + dataRegion);
+        LogWriterUtils.getLogWriter().info("Created Region " + dataRegion);
 
         dataRegion = regionFactory.create(DATA_REGION_NAME_VM2);
         assertNotNull(dataRegion);
-        getLogWriter().info("Created Region " + dataRegion);
+        LogWriterUtils.getLogWriter().info("Created Region " + dataRegion);
 
 
         PartitionAttributes partitionAttrs = new PartitionAttributesFactory().setRedundantCopies(2).create();
@@ -193,18 +197,18 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
         partitionRegionFactory.setPartitionAttributes(partitionAttrs);
         Region dataParRegion = partitionRegionFactory.create(DATA_PAR_REGION_NAME);
         assertNotNull(dataParRegion);
-        getLogWriter().info("Created Region " + dataParRegion);
+        LogWriterUtils.getLogWriter().info("Created Region " + dataParRegion);
         dataParRegion = partitionRegionFactory.create(DATA_PAR_REGION_NAME_VM2);
         assertNotNull(dataParRegion);
-        getLogWriter().info("Created Region " + dataParRegion);
+        LogWriterUtils.getLogWriter().info("Created Region " + dataParRegion);
 
       }
     });
 
     final String vm1MemberId = (String) vm1.invoke(GemfireDataCommandsDUnitTest.class, "getMemberId");
     final String vm2MemberId = (String) vm2.invoke(GemfireDataCommandsDUnitTest.class, "getMemberId");
-    getLogWriter().info("Vm1 ID : " + vm1MemberId);
-    getLogWriter().info("Vm2 ID : " + vm2MemberId);
+    LogWriterUtils.getLogWriter().info("Vm1 ID : " + vm1MemberId);
+    LogWriterUtils.getLogWriter().info("Vm2 ID : " + vm2MemberId);
 
     final VM manager = Host.getHost(0).getVM(0);
 
@@ -220,10 +224,10 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
             ManagerMXBean bean1 = service.getManagerMXBean();
             DistributedRegionMXBean bean2 = service.getDistributedRegionMXBean(DATA_REGION_NAME_PATH);
             if (bean1 == null) {
-              getLogWriter().info("Still probing for ManagerMBean");
+              LogWriterUtils.getLogWriter().info("Still probing for ManagerMBean");
               return false;
             } else {
-              getLogWriter().info("Still probing for DistributedRegionMXBean=" + bean2);
+              LogWriterUtils.getLogWriter().info("Still probing for DistributedRegionMXBean=" + bean2);
               return (bean2 != null);
             }
           }
@@ -234,7 +238,7 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
           }
         };
 
-        DistributedTestCase.waitForCriterion(waitForMaangerMBean, 30000, 2000, true);
+        Wait.waitForCriterion(waitForMaangerMBean, 30000, 2000, true);
 
         assertNotNull(service.getMemberMXBean());
         assertNotNull(service.getManagerMXBean());
@@ -264,7 +268,7 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
             }
 
             if (!flag) {
-              getLogWriter().info(
+              LogWriterUtils.getLogWriter().info(
                   "Still probing for regionMbeans " + DATA_REGION_NAME_PATH + "=" + beans[0] + " " + DATA_REGION_NAME_VM1_PATH + "=" + beans[1] + " " + DATA_REGION_NAME_VM2_PATH + "=" + beans[2] + " " + DATA_PAR_REGION_NAME_PATH + "=" + beans[3] + " " + DATA_PAR_REGION_NAME_VM1_PATH + "=" + beans[4] + " " + DATA_PAR_REGION_NAME_VM2_PATH + "=" + beans[5] + " "
                   //+ DATA_REGION_NAME_CHILD_1_PATH
                   // +"="+ beans[6]  + " " + DATA_REGION_NAME_CHILD_1_2_PATH
@@ -272,7 +276,7 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
               );
               return false;
             } else {
-              getLogWriter().info(
+              LogWriterUtils.getLogWriter().info(
                   "Probing complete for regionMbeans " + DATA_REGION_NAME_PATH + "=" + beans[0] + " " + DATA_REGION_NAME_VM1_PATH + "=" + beans[1] + " " + DATA_REGION_NAME_VM2_PATH + "=" + beans[2] + " " + DATA_PAR_REGION_NAME_PATH + "=" + beans[3] + " " + DATA_PAR_REGION_NAME_VM1_PATH + "=" + beans[4] + " " + DATA_PAR_REGION_NAME_VM2_PATH + "=" + beans[5] + " "
                   //+ DATA_REGION_NAME_CHILD_1_PATH
                   // +"="+ beans[6]  + " " + DATA_REGION_NAME_CHILD_1_2_PATH
@@ -294,7 +298,7 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
           }
         };
 
-        DistributedTestCase.waitForCriterion(waitForRegionMBeans, 30000, 2000, true);
+        Wait.waitForCriterion(waitForRegionMBeans, 30000, 2000, true);
 
         String regions[] = {DATA_REGION_NAME_PATH, DATA_REGION_NAME_VM1_PATH, DATA_REGION_NAME_VM2_PATH, DATA_PAR_REGION_NAME_PATH, DATA_PAR_REGION_NAME_VM1_PATH, DATA_PAR_REGION_NAME_VM2_PATH, /*DATA_REGION_NAME_CHILD_1_PATH, DATA_REGION_NAME_CHILD_1_2_PATH*/};
 
@@ -302,7 +306,7 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
           bean = service.getDistributedRegionMXBean(region);
           assertNotNull(bean);
           String[] membersName = bean.getMembers();
-          getLogWriter().info(
+          LogWriterUtils.getLogWriter().info(
               "Members Array for region " + region + " : " + StringUtils.objectToString(membersName, true, 10));
           if (bean.getMemberCount() < 1) fail(
               "Even after waiting mbean reports number of member hosting region " + DATA_REGION_NAME_VM1_PATH + " is less than one");
@@ -367,7 +371,7 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
       query = query.replace("?" + i, r);
       i++;
     }
-    getLogWriter().info("Checking members for query : " + query);
+    LogWriterUtils.getLogWriter().info("Checking members for query : " + query);
     QCompiler compiler = new QCompiler();
     Set<String> regionsInQuery = null;
     try {
@@ -375,11 +379,11 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
       Set regionSet = new HashSet();
       compiledQuery.getRegionsInQuery(regionSet, null);//GFSH ENV VARIBLES
       regionsInQuery = Collections.unmodifiableSet(regionSet);
-      getLogWriter().info("Region in query : " + regionsInQuery);
+      LogWriterUtils.getLogWriter().info("Region in query : " + regionsInQuery);
       if (regionsInQuery.size() > 0) {
         Set<DistributedMember> members = DataCommands.getQueryRegionsAssociatedMembers(regionsInQuery, cache,
             returnAll);
-        getLogWriter().info("Members for Region in query : " + members);
+        LogWriterUtils.getLogWriter().info("Members for Region in query : " + members);
         if (expectedMembers != -1) {
           assertNotNull(members);
           assertEquals(expectedMembers, members.size());
@@ -388,7 +392,7 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
         assertEquals(-1, expectedMembers);//Regions do not exist at all
       }
     } catch (QueryInvalidException qe) {
-      fail("Invalid Query", qe);
+      Assert.fail("Invalid Query", qe);
     }
   }
 
@@ -451,7 +455,7 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
         MemberMXBean member = service.getMemberMXBean();
         String cmdResult = member.processCommand(query);
         assertNotNull(cmdResult);
-        getLogWriter().info("Text Command Output : " + cmdResult);
+        LogWriterUtils.getLogWriter().info("Text Command Output : " + cmdResult);
       }
     });
   }
@@ -503,7 +507,7 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
     CommandResult cmdResult = executeCommand(query);
     printCommandOutput(cmdResult);
     validateSelectResult(cmdResult, true, -1, null);
-    ExpectedException ex = addExpectedException(QueryInvalidException.class.getSimpleName(), Host.getHost(0).getVM(0));
+    IgnoredException ex = IgnoredException.addIgnoredException(QueryInvalidException.class.getSimpleName(), Host.getHost(0).getVM(0));
     try {
       query = "query --query=\"select ID , status , createTime , pk, floatMinValue from ${DATA_REGION2} where ID <= ${PORTFOLIO_ID2}" + " and status='${STATUS2}'" + "\" --interactive=false";
       cmdResult = executeCommand(query);
@@ -548,15 +552,15 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
     Double doubleKey = Double.valueOf("12432.235425");
     Double doubleValue = Double.valueOf("111111.111111");
 
-    getLogWriter().info("Testing Byte Wrappers");
+    LogWriterUtils.getLogWriter().info("Testing Byte Wrappers");
     testGetPutLocateEntryFromShellAndGemfire(byteKey, byteValue, Byte.class, true, true);
-    getLogWriter().info("Testing Short Wrappers");
+    LogWriterUtils.getLogWriter().info("Testing Short Wrappers");
     testGetPutLocateEntryFromShellAndGemfire(shortKey, shortValue, Short.class, true, true);
-    getLogWriter().info("Testing Integer Wrappers");
+    LogWriterUtils.getLogWriter().info("Testing Integer Wrappers");
     testGetPutLocateEntryFromShellAndGemfire(integerKey, integerValue, Integer.class, true, true);
-    getLogWriter().info("Testing Float Wrappers");
+    LogWriterUtils.getLogWriter().info("Testing Float Wrappers");
     testGetPutLocateEntryFromShellAndGemfire(floatKey, flaotValue, Float.class, true, true);
-    getLogWriter().info("Testing Double Wrappers");
+    LogWriterUtils.getLogWriter().info("Testing Double Wrappers");
     testGetPutLocateEntryFromShellAndGemfire(doubleKey, doubleValue, Double.class, true, true);
   }
 
@@ -758,7 +762,7 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
     try {
       for (String col : expectedCols) {
         boolean found = false;
-        getLogWriter().info("Validating column " + col);
+        LogWriterUtils.getLogWriter().info("Validating column " + col);
         for (int i = 0; i < array.size(); i++) {
           String header = (String) array.get(i);
           if (col.equals(header)) found = true;
@@ -766,7 +770,7 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
         assertEquals(true, found);
       }
     } catch (GfJsonException e) {
-      fail("Error accessing table data", e);
+      Assert.fail("Error accessing table data", e);
     }
   }
 
@@ -779,7 +783,7 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
     try {
       for (String col : expectedCols) {
         boolean found = false;
-        getLogWriter().info("Validating column " + col);
+        LogWriterUtils.getLogWriter().info("Validating column " + col);
         for (int i = 0; i < array.size(); i++) {
           String header = (String) array.get(i);
           if (col.equals(header)) found = true;
@@ -793,7 +797,7 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
 
       }
     } catch (GfJsonException e) {
-      fail("Error accessing table data", e);
+      Assert.fail("Error accessing table data", e);
     }
   }
 
@@ -813,7 +817,7 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
             assertEquals(cols.length, array.size());
             for (String col : cols) {
               boolean found = false;
-              getLogWriter().info("Validating column " + col);
+              LogWriterUtils.getLogWriter().info("Validating column " + col);
               for (int i = 0; i < array.size(); i++) {
                 String header = (String) array.get(i);
                 if (col.equals(header)) found = true;
@@ -821,7 +825,7 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
               assertEquals(true, found);
             }
           } catch (GfJsonException e) {
-            fail("Error accessing table data", e);
+            Assert.fail("Error accessing table data", e);
           }
         }
       }
@@ -1275,7 +1279,7 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
       String valueJson = valueTemplate.replaceAll("\\?1", population);
       valueJson = valueJson.replaceAll("\\?2", area);
       valueJson = valueJson.replaceAll("\\?", keyString);
-      getLogWriter().info("Getting key with json key : " + keyJson);
+      LogWriterUtils.getLogWriter().info("Getting key with json key : " + keyJson);
       command = command + " " + "--key=" + keyJson + " --region=" + DATA_REGION_NAME_PATH + " --key-class=" + Key1.class.getCanonicalName();
       command = command + " --value-class=" + Value2.class.getCanonicalName();
       CommandResult cmdResult = executeCommand(command);
@@ -1304,7 +1308,7 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
       String valueJson = valueTemplate.replaceAll("\\?1", population);
       valueJson = valueJson.replaceAll("\\?2", area);
       valueJson = valueJson.replaceAll("\\?", keyString);
-      getLogWriter().info("Getting key with json key : " + keyJson);
+      LogWriterUtils.getLogWriter().info("Getting key with json key : " + keyJson);
       command = command + " " + "--key=" + keyJson + " --region=" + DATA_REGION_NAME_PATH + " --key-class=" + Key1.class.getCanonicalName();
       command = command + " --value-class=" + Value2.class.getCanonicalName();
       CommandResult cmdResult = executeCommand(command);
@@ -1341,8 +1345,8 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
       String valueJson = valueTemplate.replaceAll("\\?1", population);
       valueJson = valueJson.replaceAll("\\?2", area);
       valueJson = valueJson.replaceAll("\\?", keyString);
-      getLogWriter().info("Putting key with json key : " + keyJson);
-      getLogWriter().info("Putting key with json valye : " + valueJson);
+      LogWriterUtils.getLogWriter().info("Putting key with json key : " + keyJson);
+      LogWriterUtils.getLogWriter().info("Putting key with json valye : " + valueJson);
       command = command + " " + "--key=" + keyJson + " --value=" + valueJson + " --region=" + DATA_REGION_NAME_PATH;
       command = command + " --key-class=" + Key1.class.getCanonicalName() + " --value-class=" + Value2.class.getCanonicalName();
       ;
@@ -1370,8 +1374,8 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
       valueJson = valueJson.replaceAll("\\?set", set);
       valueJson = valueJson.replaceAll("\\?map", map);
 
-      getLogWriter().info("Putting key with json key : " + keyJson);
-      getLogWriter().info("Putting key with json valye : " + valueJson);
+      LogWriterUtils.getLogWriter().info("Putting key with json key : " + keyJson);
+      LogWriterUtils.getLogWriter().info("Putting key with json valye : " + valueJson);
       command = command + " " + "--key=" + keyJson + " --value=" + valueJson + " --region=" + DATA_REGION_NAME_PATH;
       command = command + " --key-class=" + Key1.class.getCanonicalName() + " --value-class=" + Car.class.getCanonicalName();
       ;
@@ -1481,7 +1485,7 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
       String command = "remove";
       String keyString = keyPrefix + i;
       String keyJson = keyTemplate.replaceAll("\\?", keyString);
-      getLogWriter().info("Removing key with json key : " + keyJson);
+      LogWriterUtils.getLogWriter().info("Removing key with json key : " + keyJson);
       command = command + " " + "--key=" + keyJson + " --region=" + DATA_REGION_NAME_PATH + " --key-class=" + Key1.class.getCanonicalName();
       CommandResult cmdResult = executeCommand(command);
       printCommandOutput(cmdResult);
@@ -1562,8 +1566,8 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
       CommandResult cmdResult = executeCommand(commandString);
       String resultAsString = commandResultToString(cmdResult);
       assertEquals(Result.Status.OK, cmdResult.getStatus());
-      getLogWriter().info("Command Output");
-      getLogWriter().info(resultAsString);
+      LogWriterUtils.getLogWriter().info("Command Output");
+      LogWriterUtils.getLogWriter().info(resultAsString);
 
       vm1.invoke(new SerializableRunnable() {
         public void run() {
@@ -1586,8 +1590,8 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
       cmdResult = executeCommand(commandString);
       resultAsString = commandResultToString(cmdResult);
 
-      getLogWriter().info("Result of import data");
-      getLogWriter().info(resultAsString);
+      LogWriterUtils.getLogWriter().info("Result of import data");
+      LogWriterUtils.getLogWriter().info(resultAsString);
       assertEquals(Result.Status.OK, cmdResult.getStatus());
 
       /**
@@ -1612,8 +1616,8 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
 
       cmdResult = executeCommand(commandString);
       resultAsString = commandResultToString(cmdResult);
-      getLogWriter().info("Result of import data with wrong region name");
-      getLogWriter().info(resultAsString);
+      LogWriterUtils.getLogWriter().info("Result of import data with wrong region name");
+      LogWriterUtils.getLogWriter().info(resultAsString);
       assertEquals(Result.Status.ERROR, cmdResult.getStatus());
 
       csb = new CommandStringBuilder(CliStrings.IMPORT_DATA);
@@ -1624,8 +1628,8 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
 
       cmdResult = executeCommand(commandString);
       resultAsString = commandResultToString(cmdResult);
-      getLogWriter().info("Result of import data with wrong file");
-      getLogWriter().info(resultAsString);
+      LogWriterUtils.getLogWriter().info("Result of import data with wrong file");
+      LogWriterUtils.getLogWriter().info(resultAsString);
       assertEquals(Result.Status.ERROR, cmdResult.getStatus());
 
     } finally {
@@ -1685,7 +1689,7 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
           final DistributedRegionMXBean bean = service.getDistributedRegionMXBean(
               Region.SEPARATOR + REBALANCE_REGION_NAME);
           if (bean == null) {
-            getLogWriter().info("Still probing for checkRegionMBeans ManagerMBean");
+            LogWriterUtils.getLogWriter().info("Still probing for checkRegionMBeans ManagerMBean");
             return false;
           } else {
             // verify that bean is proper before executing tests
@@ -1702,7 +1706,7 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
           return "Probing for testRebalanceCommandForSimulateWithNoMember ManagerMBean";
         }
       };
-      DistributedTestCase.waitForCriterion(waitForMaangerMBean, 2 * 60 * 1000, 2000, true);
+      Wait.waitForCriterion(waitForMaangerMBean, 2 * 60 * 1000, 2000, true);
       DistributedRegionMXBean bean = ManagementService.getManagementService(getCache()).getDistributedRegionMXBean(
           "/" + REBALANCE_REGION_NAME);
       assertNotNull(bean);
@@ -1715,13 +1719,13 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
     //check if DistributedRegionMXBean is available so that command will not fail
     final VM manager = Host.getHost(0).getVM(0);
     manager.invoke(checkRegionMBeans);
-    getLogWriter().info("testRebalanceCommandForTimeOut verified Mbean and executin command");
+    LogWriterUtils.getLogWriter().info("testRebalanceCommandForTimeOut verified Mbean and executin command");
     String command = "rebalance --time-out=1";
     CommandResult cmdResult = executeCommand(command);
-    getLogWriter().info("testRebalanceCommandForTimeOut just after executing " + cmdResult);
+    LogWriterUtils.getLogWriter().info("testRebalanceCommandForTimeOut just after executing " + cmdResult);
     if (cmdResult != null) {
       String stringResult = commandResultToString(cmdResult);
-      getLogWriter().info("testRebalanceCommandForTimeOut stringResult : " + stringResult);
+      LogWriterUtils.getLogWriter().info("testRebalanceCommandForTimeOut stringResult : " + stringResult);
       assertEquals(Result.Status.OK, cmdResult.getStatus());
     } else {
       fail("testRebalanceCommandForTimeOut failed as did not get CommandResult");
@@ -1735,16 +1739,16 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
     final VM manager = Host.getHost(0).getVM(0);
     manager.invoke(checkRegionMBeans);
 
-    getLogWriter().info("testRebalanceCommandForTimeOutForRegion verified Mbean and executin command");
+    LogWriterUtils.getLogWriter().info("testRebalanceCommandForTimeOutForRegion verified Mbean and executin command");
 
     String command = "rebalance --time-out=1 --include-region=" + "/" + REBALANCE_REGION_NAME;
     CommandResult cmdResult = executeCommand(command);
 
-    getLogWriter().info("testRebalanceCommandForTimeOutForRegion just after executing " + cmdResult);
+    LogWriterUtils.getLogWriter().info("testRebalanceCommandForTimeOutForRegion just after executing " + cmdResult);
 
     if (cmdResult != null) {
       String stringResult = commandResultToString(cmdResult);
-      getLogWriter().info("testRebalanceCommandForTimeOutForRegion stringResult : " + stringResult);
+      LogWriterUtils.getLogWriter().info("testRebalanceCommandForTimeOutForRegion stringResult : " + stringResult);
       assertEquals(Result.Status.OK, cmdResult.getStatus());
     } else {
       fail("testRebalanceCommandForTimeOut failed as did not get CommandResult");
@@ -1758,13 +1762,13 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
     final VM manager = Host.getHost(0).getVM(0);
     manager.invoke(checkRegionMBeans);
 
-    getLogWriter().info("testRebalanceCommandForSimulate verified Mbean and executin command");
+    LogWriterUtils.getLogWriter().info("testRebalanceCommandForSimulate verified Mbean and executin command");
     String command = "rebalance --simulate=true --include-region=" + "/" + REBALANCE_REGION_NAME;
     CommandResult cmdResult = executeCommand(command);
-    getLogWriter().info("testRebalanceCommandForSimulate just after executing " + cmdResult);
+    LogWriterUtils.getLogWriter().info("testRebalanceCommandForSimulate just after executing " + cmdResult);
     if (cmdResult != null) {
       String stringResult = commandResultToString(cmdResult);
-      getLogWriter().info("testRebalanceCommandForSimulate stringResult : " + stringResult);
+      LogWriterUtils.getLogWriter().info("testRebalanceCommandForSimulate stringResult : " + stringResult);
       assertEquals(Result.Status.OK, cmdResult.getStatus());
     } else {
       fail("testRebalanceCommandForSimulate failed as did not get CommandResult");
@@ -1778,16 +1782,16 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
     final VM manager = Host.getHost(0).getVM(0);
     manager.invoke(checkRegionMBeans);
 
-    getLogWriter().info("testRebalanceCommandForSimulateWithNoMember verified Mbean and executin command");
+    LogWriterUtils.getLogWriter().info("testRebalanceCommandForSimulateWithNoMember verified Mbean and executin command");
 
     String command = "rebalance --simulate=true";
     CommandResult cmdResult = executeCommand(command);
 
-    getLogWriter().info("testRebalanceCommandForSimulateWithNoMember just after executing " + cmdResult);
+    LogWriterUtils.getLogWriter().info("testRebalanceCommandForSimulateWithNoMember just after executing " + cmdResult);
 
     if (cmdResult != null) {
       String stringResult = commandResultToString(cmdResult);
-      getLogWriter().info("testRebalanceCommandForSimulateWithNoMember stringResult : " + stringResult);
+      LogWriterUtils.getLogWriter().info("testRebalanceCommandForSimulateWithNoMember stringResult : " + stringResult);
       assertEquals(Result.Status.OK, cmdResult.getStatus());
     } else {
       fail("testRebalanceCommandForSimulateWithNoMember failed as did not get CommandResult");
@@ -1801,13 +1805,13 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
     //check if DistributedRegionMXBean is available so that command will not fail
     final VM manager = Host.getHost(0).getVM(0);
     manager.invoke(checkRegionMBeans);
-    getLogWriter().info("testRebalanceForIncludeRegionFunction verified Mbean and executin command");
+    LogWriterUtils.getLogWriter().info("testRebalanceForIncludeRegionFunction verified Mbean and executin command");
     String command = "rebalance --include-region=" + "/" + REBALANCE_REGION_NAME + ",/" + REBALANCE_REGION2_NAME;
     CommandResult cmdResult = executeCommand(command);
-    getLogWriter().info("testRebalanceForIncludeRegionFunction just after executing " + cmdResult);
+    LogWriterUtils.getLogWriter().info("testRebalanceForIncludeRegionFunction just after executing " + cmdResult);
     if (cmdResult != null) {
       String stringResult = commandResultToString(cmdResult);
-      getLogWriter().info("testRebalanceForIncludeRegionFunction stringResult : " + stringResult);
+      LogWriterUtils.getLogWriter().info("testRebalanceForIncludeRegionFunction stringResult : " + stringResult);
       assertEquals(Result.Status.OK, cmdResult.getStatus());
     } else {
       fail("testRebalanceForIncludeRegionFunction failed as did not get CommandResult");
@@ -1820,17 +1824,17 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
     final VM manager = Host.getHost(0).getVM(0);
     manager.invoke(checkRegionMBeans);
 
-    getLogWriter().info("testSimulateForEntireDS verified Mbean and executin command");
+    LogWriterUtils.getLogWriter().info("testSimulateForEntireDS verified Mbean and executin command");
 
     String command = "rebalance --simulate=true";
 
     CommandResult cmdResult = executeCommand(command);
 
-    getLogWriter().info("testSimulateForEntireDS just after executing " + cmdResult);
+    LogWriterUtils.getLogWriter().info("testSimulateForEntireDS just after executing " + cmdResult);
 
     if (cmdResult != null) {
       String stringResult = commandResultToString(cmdResult);
-      getLogWriter().info("testSimulateForEntireDS stringResult : " + stringResult);
+      LogWriterUtils.getLogWriter().info("testSimulateForEntireDS stringResult : " + stringResult);
       assertEquals(Result.Status.OK, cmdResult.getStatus());
     } else {
       fail("testRebalanceForIncludeRegionFunction failed as did not get CommandResult");
@@ -1842,13 +1846,13 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
     //check if DistributedRegionMXBean is available so that command will not fail
     final VM manager = Host.getHost(0).getVM(0);
     manager.invoke(checkRegionMBeans);
-    getLogWriter().info("testRebalanceForEntireDS verified Mbean and executin command");
+    LogWriterUtils.getLogWriter().info("testRebalanceForEntireDS verified Mbean and executin command");
     String command = "rebalance";
     CommandResult cmdResult = executeCommand(command);
-    getLogWriter().info("testRebalanceForEntireDS just after executing " + cmdResult);
+    LogWriterUtils.getLogWriter().info("testRebalanceForEntireDS just after executing " + cmdResult);
     if (cmdResult != null) {
       String stringResult = commandResultToString(cmdResult);
-      getLogWriter().info("testRebalanceForEntireDS stringResult : " + stringResult);
+      LogWriterUtils.getLogWriter().info("testRebalanceForEntireDS stringResult : " + stringResult);
       assertEquals(Result.Status.OK, cmdResult.getStatus());
     } else {
       fail("testRebalanceForIncludeRegionFunction failed as did not get CommandResult");
@@ -1899,14 +1903,14 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
 
   private static void printCommandOutput(CommandResult cmdResult) {
     assertNotNull(cmdResult);
-    getLogWriter().info("Command Output : ");
+    LogWriterUtils.getLogWriter().info("Command Output : ");
     StringBuilder sb = new StringBuilder();
     cmdResult.resetToFirstLine();
     while (cmdResult.hasNextLine()) {
       sb.append(cmdResult.nextLine()).append(DataCommandRequest.NEW_LINE);
     }
-    getLogWriter().info(sb.toString());
-    getLogWriter().info("");
+    LogWriterUtils.getLogWriter().info(sb.toString());
+    LogWriterUtils.getLogWriter().info("");
   }
 
   public static class Value1WithValue2 extends Value1 {
@@ -1933,15 +1937,15 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
     final VM manager = Host.getHost(0).getVM(0);
     manager.invoke(checkRegionMBeans);
 
-    getLogWriter().info("testRebalanceForExcludeRegionFunction verified Mbean and executing command");
+    LogWriterUtils.getLogWriter().info("testRebalanceForExcludeRegionFunction verified Mbean and executing command");
 
     String command = "rebalance --exclude-region=" + "/" + REBALANCE_REGION2_NAME;
-    getLogWriter().info("testRebalanceForExcludeRegionFunction command : " + command);
+    LogWriterUtils.getLogWriter().info("testRebalanceForExcludeRegionFunction command : " + command);
     CommandResult cmdResult = executeCommand(command);
-    getLogWriter().info("testRebalanceForExcludeRegionFunction just after executing " + cmdResult);
+    LogWriterUtils.getLogWriter().info("testRebalanceForExcludeRegionFunction just after executing " + cmdResult);
     if (cmdResult != null) {
       String stringResult = commandResultToString(cmdResult);
-      getLogWriter().info("testRebalanceForExcludeRegionFunction stringResult : " + stringResult);
+      LogWriterUtils.getLogWriter().info("testRebalanceForExcludeRegionFunction stringResult : " + stringResult);
       assertEquals(Result.Status.OK, cmdResult.getStatus());
     } else {
       fail("testRebalanceForIncludeRegionFunction failed as did not get CommandResult");
@@ -1964,19 +1968,19 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
             ManagerMXBean bean1 = service.getManagerMXBean();
             DistributedRegionMXBean bean2 = service.getDistributedRegionMXBean(regionName);
             if (bean1 == null) {
-              getLogWriter().info("waitForListClientMbean Still probing for ManagerMBean");
+              LogWriterUtils.getLogWriter().info("waitForListClientMbean Still probing for ManagerMBean");
               return false;
             } else {
-              getLogWriter().info("waitForListClientMbean Still probing for DistributedRegionMXBean=" + bean2);
+              LogWriterUtils.getLogWriter().info("waitForListClientMbean Still probing for DistributedRegionMXBean=" + bean2);
               if (bean2 == null) {
                 bean2 = service.getDistributedRegionMXBean(Region.SEPARATOR + regionName);
               }
               if (bean2 == null) {
-                getLogWriter().info(
+                LogWriterUtils.getLogWriter().info(
                     "waitForListClientMbean Still probing for DistributedRegionMXBean with separator = " + bean2);
                 return false;
               } else {
-                getLogWriter().info(
+                LogWriterUtils.getLogWriter().info(
                     "waitForListClientMbean Still probing for DistributedRegionMXBean with separator Not null  " + bean2.getMembers().length);
                 if (bean2.getMembers().length > 1) {
                   return true;
@@ -1993,7 +1997,7 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
           }
         };
 
-        DistributedTestCase.waitForCriterion(waitForMaangerMBean, 30000, 2000, true);
+        Wait.waitForCriterion(waitForMaangerMBean, 30000, 2000, true);
         DistributedRegionMXBean bean = service.getDistributedRegionMXBean(regionName);
         if (bean == null) {
           bean = service.getDistributedRegionMXBean(Region.SEPARATOR + regionName);
@@ -2029,7 +2033,7 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
       }
     });
 
-    getLogWriter().info("testRegionsViaMbeanAndFunctions memSizeFromMbean= " + memSizeFromMbean);
+    LogWriterUtils.getLogWriter().info("testRegionsViaMbeanAndFunctions memSizeFromMbean= " + memSizeFromMbean);
 
     String memSizeFromFunctionCall = (String) manager.invoke(new SerializableCallable() {
       public Object call() {
@@ -2039,7 +2043,7 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
       }
     });
 
-    getLogWriter().info("testRegionsViaMbeanAndFunctions memSizeFromFunctionCall= " + memSizeFromFunctionCall);
+    LogWriterUtils.getLogWriter().info("testRegionsViaMbeanAndFunctions memSizeFromFunctionCall= " + memSizeFromFunctionCall);
     assertTrue(memSizeFromFunctionCall.equals(memSizeFromMbean));
   }
 
@@ -2068,7 +2072,7 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
       }
     });
 
-    getLogWriter().info("testRegionsViaMbeanAndFunctionsForPartRgn memSizeFromMbean= " + memSizeFromMbean);
+    LogWriterUtils.getLogWriter().info("testRegionsViaMbeanAndFunctionsForPartRgn memSizeFromMbean= " + memSizeFromMbean);
 
     String memSizeFromFunctionCall = (String) manager.invoke(new SerializableCallable() {
       public Object call() {
@@ -2077,12 +2081,8 @@ public class GemfireDataCommandsDUnitTest extends CliCommandTestBase {
       }
     });
 
-    getLogWriter().info(
+    LogWriterUtils.getLogWriter().info(
         "testRegionsViaMbeanAndFunctionsForPartRgn memSizeFromFunctionCall= " + memSizeFromFunctionCall);
     assertTrue(memSizeFromFunctionCall.equals(memSizeFromMbean));
   }
-
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-  }
 }
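
In the hunks above, calls that previously went through the DistributedTestCase base class, either inherited (getLogWriter(), addExpectedException(...), fail(message, throwable)) or statically qualified (DistributedTestCase.waitForCriterion(...)), are rewritten to use the utility classes under com.gemstone.gemfire.test.dunit. The sketch below shows only the resulting call shape; it is not part of the patch, and the class name and test body are hypothetical.

    import com.gemstone.gemfire.test.dunit.Assert;
    import com.gemstone.gemfire.test.dunit.IgnoredException;
    import com.gemstone.gemfire.test.dunit.LogWriterUtils;
    import com.gemstone.gemfire.test.dunit.Wait;
    import com.gemstone.gemfire.test.dunit.WaitCriterion;

    public class DUnitHelperUsageSketch {                     // hypothetical class
      public void waitThenVerify() {
        // suppress an exception string the test expects to appear in member logs
        IgnoredException.addIgnoredException("EntryDestroyedException");

        WaitCriterion criterion = new WaitCriterion() {
          @Override
          public boolean done() {
            return true;                                      // real tests poll an MBean or region here
          }
          @Override
          public String description() {
            return "waiting for the MBean to be registered";
          }
        };
        // poll for up to 30s at 2s intervals, failing the test on timeout
        Wait.waitForCriterion(criterion, 30000, 2000, true);

        LogWriterUtils.getLogWriter().info("probe finished");

        try {
          Thread.sleep(1);                                    // stand-in for the real test body
        } catch (InterruptedException e) {
          Assert.fail("unexpected interrupt", e);             // fail with both message and cause
        }
      }
    }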

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/GetCommandOnRegionWithCacheLoaderDuringCacheMissDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/GetCommandOnRegionWithCacheLoaderDuringCacheMissDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/GetCommandOnRegionWithCacheLoaderDuringCacheMissDUnitTest.java
index f092196..7f161fe 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/GetCommandOnRegionWithCacheLoaderDuringCacheMissDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/GetCommandOnRegionWithCacheLoaderDuringCacheMissDUnitTest.java
@@ -39,11 +39,13 @@ import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
 import com.gemstone.gemfire.management.internal.cli.result.CompositeResultData;
 import com.gemstone.gemfire.management.internal.cli.result.ResultData;
 import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnableIF;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * The GetCommandOnRegionWithCacheLoaderDuringCacheMissDUnitTest class is test suite of test cases testing the Gfsh
@@ -172,7 +174,7 @@ public class GetCommandOnRegionWithCacheLoaderDuringCacheMissDUnitTest extends C
           }
         };
 
-        DistributedTestCase.waitForCriterion(waitOnManagerCriterion, 30000, 2000, true);
+        Wait.waitForCriterion(waitOnManagerCriterion, 30000, 2000, true);
       }
     });
   }
@@ -195,7 +197,7 @@ public class GetCommandOnRegionWithCacheLoaderDuringCacheMissDUnitTest extends C
 
   protected void log(final String tag, final String message) {
     //System.out.printf("%1$s (%2$s)%n", tag, message);
-    getLogWriter().info(String.format("%1$s (%2$s)%n", tag, message));
+    LogWriterUtils.getLogWriter().info(String.format("%1$s (%2$s)%n", tag, message));
   }
 
   protected CommandResult runCommand(final String command) {

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/IndexCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/IndexCommandsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/IndexCommandsDUnitTest.java
index 99ba73d..431c6b3 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/IndexCommandsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/IndexCommandsDUnitTest.java
@@ -35,11 +35,14 @@ import com.gemstone.gemfire.management.internal.cli.domain.Stock;
 import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
 import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
 import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 import java.io.File;
 import java.io.IOException;
@@ -604,7 +607,7 @@ public class IndexCommandsDUnitTest extends CliCommandTestBase {
           final InternalLocator locator = (InternalLocator) Locator.startLocatorAndDS(locatorPort, locatorLogFile, null,
               locatorProps);
 
-          DistributedTestCase.WaitCriterion wc = new DistributedTestCase.WaitCriterion() {
+          WaitCriterion wc = new WaitCriterion() {
             @Override
             public boolean done() {
               return locator.isSharedConfigurationRunning();
@@ -615,7 +618,7 @@ public class IndexCommandsDUnitTest extends CliCommandTestBase {
               return "Waiting for shared configuration to be started";
             }
           };
-          DistributedTestCase.waitForCriterion(wc, 5000, 500, true);
+          Wait.waitForCriterion(wc, 5000, 500, true);
         } catch (IOException ioex) {
           fail("Unable to create a locator with a shared configuration");
         }
@@ -664,7 +667,7 @@ public class IndexCommandsDUnitTest extends CliCommandTestBase {
           xmlFromConfig = sharedConfig.getConfiguration(groupName).getCacheXmlContent();
           assertTrue(xmlFromConfig.contains(indexName));
         } catch (Exception e) {
-          fail("Error occurred in cluster configuration service", e);
+          Assert.fail("Error occurred in cluster configuration service", e);
         }
       }
     });
@@ -709,7 +712,7 @@ public class IndexCommandsDUnitTest extends CliCommandTestBase {
           xmlFromConfig = sharedConfig.getConfiguration(groupName).getCacheXmlContent();
           assertFalse(xmlFromConfig.contains(indexName));
         } catch (Exception e) {
-          fail("Error occurred in cluster configuration service", e);
+          Assert.fail("Error occurred in cluster configuration service", e);
         }
       }
     });
@@ -738,8 +741,8 @@ public class IndexCommandsDUnitTest extends CliCommandTestBase {
   }
 
   private void writeToLog(String text, String resultAsString) {
-    getLogWriter().info(testName + "\n");
-    getLogWriter().info(resultAsString);
+    LogWriterUtils.getLogWriter().info(getTestMethodName() + "\n");
+    LogWriterUtils.getLogWriter().info(resultAsString);
   }
 
   private void setupSystem() {
@@ -809,9 +812,4 @@ public class IndexCommandsDUnitTest extends CliCommandTestBase {
       }
     });
   }
-
-  @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-  }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ListAndDescribeDiskStoreCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ListAndDescribeDiskStoreCommandsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ListAndDescribeDiskStoreCommandsDUnitTest.java
index acb7759..386b8ed 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ListAndDescribeDiskStoreCommandsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ListAndDescribeDiskStoreCommandsDUnitTest.java
@@ -29,6 +29,7 @@ import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.management.cli.Result;
 import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnableIF;
 import com.gemstone.gemfire.test.dunit.VM;
@@ -68,11 +69,6 @@ public class ListAndDescribeDiskStoreCommandsDUnitTest extends CliCommandTestBas
     setupGemFire();
   }
 
-  @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-  }
-
   protected Peer createPeer(final Properties distributedSystemConfiguration, final VM vm) {
     return new Peer(distributedSystemConfiguration, vm);
   }
@@ -95,7 +91,7 @@ public class ListAndDescribeDiskStoreCommandsDUnitTest extends CliCommandTestBas
   protected Properties createDistributedSystemProperties(final String gemfireName) {
     final Properties distributedSystemProperties = new Properties();
 
-    distributedSystemProperties.setProperty(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+    distributedSystemProperties.setProperty(DistributionConfig.LOG_LEVEL_NAME, LogWriterUtils.getDUnitLogLevel());
     distributedSystemProperties.setProperty(DistributionConfig.NAME_NAME, gemfireName);
 
     return distributedSystemProperties;
@@ -131,7 +127,7 @@ public class ListAndDescribeDiskStoreCommandsDUnitTest extends CliCommandTestBas
     final Result result = executeCommand(CliStrings.LIST_DISK_STORE);
 
     assertNotNull(result);
-    getLogWriter().info(toString(result));
+    LogWriterUtils.getLogWriter().info(toString(result));
     assertEquals(Result.Status.OK, result.getStatus());
   }
 
@@ -140,7 +136,7 @@ public class ListAndDescribeDiskStoreCommandsDUnitTest extends CliCommandTestBas
         CliStrings.DESCRIBE_DISK_STORE + " --member=producerServer --name=producerData");
 
     assertNotNull(result);
-    getLogWriter().info(toString(result));
+    LogWriterUtils.getLogWriter().info(toString(result));
     assertEquals(Result.Status.OK, result.getStatus());
   }
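
In the hunk above, the log level used when building the distributed system properties now comes from LogWriterUtils.getDUnitLogLevel() rather than a base-class accessor. A standalone sketch of that pattern follows; apart from the two DistributionConfig constants and the LogWriterUtils call, which appear in the hunk, everything in it is illustrative.

    import java.util.Properties;

    import com.gemstone.gemfire.distributed.internal.DistributionConfig;
    import com.gemstone.gemfire.test.dunit.LogWriterUtils;

    public class MemberPropertiesSketch {                     // hypothetical class
      static Properties createProperties(final String memberName) {
        final Properties props = new Properties();
        // pull the log level from the dunit environment instead of the test base class
        props.setProperty(DistributionConfig.LOG_LEVEL_NAME, LogWriterUtils.getDUnitLogLevel());
        props.setProperty(DistributionConfig.NAME_NAME, memberName);
        return props;
      }
    }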
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ListAndDescribeRegionDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ListAndDescribeRegionDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ListAndDescribeRegionDUnitTest.java
index a81a085..2d11491 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ListAndDescribeRegionDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ListAndDescribeRegionDUnitTest.java
@@ -35,6 +35,7 @@ import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
 import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
 import com.gemstone.gemfire.management.internal.cli.util.RegionAttributesNames;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -192,8 +193,8 @@ public class ListAndDescribeRegionDUnitTest extends CliCommandTestBase {
     String commandString = csb.toString();
     CommandResult commandResult = executeCommand(commandString);
     String commandResultAsString = commandResultToString(commandResult);
-    getLogWriter().info("Command String : " + commandString);
-    getLogWriter().info("Output : \n" + commandResultAsString);
+    LogWriterUtils.getLogWriter().info("Command String : " + commandString);
+    LogWriterUtils.getLogWriter().info("Output : \n" + commandResultAsString);
     assertEquals(Status.OK, commandResult.getStatus());
     assertTrue(commandResultAsString.contains(PR1));
     assertTrue(commandResultAsString.contains(LOCALREGIONONMANAGER));
@@ -207,8 +208,8 @@ public class ListAndDescribeRegionDUnitTest extends CliCommandTestBase {
     commandString = csb.toString();
     commandResult = executeCommand(commandString);
     commandResultAsString = commandResultToString(commandResult);
-    getLogWriter().info("Command String : " + commandString);
-    getLogWriter().info("Output : \n" + commandResultAsString);
+    LogWriterUtils.getLogWriter().info("Command String : " + commandString);
+    LogWriterUtils.getLogWriter().info("Output : \n" + commandResultAsString);
     assertEquals(Status.OK, commandResult.getStatus());
     assertTrue(commandResultAsString.contains(PR1));
     assertTrue(commandResultAsString.contains(LOCALREGIONONMANAGER));
@@ -218,8 +219,8 @@ public class ListAndDescribeRegionDUnitTest extends CliCommandTestBase {
     commandString = csb.toString();
     commandResult = executeCommand(commandString);
     commandResultAsString = commandResultToString(commandResult);
-    getLogWriter().info("Command String : " + commandString);
-    getLogWriter().info("Output : \n" + commandResultAsString);
+    LogWriterUtils.getLogWriter().info("Command String : " + commandString);
+    LogWriterUtils.getLogWriter().info("Output : \n" + commandResultAsString);
     assertEquals(Status.OK, commandResult.getStatus());
     assertTrue(commandResultAsString.contains(PR1));
     assertTrue(commandResultAsString.contains(REGION1));
@@ -232,8 +233,8 @@ public class ListAndDescribeRegionDUnitTest extends CliCommandTestBase {
     commandString = csb.toString();
     commandResult = executeCommand(commandString);
     commandResultAsString = commandResultToString(commandResult);
-    getLogWriter().info("Command String : " + commandString);
-    getLogWriter().info("Output : \n" + commandResultAsString);
+    LogWriterUtils.getLogWriter().info("Command String : " + commandString);
+    LogWriterUtils.getLogWriter().info("Output : \n" + commandResultAsString);
     assertEquals(Status.OK, commandResult.getStatus());
     assertTrue(commandResultAsString.contains(PR1));
     assertTrue(commandResultAsString.contains(LOCALREGIONONMANAGER));
@@ -243,8 +244,8 @@ public class ListAndDescribeRegionDUnitTest extends CliCommandTestBase {
     commandString = csb.toString();
     commandResult = executeCommand(commandString);
     commandResultAsString = commandResultToString(commandResult);
-    getLogWriter().info("Command String : " + commandString);
-    getLogWriter().info("Output : \n" + commandResultAsString);
+    LogWriterUtils.getLogWriter().info("Command String : " + commandString);
+    LogWriterUtils.getLogWriter().info("Output : \n" + commandResultAsString);
     assertEquals(Status.OK, commandResult.getStatus());
     assertTrue(commandResultAsString.contains(PR1));
     assertTrue(commandResultAsString.contains(REGION1));
@@ -260,8 +261,8 @@ public class ListAndDescribeRegionDUnitTest extends CliCommandTestBase {
     String commandString = csb.toString();
     CommandResult commandResult = executeCommand(commandString);
     String commandResultAsString = commandResultToString(commandResult);
-    getLogWriter().info("Command String : " + commandString);
-    getLogWriter().info("Output : \n" + commandResultAsString);
+    LogWriterUtils.getLogWriter().info("Command String : " + commandString);
+    LogWriterUtils.getLogWriter().info("Output : \n" + commandResultAsString);
     assertEquals(Status.OK, commandResult.getStatus());
     assertTrue(commandResultAsString.contains(PR1));
     assertTrue(commandResultAsString.contains("Server1"));
@@ -271,8 +272,8 @@ public class ListAndDescribeRegionDUnitTest extends CliCommandTestBase {
     commandString = csb.toString();
     commandResult = executeCommand(commandString);
     commandResultAsString = commandResultToString(commandResult);
-    getLogWriter().info("Command String : " + commandString);
-    getLogWriter().info("Output : \n" + commandResultAsString);
+    LogWriterUtils.getLogWriter().info("Command String : " + commandString);
+    LogWriterUtils.getLogWriter().info("Output : \n" + commandResultAsString);
     assertEquals(Status.OK, commandResult.getStatus());
     assertTrue(commandResultAsString.contains(LOCALREGIONONMANAGER));
     assertTrue(commandResultAsString.contains("Manager"));

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ListIndexCommandDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ListIndexCommandDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ListIndexCommandDUnitTest.java
index 399e89c..b7c42dc 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ListIndexCommandDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/ListIndexCommandDUnitTest.java
@@ -45,6 +45,7 @@ import com.gemstone.gemfire.management.cli.Result;
 import com.gemstone.gemfire.management.internal.cli.domain.IndexDetails;
 import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnableIF;
 import com.gemstone.gemfire.test.dunit.VM;
@@ -90,11 +91,6 @@ public class ListIndexCommandDUnitTest extends CliCommandTestBase {
     setupGemFire();
   }
 
-  @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
-  }
-
   protected Index createIndex(final String name, final String indexedExpression, final String fromClause) {
     return createIndex(name, IndexType.FUNCTIONAL, indexedExpression, fromClause);
   }
@@ -142,7 +138,7 @@ public class ListIndexCommandDUnitTest extends CliCommandTestBase {
   protected Properties createDistributedSystemProperties(final String gemfireName) {
     final Properties distributedSystemProperties = new Properties();
 
-    distributedSystemProperties.setProperty(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+    distributedSystemProperties.setProperty(DistributionConfig.LOG_LEVEL_NAME, LogWriterUtils.getDUnitLogLevel());
     distributedSystemProperties.setProperty(DistributionConfig.NAME_NAME, gemfireName);
 
     return distributedSystemProperties;
@@ -180,7 +176,7 @@ public class ListIndexCommandDUnitTest extends CliCommandTestBase {
               }
             }
           } catch (Exception e) {
-            getLogWriter().error(
+            LogWriterUtils.getLogWriter().error(
                 String.format("Error occurred creating Index (%1$s) on Region (%2$s) - (%3$s)", indexName,
                     region.getFullPath(), e.getMessage()));
           }
@@ -282,11 +278,11 @@ public class ListIndexCommandDUnitTest extends CliCommandTestBase {
   @SuppressWarnings("unchecked")
   protected <T extends Comparable<T>, B extends AbstractBean<T>> B query(final Cache cache, final String queryString) {
     try {
-      getLogWriter().info(String.format("Running Query (%1$s) in GemFire...", queryString));
+      LogWriterUtils.getLogWriter().info(String.format("Running Query (%1$s) in GemFire...", queryString));
 
       final SelectResults<B> results = (SelectResults<B>) cache.getQueryService().newQuery(queryString).execute();
 
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           String.format("Running Query (%1$s) in GemFire returned (%2$d) result(s).", queryString, results.size()));
 
       return (results.iterator().hasNext() ? results.iterator().next() : null);
@@ -298,12 +294,12 @@ public class ListIndexCommandDUnitTest extends CliCommandTestBase {
   protected <T extends Comparable<T>, B extends AbstractBean<T>> B query(final Region<T, B> region,
       final String queryPredicate) {
     try {
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           String.format("Running Query (%1$s) on Region (%2$s)...", queryPredicate, region.getFullPath()));
 
       final SelectResults<B> results = region.query(queryPredicate);
 
-      getLogWriter().info(
+      LogWriterUtils.getLogWriter().info(
           String.format("Running Query (%1$s) on Region (%2$s) returned (%3$d) result(s).", queryPredicate,
               region.getFullPath(), results.size()));
 
@@ -319,7 +315,7 @@ public class ListIndexCommandDUnitTest extends CliCommandTestBase {
     final Result result = executeCommand(CliStrings.LIST_INDEX + " --" + CliStrings.LIST_INDEX__STATS);
 
     assertNotNull(result);
-    getLogWriter().info(toString(result));
+    LogWriterUtils.getLogWriter().info(toString(result));
     assertEquals(Result.Status.OK, result.getStatus());
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MemberCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MemberCommandsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MemberCommandsDUnitTest.java
index a000053..7d6efc5 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MemberCommandsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MemberCommandsDUnitTest.java
@@ -39,6 +39,8 @@ import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings;
 import com.gemstone.gemfire.management.internal.cli.remote.CommandProcessor;
 import com.gemstone.gemfire.management.internal.cli.util.CommandStringBuilder;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -79,8 +81,7 @@ public class MemberCommandsDUnitTest extends CacheTestCase {
   }
 
   @Override
-  public void tearDown2() throws Exception {
-    super.tearDown2();
+  protected final void postTearDownCacheTestCase() throws Exception {
     disconnectFromDS();
     CliUtil.isGfshVM = true;
   }
@@ -180,7 +181,7 @@ public class MemberCommandsDUnitTest extends CacheTestCase {
     Properties props = new Properties();
 
     props.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
-    props.setProperty(DistributionConfig.LOCATORS_NAME, getServerHostName(host) + "[" + locatorPort + "]");
+    props.setProperty(DistributionConfig.LOCATORS_NAME, NetworkUtils.getServerHostName(host) + "[" + locatorPort + "]");
     props.setProperty(DistributionConfig.LOG_LEVEL_NAME, "info");
     props.setProperty(DistributionConfig.STATISTIC_SAMPLING_ENABLED_NAME, "true");
     props.setProperty(DistributionConfig.ENABLE_TIME_STATISTICS_NAME, "true");
@@ -207,7 +208,7 @@ public class MemberCommandsDUnitTest extends CacheTestCase {
     setupSystem();
     CommandProcessor commandProcessor = new CommandProcessor();
     Result result = commandProcessor.createCommandStatement(CliStrings.LIST_MEMBER, EMPTY_ENV).process();
-    getLogWriter().info("#SB" + getResultAsString(result));
+    LogWriterUtils.getLogWriter().info("#SB" + getResultAsString(result));
     assertEquals(true, result.getStatus().equals(Status.OK));
   }
 
@@ -230,7 +231,7 @@ public class MemberCommandsDUnitTest extends CacheTestCase {
       CommandProcessor commandProcessor = new CommandProcessor();
       Result result = commandProcessor.createCommandStatement(CliStrings.LIST_MEMBER, EMPTY_ENV).process();
 
-      getLogWriter().info("#SB" + getResultAsString(result));
+      LogWriterUtils.getLogWriter().info("#SB" + getResultAsString(result));
       assertEquals(true, result.getStatus().equals(Status.ERROR));
     } finally {
       locator.stop(); // fix for bug 46562
@@ -249,7 +250,7 @@ public class MemberCommandsDUnitTest extends CacheTestCase {
     CommandStringBuilder csb = new CommandStringBuilder(CliStrings.LIST_MEMBER);
     csb.addOption(CliStrings.LIST_MEMBER__GROUP, "G1");
     Result result = commandProcessor.createCommandStatement(csb.toString(), EMPTY_ENV).process();
-    getLogWriter().info("#SB" + getResultAsString(result));
+    LogWriterUtils.getLogWriter().info("#SB" + getResultAsString(result));
     assertEquals(true, result.getStatus().equals(Status.OK));
   }
 
@@ -272,7 +273,7 @@ public class MemberCommandsDUnitTest extends CacheTestCase {
       Result result = commandProcessor.createCommandStatement("describe member --name=" + member.getId(),
           EMPTY_ENV).process();
       assertEquals(true, result.getStatus().equals(Status.OK));
-      getLogWriter().info("#SB" + getResultAsString(result));
+      LogWriterUtils.getLogWriter().info("#SB" + getResultAsString(result));
       //assertEquals(true, result.getStatus().equals(Status.OK));
     }
   }
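
The tearDown2() override in the hunk above is replaced by the postTearDownCacheTestCase() hook, while other files in this patch simply drop tearDown2() overrides that only delegated to super. Below is a minimal sketch of the new shape, assuming the base class invokes the hook itself after its own teardown; the class name, constructor, and cleanup body are hypothetical, and the import package is an assumption.

    import com.gemstone.gemfire.cache30.CacheTestCase;        // assumed package for CacheTestCase

    public class TearDownHookSketch extends CacheTestCase {   // hypothetical test class
      public TearDownHookSketch(final String name) {
        super(name);
      }

      @Override
      protected final void postTearDownCacheTestCase() throws Exception {
        // cleanup that previously lived in tearDown2(); the framework now calls
        // this hook after its own cache teardown, so no super.tearDown2() call
        disconnectFromDS();
      }
    }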

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsDUnitTest.java
index c449a18..841c960 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsDUnitTest.java
@@ -33,9 +33,14 @@ import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
 import com.gemstone.gemfire.management.internal.cli.result.CompositeResultData;
 import com.gemstone.gemfire.management.internal.cli.result.CompositeResultData.SectionResultData;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 import com.gemstone.gemfire.management.internal.cli.result.ResultBuilder;
 import com.gemstone.gemfire.management.internal.cli.result.ResultData;
 import com.gemstone.gemfire.management.internal.cli.result.TabularResultData;
@@ -61,8 +66,8 @@ public class MiscellaneousCommandsDUnitTest extends CliCommandTestBase {
   }
 
   @Override
-  public void tearDown2() throws Exception {
-    invokeInEveryVM(new SerializableRunnable("reset log level") {
+  protected final void preTearDownCliCommandTestBase() throws Exception {
+    Invoke.invokeInEveryVM(new SerializableRunnable("reset log level") {
       public void run() {
         if (cachedLogLevel != null) {
           System.setProperty("gemfire.log-level", cachedLogLevel);
@@ -82,7 +87,7 @@ public class MiscellaneousCommandsDUnitTest extends CliCommandTestBase {
     cmdResult.resetToFirstLine();
     if (cmdResult != null) {
       String cmdResultStr = commandResultToString(cmdResult);
-      getLogWriter().info("testGCForGroup cmdResultStr=" + cmdResultStr + "; cmdResult=" + cmdResult);
+      LogWriterUtils.getLogWriter().info("testGCForGroup cmdResultStr=" + cmdResultStr + "; cmdResult=" + cmdResult);
       assertEquals(Result.Status.OK, cmdResult.getStatus());
       if (cmdResult.getType().equals(ResultData.TYPE_TABULAR)) {
         TabularResultData table = (TabularResultData) cmdResult.getResultData();
@@ -110,7 +115,7 @@ public class MiscellaneousCommandsDUnitTest extends CliCommandTestBase {
     cmdResult.resetToFirstLine();
     if (cmdResult != null) {
       String cmdResultStr = commandResultToString(cmdResult);
-      getLogWriter().info("testGCForMemberID cmdResultStr=" + cmdResultStr);
+      LogWriterUtils.getLogWriter().info("testGCForMemberID cmdResultStr=" + cmdResultStr);
       assertEquals(Result.Status.OK, cmdResult.getStatus());
       if (cmdResult.getType().equals(ResultData.TYPE_TABULAR)) {
         TabularResultData table = (TabularResultData) cmdResult.getResultData();
@@ -136,7 +141,7 @@ public class MiscellaneousCommandsDUnitTest extends CliCommandTestBase {
       if (cmdResult != null) {
         String log = commandResultToString(cmdResult);
         assertNotNull(log);
-        getLogWriter().info("Show Log is" + log);
+        LogWriterUtils.getLogWriter().info("Show Log is" + log);
         assertEquals(Result.Status.OK, cmdResult.getStatus());
       } else {
         fail("testShowLog failed as did not get CommandResult");
@@ -158,7 +163,7 @@ public class MiscellaneousCommandsDUnitTest extends CliCommandTestBase {
       if (cmdResult != null) {
         String log = commandResultToString(cmdResult);
         assertNotNull(log);
-        getLogWriter().info("Show Log is" + log);
+        LogWriterUtils.getLogWriter().info("Show Log is" + log);
         assertEquals(Result.Status.OK, cmdResult.getStatus());
       } else {
         fail("testShowLog failed as did not get CommandResult");
@@ -175,7 +180,7 @@ public class MiscellaneousCommandsDUnitTest extends CliCommandTestBase {
     cmdResult.resetToFirstLine();
     if (cmdResult != null) {
       String cmdResultStr = commandResultToString(cmdResult);
-      getLogWriter().info("testGCForEntireCluster cmdResultStr=" + cmdResultStr + "; cmdResult=" + cmdResult);
+      LogWriterUtils.getLogWriter().info("testGCForEntireCluster cmdResultStr=" + cmdResultStr + "; cmdResult=" + cmdResult);
       assertEquals(Result.Status.OK, cmdResult.getStatus());
       if (cmdResult.getType().equals(ResultData.TYPE_TABULAR)) {
         TabularResultData table = (TabularResultData) cmdResult.getResultData();
@@ -222,7 +227,7 @@ public class MiscellaneousCommandsDUnitTest extends CliCommandTestBase {
 
   public void testShutDownWithoutTimeout() {
 
-    addExpectedException("EntryDestroyedException");
+    IgnoredException.addIgnoredException("EntryDestroyedException");
 
     setupForShutDown();
     ThreadUtils.sleep(2500);
@@ -232,7 +237,7 @@ public class MiscellaneousCommandsDUnitTest extends CliCommandTestBase {
 
     if (cmdResult != null) {
       String cmdResultStr = commandResultToString(cmdResult);
-      getLogWriter().info("testShutDownWithoutTimeout cmdResultStr=" + cmdResultStr);
+      LogWriterUtils.getLogWriter().info("testShutDownWithoutTimeout cmdResultStr=" + cmdResultStr);
     }
 
     verifyShutDown();
@@ -241,7 +246,7 @@ public class MiscellaneousCommandsDUnitTest extends CliCommandTestBase {
 
     // Need for the Gfsh HTTP enablement during shutdown to properly assess the
     // state of the connection.
-    waitForCriterion(new WaitCriterion() {
+    Wait.waitForCriterion(new WaitCriterion() {
       public boolean done() {
         return !defaultShell.isConnectedAndReady();
       }
@@ -259,14 +264,14 @@ public class MiscellaneousCommandsDUnitTest extends CliCommandTestBase {
     setupForShutDown();
     ThreadUtils.sleep(2500);
 
-    addExpectedException("EntryDestroyedException");
+    IgnoredException.addIgnoredException("EntryDestroyedException");
 
     String command = "shutdown --time-out=15";
     CommandResult cmdResult = executeCommand(command);
 
     if (cmdResult != null) {
       String cmdResultStr = commandResultToString(cmdResult);
-      getLogWriter().info("testShutDownWithTIMEOUT cmdResultStr=" + cmdResultStr);
+      LogWriterUtils.getLogWriter().info("testShutDownWithTIMEOUT cmdResultStr=" + cmdResultStr);
     }
 
     verifyShutDown();
@@ -274,7 +279,7 @@ public class MiscellaneousCommandsDUnitTest extends CliCommandTestBase {
     final HeadlessGfsh defaultShell = getDefaultShell();
 
     // Need for the Gfsh HTTP enablement during shutdown to properly assess the state of the connection.
-    waitForCriterion(new WaitCriterion() {
+    Wait.waitForCriterion(new WaitCriterion() {
       public boolean done() {
         return !defaultShell.isConnectedAndReady();
       }
@@ -303,7 +308,7 @@ public class MiscellaneousCommandsDUnitTest extends CliCommandTestBase {
 
     if (cmdResult != null) {
       String cmdResultStr = commandResultToString(cmdResult);
-      getLogWriter().info("testShutDownForTIMEOUT cmdResultStr = " + cmdResultStr);
+      LogWriterUtils.getLogWriter().info("testShutDownForTIMEOUT cmdResultStr = " + cmdResultStr);
       CommandResult result = (CommandResult) ResultBuilder.createInfoResult(CliStrings.SHUTDOWN_TIMEDOUT);
       String expectedResult = commandResultToString(result);
       assertEquals(expectedResult, cmdResultStr);
@@ -383,7 +388,7 @@ public class MiscellaneousCommandsDUnitTest extends CliCommandTestBase {
         return "Wait for gfsh to get disconnected from Manager.";
       }
     };
-    waitForCriterion(waitCriterion, 5000, 200, true);
+    Wait.waitForCriterion(waitCriterion, 5000, 200, true);
 
     assertTrue(Boolean.FALSE.equals(vm1.invoke(connectedChecker)));
     assertTrue(Boolean.FALSE.equals(vm0.invoke(connectedChecker)));
@@ -414,7 +419,7 @@ public class MiscellaneousCommandsDUnitTest extends CliCommandTestBase {
     String commandString = CliStrings.CHANGE_LOGLEVEL + " --" + CliStrings.CHANGE_LOGLEVEL__LOGLEVEL + "=finer" + " --" + CliStrings.CHANGE_LOGLEVEL__MEMBER + "=" + serverName1 + "," + serverName2;
 
     CommandResult commandResult = executeCommand(commandString);
-    getLogWriter().info("testChangeLogLevel commandResult=" + commandResult);
+    LogWriterUtils.getLogWriter().info("testChangeLogLevel commandResult=" + commandResult);
     assertTrue(Status.OK.equals(commandResult.getStatus()));
     CompositeResultData resultData = (CompositeResultData) commandResult.getResultData();
     SectionResultData section = resultData.retrieveSection("section");
@@ -470,7 +475,7 @@ public class MiscellaneousCommandsDUnitTest extends CliCommandTestBase {
     String commandString = CliStrings.CHANGE_LOGLEVEL + " --" + CliStrings.CHANGE_LOGLEVEL__LOGLEVEL + "=finer" + " --" + CliStrings.CHANGE_LOGLEVEL__GROUPS + "=" + grp1 + "," + grp2;
 
     CommandResult commandResult = executeCommand(commandString);
-    getLogWriter().info("testChangeLogLevelForGrps commandResult=" + commandResult);
+    LogWriterUtils.getLogWriter().info("testChangeLogLevelForGrps commandResult=" + commandResult);
 
     assertTrue(Status.OK.equals(commandResult.getStatus()));
 

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart1DUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart1DUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart1DUnitTest.java
index 1bd8999..c8928fa 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart1DUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/commands/MiscellaneousCommandsExportLogsPart1DUnitTest.java
@@ -25,6 +25,7 @@ import com.gemstone.gemfire.internal.logging.LogWriterImpl;
 import com.gemstone.gemfire.management.cli.Result;
 import com.gemstone.gemfire.management.internal.cli.result.CommandResult;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
 
@@ -95,11 +96,11 @@ public class MiscellaneousCommandsExportLogsPart1DUnitTest extends CliCommandTes
     Result cmdResult = misc.exportLogsPreprocessing("./testExportLogs" + dir, null, null, logLevel, false, false, start,
         end, 1);
 
-    getLogWriter().info("testExportLogs command result =" + cmdResult);
+    LogWriterUtils.getLogWriter().info("testExportLogs command result =" + cmdResult);
 
     if (cmdResult != null) {
       String cmdStringRsult = commandResultToString((CommandResult) cmdResult);
-      getLogWriter().info("testExportLogs cmdStringRsult=" + cmdStringRsult);
+      LogWriterUtils.getLogWriter().info("testExportLogs cmdStringRsult=" + cmdStringRsult);
       assertEquals(Result.Status.OK, cmdResult.getStatus());
     } else {
       fail("testExportLogs failed as did not get CommandResult");
@@ -124,11 +125,11 @@ public class MiscellaneousCommandsExportLogsPart1DUnitTest extends CliCommandTes
 
     Result cmdResult = misc.exportLogsPreprocessing("./testExportLogsForMerge" + dir, null, null, logLevel, false, true,
         start, end, 1);
-    getLogWriter().info("testExportLogsForMerge command=" + cmdResult);
+    LogWriterUtils.getLogWriter().info("testExportLogsForMerge command=" + cmdResult);
 
     if (cmdResult != null) {
       String cmdStringRsult = commandResultToString((CommandResult) cmdResult);
-      getLogWriter().info("testExportLogsForMerge cmdStringRsult=" + cmdStringRsult);
+      LogWriterUtils.getLogWriter().info("testExportLogsForMerge cmdStringRsult=" + cmdStringRsult);
 
       assertEquals(Result.Status.OK, cmdResult.getStatus());
     } else {


[08/62] [abbrv] [partial] incubator-geode git commit: GEODE-773: Extract static methods from DistributedTestCase

Posted by je...@apache.org.
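
The CqQueryUsingPoolDUnitTest diff below repeats the same extraction for the remaining inherited helpers: fail(String, Throwable) becomes Assert.fail, getServerHostName becomes NetworkUtils.getServerHostName, pause becomes Wait.pause, and invokeInEveryVM becomes Invoke.invokeInEveryVM. The sketch that follows is a hypothetical fragment assuming a VM handle named server; only the static utility calls are taken from the diff.

    import com.gemstone.gemfire.test.dunit.Assert;
    import com.gemstone.gemfire.test.dunit.Invoke;
    import com.gemstone.gemfire.test.dunit.NetworkUtils;
    import com.gemstone.gemfire.test.dunit.SerializableRunnable;
    import com.gemstone.gemfire.test.dunit.VM;
    import com.gemstone.gemfire.test.dunit.Wait;

    // Hypothetical fragment used only to illustrate the new call sites.
    public class ExampleCqMigrationSketch {

      void exerciseUtilities(VM server) {
        // was: getServerHostName(server.getHost());
        String host = NetworkUtils.getServerHostName(server.getHost());

        // was: invokeInEveryVM(new SerializableRunnable("getSystem") { ... });
        Invoke.invokeInEveryVM(new SerializableRunnable("noop") {
          public void run() {
            // hypothetical per-VM setup
          }
        });

        // was: pause(1000);
        Wait.pause(1000);

        try {
          throw new Exception("simulated failure");
        } catch (Exception ex) {
          // was: fail("While starting CacheServer", ex);
          Assert.fail("Failed on host " + host, ex);
        }
      }
    }
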
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqQueryUsingPoolDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqQueryUsingPoolDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqQueryUsingPoolDUnitTest.java
index 4988831..2f317f2 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqQueryUsingPoolDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqQueryUsingPoolDUnitTest.java
@@ -66,10 +66,16 @@ import com.gemstone.gemfire.internal.cache.LocalRegion;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheClientNotifier;
 import com.gemstone.gemfire.internal.cache.tier.sockets.CacheClientProxy;
 import com.gemstone.gemfire.internal.cache.tier.sockets.ClientProxyMembershipID;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * This class tests the ContinuousQuery mechanism in GemFire.
@@ -159,15 +165,15 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     
     //We're seeing this on the server when the client
     //disconnects.
-    addExpectedException("Connection reset");
-    addExpectedException("SocketTimeoutException");
-    addExpectedException("ServerConnectivityException");
-    addExpectedException("Socket Closed");
-    addExpectedException("SocketException");
+    IgnoredException.addIgnoredException("Connection reset");
+    IgnoredException.addIgnoredException("SocketTimeoutException");
+    IgnoredException.addIgnoredException("ServerConnectivityException");
+    IgnoredException.addIgnoredException("Socket Closed");
+    IgnoredException.addIgnoredException("SocketException");
     // avoid IllegalStateException from HandShake by connecting all vms to
     // the system before creating connection pools
     getSystem();
-    invokeInEveryVM(new SerializableRunnable("getSystem") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
       public void run() {
         getSystem();
       }
@@ -202,7 +208,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
         "Create Cache Server") {
       public void run2() throws CacheException
       {
-        getLogWriter().info("### Create Cache Server. ###");
+        LogWriterUtils.getLogWriter().info("### Create Cache Server. ###");
         AttributesFactory factory = new AttributesFactory();
         factory.setScope(Scope.DISTRIBUTED_ACK);
         factory.setMirrorType(mirrorType);
@@ -223,7 +229,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
         }
 
         catch (Exception ex) {
-          fail("While starting CacheServer", ex);
+          Assert.fail("While starting CacheServer", ex);
         }
         
       }
@@ -245,7 +251,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
         "Create Cache Server") {
       public void run2() throws CacheException
       {
-          getLogWriter().info("### Create Cache Server. ###");
+          LogWriterUtils.getLogWriter().info("### Create Cache Server. ###");
           //AttributesFactory factory = new AttributesFactory();
           //factory.setScope(Scope.DISTRIBUTED_ACK);
           //factory.setMirrorType(MirrorType.KEYS_VALUES);
@@ -264,13 +270,13 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
           //assertTrue(getSystem().getDistributionManager().getOtherDistributionManagerIds().size() > 0);
           for (int i = 0; i < regions.length; i++) {
             Region r = createRegion(regions[i], attr.create());
-            getLogWriter().info("Server created the region: "+r);
+            LogWriterUtils.getLogWriter().info("Server created the region: "+r);
           }
           try {
             startBridgeServer(port, true);
           }
           catch (Exception ex) {
-            fail("While starting CacheServer", ex);
+            Assert.fail("While starting CacheServer", ex);
           }
        
       }
@@ -284,7 +290,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
   public void closeServer(VM server) {
     server.invoke(new SerializableRunnable("Close CacheServer") {
       public void run() {
-        getLogWriter().info("### Close CacheServer. ###");
+        LogWriterUtils.getLogWriter().info("### Close CacheServer. ###");
         stopBridgeServer(getCache());
       }
     });
@@ -302,14 +308,14 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     SerializableRunnable createQService =
       new CacheSerializableRunnable("Create Client") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Create Client. ###");
+        LogWriterUtils.getLogWriter().info("### Create Client. ###");
         //Region region1 = null;
         // Initialize CQ Service.
         try {
           getCache().getQueryService();
-          addExpectedException("java.net.ConnectException");
+          IgnoredException.addIgnoredException("java.net.ConnectException");
         } catch (Exception cqe) {
-          fail("Failed to getCQService.", cqe);
+          Assert.fail("Failed to getCQService.", cqe);
         }
         
         AttributesFactory regionFactory = new AttributesFactory();
@@ -326,7 +332,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
         }
         for (int i=0; i < regions.length; i++) {        
           createRegion(regions[i], regionFactory.createRegionAttributes());
-          getLogWriter().info("### Successfully Created Region on Client :" + regions[i]);
+          LogWriterUtils.getLogWriter().info("### Successfully Created Region on Client :" + regions[i]);
           //region1.getAttributesMutator().setCacheListener(new CqListener());
         }
       }
@@ -341,18 +347,18 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     SerializableRunnable closeCQService =
       new CacheSerializableRunnable("Close Client") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Close Client. ###");
+        LogWriterUtils.getLogWriter().info("### Close Client. ###");
         try {
           ((DefaultQueryService)getCache().getQueryService()).closeCqService();
         } catch (Exception ex) {
-          getLogWriter().info("### Failed to get CqService during ClientClose() ###");
+          LogWriterUtils.getLogWriter().info("### Failed to get CqService during ClientClose() ###");
         }
         
       }
     };
     
     client.invoke(closeCQService);
-    pause(1000);
+    Wait.pause(1000);
   }
 
   public void createFunctionalIndex(VM vm, final String indexName, final String indexedExpression, final String fromClause) {
@@ -362,12 +368,12 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
         try {
           qs = getCache().getQueryService();
         }catch (Exception ex) {
-          getLogWriter().info("### Failed to get CqService during ClientClose() ###");
+          LogWriterUtils.getLogWriter().info("### Failed to get CqService during ClientClose() ###");
         }
         try {
           qs.createIndex(indexName, IndexType.FUNCTIONAL, indexedExpression, fromClause);
         } catch (Exception ex) {
-          getLogWriter().info("### Failed to create Index :" + indexName);
+          LogWriterUtils.getLogWriter().info("### Failed to create Index :" + indexName);
         }
       }
     });
@@ -381,7 +387,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
         for (int i = 1; i <= size; i++) {
           region1.put(KEY+i, new Portfolio(i, i));
         }
-        getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
+        LogWriterUtils.getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
       }
     });
   }
@@ -396,7 +402,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
           portfolio.createTime = System.currentTimeMillis();
           region1.put(KEY+i, portfolio);
         }
-        getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
+        LogWriterUtils.getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
       }
     });
   }
@@ -409,7 +415,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
         for (int i = 1; i <= size; i++) {
           region1.destroy(KEY+i);
         }
-        getLogWriter().info("### Number of Entries In Region after Delete :" + region1.keys().size());
+        LogWriterUtils.getLogWriter().info("### Number of Entries In Region after Delete :" + region1.keys().size());
       }
       
     });
@@ -425,7 +431,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
         for (int i = 1; i <= size; i++) {
           region1.invalidate(KEY+i);
         }
-        getLogWriter().info("### Number of Entries In Region after Delete :" + region1.keys().size());
+        LogWriterUtils.getLogWriter().info("### Number of Entries In Region after Delete :" + region1.keys().size());
       }
       
     });
@@ -444,7 +450,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
       public void run2() throws CacheException {
         // Create Cache.
         getCache();
-        addExpectedException("java.net.ConnectException");
+        IgnoredException.addIgnoredException("java.net.ConnectException");
         
         PoolFactory cpf = PoolManager.createFactory();
         cpf.setSubscriptionEnabled(true);
@@ -455,7 +461,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
         } 
         
         for (int i=0; i < servers.length; i++){
-          getLogWriter().info("### Adding to Pool. ### Server : " + servers[i] + " Port : " + ports[i]);
+          LogWriterUtils.getLogWriter().info("### Adding to Pool. ### Server : " + servers[i] + " Port : " + ports[i]);
           cpf.addServer(servers[i], ports[i]);
         }
         
@@ -472,17 +478,17 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
         //getLogWriter().info("### DEBUG CREATE CQ START ####");
         //pause(20 * 1000);
         
-        getLogWriter().info("### Create CQ. ###" + cqName);
+        LogWriterUtils.getLogWriter().info("### Create CQ. ###" + cqName);
         // Get CQ Service.
         QueryService qService = null;
         try {
           qService = (PoolManager.find(poolName)).getQueryService();
         } catch (Exception cqe) {
-          fail("Failed to getCQService.", cqe);
+          Assert.fail("Failed to getCQService.", cqe);
         }
         // Create CQ Attributes.
         CqAttributesFactory cqf = new CqAttributesFactory();
-        CqListener[] cqListeners = {new CqQueryTestListener(getLogWriter())};
+        CqListener[] cqListeners = {new CqQueryTestListener(LogWriterUtils.getLogWriter())};
         ((CqQueryTestListener)cqListeners[0]).cqName = cqName;
         
         cqf.initCqListeners(cqListeners);
@@ -495,7 +501,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
         } catch (Exception ex){
           AssertionError err = new AssertionError("Failed to create CQ " + cqName + " . ");
           err.initCause(ex);
-          getLogWriter().info("QueryService is :" + qService, err);
+          LogWriterUtils.getLogWriter().info("QueryService is :" + qService, err);
           throw err;
         }
       }
@@ -510,17 +516,17 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
         //getLogWriter().info("### DEBUG CREATE CQ START ####");
         //pause(20 * 1000);
         
-        getLogWriter().info("### Create CQ. ###" + cqName);
+        LogWriterUtils.getLogWriter().info("### Create CQ. ###" + cqName);
         // Get CQ Service.
         QueryService qService = null;
         try {
           qService = getCache().getQueryService();
         } catch (Exception cqe) {
-          fail("Failed to getCQService.", cqe);
+          Assert.fail("Failed to getCQService.", cqe);
         }
         // Create CQ Attributes.
         CqAttributesFactory cqf = new CqAttributesFactory();
-        CqListener[] cqListeners = {new CqQueryTestListener(getLogWriter())};
+        CqListener[] cqListeners = {new CqQueryTestListener(LogWriterUtils.getLogWriter())};
         ((CqQueryTestListener)cqListeners[0]).cqName = cqName;
         
         cqf.initCqListeners(cqListeners);
@@ -533,7 +539,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
         } catch (Exception ex){
           AssertionError err = new AssertionError("Failed to create CQ " + cqName + " . ");
           err.initCause(ex);
-          getLogWriter().info("QueryService is :" + qService, err);
+          LogWriterUtils.getLogWriter().info("QueryService is :" + qService, err);
           throw err;
         }
       }
@@ -545,10 +551,10 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     vm.invoke(new CacheSerializableRunnable("Create CQ with no name:" ) {
       public void run2() throws CacheException {
         //pause(60 * 1000);
-        getLogWriter().info("### DEBUG CREATE CQ START ####");
+        LogWriterUtils.getLogWriter().info("### DEBUG CREATE CQ START ####");
         //pause(20 * 1000);
         
-        getLogWriter().info("### Create CQ with no name. ###");
+        LogWriterUtils.getLogWriter().info("### Create CQ with no name. ###");
         // Get CQ Service.
         QueryService qService = null;
         CqQuery cq1 = null;
@@ -557,14 +563,14 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
         try {
           qService = (PoolManager.find(poolName)).getQueryService();
         } catch (Exception cqe) {
-          fail("Failed to getCQService.", cqe);
+          Assert.fail("Failed to getCQService.", cqe);
         }
         
         SelectResults cqResults = null;
         for (int i = 0; i < 20; ++i) {
           // Create CQ Attributes.
           CqAttributesFactory cqf = new CqAttributesFactory();
-          CqListener[] cqListeners = {new CqQueryTestListener(getLogWriter())};
+          CqListener[] cqListeners = {new CqQueryTestListener(LogWriterUtils.getLogWriter())};
           
           cqf.initCqListeners(cqListeners);
           CqAttributes cqa = cqf.create();
@@ -574,38 +580,38 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
             cq1 = qService.newCq(queryStr, cqa);
             ((CqQueryTestListener)cqListeners[0]).cqName = cq1.getName();
           } catch (Exception ex){
-            getLogWriter().info("CQService is :" + qService);
-            fail("Failed to create CQ with no name" +  " . ", ex);
+            LogWriterUtils.getLogWriter().info("CQService is :" + qService);
+            Assert.fail("Failed to create CQ with no name" +  " . ", ex);
           }
           
           if (cq1 == null) {
-            getLogWriter().info("Failed to get CqQuery object for CQ with no name.");
+            LogWriterUtils.getLogWriter().info("Failed to get CqQuery object for CQ with no name.");
           }
           else {
             cqName = cq1.getName();
-            getLogWriter().info("Created CQ with no name, generated CQ name: " + cqName + " CQ state:" + cq1.getState());
+            LogWriterUtils.getLogWriter().info("Created CQ with no name, generated CQ name: " + cqName + " CQ state:" + cq1.getState());
             assertTrue("Create CQ with no name illegal state", cq1.getState().isStopped());
           }
           if ( i%2 == 0) {
             try {
               cqResults = cq1.executeWithInitialResults();
             } catch (Exception ex){
-              getLogWriter().info("CqService is :" + qService);
-              fail("Failed to execute CQ with initial results, cq name: " 
+              LogWriterUtils.getLogWriter().info("CqService is :" + qService);
+              Assert.fail("Failed to execute CQ with initial results, cq name: " 
                   + cqName + " . ",  ex);
             }
-            getLogWriter().info("initial result size = " + cqResults.size());
-            getLogWriter().info("CQ state after execute with initial results = " + cq1.getState());
+            LogWriterUtils.getLogWriter().info("initial result size = " + cqResults.size());
+            LogWriterUtils.getLogWriter().info("CQ state after execute with initial results = " + cq1.getState());
             assertTrue("executeWithInitialResults() state mismatch", cq1.getState().isRunning());
           }
           else {
             try {
               cq1.execute();
             } catch (Exception ex){
-              getLogWriter().info("CQService is :" + qService);
-              fail("Failed to execute CQ " + cqName + " . ", ex);
+              LogWriterUtils.getLogWriter().info("CQService is :" + qService);
+              Assert.fail("Failed to execute CQ " + cqName + " . ", ex);
             }
-            getLogWriter().info("CQ state after execute = " + cq1.getState());
+            LogWriterUtils.getLogWriter().info("CQ state after execute = " + cq1.getState());
             assertTrue("execute() state mismatch", cq1.getState().isRunning());
           }
           
@@ -613,8 +619,8 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
           try {
             cq1.close();
           } catch (Exception ex){
-            getLogWriter().info("CqService is :" + qService);
-            fail("Failed to close CQ " + cqName + " . ", ex);
+            LogWriterUtils.getLogWriter().info("CqService is :" + qService);
+            Assert.fail("Failed to close CQ " + cqName + " . ", ex);
           }
           assertTrue("closeCq() state mismatch", cq1.getState().isClosed());
         }
@@ -642,7 +648,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
 
       private void work() throws CacheException {
       //pause(60 * 1000);
-      getLogWriter().info("### DEBUG EXECUTE CQ START ####");
+      LogWriterUtils.getLogWriter().info("### DEBUG EXECUTE CQ START ####");
       //pause(20 * 1000);
       
       // Get CQ Service.
@@ -662,16 +668,16 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
       try {
         cq1 = cqService.getCq(cqName);
         if (cq1 == null) {
-          getLogWriter().info("Failed to get CqQuery object for CQ name: " + cqName);
+          LogWriterUtils.getLogWriter().info("Failed to get CqQuery object for CQ name: " + cqName);
           fail("Failed to get CQ " + cqName);
         }
         else {
-          getLogWriter().info("Obtained CQ, CQ name: " + cq1.getName());
+          LogWriterUtils.getLogWriter().info("Obtained CQ, CQ name: " + cq1.getName());
           assertTrue("newCq() state mismatch", cq1.getState().isStopped());
         }
       } catch (Exception ex){
-        getLogWriter().info("CqService is :" + cqService);
-        getLogWriter().error(ex);
+        LogWriterUtils.getLogWriter().info("CqService is :" + cqService);
+        LogWriterUtils.getLogWriter().error(ex);
         AssertionError err = new AssertionError("Failed to execute  CQ " + cqName);
         err.initCause(ex);
         throw err;
@@ -683,13 +689,13 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
         try {
           cqResults = cq1.executeWithInitialResults();
         } catch (Exception ex){
-          getLogWriter().info("CqService is :" + cqService);
+          LogWriterUtils.getLogWriter().info("CqService is :" + cqService);
           ex.printStackTrace();
           AssertionError err = new AssertionError("Failed to execute  CQ " + cqName);
           err.initCause(ex);
           throw err;
         }
-        getLogWriter().info("initial result size = " + cqResults.size());
+        LogWriterUtils.getLogWriter().info("initial result size = " + cqResults.size());
         assertTrue("executeWithInitialResults() state mismatch", cq1.getState().isRunning());
         if (expectedResultsSize >= 0) {
           assertEquals("Unexpected results size for CQ: " + cqName + 
@@ -719,7 +725,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
           AssertionError err = new AssertionError("Failed to execute  CQ " + cqName);
           err.initCause(ex);
           if (expectedErr == null) {
-            getLogWriter().info("CqService is :" + cqService, err);
+            LogWriterUtils.getLogWriter().info("CqService is :" + cqService, err);
           }
           throw err;
         }
@@ -749,13 +755,13 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
   public void stopCQ(VM vm, final String cqName) throws Exception {
     vm.invoke(new CacheSerializableRunnable("Stop CQ :" + cqName) {
       public void run2() throws CacheException {
-        getLogWriter().info("### Stop CQ. ###" + cqName);
+        LogWriterUtils.getLogWriter().info("### Stop CQ. ###" + cqName);
         // Get CQ Service.
         QueryService cqService = null;
         try {
           cqService = getCache().getQueryService();
         } catch (Exception cqe) {
-          fail("Failed to getCQService.", cqe);
+          Assert.fail("Failed to getCQService.", cqe);
         }
         
         // Stop CQ.
@@ -764,7 +770,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
           cq1 = cqService.getCq(cqName);
           cq1.stop();
         } catch (Exception ex){
-          fail("Failed to stop CQ " + cqName + " . ", ex);
+          Assert.fail("Failed to stop CQ " + cqName + " . ", ex);
         }
         assertTrue("Stop CQ state mismatch", cq1.getState().isStopped());
       }
@@ -777,20 +783,20 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     vm.invoke(new CacheSerializableRunnable("Stop CQ :" + cqName) {
       public void run2() throws CacheException {
         CqQuery cq1 = null;
-        getLogWriter().info("### Stop and Exec CQ. ###" + cqName);
+        LogWriterUtils.getLogWriter().info("### Stop and Exec CQ. ###" + cqName);
         // Get CQ Service.
         QueryService cqService = null;
         try {
           cqService = getCache().getQueryService();
         } catch (Exception cqe) {
-          fail("Failed to getCqService.", cqe);
+          Assert.fail("Failed to getCqService.", cqe);
         }
         
         // Get CQ.
         try {
           cq1 = cqService.getCq(cqName);
         } catch (Exception ex){
-          fail("Failed to get CQ " + cqName + " . ", ex);
+          Assert.fail("Failed to get CQ " + cqName + " . ", ex);
         }
         
         for (int i = 0; i < count; ++i) {
@@ -798,21 +804,21 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
           try {
             cq1.stop();
           } catch (Exception ex) {
-            fail("Count = " + i + "Failed to stop CQ " + cqName + " . ", ex);
+            Assert.fail("Count = " + i + "Failed to stop CQ " + cqName + " . ", ex);
           }
           assertTrue("Stop CQ state mismatch, count = " + i, cq1.getState().isStopped());
-          getLogWriter().info("After stop in Stop and Execute loop, ran successfully, loop count: " + i);
-          getLogWriter().info("CQ state: " + cq1.getState());
+          LogWriterUtils.getLogWriter().info("After stop in Stop and Execute loop, ran successfully, loop count: " + i);
+          LogWriterUtils.getLogWriter().info("CQ state: " + cq1.getState());
           
           // Re-execute CQ
           try {
             cq1.execute();
           } catch (Exception ex) {
-            fail("Count = " + i + "Failed to execute CQ " + cqName + " . ", ex);
+            Assert.fail("Count = " + i + "Failed to execute CQ " + cqName + " . ", ex);
           }
           assertTrue("Execute CQ state mismatch, count = " + i, cq1.getState().isRunning());
-          getLogWriter().info("After execute in Stop and Execute loop, ran successfully, loop count: " + i);
-          getLogWriter().info("CQ state: " + cq1.getState());
+          LogWriterUtils.getLogWriter().info("After execute in Stop and Execute loop, ran successfully, loop count: " + i);
+          LogWriterUtils.getLogWriter().info("CQ state: " + cq1.getState());
         }
       }
     });
@@ -823,13 +829,13 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
   public void closeCQ(VM vm, final String cqName) throws Exception {
     vm.invoke(new CacheSerializableRunnable("Close CQ :" + cqName) {
       public void run2() throws CacheException {
-        getLogWriter().info("### Close CQ. ###" + cqName);
+        LogWriterUtils.getLogWriter().info("### Close CQ. ###" + cqName);
         // Get CQ Service.
         QueryService cqService = null;
         try {
           cqService = getCache().getQueryService();
         } catch (Exception cqe) {
-          fail("Failed to getCqService.", cqe);
+          Assert.fail("Failed to getCqService.", cqe);
         }
         
         // Close CQ.
@@ -838,7 +844,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
           cq1 = cqService.getCq(cqName);
           cq1.close();
         } catch (Exception ex) {
-          fail("Failed to close CQ " + cqName + " . ", ex);
+          Assert.fail("Failed to close CQ " + cqName + " . ", ex);
         }
         assertTrue("Close CQ state mismatch", cq1.getState().isClosed());
       }
@@ -854,7 +860,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
         Region region = null;
         try {
           region = getRootRegion().getSubregion(regionName);
-          region.getAttributesMutator().setCacheListener(new CertifiableTestCacheListener(getLogWriter()));
+          region.getAttributesMutator().setCacheListener(new CertifiableTestCacheListener(LogWriterUtils.getLogWriter()));
         } catch (Exception cqe) {
           AssertionError err = new AssertionError("Failed to get Region.");
           err.initCause(cqe);
@@ -891,14 +897,14 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
         try {
           cqService = getCache().getQueryService();
         } catch (Exception cqe) {
-          fail("Failed to getCQService.", cqe);
+          Assert.fail("Failed to getCQService.", cqe);
         }
         
         int numCqs = 0;
         try {
           numCqs = cqService.getCqs().length;
         } catch (Exception ex) {
-          fail ("Failed to get the CQ Count.", ex);
+          Assert.fail ("Failed to get the CQ Count.", ex);
         }
         assertEquals("Number of cqs mismatch.", cqCnt, numCqs);
       }
@@ -913,13 +919,13 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
   private void failIfCQExists(VM vm, final String cqName) {
     vm.invoke(new CacheSerializableRunnable("Fail if CQ exists") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Fail if CQ Exists. ### " + cqName);
+        LogWriterUtils.getLogWriter().info("### Fail if CQ Exists. ### " + cqName);
         // Get CQ Service.
         QueryService cqService = null;
         try {          
           cqService = getCache().getQueryService();
         } catch (Exception cqe) {
-          fail("Failed to getCQService.", cqe);
+          Assert.fail("Failed to getCQService.", cqe);
         }
         
         CqQuery cQuery = cqService.getCq(cqName);
@@ -935,13 +941,13 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     vm.invoke(new CacheSerializableRunnable("Validate CQs") {
       public void run2() throws CacheException {
         
-        getLogWriter().info("### Validating CQ. ### " + cqName);
+        LogWriterUtils.getLogWriter().info("### Validating CQ. ### " + cqName);
         // Get CQ Service.
         QueryService cqService = null;
         try {          
           cqService = getCache().getQueryService();
         } catch (Exception cqe) {
-          fail("Failed to getCQService.", cqe);
+          Assert.fail("Failed to getCQService.", cqe);
         }
         
         CqQuery cQuery = cqService.getCq(cqName);
@@ -984,13 +990,13 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
       final int totalEvents) {
     vm.invoke(new CacheSerializableRunnable("Validate CQs") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Validating CQ. ### " + cqName);
+        LogWriterUtils.getLogWriter().info("### Validating CQ. ### " + cqName);
         // Get CQ Service.
         QueryService cqService = null;
         try {          
           cqService = getCache().getQueryService();
         } catch (Exception cqe) {
-          fail("Failed to getCQService.", cqe);
+          Assert.fail("Failed to getCQService.", cqe);
         }
         
         CqQuery cQuery = cqService.getCq(cqName);
@@ -1101,7 +1107,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
         try {
           cqService = getCache().getQueryService();
         } catch (Exception cqe) {
-          fail("Failed to getCQService.", cqe);
+          Assert.fail("Failed to getCQService.", cqe);
         }
         
         CqQuery cQuery = cqService.getCq(cqName);
@@ -1160,7 +1166,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
         try {
           cqService = getCache().getQueryService();
         } catch (Exception cqe) {
-          fail("Failed to getCQService.", cqe);
+          Assert.fail("Failed to getCQService.", cqe);
         }
 
         CqQuery cQuery = cqService.getCq(cqName);
@@ -1177,7 +1183,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
               + "ms for Cq State to be changed to " + state
               + "; consider raising " + WAIT_PROPERTY, 
               (System.currentTimeMillis() - start) < MAX_TIME);
-          pause(100);
+          Wait.pause(100);
         }
       }
     });
@@ -1192,7 +1198,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
         try {
           cqService = getCache().getQueryService();
         } catch (Exception cqe) {
-          fail("Failed to getCQService.", cqe);
+          Assert.fail("Failed to getCQService.", cqe);
         }
         
         CqQuery cQuery = cqService.getCq(cqName);
@@ -1211,7 +1217,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
   public void validateQuery(VM vm, final String query, final int resultSize) {
     vm.invoke(new CacheSerializableRunnable("Validate Query") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Validating Query. ###");
+        LogWriterUtils.getLogWriter().info("### Validating Query. ###");
         QueryService qs = getCache().getQueryService();
         
         Query q = qs.newQuery(query);
@@ -1219,12 +1225,12 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
           Object r = q.execute();
           if(r instanceof Collection){
             int rSize = ((Collection)r).size();
-            getLogWriter().info("### Result Size is :" + rSize);
+            LogWriterUtils.getLogWriter().info("### Result Size is :" + rSize);
             assertEquals(rSize, resultSize);
           }
         }
         catch (Exception e) {
-          fail("Failed to execute the query.", e);
+          Assert.fail("Failed to execute the query.", e);
         }
       }
     });
@@ -1270,20 +1276,20 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     vm.invoke(new CacheSerializableRunnable("Stop CQ :" + cqName) {
       public void run2() throws CacheException {
         CqQuery cq1 = null;
-        getLogWriter().info("### CQ attributes mutator for ###" + cqName);
+        LogWriterUtils.getLogWriter().info("### CQ attributes mutator for ###" + cqName);
         // Get CQ Service.
         QueryService cqService = null;
         try {
           cqService = getCache().getQueryService();
         } catch (Exception cqe) {
-          fail("Failed to getCQService.", cqe);
+          Assert.fail("Failed to getCQService.", cqe);
         }
         
         // Get CQ.
         try {
           cq1 = cqService.getCq(cqName);
         } catch (Exception ex) {
-          fail("Failed to get CQ " + cqName + " . ", ex);
+          Assert.fail("Failed to get CQ " + cqName + " . ", ex);
         }
         CqAttributesMutator cqAttrMutator = cq1.getCqAttributesMutator();
         CqAttributes cqAttr = cq1.getCqAttributes();
@@ -1340,7 +1346,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     /* Init Server and Client */
     createServer(server);
     final int thePort = server.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     
     String poolName = "testInterestListAndCQs";
     createPool(client, poolName, host0, thePort);
@@ -1363,7 +1369,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     for (int i=1; i <=10; i++){
       waitForCreated(client, "testInterestListAndCQs_0", KEY + i);
     }
-    pause(5 * 1000);
+    Wait.pause(5 * 1000);
     
     // validate CQs.
     validateCQ(client, "testInterestListAndCQs_0",
@@ -1404,7 +1410,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
             return excuse;
           }
         };
-        DistributedTestCase.waitForCriterion(wc, 60 * 1000, 1000, true);
+        Wait.waitForCriterion(wc, 60 * 1000, 1000, true);
         
         CertifiableTestCacheListener ctl = (CertifiableTestCacheListener) region.getAttributes().getCacheListener();
         for (int i = 1; i <= 10; i++) {
@@ -1532,7 +1538,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     /* Init Server and Client */
     createServer(server);
     final int thePort = server.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     
     String poolName = "testCQStopExecute";
     createPool(client, poolName, host0, thePort);
@@ -1619,7 +1625,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     /* Init Server and Client */
     createServer(server);
     final int thePort = server.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     
     String poolName = "testCQAttributesMutator";
     createPool(client, poolName, host0, thePort);
@@ -1717,7 +1723,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     /* Init Server and Client */
     createServer(server);
     final int thePort = server.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     
     String poolName = "testCQCreateClose";
     System.out.println("##### Pool Name :" + poolName + " host :" + host0 + " port :" + thePort);
@@ -1816,22 +1822,22 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     
     client.invoke(new CacheSerializableRunnable("CloseAll CQ :") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Close All CQ. ###");
+        LogWriterUtils.getLogWriter().info("### Close All CQ. ###");
         // Get CQ Service.
         QueryService cqService = null;
         try {          
           cqService = getCache().getQueryService();
         } catch (Exception cqe) {
-          getLogWriter().info("Failed to getCQService.", cqe);
-          fail("Failed to getCQService.", cqe);
+          LogWriterUtils.getLogWriter().info("Failed to getCQService.", cqe);
+          Assert.fail("Failed to getCQService.", cqe);
         }
         
         // Close CQ.
         try {
           cqService.closeCqs();
         } catch (Exception ex) {
-          getLogWriter().info("Failed to close All CQ.", ex);
-          fail("Failed to close All CQ. ", ex);
+          LogWriterUtils.getLogWriter().info("Failed to close All CQ.", ex);
+          Assert.fail("Failed to close All CQ. ", ex);
         }
       }
     });
@@ -1850,20 +1856,20 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     // Call close all CQ.
     client.invoke(new CacheSerializableRunnable("CloseAll CQ 2 :") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Close All CQ 2. ###");
+        LogWriterUtils.getLogWriter().info("### Close All CQ 2. ###");
         // Get CQ Service.
         QueryService cqService = null;
         try {          
           cqService = getCache().getQueryService();
         } catch (Exception cqe) {
-          fail("Failed to getCQService.", cqe);
+          Assert.fail("Failed to getCQService.", cqe);
         }
         
         // Close CQ.
         try {
           cqService.closeCqs();
         } catch (Exception ex) {
-          fail("Failed to close All CQ  . ", ex);
+          Assert.fail("Failed to close All CQ  . ", ex);
         }
       }
     });
@@ -1886,7 +1892,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     /* Init Server and Client */
     createServer(server);
     final int thePort = server.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     
     String poolName = "testRegionDestroy";
     createPool(client, poolName, host0, thePort);
@@ -1947,7 +1953,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
             return excuse;
           }
         };
-        DistributedTestCase.waitForCriterion(wc, 30 * 1000, 250, true);
+        Wait.waitForCriterion(wc, 30 * 1000, 250, true);
         
         Region region = getRootRegion().getSubregion(regions[0]);
         assertNotNull(region);
@@ -1969,7 +1975,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
       }
     });
     
-    pause(2 * 1000);
+    Wait.pause(2 * 1000);
     validateCQCount(client, 0);
     
     closeClient(client);
@@ -1991,7 +1997,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     /* Create Server and Client */
     createServer(server);
     final int thePort = server.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     
     String poolName1 = "testCQWithMultipleClients1";
     String poolName2 = "testCQWithMultipleClients2";
@@ -2144,7 +2150,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     createServer(server);
     
     final int thePort = server.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     
     String poolName = "testCQResultSet";
     createPool(client, poolName, host0, thePort);
@@ -2155,7 +2161,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     /* CQ Test with initial Values. */
     int size = 10;
     createValues(server, regions[0], size);
-    pause(1*500);
+    Wait.pause(1*500);
     
     // Create CQs.
     createCQ(client, poolName, "testCQResultSet_0", cqs[0]);    
@@ -2218,7 +2224,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     createServer(server);
     
     final int thePort = server.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
 
     String poolName = "testCQEvents";
     createPool(client, poolName, host0, thePort);
@@ -2292,7 +2298,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
       }
     });
     
-    pause(1 * 1000);
+    Wait.pause(1 * 1000);
     // cqs should not get any creates, deletes or updates. rdubey.
     validateCQ(client, "testCQEvents_0",
         /* resultSize: */ noTest,
@@ -2323,7 +2329,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     createServer(server);
     
     final int thePort = server.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
 
     String poolName = "testCQEvents";
     createPool(client, poolName, host0, thePort);
@@ -2397,7 +2403,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
       }
     });
     
-    pause(1 * 1000);
+    Wait.pause(1 * 1000);
     // cqs should not get any creates, deletes or updates. rdubey.
     validateCQ(client, "testCQEvents_0",
         /* resultSize: */ noTest,
@@ -2425,7 +2431,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     createServer(server);
     
     final int thePort = server.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
 
     String poolName = "testEnableDisableCQ";
     createPool(client, poolName, host0, thePort);
@@ -2446,16 +2452,16 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
           cqService = getCache().getQueryService();
           cqService.stopCqs();
         } catch (Exception cqe) {
-          fail("Failed to getCQService.", cqe);
+          Assert.fail("Failed to getCQService.", cqe);
         }                
       }      
     });
     
-    pause(1 * 1000);
+    Wait.pause(1 * 1000);
     // Init values at server.
     int size = 10;
     createValues(server, regions[0], size);
-    pause(1 * 500);
+    Wait.pause(1 * 500);
     // There should not be any creates.
     validateCQ(client, "testEnableDisable_0",
         /* resultSize: */ noTest,
@@ -2476,11 +2482,11 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
           cqService = getCache().getQueryService();
           cqService.executeCqs();
         } catch (Exception cqe) {
-          fail("Failed to getCQService.", cqe);
+          Assert.fail("Failed to getCQService.", cqe);
         }                
       }
     });
-    pause(1 * 1000);
+    Wait.pause(1 * 1000);
     createValues(server, regions[0], size);    
     waitForUpdated(client, "testEnableDisable_0", KEY+size);
     // It gets created on the CQs
@@ -2503,14 +2509,14 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
           cqService = getCache().getQueryService();
           cqService.stopCqs("/root/" + regions[0]);
         } catch (Exception cqe) {
-          fail("Failed to getCQService.", cqe);
+          Assert.fail("Failed to getCQService.", cqe);
         }                
       }
     });
     
-    pause(2 * 1000);
+    Wait.pause(2 * 1000);
     deleteValues(server, regions[0], size / 2);
-    pause(1 * 500);    
+    Wait.pause(1 * 500);    
     // There should not be any deletes.
     validateCQ(client, "testEnableDisable_0",
         /* resultSize: */ noTest,
@@ -2531,11 +2537,11 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
           cqService = getCache().getQueryService();
           cqService.executeCqs("/root/" + regions[0]);
         } catch (Exception cqe) {
-          fail("Failed to getCQService.", cqe);
+          Assert.fail("Failed to getCQService.", cqe);
         }                
       }
     });
-    pause(1 * 1000);
+    Wait.pause(1 * 1000);
     createValues(server, regions[0], size / 2);    
     waitForCreated(client, "testEnableDisable_0", KEY+(size / 2));
     // Gets updated on the CQ.
@@ -2566,7 +2572,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     createServer(server);
     
     final int thePort = server.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     
     String poolName = "testQuery";
     createPool(client, poolName, host0, thePort);
@@ -2611,7 +2617,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     createServer(server1);
     
     final int port1 = server1.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     // Create client.
 //    Properties props = new Properties();
     // Create client with redundancyLevel -1
@@ -2629,23 +2635,23 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
       createCQ(client, poolName, "testCQFailOver_" + i, cqs[i]);
       executeCQ(client, "testCQFailOver_" + i, false, null);
     }
-    pause(1 * 1000);
+    Wait.pause(1 * 1000);
     
     // CREATE.
     createValues(server1, regions[0], 10);
     createValues(server1, regions[1], 10);
     waitForCreated(client, "testCQFailOver_0", KEY+10);
 
-    pause(1 * 1000);
+    Wait.pause(1 * 1000);
     
     createServer(server2, ports[0]);
     final int thePort2 = server2.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
     System.out.println("### Port on which server1 running : " + port1 + 
         " Server2 running : " + thePort2);
-    pause(3 * 1000);
+    Wait.pause(3 * 1000);
 
     // Extra pause - added after downmerging trunk r17050
-    pause(5 * 1000);
+    Wait.pause(5 * 1000);
     
     // UPDATE - 1.
     createValues(server1, regions[0], 10);    
@@ -2663,7 +2669,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     closeServer(server1);
     
     // Fail over should happen.
-    pause(3 * 1000);
+    Wait.pause(3 * 1000);
     
     for (int i=0; i < numCQs; i++) {
       validateCQ(client, "testCQFailOver_" + i, noTest, resultsCnt[i], resultsCnt[i], noTest);
@@ -2700,12 +2706,12 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     VM client = host.getVM(3);
     
     //Killing servers can cause this message on the client side.
-    addExpectedException("Could not find any server");
+    IgnoredException.addIgnoredException("Could not find any server");
     
     createServer(server1);
     
     final int port1 = server1.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     
     final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(2);
     
@@ -2735,7 +2741,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
       executeCQ(client, "testCQHA_" + i, false, null);
     }
     
-    pause(1 * 1000);
+    Wait.pause(1 * 1000);
     
     // CREATE.
     createValues(server1, regions[0], 10);
@@ -2751,7 +2757,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     // Close server1.
     // To maintain the redundancy; it will make connection to endpoint-3.
     closeServer(server1);
-    pause(3 * 1000);
+    Wait.pause(3 * 1000);
     
     // UPDATE-1.
     createValues(server2, regions[0], 10);
@@ -2768,7 +2774,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     
     // Close server-2
     closeServer(server2);
-    pause(2 * 1000);
+    Wait.pause(2 * 1000);
     
     // UPDATE - 2.
     clearCQListenerEvents(client, "testCQHA_0");
@@ -2805,7 +2811,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     createServer(server1);
     
     final int port1 = server1.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     
     final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(1);
     
@@ -2830,7 +2836,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     registerInterestListCQ(client1, regions[0], interestSize, false);
     registerInterestListCQ(client2, regions[0], 0, true);
     
-    pause(1 * 1000);
+    Wait.pause(1 * 1000);
     
     // CREATE.
     createValues(server1, regions[0], 100);
@@ -2842,7 +2848,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     server2.invoke(new CacheSerializableRunnable("Create Cache Server") {
       public void run2() throws CacheException
       {
-        getLogWriter().info("### Create Cache Server. ###");
+        LogWriterUtils.getLogWriter().info("### Create Cache Server. ###");
         AttributesFactory factory = new AttributesFactory();
         factory.setScope(Scope.DISTRIBUTED_ACK);
         factory.setMirrorType(MirrorType.KEYS_VALUES);
@@ -2856,7 +2862,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
           try {
             startBridgeServer(ports[0], true);
           } catch (Exception ex) {
-            fail("While starting CacheServer", ex);
+            Assert.fail("While starting CacheServer", ex);
           }
 
           while(true) {
@@ -2868,7 +2874,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
               }
               break;
             }
-            pause(20);
+            Wait.pause(20);
           }
         } finally {
           InitialImageOperation.slowImageProcessing = 0;
@@ -2877,7 +2883,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     }
     );
 
-    pause(3 * 1000);
+    Wait.pause(3 * 1000);
     
     
     // Check if CQs are registered as part of GII.
@@ -2937,7 +2943,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     createServer(server2);
     
     final int port1 = server1.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     
     final int thePort2 = server2.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
     
@@ -2945,7 +2951,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
       new CacheSerializableRunnable("Create region") {
       public void run2() throws CacheException {
         getCache();
-        addExpectedException("java.net.ConnectException||java.net.SocketException");
+        IgnoredException.addIgnoredException("java.net.ConnectException||java.net.SocketException");
         AttributesFactory regionFactory = new AttributesFactory();
         regionFactory.setScope(Scope.LOCAL);
         
@@ -2980,7 +2986,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     });
     
     
-    pause(2 * 1000);
+    Wait.pause(2 * 1000);
     closeServer(server1);
     closeServer(server2);
   }
@@ -2997,7 +3003,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     createServer(server);
     
     final int thePort = server.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     
     // Create client.
     // createClient(client, thePort, host0);
@@ -3044,7 +3050,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
           getCache().getLogger().info("cqs for region: /root/"+regions[1]+" : "+cq.length);
           assertNotNull("CQservice should not return null for cqs on this region : /root/"+regions[1], cq);
         } catch (Exception cqe) {
-          fail("Failed to getCQService", cqe);
+          Assert.fail("Failed to getCQService", cqe);
         }                
       }
     });
@@ -3072,7 +3078,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
 
     final int thePort = server.invokeInt(CqQueryUsingPoolDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
 
     String poolName = "testQueryWithNULLInWhereClause";
     createPool(client, poolName, host0, thePort);
@@ -3127,7 +3133,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     "Create Cache Server") {
       public void run2() throws CacheException
       {
-        getLogWriter().info("### Create Cache Server. ###");
+        LogWriterUtils.getLogWriter().info("### Create Cache Server. ###");
         
         // Create region with Global scope
         AttributesFactory factory1 = new AttributesFactory();
@@ -3141,16 +3147,16 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
         factory2.setMirrorType(MirrorType.KEYS_VALUES);
         createRegion(regions[1], factory2.createRegionAttributes());
         
-        pause(2000);
+        Wait.pause(2000);
 
         try {
           startBridgeServer(port, true);
         }
 
         catch (Exception ex) {
-          fail("While starting CacheServer", ex);
+          Assert.fail("While starting CacheServer", ex);
         }
-        pause(2000);
+        Wait.pause(2000);
 
       }
     };
@@ -3161,7 +3167,7 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     final int port1 = server1.invokeInt(CqQueryUsingPoolDUnitTest.class,
         "getCacheServerPort");
 
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
 
     final int thePort2 = server2.invokeInt(CqQueryUsingPoolDUnitTest.class,
     "getCacheServerPort");
@@ -3228,14 +3234,14 @@ public class CqQueryUsingPoolDUnitTest extends CacheTestCase {
     server.invoke(new CacheSerializableRunnable("Server Region Entries") {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
-        getLogWriter().info("### Entries in Server :" + region.keys().size());
+        LogWriterUtils.getLogWriter().info("### Entries in Server :" + region.keys().size());
       }
     });
     
     client.invoke(new CacheSerializableRunnable("Client Region Entries") {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
-        getLogWriter().info("### Entries in Client :" + region.keys().size()); 
+        LogWriterUtils.getLogWriter().info("### Entries in Client :" + region.keys().size()); 
       }
     });
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqQueryUsingPoolOptimizedExecuteDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqQueryUsingPoolOptimizedExecuteDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqQueryUsingPoolOptimizedExecuteDUnitTest.java
index 07c1650..cf3dc95 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqQueryUsingPoolOptimizedExecuteDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqQueryUsingPoolOptimizedExecuteDUnitTest.java
@@ -17,6 +17,7 @@
 package com.gemstone.gemfire.cache.query.cq.dunit;
 
 import com.gemstone.gemfire.cache.query.internal.cq.CqServiceImpl;
+import com.gemstone.gemfire.test.dunit.Invoke;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 
 /**
@@ -31,7 +32,7 @@ public class CqQueryUsingPoolOptimizedExecuteDUnitTest extends CqQueryUsingPoolD
 
   public void setUp() throws Exception {
     super.setUp();
-    invokeInEveryVM(new SerializableRunnable("getSystem") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
       public void run() {
         CqServiceImpl.EXECUTE_QUERY_DURING_INIT = false;
       }
@@ -39,12 +40,11 @@ public class CqQueryUsingPoolOptimizedExecuteDUnitTest extends CqQueryUsingPoolD
   }
   
   @Override
-  public void tearDown2() throws Exception {
-    invokeInEveryVM(new SerializableRunnable("getSystem") {
+  protected final void preTearDownCacheTestCase() throws Exception {
+    Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
       public void run() {
         CqServiceImpl.EXECUTE_QUERY_DURING_INIT = true;
       }
     });
-    super.tearDown2();
   }
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqResultSetUsingPoolDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqResultSetUsingPoolDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqResultSetUsingPoolDUnitTest.java
index de349a5..7104947 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqResultSetUsingPoolDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqResultSetUsingPoolDUnitTest.java
@@ -32,9 +32,14 @@ import com.gemstone.gemfire.cache.query.internal.index.IndexManager;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 /**
  * This class tests the ContiunousQuery mechanism in GemFire.
@@ -132,7 +137,7 @@ public class CqResultSetUsingPoolDUnitTest extends CacheTestCase {
     // avoid IllegalStateException from HandShake by connecting all vms tor
     // system before creating ConnectionPools
     getSystem();
-    invokeInEveryVM(new SerializableRunnable("getSystem") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
       public void run() {
         getSystem();
       }
@@ -156,7 +161,7 @@ public class CqResultSetUsingPoolDUnitTest extends CacheTestCase {
 
     final int port = server.invokeInt(CqQueryUsingPoolDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
 
     String poolName = "testCqResults";
     cqDUnitTest.createPool(client, poolName, host0, port);
@@ -211,7 +216,7 @@ public class CqResultSetUsingPoolDUnitTest extends CacheTestCase {
 
     final int port = server.invokeInt(CqQueryUsingPoolDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
 
     String poolName = "testCqResults";
     cqDUnitTest.createPool(client, poolName, host0, port);
@@ -256,7 +261,7 @@ public class CqResultSetUsingPoolDUnitTest extends CacheTestCase {
 
     final int port = server.invokeInt(CqQueryUsingPoolDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
 
     String poolName = "testCqResults";
     cqDUnitTest.createPool(client, poolName, host0, port);
@@ -315,7 +320,7 @@ public class CqResultSetUsingPoolDUnitTest extends CacheTestCase {
     
     final int port = server1.invokeInt(CqQueryUsingPoolDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
 
     String poolName = "testCqResults";
     cqDUnitTest.createPool(client, poolName, host0, port);
@@ -372,7 +377,7 @@ public class CqResultSetUsingPoolDUnitTest extends CacheTestCase {
 
     final int port = server1.invokeInt(CqQueryUsingPoolDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
 
     String poolName = "testCqResults";
     cqDUnitTest.createPool(client, poolName, host0, port);
@@ -421,7 +426,7 @@ public class CqResultSetUsingPoolDUnitTest extends CacheTestCase {
 
     final int port = server1.invokeInt(CqQueryUsingPoolDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
 
     String poolName = "testCqResults";
     cqDUnitTest.createPool(client, poolName, host0, port);
@@ -490,7 +495,7 @@ public class CqResultSetUsingPoolDUnitTest extends CacheTestCase {
 
     final int port = server.invokeInt(CqQueryUsingPoolDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
 
     String poolName = "testCqResults";
     final String cqName = "testCqResultsP_0";
@@ -553,8 +558,8 @@ public class CqResultSetUsingPoolDUnitTest extends CacheTestCase {
         try {
           cqService = ((DefaultQueryService)getCache().getQueryService()).getCqService();
         } catch (Exception ex) {
-          getLogWriter().info("Failed to get the internal CqService.", ex);
-          fail ("Failed to get the internal CqService.", ex);
+          LogWriterUtils.getLogWriter().info("Failed to get the internal CqService.", ex);
+          Assert.fail ("Failed to get the internal CqService.", ex);
         }
         
         // Wait till all the region update is performed.
@@ -576,7 +581,7 @@ public class CqResultSetUsingPoolDUnitTest extends CacheTestCase {
           if (cqQuery.getName().equals(cqName)) {
             int size = cqQuery.getCqResultKeysSize();
             if (size != totalObjects) {
-              getLogWriter().info("The number of Cached events " + size + 
+              LogWriterUtils.getLogWriter().info("The number of Cached events " + size + 
                   " is not equal to the expected size " + totalObjects);
               HashSet expectedKeys = new HashSet();
               for (int i = 1; i < totalObjects; i++) {
@@ -584,7 +589,7 @@ public class CqResultSetUsingPoolDUnitTest extends CacheTestCase {
               }
               Set cachedKeys = cqQuery.getCqResultKeyCache();
               expectedKeys.removeAll(cachedKeys);
-              getLogWriter().info("Missing keys from the Cache : " + expectedKeys);
+              LogWriterUtils.getLogWriter().info("Missing keys from the Cache : " + expectedKeys);
             }
             assertEquals("The number of keys cached for cq " + cqName + " is wrong.", 
                 totalObjects, cqQuery.getCqResultKeysSize());              
@@ -614,7 +619,7 @@ public class CqResultSetUsingPoolDUnitTest extends CacheTestCase {
 
     final int port = server.invokeInt(CqQueryUsingPoolDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
 
     String poolName = "testCqResults";
     final String cqName1 = "testCqResultsP_0";
@@ -682,8 +687,8 @@ public class CqResultSetUsingPoolDUnitTest extends CacheTestCase {
         try {
           cqService = ((DefaultQueryService)getCache().getQueryService()).getCqService();
         } catch (Exception ex) {
-          getLogWriter().info("Failed to get the internal CqService.", ex);
-          fail ("Failed to get the internal CqService.", ex);
+          LogWriterUtils.getLogWriter().info("Failed to get the internal CqService.", ex);
+          Assert.fail ("Failed to get the internal CqService.", ex);
         }
         
         // Wait till all the region update is performed.
@@ -704,7 +709,7 @@ public class CqResultSetUsingPoolDUnitTest extends CacheTestCase {
           ServerCQImpl cqQuery = (ServerCQImpl)cq;
           int size = cqQuery.getCqResultKeysSize();
           if (size != totalObjects) {
-            getLogWriter().info("The number of Cached events " + size + 
+            LogWriterUtils.getLogWriter().info("The number of Cached events " + size + 
                 " is not equal to the expected size " + totalObjects);
             HashSet expectedKeys = new HashSet();
             for (int i = 1; i < totalObjects; i++) {
@@ -712,7 +717,7 @@ public class CqResultSetUsingPoolDUnitTest extends CacheTestCase {
             }
             Set cachedKeys = cqQuery.getCqResultKeyCache();
             expectedKeys.removeAll(cachedKeys);
-            getLogWriter().info("Missing keys from the Cache : " + expectedKeys);
+            LogWriterUtils.getLogWriter().info("Missing keys from the Cache : " + expectedKeys);
           }
           assertEquals("The number of keys cached for cq " + cqQuery.getName() + " is wrong.", 
               totalObjects, cqQuery.getCqResultKeysSize());              
@@ -743,7 +748,7 @@ public class CqResultSetUsingPoolDUnitTest extends CacheTestCase {
     
     final int port = server1.invokeInt(CqQueryUsingPoolDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
 
     String poolName = "testCqResults";
     final String cqName = "testCqResultsP_0";
@@ -806,8 +811,8 @@ public class CqResultSetUsingPoolDUnitTest extends CacheTestCase {
         try {
           cqService = ((DefaultQueryService)getCache().getQueryService()).getCqService();
         } catch (Exception ex) {
-          getLogWriter().info("Failed to get the internal CqService.", ex);
-          fail ("Failed to get the internal CqService.", ex);
+          LogWriterUtils.getLogWriter().info("Failed to get the internal CqService.", ex);
+          Assert.fail ("Failed to get the internal CqService.", ex);
         }
         
         // Wait till all the region update is performed.
@@ -856,7 +861,7 @@ public class CqResultSetUsingPoolDUnitTest extends CacheTestCase {
     
     final int port = server1.invokeInt(CqQueryUsingPoolDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
 
     String poolName = "testCqResults";
     final String cqName = "testCqResultsCachingForDestroyEventsOnPR_0";
@@ -917,8 +922,8 @@ public class CqResultSetUsingPoolDUnitTest extends CacheTestCase {
         try {
           cqService = ((DefaultQueryService)getCache().getQueryService()).getCqService();
         } catch (Exception ex) {
-          getLogWriter().info("Failed to get the internal CqService.", ex);
-          fail ("Failed to get the internal CqService.", ex);
+          LogWriterUtils.getLogWriter().info("Failed to get the internal CqService.", ex);
+          Assert.fail ("Failed to get the internal CqService.", ex);
         }
         
         Collection<? extends InternalCqQuery> cqs = cqService.getAllCqs();
@@ -937,8 +942,8 @@ public class CqResultSetUsingPoolDUnitTest extends CacheTestCase {
         try {
           cqService = ((DefaultQueryService)getCache().getQueryService()).getCqService();
         } catch (Exception ex) {
-          getLogWriter().info("Failed to get the internal CqService.", ex);
-          fail ("Failed to get the internal CqService.", ex);
+          LogWriterUtils.getLogWriter().info("Failed to get the internal CqService.", ex);
+          Assert.fail ("Failed to get the internal CqService.", ex);
         }
         
         Collection<? extends InternalCqQuery> cqs = cqService.getAllCqs();
@@ -973,7 +978,7 @@ public class CqResultSetUsingPoolDUnitTest extends CacheTestCase {
     cqDUnitTest.createServer(server1);
     
     final int port1 = server1.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(1);
     
     String poolName = "testCQFailOver";
@@ -1034,8 +1039,8 @@ public class CqResultSetUsingPoolDUnitTest extends CacheTestCase {
         try {
           cqService = ((DefaultQueryService)getCache().getQueryService()).getCqService();
         } catch (Exception ex) {
-          getLogWriter().info("Failed to get the internal CqService.", ex);
-          fail ("Failed to get the internal CqService.", ex);
+          LogWriterUtils.getLogWriter().info("Failed to get the internal CqService.", ex);
+          Assert.fail ("Failed to get the internal CqService.", ex);
         }
         
         // Wait till all the region update is performed.
@@ -1057,7 +1062,7 @@ public class CqResultSetUsingPoolDUnitTest extends CacheTestCase {
           if (cqQuery.getName().equals(cqName)) {
             int size = cqQuery.getCqResultKeysSize();
             if (size != totalObjects) {
-              getLogWriter().info("The number of Cached events " + size + 
+              LogWriterUtils.getLogWriter().info("The number of Cached events " + size + 
                   " is not equal to the expected size " + totalObjects);
               HashSet expectedKeys = new HashSet();
               for (int i = 1; i < totalObjects; i++) {
@@ -1065,7 +1070,7 @@ public class CqResultSetUsingPoolDUnitTest extends CacheTestCase {
               }
               Set cachedKeys = cqQuery.getCqResultKeyCache();
               expectedKeys.removeAll(cachedKeys);
-              getLogWriter().info("Missing keys from the Cache : " + expectedKeys);
+              LogWriterUtils.getLogWriter().info("Missing keys from the Cache : " + expectedKeys);
             }
             assertEquals("The number of keys cached for cq " + cqName + " is wrong.", 
                 totalObjects, cqQuery.getCqResultKeysSize());              
@@ -1078,11 +1083,11 @@ public class CqResultSetUsingPoolDUnitTest extends CacheTestCase {
     final int thePort2 = server2.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
     System.out.println("### Port on which server1 running : " + port1 + 
         " Server2 running : " + thePort2);
-    pause(3 * 1000);
+    Wait.pause(3 * 1000);
     
     // Close server1 for CQ fail over to server2.
     cqDUnitTest.closeServer(server1); 
-    pause(3 * 1000);
+    Wait.pause(3 * 1000);
     
     // Verify CQ Cache results.
     server2.invoke(new CacheSerializableRunnable("Verify CQ Cache results"){
@@ -1091,8 +1096,8 @@ public class CqResultSetUsingPoolDUnitTest extends CacheTestCase {
         try {
           cqService = ((DefaultQueryService)getCache().getQueryService()).getCqService();
         } catch (Exception ex) {
-          getLogWriter().info("Failed to get the internal CqService.", ex);
-          fail ("Failed to get the internal CqService.", ex);
+          LogWriterUtils.getLogWriter().info("Failed to get the internal CqService.", ex);
+          Assert.fail ("Failed to get the internal CqService.", ex);
         }
         
         // Wait till all the region update is performed.
@@ -1114,7 +1119,7 @@ public class CqResultSetUsingPoolDUnitTest extends CacheTestCase {
           if (cqQuery.getName().equals(cqName)) {
             int size = cqQuery.getCqResultKeysSize();
             if (size != totalObjects) {
-              getLogWriter().info("The number of Cached events " + size + 
+              LogWriterUtils.getLogWriter().info("The number of Cached events " + size + 
                   " is not equal to the expected size " + totalObjects);
               HashSet expectedKeys = new HashSet();
               for (int i = 1; i < totalObjects; i++) {
@@ -1122,7 +1127,7 @@ public class CqResultSetUsingPoolDUnitTest extends CacheTestCase {
               }
               Set cachedKeys = cqQuery.getCqResultKeyCache();
               expectedKeys.removeAll(cachedKeys);
-              getLogWriter().info("Missing keys from the Cache : " + expectedKeys);
+              LogWriterUtils.getLogWriter().info("Missing keys from the Cache : " + expectedKeys);
             }
             assertEquals("The number of keys cached for cq " + cqName + " is wrong.", 
                 totalObjects, cqQuery.getCqResultKeysSize());              

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqResultSetUsingPoolOptimizedExecuteDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqResultSetUsingPoolOptimizedExecuteDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqResultSetUsingPoolOptimizedExecuteDUnitTest.java
index 74351b0..facb3d9 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqResultSetUsingPoolOptimizedExecuteDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqResultSetUsingPoolOptimizedExecuteDUnitTest.java
@@ -30,9 +30,14 @@ import com.gemstone.gemfire.cache.query.internal.cq.ServerCQImpl;
 import com.gemstone.gemfire.cache.query.internal.cq.CqServiceImpl;
 import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 public class CqResultSetUsingPoolOptimizedExecuteDUnitTest extends CqResultSetUsingPoolDUnitTest{
 
@@ -42,7 +47,7 @@ public class CqResultSetUsingPoolOptimizedExecuteDUnitTest extends CqResultSetUs
 
   public void setUp() throws Exception {
     super.setUp();
-    invokeInEveryVM(new SerializableRunnable("getSystem") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
       public void run() {
         CqServiceImpl.EXECUTE_QUERY_DURING_INIT = false;
       }
@@ -50,13 +55,12 @@ public class CqResultSetUsingPoolOptimizedExecuteDUnitTest extends CqResultSetUs
   }
   
   @Override
-  public void tearDown2() throws Exception {
-    invokeInEveryVM(new SerializableRunnable("getSystem") {
+  protected final void preTearDownCacheTestCase() throws Exception {
+    Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
       public void run() {
         CqServiceImpl.EXECUTE_QUERY_DURING_INIT = true;
       }
     });
-    super.tearDown2();
   }
   
   /**
@@ -76,7 +80,7 @@ public class CqResultSetUsingPoolOptimizedExecuteDUnitTest extends CqResultSetUs
     cqDUnitTest.createServer(server1);
     
     final int port1 = server1.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(1);
     
     String poolName = "testCQFailOver";
@@ -137,8 +141,8 @@ public class CqResultSetUsingPoolOptimizedExecuteDUnitTest extends CqResultSetUs
         try {
           CqServiceImpl = (com.gemstone.gemfire.cache.query.internal.cq.CqServiceImpl) ((DefaultQueryService)getCache().getQueryService()).getCqService();
         } catch (Exception ex) {
-          getLogWriter().info("Failed to get the internal CqServiceImpl.", ex);
-          fail ("Failed to get the internal CqServiceImpl.", ex);
+          LogWriterUtils.getLogWriter().info("Failed to get the internal CqServiceImpl.", ex);
+          Assert.fail ("Failed to get the internal CqServiceImpl.", ex);
         }
         
         // Wait till all the region update is performed.
@@ -160,7 +164,7 @@ public class CqResultSetUsingPoolOptimizedExecuteDUnitTest extends CqResultSetUs
           if (cqQuery.getName().equals(cqName)) {
             int size = cqQuery.getCqResultKeysSize();
             if (size != totalObjects) {
-              getLogWriter().info("The number of Cached events " + size + 
+              LogWriterUtils.getLogWriter().info("The number of Cached events " + size + 
                   " is not equal to the expected size " + totalObjects);
               HashSet expectedKeys = new HashSet();
               for (int i = 1; i < totalObjects; i++) {
@@ -168,7 +172,7 @@ public class CqResultSetUsingPoolOptimizedExecuteDUnitTest extends CqResultSetUs
               }
               Set cachedKeys = cqQuery.getCqResultKeyCache();
               expectedKeys.removeAll(cachedKeys);
-              getLogWriter().info("Missing keys from the Cache : " + expectedKeys);
+              LogWriterUtils.getLogWriter().info("Missing keys from the Cache : " + expectedKeys);
             }
             assertEquals("The number of keys cached for cq " + cqName + " is wrong.", 
                 totalObjects, cqQuery.getCqResultKeysSize());              
@@ -181,11 +185,11 @@ public class CqResultSetUsingPoolOptimizedExecuteDUnitTest extends CqResultSetUs
     final int thePort2 = server2.invokeInt(CqQueryUsingPoolDUnitTest.class, "getCacheServerPort");
     System.out.println("### Port on which server1 running : " + port1 + 
         " Server2 running : " + thePort2);
-    pause(3 * 1000);
+    Wait.pause(3 * 1000);
     
     // Close server1 for CQ fail over to server2.
     cqDUnitTest.closeServer(server1); 
-    pause(3 * 1000);
+    Wait.pause(3 * 1000);
     
     // Verify CQ Cache results.
     server2.invoke(new CacheSerializableRunnable("Verify CQ Cache results"){
@@ -194,8 +198,8 @@ public class CqResultSetUsingPoolOptimizedExecuteDUnitTest extends CqResultSetUs
         try {
           CqServiceImpl = (CqServiceImpl) ((DefaultQueryService)getCache().getQueryService()).getCqService();
         } catch (Exception ex) {
-          getLogWriter().info("Failed to get the internal CqServiceImpl.", ex);
-          fail ("Failed to get the internal CqServiceImpl.", ex);
+          LogWriterUtils.getLogWriter().info("Failed to get the internal CqServiceImpl.", ex);
+          Assert.fail ("Failed to get the internal CqServiceImpl.", ex);
         }
         
         // Wait till all the region update is performed.

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqStateDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqStateDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqStateDUnitTest.java
index 6d6213b..7c1e8f2 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqStateDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqStateDUnitTest.java
@@ -22,10 +22,14 @@ import com.gemstone.gemfire.cache.query.CqQuery;
 import com.gemstone.gemfire.cache.query.dunit.HelperTestCase;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.test.dunit.AsyncInvocation;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.IgnoredException;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableCallable;
+import com.gemstone.gemfire.test.dunit.ThreadUtils;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 public class CqStateDUnitTest extends HelperTestCase {
 
@@ -42,8 +46,8 @@ public class CqStateDUnitTest extends HelperTestCase {
   // CI testing.  See internal ticket #52229
   public void disabledtestBug51222() throws Exception {
     //The client can log this when the server shuts down.
-    addExpectedException("Could not find any server");
-    addExpectedException("java.net.ConnectException");
+    IgnoredException.addIgnoredException("Could not find any server");
+    IgnoredException.addIgnoredException("java.net.ConnectException");
     final String cqName = "theCqInQuestion";
     final String regionName = "aattbbss";
     final Host host = Host.getHost(0);
@@ -55,7 +59,7 @@ public class CqStateDUnitTest extends HelperTestCase {
     startCacheServer(serverA, ports[0], getAuthenticatedServerProperties());
     createReplicatedRegion(serverA, regionName, null);
 
-    final String host0 = getServerHostName(serverA.getHost());
+    final String host0 = NetworkUtils.getServerHostName(serverA.getHost());
     startClient(client, new VM[]{ serverA, serverB }, ports, 1, getClientProperties());
     createCQ(client, cqName, "select * from /"+ regionName, null);
     
@@ -65,13 +69,13 @@ public class CqStateDUnitTest extends HelperTestCase {
     startCacheServers(serverB);
     
     AsyncInvocation async = executeCQ(client, cqName);
-    DistributedTestCase.join(async, 10000, getLogWriter());
+    ThreadUtils.join(async, 10000);
 
     Boolean clientRunning = (Boolean) client.invoke(new SerializableCallable() {
       @Override
       public Object call() throws Exception {
         final CqQuery cq = getCache().getQueryService().getCq(cqName);
-        waitForCriterion(new WaitCriterion() {
+        Wait.waitForCriterion(new WaitCriterion() {
           @Override
           public boolean done() {
             return cq.getState().isRunning();


[54/62] [abbrv] incubator-geode git commit: GEODE-557: change test to use port zero

Posted by je...@apache.org.
GEODE-557: change test to use port zero

* Set the cache server port to zero so that the system picks up a random port instead of using AvailablePortHelper.
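
A minimal sketch of the port-zero pattern this change relies on (illustrative only, not code from the commit; the class name is hypothetical, and it assumes that CacheServer.getPort() reports the actual bound port once start() has returned):

    // Sketch: start a cache server on an ephemeral port and read back the chosen port.
    import com.gemstone.gemfire.cache.Cache;
    import com.gemstone.gemfire.cache.CacheFactory;
    import com.gemstone.gemfire.cache.server.CacheServer;

    public class EphemeralPortSketch {
      public static void main(String[] args) throws Exception {
        Cache cache = new CacheFactory().set("mcast-port", "0").create();
        CacheServer server = cache.addCacheServer();
        server.setPort(0);                  // 0 = let the system choose a free port
        server.start();
        int boundPort = server.getPort();   // assumed: after start(), this is the port actually in use
        System.out.println("Cache server listening on port " + boundPort);
        cache.close();
      }
    }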

This closes #92


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/46643253
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/46643253
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/46643253

Branch: refs/heads/feature/GEODE-17
Commit: 466432538391a22942ab9f464354f6365b598a77
Parents: 8db793d
Author: Sai Boorlagadda <sb...@pivotal.io>
Authored: Tue Feb 9 09:58:29 2016 -0800
Committer: Darrel Schneider <ds...@pivotal.io>
Committed: Tue Feb 9 16:05:13 2016 -0800

----------------------------------------------------------------------
 .../cache/management/MemoryThresholdsOffHeapDUnitTest.java   | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/46643253/gemfire-core/src/test/java/com/gemstone/gemfire/cache/management/MemoryThresholdsOffHeapDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/management/MemoryThresholdsOffHeapDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/management/MemoryThresholdsOffHeapDUnitTest.java
index 5c65b1a..532a239 100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/management/MemoryThresholdsOffHeapDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/management/MemoryThresholdsOffHeapDUnitTest.java
@@ -436,14 +436,12 @@ public class MemoryThresholdsOffHeapDUnitTest extends ClientServerTestCase {
     final VM server1 = host.getVM(0);
     final VM server2 = host.getVM(1);
 
-    final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(2);
-    final int port1 = ports[0];
-    final int port2 = ports[1];
     final String regionName = "offHeapDRRemotePutRejection";
 
-    startCacheServer(server1, port1, 0f, 0f,
+    //set port to 0 in-order for system to pickup a random port.
+    startCacheServer(server1, 0, 0f, 0f,
         regionName, false/*createPR*/, false/*notifyBySubscription*/, 0);
-    startCacheServer(server2, port2, 0f, 90f,
+    startCacheServer(server2, 0, 0f, 90f,
         regionName, false/*createPR*/, false/*notifyBySubscription*/, 0);
 
     registerTestMemoryThresholdListener(server1);


[53/62] [abbrv] incubator-geode git commit: GEODE-730: Optimize single filter join queries

Posted by je...@apache.org.
GEODE-730: Optimize single filter join queries

When executing a join query with additional filters on a single region, we now detect this scenario and,
instead of creating a CompositeGroupJunction, we create a regular GroupJunction.  When we begin cutdown
and expansion, we then create new compiled comparisons so that we can do an index lookup.

For example "select * from /region1 a, /region2 b where a.name = "joe" and a.id = b.id"
We will now execute the a.name first assuming an index is present on a.name

During cutdown and expansion, we determine that the comparison a.id = b.id can create a new compiled comparison
because the alias a from a.id matches our original filter a.name.  We can evaluate a.id at this point; say id
evaluates to 8.  So we create a compiled comparison of b.id = 8.  We can now do a lookup using the index on b.id.
We retrieve these results and place them into a map.  This map is then used to derive additional joins
if they exist, such as b.id = c.id, and so on.

We can continue with the iteration, but instead of iterating over the entire b region, we now iterate over the derived results.

This also includes a fix to not unlock an index lock when reevaluating an inner query.
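
A minimal sketch of the query shape this optimization targets, written against the public QueryService API rather than the internal junction classes changed in this commit (illustrative only; the region names, field names, and index names are hypothetical and mirror the example above):

    // Sketch: a single-region filter combined with an equi-join across two regions.
    import com.gemstone.gemfire.cache.Cache;
    import com.gemstone.gemfire.cache.query.Query;
    import com.gemstone.gemfire.cache.query.QueryService;
    import com.gemstone.gemfire.cache.query.SelectResults;

    public class EquijoinQuerySketch {
      public static SelectResults run(Cache cache) throws Exception {
        QueryService qs = cache.getQueryService();
        qs.createIndex("nameIndex", "a.name", "/region1 a"); // index backing the a.name filter
        qs.createIndex("idIndex", "b.id", "/region2 b");     // index used for the derived b.id lookup
        Query query = qs.newQuery(
            "select * from /region1 a, /region2 b where a.name = 'joe' and a.id = b.id");
        return (SelectResults) query.execute();
      }
    }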


Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/d232e259
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/d232e259
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/d232e259

Branch: refs/heads/feature/GEODE-17
Commit: d232e25947a8e223f35feb1feffe3150875dc5f5
Parents: 820cfd6
Author: Jason Huynh <hu...@gmail.com>
Authored: Tue Jan 26 10:09:25 2016 -0800
Committer: Jason Huynh <hu...@gmail.com>
Committed: Tue Feb 9 10:57:35 2016 -0800

----------------------------------------------------------------------
 .../query/internal/CompiledComparison.java      |   4 +-
 .../cache/query/internal/CompiledJunction.java  |  94 +++-
 .../cache/query/internal/CompiledSelect.java    |  22 +-
 .../cache/query/internal/DerivedInfo.java       | 306 +++++++++++++
 .../cache/query/internal/GroupJunction.java     |   5 +-
 .../cache/query/internal/QueryUtils.java        |  61 ++-
 .../query/internal/index/IndexManager.java      |   4 +
 .../query/internal/index/EquijoinDUnitTest.java | 437 +++++++++++++++++++
 .../PartitionedRegionEquijoinDUnitTest.java     | 130 ++++++
 9 files changed, 1022 insertions(+), 41 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/d232e259/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/CompiledComparison.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/CompiledComparison.java b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/CompiledComparison.java
index 75eaaad..9084351 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/CompiledComparison.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/CompiledComparison.java
@@ -52,8 +52,8 @@ public class CompiledComparison extends AbstractCompiledValue implements
     Negatable, OQLLexerTokenTypes, Indexable {
 
   // persistent inst vars
-  private final CompiledValue _left;
-  private final CompiledValue _right;
+  public final CompiledValue _left;
+  public final CompiledValue _right;
   private int _operator;
 
   // List groupRuntimeItrs = null;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/d232e259/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/CompiledJunction.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/CompiledJunction.java b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/CompiledJunction.java
index 6bc68de..902aecd 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/CompiledJunction.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/CompiledJunction.java
@@ -16,11 +16,28 @@
  */
 package com.gemstone.gemfire.cache.query.internal;
 
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
 
 import com.gemstone.gemfire.InternalGemFireError;
 import com.gemstone.gemfire.cache.EntryDestroyedException;
-import com.gemstone.gemfire.cache.query.*;
+import com.gemstone.gemfire.cache.query.AmbiguousNameException;
+import com.gemstone.gemfire.cache.query.FunctionDomainException;
+import com.gemstone.gemfire.cache.query.NameResolutionException;
+import com.gemstone.gemfire.cache.query.QueryInvocationTargetException;
+import com.gemstone.gemfire.cache.query.QueryService;
+import com.gemstone.gemfire.cache.query.SelectResults;
+import com.gemstone.gemfire.cache.query.Struct;
+import com.gemstone.gemfire.cache.query.TypeMismatchException;
+import com.gemstone.gemfire.cache.query.internal.index.IndexManager;
 import com.gemstone.gemfire.cache.query.internal.index.IndexProtocol;
 import com.gemstone.gemfire.cache.query.internal.parse.OQLLexerTokenTypes;
 import com.gemstone.gemfire.cache.query.internal.types.StructTypeImpl;
@@ -116,7 +133,12 @@ public class CompiledJunction extends AbstractCompiledValue implements
   private int _operator = 0;
   private List unevaluatedFilterOperands = null; 
   
-
+  //A token to place into the samesort map.  This is to let the engine know there is more than one index
+  //being used for this junction but allows actual operands to form range junctions if enough exist. 
+  //The mechanism checks to see if the mapped object is an integer, if so, it increments, if it's not it sets as 1
+  //Because we are a string place holder, the next actual operand would just start at one.  If the join is added
+  //after a valid operand has already set the counter to an integer, we instead just ignore and do not set the place holder
+  private final static String PLACEHOLDER_FOR_JOIN = "join";  
   CompiledJunction(CompiledValue[] operands, int operator) {
     // invariant: operator must be LITERAL_and or LITERAL_or
     // invariant: at least two operands
@@ -502,6 +524,7 @@ public class CompiledJunction extends AbstractCompiledValue implements
     Map iterToOperands = new HashMap();
     CompiledValue operand = null;
     boolean isJunctionNeeded = false;
+    boolean indexExistsOnNonJoinOp = false;
 
     for (int i = 0; i < _operands.length; i++) {
       // Asif : If we are inside this function this itself indicates
@@ -554,8 +577,9 @@ public class CompiledJunction extends AbstractCompiledValue implements
         }
         
         for (CompiledValue expndOperand : expandedOperands) {
+          boolean operandEvalAsFilter = expndOperand.getPlanInfo(context).evalAsFilter;
           isJunctionNeeded = isJunctionNeeded
-              || expndOperand.getPlanInfo(context).evalAsFilter;
+              || operandEvalAsFilter;
           Set set = QueryUtils.getCurrentScopeUltimateRuntimeIteratorsIfAny(
               expndOperand, context);
           if (set.size() != 1) {
@@ -563,7 +587,7 @@ public class CompiledJunction extends AbstractCompiledValue implements
             // than 1, will mean a composite condition. For a Composite
             // Condition which is filter evaluable that means necessarily that
             // RHS is dependent on one independent iterator & LHS on the other.
-            if ((expndOperand.getPlanInfo(context).evalAsFilter)) {
+            if (operandEvalAsFilter) {
               Support.Assert(set.size() == 2,
                   " The no of independent iterators should be equal to 2");
               compositeFilterOpsMap.put(expndOperand, set);
@@ -582,6 +606,9 @@ public class CompiledJunction extends AbstractCompiledValue implements
               operandsList = new ArrayList();
               iterToOperands.put(rIter, operandsList);
             }
+            if (operandEvalAsFilter && _operator == LITERAL_and) {
+              indexExistsOnNonJoinOp = true;
+            }
             operandsList.add(expndOperand);
           }
         }
@@ -606,7 +633,7 @@ public class CompiledJunction extends AbstractCompiledValue implements
       // There exists at least one condition which must have an index available.
       Filter junction = createJunction(compositeIterOperands,
           compositeFilterOpsMap, iterToOperands, context, indexCount,
-          evalOperands);
+          evalOperands, indexExistsOnNonJoinOp);
       // Asif Ensure that independent operands are always at the start
       evalOperands.add(indexCount++, junction);
     }
@@ -774,11 +801,15 @@ public class CompiledJunction extends AbstractCompiledValue implements
       //TODO:Do not club Like predicate in an existing range
       if (evalAsFilter ) {
         indx = ((Indexable)tempOp).getIndexInfo(context);
-        Assert.assertTrue(indx.length == 1,
-            "There should have been just one index for the condition");
-        listOrPosition = sameIndexOperands.get(indx[0]._index);
+        //We are now sorting these for joins, therefore we need to weed out the join indexes
+        if (!IndexManager.JOIN_OPTIMIZATION || indx.length == 1) {
+          Assert.assertTrue(indx.length == 1,
+              "There should have been just one index for the condition");
+          listOrPosition = sameIndexOperands.get(indx[0]._index);
+        }
       }
-      if (listOrPosition != null) {
+    
+     if (listOrPosition != null) {
         if (listOrPosition instanceof Integer) {
           int position = ((Integer)listOrPosition).intValue();
           List operands = new ArrayList(size);
@@ -788,16 +819,31 @@ public class CompiledJunction extends AbstractCompiledValue implements
           sameIndexOperands.put(indx[0]._index, operands);
           needsCompacting = true;
         }
-        else {
+        else if (listOrPosition instanceof List){
           List operands = (List)listOrPosition;
           operands.add(tempOp);
         }
-      }
-      else {
+        else {
+          //a join was present here, let's now occupy that spot and remove the placeholder
+          listOrPosition = null;
+        }
+     }
+      if (listOrPosition == null) {
         cv[i] = tempOp;
-        // TODO: Enable only for AND junction for now
-        if (evalAsFilter && this._operator == OQLLexerTokenTypes.LITERAL_and) {
-          sameIndexOperands.put(indx[0]._index, Integer.valueOf(i));
+        if (indx != null && indx.length == 1) {
+          // TODO: Enable only for AND junction for now
+          if (evalAsFilter && this._operator == OQLLexerTokenTypes.LITERAL_and) {
+            sameIndexOperands.put(indx[0]._index, Integer.valueOf(i));
+          }
+        } else if (indx != null && indx.length == 2) {
+          if (evalAsFilter && this._operator == OQLLexerTokenTypes.LITERAL_and) {
+            if (!sameIndexOperands.containsKey(indx[0]._index)) {
+              sameIndexOperands.put(indx[0]._index, PLACEHOLDER_FOR_JOIN);
+            }
+            if (!sameIndexOperands.containsKey(indx[1]._index)) {
+              sameIndexOperands.put(indx[1]._index, PLACEHOLDER_FOR_JOIN);
+            }
+          }
         }
       }
     }
@@ -805,10 +851,10 @@ public class CompiledJunction extends AbstractCompiledValue implements
   }
   //This is called only if the CompiledJunction was either independent or filter evaluable.
   public int getSizeEstimate(ExecutionContext context)throws FunctionDomainException, TypeMismatchException, NameResolutionException, QueryInvocationTargetException  {
-    if( this.isDependentOnCurrentScope(context)) {	  
-	return Integer.MAX_VALUE;
+    if( this.isDependentOnCurrentScope(context)) {    
+  return Integer.MAX_VALUE;
     }else {
-    	return 0;
+      return 0;
     }
   }
 
@@ -816,7 +862,7 @@ public class CompiledJunction extends AbstractCompiledValue implements
   // Lists
   private Filter createJunction(List compositeIterOperands,
       Map compositeFilterOpsMap, Map iterToOperands, ExecutionContext context,
-      int indexCount, List evalOperands) throws FunctionDomainException,
+      int indexCount, List evalOperands, boolean indexExistsOnNonJoinOp) throws FunctionDomainException,
       TypeMismatchException, NameResolutionException,
       QueryInvocationTargetException {
     Support.Assert(!(iterToOperands.isEmpty() && compositeFilterOpsMap
@@ -825,7 +871,13 @@ public class CompiledJunction extends AbstractCompiledValue implements
     CompiledValue junction = null;
     int size;
     /*---------- Create only a  GroupJunction */
-    if (iterToOperands.size() == 1 && compositeFilterOpsMap.isEmpty()) {
+    if (iterToOperands.size() == 1 && (compositeFilterOpsMap.isEmpty()
+        || (indexExistsOnNonJoinOp && IndexManager.JOIN_OPTIMIZATION))) {
+      if ((indexExistsOnNonJoinOp && IndexManager.JOIN_OPTIMIZATION)) {
+        // For the optimization we will want to add the compositeFilterOpsMap 848
+        // without the optimization we only fall into here if it's empty anyways, but have not tested the removal of this if clause
+        evalOperands.addAll(compositeFilterOpsMap.keySet());
+      }
       // Asif :Create only a GroupJunction. The composite conditions can be
       // evaluated as iter operands inside GroupJunction.
       Map.Entry entry = (Map.Entry) iterToOperands.entrySet().iterator().next();

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/d232e259/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/CompiledSelect.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/CompiledSelect.java b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/CompiledSelect.java
index 205acaa..ec7c51e 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/CompiledSelect.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/CompiledSelect.java
@@ -82,6 +82,19 @@ public class CompiledSelect extends AbstractCompiledValue {
 
   //used as a key in a context to identify the scope of this CompiledSelect 
   private Object scopeID = new Object(); 
+  
+  /*
+   * Set in context for the where clause to signify that it has been evaluated at least one time
+   * for any other CompiledValue that may use precalculated indexes
+   * we want to mark this as Evaluated so that we don't unlock locks
+   * that don't belong to this iteration of evaluate.
+   * This is similar to how CompiledComparisons store their IndexInfo in the context
+   * but for example a CompiledJunction that uses 2 Comparisons
+   * would have unlocked the readlocks because we check to see if the clause has a mapped value
+   * in the context. Because CompiledJunctions did not, we unlocked the read locks.
+   * Now we set a value so that it will not do this. See where we use this value to see how unlock is determined
+   */
+  private final static String CLAUSE_EVALUATED = "Evaluated";
 
   public CompiledSelect(boolean distinct, boolean count, CompiledValue whereClause,
                         List iterators, List projAttrs,List<CompiledSortCriterion> orderByAttrs, CompiledValue limit,
@@ -464,7 +477,7 @@ public class CompiledSelect extends AbstractCompiledValue {
           }
           boolean unlock = true;
           Object obj = context.cacheGet(this.whereClause);
-          if(obj != null && obj instanceof IndexInfo[]) {
+          if(obj != null && (obj instanceof IndexInfo[] || obj.equals(CLAUSE_EVALUATED))) {
             // if indexinfo is cached means the read lock 
             // is not being taken this time, so releasing 
             // the lock is not required
@@ -473,6 +486,9 @@ public class CompiledSelect extends AbstractCompiledValue {
           // see if we should evaluate as filters,
           // and count how many actual index lookups will be performed
           PlanInfo planInfo = this.whereClause.getPlanInfo(context);
+          if (context.cacheGet(this.whereClause) == null) {
+            context.cachePut(this.whereClause, CLAUSE_EVALUATED);
+          }
           try {
             evalAsFilters = planInfo.evalAsFilter;
             // let context know if there is exactly one index lookup
@@ -1089,10 +1105,10 @@ public class CompiledSelect extends AbstractCompiledValue {
     if (elementType.isStructType()) {
       if (isSorted) { // sorted struct
         return prepareEmptySortedStructSet((StructTypeImpl)elementType);
-	}
+  }
       else { // unsorted struct
         return new StructBag((StructType)elementType,  context.getCachePerfStats());
-	}
+  }
     }
     else { // non-struct
       if (isSorted) { // sorted non-struct

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/d232e259/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/DerivedInfo.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/DerivedInfo.java b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/DerivedInfo.java
new file mode 100644
index 0000000..a934524
--- /dev/null
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/DerivedInfo.java
@@ -0,0 +1,306 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.gemstone.gemfire.cache.query.internal;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import com.gemstone.gemfire.cache.query.FunctionDomainException;
+import com.gemstone.gemfire.cache.query.Index;
+import com.gemstone.gemfire.cache.query.NameResolutionException;
+import com.gemstone.gemfire.cache.query.QueryInvocationTargetException;
+import com.gemstone.gemfire.cache.query.SelectResults;
+import com.gemstone.gemfire.cache.query.Struct;
+import com.gemstone.gemfire.cache.query.TypeMismatchException;
+import com.gemstone.gemfire.cache.query.internal.index.AbstractIndex;
+import com.gemstone.gemfire.cache.query.internal.index.IndexProtocol;
+import com.gemstone.gemfire.cache.query.internal.index.PartitionedIndex;
+import com.gemstone.gemfire.cache.query.internal.parse.OQLLexerTokenTypes;
+import com.gemstone.gemfire.cache.query.types.CollectionType;
+import com.gemstone.gemfire.cache.query.types.ObjectType;
+
+public class DerivedInfo {
+  public Map<String, SelectResults> derivedResults;
+  public List<Object[]> newDerivatives;
+  public List successfulOps = new LinkedList();
+  public List originalOps;
+  public CompiledValue currentOp;
+  private List expansionList;
+
+  public DerivedInfo() {
+    derivedResults = new HashMap<String, SelectResults>();
+    newDerivatives = new ArrayList<Object[]>();
+  }
+
+  public List getExpansionList() {
+    return expansionList;
+  }
+
+  public void setExpansionList(List expansionList) {
+    this.expansionList = expansionList;
+  }
+
+  public void setOriginalOps(List opsList) {
+    originalOps = new LinkedList(opsList);
+  }
+
+  public List getRemainingOps() {
+    List remainingOps = new LinkedList(originalOps);
+    remainingOps.removeAll(successfulOps);
+    return remainingOps;
+  }
+
+  public void addDerivedResults(IndexInfo indexInfo, SelectResults sr) {
+    IndexProtocol index = indexInfo._index;
+    String key = QueryUtils.getCompiledIdFromPath(indexInfo._path).getId() + ":" + index.getCanonicalizedIteratorDefinitions()[0];
+    // String key = index.getCanonicalizedIteratorDefinitions()[0];
+    if (derivedResults.containsKey(key)) {
+      derivedResults.get(key).addAll(sr);
+    } else {
+      derivedResults.put(key, sr);
+    }
+    newDerivatives.add(new Object[] { QueryUtils.getCompiledIdFromPath(indexInfo._path).getId(), sr });
+    successfulOps.add(currentOp);
+  }
+
+  public void addDerivedResults(IndexInfo indexInfo, SelectResults[] srs) {
+    addDerivedResults(indexInfo, srs[0]);
+    // Nested / range index is not supported at this time due to the way we cross the results
+    // This solution would have duplicates. The problem is the way we do the nested iteration: the map would
+    // associate all values with the current nested-level object, which is not what the values represent
+    // IndexProtocol index = indexInfo._index;
+    // String[] definitions = index.getCanonicalizedIteratorDefinitions();
+    // for (int i = 0 ; i < definitions.length; i++) {
+    // String key = QueryUtils.getCompiledIdFromPath(indexInfo._path).getId() + ":" + definitions[i];
+    // if (derivedResults.containsKey(key)) {
+    // derivedResults.get(key).addAll(srs[i]);
+    // }
+    // else {
+    // derivedResults.put(key, srs[i]);
+    // }
+    // }
+    //
+    // int indexToIterateOn = QueryUtils.figureOutWhichStructIndexToExtract(index);
+    // newDerivatives.add(new Object[]{getCompiledIdFromPath(indexInfo._path).getId(), srs[indexToIterateOn]});
+    // successfulOps.add(currentOp);
+
+  }
+
+  public void computeDerivedJoinResults(IndexInfo theCallingIndex, ExecutionContext context, CompiledValue iterOps) throws FunctionDomainException, TypeMismatchException, NameResolutionException,
+      QueryInvocationTargetException {
+    // Call this computeDerivedResults()
+    // We are looking for join conditions so we can filter eval instead of iterate eval
+    // Then we can apply the rest of the ops on the results
+    if (theCallingIndex != null && iterOps != null) {
+      if (iterOps instanceof CompiledJunction) {
+        List opsList = ((CompiledJunction) iterOps).getOperands();
+        this.setOriginalOps(opsList);
+        createDerivedJoinResultsFromOpsList((QueryUtils.getCompiledIdFromPath(theCallingIndex._path)).getId(), context, opsList);
+      } else if (iterOps.getType() == CompiledValue.COMPARISON) {
+        createDerivedJoinResultsFromCC((QueryUtils.getCompiledIdFromPath(theCallingIndex._path)).getId(), (CompiledComparison) iterOps, context);
+      }
+    }
+  }
+
+  private void createDerivedJoinResultsFromOpsList(String theCallingIndexId, ExecutionContext context, List opsList) throws FunctionDomainException, TypeMismatchException, NameResolutionException,
+      QueryInvocationTargetException {
+    Iterator iter = opsList.iterator();
+    while (iter.hasNext()) {
+      CompiledValue cv = (CompiledValue) iter.next();
+      this.currentOp = cv;
+
+      if (cv.getType() == CompiledValue.COMPARISON) {
+        createDerivedJoinResultsFromCC(theCallingIndexId, (CompiledComparison) cv, context);
+      }
+    }
+    // Now let's derive from our derivatives (for multiple join clauses that can be chained, such as a.id = 1 and a.id = b.id and b.id = c.id)
+    List<Object[]> newDerivatives = new ArrayList<Object[]>(this.newDerivatives);
+    this.newDerivatives.clear();
+    if (newDerivatives.size() > 0) {
+      Iterator<Object[]> iterator = newDerivatives.iterator();
+      while (iterator.hasNext()) {
+        Object[] idDerivedAndResults = iterator.next();
+        derivedDerivative(idDerivedAndResults, context, this.getExpansionList());
+      }
+    }
+  }
+
+  private void derivedDerivative(Object[] idDerivedAndResults, ExecutionContext context, List expansionList) throws FunctionDomainException, TypeMismatchException, NameResolutionException,
+      QueryInvocationTargetException {
+
+    String idDerived = (String) idDerivedAndResults[0];
+    SelectResults results = (SelectResults) idDerivedAndResults[1];
+    RuntimeIterator ritr = getMatchingRuntimeIterator(idDerived, expansionList);
+    List remainingOps = this.getRemainingOps();
+    Iterator iterator = results.iterator();
+    while (iterator.hasNext()) {
+      Object val = iterator.next();
+      ritr.setCurrent(val);
+      createDerivedJoinResultsFromOpsList(idDerived, context, remainingOps);
+    }
+
+  }
+
+  private RuntimeIterator getMatchingRuntimeIterator(String receiverId, List expansionList) throws QueryInvocationTargetException {
+    Iterator iterator = expansionList.iterator();
+    while (iterator.hasNext()) {
+      RuntimeIterator ritr = (RuntimeIterator) iterator.next();
+      if (ritr.getCmpIteratorDefn().getName().equals(receiverId)) {
+        return ritr;
+      }
+    }
+    throw new QueryInvocationTargetException("Unable to locate correct iterator for " + receiverId);
+  }
+
+  /*
+   Example query : "Select * from /region1 r, /region2 s where r.id = 1 and r.id = s.id"
+   Up until this point we have evaluated the r.id portion
+   We determine if the path (r) matches any of the paths in the current cc (r.id = s.id).
+   If so we figure out which side it matches (in this case the left side) and create a new compiled comparison.
+   This new cc will set the left side as s.id and the right side as the evaluated value; in this case it happens to be 1, but
+   it could be another field from the object instead.
+   */
+  private void createDerivedJoinResultsFromCC(String theCallingIndexReceiverId, CompiledComparison cc, ExecutionContext context) throws FunctionDomainException, TypeMismatchException,
+      NameResolutionException, QueryInvocationTargetException {
+    if (isCompiledPath(cc._right) && matchingPathIds(theCallingIndexReceiverId, cc._left)) {
+      evaluateDerivedJoin(context, cc._right, new CompiledLiteral(cc._left.evaluate(context)), cc.getOperator());
+    } else if (isCompiledPath(cc._left) && matchingPathIds(theCallingIndexReceiverId, cc._right)) {
+      evaluateDerivedJoin(context, cc._left, new CompiledLiteral(cc._right.evaluate(context)), cc.getOperator());
+    }
+  }
+ 
+  /*
+   Called by createDerivedJoinResultsFromCC
+   Creates the new cc, executes it and releases any newly obtained index locks
+  */ 
+  private void evaluateDerivedJoin(ExecutionContext context, CompiledValue newLeftSide, CompiledValue newRightSide, int operator) 
+   throws TypeMismatchException, FunctionDomainException, NameResolutionException, QueryInvocationTargetException {
+   CompiledComparison dcc = createDerivedJoin(context, newLeftSide, newRightSide, operator);
+   IndexInfo[] indexInfos = (IndexInfo[]) dcc.getIndexInfo(context);
+   try {
+     if (indexInfos != null && isValidIndexTypeToDerive(indexInfos[0]._getIndex())) {
+       populateDerivedResultsFromDerivedJoin(context, dcc, indexInfos[0]);
+     }
+   } finally {
+     if (indexInfos != null) {
+       Index index = (Index) indexInfos[0]._index;
+       Index prIndex = ((AbstractIndex) index).getPRIndex();
+       if (prIndex != null) {
+         ((PartitionedIndex) prIndex).releaseIndexReadLockForRemove();
+       } else {
+         ((AbstractIndex) index).releaseIndexReadLockForRemove();
+       }
+     }
+   }
+  }
+
+  /*
+   Does the evaluation/execution of the cc and stores the results into our map
+   We prevent limit and order by from being applied by the index at this time because we do not want those applied here.
+   We have no idea what the other operands are and do not want to limit results, as the first X results may not
+   fulfill all operands.
+   */
+  private void populateDerivedResultsFromDerivedJoin(ExecutionContext context, CompiledComparison dcc, IndexInfo indexInfo) throws FunctionDomainException, TypeMismatchException,
+      NameResolutionException, QueryInvocationTargetException {
+    // overwrite context values to disable limit, order by, etc. that should not be applied by a derived join
+    // If we apply the limit at this point, we cannot guarantee that, after we iterate, we do not continue to
+    // reduce the count below the limited amount
+    Boolean originalCanApplyLimit = (Boolean) context.cacheGet(CompiledValue.CAN_APPLY_LIMIT_AT_INDEX);
+    context.cachePut(CompiledValue.CAN_APPLY_LIMIT_AT_INDEX, Boolean.FALSE);
+    Boolean originalCanApplyOrderBy = (Boolean) context.cacheGet(CompiledValue.CAN_APPLY_ORDER_BY_AT_INDEX);
+    context.cachePut(CompiledValue.CAN_APPLY_ORDER_BY_AT_INDEX, Boolean.FALSE);
+
+    SelectResults sr = dcc.filterEvaluate(context, null, false, null, null, false, false, false);
+
+    context.cachePut(CompiledValue.CAN_APPLY_LIMIT_AT_INDEX, originalCanApplyLimit);
+    context.cachePut(CompiledValue.CAN_APPLY_ORDER_BY_AT_INDEX, originalCanApplyOrderBy);
+    ObjectType ot = indexInfo._index.getResultSetType();
+    //The following if block is not currently used other than the else branch
+    //This would be needed once we figure out how to handle nested object indexes (range, map, etc)
+    //The issue we have right now with these indexes is that the results come back as a tuple; if we use those as is, we end up
+    //reusing the evaluated values even if they did not come from the top level object, leading to duplicate results or incorrect tupling
+    if (ot.isStructType()) {
+      //createObjectResultsFromStructResults(indexInfo, sr);
+    } else if (ot.isMapType()) {
+
+    } else if (ot.isCollectionType()) {
+
+    } else {
+      this.addDerivedResults(dcc.getIndexInfo(context)[0], sr);
+    }
+  }
+
+  //Not used at this time.  Left over from an attempt to speed up Range Indexes
+  /*
+  private void createObjectResultsFromStructResults(IndexInfo indexInfo, SelectResults sr) {
+    Iterator srIterator = sr.iterator();
+    SelectResults[] newSrs = null;
+
+    while (srIterator.hasNext()) {
+      Struct struct = (Struct) srIterator.next();
+      Object[] fieldValues = struct.getFieldValues();
+      int structLength = struct.getFieldValues().length;
+      if (newSrs == null) {
+        newSrs = new FakeSelectResults[structLength];
+        for (int x = 0; x < structLength; x++) {
+          newSrs[x] = new FakeSelectResults();
+        }
+      }
+      for (int i = 0; i < structLength; i++) {
+        newSrs[i].add(fieldValues[i]);
+      }
+    }
+
+    if (newSrs != null) {
+      this.addDerivedResults(indexInfo, newSrs);
+    }
+  }
+  */
+
+  private boolean isValidIndexTypeToDerive(IndexProtocol index) {
+    ObjectType type = index.getResultSetType();
+    return !(type.isCollectionType() || type.isMapType() || type.isStructType());
+  }
+
+  private CompiledComparison createDerivedJoin(ExecutionContext context, CompiledValue newLeft, CompiledValue newRight, int op)
+    throws TypeMismatchException, NameResolutionException {
+    CompiledComparison cc = new CompiledComparison(newLeft, newRight, op);
+    cc.computeDependencies(context);
+    return cc;
+  }
+
+  //Given a compiled value, we check to see if the receiver id of a CompiledPath matches the receiverId passed in
+  private boolean matchingPathIds(String receiverId, CompiledValue cv) {
+    if (isCompiledPath(cv)) {
+      CompiledPath path = (CompiledPath)cv;
+      return receiverId.equals(QueryUtils.getCompiledIdFromPath(path).getId());
+    }
+    return false;
+  }
+
+  private boolean isCompiledPath(CompiledValue cv) {
+    return cv.getType() == CompiledValue.PATH;
+  }
+
+}
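
A minimal sketch of the query shape this new DerivedInfo path targets. It is not part of the commit: the query and data mirror the EquijoinDUnitTest added later in this change, and the Value class, loner-cache properties and region names are illustrative assumptions. Once c.pkid = 1 has been filter-evaluated through an index, the join operand c.pkid = s.pkid can be rewritten per matched value into s.pkid = <value> and filter-evaluated against the index on /region2 instead of iterating the whole region.

import com.gemstone.gemfire.cache.Cache;
import com.gemstone.gemfire.cache.CacheFactory;
import com.gemstone.gemfire.cache.Region;
import com.gemstone.gemfire.cache.RegionShortcut;
import com.gemstone.gemfire.cache.query.QueryService;
import com.gemstone.gemfire.cache.query.SelectResults;

public class DerivedJoinSketch {
  // Illustrative value type; any object with a public pkid field would do.
  public static class Value implements java.io.Serializable {
    public int pkid;
    public Value(int pkid) { this.pkid = pkid; }
  }

  public static void main(String[] args) throws Exception {
    // Stand-alone (loner) cache so the sketch runs without a locator.
    Cache cache = new CacheFactory().set("mcast-port", "0").set("locators", "").create();
    Region<Integer, Value> r1 = cache.<Integer, Value>createRegionFactory(RegionShortcut.REPLICATE).create("region1");
    Region<Integer, Value> r2 = cache.<Integer, Value>createRegionFactory(RegionShortcut.REPLICATE).create("region2");
    for (int i = 0; i < 100; i++) {
      r1.put(i, new Value(i));
      r2.put(i, new Value(i));
    }

    QueryService qs = cache.getQueryService();
    qs.createIndex("r1pkid", "c.pkid", "/region1 c");  // backs the c.pkid = 1 filter
    qs.createIndex("r2pkid", "s.pkid", "/region2 s");  // usable by the derived comparison s.pkid = <value>

    SelectResults joined = (SelectResults) qs.newQuery(
        "select * from /region1 c, /region2 s where c.pkid = 1 and c.pkid = s.pkid").execute();
    System.out.println("joined size = " + joined.size());  // 1 with this data set
    cache.close();
  }
}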

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/d232e259/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/GroupJunction.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/GroupJunction.java b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/GroupJunction.java
index 37c13f2..6918436 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/GroupJunction.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/GroupJunction.java
@@ -132,7 +132,10 @@ public class GroupJunction extends AbstractGroupOrRangeJunction {
          // either true or false for an AND junction but always false for an
           // OR Junction.
           PlanInfo pi = _operands[i].getPlanInfo(context);
-          if (pi.evalAsFilter) {
+          //we check for size == 1 now because the join optimization can
+          //leave an operand with two indexes while the key element is not set;
+          //this would throw an NPE
+          if (pi.evalAsFilter && pi.indexes.size() == 1) {       
             if(pi.isPreferred) {
               if(currentBestFilter != null) {
                 evalOperands.add(currentBestFilter);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/d232e259/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/QueryUtils.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/QueryUtils.java b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/QueryUtils.java
index 91e20f0..e19318c 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/QueryUtils.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/QueryUtils.java
@@ -23,6 +23,7 @@ import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.ListIterator;
+import java.util.Map;
 import java.util.Set;
 
 import org.apache.logging.log4j.Logger;
@@ -38,6 +39,7 @@ import com.gemstone.gemfire.cache.query.Struct;
 import com.gemstone.gemfire.cache.query.TypeMismatchException;
 import com.gemstone.gemfire.cache.query.internal.index.AbstractIndex;
 import com.gemstone.gemfire.cache.query.internal.index.IndexData;
+import com.gemstone.gemfire.cache.query.internal.index.IndexManager;
 import com.gemstone.gemfire.cache.query.internal.index.IndexProtocol;
 import com.gemstone.gemfire.cache.query.internal.index.IndexUtils;
 import com.gemstone.gemfire.cache.query.internal.index.PartitionedIndex;
@@ -52,7 +54,6 @@ import com.gemstone.gemfire.internal.cache.CachePerfStats;
 import com.gemstone.gemfire.internal.cache.PartitionedRegion;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
 import com.gemstone.gemfire.internal.logging.LogService;
-import com.gemstone.gemfire.internal.logging.log4j.LocalizedMessage;
 
 /**
  * 
@@ -556,7 +557,7 @@ public class QueryUtils {
           indexFieldToItrsMapping[level], icdeh[level])) {
         if (level == (values.length - 1)) {
           doNestedIterationsForIndex(expansionListIterator.hasNext(), result,
-              finalItrs, expansionListIterator, context, iterOps, limit);
+              finalItrs, expansionListIterator, context, iterOps, limit, null);
           if (limit != -1 && result.size() >= limit) {
             break;
           }     
@@ -633,7 +634,7 @@ public class QueryUtils {
   private static SelectResults cutDownAndExpandIndexResults(
       SelectResults result, RuntimeIterator[] indexFieldToItrsMapping,
       List expansionList, List finalItrs, ExecutionContext context,
-      List checkList, CompiledValue iterOps) throws FunctionDomainException,
+      List checkList, CompiledValue iterOps, IndexInfo theFilteringIndex) throws FunctionDomainException,
       TypeMismatchException, NameResolutionException,
       QueryInvocationTargetException {
     SelectResults returnSet = null;
@@ -666,7 +667,7 @@ public class QueryUtils {
       
     }
     cutDownAndExpandIndexResults(returnSet, result, indexFieldToItrsMapping,
-        expansionList, finalItrs, context, checkList, iterOps);
+        expansionList, finalItrs, context, checkList, iterOps, theFilteringIndex);
     return returnSet;
   }
 
@@ -674,7 +675,7 @@ public class QueryUtils {
   private static void cutDownAndExpandIndexResults(SelectResults returnSet,
       SelectResults result, RuntimeIterator[] indexFieldToItrsMapping,
       List expansionList, List finalItrs, ExecutionContext context,
-      List checkList, CompiledValue iterOps) throws FunctionDomainException,
+      List checkList, CompiledValue iterOps, IndexInfo theFilteringIndex) throws FunctionDomainException,
       TypeMismatchException, NameResolutionException,
       QueryInvocationTargetException {
 //    Object[] checkFields = null;
@@ -692,13 +693,20 @@ public class QueryUtils {
     int limit = getLimitValue(context);
     
     while (itr.hasNext()) {
+      DerivedInfo derivedInfo = null;
+      if (IndexManager.JOIN_OPTIMIZATION) {
+        derivedInfo = new DerivedInfo();
+        derivedInfo.setExpansionList(expansionList);
+      }
       Object value = itr.next();
       if (setIndexFieldValuesInRespectiveIterators(value,  
           indexFieldToItrsMapping, icdeh)) {  //does that mean we don't get dupes even if they exist in the index?
         //         DO NESTED LOOPING
-       
+        if (IndexManager.JOIN_OPTIMIZATION) {
+          derivedInfo.computeDerivedJoinResults(theFilteringIndex, context, iterOps);
+        }  
         doNestedIterationsForIndex(expansionListIterator.hasNext(), returnSet,
-            finalItrs, expansionListIterator, context, iterOps, limit);
+            finalItrs, expansionListIterator, context, iterOps, limit, derivedInfo == null ? null : derivedInfo.derivedResults);
         if (limit != -1 && returnSet.size() >= limit) {
           break;
         }     
@@ -718,11 +726,19 @@ public class QueryUtils {
     }   
     return limit;
   }
+  
+  public static CompiledID getCompiledIdFromPath(CompiledValue path) {
+    int type = path.getType();
+    if (type == OQLLexerTokenTypes.Identifier) {
+      return (CompiledID) path;
+    } 
+    return getCompiledIdFromPath(path.getReceiver());
+  }
 
   //Add comments
   private static void doNestedIterationsForIndex(boolean continueRecursion,
       SelectResults resultSet, List finalItrs, ListIterator expansionItrs,
-      ExecutionContext context, CompiledValue iterOps, int limit)
+      ExecutionContext context, CompiledValue iterOps, int limit, Map<String, SelectResults> derivedResults)
       throws FunctionDomainException, TypeMismatchException,
       NameResolutionException, QueryInvocationTargetException {
     
@@ -768,7 +784,24 @@ public class QueryUtils {
     }
     else {
       RuntimeIterator currentLevel = (RuntimeIterator) expansionItrs.next();
-      SelectResults c = currentLevel.evaluateCollection(context);
+      SelectResults c = null;
+      // Calculate the key used to find the derived join results. If this is a non-nested lookup it will be a CompiledRegion; otherwise it will be a CompiledPath
+      // that we can extract the id from. In the end the result is the alias, which is used as the key prefix
+      CompiledValue collectionExpression = currentLevel.getCmpIteratorDefn().getCollectionExpr();
+      String key = null;
+      boolean useDerivedResults = true;
+      if (currentLevel.getCmpIteratorDefn().getCollectionExpr().getType() == OQLLexerTokenTypes.RegionPath) {
+        key = currentLevel.getCmpIteratorDefn().getName() + ":" + currentLevel.getDefinition();
+      } else if (currentLevel.getCmpIteratorDefn().getCollectionExpr().getType() == OQLLexerTokenTypes.LITERAL_select) {
+        useDerivedResults = false;
+      } else {
+        key = getCompiledIdFromPath(currentLevel.getCmpIteratorDefn().getCollectionExpr()).getId() + ":" + currentLevel.getDefinition();
+      }
+      if (useDerivedResults && derivedResults != null && derivedResults.containsKey(key)) {
+        c = derivedResults.get(key);
+      } else {
+        c = currentLevel.evaluateCollection(context);
+      }
       //  RuntimeIterator next = expansionItrs.hasNext() ?
       // (RuntimeIterator)expansionItrs.next() : null;
       if (c == null) {
@@ -783,7 +816,7 @@ public class QueryUtils {
 
         currentLevel.setCurrent(cIter.next());
         doNestedIterationsForIndex(expansionItrs.hasNext(), resultSet,
-            finalItrs, expansionItrs, context, iterOps, limit);
+            finalItrs, expansionItrs, context, iterOps, limit, derivedResults);
         if (limit != -1 && resultSet.size() >= limit) {
           break;
         }     
@@ -1091,7 +1124,7 @@ public class QueryUtils {
             indexResults);
         indexResults = QueryUtils.cutDownAndExpandIndexResults(indexResults,
             ich.indexFieldToItrsMapping, ich.expansionList, ich.finalList,
-            context, ich.checkList, iterOperands);
+            context, ich.checkList, iterOperands, indexInfo);
       }
       finally {
         observer.afterCutDownAndExpansionOfSingleIndexResult(indexResults);
@@ -1108,7 +1141,7 @@ public class QueryUtils {
               indexInfo._index, indexResults);
           indexResults = QueryUtils.cutDownAndExpandIndexResults(indexResults,
               ich.indexFieldToItrsMapping, ich.expansionList, ich.finalList,
-              context, ich.checkList, iterOperands);
+              context, ich.checkList, iterOperands, indexInfo);
         }
         finally {
           observer.afterCutDownAndExpansionOfSingleIndexResult(indexResults);
@@ -1475,7 +1508,7 @@ public class QueryUtils {
               singlUsblIndxRes, context);
           cutDownAndExpandIndexResults(returnSet, singlUsblIndxRes,
               singleUsableICH.indexFieldToItrsMapping, totalExpList, finalList,
-              context, singleUsableICH.checkList, iterOperands);
+              context, singleUsableICH.checkList, iterOperands, singleUsableICH.indxInfo);
           singlUsblIndxRes.clear();
         }
       }
@@ -1700,7 +1733,7 @@ public class QueryUtils {
     return cutDownAndExpandIndexResults((SelectResults) dataList.get(0),
         (RuntimeIterator[]) dataList.get(1), (List) dataList.get(2),
         (List) dataList.get(3), (ExecutionContext) dataList.get(4),
-        (List) dataList.get(5), null);
+        (List) dataList.get(5), null, null);
   } 
   
   static List queryEquijoinConditionBucketIndexes(IndexInfo[] indxInfo,

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/d232e259/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/index/IndexManager.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/index/IndexManager.java b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/index/IndexManager.java
index 534f757..3784327 100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/index/IndexManager.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/cache/query/internal/index/IndexManager.java
@@ -113,6 +113,8 @@ public class IndexManager  {
   // Threshold for Queue.
   private final int INDEX_MAINTENANCE_BUFFER = Integer.getInteger("gemfire.AsynchIndexMaintenanceThreshold", -1).intValue();
 
+  public static boolean JOIN_OPTIMIZATION = !Boolean.getBoolean("gemfire.index.DisableJoinOptimization");
+  
   // Added for test purposes only.
   public static boolean INPLACE_OBJECT_MODIFICATION_FOR_TEST = false;    
 
@@ -120,6 +122,8 @@ public class IndexManager  {
   public static boolean IS_TEST_LDM = false; 
 
   public static boolean IS_TEST_EXPANSION = false;
+  
+  
 
   /**
    * System property to maintain the ReverseMap to take care in-place modification of the 
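
Since JOIN_OPTIMIZATION is a static flag read once from a system property when IndexManager is loaded, the new derived-join behaviour can only be switched off before any cache or query classes initialize. A minimal sketch: the property name comes from the diff above, while the wrapper class and main() are illustrative.

public class DisableJoinOptimizationSketch {
  public static void main(String[] args) throws Exception {
    // Must run before IndexManager is loaded; equivalently, pass
    // -Dgemfire.index.DisableJoinOptimization=true on the JVM command line.
    System.setProperty("gemfire.index.DisableJoinOptimization", "true");
    // ... create the cache, regions and indexes, then run queries as usual ...
  }
}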

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/d232e259/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/EquijoinDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/EquijoinDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/EquijoinDUnitTest.java
new file mode 100644
index 0000000..5718fce
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/EquijoinDUnitTest.java
@@ -0,0 +1,437 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.query.internal.index;
+
+import java.io.Serializable;
+import java.text.ParseException;
+import java.util.HashMap;
+import java.util.Map;
+
+import junit.framework.TestCase;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.cache.AttributesFactory;
+import com.gemstone.gemfire.cache.Cache;
+import com.gemstone.gemfire.cache.DataPolicy;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionAttributes;
+import com.gemstone.gemfire.cache.query.CacheUtils;
+import com.gemstone.gemfire.cache.query.FunctionDomainException;
+import com.gemstone.gemfire.cache.query.Index;
+import com.gemstone.gemfire.cache.query.IndexExistsException;
+import com.gemstone.gemfire.cache.query.IndexNameConflictException;
+import com.gemstone.gemfire.cache.query.NameResolutionException;
+import com.gemstone.gemfire.cache.query.QueryInvocationTargetException;
+import com.gemstone.gemfire.cache.query.QueryService;
+import com.gemstone.gemfire.cache.query.RegionNotFoundException;
+import com.gemstone.gemfire.cache.query.SelectResults;
+import com.gemstone.gemfire.cache.query.TypeMismatchException;
+import com.gemstone.gemfire.cache.query.functional.StructSetOrResultsSet;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+@Category(IntegrationTest.class)
+public class EquijoinDUnitTest extends TestCase {
+  QueryService qs;
+  Region region1, region2, region3, region4;
+  
+  @Before
+  public void setUp() throws java.lang.Exception {
+    CacheUtils.startCache();
+    qs = CacheUtils.getQueryService();
+  }
+  
+  @After
+  public void tearDown() {
+    region2.destroyRegion();
+    region1.destroyRegion();
+  }
+  
+  protected void createRegions() throws Exception {
+    region1 = createReplicatedRegion("region1");
+    region2 = createReplicatedRegion("region2");
+  }
+  
+  protected void createAdditionalRegions() throws Exception {
+    region3 = createReplicatedRegion("region3");
+    region4 = createReplicatedRegion("region4");
+  }
+  
+  protected void destroyAdditionalRegions() throws Exception {
+    if (region3 != null) {
+      region3.destroyRegion();
+    }
+    if (region4 != null) {
+      region4.destroyRegion();
+    }
+  }
+
+  @Test
+  public void testSingleFilterWithSingleEquijoinOneToOneMapping() throws Exception {
+    createRegions();
+
+    String[] queries = new String[]{
+        "<trace>select * from /region1 c, /region2 s where c.pkid=1 and c.pkid = s.pkid",
+        "<trace>select * from /region1 c, /region2 s where c.pkid=1 and s.pkid = c.pkid",
+        "<trace>select * from /region1 c, /region2 s where c.pkid = s.pkid and c.pkid=1",
+        "<trace>select * from /region1 c, /region2 s where s.pkid = c.pkid and c.pkid=1",
+    };
+    
+    for (int i = 0; i < 1000; i++) {
+      region1.put( i, new Customer(i, i));
+      region2.put( i, new Customer(i, i));
+    }
+    
+    executeQueriesWithIndexCombinations(queries);
+  }
+  
+  @Test
+  public void testSingleFilterWithSingleEquijoinOneToOneMappingWithAdditionalJoins() throws Exception {
+    createRegions();
+    try {
+      createAdditionalRegions();
+      
+      String[] queries = new String[]{
+          "<trace>select * from /region1 c, /region2 s, /region3 d where c.pkid=1 and c.pkid = s.pkid and d.pkid = s.pkid",  //this should derive d after deriving s from c
+          "<trace>select * from /region1 c, /region2 s, /region3 d, /region4 f where c.pkid=1 and c.pkid = s.pkid and d.pkid = s.pkid and f.pkid = d.pkid",  //this should f from d from s from c
+          "<trace>select * from /region1 c, /region2 s, /region3 d where c.pkid=1 and c.pkid = s.pkid and d.pkid = c.pkid",  //this should derive d and s from c 
+          "<trace>select * from /region1 c, /region2 s, /region3 d where c.pkid=1 and c.pkid = s.pkid and s.pkid = d.pkid",  //this should derive d after deriving s from c (order is just switched in the query)
+      };
+      
+      for (int i = 0; i < 30; i++) {
+        region1.put( i, new Customer(i, i));
+        region2.put( i, new Customer(i, i));
+        region3.put( i, new Customer(i, i));
+        region4.put( i, new Customer(i, i));
+      }
+      
+      executeQueriesWithIndexCombinations(queries);
+    }
+    finally {
+      destroyAdditionalRegions();
+    }
+  }
+
+  
+  /**
+   * We do not want to test this with Primary Key on the many side or else only 1 result will be returned
+   */
+  @Test
+  public void testSingleFilterWithSingleEquijoinOneToManyMapping() throws Exception {
+    createRegions();
+
+    String[] queries = new String[]{
+        "select * from /region1 c, /region2 s where c.pkid=1 and c.pkid = s.pkid",
+        "select * from /region1 c, /region2 s where c.pkid=1 and s.pkid = c.pkid",
+        "select * from /region1 c, /region2 s where c.pkid = s.pkid and c.pkid=1",
+        "select * from /region1 c, /region2 s where s.pkid = c.pkid and c.pkid=1",
+    };
+    
+    for (int i = 0; i < 1000; i++) {
+      region1.put( i, new Customer(i, i));
+      region2.put( i, new Customer(i % 100, i));
+    }
+    
+    executeQueriesWithIndexCombinations(queries, new DefaultIndexCreatorCallback(qs) {
+      protected String[] createIndexTypesForRegion2() {
+        return new String[] { "Compact", "Hash"};
+      }
+    }, false);
+  }
+
+  @Test
+  public void testSingleFilterWithSingleEquijoinMultipleFiltersOnSameRegionOnSameIteratorMapping() throws Exception {
+    createRegions();
+
+    String[] queries = new String[]{
+        "select * from /region1 c, /region2 s where c.pkid=1 and c.pkid = s.pkid and c.id = 1",
+        "select * from /region1 c, /region2 s where c.id = 1 and c.pkid=1 and s.pkid = c.pkid",
+        
+    };
+    
+    for (int i = 0; i < 1000; i++) {
+      region1.put( i, new Customer(i, i % 10));
+      region2.put( i, new Customer(i, i));
+    }
+    
+    executeQueriesWithIndexCombinations(queries, new DefaultIndexCreatorCallback(qs) {
+      Index secondaryIndex;
+      
+      @Override
+      public void createIndexForRegion1(int indexTypeId) throws RegionNotFoundException, IndexExistsException, IndexNameConflictException {
+        secondaryIndex = qs.createIndex("region1 id", "p.id", "/region1 p");
+        super.createIndexForRegion1(indexTypeId);
+      }
+
+      @Override
+      public void destroyIndexForRegion1(int indexTypeId) {
+        qs.removeIndex(secondaryIndex);
+        super.destroyIndexForRegion1(indexTypeId);
+      }
+      
+    }, false /*want to compare actual results and not size only*/);
+  }
+
+  @Test  
+  public void testSingleFilterWithSingleEquijoinWithRangeFilters() throws Exception {
+    createRegions();
+
+    String[] queries = new String[]{
+        "<trace>select * from /region1 c, /region2 s where c.pkid = 1 and c.id > 1 and c.id < 10 and c.pkid = s.pkid",
+        "<trace>select * from /region1 c, /region2 s where c.pkid >= 0 and c.pkid < 10 and c.id < 10 and c.pkid = s.pkid"
+    };
+    
+    //just need enough so that there are 1-10 ids per pkid
+    for (int i = 0; i < 1000; i++) {
+      region1.put(i, new Customer(i % 5, i % 10));
+      region2.put(i, new Customer(i, i));
+    }
+    
+    executeQueriesWithIndexCombinations(queries, new DefaultIndexCreatorCallback(qs) {
+      protected String[] createIndexTypesForRegion1() {
+        return new String[] { "Compact", "Hash"};
+      }
+    }, false /*want to compare actual results and not size only*/);
+  }
+
+  @Test 
+  public void testSingleFilterWithSingleEquijoinLimit() throws Exception {
+    //In this test we are hoping the index being used will properly apply the limit while taking into consideration the filters on c.id and c.pkid
+    //This test is set up so that if the pkid index is used and the limit is applied, but id is not taken into consideration until later stages, it will lead to incorrect results (0)
+    createRegions();
+
+    String[] queries = new String[]{
+        "select * from /region1 c, /region2 s where c.id = 3 and c.pkid > 2  and c.pkid = s.pkid limit 1",
+    };
+    
+    for (int i = 0; i < 1000; i++) {
+      region1.put( i, new Customer(i, i % 10));
+      region2.put( i, new Customer(i, i));
+    }
+    
+    executeQueriesWithIndexCombinations(queries, new DefaultIndexCreatorCallback(qs) {
+      Index secondaryIndex;
+      
+      @Override
+      public void createIndexForRegion1(int indexTypeId) throws RegionNotFoundException, IndexExistsException, IndexNameConflictException {
+        secondaryIndex = qs.createIndex("region1 id", "p.id", "/region1 p");
+        super.createIndexForRegion1(indexTypeId);
+      }
+
+      @Override
+      public void destroyIndexForRegion1(int indexTypeId) {
+        qs.removeIndex(secondaryIndex);
+        super.destroyIndexForRegion1(indexTypeId);
+      }
+      
+    }, true);
+  }
+
+  @Test
+  public void testSingleFilterWithSingleEquijoinNestedQuery() throws Exception {
+    createRegions();
+
+    String[] queries = new String[]{
+        "select * from /region1 c, /region2 s where c.pkid=1 and c.pkid = s.pkid and c.pkid in (select t.pkid from /region1 t,/region2 s where s.pkid=t.pkid and s.pkid = 1)",
+        "select * from /region1 c, /region2 s where c.pkid=1 and c.pkid = s.pkid or c.pkid in set (1,2,3,4)",
+    };
+    
+    for (int i = 0; i < 1000; i++) {
+      region1.put( i, new Customer(i, i));
+      region2.put( i, new Customer(i, i));
+    }
+    
+    executeQueriesWithIndexCombinations(queries);
+  }
+
+  public static class Customer implements Serializable {
+    public int pkid;
+    public int id;
+    public String name;
+    public Map<String, Customer> nested = new HashMap<String, Customer>();
+
+    public Customer(int pkid, int id) {
+      this.pkid = pkid;
+      this.id = id;
+      this.name = "name" + pkid;
+    }
+
+    public String toString() {
+      return "Customer pkid = " + pkid + ", id: " + id + " name:" + name;
+    }
+  }
+
+  private Region createReplicatedRegion(String regionName) throws ParseException {
+    Cache cache = CacheUtils.getCache();
+    AttributesFactory attributesFactory = new AttributesFactory();
+    attributesFactory.setDataPolicy(DataPolicy.REPLICATE);
+    RegionAttributes regionAttributes = attributesFactory.create();
+    return cache.createRegion(regionName, regionAttributes);
+  }
+
+  protected void executeQueriesWithIndexCombinations(String[] queries) throws RegionNotFoundException, IndexExistsException, IndexNameConflictException, QueryInvocationTargetException, NameResolutionException, TypeMismatchException, FunctionDomainException {
+    executeQueriesWithIndexCombinations(queries, new DefaultIndexCreatorCallback(qs), false);
+  }
+  
+  protected void executeQueriesWithIndexCombinations(String[] queries, IndexCreatorCallback indexCreator, boolean sizeOnly) throws RegionNotFoundException, IndexExistsException, IndexNameConflictException, QueryInvocationTargetException, NameResolutionException, TypeMismatchException, FunctionDomainException {
+    Object[] nonIndexedResults = executeQueries(queries);
+    
+    for (int r1Index = 0; r1Index < indexCreator.getNumIndexTypesForRegion1(); r1Index++) {
+      indexCreator.createIndexForRegion1(r1Index);
+      for (int r2Index = 0; r2Index < indexCreator.getNumIndexTypesForRegion2(); r2Index++) {
+        indexCreator.createIndexForRegion2(r2Index);
+        Object[] indexedResults = executeQueries(queries);
+        compareResults(nonIndexedResults, indexedResults, queries, sizeOnly);
+        indexCreator.destroyIndexForRegion2(r2Index);
+      }
+      indexCreator.destroyIndexForRegion1(r1Index);
+    }
+  }
+  
+  protected Object[] executeQueries(String[] queries) throws QueryInvocationTargetException, NameResolutionException, TypeMismatchException, FunctionDomainException {
+    Object[] results = new SelectResults[queries.length];
+    for (int i = 0; i < queries.length; i++) {
+      results[i] = qs.newQuery(queries[i]).execute();
+    }
+    return results;
+  }
+  
+  interface IndexCreatorCallback {
+    int getNumIndexTypesForRegion1();
+    int getNumIndexTypesForRegion2();
+    void createIndexForRegion1(int indexTypeId) throws RegionNotFoundException, IndexExistsException, IndexNameConflictException;
+    void createIndexForRegion2(int indexTypeId) throws RegionNotFoundException, IndexExistsException, IndexNameConflictException;
+    void destroyIndexForRegion1(int indexTypeId) ;
+    void destroyIndexForRegion2(int indexTypeId) ;
+  }
+  
+  static class DefaultIndexCreatorCallback implements IndexCreatorCallback {
+    protected String[] indexTypesForRegion1 = createIndexTypesForRegion1();
+    protected String[] indexTypesForRegion2 = createIndexTypesForRegion2();
+    protected Index indexOnR1, indexOnR2;
+    protected QueryService qs;
+    
+    DefaultIndexCreatorCallback(QueryService qs) {
+      this.qs = qs;
+    }
+    protected String[] createIndexTypesForRegion1() {
+      return new String[] { "Compact", "Hash", "PrimaryKey"};
+    }
+    
+    protected String[] createIndexTypesForRegion2() {
+      return new String[] { "Compact", "Hash", "PrimaryKey"};
+    }
+    
+    public int getNumIndexTypesForRegion1() {
+      return indexTypesForRegion1.length; 
+    }
+    
+    public int getNumIndexTypesForRegion2() {
+      return indexTypesForRegion2.length;
+    }
+    
+    public void createIndexForRegion1(int indexTypeId) throws RegionNotFoundException, IndexExistsException, IndexNameConflictException {
+      indexOnR1 = createIndex(indexTypesForRegion1[indexTypeId], "region1", "pkid");
+
+    }
+    
+    public void createIndexForRegion2(int indexTypeId) throws RegionNotFoundException, IndexExistsException, IndexNameConflictException {
+      indexOnR2 = createIndex(indexTypesForRegion2[indexTypeId], "region2", "pkid");
+    }
+
+    //Type id is not used here but at some future time we could store a map of indexes or find a use for this id?
+    public void destroyIndexForRegion1(int indexTypeId) {
+      qs.removeIndex(indexOnR1);
+    }
+    
+    public void destroyIndexForRegion2(int indexTypeId) {
+      qs.removeIndex(indexOnR2);
+    }
+    
+    
+    private Index createIndex(String type, String regionName, String field) throws RegionNotFoundException, IndexExistsException, IndexNameConflictException {
+      Index index = null;
+      switch (type) {
+      case "Compact":
+        index = createCompactRangeIndex(regionName, field);
+        break;
+      case "Range":
+        index = createRangeIndexOnFirstIterator(regionName, field);
+        break;
+      case "Hash":
+        index = createHashIndex(regionName, field);
+        break;
+      case "PrimaryKey":
+        index = createPrimaryKeyIndex(regionName, field);
+        break;
+      }
+      return index;
+    }
+    
+    private Index createCompactRangeIndex(String regionName, String fieldName) throws RegionNotFoundException, IndexExistsException, IndexNameConflictException {
+      String fromClause = "/" + regionName + " r";
+      String indexedExpression = "r." + fieldName;
+      return qs.createIndex("Compact " + fromClause + ":" + indexedExpression, indexedExpression, fromClause);
+    }
+    
+    private Index createHashIndex(String regionName, String fieldName) throws RegionNotFoundException, IndexExistsException, IndexNameConflictException {
+      String fromClause = "/" + regionName + " r";
+      String indexedExpression = "r." + fieldName;
+      return qs.createHashIndex("Hash " + fromClause + ":" + indexedExpression, indexedExpression, fromClause);
+    }
+    
+    private Index createPrimaryKeyIndex(String regionName, String fieldName) throws RegionNotFoundException, IndexExistsException, IndexNameConflictException {
+      String fromClause = "/" + regionName + " r";
+      String indexedExpression = "r." + fieldName;
+      return qs.createKeyIndex("PrimaryKey " + fromClause + ":" + indexedExpression, indexedExpression, fromClause);
+    }
+    
+    private Index createRangeIndexOnFirstIterator(String regionName, String fieldName) throws RegionNotFoundException, IndexExistsException, IndexNameConflictException {
+      String fromClause = "/" + regionName + " r, r.nested.values v";
+      String indexedExpression = "r." + fieldName;
+      return qs.createIndex("Range " + fromClause + ":" + indexedExpression, indexedExpression, fromClause);
+    }
+    
+    private Index createRangeIndexOnSecondIterator(String regionName, String fieldName) throws RegionNotFoundException, IndexExistsException, IndexNameConflictException {
+      String fromClause = "/" + regionName + " r, r.nested.values v";
+      String indexedExpression = "v." + fieldName;
+      return qs.createIndex("Range " + fromClause + ":" + indexedExpression, indexedExpression, fromClause);
+    }
+  }
+  
+  private void compareResults(Object[] nonIndexedResults, Object[] indexedResults, String[] queries, boolean sizeOnly) {
+    if (sizeOnly) {
+      for (int i = 0; i < queries.length; i++) {
+        assertTrue(((SelectResults)nonIndexedResults[i]).size() == ((SelectResults)indexedResults[i]).size());
+        assertTrue(((SelectResults)nonIndexedResults[i]).size() > 0);
+      }
+    }
+    else {
+      StructSetOrResultsSet util = new StructSetOrResultsSet();
+      for (int i = 0; i < queries.length; i++) {
+        Object[][] resultsToCompare = new Object[1][2];
+        resultsToCompare[0][0] = nonIndexedResults[i];
+        resultsToCompare[0][1] = indexedResults[i];
+        util.CompareQueryResultsWithoutAndWithIndexes(resultsToCompare, 1, new String[]{queries[i]});
+        assertTrue(((SelectResults)nonIndexedResults[i]).size() > 0);
+      }
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/d232e259/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/PartitionedRegionEquijoinDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/PartitionedRegionEquijoinDUnitTest.java b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/PartitionedRegionEquijoinDUnitTest.java
new file mode 100644
index 0000000..3fa6848
--- /dev/null
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/cache/query/internal/index/PartitionedRegionEquijoinDUnitTest.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.gemstone.gemfire.cache.query.internal.index;
+
+import java.util.ArrayList;
+
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.gemstone.gemfire.cache.PartitionAttributesFactory;
+import com.gemstone.gemfire.cache.Region;
+import com.gemstone.gemfire.cache.RegionFactory;
+import com.gemstone.gemfire.cache.RegionShortcut;
+import com.gemstone.gemfire.cache.execute.Function;
+import com.gemstone.gemfire.cache.execute.FunctionContext;
+import com.gemstone.gemfire.cache.execute.FunctionService;
+import com.gemstone.gemfire.cache.execute.RegionFunctionContext;
+import com.gemstone.gemfire.cache.execute.ResultCollector;
+import com.gemstone.gemfire.cache.query.CacheUtils;
+import com.gemstone.gemfire.cache.query.QueryService;
+import com.gemstone.gemfire.cache.query.SelectResults;
+import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+
+@Category(IntegrationTest.class)
+public class PartitionedRegionEquijoinDUnitTest extends EquijoinDUnitTest {
+ 
+  @Override
+  protected void createRegions() {
+    region1 = createPartitionRegion("region1");
+    region2 = createColocatedPartitionRegion("region2", "region1");
+    FunctionService.registerFunction(equijoinTestFunction);
+  }
+  
+  @Override
+  protected void createAdditionalRegions() throws Exception {
+    region3 = createColocatedPartitionRegion("region3", "region1");
+    region4 = createColocatedPartitionRegion("region4", "region1");
+  }
+ 
+  @Test
+  public void testSingleFilterWithSingleEquijoinNestedQuery() throws Exception {
+    createRegions();
+
+    String[] queries = new String[]{
+        "select * from /region1 c, /region2 s where c.pkid=1 and c.pkid = s.pkid or c.pkid in set (1,2,3,4)",
+    };
+    
+    for (int i = 0; i < 1000; i++) {
+      region1.put( i, new Customer(i, i));
+      region2.put( i, new Customer(i, i));
+    }
+    
+    executeQueriesWithIndexCombinations(queries);
+  }
+
+  public Region createPartitionRegion(String regionName) {
+    PartitionAttributesFactory paf = new PartitionAttributesFactory();
+    RegionFactory factory = CacheUtils.getCache().createRegionFactory(RegionShortcut.PARTITION)
+        .setPartitionAttributes(paf.create());
+    return factory.create(regionName);
+  }
+ 
+  public Region createColocatedPartitionRegion(String regionName, final String colocatedRegion) {
+     PartitionAttributesFactory paf = new PartitionAttributesFactory();
+        paf.setColocatedWith(colocatedRegion);
+    RegionFactory factory = CacheUtils.getCache().createRegionFactory(RegionShortcut.PARTITION).setPartitionAttributes(paf.create());
+    return factory.create(regionName);
+  }
+  
+
+  @Override
+  protected Object[] executeQueries(String[] queries) {
+    ResultCollector collector = FunctionService.onRegion(region1).withArgs(queries).execute(equijoinTestFunction.getId());
+    Object result = collector.getResult();
+    return (Object[])((ArrayList)result).get(0);
+  }
+  
+  Function equijoinTestFunction = new Function(){
+    @Override
+    public boolean hasResult() {
+      return true;
+    }
+
+    @Override
+    public void execute(FunctionContext context) {
+      try {
+        String[] queries = (String[]) context.getArguments();
+        QueryService qs = CacheUtils.getCache().getQueryService();
+        
+        Object[] results = new SelectResults[queries.length];
+        for (int i = 0; i < queries.length; i++) {
+          results[i] = qs.newQuery(queries[i]).execute((RegionFunctionContext)context);
+        }
+        context.getResultSender().lastResult(results);
+      }
+      catch (Exception e) {
+        e.printStackTrace();
+      }
+    }
+
+    @Override
+    public String getId() {
+      return "Equijoin Query";
+    }
+
+    @Override
+    public boolean optimizeForWrite() {
+      return false;
+    }
+
+    @Override
+    public boolean isHA() {
+      return false;
+    }
+  };
+}


[61/62] [abbrv] incubator-geode git commit: Merge branch 'develop' into feature/GEODE-17

Posted by je...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c39f8a5f/gemfire-core/build.gradle
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c39f8a5f/gemfire-core/src/main/java/com/gemstone/gemfire/cache/operations/OperationContext.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c39f8a5f/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/DistributedSystem.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c39f8a5f/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/DistributionConfigImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c39f8a5f/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/ManagementAgent.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c39f8a5f/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/RestAgent.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c39f8a5f/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/SystemManagementService.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c39f8a5f/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/LauncherLifecycleCommands.java
----------------------------------------------------------------------
diff --cc gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/LauncherLifecycleCommands.java
index 9270363,570b6f5..2dfe6eb
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/LauncherLifecycleCommands.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/LauncherLifecycleCommands.java
@@@ -112,14 -112,7 +112,9 @@@ import com.gemstone.gemfire.management.
  import com.gemstone.gemfire.management.internal.configuration.domain.SharedConfigurationStatus;
  import com.gemstone.gemfire.management.internal.configuration.messages.SharedConfigurationStatusRequest;
  import com.gemstone.gemfire.management.internal.configuration.messages.SharedConfigurationStatusResponse;
 -
 +import com.gemstone.gemfire.management.internal.security.Resource;
 +import com.gemstone.gemfire.management.internal.security.ResourceConstants;
 +import com.gemstone.gemfire.management.internal.security.ResourceOperation;
- import com.gemstone.gemfire.security.GemFireSecurityException;
- //import com.gemstone.org.jgroups.stack.tcpserver.TcpClient;
- import com.sun.tools.attach.VirtualMachine;
- import com.sun.tools.attach.VirtualMachineDescriptor;
- 
  import org.springframework.shell.core.annotation.CliAvailabilityIndicator;
  import org.springframework.shell.core.annotation.CliCommand;
  import org.springframework.shell.core.annotation.CliOption;

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c39f8a5f/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/commands/ShellCommands.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c39f8a5f/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/shell/JmxOperationInvoker.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c39f8a5f/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/security/JSONAuthorization.java
----------------------------------------------------------------------
diff --cc gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/security/JSONAuthorization.java
index 0000000,ef98575..5455818
mode 000000,100644..100644
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/security/JSONAuthorization.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/security/JSONAuthorization.java
@@@ -1,0 -1,308 +1,292 @@@
 -/*
 - * Licensed to the Apache Software Foundation (ASF) under one or more
 - * contributor license agreements.  See the NOTICE file distributed with
 - * this work for additional information regarding copyright ownership.
 - * The ASF licenses this file to You under the Apache License, Version 2.0
 - * (the "License"); you may not use this file except in compliance with
 - * the License.  You may obtain a copy of the License at
 - *
 - *      http://www.apache.org/licenses/LICENSE-2.0
 - *
 - * Unless required by applicable law or agreed to in writing, software
 - * distributed under the License is distributed on an "AS IS" BASIS,
 - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 - * See the License for the specific language governing permissions and
 - * limitations under the License.
 - */
+ package com.gemstone.gemfire.management.internal.security;
+ 
+ import java.io.File;
+ import java.io.FileReader;
+ import java.io.IOException;
+ import java.security.Principal;
+ import java.util.HashMap;
+ import java.util.HashSet;
+ import java.util.Map;
+ import java.util.Properties;
+ import java.util.Set;
+ 
+ import javax.management.remote.JMXPrincipal;
+ 
+ import org.json.JSONArray;
+ import org.json.JSONException;
+ import org.json.JSONObject;
+ 
+ import com.gemstone.gemfire.GemFireConfigException;
+ import com.gemstone.gemfire.LogWriter;
+ import com.gemstone.gemfire.cache.Cache;
+ import com.gemstone.gemfire.cache.operations.OperationContext;
+ import com.gemstone.gemfire.distributed.DistributedMember;
+ import com.gemstone.gemfire.internal.logging.LogService;
+ import com.gemstone.gemfire.management.internal.security.ResourceOperationContext.ResourceOperationCode;
+ import com.gemstone.gemfire.security.AccessControl;
+ import com.gemstone.gemfire.security.AuthenticationFailedException;
+ import com.gemstone.gemfire.security.Authenticator;
+ import com.gemstone.gemfire.security.NotAuthorizedException;
+ 
+ public class JSONAuthorization implements AccessControl, Authenticator {
+ 	
+ 	public static class Role{
+ 		String[] permissions;
+ 		String name;
+ 		String regionName;
+ 		String serverGroup;		
+ 	}
+ 	
+ 	public static class User{
+ 		String name;
+ 		Role[] roles;
+ 		String pwd;
+ 	}
+ 	
+ 	private static Map<String,User> acl = null;
+ 	
+ 	public static JSONAuthorization create() throws IOException, JSONException {
+ 	  if(acl==null){
+ 	    readSecurityDescriptor(readDefault());
+ 	  }
+ 	  return new JSONAuthorization();
+ 	}
+ 	
+   public JSONAuthorization() {
+     if (acl == null) {
+       try {
+         readSecurityDescriptor(readDefault());
+       } catch (IOException e) {
+         throw new GemFireConfigException("Error creating JSONAuth", e);
+       } catch (JSONException e) {
+         throw new GemFireConfigException("Error creating JSONAuth", e);
+       }
+     }
+   }
+ 	
+ 	public static Set<ResourceOperationCode> getAuthorizedOps(User user, ResourceOperationContext context) {
+     Set<ResourceOperationCode> codeList = new HashSet<ResourceOperationCode>();
+     for(Role role : user.roles) {
+       for (String perm : role.permissions) {
+         ResourceOperationCode code = ResourceOperationCode.parse(perm);
+         if (role.regionName == null && role.serverGroup == null) {
+           addPermissions(code, codeList);
+         } else if (role.regionName != null) {
+           LogService.getLogger().info("This role requires region=" + role.regionName);
+           if (context instanceof CLIOperationContext) {
+             CLIOperationContext cliContext = (CLIOperationContext) context;
+             String region = cliContext.getCommandOptions().get("region");
+             if (region != null && region.equals(role.regionName)) {
+               addPermissions(code, codeList);
+             } else {
+               LogService.getLogger().info("Not adding permission " + code + " since region=" + region + " does not match");
+             }
+           }
+         }
+         // Same to be implemented for ServerGroup
+       }
+     }
+     LogService.getLogger().info("Final set of permissions " + codeList);
+     return codeList;
+   }
+ 	
+ 	private static void addPermissions(ResourceOperationCode code, Set<ResourceOperationCode> codeList) {
+ 	  if(code!=null) {
+       if(code.getChildren()==null)
+         codeList.add(code);
+       else {
+         for(ResourceOperationCode c : code.getChildren()){
+           codeList.add(c);
+         }
+       }
+     }    
+   }
+ 
+   private static String readDefault() throws IOException, JSONException {
+ 	  String str = System.getProperty(ResourceConstants.RESORUCE_SEC_DESCRIPTOR, ResourceConstants.RESORUCE_DEFAULT_SEC_DESCRIPTOR);
+ 		File file = new File(str);
+ 		FileReader reader = new FileReader(file);
+ 		char[] buffer = new char[(int) file.length()];
+ 		reader.read(buffer);
+ 		String json = new String(buffer);
+ 		reader.close();
+ 		return json;
+ 	}
+ 
+ 	public JSONAuthorization(String json) throws IOException, JSONException{
+ 		readSecurityDescriptor(json);
+ 	}
+ 	
+ 
+ 	private static void readSecurityDescriptor(String json) throws IOException, JSONException {		
+ 		JSONObject jsonBean = new JSONObject(json);		
+ 		acl = new HashMap<String,User>();		
+ 		Map<String,Role> roleMap = readRoles(jsonBean);
+ 		readUsers(acl,jsonBean,roleMap);		
+ 	}
+ 
+ 	private static void readUsers(Map<String, User> acl, JSONObject jsonBean,
+ 			Map<String, Role> roleMap) throws JSONException {
+ 		JSONArray array = jsonBean.getJSONArray("users");
+ 		for(int i=0;i<array.length();i++){
+ 			JSONObject obj = array.getJSONObject(i);
+ 			User user = new User();
+ 			user.name = obj.getString("name");
+ 			if(obj.has("password"))
+ 			  user.pwd = obj.getString("password");
+ 			else 
+ 			  user.pwd = user.name;
+ 			
+ 			JSONArray ops = obj.getJSONArray("roles");
+ 			user.roles = new Role[ops.length()];
+ 			for(int j=0;j<ops.length();j++){
+ 				String roleName = ops.getString(j);
+ 				user.roles[j] = roleMap.get(roleName);
+ 				if(user.roles[j]==null){
+ 					throw new RuntimeException("Role not present " + roleName);
+ 				}
+ 			}
+ 			acl.put(user.name, user);
+ 		}		
+ 	}
+ 
+ 	private static Map<String, Role> readRoles(JSONObject jsonBean) throws JSONException {
+ 		Map<String,Role> roleMap = new HashMap<String,Role>();
+ 		JSONArray array = jsonBean.getJSONArray("roles");
+ 		for(int i=0;i<array.length();i++){
+ 			JSONObject obj = array.getJSONObject(i);
+ 			Role role = new Role();
+ 			role.name = obj.getString("name");
+ 			
+ 			if(obj.has("operationsAllowed")){
+ 				JSONArray ops = obj.getJSONArray("operationsAllowed");
+ 				role.permissions = new String[ops.length()];
+ 				for(int j=0;j<ops.length();j++){
+ 					role.permissions[j] = ops.getString(j);
+ 				}
+ 			} else {
+ 				if (!obj.has("inherit"))
+ 					throw new RuntimeException("Role " + role.name
+ 							+ " has no permissions and does not inherit any parent role");
+ 			}
+ 			
+ 			roleMap.put(role.name,role);
+ 			
+ 			if(obj.has("region")){
+ 				role.regionName = obj.getString("region");
+ 			}
+ 			
+ 			if(obj.has("serverGroup")){
+ 				role.serverGroup = obj.getString("serverGroup");
+ 			}
+ 		}
+ 		
+ 		for(int i=0;i<array.length();i++){
+ 			JSONObject obj = array.getJSONObject(i);
+ 			String name = obj.getString("name");
+ 			Role role = roleMap.get(name);
+ 			if (role == null) {
+ 				throw new RuntimeException("Role not present " + name);
+ 			}
+ 			if(obj.has("inherit")){				
+ 				JSONArray parentRoles = obj.getJSONArray("inherit");
+ 				for (int m = 0; m < parentRoles.length(); m++) {
+ 					String parentRoleName = parentRoles.getString(m);
+ 					Role parentRole = roleMap.get(parentRoleName);
+ 					if (parentRole == null) {
+ 						throw new RuntimeException("Role not present "
+ 								+ parentRoleName);
+ 					}
+ 					int oldLength = 0;
+ 					if (role.permissions != null)
+ 						oldLength = role.permissions.length;
+ 					int newLength = oldLength + parentRole.permissions.length;
+ 					String[] str = new String[newLength];
+ 					int k = 0;
+ 					if(role.permissions!=null) {
+ 						for (; k < role.permissions.length; k++) {
+ 							str[k] = role.permissions[k];
+ 						}
+ 					}
+ 
+ 					for (int l = 0; l < parentRole.permissions.length; l++) {
+ 						str[k + l] = parentRole.permissions[l];
+ 					}
+ 					role.permissions = str;
+ 				}
+ 			}
+ 			
+ 		}		
+ 		return roleMap;
+ 	}
+ 
+ 	public static Map<String, User> getAcl() {
+ 		return acl;
+ 	}
+ 	
+ 	private Principal principal=null;
+ 
+   @Override
+   public void close() {
+     
+   }
+ 
+   @Override
+   public boolean authorizeOperation(String arg0, OperationContext context) {
+     
+     if(principal!=null) {
+       User user = acl.get(principal.getName());
+       if(user!=null) {
+         LogService.getLogger().info("Context received " + context);
+         ResourceOperationContext ctx = (ResourceOperationContext)context;
+         LogService.getLogger().info("Checking for code " + ctx.getResourceOperationCode());
+         
+         //TODO : This is for un-annotated commands
+         if(ctx.getResourceOperationCode()==null)
+           return true;        
+         
+         boolean found = false;
+         for(ResourceOperationCode code : getAuthorizedOps(user, (ResourceOperationContext) context)) {
+           if(ctx.getResourceOperationCode().equals(code)){
+             found =true;
+             LogService.getLogger().info("found code " + code.toString());
+             break;
+           }             
+         }
+         if(found)
+           return true;
+         LogService.getLogger().info("Did not find code " + ctx.getResourceOperationCode());
+         return false;        
+       }
+     } 
+     return false;
+   }
+ 
+   @Override
+   public void init(Principal principal, DistributedMember arg1, Cache arg2) throws NotAuthorizedException {
+     this.principal = principal;    
+   }
+ 
+   @Override
+   public Principal authenticate(Properties props, DistributedMember arg1) throws AuthenticationFailedException {
 -    String user = props.getProperty(ManagementInterceptor.USER_NAME);
 -    String pwd = props.getProperty(ManagementInterceptor.PASSWORD);
++    String user = props.getProperty(ResourceConstants.USER_NAME);
++    String pwd = props.getProperty(ResourceConstants.PASSWORD);
+     User userObj = acl.get(user);
+     if(userObj==null)
+       throw new AuthenticationFailedException("Wrong username/password");
+     LogService.getLogger().info("User="+user + " pwd="+pwd);
+     if (user!=null && !userObj.pwd.equals(pwd) && !"".equals(user))
+       throw new AuthenticationFailedException("Wrong username/password");
+     LogService.getLogger().info("Authentication successful!! for " + user);
+     return new JMXPrincipal(user);    
+   }
+ 
+   @Override
+   public void init(Properties arg0, LogWriter arg1, LogWriter arg2) throws AuthenticationFailedException {   
+     
+   }	
+ 
+ }
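
For orientation, the descriptor parsed by readSecurityDescriptor above is a JSON document with a "roles" array (each role carries a name, an optional operationsAllowed list, and optional region, serverGroup and inherit entries) and a "users" array (each user carries a name, an optional password that defaults to the user name, and a list of role names). A minimal sketch of building the authorizer from such a document follows; the user name, role name and the "LIST_DS" permission string are illustrative only, the permission tokens must be whatever ResourceOperationCode.parse accepts, and checked exceptions are omitted.

    // Hypothetical descriptor: one role with one permission, one user holding that role.
    String json =
        "{"
      + "  \"roles\": [ { \"name\": \"admin\", \"operationsAllowed\": [ \"LIST_DS\" ] } ],"
      + "  \"users\": [ { \"name\": \"tushark\", \"roles\": [ \"admin\" ] } ]"
      + "}";
    JSONAuthorization authorization = new JSONAuthorization(json); // reads roles first, then users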

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c39f8a5f/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/security/ManagementInterceptor.java
----------------------------------------------------------------------
diff --cc gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/security/ManagementInterceptor.java
index aa972c5,c4e7dc5..aa5d194
--- a/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/security/ManagementInterceptor.java
+++ b/gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/security/ManagementInterceptor.java
@@@ -57,79 -38,46 +57,84 @@@ import javax.security.auth.Subject
  import org.apache.logging.log4j.Logger;
  
  import com.gemstone.gemfire.GemFireConfigException;
 -import com.gemstone.gemfire.internal.logging.LogService;
 +import com.gemstone.gemfire.cache.Cache;
 +import com.gemstone.gemfire.distributed.DistributedSystem;
 +import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 +import com.gemstone.gemfire.internal.ClassLoadUtil;
 +import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
 +import com.gemstone.gemfire.internal.lang.StringUtils;
 +import com.gemstone.gemfire.internal.logging.InternalLogWriter;
 +import com.gemstone.gemfire.management.internal.ManagementConstants;
  import com.gemstone.gemfire.security.AccessControl;
 +import com.gemstone.gemfire.security.AuthenticationFailedException;
  import com.gemstone.gemfire.security.Authenticator;
  
 -@SuppressWarnings("rawtypes")
 +/**
 + * 
 + * ManagementInterceptor is the central go-to place for all M&M clients' authentication and authorization
 + * requests.
 + * 
 + * @author tushark
 + * @since 9.0
 + * 
 + */
  public class ManagementInterceptor implements JMXAuthenticator {
  
 -	public static final String USER_NAME = "security-username";
 -	public static final String PASSWORD = "security-password";
 -	public static final String OBJECT_NAME_ACCESSCONTROL = "GemFire:service=AccessControl,type=Distributed";
 -	private MBeanServerWrapper mBeanServerForwarder;
 -	private Logger logger;  
++  // FIXME: Merged from GEODE-17. Are they necessary?
++  public static final String USER_NAME = "security-username";
++  public static final String PASSWORD = "security-password";
++  public static final String OBJECT_NAME_ACCESSCONTROL = "GemFire:service=AccessControl,type=Distributed";
+ 
 -	public ManagementInterceptor(Logger logger) {
 -		this.logger = logger;		
 -		this.mBeanServerForwarder = new MBeanServerWrapper(this);
 -		registerAccessContorlMbean();
 -		LogService.getLogger().info("Starting management interceptor");
 -	}
 +  private MBeanServerWrapper mBeanServerForwarder;
 +  private Logger logger;
 +  private ObjectName accessControlMBeanON;
 +  private Cache cache;
 +  private String authzFactoryName;
 +  private String postAuthzFactoryName;
 +  private String authenticatorFactoryName;
 +  private ConcurrentMap<Principal, AccessControl> cachedAuthZCallback;
 +  private ConcurrentMap<Principal, AccessControl> cachedPostAuthZCallback;
 +
 +  public ManagementInterceptor(Cache gemFireCacheImpl, Logger logger) {
 +    this.cache = gemFireCacheImpl;
 +    this.logger = logger;
 +    this.mBeanServerForwarder = new MBeanServerWrapper(this);
 +    DistributedSystem system = cache.getDistributedSystem();
 +    Properties sysProps = system.getProperties();
 +    this.authzFactoryName = sysProps.getProperty(DistributionConfig.SECURITY_CLIENT_ACCESSOR_NAME);
 +    this.postAuthzFactoryName = sysProps.getProperty(DistributionConfig.SECURITY_CLIENT_ACCESSOR_PP_NAME);
 +    this.authenticatorFactoryName = sysProps.getProperty(DistributionConfig.SECURITY_CLIENT_AUTHENTICATOR_NAME);
 +    this.cachedAuthZCallback = new ConcurrentHashMap<Principal, AccessControl>();
 +    this.cachedPostAuthZCallback = new ConcurrentHashMap<Principal, AccessControl>();
 +    registerAccessControlMbean();
 +    logger.info("Started Management interceptor on JMX connector");
 +  }
  
 -	private void registerAccessContorlMbean() {    
 +  /**
 +   * This method registers an AccessControlMBean which allows any remote JMX client (for example Pulse) to check
 +   * whether access is allowed for a given operation code.
 +   */
 +  private void registerAccessControlMbean() {
      try {
 -      com.gemstone.gemfire.management.internal.security.AccessControl acc = new com.gemstone.gemfire.management.internal.security.AccessControl(this);
 -      ObjectName name = new ObjectName(OBJECT_NAME_ACCESSCONTROL);
 +      com.gemstone.gemfire.management.internal.security.AccessControl acc = new com.gemstone.gemfire.management.internal.security.AccessControl(
 +          this);
 +      accessControlMBeanON = new ObjectName(ResourceConstants.OBJECT_NAME_ACCESSCONTROL);
        MBeanServer platformMBeanServer = ManagementFactory.getPlatformMBeanServer();
 -      Set<ObjectName> names = platformMBeanServer.queryNames(name, null);
 -      if(names.isEmpty()) {
 +      Set<ObjectName> names = platformMBeanServer.queryNames(accessControlMBeanON, null);
 +      if (names.isEmpty()) {
          try {
 -          platformMBeanServer.registerMBean(acc, name);
 -          logger.info("Registered AccessContorlMBean on " + name);
 +          platformMBeanServer.registerMBean(acc, accessControlMBeanON);
 +          logger.info("Registered AccessControlMBean on " + accessControlMBeanON);
          } catch (InstanceAlreadyExistsException e) {
 -          throw new GemFireConfigException("Error while configuring accesscontrol for jmx resource",e);
 +          throw new GemFireConfigException("Error while configuring accesscontrol for jmx resource", e);
          } catch (MBeanRegistrationException e) {
 -          throw new GemFireConfigException("Error while configuring accesscontrol for jmx resource",e);
 +          throw new GemFireConfigException("Error while configuring accesscontrol for jmx resource", e);
          } catch (NotCompliantMBeanException e) {
 -          throw new GemFireConfigException("Error while configuring accesscontrol for jmx resource",e);
 +          throw new GemFireConfigException("Error while configuring accesscontrol for jmx resource", e);
          }
        }
 -    } catch (MalformedObjectNameException e) {      
 -      e.printStackTrace();
 +    } catch (MalformedObjectNameException e) {
 +      throw new GemFireConfigException("Error while configuring accesscontrol for jmx resource", e);
      }
    }
  

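
The AccessControl MBean registered by registerAccessControlMbean() above is meant to be invoked by remote JMX clients such as Pulse. A condensed sketch of that client-side check, mirroring the invoke pattern used in AuthorizeOperationForMBeansIntegrationTest below; the host, port, credentials and operation-code string are placeholder values, and checked JMX exceptions are omitted.

    JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:1099/jmxrmi");
    Map<String, String[]> env = new HashMap<String, String[]>();
    env.put(JMXConnector.CREDENTIALS, new String[] { "tushark", "tushark" });
    MBeanServerConnection server = JMXConnectorFactory.connect(url, env).getMBeanServerConnection();
    ObjectName accessControl = new ObjectName("GemFire:service=AccessControl,type=Distributed");
    // "authorize" takes the string form of a ResourceOperationCode and returns a Boolean.
    Boolean allowed = (Boolean) server.invoke(accessControl, "authorize",
        new Object[] { "LIST_DS" }, new String[] { String.class.getName() });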
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c39f8a5f/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/GfshParserJUnitTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c39f8a5f/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/HeadlessGfsh.java
----------------------------------------------------------------------
diff --cc gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/HeadlessGfsh.java
index 0000000,f8b3fd4..03dcd7d
mode 000000,100644..100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/HeadlessGfsh.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/cli/HeadlessGfsh.java
@@@ -1,0 -1,374 +1,374 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one or more
+  * contributor license agreements.  See the NOTICE file distributed with
+  * this work for additional information regarding copyright ownership.
+  * The ASF licenses this file to You under the Apache License, Version 2.0
+  * (the "License"); you may not use this file except in compliance with
+  * the License.  You may obtain a copy of the License at
+  *
+  *      http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package com.gemstone.gemfire.management.internal.cli;
+ 
+ import com.gemstone.gemfire.management.internal.cli.shell.Gfsh;
+ import com.gemstone.gemfire.management.internal.cli.shell.GfshConfig;
+ import com.gemstone.gemfire.management.internal.cli.shell.jline.GfshUnsupportedTerminal;
+ import jline.console.ConsoleReader;
+ import org.springframework.shell.core.ExitShellRequest;
+ import org.springframework.shell.event.ShellStatus.Status;
+ 
+ import java.io.BufferedWriter;
+ import java.io.ByteArrayOutputStream;
+ import java.io.File;
+ import java.io.FileDescriptor;
+ import java.io.FileInputStream;
+ import java.io.IOException;
+ import java.io.OutputStreamWriter;
+ import java.io.PrintStream;
+ import java.io.PrintWriter;
+ import java.io.Writer;
+ import java.util.Properties;
+ import java.util.concurrent.CountDownLatch;
+ import java.util.concurrent.LinkedBlockingQueue;
+ import java.util.concurrent.TimeUnit;
+ import java.util.concurrent.locks.Condition;
+ import java.util.concurrent.locks.Lock;
+ import java.util.concurrent.locks.ReentrantLock;
+ import java.util.logging.Level;
+ 
+ 
+ /**
+  * This is a headless shell which can be used to submit commands and get the command result. It is used for command
+  * testing but can also be used to programmatically send commands that operate on GemFire distributed
+  * systems. TODO : Merge HeadlessGfsh and HeadlessGfshShell TODO : Provide constructor for optionally specifying
+  * GfshConfig to provide logDirectory and logLevel
+  *
+  * @author tushark
+  */
+ @SuppressWarnings("rawtypes")
+ public class HeadlessGfsh implements ResultHandler {
+ 
+   public static final String ERROR_RESULT = "_$_ERROR_RESULT";
+ 
+   private HeadlessGfshShell shell = null;
+   private LinkedBlockingQueue queue = new LinkedBlockingQueue<>();
+   private long timeout = 20;
+   public String outputString = null;
+ 
+   public HeadlessGfsh(String name, int timeout) throws ClassNotFoundException, IOException {
+     this(name, timeout, null);
+   }
+ 
+   public HeadlessGfsh(String name, int timeout, Properties envProps) throws ClassNotFoundException, IOException {
+     this.timeout = timeout;
+     System.setProperty("jline.terminal", GfshUnsupportedTerminal.class.getName());
+     this.shell = new HeadlessGfshShell(name, this);
+     this.shell.setEnvProperty(Gfsh.ENV_APP_RESULT_VIEWER, "non-basic");
+ 
+     if (envProps != null) {
+       for (String key : envProps.stringPropertyNames()) {
+         this.shell.setEnvProperty(key, envProps.getProperty(key));
+       }
+     }
+ 
+     // This allows us to avoid race conditions during startup - in particular an NPE on the ConsoleReader which is
+     // created in a separate thread during start()
+     CountDownLatch shellStarted = new CountDownLatch(1);
+     this.shell.addShellStatusListener((oldStatus, newStatus) -> {
+       if (newStatus.getStatus() == Status.STARTED) {
+         shellStarted.countDown();
+       }
+     });
+ 
+     this.shell.start();
+     this.setThreadLocalInstance();
+ 
+     try {
+       shellStarted.await();
+     } catch (InterruptedException e) {
+       e.printStackTrace(System.out);
+     }
+   }
+ 
+   public void setThreadLocalInstance() {
+     shell.setThreadLocalInstance();
+   }
+ 
+   //TODO : Have non-blocking method also where we move executeCommand call to separate thread-pool
+   public boolean executeCommand(String command) {
+     boolean status = false;
+     try {
+       outputString = null;
+       status = shell.executeScriptLine(command);
+     } catch (Exception e) {
+       outputString = e.getMessage();
+     }
+     return status;
+   }
+ 
 -  int getCommandExecutionStatus() {
++  public int getCommandExecutionStatus() {
+     return shell.getCommandExecutionStatus();
+   }
+ 
+   @SuppressWarnings("unchecked")
+   @Override
+   public void handleExecutionResult(Object result, String sysout) {
+     queue.add(result);
+     outputString = sysout;
+   }
+ 
+   public Object getResult() throws InterruptedException {
+     // Don't wait when some command calls gfsh.stop();
+     if (shell.stopCalledThroughAPI) return null;
+     try {
+       Object result = queue.poll(timeout, TimeUnit.SECONDS);
+       queue.clear();
+       return result;
+     } catch (InterruptedException e) {
+       e.printStackTrace();
+       throw e;
+     }
+   }
+ 
+   public void clear() {
+     queue.clear();
+     outputString = null;
+   }
+ 
+   public void clearEvents() {
+     queue.clear();
+     outputString = null;
+   }
+ 
+   public void terminate() {
+     shell.terminate();
+   }
+ 
+   public boolean isConnectedAndReady() {
+     return shell.isConnectedAndReady();
+   }
+ 
+   public String getErrorString() {
+     return shell.errorString;
+   }
+ 
+   public boolean hasError() {
+     return shell.hasError();
+   }
+ 
+   public String getError() {
+     return shell.errorString;
+   }
+ 
+   public static class HeadlessGfshShell extends Gfsh {
+ 
+     private ResultHandler handler = null;
+     private final Lock lock = new ReentrantLock();
+     private final Condition endOfShell = lock.newCondition();
+     private ByteArrayOutputStream output = null;
+     private String errorString = null;
+     private boolean hasError = false;
+     boolean stopCalledThroughAPI = false;
+ 
+     protected HeadlessGfshShell(String testName, ResultHandler handler) throws ClassNotFoundException, IOException {
+       super(false, new String[]{}, new HeadlessGfshConfig(testName));
+       this.handler = handler;
+     }
+ 
+     public void setThreadLocalInstance() {
+       gfshThreadLocal.set(this);
+     }
+ 
+     protected void handleExecutionResult(Object result) {
+       if (!result.equals(ERROR_RESULT)) {
+         super.handleExecutionResult(result);
+         handler.handleExecutionResult(result, output.toString());
+         output.reset();
+       } else {
+         //signal waiting queue with error condition with empty output
+         output.reset();
+         handler.handleExecutionResult(result, output.toString());
+       }
+     }
+ 
+     int getCommandExecutionStatus() {
+       return getLastExecutionStatus();
+     }
+ 
+     public void terminate() {
+       closeShell();
+       stopPromptLoop();
+       stop();
+     }
+ 
+     public void stop() {
+       stopCalledThroughAPI = true;
+     }
+ 
+     private void stopPromptLoop() {
+       lock.lock();
+       try {
+         endOfShell.signalAll();
+       } finally {
+         lock.unlock();
+       }
+     }
+ 
+     public String getErrorString() {
+       return errorString;
+     }
+ 
+     public boolean hasError() {
+       return hasError;
+     }
+ 
+     /**
+      * We override this method just to fool the runner thread into reading from nothing. It waits for Condition endOfShell,
+      * which is signalled when terminate is called. This achieves a clean shutdown of the runner thread.
+      */
+     @Override
+     public void promptLoop() {
+       lock.lock();
+       try {
+         while (true) {
+           try {
+             endOfShell.await();
+           } catch (InterruptedException e) {
+             //e.printStackTrace();
+           }
+           this.exitShellRequest = ExitShellRequest.NORMAL_EXIT;
+           setShellStatus(Status.SHUTTING_DOWN);
+           break;
+         }
+       } finally {
+         lock.unlock();
+       }
+     }
+ 
+     private static void setGfshOutErr(PrintStream outToUse) {
+       Gfsh.gfshout = outToUse;
+       Gfsh.gfsherr = outToUse;
+     }
+ 
+     /**
+      * This prints out error messages when exceptions occur in the shell. Capture it, set the error flag to true, and send
+      * ERROR_RESULT on the queue to signal the thread waiting for the CommandResult.
+      */
+     @Override
+     public void logWarning(String message, Throwable t) {
+       super.logWarning(message, t);
+       errorString = message;
+       hasError = true;
+       //signal waiting queue with error condition
+       handleExecutionResult(ERROR_RESULT);
+     }
+ 
+     /**
+      * This prints out error messages when exceptions occur in the shell. Capture it, set the error flag to true, and send
+      * ERROR_RESULT on the queue to signal the thread waiting for the CommandResult.
+      */
+     @Override
+     public void logSevere(String message, Throwable t) {
+       super.logSevere(message, t);
+       errorString = message;
+       hasError = true;
+       //signal waiting queue with error condition
+       handleExecutionResult(ERROR_RESULT);
+     }
+ 
+     /**
+      * Set up the console-reader to capture shell output
+      */
+     @Override
+     protected ConsoleReader createConsoleReader() {
+       try {
+         output = new ByteArrayOutputStream(1024 * 10);
+         PrintStream sysout = new PrintStream(output);
+         setGfshOutErr(sysout);
+         return new ConsoleReader(new FileInputStream(FileDescriptor.in), sysout);
+       } catch (IOException e) {
+         throw new RuntimeException(e);
+       }
+     }
+   }
+ 
+ 
+   /**
+    * HeadlessGfshConfig for tests. Taken from TestableGfsh
+    */
+   static class HeadlessGfshConfig extends GfshConfig {
+     {
+       // set vm as a gfsh vm
+       CliUtil.isGfshVM = true;
+     }
+ 
+     private File parentDir;
+     private String fileNamePrefix;
+     private String name;
+     private String generatedHistoryFileName = null;
+ 
+     public HeadlessGfshConfig(String name) {
+       this.name = name;
+ 
+       if (isDUnitTest(this.name)) {
+         fileNamePrefix = this.name;
+       } else {
+         fileNamePrefix = "non-hydra-client";
+       }
+ 
+       parentDir = new File("gfsh_files");
+       parentDir.mkdirs();
+     }
+ 
+     private static boolean isDUnitTest(String name) {
+       boolean isDUnitTest = false;
+       if (name != null) {
+         String[] split = name.split("_");
+         if (split.length != 0 && split[0].endsWith("DUnitTest")) {
+           isDUnitTest = true;
+         }
+       }
+       return isDUnitTest;
+     }
+ 
+     @Override
+     public String getLogFilePath() {
+       return new File(parentDir, getFileNamePrefix() + "-gfsh.log").getAbsolutePath();
+     }
+ 
+     private String getFileNamePrefix() {
+       String timeStamp = new java.sql.Time(System.currentTimeMillis()).toString();
+       timeStamp = timeStamp.replace(':', '_');
+       return fileNamePrefix + "-" + timeStamp;
+     }
+ 
+     @Override
+     public String getHistoryFileName() {
+       if (generatedHistoryFileName == null) {
+         String fileName = new File(parentDir, (getFileNamePrefix() + "-gfsh.history")).getAbsolutePath();
+         generatedHistoryFileName = fileName;
+         return fileName;
+       } else {
+         return generatedHistoryFileName;
+       }
+     }
+ 
+     @Override
+     public boolean isTestConfig() {
+       return true;
+     }
+ 
+     @Override
+     public Level getLogLevel() {
+       // Keep log level fine for tests
+       return Level.FINE;
+     }
+   }
+ 
+ }
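
A condensed usage sketch for tests, assuming a JMX manager is reachable; the test name and connect endpoint are placeholders and checked exceptions are simply propagated.

    public static void exampleUsage() throws Exception {
      // Construct a headless shell with a 30 second result timeout.
      HeadlessGfsh gfsh = new HeadlessGfsh("SomeDUnitTest_testCase", 30);
      gfsh.executeCommand("connect --jmx-manager=localhost[1099]");
      Object result = gfsh.getResult();   // blocks up to the configured timeout
      if (gfsh.hasError()) {
        System.out.println(gfsh.getError());
      }
      gfsh.terminate();
    }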

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c39f8a5f/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/security/AuthorizeOperationForMBeansIntegrationTest.java
----------------------------------------------------------------------
diff --cc gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/security/AuthorizeOperationForMBeansIntegrationTest.java
index 0000000,d63947b..c3f1658
mode 000000,100644..100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/security/AuthorizeOperationForMBeansIntegrationTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/security/AuthorizeOperationForMBeansIntegrationTest.java
@@@ -1,0 -1,323 +1,325 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one or more
+  * contributor license agreements.  See the NOTICE file distributed with
+  * this work for additional information regarding copyright ownership.
+  * The ASF licenses this file to You under the Apache License, Version 2.0
+  * (the "License"); you may not use this file except in compliance with
+  * the License.  You may obtain a copy of the License at
+  *
+  *      http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package com.gemstone.gemfire.management.internal.security;
+ 
+ import static org.junit.Assert.*;
+ import static org.assertj.core.api.Assertions.assertThat;
+ 
+ import java.io.Serializable;
+ import java.security.Principal;
+ import java.util.HashMap;
+ import java.util.Map;
+ import java.util.Properties;
+ 
+ import javax.management.JMX;
+ import javax.management.MBeanServerConnection;
+ import javax.management.ObjectName;
+ import javax.management.remote.JMXConnector;
+ import javax.management.remote.JMXConnectorFactory;
+ import javax.management.remote.JMXServiceURL;
+ 
+ import org.junit.After;
+ import org.junit.Before;
+ import org.junit.Rule;
+ import org.junit.Test;
+ import org.junit.contrib.java.lang.system.RestoreSystemProperties;
+ import org.junit.experimental.categories.Category;
+ import org.junit.rules.TestName;
+ 
+ import com.gemstone.gemfire.LogWriter;
+ import com.gemstone.gemfire.cache.Cache;
+ import com.gemstone.gemfire.cache.CacheFactory;
+ import com.gemstone.gemfire.cache.operations.OperationContext;
+ import com.gemstone.gemfire.distributed.DistributedMember;
+ import com.gemstone.gemfire.distributed.DistributedSystem;
+ import com.gemstone.gemfire.distributed.internal.DistributionConfig;
+ import com.gemstone.gemfire.internal.AvailablePort;
+ import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
+ import com.gemstone.gemfire.management.DistributedSystemMXBean;
+ import com.gemstone.gemfire.management.MemberMXBean;
+ import com.gemstone.gemfire.management.internal.MBeanJMXAdapter;
+ import com.gemstone.gemfire.management.internal.security.ResourceOperationContext.ResourceOperationCode;
+ import com.gemstone.gemfire.security.AccessControl;
+ import com.gemstone.gemfire.security.AuthenticationFailedException;
+ import com.gemstone.gemfire.security.Authenticator;
+ import com.gemstone.gemfire.security.NotAuthorizedException;
+ import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
+ 
+ /**
+  * Tests <code>JSONAuthorization.authorizeOperation(...)</code> with GemFire MBeans.
+  */
+ @Category(IntegrationTest.class)
+ @SuppressWarnings("deprecation")
+ public class AuthorizeOperationForMBeansIntegrationTest {
+ 
+   private GemFireCacheImpl cache;
+   private DistributedSystem ds;
+   private int jmxManagerPort = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
+   private JMXConnector jmxConnector;
+   private MBeanServerConnection mbeanServer;
+ 
+   @Rule
+   public TestName testName = new TestName();
+   
+   @Rule
+   public RestoreSystemProperties restoreSystemProperties = new RestoreSystemProperties();
+ 
+   @Before
+   public void setUp() throws Exception {
+     System.setProperty("resource-auth-accessor", TestAccessControl.class.getName());
+     System.setProperty("resource-authenticator", TestAuthenticator.class.getName());
+     
+     Properties properties = new Properties();
+     properties.put("name", this.testName.getMethodName());
+     properties.put(DistributionConfig.LOCATORS_NAME, "");
+     properties.put(DistributionConfig.MCAST_PORT_NAME, "0");
+     properties.put(DistributionConfig.JMX_MANAGER_NAME, "true");
+     properties.put(DistributionConfig.JMX_MANAGER_START_NAME, "true");
+     properties.put(DistributionConfig.JMX_MANAGER_PORT_NAME, String.valueOf(this.jmxManagerPort));
+     properties.put(DistributionConfig.HTTP_SERVICE_PORT_NAME, "0");
+     
+     this.ds = DistributedSystem.connect(properties);
+     this.cache = (GemFireCacheImpl) CacheFactory.create(ds);
+ 
+     this.jmxConnector = getGemfireMBeanServer(this.jmxManagerPort, "tushark", "tushark");
+     this.mbeanServer = this.jmxConnector.getMBeanServerConnection();
+   }
+ 
+   @After
+   public void tearDown() throws Exception {
+     if (this.jmxConnector != null) {
+       this.jmxConnector.close();
+       this.jmxConnector = null;
+     }
+     if (this.cache != null) {
+       this.cache.close();
+       this.cache = null;
+     }
+     if (this.ds != null) {
+       this.ds.disconnect();
+       this.ds = null;
+     }
+   }
+ 
+   /**
+    * This is testing a sampling of operations for DistributedSystemMXBean and AccessControlMXBean
+    */
+   @Test
+   public void operationsShouldBeCoveredByAuthorization() throws Exception {
+     ObjectName objectName = MBeanJMXAdapter.getDistributedSystemName();
+     
+     checkListCacheServerObjectNames(objectName);
+     checkAlertLevel(objectName);
+     checkAccessControlMXBean();
+     checkBackUpMembers(objectName);
+     checkShutDownAllMembers(objectName);
+     checkCLIContext(this.mbeanServer);
+   }
+   
+   private void checkListCacheServerObjectNames(final ObjectName objectName) throws Exception {
+     Object cacheServerObjectNames = this.mbeanServer.invoke(objectName, "listCacheServerObjectNames", null, null);
+     assertThat(cacheServerObjectNames).isNotNull().isInstanceOf(ObjectName[].class);
+     assertThat((ObjectName[])cacheServerObjectNames).hasSize(0); // this isn't really testing much since there are no CacheServers
+   }
+   
+   private void checkAlertLevel(final ObjectName objectName) throws Exception {
+     // attribute AlertLevel
+     String oldLevel = (String) this.mbeanServer.getAttribute(objectName, "AlertLevel");
+     assertThat(oldLevel).isEqualTo("severe");
+     
+     // operation changeAlertLevel
+     this.mbeanServer.invoke(
+         objectName, 
+         "changeAlertLevel", 
+         new Object[] { "warning" },
+         new String[] { String.class.getName() }
+     );
+     String newLevel = (String) this.mbeanServer.getAttribute(objectName, "AlertLevel");
+     assertThat(newLevel).isEqualTo("warning");
+   }
+   
+   private void checkAccessControlMXBean() throws Exception {
+     final ResourceOperationCode resourceOperationCodes[] = { 
 -        ResourceOperationCode.LIST_DS, 
 -        ResourceOperationCode.READ_DS, 
 -        ResourceOperationCode.CHANGE_ALERT_LEVEL_DS,
 -        ResourceOperationCode.LOCATE_ENTRY_REGION 
++        ResourceOperationCode.LIST_DS,
++// FIXME: what should this be?
++//        ResourceOperationCode.READ_DS,
++        ResourceOperationCode.CHANGE_ALERT_LEVEL,
++        ResourceOperationCode.LOCATE_ENTRY
+     };
+     
+     ObjectName objectName = new ObjectName(ManagementInterceptor.OBJECT_NAME_ACCESSCONTROL);
+     for (ResourceOperationCode resourceOperationCode : resourceOperationCodes) {
+       boolean isAuthorizedForOperation = (Boolean) this.mbeanServer.invoke(
+           objectName, 
+           "authorize", 
+           new Object[] { resourceOperationCode.toString() },
+           new String[] { String.class.getName() }
+       );
+       assertThat(isAuthorizedForOperation).isTrue();
+     }
+ 
+     boolean isAuthorizedForAllOperations = (Boolean) mbeanServer.invoke(
+         objectName, 
+         "authorize", 
 -        new Object[] { ResourceOperationCode.ADMIN_DS.toString() },
++        new Object[] { ResourceOperationCode.ADMIN.toString() },
+         new String[] { String.class.getName() }
+     );
+     assertThat(isAuthorizedForAllOperations).isFalse();
+   }
+ 
+   private void checkBackUpMembers(final ObjectName objectName) throws Exception {
+     try {
+       this.mbeanServer.invoke(
+           objectName, 
+           "backupAllMembers", 
+           new Object[] { "targetPath", "baseLinePath" },
+           new String[] { String.class.getCanonicalName(), String.class.getCanonicalName() });
+       fail("Should not be authorized for backupAllMembers");
+     } catch (SecurityException expected) {
+       // expected
+     }
+   }
+   
+   private void checkShutDownAllMembers(final ObjectName objectName) throws Exception {
+     try {
+       this.mbeanServer.invoke(
+           objectName, 
+           "shutDownAllMembers", 
+           null, 
+           null
+       );
+       fail("Should not be authorized for shutDownAllMembers");
+     } catch (SecurityException expected) {
+       // expected
+     }
+   }
+   
+   private void checkCLIContext(MBeanServerConnection mbeanServer) {
+     ObjectName objectName = MBeanJMXAdapter.getDistributedSystemName();
+     DistributedSystemMXBean proxy = JMX.newMXBeanProxy(mbeanServer, objectName, DistributedSystemMXBean.class);
+     ObjectName managerMemberObjectName = proxy.getMemberObjectName();
+     MemberMXBean memberMXBeanProxy = JMX.newMXBeanProxy(mbeanServer, managerMemberObjectName, MemberMXBean.class);
+ 
+     Map<String, String> map = new HashMap<String, String>();
+     map.put("APP", "GFSH");
+     String result = memberMXBeanProxy.processCommand("locate entry --key=k1 --region=/region1", map);
+     
+     assertThat(result).isNotNull().doesNotContain(SecurityException.class.getSimpleName());
+   }
+ 
+   private JMXConnector getGemfireMBeanServer(final int port, final String user, final String pwd) throws Exception {
+     JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://:" + port + "/jmxrmi");
+     if (user != null) { // TODO: why isn't this deterministic? need to create 2nd test without a user?
+       Map<String, String[]> env = new HashMap<String, String[]>();
+       String[] creds = { user, pwd };
+       env.put(JMXConnector.CREDENTIALS, creds);
+       JMXConnector jmxc = JMXConnectorFactory.connect(url, env);
+       return jmxc;
+     } else {
+       JMXConnector jmxc = JMXConnectorFactory.connect(url, null);
+       return jmxc;
+     }
+   }
+ 
+   /**
+    * Fake Principal for testing.
+    */
+   @SuppressWarnings("serial")
+   public static class TestUsernamePrincipal implements Principal, Serializable {
+ 
+     private final String userName;
+ 
+     public TestUsernamePrincipal(final String userName) {
+       this.userName = userName;
+     }
+ 
+     @Override
+     public String getName() {
+       return this.userName;
+     }
+ 
+     @Override
+     public String toString() {
+       return this.userName;
+     }
+   }
+ 
+   /**
+    * Fake Authenticator for testing.
+    */
+   public static class TestAuthenticator implements Authenticator {
+ 
+     @Override
+     public void close() {
+     }
+ 
+     @Override
+     public void init(final Properties securityProps, final LogWriter systemLogger, final LogWriter securityLogger) throws AuthenticationFailedException {
+     }
+ 
+     @Override
+     public Principal authenticate(final Properties props, final DistributedMember member) throws AuthenticationFailedException {
+       String user = props.getProperty(ManagementInterceptor.USER_NAME);
+       String pwd = props.getProperty(ManagementInterceptor.PASSWORD);
+       if (user != null && !user.equals(pwd) && !"".equals(user)) {
+         throw new AuthenticationFailedException("Wrong username/password");
+       }
+       return new TestUsernamePrincipal(user);
+     }
+   }
+ 
+   /**
+    * Fake AccessControl for testing.
+    */
+   public static class TestAccessControl implements AccessControl {
+ 
+     private Principal principal;
+ 
+     @Override
+     public void close() {
+     }
+ 
+     @Override
+     public void init(final Principal principal, final DistributedMember remoteMember, final Cache cache) throws NotAuthorizedException {
+       this.principal = principal;
+     }
+ 
+     @Override
+     public boolean authorizeOperation(String regionName, OperationContext context) {
+       if (principal.getName().equals("tushark")) {
+         ResourceOperationCode authorizedOps[] = { 
+             ResourceOperationCode.LIST_DS, 
 -            ResourceOperationCode.READ_DS, 
 -            ResourceOperationCode.CHANGE_ALERT_LEVEL_DS,
 -            ResourceOperationCode.LOCATE_ENTRY_REGION 
++// FIXME: Is this necessary?
++//            ResourceOperationCode.READ_DS,
++            ResourceOperationCode.CHANGE_ALERT_LEVEL,
++            ResourceOperationCode.LOCATE_ENTRY
+         };
+ 
+         ResourceOperationContext ctx = (ResourceOperationContext) context;
+         boolean found = false;
+         for (ResourceOperationCode code : authorizedOps) {
+           if (ctx.getResourceOperationCode().equals(code)) {
+             found = true;
+             break;
+           }
+         }
+         return found;
+       }
+       return false;
+     }
+   }
+ }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/c39f8a5f/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/security/CLISecurityDUnitTest.java
----------------------------------------------------------------------
diff --cc gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/security/CLISecurityDUnitTest.java
index 167b3dd,0000000..02912e8
mode 100644,000000..100644
--- a/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/security/CLISecurityDUnitTest.java
+++ b/gemfire-core/src/test/java/com/gemstone/gemfire/management/internal/security/CLISecurityDUnitTest.java
@@@ -1,594 -1,0 +1,594 @@@
 +package com.gemstone.gemfire.management.internal.security;
 +
 +import java.io.File;
 +import java.io.FileWriter;
 +import java.io.IOException;
 +import java.util.Enumeration;
 +import java.util.Properties;
 +
 +import com.gemstone.gemfire.distributed.internal.DistributionConfig;
++import com.gemstone.gemfire.test.dunit.LogWriterUtils;
 +
 +public class CLISecurityDUnitTest extends CommandTestBase {
 +
 +  private static final long serialVersionUID = 1L;
 +
 +  public static final String ACCESS_DENIED = "Access Denied";
 +
 +  protected File tempSecFile;
 +
 +  protected String tempFilePath;
 +
 +  public CLISecurityDUnitTest(String name) {
 +    super(name);
 +
 +  }
 +
 +  protected void writeToLog(String text, String resultAsString) {
-     getLogWriter().info(testName + "\n");
-     getLogWriter().info(resultAsString);
++    LogWriterUtils.getLogWriter().info(getTestMethodName() + "\n");
++    LogWriterUtils.getLogWriter().info(resultAsString);
 +  }
 +
 +  public void setUp() throws Exception {
 +    super.setUp();
 +    createTempFile();
 +  }
 +
 +  @Override
-   public void tearDown2() throws Exception {
++  public void preTearDownCacheTestCase() throws Exception {
 +    deleteTempFile();
-     super.tearDown2();
 +  }
 +
 +  private void createTempFile() {
 +
 +    try {
 +      File current = new java.io.File(".");
 +      tempSecFile = File.createTempFile("gemfire", "sec", current);
 +      tempSecFile.deleteOnExit();
 +      tempFilePath = tempSecFile.getCanonicalPath();
 +    } catch (IOException e) {
 +      fail("could not create temp file " + e);
 +    }
 +  }
 +
 +  private void deleteTempFile() {
 +
 +    try {
 +      tempSecFile.delete();
 +    } catch (Exception e) {
 +      fail("could not delete temp file " + e);
 +    }
 +  }
 +
 +  protected void writeToFile(Properties props) {
 +
 +    try {
 +
 +      FileWriter fw = new FileWriter(tempSecFile, true);
 +      Enumeration en = props.keys();
 +      while (en.hasMoreElements()) {
 +        String key = (String) en.nextElement();
 +        String val = props.getProperty(key);
 +        String line = key + "=" + val;
 +        fw.append(line);
 +        fw.append("\n");
 +      }
 +      fw.flush();
 +
 +    } catch (IOException x) {
 +      fail("could not write to temp file " + x);
 +    }
 +
 +  }
 +
 +  public class Assertor {
 +
 +    private String errString;
 +
 +    public Assertor() {
 +      this.errString = null;
 +    }
 +
 +    public Assertor(String errString) {
 +      this.errString = errString;
 +    }
 +
 +    public void assertTest() {
 +      boolean hasErr = getDefaultShell().hasError();
 +      // getLogWriter().info(testName + "hasErr = " +hasErr);
 +      if (hasErr) {
 +        String error = getDefaultShell().getError();
 +        if (errString != null) {
 +          assertTrue(error.contains(errString));
 +        } else {
 +          fail("Command should have passed but failed with error = " + error);
 +        }
 +
 +      } else {
 +        if (errString != null) {
 +          fail("Command should have failed with error " + errString + " but it passed");
 +        }
 +      }
 +
 +    }
 +  }
 +
 +  protected void createDefaultSetup(Properties props, String propertyFile) {
 +    this.securityFile = propertyFile;
 +    createDefaultSetup(props);
 +  }
 +
 +  private void securityCheckForCommand(String command, String propertyFile, Assertor assertor) {
 +    Properties props = new Properties();
 +    props.setProperty(DistributionConfig.SECURITY_CLIENT_AUTHENTICATOR_NAME,
 +        "com.gemstone.gemfire.management.internal.security.CustomAuthenticator.create");
 +    props.setProperty(DistributionConfig.SECURITY_CLIENT_ACCESSOR_NAME,
 +        "com.gemstone.gemfire.management.internal.security.CustomAccessControl.create");
 +    createDefaultSetup(props, propertyFile);
 +    try {
 +      executeCommandWithoutClear(command);
 +      assertor.assertTest();
 +    } catch (Exception e) {
 +      fail("Test failed with exception " + e);
 +    } finally {
 +      getDefaultShell().clearEvents();
 +      destroyDefaultSetup();
 +    }
 +
 +  }
 +
 +  protected Properties getSecuredProperties(int authCode) {
 +    Properties props = new Properties();
 +    props.put(CommandBuilders.SEC_USER_NAME, "AUTHC_" + authCode);
 +    props.put(CommandBuilders.SEC_USER_PWD, "AUTHC_" + authCode);
 +    return props;
 +  }
 +
 +  protected Assertor getAssertor() {
 +    return new Assertor();
 +  }
 +
 +  /**
 +   * The test below verifies that the framework reports the proper error.
 +   */
 +
 +  /*
 +   * public void _testCreateIndexParentOP() { String commandString =
 +   * CommandBuilders.CREATE_INDEX();
 +   * writeToFile(CommandBuilders.getSecuredAdminProperties
 +   * (CommandBuilders.OP_CREATE_INDEX)); securityCheckForCommand(commandString,
 +   * tempFilePath, getAssertor()); }
 +   */
 +
 +  public void test_ALTER_RUNTIME() {
 +    String commandString = CommandBuilders.ALTER_RUNTIME();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_ALTER_RUNTIME));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  
 +  public void test_CHANGE_LOGLEVEL() {
 +    String commandString = CommandBuilders.CHANGE_LOGLEVEL();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_CHANGE_ALERT_LEVEL));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  // This test is disabled because the only access level required for this command is LIST_DS,
 +  // which is the lowest access level.
 +  // Any test marked with _test is not really required here.
 +  public void _test_DESCRIBE_CONFIG() {
 +    String commandString = CommandBuilders.DESCRIBE_CONFIG();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_LIST_DS));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_EXPORT_CONFIG() {
 +    String commandString = CommandBuilders.EXPORT_CONFIG();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_EXPORT_CONFIG));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_EXPORT_SHARED_CONFIG() {
 +    String commandString = CommandBuilders.EXPORT_SHARED_CONFIG();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_EXPORT_CONFIG));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_IMPORT_SHARED_CONFIG() throws IOException {
 +
 +    String commandString = CommandBuilders.IMPORT_SHARED_CONFIG();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_IMPORT_CONFIG));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void _test_STATUS_SHARED_CONFIG() {
 +    String commandString = CommandBuilders.STATUS_SHARED_CONFIG();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_LIST_DS));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_EXPORT_DATA() {
 +    String commandString = CommandBuilders.EXPORT_DATA();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_EXPORT_DATA));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +  
 +  public void test_GET() {
 +    String commandString = CommandBuilders.GET();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_GET));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_IMPORT_DATA() {
 +    String commandString = CommandBuilders.IMPORT_DATA();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_IMPORT_DATA));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +  
 +  public void test_PUT() {
 +    String commandString = CommandBuilders.PUT();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_PUT));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +  
 +  public void test_QUERY(){
 +    String commandString = CommandBuilders.QUERY();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_QUERY));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_REMOVE(){
 +    String commandString = CommandBuilders.REMOVE();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_REMOVE));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +  
 +  public void test_LOCATE_ENTRY() {
 +    String commandString = CommandBuilders.LOCATE_ENTRY();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_LOCATE_ENTRY));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_DEPLOY() throws IOException {
 +    String commandString = CommandBuilders.DEPLOY();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_DEPLOY));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void _test_LIST_DEPLOYED() {
 +    String commandString = CommandBuilders.LIST_DEPLOYED();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_LIST_DS));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_UNDEPLOY() {
 +    String commandString = CommandBuilders.UNDEPLOY();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_UNDEPLOY));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void ISSUE_NO_OP_CODE_test_ALTER_DISK_STORE() {
 +    String commandString = CommandBuilders.ALTER_DISK_STORE();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_UNDEPLOY));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_BACKUP_DISK_STORE() {
 +    String commandString = CommandBuilders.BACKUP_DISK_STORE();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_BACKUP_DISKSTORE));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_COMPACT_DISKSTORE() {
 +    String commandString = CommandBuilders.COMPACT_DISK_STORE();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_COMPACT_DISKSTORE));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_CREATE_DISK_STORE() {
 +    String commandString = CommandBuilders.CREATE_DISK_STORE();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_CREATE_DISKSTORE));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void _test_DESCRIBE_DISK_STORE() {
 +    String commandString = CommandBuilders.DESCRIBE_DISK_STORE();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_LIST_DS));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_DESTROY_DISK_STORE() {
 +    String commandString = CommandBuilders.DESTROY_DISK_STORE();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_DESTROY_DISKSTORE));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void _test_LIST_DISK_STORE() {
 +    String commandString = CommandBuilders.LIST_DISK_STORE();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_LIST_DS));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_REVOKE_MISSING_DISK_STORE() {
 +    String commandString = CommandBuilders.REVOKE_MISSING_DISK_STORE();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_REVOKE_MISSING_DISKSTORE));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void ISSUE_test_SHOW_MISSING_DISK_STORE() {
 +    String commandString = CommandBuilders.SHOW_MISSING_DISK_STORE();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_SHOW_MISSING_DISKSTORES));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void _test_LIST_DURABLE_CQS() {
 +    String commandString = CommandBuilders.LIST_DURABLE_CQS();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_LIST_DS));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_CLOSE_DURABLE_CQS() {
 +    String commandString = CommandBuilders.CLOSE_DURABLE_CQS();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_CLOSE_DURABLE_CQ));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +  
 +  public void ISSUE_test_COUNT_DURABLE_CQ_EVENTS() {
 +    String commandString = CommandBuilders.COUNT_DURABLE_CQ_EVENTS();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_SHOW_SUBSCRIPTION_QUEUE_SIZE));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +  
 +  public void test_CLOSE_DURABLE_CLIENTS() {
 +    String commandString = CommandBuilders.CLOSE_DURABLE_CLIENTS();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_CLOSE_DURABLE_CLIENT));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_DESTROY_FUNCTION() {
 +    String commandString = CommandBuilders.DESTROY_FUNCTION();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_DESTROY_FUNCTION));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_EXECUTE_FUNCTION() {
 +    String commandString = CommandBuilders.EXECUTE_FUNCTION();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_EXECUTE_FUNCTION));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void _test_LIST_FUNCTION() {
 +    String commandString = CommandBuilders.LIST_FUNCTION();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_LIST_DS));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_CREATE_ASYNC_EVENT_QUEUE() {
 +    String commandString = CommandBuilders.CREATE_ASYNC_EVENT_QUEUE();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_CREATE_AEQ));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_CREATE_GATEWAYRECEIVER() {
 +    String commandString = CommandBuilders.CREATE_GATEWAYRECEIVER();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_CREATE_GW_RECEIVER));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_CREATE_GATEWAYSENDER() {
 +    String commandString = CommandBuilders.CREATE_GATEWAYSENDER();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_CREATE_GW_SENDER));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void _test_LIST_ASYNC_EVENT_QUEUES() {
 +    String commandString = CommandBuilders.LIST_ASYNC_EVENT_QUEUES();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_LIST_DS));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void _test_LIST_GATEWAY() {
 +    String commandString = CommandBuilders.LIST_GATEWAY();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_LIST_DS));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_LOAD_BALANCE_GW_SENDER() {
 +    String commandString = CommandBuilders.LOAD_BALANCE_GW_SENDER();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_LOAD_BALANCE_GW_SENDER));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_PAUSE_GATEWAYSENDER() {
 +    String commandString = CommandBuilders.PAUSE_GATEWAYSENDER();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_PAUSE_GW_SENDER));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_RESUME_GATEWAYSENDER() {
 +    String commandString = CommandBuilders.RESUME_GATEWAYSENDER();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_RESUME_GW_SENDER));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_START_GATEWAYRECEIVER() {
 +    String commandString = CommandBuilders.START_GATEWAYRECEIVER();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_START_GW_RECEIVER));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_START_GATEWAYSENDER() {
 +    String commandString = CommandBuilders.START_GATEWAYSENDER();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_START_GW_SENDER));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void _test_STATUS_GATEWAYSENDER() {
 +    String commandString = CommandBuilders.STATUS_GATEWAYSENDER();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_LIST_DS));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void _test_STATUS_GATEWAYRECEIVER() {
 +    String commandString = CommandBuilders.STATUS_GATEWAYRECEIVER();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_LIST_DS));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_STOP_GATEWAYRECEIVER() {
 +    String commandString = CommandBuilders.STOP_GATEWAYRECEIVER();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_STOP_GW_RECEIVER));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_STOP_GATEWAYSENDER() {
 +    String commandString = CommandBuilders.STOP_GATEWAYSENDER();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_STOP_GW_SENDER));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void _test_DESCRIBE_CLIENT() {
 +    String commandString = CommandBuilders.DESCRIBE_CLIENT();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_LIST_DS));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void _test_DESCRIBE_MEMBER() {
 +    String commandString = CommandBuilders.DESCRIBE_MEMBER();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_LIST_DS));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_EXPORT_LOGS() {
 +    String commandString = CommandBuilders.EXPORT_LOGS();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_EXPORT_LOGS));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_EXPORT_STACKTRACE() {
 +    String commandString = CommandBuilders.EXPORT_STACKTRACE();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_EXPORT_STACKTRACE));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_GC() {
 +    String commandString = CommandBuilders.GC();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_GC));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void _test_LIST_CLIENTS() {
 +    String commandString = CommandBuilders.LIST_CLIENTS();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_LIST_DS));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void _test_LIST_MEMBER() {
 +    String commandString = CommandBuilders.LIST_MEMBER();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_LIST_DS));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_NETSTAT() {
 +    String commandString = CommandBuilders.NETSTAT();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_NETSTAT));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_SHOW_DEADLOCK() {
 +    String commandString = CommandBuilders.SHOW_DEADLOCK();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_SHOW_DEADLOCKS));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_SHOW_LOG() {
 +    String commandString = CommandBuilders.SHOW_LOG();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_SHOW_LOG));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_SHOW_METRICS() {
 +    String commandString = CommandBuilders.SHOW_METRICS();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_SHOW_METRICS));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_CLEAR_DEFINED_INDEXES() {
 +    String commandString = CommandBuilders.CLEAR_DEFINED_INDEXES();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_CREATE_INDEX));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_CREATE_DEFINED_INDEXES() {
 +    String commandString = CommandBuilders.CREATE_DEFINED_INDEXES();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_CREATE_INDEX));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_CREATE_INDEX() {
 +    String commandString = CommandBuilders.CREATE_INDEX();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_CREATE_INDEX));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_DEFINE_INDEX() {
 +    String commandString = CommandBuilders.DEFINE_INDEX();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_CREATE_INDEX));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_DESTROY_INDEX() {
 +    String commandString = CommandBuilders.DESTROY_INDEX();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_DESTROY_INDEX));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void _test_LIST_INDEX() {
 +    String commandString = CommandBuilders.LIST_INDEX();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_LIST_DS));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_CONFIGURE_PDX() {
 +    String commandString = CommandBuilders.CONFIGURE_PDX();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_CONFIGURE_PDX));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_ALTER_REGION() {
 +    String commandString = CommandBuilders.ALTER_REGION();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_ALTER_REGION));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_CREATE_REGION() {
 +    String commandString = CommandBuilders.CREATE_REGION();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_CREATE_REGION));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void _test_DESCRIBE_REGION() {
 +    String commandString = CommandBuilders.DESCRIBE_REGION();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_LIST_DS));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_DESTROY_REGION() {
 +    String commandString = CommandBuilders.DESTROY_REGION();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_DESTROY_REGION));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void _test_LIST_REGION() {
 +    String commandString = CommandBuilders.LIST_REGION();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_LIST_DS));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +  public void test_REBALANCE() {
 +    String commandString = CommandBuilders.REBALANCE();
 +    writeToFile(getSecuredProperties(CommandBuilders.OP_REBALANCE));
 +    securityCheckForCommand(commandString, tempFilePath, getAssertor());
 +  }
 +
 +}


[09/62] [abbrv] [partial] incubator-geode git commit: GEODE-773: Extract static methods from DistributedTestCase

Posted by je...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqQueryDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqQueryDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqQueryDUnitTest.java
index 1ea2fdd..24187a0 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqQueryDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqQueryDUnitTest.java
@@ -64,10 +64,15 @@ import com.gemstone.gemfire.internal.cache.DistributedRegion;
 import com.gemstone.gemfire.internal.cache.DistributedTombstoneOperation;
 import com.gemstone.gemfire.internal.cache.EventID;
 import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
-import com.gemstone.gemfire.test.dunit.DistributedTestCase;
+import com.gemstone.gemfire.test.dunit.Assert;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
+import com.gemstone.gemfire.test.dunit.WaitCriterion;
 
 /**
  * This class tests the ContinuousQuery mechanism in GemFire.
@@ -173,7 +178,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     // avoid IllegalStateException from HandShake by connecting all vms to the
     // system before creating connection pools
     getSystem();
-    invokeInEveryVM(new SerializableRunnable("getSystem") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
       public void run() {
         getSystem();
       }
@@ -208,7 +213,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
         "Create Cache Server") {
       public void run2() throws CacheException
       {
-        getLogWriter().info("### Create Cache Server. ###");
+        LogWriterUtils.getLogWriter().info("### Create Cache Server. ###");
         AttributesFactory factory = new AttributesFactory();
         factory.setScope(Scope.DISTRIBUTED_ACK);
         factory.setMirrorType(mirrorType);
@@ -223,16 +228,16 @@ public class CqQueryDUnitTest extends CacheTestCase {
         for (int i = 0; i < regions.length; i++) {
           createRegion(regions[i], factory.createRegionAttributes());
         }
-        pause(2000);
+        Wait.pause(2000);
 
         try {
           startBridgeServer(thePort, true);
         }
 
         catch (Exception ex) {
-          fail("While starting CacheServer", ex);
+          Assert.fail("While starting CacheServer", ex);
         }
-        pause(2000);
+        Wait.pause(2000);
         
       }
     };
@@ -246,13 +251,13 @@ public class CqQueryDUnitTest extends CacheTestCase {
         "Create Cache Server") {
       public void run2() throws CacheException
       {
-        getLogWriter().info("### Create Cache Server. ###");
+        LogWriterUtils.getLogWriter().info("### Create Cache Server. ###");
         try {
           startBridgeServer(thePort, true);
         }
 
         catch (Exception ex) {
-          fail("While starting CacheServer", ex);
+          Assert.fail("While starting CacheServer", ex);
         }        
       }
     };
@@ -295,33 +300,33 @@ public class CqQueryDUnitTest extends CacheTestCase {
   public void closeServer(VM server) {
     server.invoke(new SerializableRunnable("Close CacheServer") {
       public void run() {
-        getLogWriter().info("### Close CacheServer. ###");
+        LogWriterUtils.getLogWriter().info("### Close CacheServer. ###");
         stopBridgeServer(getCache());
       }
     });
-    pause(2 * 1000);
+    Wait.pause(2 * 1000);
   }
   
   public void crashServer(VM server) {
     server.invoke(new SerializableRunnable("Crash CacheServer") {
       public void run() {
         com.gemstone.gemfire.cache.client.internal.ConnectionImpl.setTEST_DURABLE_CLIENT_CRASH(true);
-        getLogWriter().info("### Crashing CacheServer. ###");
+        LogWriterUtils.getLogWriter().info("### Crashing CacheServer. ###");
         stopBridgeServer(getCache());
       }
     });
-    pause(2 * 1000);
+    Wait.pause(2 * 1000);
   }
   
   public void closeCrashServer(VM server) {
     server.invoke(new SerializableRunnable("Close CacheServer") {
       public void run() {
         com.gemstone.gemfire.cache.client.internal.ConnectionImpl.setTEST_DURABLE_CLIENT_CRASH(false);
-        getLogWriter().info("### Crashing CacheServer. ###");
+        LogWriterUtils.getLogWriter().info("### Crashing CacheServer. ###");
         stopBridgeServer(getCache());
       }
     });
-    pause(2 * 1000);
+    Wait.pause(2 * 1000);
   }
   
   /* Create Client */
@@ -335,7 +340,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     SerializableRunnable createQService =
       new CacheSerializableRunnable("Create Client") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Create Client. ###");
+        LogWriterUtils.getLogWriter().info("### Create Client. ###");
         //Region region1 = null;
         // Initialize CQ Service.
         try {
@@ -354,7 +359,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
         }
         for (int i=0; i < regions.length; i++) {        
           createRegion(regions[i], regionFactory.createRegionAttributes());
-          getLogWriter().info("### Successfully Created Region on Client :" + regions[i]);
+          LogWriterUtils.getLogWriter().info("### Successfully Created Region on Client :" + regions[i]);
           //region1.getAttributesMutator().setCacheListener(new CqListener());
         }
       }
@@ -368,7 +373,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     SerializableRunnable createQService =
       new CacheSerializableRunnable("Create Local Region") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Create Local Region. ###");
+        LogWriterUtils.getLogWriter().info("### Create Local Region. ###");
         AttributesFactory af = new AttributesFactory();
         af.setScope(Scope.LOCAL);
 
@@ -381,7 +386,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
         RegionFactory rf = getCache().createRegionFactory(af.create());
         for (int i = 0; i < regionNames.length; i++) {
           rf.create(regionNames[i]);
-          getLogWriter().info("### Successfully Created Region on Client :" + regions[i]);
+          LogWriterUtils.getLogWriter().info("### Successfully Created Region on Client :" + regions[i]);
         }
       }
     };
@@ -393,7 +398,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     SerializableRunnable createQService =
       new CacheSerializableRunnable("Create Client") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Create Client. ###");
+        LogWriterUtils.getLogWriter().info("### Create Client. ###");
         //Region region1 = null;
         // Initialize CQ Service.
         try {
@@ -416,8 +421,8 @@ public class CqQueryDUnitTest extends CacheTestCase {
         }
         createRegion(regions[0], regionFactory0.createRegionAttributes());
         createRegion(regions[1], regionFactory1.createRegionAttributes());
-        getLogWriter().info("### Successfully Created Region on Client :" + regions[0]);
-        getLogWriter().info("### Successfully Created Region on Client :" + regions[1]);
+        LogWriterUtils.getLogWriter().info("### Successfully Created Region on Client :" + regions[0]);
+        LogWriterUtils.getLogWriter().info("### Successfully Created Region on Client :" + regions[1]);
         
       }
     };
@@ -431,18 +436,18 @@ public class CqQueryDUnitTest extends CacheTestCase {
     SerializableRunnable closeCQService =
       new CacheSerializableRunnable("Close Client") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Close Client. ###");
+        LogWriterUtils.getLogWriter().info("### Close Client. ###");
         try {
           ((DefaultQueryService)getCache().getQueryService()).closeCqService();
         } catch (Exception ex) {
-          getLogWriter().info("### Failed to get CqService during ClientClose() ###");
+          LogWriterUtils.getLogWriter().info("### Failed to get CqService during ClientClose() ###");
         }
         
       }
     };
     
     client.invoke(closeCQService);
-    pause(2 * 1000);
+    Wait.pause(2 * 1000);
   }
 
   
@@ -454,7 +459,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
         for (int i = 1; i <= size; i++) {
           region1.put(KEY+i, new Portfolio(i));
         }
-        getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
+        LogWriterUtils.getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
       }
     });
   }
@@ -469,7 +474,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
           portfolio.createTime = System.currentTimeMillis();
           region1.put(KEY+i, portfolio);
         }
-        getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
+        LogWriterUtils.getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
       }
     });
   }
@@ -483,7 +488,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
           portfolio.shortID = new Short(""+i);
           region1.put(KEY+i, portfolio);
         }
-        getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
+        LogWriterUtils.getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
       }
     });
   }
@@ -515,7 +520,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
               
           }
         }
-        getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
+        LogWriterUtils.getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
       }
     });
   }
@@ -547,7 +552,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
               
           }
         }
-        getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
+        LogWriterUtils.getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
       }
     });
   }
@@ -559,7 +564,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
         for (int i = 1; i <= size; i++) {
           region1.put("key" + i, new Portfolio(i));
         }
-        getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
+        LogWriterUtils.getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
       }
     });
   }
@@ -587,7 +592,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
         for (int i = 1; i <= size; i++) {
           region1.destroy(KEY+i);
         }
-        getLogWriter().info("### Number of Entries In Region after Delete :" + region1.keys().size());
+        LogWriterUtils.getLogWriter().info("### Number of Entries In Region after Delete :" + region1.keys().size());
       }
       
     });
@@ -603,7 +608,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
         for (int i = 1; i <= size; i++) {
           region1.invalidate(KEY+i);
         }
-        getLogWriter().info("### Number of Entries In Region after Delete :" + region1.keys().size());
+        LogWriterUtils.getLogWriter().info("### Number of Entries In Region after Delete :" + region1.keys().size());
       }
       
     });
@@ -621,7 +626,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
         //getLogWriter().info("### DEBUG CREATE CQ START ####");
         //pause(20 * 1000);
         
-        getLogWriter().info("### Create CQ. ###" + cqName);
+        LogWriterUtils.getLogWriter().info("### Create CQ. ###" + cqName);
         // Get CQ Service.
         QueryService cqService = null;
         try {
@@ -632,7 +637,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
         }
         // Create CQ Attributes.
         CqAttributesFactory cqf = new CqAttributesFactory();
-        CqListener[] cqListeners = {new CqQueryTestListener(getLogWriter())};
+        CqListener[] cqListeners = {new CqQueryTestListener(LogWriterUtils.getLogWriter())};
 //        ((CqQueryTestListener)cqListeners[0]).cqName = cqName;
 //        if (isBridgeMemberTest) {
 //          testListenerForBridgeMembershipTest = (CqQueryTestListener)cqListeners[0];
@@ -648,7 +653,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
         } catch (Exception ex){
           AssertionError err = new AssertionError("Failed to create CQ " + cqName + " . ");
           err.initCause(ex);
-          getLogWriter().info("CqService is :" + cqService, err);
+          LogWriterUtils.getLogWriter().info("CqService is :" + cqService, err);
           throw err;
         }
       }
@@ -660,10 +665,10 @@ public class CqQueryDUnitTest extends CacheTestCase {
     vm.invoke(new CacheSerializableRunnable("Create CQ with no name:" ) {
       public void run2() throws CacheException {
         //pause(60 * 1000);
-        getLogWriter().info("### DEBUG CREATE CQ START ####");
+        LogWriterUtils.getLogWriter().info("### DEBUG CREATE CQ START ####");
         //pause(20 * 1000);
         
-        getLogWriter().info("### Create CQ with no name. ###");
+        LogWriterUtils.getLogWriter().info("### Create CQ with no name. ###");
         // Get CQ Service.
         QueryService cqService = null;
         CqQuery cq1 = null;
@@ -679,7 +684,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
         for (int i = 0; i < 20; ++i) {
           // Create CQ Attributes.
           CqAttributesFactory cqf = new CqAttributesFactory();
-          CqListener[] cqListeners = {new CqQueryTestListener(getLogWriter())};
+          CqListener[] cqListeners = {new CqQueryTestListener(LogWriterUtils.getLogWriter())};
           
           cqf.initCqListeners(cqListeners);
           CqAttributes cqa = cqf.create();
@@ -689,40 +694,40 @@ public class CqQueryDUnitTest extends CacheTestCase {
             cq1 = cqService.newCq(queryStr, cqa);
             ((CqQueryTestListener)cqListeners[0]).cqName = cq1.getName();
           } catch (Exception ex){
-            getLogWriter().info("CQService is :" + cqService);
+            LogWriterUtils.getLogWriter().info("CQService is :" + cqService);
             ex.printStackTrace();
             fail("Failed to create CQ with no name" +  " . " + ex.getMessage());
           }
           
           if (cq1 == null) {
-            getLogWriter().info("Failed to get CqQuery object for CQ with no name.");
+            LogWriterUtils.getLogWriter().info("Failed to get CqQuery object for CQ with no name.");
           }
           else {
             cqName = cq1.getName();
-            getLogWriter().info("Created CQ with no name, generated CQ name: " + cqName + " CQ state:" + cq1.getState());
+            LogWriterUtils.getLogWriter().info("Created CQ with no name, generated CQ name: " + cqName + " CQ state:" + cq1.getState());
             assertTrue("Create CQ with no name illegal state", cq1.getState().isStopped());
           }
           if ( i%2 == 0) {
             try {
               cqResults = cq1.executeWithInitialResults();
             } catch (Exception ex){
-              getLogWriter().info("CqService is :" + cqService);
+              LogWriterUtils.getLogWriter().info("CqService is :" + cqService);
               ex.printStackTrace();
               fail("Failed to execute CQ with initial results, cq name: " + cqName + " . " + ex.getMessage());
             }
-            getLogWriter().info("initial result size = " + cqResults.size());
-            getLogWriter().info("CQ state after execute with initial results = " + cq1.getState());
+            LogWriterUtils.getLogWriter().info("initial result size = " + cqResults.size());
+            LogWriterUtils.getLogWriter().info("CQ state after execute with initial results = " + cq1.getState());
             assertTrue("executeWithInitialResults() state mismatch", cq1.getState().isRunning());
           }
           else {
             try {
               cq1.execute();
             } catch (Exception ex){
-              getLogWriter().info("CQService is :" + cqService);
+              LogWriterUtils.getLogWriter().info("CQService is :" + cqService);
               ex.printStackTrace();
               fail("Failed to execute CQ " + cqName + " . " + ex.getMessage());
             }
-            getLogWriter().info("CQ state after execute = " + cq1.getState());
+            LogWriterUtils.getLogWriter().info("CQ state after execute = " + cq1.getState());
             assertTrue("execute() state mismatch", cq1.getState().isRunning());
           }
           
@@ -730,7 +735,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
           try {
             cq1.close();
           } catch (Exception ex){
-            getLogWriter().info("CqService is :" + cqService, ex);
+            LogWriterUtils.getLogWriter().info("CqService is :" + cqService, ex);
             fail("Failed to close CQ " + cqName + " . " + ex.getMessage());
           }
           assertTrue("closeCq() state mismatch", cq1.getState().isClosed());
@@ -758,7 +763,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
 
       private void work() throws CacheException {
       //pause(60 * 1000);
-      getLogWriter().info("### DEBUG EXECUTE CQ START ####");
+      LogWriterUtils.getLogWriter().info("### DEBUG EXECUTE CQ START ####");
       //pause(20 * 1000);
       
       // Get CQ Service.
@@ -778,16 +783,16 @@ public class CqQueryDUnitTest extends CacheTestCase {
       try {
         cq1 = cqService.getCq(cqName);
         if (cq1 == null) {
-          getLogWriter().info("Failed to get CqQuery object for CQ name: " + cqName);
+          LogWriterUtils.getLogWriter().info("Failed to get CqQuery object for CQ name: " + cqName);
           fail("Failed to get CQ " + cqName);
         }
         else {
-          getLogWriter().info("Obtained CQ, CQ name: " + cq1.getName());
+          LogWriterUtils.getLogWriter().info("Obtained CQ, CQ name: " + cq1.getName());
           assertTrue("newCq() state mismatch", cq1.getState().isStopped());
         }
       } catch (Exception ex){
-        getLogWriter().info("CqService is :" + cqService);
-        getLogWriter().error(ex);
+        LogWriterUtils.getLogWriter().info("CqService is :" + cqService);
+        LogWriterUtils.getLogWriter().error(ex);
         AssertionError err = new AssertionError("Failed to execute  CQ " + cqName);
         err.initCause(ex);
         throw err;
@@ -799,13 +804,13 @@ public class CqQueryDUnitTest extends CacheTestCase {
         try {
           cqResults = cq1.executeWithInitialResults();
         } catch (Exception ex){
-          getLogWriter().info("CqService is :" + cqService);
+          LogWriterUtils.getLogWriter().info("CqService is :" + cqService);
           ex.printStackTrace();
           AssertionError err = new AssertionError("Failed to execute  CQ " + cqName);
           err.initCause(ex);
           throw err;
         }
-        getLogWriter().info("initial result size = " + cqResults.size());
+        LogWriterUtils.getLogWriter().info("initial result size = " + cqResults.size());
         assertTrue("executeWithInitialResults() state mismatch", cq1.getState().isRunning());
         if (expectedResultsSize >= 0) {
           assertEquals("unexpected results size", expectedResultsSize, cqResults.size());
@@ -818,7 +823,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
           AssertionError err = new AssertionError("Failed to execute  CQ " + cqName);
           err.initCause(ex);
           if (expectedErr == null) {
-            getLogWriter().info("CqService is :" + cqService, err);
+            LogWriterUtils.getLogWriter().info("CqService is :" + cqService, err);
           }
           throw err;
         }
@@ -848,7 +853,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
   public void stopCQ(VM vm, final String cqName) throws Exception {
     vm.invoke(new CacheSerializableRunnable("Stop CQ :" + cqName) {
       public void run2() throws CacheException {
-        getLogWriter().info("### Stop CQ. ###" + cqName);
+        LogWriterUtils.getLogWriter().info("### Stop CQ. ###" + cqName);
         // Get CQ Service.
         QueryService cqService = null;
         try {
@@ -878,7 +883,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     vm.invoke(new CacheSerializableRunnable("Stop CQ :" + cqName) {
       public void run2() throws CacheException {
         CqQuery cq1 = null;
-        getLogWriter().info("### Stop and Exec CQ. ###" + cqName);
+        LogWriterUtils.getLogWriter().info("### Stop and Exec CQ. ###" + cqName);
         // Get CQ Service.
         QueryService cqService = null;
         try {
@@ -905,8 +910,8 @@ public class CqQueryDUnitTest extends CacheTestCase {
             fail("Count = " + i + "Failed to stop CQ " + cqName + " . " + ex.getMessage());
           }
           assertTrue("Stop CQ state mismatch, count = " + i, cq1.getState().isStopped());
-          getLogWriter().info("After stop in Stop and Execute loop, ran successfully, loop count: " + i);
-          getLogWriter().info("CQ state: " + cq1.getState());
+          LogWriterUtils.getLogWriter().info("After stop in Stop and Execute loop, ran successfully, loop count: " + i);
+          LogWriterUtils.getLogWriter().info("CQ state: " + cq1.getState());
           
           // Re-execute CQ
           try {
@@ -916,8 +921,8 @@ public class CqQueryDUnitTest extends CacheTestCase {
             fail("Count = " + i + "Failed to execute CQ " + cqName + " . " + ex.getMessage());
           }
           assertTrue("Execute CQ state mismatch, count = " + i, cq1.getState().isRunning());
-          getLogWriter().info("After execute in Stop and Execute loop, ran successfully, loop count: " + i);
-          getLogWriter().info("CQ state: " + cq1.getState());
+          LogWriterUtils.getLogWriter().info("After execute in Stop and Execute loop, ran successfully, loop count: " + i);
+          LogWriterUtils.getLogWriter().info("CQ state: " + cq1.getState());
         }
       }
     });
@@ -928,7 +933,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
   public void closeCQ(VM vm, final String cqName) throws Exception {
     vm.invoke(new CacheSerializableRunnable("Close CQ :" + cqName) {
       public void run2() throws CacheException {
-        getLogWriter().info("### Close CQ. ###" + cqName);
+        LogWriterUtils.getLogWriter().info("### Close CQ. ###" + cqName);
         // Get CQ Service.
         QueryService cqService = null;
         try {
@@ -961,7 +966,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
         Region region = null;
         try {
           region = getRootRegion().getSubregion(regionName);
-          region.getAttributesMutator().setCacheListener(new CertifiableTestCacheListener(getLogWriter()));
+          region.getAttributesMutator().setCacheListener(new CertifiableTestCacheListener(LogWriterUtils.getLogWriter()));
         } catch (Exception cqe) {
           AssertionError err = new AssertionError("Failed to get Region.");
           err.initCause(cqe);
@@ -988,7 +993,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
   public void executeAndCloseAndExecuteIRMultipleTimes(VM vm, final String cqName, final String queryStr) {
     vm.invoke(new CacheSerializableRunnable("Create CQ :" + cqName) {
       public void run2() throws CacheException {
-        getLogWriter().info("### Create CQ. ###" + cqName);
+        LogWriterUtils.getLogWriter().info("### Create CQ. ###" + cqName);
         // Get CQ Service.
         QueryService cqService = null;
         try {
@@ -999,7 +1004,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
         }
         // Create CQ Attributes.
         CqAttributesFactory cqf = new CqAttributesFactory();
-        CqListener[] cqListeners = {new CqQueryTestListener(getLogWriter())};
+        CqListener[] cqListeners = {new CqQueryTestListener(LogWriterUtils.getLogWriter())};
         
         cqf.initCqListeners(cqListeners);
         CqAttributes cqa = cqf.create();
@@ -1012,7 +1017,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
         } catch (Exception ex){
           AssertionError err = new AssertionError("Failed to create CQ " + cqName + " . ");
           err.initCause(ex);
-          getLogWriter().info("CqService is :" + cqService, err);
+          LogWriterUtils.getLogWriter().info("CqService is :" + cqService, err);
           throw err;
         }
         
@@ -1077,7 +1082,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
   private void failIfCQExists(VM vm, final String cqName) {
     vm.invoke(new CacheSerializableRunnable("Fail if CQ exists") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Fail if CQ Exists. ### " + cqName);
+        LogWriterUtils.getLogWriter().info("### Fail if CQ Exists. ### " + cqName);
         // Get CQ Service.
         QueryService cqService = null;
         try {          
@@ -1100,7 +1105,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     vm.invoke(new CacheSerializableRunnable("Validate CQs") {
       public void run2() throws CacheException {
         
-        getLogWriter().info("### Validating CQ. ### " + cqName);
+        LogWriterUtils.getLogWriter().info("### Validating CQ. ### " + cqName);
         // Get CQ Service.
         QueryService cqService = null;
         try {          
@@ -1150,7 +1155,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
       final int totalEvents) {
     vm.invoke(new CacheSerializableRunnable("Validate CQs") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Validating CQ. ### " + cqName);
+        LogWriterUtils.getLogWriter().info("### Validating CQ. ### " + cqName);
         // Get CQ Service.
         QueryService cqService = null;
         try {          
@@ -1420,7 +1425,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
             return "cqState never became " + state;
           }
         };
-        DistributedTestCase.waitForCriterion(ev, MAX_TIME, 200, true);
+        Wait.waitForCriterion(ev, MAX_TIME, 200, true);
       }
     });
   }
@@ -1454,7 +1459,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
   private void validateQuery(VM vm, final String query, final int resultSize) {
     vm.invoke(new CacheSerializableRunnable("Validate Query") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Validating Query. ###");
+        LogWriterUtils.getLogWriter().info("### Validating Query. ###");
         QueryService qs = getCache().getQueryService();
         
         Query q = qs.newQuery(query);
@@ -1462,7 +1467,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
           Object r = q.execute();
           if(r instanceof Collection){
             int rSize = ((Collection)r).size();
-            getLogWriter().info("### Result Size is :" + rSize);
+            LogWriterUtils.getLogWriter().info("### Result Size is :" + rSize);
             assertEquals(rSize, rSize);
           }
         }
@@ -1513,7 +1518,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     vm.invoke(new CacheSerializableRunnable("Stop CQ :" + cqName) {
       public void run2() throws CacheException {
         CqQuery cq1 = null;
-        getLogWriter().info("### CQ attributes mutator for ###" + cqName);
+        LogWriterUtils.getLogWriter().info("### CQ attributes mutator for ###" + cqName);
         // Get CQ Service.
         QueryService cqService = null;
         try {
@@ -1614,7 +1619,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     createServer(server2);
 
     final int thePort = server.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
 
     // Create client.
     createClient(client, thePort, host0);
@@ -1622,7 +1627,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
       /* CQ Test with initial Values. */
       int size = 5;
       createValuesWithShort(server, regions[0], size);
-      pause(1*500);
+      Wait.pause(1*500);
       
       final String cqName = "testCQResultSet_0"; 
   
@@ -1657,7 +1662,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     /* Init Server and Client */
     createServer(server);
     final int thePort = server.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     createClient(client, thePort, host0);
     
     
@@ -1827,7 +1832,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     /* Init Server and Client */
     createServer(server);
     final int thePort = server.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     createClient(client, thePort, host0);
     
     /* Create CQs. */
@@ -1910,7 +1915,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     /* Init Server and Client */
     createServer(server);
     final int thePort = server.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     createClient(client, thePort, host0);
     
     /* Create CQs. */
@@ -2006,7 +2011,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     /* Init Server and Client */
     createServer(server);
     final int thePort = server.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     createClient(client, thePort, host0);
     
     /* debug */
@@ -2097,12 +2102,12 @@ public class CqQueryDUnitTest extends CacheTestCase {
     } catch (com.gemstone.gemfire.test.dunit.RMIException rmiExc) {
       Throwable cause = rmiExc.getCause();
       if (!(cause instanceof AssertionError)) {
-        getLogWriter().severe("Expected to see an AssertionError.", cause);
+        LogWriterUtils.getLogWriter().severe("Expected to see an AssertionError.", cause);
         fail("wrong error");
       }
       Throwable causeCause = cause.getCause(); // should be a RegionNotFoundException
       if (!(causeCause instanceof RegionNotFoundException)) {
-        getLogWriter().severe("Expected cause to be RegionNotFoundException", cause);
+        LogWriterUtils.getLogWriter().severe("Expected cause to be RegionNotFoundException", cause);
         fail("wrong cause");
       }
     }
@@ -2118,14 +2123,14 @@ public class CqQueryDUnitTest extends CacheTestCase {
     
     client.invoke(new CacheSerializableRunnable("CloseAll CQ :") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Close All CQ. ###");
+        LogWriterUtils.getLogWriter().info("### Close All CQ. ###");
         // Get CQ Service.
         QueryService cqService = null;
         try {          
           cqService = getCache().getQueryService();
         } catch (Exception cqe) {
           cqe.printStackTrace();
-          getLogWriter().info("Failed to getCQService.", cqe);
+          LogWriterUtils.getLogWriter().info("Failed to getCQService.", cqe);
           fail("Failed to getCQService.");
         }
         
@@ -2134,7 +2139,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
           cqService.closeCqs();
         } catch (Exception ex){
           ex.printStackTrace();
-          getLogWriter().info("Failed to close All CQ.", ex);
+          LogWriterUtils.getLogWriter().info("Failed to close All CQ.", ex);
           fail("Failed to close All CQ. " + ex.getMessage());
         }
       }
@@ -2154,7 +2159,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     // Call close all CQ.
     client.invoke(new CacheSerializableRunnable("CloseAll CQ 2 :") {
       public void run2() throws CacheException {
-        getLogWriter().info("### Close All CQ 2. ###");
+        LogWriterUtils.getLogWriter().info("### Close All CQ 2. ###");
         // Get CQ Service.
         QueryService cqService = null;
         try {          
@@ -2192,7 +2197,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     /* Init Server and Client */
     createServer(server);
     final int thePort = server.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     createClient(client, thePort, host0);
     
     
@@ -2254,7 +2259,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
       }
     });
     
-    pause(4 * 1000);
+    Wait.pause(4 * 1000);
     validateCQCount(client, 0);
     
     closeClient(client);
@@ -2276,7 +2281,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     /* Create Server and Client */
     createServer(server);
     final int thePort = server.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     createClient(client1, thePort, host0);
     createClient(client2, thePort, host0);
     
@@ -2426,7 +2431,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     createServer(server);
     
     final int thePort = server.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     
     // Create client.
     createClient(client, thePort, host0);
@@ -2434,7 +2439,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     /* CQ Test with initial Values. */
     int size = 10;
     createValues(server, regions[0], size);
-    pause(1*500);
+    Wait.pause(1*500);
     
     // Create CQs.
     createCQ(client, "testCQResultSet_0", cqs[0]);    
@@ -2497,7 +2502,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     createServer(server);
     
     final int thePort = server.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     
     // Create client.
     createClient(client, thePort, host0);
@@ -2568,7 +2573,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
       }
     });
     
-    pause(1 * 1000);
+    Wait.pause(1 * 1000);
     // cqs should not get any creates, deletes or updates. rdubey.
     validateCQ(client, "testCQEvents_0",
         /* resultSize: */ noTest,
@@ -2600,7 +2605,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
 
     final int thePort = server.invokeInt(CqQueryDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
 
     // Create client.
     createClient(client, thePort, host0);
@@ -2665,7 +2670,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
           value.put("field2", "key" + i);
           exampleRegion.put(KEY + i, value);
         }
-        getLogWriter().info(
+        LogWriterUtils.getLogWriter().info(
             "### Number of Entries in Region :" + exampleRegion.keys().size());
       }
     });
@@ -2684,7 +2689,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     createServer(server);
     
     final int thePort = server.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     
     // Create client.
     createClient(client, thePort, host0);
@@ -2708,11 +2713,11 @@ public class CqQueryDUnitTest extends CacheTestCase {
       }      
     });
     
-    pause(1 * 1000);
+    Wait.pause(1 * 1000);
     // Init values at server.
     int size = 10;
     createValues(server, regions[0], size);
-    pause(1 * 500);
+    Wait.pause(1 * 500);
     // There should not be any creates.
     validateCQ(client, "testEnableDisable_0",
         /* resultSize: */ noTest,
@@ -2738,7 +2743,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
         }                
       }
     });
-    pause(1 * 1000);
+    Wait.pause(1 * 1000);
     createValues(server, regions[0], size);    
     waitForUpdated(client, "testEnableDisable_0", KEY+size);
     // It gets created on the CQs
@@ -2767,9 +2772,9 @@ public class CqQueryDUnitTest extends CacheTestCase {
       }
     });
     
-    pause(2 * 1000);
+    Wait.pause(2 * 1000);
     deleteValues(server, regions[0], size / 2);
-    pause(1 * 500);    
+    Wait.pause(1 * 500);    
     // There should not be any deletes.
     validateCQ(client, "testEnableDisable_0",
         /* resultSize: */ noTest,
@@ -2795,7 +2800,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
         }                
       }
     });
-    pause(1 * 1000);
+    Wait.pause(1 * 1000);
     createValues(server, regions[0], size / 2);    
     waitForCreated(client, "testEnableDisable_0", KEY+(size / 2));
     // Gets updated on the CQ.
@@ -2826,7 +2831,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     createServer(server);
     
     final int thePort = server.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     
     // Create client.
     createClient(client, thePort, host0);
@@ -2868,7 +2873,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     createServer(server1);
     
     final int port1 = server1.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     // Create client.
 //    Properties props = new Properties();
     // Create client with redundancyLevel -1
@@ -2883,23 +2888,23 @@ public class CqQueryDUnitTest extends CacheTestCase {
       createCQ(client, "testCQFailOver_" + i, cqs[i]);
       executeCQ(client, "testCQFailOver_" + i, false, null);
     }
-    pause(1 * 1000);
+    Wait.pause(1 * 1000);
     
     // CREATE.
     createValues(server1, regions[0], 10);
     createValues(server1, regions[1], 10);
     waitForCreated(client, "testCQFailOver_0", KEY+10);
 
-    pause(1 * 1000);
+    Wait.pause(1 * 1000);
     
     createServer(server2, ports[0]);
     final int thePort2 = server2.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
     System.out.println("### Port on which server1 running : " + port1 + 
         " Server2 running : " + thePort2);
-    pause(3 * 1000);
+    Wait.pause(3 * 1000);
 
     // Extra pause - added after downmerging trunk r17050
-    pause(5 * 1000);
+    Wait.pause(5 * 1000);
     
     // UPDATE - 1.
     createValues(server1, regions[0], 10);    
@@ -2917,7 +2922,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     closeServer(server1);
     
     // Fail over should happen.
-    pause(3 * 1000);
+    Wait.pause(3 * 1000);
     
     for (int i=0; i < numCQs; i++) {
       validateCQ(client, "testCQFailOver_" + i, noTest, resultsCnt[i], resultsCnt[i], noTest);
@@ -2956,7 +2961,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     createServer(server1);
     
     final int port1 = server1.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     
     final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(2);
     
@@ -2983,7 +2988,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
       executeCQ(client, "testCQHA_" + i, false, null);
     }
     
-    pause(1 * 1000);
+    Wait.pause(1 * 1000);
     
     // CREATE.
     createValues(server1, regions[0], 10);
@@ -2999,7 +3004,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     // Close server1.
     // To maintain the redundancy; it will make connection to endpoint-3.
     closeServer(server1);
-    pause(3 * 1000);
+    Wait.pause(3 * 1000);
     
     // UPDATE-1.
     createValues(server2, regions[0], 10);
@@ -3016,7 +3021,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     
     // Close server-2
     closeServer(server2);
-    pause(2 * 1000);
+    Wait.pause(2 * 1000);
     
     // UPDATE - 2.
     clearCQListenerEvents(client, "testCQHA_0");
@@ -3054,7 +3059,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     createServer(server2);
     
     final int port1 = server1.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     
     final int thePort2 = server2.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
     
@@ -3097,7 +3102,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     });
     
     
-    pause(2 * 1000);
+    Wait.pause(2 * 1000);
     closeServer(server1);
     closeServer(server2);
   }
@@ -3114,7 +3119,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     createServer(server);
     
     final int thePort = server.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     
     // Create client.
     createClient(client, thePort, host0);
@@ -3158,7 +3163,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
           getCache().getLogger().info("cqs for region: /root/"+regions[1]+" : "+cq.length);
           assertNotNull("CQservice should not return null for cqs on this region : /root/"+regions[1], cq);
         } catch (Exception cqe) {
-          fail("Failed to getCQService",cqe);
+          Assert.fail("Failed to getCQService",cqe);
         }                
       }
     });
@@ -3183,7 +3188,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     
     
     final int thePort = server.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
     
     // Create client.
     createLocalRegion(client, new int[] {thePort}, host0, "-1", new String[]{regions[0]});
@@ -3219,7 +3224,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
 
     final int thePort = server.invokeInt(CqQueryDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
 
     // Create client.
     createClient(client, thePort, host0);
@@ -3270,7 +3275,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     "Create Cache Server") {
       public void run2() throws CacheException
       {
-        getLogWriter().info("### Create Cache Server. ###");
+        LogWriterUtils.getLogWriter().info("### Create Cache Server. ###");
         
         // Create region with Global scope
         AttributesFactory factory1 = new AttributesFactory();
@@ -3284,16 +3289,16 @@ public class CqQueryDUnitTest extends CacheTestCase {
         factory2.setMirrorType(MirrorType.KEYS_VALUES);
         createRegion(regions[1], factory2.createRegionAttributes());
         
-        pause(2000);
+        Wait.pause(2000);
 
         try {
           startBridgeServer(port, true);
         }
 
         catch (Exception ex) {
-          fail("While starting CacheServer", ex);
+          Assert.fail("While starting CacheServer", ex);
         }
-        pause(2000);
+        Wait.pause(2000);
 
       }
     };
@@ -3304,7 +3309,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     final int port1 = server1.invokeInt(CqQueryDUnitTest.class,
         "getCacheServerPort");
 
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
 
     final int thePort2 = server2.invokeInt(CqQueryDUnitTest.class,
     "getCacheServerPort");
@@ -3371,7 +3376,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
       createServer(server);
       
       final int thePort = server.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-      final String host0 = getServerHostName(server.getHost());
+      final String host0 = NetworkUtils.getServerHostName(server.getHost());
       
       // Create client.
       createClient(client, thePort, host0);
@@ -3379,7 +3384,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
       /* CQ Test with initial Values. */
       int size = 5;
       createValuesWithShort(server, regions[0], size);
-      pause(1*500);
+      Wait.pause(1*500);
       
       // Create CQs.
       createCQ(client, "testCQResultSet_0", shortTypeCQs[0]);    
@@ -3403,7 +3408,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
       createServer(server);
       
       final int thePort = server.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-      final String host0 = getServerHostName(server.getHost());
+      final String host0 = NetworkUtils.getServerHostName(server.getHost());
       
       // Create client.
       createClient(client, thePort, host0);
@@ -3412,7 +3417,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
       int size = 10;
       //create values
       createValuesAsPrimitives(server, regions[0], size);
-      pause(1*500);
+      Wait.pause(1*500);
       
       // Create CQs.
       createCQ(client, "equalsQuery1", "select * from /root/regionA p where p.equals('seeded')");    
@@ -3455,7 +3460,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
       createServer(server);
       
       final int thePort = server.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-      final String host0 = getServerHostName(server.getHost());
+      final String host0 = NetworkUtils.getServerHostName(server.getHost());
       
       // Create client.
       createClient(client, thePort, host0);
@@ -3465,7 +3470,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
       //create values
       createIndex(server, "index1", "p.status", "/root/regionA p");
       createValuesAsPrimitives(server, regions[0], size);
-      pause(1*500);
+      Wait.pause(1*500);
       
       // Create CQs.
       createCQ(client, "equalsQuery1", "select * from /root/regionA p where p.equals('seeded')");    
@@ -3507,7 +3512,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
 
     final int port1 = server1.invokeInt(CqQueryDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
 
     final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(1);
     createClient(client, new int[] { port1, ports[0] }, host0, "-1");
@@ -3517,7 +3522,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     createCQ(client, "testCQAllServersLeave_" + 11, cqs[11], true);
     executeCQ(client, "testCQAllServersLeave_" + 11, false, null);
 
-    pause(5 * 1000);
+    Wait.pause(5 * 1000);
     waitForCqsConnected(client, "testCQAllServersLeave_11", 1);
 
     // CREATE.
@@ -3527,16 +3532,16 @@ public class CqQueryDUnitTest extends CacheTestCase {
     createServer(server2, ports[0]);
     final int thePort2 = server2.invokeInt(CqQueryDUnitTest.class,
         "getCacheServerPort");
-    pause(8 * 1000);
+    Wait.pause(8 * 1000);
 
     // Close server1.
     crashServer(server1);
 
-    pause(3 * 1000);
+    Wait.pause(3 * 1000);
 
     crashServer(server2);
 
-    pause(3 * 1000);
+    Wait.pause(3 * 1000);
     waitForCqsDisconnected(client, "testCQAllServersLeave_11", 1);
     
     // Close.
@@ -3555,17 +3560,17 @@ public class CqQueryDUnitTest extends CacheTestCase {
     createServer(server1);
     final int port1 = server1.invokeInt(CqQueryDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(1);
     
     createClient(client, new int[] { port1, ports[0] }, host0, "-1");
 
-    pause(5*1000);
+    Wait.pause(5*1000);
     // Create CQs.
     createCQ(client, "testCQAllServersLeave_" + 11, cqs[11], true);
     executeCQ(client, "testCQAllServersLeave_" + 11, false, null);
 
-    pause(5 * 1000);
+    Wait.pause(5 * 1000);
     waitForCqsConnected(client, "testCQAllServersLeave_11", 1);
     // CREATE.
     createValues(server1, regions[0], 10);
@@ -3574,16 +3579,16 @@ public class CqQueryDUnitTest extends CacheTestCase {
     createServer(server2, ports[0]);
     final int thePort2 = server2.invokeInt(CqQueryDUnitTest.class,
         "getCacheServerPort");
-    pause(10 * 1000);
+    Wait.pause(10 * 1000);
 
     // Close server1 and pause so server has chance to close
     closeServer(server1);
-    pause(10 * 1000);
+    Wait.pause(10 * 1000);
     waitForCqsDisconnected(client, "testCQAllServersLeave_11", 0);
 
     //Close server 2 and pause so server has a chance to close
     closeServer(server2);
-    pause(10 * 1000);
+    Wait.pause(10 * 1000);
     waitForCqsDisconnected(client, "testCQAllServersLeave_11", 1);
 
     // Close.
@@ -3602,7 +3607,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     createServer(server1);
     final int port1 = server1.invokeInt(CqQueryDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
 
     final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(1);
     
@@ -3611,7 +3616,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     // Create CQs.
     createCQ(client, "testCQAllServersLeave_" + 11, cqs[11], true);
     executeCQ(client, "testCQAllServersLeave_" + 11, false, null);
-    pause(5 * 1000);
+    Wait.pause(5 * 1000);
     //listener should have had onCqConnected invoked
     waitForCqsConnected(client, "testCQAllServersLeave_11", 1);
     
@@ -3623,27 +3628,27 @@ public class CqQueryDUnitTest extends CacheTestCase {
     createServer(server2, ports[0]);
     final int thePort2 = server2.invokeInt(CqQueryDUnitTest.class,
         "getCacheServerPort");
-    pause(8 * 1000);
+    Wait.pause(8 * 1000);
 
     // Close server1.
     closeServer(server1);
     // Give the server time to shut down
-    pause(10 * 1000);
+    Wait.pause(10 * 1000);
     //We should not yet get a disconnect because we still have server2
     waitForCqsDisconnected(client, "testCQAllServersLeave_11", 0);
 
     //Close the server2 
     closeServer(server2);
-    pause(10 * 1000);
+    Wait.pause(10 * 1000);
     waitForCqsDisconnected(client, "testCQAllServersLeave_11", 1);
 
     //reconnect server1.  Our total connects for this test run are now 2
     restartBridgeServer(server1, port1);
-    pause(10 * 1000);
+    Wait.pause(10 * 1000);
     waitForCqsConnected(client, "testCQAllServersLeave_11", 2);
     
     //Disconnect again and now our total disconnects should be 2
-    pause(10 * 1000);
+    Wait.pause(10 * 1000);
     closeServer(server1);
     waitForCqsDisconnected(client, "testCQAllServersLeave_11", 2);
     
@@ -3664,7 +3669,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     createServer(server1);
     final int port1 = server1.invokeInt(CqQueryDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
 
     final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(1);
     createClient(client, new int[] { port1, ports[0] }, host0, "-1");
@@ -3673,7 +3678,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     createCQ(client, "testCQAllServersLeave_" + 11, cqs[11], true);
     executeCQ(client, "testCQAllServersLeave_" + 11, false, null);
 
-    pause(5 * 1000);
+    Wait.pause(5 * 1000);
     waitForCqsConnected(client, "testCQAllServersLeave_11", 1);
     // CREATE.
     createValues(server1, regions[0], 10);
@@ -3682,16 +3687,16 @@ public class CqQueryDUnitTest extends CacheTestCase {
     createServer(server2, ports[0]);
     final int thePort2 = server2.invokeInt(CqQueryDUnitTest.class,
         "getCacheServerPort");
-    pause(8 * 1000);
+    Wait.pause(8 * 1000);
 
     // Close server1 and give time for server1 to actually shutdown
     closeServer(server1);
-    pause(10 * 1000);
+    Wait.pause(10 * 1000);
     waitForCqsDisconnected(client, "testCQAllServersLeave_11", 0);
 
     // Close server2 and give time for server2 to shutdown before checking disconnected count
     closeServer(server2);
-    pause(10 * 1000);
+    Wait.pause(10 * 1000);
     waitForCqsDisconnected(client, "testCQAllServersLeave_11", 1);
     
     // Close.
@@ -3713,13 +3718,13 @@ public class CqQueryDUnitTest extends CacheTestCase {
 
     final int port1 = server1.invokeInt(CqQueryDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(1);
 
     createServer(server2, ports[0]);
     final int thePort2 = server2.invokeInt(CqQueryDUnitTest.class,
         "getCacheServerPort");
-    pause(8 * 1000);
+    Wait.pause(8 * 1000);
 
     // Create client
     createClientWith2Pools(client, new int[] { port1 }, new int[] { thePort2 },
@@ -3732,7 +3737,7 @@ public class CqQueryDUnitTest extends CacheTestCase {
     createCQ(client, "testCQAllServersLeave_" + 12, cqs[12], true);
     executeCQ(client, "testCQAllServersLeave_" + 12, false, null);
 
-    pause(5 * 1000);
+    Wait.pause(5 * 1000);
     waitForCqsConnected(client, "testCQAllServersLeave_11", 1);
     waitForCqsConnected(client, "testCQAllServersLeave_12", 1);
     // CREATE.
@@ -3766,7 +3771,7 @@ public void testCqCloseAndExecuteWithInitialResults() throws Exception {
    createServer(server);
    
    final int thePort = server.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-   final String host0 = getServerHostName(server.getHost());
+   final String host0 = NetworkUtils.getServerHostName(server.getHost());
    
    // Create client.
    createClient(client, thePort, host0);
@@ -3774,7 +3779,7 @@ public void testCqCloseAndExecuteWithInitialResults() throws Exception {
    /* CQ Test with initial Values. */
    int size = 5;
    createValuesWithShort(server, regions[0], size);
-   pause(1*500);
+   Wait.pause(1*500);
    
    // Create CQs.
    executeAndCloseAndExecuteIRMultipleTimes(client, "testCQResultSet_0", shortTypeCQs[0]);    
@@ -3795,7 +3800,7 @@ public void testCQEventsWithNotEqualsUndefined() throws Exception {
   createServer(server);
   
   final int thePort = server.invokeInt(CqQueryDUnitTest.class, "getCacheServerPort");
-  final String host0 = getServerHostName(server.getHost());
+  final String host0 = NetworkUtils.getServerHostName(server.getHost());
   
   // Create client.
   createClient(client, thePort, host0);
@@ -3866,7 +3871,7 @@ public void testCQEventsWithNotEqualsUndefined() throws Exception {
     }
   });
   
-  pause(1 * 1000);
+  Wait.pause(1 * 1000);
   // cqs should get any creates and inserts even for invalid 
   // since this is a NOT EQUALS query which adds Undefined to
   // results
@@ -3894,14 +3899,14 @@ public void testCQEventsWithNotEqualsUndefined() throws Exception {
     server.invoke(new CacheSerializableRunnable("Server Region Entries") {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
-        getLogWriter().info("### Entries in Server :" + region.keys().size());
+        LogWriterUtils.getLogWriter().info("### Entries in Server :" + region.keys().size());
       }
     });
     
     client.invoke(new CacheSerializableRunnable("Client Region Entries") {
       public void run2() throws CacheException {
         Region region = getRootRegion().getSubregion(regionName);
-        getLogWriter().info("### Entries in Client :" + region.keys().size()); 
+        LogWriterUtils.getLogWriter().info("### Entries in Client :" + region.keys().size()); 
       }
     });
   }

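The hunks above and in the next file apply the same mechanical GEODE-773 change: helpers these dunit tests used to inherit as static methods from DistributedTestCase are now reached through the extracted utility classes (Wait, NetworkUtils, LogWriterUtils, IgnoredException, Invoke). A minimal sketch of the pattern, assuming a typical CqQueryDUnitTest-style method — the method name and body below are illustrative only, not lifted from the commit:

    import com.gemstone.gemfire.test.dunit.Host;
    import com.gemstone.gemfire.test.dunit.LogWriterUtils;
    import com.gemstone.gemfire.test.dunit.NetworkUtils;
    import com.gemstone.gemfire.test.dunit.VM;
    import com.gemstone.gemfire.test.dunit.Wait;

    public void testPatternSketch() throws Exception {  // hypothetical test, for illustration
      final Host host = Host.getHost(0);
      final VM server = host.getVM(0);

      // Old form (static methods inherited from DistributedTestCase):
      //   final String host0 = getServerHostName(server.getHost());
      //   pause(5 * 1000);
      //   getLogWriter().info("### server ready");
      //
      // New form (the extracted dunit utility classes, as in the hunks above):
      final String host0 = NetworkUtils.getServerHostName(server.getHost());
      Wait.pause(5 * 1000);
      LogWriterUtils.getLogWriter().info("### server ready on " + host0);
    }

The call sites keep their arguments unchanged; only the receiver moves from the inherited base class to the utility class, which is why every hunk in this commit is a one-line substitution.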
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/820cfd63/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqQueryOptimizedExecuteDUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqQueryOptimizedExecuteDUnitTest.java b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqQueryOptimizedExecuteDUnitTest.java
index 54ec12d..6be233a 100644
--- a/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqQueryOptimizedExecuteDUnitTest.java
+++ b/gemfire-cq/src/test/java/com/gemstone/gemfire/cache/query/cq/dunit/CqQueryOptimizedExecuteDUnitTest.java
@@ -25,8 +25,12 @@ import com.gemstone.gemfire.cache30.CacheSerializableRunnable;
 import com.gemstone.gemfire.internal.AvailablePortHelper;
 import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
 import com.gemstone.gemfire.test.dunit.Host;
+import com.gemstone.gemfire.test.dunit.Invoke;
+import com.gemstone.gemfire.test.dunit.LogWriterUtils;
+import com.gemstone.gemfire.test.dunit.NetworkUtils;
 import com.gemstone.gemfire.test.dunit.SerializableRunnable;
 import com.gemstone.gemfire.test.dunit.VM;
+import com.gemstone.gemfire.test.dunit.Wait;
 
 /**
  * Test class for testing {@link CqServiceImpl#EXECUTE_QUERY_DURING_INIT} flag
@@ -40,7 +44,7 @@ public class CqQueryOptimizedExecuteDUnitTest extends CqQueryDUnitTest{
 
   public void setUp() throws Exception {
     super.setUp();
-    invokeInEveryVM(new SerializableRunnable("getSystem") {
+    Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
       public void run() {
         CqServiceImpl.EXECUTE_QUERY_DURING_INIT = false;
       }
@@ -48,14 +52,13 @@ public class CqQueryOptimizedExecuteDUnitTest extends CqQueryDUnitTest{
   }
   
   @Override
-  public void tearDown2() throws Exception {
-    invokeInEveryVM(new SerializableRunnable("getSystem") {
+  protected final void preTearDownCacheTestCase() throws Exception {
+    Invoke.invokeInEveryVM(new SerializableRunnable("getSystem") {
       public void run() {
         CqServiceImpl.EXECUTE_QUERY_DURING_INIT = true;
         CqServiceProvider.MAINTAIN_KEYS = true;
       }
     });
-    super.tearDown2();
   }
   
   public void testCqExecuteWithoutQueryExecution() throws Exception {
@@ -71,7 +74,7 @@ public class CqQueryOptimizedExecuteDUnitTest extends CqQueryDUnitTest{
 
     final int thePort = server.invokeInt(CqQueryDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
 
     // Create client.
     createClient(client, thePort, host0);
@@ -98,7 +101,7 @@ public class CqQueryOptimizedExecuteDUnitTest extends CqQueryDUnitTest{
         for (int i = numOfEntries+1; i <= numOfEntries*2; i++) {
           region1.put(KEY+i, new Portfolio(i));
         }
-        getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
+        LogWriterUtils.getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
       }
     });
     
@@ -170,7 +173,7 @@ public class CqQueryOptimizedExecuteDUnitTest extends CqQueryDUnitTest{
 
     final int thePort = server.invokeInt(CqQueryDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server.getHost());
 
     // Create client.
     createClient(client, thePort, host0);
@@ -198,7 +201,7 @@ public class CqQueryOptimizedExecuteDUnitTest extends CqQueryDUnitTest{
         for (int i = numOfEntries+1; i <= numOfEntries*2; i++) {
           region1.put(KEY+i, new Portfolio(i));
         }
-        getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
+        LogWriterUtils.getLogWriter().info("### Number of Entries in Region :" + region1.keys().size());
       }
     });
     
@@ -265,13 +268,13 @@ public class CqQueryOptimizedExecuteDUnitTest extends CqQueryDUnitTest{
 
     final int port1 = server1.invokeInt(CqQueryDUnitTest.class,
         "getCacheServerPort");
-    final String host0 = getServerHostName(server1.getHost());
+    final String host0 = NetworkUtils.getServerHostName(server1.getHost());
     final int[] ports = AvailablePortHelper.getRandomAvailableTCPPorts(1);
 
     createServer(server2, ports[0]);
     final int thePort2 = server2.invokeInt(CqQueryDUnitTest.class,
         "getCacheServerPort");
-    pause(8 * 1000);
+    Wait.pause(8 * 1000);
 
     // Create client
     createClientWith2Pools(client, new int[] { port1 }, new int[] { thePort2 },
@@ -284,7 +287,7 @@ public class CqQueryOptimizedExecuteDUnitTest extends CqQueryDUnitTest{
     createCQ(client, "testCQAllServersLeave_" + 12, cqs[12], true);
     executeCQ(client, "testCQAllServersLeave_" + 12, false, null);
 
-    pause(5 * 1000);
+    Wait.pause(5 * 1000);
     waitForCqsConnected(client, "testCQAllServersLeave_11", 1);
     waitForCqsConnected(client, "testCQAllServersLeave_12", 1);
     // CREATE.