Posted to commits@geode.apache.org by ud...@apache.org on 2016/06/02 00:08:32 UTC

[46/67] [abbrv] incubator-geode git commit: GEODE-1377: Initial move of system properties from private to public
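
The change repeated in every hunk below is the same: test code stops referencing the internal DistributionConfig *_NAME constants and instead statically imports the public constants from com.gemstone.gemfire.distributed.SystemConfigurationProperties. A minimal sketch of the resulting style, assembled only from keys and calls that appear in this commit (the class name and method here are illustrative, not part of the commit):

    import static com.gemstone.gemfire.distributed.SystemConfigurationProperties.*;

    import java.util.Properties;

    import com.gemstone.gemfire.cache.Cache;
    import com.gemstone.gemfire.cache.CacheFactory;

    public class PublicSystemPropertiesExample {

      public static Cache connect() {
        // Previously these keys were spelled DistributionConfig.LOG_LEVEL_NAME,
        // DistributionConfig.STATISTIC_SAMPLING_ENABLED_NAME, and so on.
        Properties props = new Properties();
        props.setProperty(MCAST_PORT, "0");
        props.setProperty(LOCATORS, "");
        props.setProperty(LOG_LEVEL, "config");
        props.setProperty(STATISTIC_SAMPLING_ENABLED, "true");
        props.setProperty(STATISTIC_ARCHIVE_FILE, "stats.gfs");
        return new CacheFactory(props).create();
      }
    }

A single wildcard static import keeps each call site down to one identifier, which is why several hunks below also collapse the earlier per-constant static imports (LOCATORS, MCAST_PORT, START_LOCATOR, ...) into one SystemConfigurationProperties.* import. The individual file diffs follow.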

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/cb291539/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ClientServerTransactionDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ClientServerTransactionDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ClientServerTransactionDUnitTest.java
index d49b116..8115701 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ClientServerTransactionDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ClientServerTransactionDUnitTest.java
@@ -16,6 +16,8 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
+import static com.gemstone.gemfire.distributed.SystemConfigurationProperties.*;
+
 import com.gemstone.gemfire.cache.*;
 import com.gemstone.gemfire.cache.Region.Entry;
 import com.gemstone.gemfire.cache.client.*;
@@ -147,10 +149,10 @@ public class ClientServerTransactionDUnitTest extends RemoteTransactionDUnitTest
         ClientCacheFactory ccf = new ClientCacheFactory();
         ccf.addPoolServer("localhost"/*getServerHostName(Host.getHost(0))*/, port);
         ccf.setPoolSubscriptionEnabled(false);
-        ccf.set(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+        ccf.set(LOG_LEVEL, getDUnitLogLevel());
         // these settings were used to manually check that tx operation stats were being updated
-        //ccf.set(DistributionConfig.STATISTIC_SAMPLING_ENABLED_NAME, "true");
-        //ccf.set(DistributionConfig.STATISTIC_ARCHIVE_FILE_NAME, "clientStats.gfs");
+        //ccf.set(STATISTIC_SAMPLING_ENABLED, "true");
+        //ccf.set(STATISTIC_ARCHIVE_FILE, "clientStats.gfs");
         ClientCache cCache = getClientCache(ccf);
         ClientRegionFactory<Integer, String> crf = cCache
             .createClientRegionFactory(isEmpty ? ClientRegionShortcut.PROXY
@@ -207,7 +209,7 @@ public class ClientServerTransactionDUnitTest extends RemoteTransactionDUnitTest
     ccf.addPoolServer("localhost"/*getServerHostName(Host.getHost(0))*/, port1);
     ccf.setPoolSubscriptionEnabled(false);
 
-    ccf.set(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+    ccf.set(LOG_LEVEL, getDUnitLogLevel());
 
     ClientCache cCache = getClientCache(ccf);
     
@@ -278,7 +280,7 @@ public class ClientServerTransactionDUnitTest extends RemoteTransactionDUnitTest
     ClientCacheFactory ccf = new ClientCacheFactory();
     ccf.addPoolServer("localhost"/*getServerHostName(Host.getHost(0))*/, port1);
     ccf.setPoolSubscriptionEnabled(false);
-    ccf.set(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+    ccf.set(LOG_LEVEL, getDUnitLogLevel());
     ClientCache cCache = getClientCache(ccf);
     ClientRegionFactory<CustId, Customer> custrf = cCache
       .createClientRegionFactory(cachingProxy ? ClientRegionShortcut.CACHING_PROXY : ClientRegionShortcut.PROXY);
@@ -1333,7 +1335,7 @@ public void testClientCommitAndDataStoreGetsEvent() throws Exception {
         ClientCacheFactory ccf = new ClientCacheFactory();
         ccf.addPoolServer("localhost"/*getServerHostName(Host.getHost(0))*/, port1);
         ccf.setPoolSubscriptionEnabled(false);
-        ccf.set(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+        ccf.set(LOG_LEVEL, getDUnitLogLevel());
         ClientCache cCache = getClientCache(ccf);
         ClientRegionFactory<CustId, Customer> custrf = cCache
             .createClientRegionFactory(ClientRegionShortcut.PROXY);
@@ -1429,7 +1431,7 @@ public void testClientCommitAndDataStoreGetsEvent() throws Exception {
         ccf.addPoolServer("localhost", port2);
         ccf.setPoolLoadConditioningInterval(1);
         ccf.setPoolSubscriptionEnabled(false);
-        ccf.set(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+        ccf.set(LOG_LEVEL, getDUnitLogLevel());
         ClientCache cCache = getClientCache(ccf);
         ClientRegionFactory<CustId, Customer> custrf = cCache
             .createClientRegionFactory(ClientRegionShortcut.PROXY);
@@ -1546,7 +1548,7 @@ public void testClientCommitAndDataStoreGetsEvent() throws Exception {
         if (port2 != 0) ccf.addPoolServer("localhost", port2);
         if (port3 != 0) ccf.addPoolServer("localhost", port3);
         ccf.setPoolSubscriptionEnabled(false);
-        ccf.set(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+        ccf.set(LOG_LEVEL, getDUnitLogLevel());
         ClientCache cCache = getClientCache(ccf);
         ClientRegionFactory<CustId, Customer> custrf = cCache
             .createClientRegionFactory(cachingProxy ? ClientRegionShortcut.CACHING_PROXY : ClientRegionShortcut.PROXY);
@@ -2058,7 +2060,7 @@ public void testClientCommitAndDataStoreGetsEvent() throws Exception {
         ccf.addPoolServer("localhost", port2);
         ccf.setPoolSubscriptionEnabled(false);
         ccf.setPoolLoadConditioningInterval(1);
-        ccf.set(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+        ccf.set(LOG_LEVEL, getDUnitLogLevel());
         ClientCache cCache = getClientCache(ccf);
         ClientRegionFactory<CustId, Customer> custrf = cCache
             .createClientRegionFactory(ClientRegionShortcut.PROXY);
@@ -2451,7 +2453,7 @@ public void testClientCommitAndDataStoreGetsEvent() throws Exception {
         ccf.setPoolMinConnections(5);
         ccf.setPoolLoadConditioningInterval(-1);
         ccf.setPoolSubscriptionEnabled(false);
-        ccf.set(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+        ccf.set(LOG_LEVEL, getDUnitLogLevel());
         ClientCache cCache = getClientCache(ccf);
         Region r1 = cCache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY).create("r1");
         Region r2 = cCache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY).create("r2");
@@ -2684,10 +2686,10 @@ public void testClientCommitAndDataStoreGetsEvent() throws Exception {
           ccf.addPoolServer("localhost"/*getServerHostName(Host.getHost(0))*/, port);
           ccf.addPoolServer("localhost", port2);
           ccf.setPoolSubscriptionEnabled(false);
-          ccf.set(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+          ccf.set(LOG_LEVEL, getDUnitLogLevel());
           // these settings were used to manually check that tx operation stats were being updated
-          //ccf.set(DistributionConfig.STATISTIC_SAMPLING_ENABLED_NAME, "true");
-          //ccf.set(DistributionConfig.STATISTIC_ARCHIVE_FILE_NAME, "clientStats.gfs");
+          //ccf.set(STATISTIC_SAMPLING_ENABLED, "true");
+          //ccf.set(STATISTIC_ARCHIVE_FILE, "clientStats.gfs");
           ClientCache cCache = getClientCache(ccf);
           ClientRegionFactory<Integer, String> crf = cCache
           .createClientRegionFactory(ClientRegionShortcut.PROXY);
@@ -2986,7 +2988,7 @@ public void testClientCommitAndDataStoreGetsEvent() throws Exception {
         ccf.addPoolServer("localhost"/*getServerHostName(Host.getHost(0))*/, port1);
         if (port2 != 0) ccf.addPoolServer("localhost", port2);
         ccf.setPoolSubscriptionEnabled(false);
-        ccf.set(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+        ccf.set(LOG_LEVEL, getDUnitLogLevel());
         ClientCache cCache = getClientCache(ccf);
         ClientRegionFactory<CustId, Customer> custrf = cCache
             .createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY);
@@ -3127,7 +3129,7 @@ public void testClientCommitAndDataStoreGetsEvent() throws Exception {
         ccf.addPoolServer("localhost", port2);
         ccf.setPoolMinConnections(0);
         ccf.setPoolSubscriptionEnabled(false);
-        ccf.set(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+        ccf.set(LOG_LEVEL, getDUnitLogLevel());
         ClientCache cCache = getClientCache(ccf);
         ClientRegionFactory<CustId, Customer> custrf = cCache
             .createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY);
@@ -3215,7 +3217,7 @@ public void testClientCommitAndDataStoreGetsEvent() throws Exception {
         ccf.addPoolServer("localhost"/*getServerHostName(Host.getHost(0))*/, port1);
         ccf.setPoolMinConnections(0);
         ccf.setPoolSubscriptionEnabled(false);
-        ccf.set(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+        ccf.set(LOG_LEVEL, getDUnitLogLevel());
         ClientCache cCache = getClientCache(ccf);
         ClientRegionFactory<CustId, Customer> custrf = cCache
             .createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY);
@@ -3305,7 +3307,7 @@ public void testClientCommitAndDataStoreGetsEvent() throws Exception {
         ccf.setPoolMinConnections(0);
         ccf.setPoolSubscriptionEnabled(true);
         ccf.setPoolSubscriptionRedundancy(0);
-        ccf.set(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+        ccf.set(LOG_LEVEL, getDUnitLogLevel());
         ClientCache cCache = getClientCache(ccf);
         Region r = cCache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY).addCacheListener(new ClientListener()).create(regionName);
         r.registerInterestRegex(".*");
@@ -3321,7 +3323,7 @@ public void testClientCommitAndDataStoreGetsEvent() throws Exception {
         ccf.addPoolServer("localhost"/*getServerHostName(Host.getHost(0))*/, port1);
         ccf.setPoolMinConnections(0);
         ccf.setPoolSubscriptionEnabled(true);
-        ccf.set(DistributionConfig.LOG_LEVEL_NAME, getDUnitLogLevel());
+        ccf.set(LOG_LEVEL, getDUnitLogLevel());
         ClientCache cCache = getClientCache(ccf);
         Region r = cCache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY).create(regionName);
         getCache().getCacheTransactionManager().begin();

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/cb291539/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentMapOpsDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentMapOpsDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentMapOpsDUnitTest.java
index 8a1586c..197b028 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentMapOpsDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConcurrentMapOpsDUnitTest.java
@@ -31,7 +31,6 @@ import com.gemstone.gemfire.cache.server.CacheServer;
 import com.gemstone.gemfire.cache.util.CacheListenerAdapter;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.distributed.DistributedMember;
-import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.distributed.internal.membership.MembershipManager;
 import com.gemstone.gemfire.distributed.internal.membership.gms.MembershipManagerHelper;
 import com.gemstone.gemfire.internal.AvailablePort;
@@ -46,6 +45,8 @@ import java.util.HashSet;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import static com.gemstone.gemfire.distributed.SystemConfigurationProperties.*;
+
 /**
  * tests for the concurrentMapOperations. there are more tests in ClientServerMiscDUnitTest
  *
@@ -135,7 +136,7 @@ public class ConcurrentMapOpsDUnitTest extends CacheTestCase {
           ccf.addPoolServer("localhost", port2);
         }
         ccf.setPoolSubscriptionEnabled(true);
-        ccf.set(DistributionConfig.LOG_LEVEL_NAME, LogWriterUtils.getDUnitLogLevel());
+        ccf.set(LOG_LEVEL, LogWriterUtils.getDUnitLogLevel());
         ClientCache cCache = getClientCache(ccf);
         ClientRegionFactory<Integer, String> crf = cCache
             .createClientRegionFactory(isEmpty ? ClientRegionShortcut.PROXY

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/cb291539/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConnectDisconnectDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConnectDisconnectDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConnectDisconnectDUnitTest.java
index b6d5a12..34ad0fe 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConnectDisconnectDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/ConnectDisconnectDUnitTest.java
@@ -22,8 +22,7 @@ import com.gemstone.gemfire.test.dunit.*;
 
 import java.util.Properties;
 
-import static com.gemstone.gemfire.distributed.SystemConfigurationProperties.LOCATORS;
-import static com.gemstone.gemfire.distributed.SystemConfigurationProperties.START_LOCATOR;
+import static com.gemstone.gemfire.distributed.SystemConfigurationProperties.*;
 
 /** A test of 46438 - missing response to an update attributes message */
 public class ConnectDisconnectDUnitTest extends CacheTestCase {
@@ -151,8 +150,8 @@ public class ConnectDisconnectDUnitTest extends CacheTestCase {
   @Override
   public Properties getDistributedSystemProperties() {
     Properties props = super.getDistributedSystemProperties();
-    props.setProperty(DistributionConfig.LOG_LEVEL_NAME, "info");
-    props.setProperty(DistributionConfig.CONSERVE_SOCKETS_NAME, "false");
+    props.setProperty(LOG_LEVEL, "info");
+    props.setProperty(CONSERVE_SOCKETS, "false");
     if (LOCATOR_PORT > 0) {
       props.setProperty(START_LOCATOR, "localhost[" + LOCATOR_PORT + "]");
       props.setProperty(LOCATORS, LOCATORS_STRING);

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/cb291539/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaPropagationDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaPropagationDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaPropagationDUnitTest.java
index 7624d12..151a8e5 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaPropagationDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DeltaPropagationDUnitTest.java
@@ -47,8 +47,7 @@ import com.gemstone.gemfire.test.dunit.*;
 import java.io.File;
 import java.util.Properties;
 
-import static com.gemstone.gemfire.distributed.SystemConfigurationProperties.LOCATORS;
-import static com.gemstone.gemfire.distributed.SystemConfigurationProperties.MCAST_PORT;
+import static com.gemstone.gemfire.distributed.SystemConfigurationProperties.*;
 
 /**
  * @since GemFire 6.1
@@ -649,8 +648,8 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
       Properties properties = new Properties();
       properties.setProperty(MCAST_PORT, "0");
       properties.setProperty(LOCATORS, "");
-      properties.setProperty(DistributionConfig.DURABLE_CLIENT_ID_NAME, durableClientId);
-      properties.setProperty(DistributionConfig.DURABLE_CLIENT_TIMEOUT_NAME, String.valueOf(60));
+      properties.setProperty(DURABLE_CLIENT_ID, durableClientId);
+      properties.setProperty(DURABLE_CLIENT_TIMEOUT, String.valueOf(60));
   
       createDurableCacheClient(((PoolFactoryImpl)pf).getPoolAttributes(),
           regionName, properties, new Integer(DURABLE_CLIENT_LISTENER), Boolean.TRUE);
@@ -722,8 +721,8 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
       Properties properties = new Properties();
       properties.setProperty(MCAST_PORT, "0");
       properties.setProperty(LOCATORS, "");
-      properties.setProperty(DistributionConfig.DURABLE_CLIENT_ID_NAME, durableClientId);
-      properties.setProperty(DistributionConfig.DURABLE_CLIENT_TIMEOUT_NAME, String.valueOf(60));
+      properties.setProperty(DURABLE_CLIENT_ID, durableClientId);
+      properties.setProperty(DURABLE_CLIENT_TIMEOUT, String.valueOf(60));
   
       createDurableCacheClient(((PoolFactoryImpl)pf).getPoolAttributes(),
           regionName, properties, new Integer(DURABLE_CLIENT_LISTENER), Boolean.FALSE);
@@ -1314,7 +1313,7 @@ public class DeltaPropagationDUnitTest extends DistributedTestCase {
     Properties props = new Properties();
     props.setProperty(MCAST_PORT, "0");
     props.setProperty(LOCATORS, "");
-    props.setProperty(DistributionConfig.CLIENT_CONFLATION_PROP_NAME, conflate);
+    props.setProperty(CONFLATE_EVENTS, conflate);
     new DeltaPropagationDUnitTest("temp").createCache(props);
     AttributesFactory factory = new AttributesFactory();
     pool = ClientServerTestCase.configureConnectionPool(factory, "localhost", ports,

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/cb291539/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskOfflineCompactionJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskOfflineCompactionJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskOfflineCompactionJUnitTest.java
index d57f706..60a0548 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskOfflineCompactionJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskOfflineCompactionJUnitTest.java
@@ -33,8 +33,7 @@ import java.io.File;
 import java.io.IOException;
 import java.util.Properties;
 
-import static com.gemstone.gemfire.distributed.SystemConfigurationProperties.LOCATORS;
-import static com.gemstone.gemfire.distributed.SystemConfigurationProperties.MCAST_PORT;
+import static com.gemstone.gemfire.distributed.SystemConfigurationProperties.*;
 import static org.junit.Assert.assertEquals;
 
 /**
@@ -64,10 +63,10 @@ public class DiskOfflineCompactionJUnitTest
     Properties props = new Properties();
     props.setProperty(MCAST_PORT, "0");
     props.setProperty(LOCATORS, "");
-    props.setProperty(DistributionConfig.LOG_LEVEL_NAME, "config"); // to keep diskPerf logs smaller
-    props.setProperty(DistributionConfig.STATISTIC_SAMPLING_ENABLED_NAME, "true");
-    props.setProperty(DistributionConfig.ENABLE_TIME_STATISTICS_NAME, "true");
-    props.setProperty(DistributionConfig.STATISTIC_ARCHIVE_FILE_NAME, "stats.gfs");
+    props.setProperty(LOG_LEVEL, "config"); // to keep diskPerf logs smaller
+    props.setProperty(STATISTIC_SAMPLING_ENABLED, "true");
+    props.setProperty(ENABLE_TIME_STATISTICS, "true");
+    props.setProperty(STATISTIC_ARCHIVE_FILE, "stats.gfs");
     ds = DistributedSystem.connect(props);
     cache = CacheFactory.create(ds);
   }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/cb291539/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskOldAPIsJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskOldAPIsJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskOldAPIsJUnitTest.java
index 316813d..0ac0d11 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskOldAPIsJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskOldAPIsJUnitTest.java
@@ -29,10 +29,8 @@ import java.io.File;
 import java.util.Properties;
 import java.util.Set;
 
-import static com.gemstone.gemfire.distributed.SystemConfigurationProperties.LOCATORS;
-import static com.gemstone.gemfire.distributed.SystemConfigurationProperties.MCAST_PORT;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static com.gemstone.gemfire.distributed.SystemConfigurationProperties.*;
+import static org.junit.Assert.*;
 
 /**
  * Tests the old disk apis to make sure they do the correct thing.
@@ -52,10 +50,10 @@ public class DiskOldAPIsJUnitTest
   static {
     props.setProperty(MCAST_PORT, "0");
     props.setProperty(LOCATORS, "");
-    props.setProperty(DistributionConfig.LOG_LEVEL_NAME, "config"); // to keep diskPerf logs smaller
-    props.setProperty(DistributionConfig.STATISTIC_SAMPLING_ENABLED_NAME, "true");
-    props.setProperty(DistributionConfig.ENABLE_TIME_STATISTICS_NAME, "true");
-    props.setProperty(DistributionConfig.STATISTIC_ARCHIVE_FILE_NAME, "stats.gfs");
+    props.setProperty(LOG_LEVEL, "config"); // to keep diskPerf logs smaller
+    props.setProperty(STATISTIC_SAMPLING_ENABLED, "true");
+    props.setProperty(ENABLE_TIME_STATISTICS, "true");
+    props.setProperty(STATISTIC_ARCHIVE_FILE, "stats.gfs");
   }
 
   @Before

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/cb291539/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCacheXmlJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCacheXmlJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCacheXmlJUnitTest.java
index b1a67e3..57bde39 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCacheXmlJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCacheXmlJUnitTest.java
@@ -20,7 +20,6 @@ import com.gemstone.gemfire.SystemFailure;
 import com.gemstone.gemfire.cache.*;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.distributed.SystemConfigurationProperties;
-import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 import com.gemstone.gemfire.util.test.TestUtil;
 import org.junit.After;
@@ -32,7 +31,7 @@ import java.util.Arrays;
 import java.util.Iterator;
 import java.util.Properties;
 
-import static com.gemstone.gemfire.distributed.SystemConfigurationProperties.MCAST_PORT;
+import static com.gemstone.gemfire.distributed.SystemConfigurationProperties.*;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -73,7 +72,7 @@ public class DiskRegCacheXmlJUnitTest
     props.setProperty(SystemConfigurationProperties.NAME, "test");
     String path = TestUtil.getResourcePath(getClass(), "DiskRegCacheXmlJUnitTest.xml");
     props.setProperty(MCAST_PORT, "0");
-    props.setProperty(DistributionConfig.CACHE_XML_FILE_NAME, path);
+    props.setProperty(CACHE_XML_FILE, path);
     ds = DistributedSystem.connect(props);
     try {
       // Create the cache which causes the cache-xml-file to be parsed

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/cb291539/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCachexmlGeneratorJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCachexmlGeneratorJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCachexmlGeneratorJUnitTest.java
index a71be75..c7f1d18 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCachexmlGeneratorJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegCachexmlGeneratorJUnitTest.java
@@ -21,7 +21,6 @@ import com.gemstone.gemfire.cache.Region;
 import com.gemstone.gemfire.cache.Scope;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.distributed.SystemConfigurationProperties;
-import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.cache.xmlcache.CacheXmlGenerator;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 import org.junit.After;
@@ -34,7 +33,7 @@ import java.io.FileWriter;
 import java.io.PrintWriter;
 import java.util.Properties;
 
-import static com.gemstone.gemfire.distributed.SystemConfigurationProperties.MCAST_PORT;
+import static com.gemstone.gemfire.distributed.SystemConfigurationProperties.*;
 import static org.junit.Assert.fail;
 
 /**
@@ -50,53 +49,9 @@ public class DiskRegCachexmlGeneratorJUnitTest extends DiskRegionTestingBase
 
   DiskRegionProperties diskProps = new DiskRegionProperties();
 
-  DiskRegionProperties diskProps1 = new DiskRegionProperties();
+  DiskRegionProperties[] diskRegionProperties = new DiskRegionProperties[12];
 
-  DiskRegionProperties diskProps2 = new DiskRegionProperties();
-
-  DiskRegionProperties diskProps3 = new DiskRegionProperties();
-
-  DiskRegionProperties diskProps4 = new DiskRegionProperties();
-
-  DiskRegionProperties diskProps5 = new DiskRegionProperties();
-
-  DiskRegionProperties diskProps6 = new DiskRegionProperties();
-
-  DiskRegionProperties diskProps7 = new DiskRegionProperties();
-
-  DiskRegionProperties diskProps8 = new DiskRegionProperties();
-
-  DiskRegionProperties diskProps9 = new DiskRegionProperties();
-
-  DiskRegionProperties diskProps10 = new DiskRegionProperties();
-
-  DiskRegionProperties diskProps11 = new DiskRegionProperties();
-
-  DiskRegionProperties diskProps12 = new DiskRegionProperties();
-
-  Region region1;
-
-  Region region2;
-
-  Region region3;
-
-  Region region4;
-
-  Region region5;
-
-  Region region6;
-
-  Region region7;
-
-  Region region8;
-
-  Region region9;
-
-  Region region10;
-
-  Region region11;
-
-  Region region12;
+  Region[] regions = new Region[12];
 
   @Before
   public void setUp() throws Exception
@@ -107,18 +62,17 @@ public class DiskRegCachexmlGeneratorJUnitTest extends DiskRegionTestingBase
     diskDirSize[1] = Integer.MAX_VALUE;
     diskDirSize[2] = 1073741824;
     diskDirSize[3] = 2073741824;
-    diskProps1.setDiskDirsAndSizes(dirs, diskDirSize);
-    diskProps2.setDiskDirs(dirs);
-    diskProps3.setDiskDirs(dirs);
-    diskProps4.setDiskDirs(dirs);
-    diskProps5.setDiskDirs(dirs);
-    diskProps6.setDiskDirs(dirs);
-    diskProps7.setDiskDirs(dirs);
-    diskProps8.setDiskDirs(dirs);
-    diskProps9.setDiskDirs(dirs);
-    diskProps10.setDiskDirs(dirs);
-    diskProps11.setDiskDirs(dirs);
-    diskProps12.setDiskDirs(dirs);
+
+    for (int i = 0; i < diskRegionProperties.length; i++) {
+      diskRegionProperties[i] = new DiskRegionProperties();
+      if(i == 0)
+      {
+        diskRegionProperties[i].setDiskDirsAndSizes(dirs, diskDirSize);
+      }
+      else{
+        diskRegionProperties[i].setDiskDirs(dirs);
+      }
+    }
   }
 
   @After
@@ -130,97 +84,97 @@ public class DiskRegCachexmlGeneratorJUnitTest extends DiskRegionTestingBase
 
   public void createCacheXML()
   {
-    // create the region1 which is SyncPersistOnly and set DiskWriteAttibutes
-    diskProps1.setRolling(true);
-    diskProps1.setMaxOplogSize(1073741824L);
-    diskProps1.setRegionName("region1");
-    region1 = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache,
-        diskProps1, Scope.LOCAL);
-
-    // create the region2 which is SyncPersistOnly and set DiskWriteAttibutes
-
-    diskProps2.setRolling(false);
-    diskProps2.setRegionName("region2");
-    region2 = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache,
-        diskProps2, Scope.LOCAL);
-
-    // create the region3 which AsyncPersistOnly, No buffer and Rolling oplog
-    diskProps3.setRolling(true);
-    diskProps3.setMaxOplogSize(1073741824L);
-    diskProps3.setRegionName("region3");
-    region3 = DiskRegionHelperFactory.getAsyncPersistOnlyRegion(cache,
-        diskProps3);
-
-    // create the region4 which is AsynchPersistonly, No buffer and fixed oplog
-    diskProps4.setRolling(false);
-    diskProps4.setRegionName("region4");
-    region4 = DiskRegionHelperFactory.getAsyncPersistOnlyRegion(cache,
-        diskProps4);
-
-    // create the region5 which is SynchOverflowOnly, Rolling oplog
-    diskProps5.setRolling(true);
-    diskProps5.setMaxOplogSize(1073741824L);
-    diskProps5.setRegionName("region5");
-    region5 = DiskRegionHelperFactory.getSyncOverFlowOnlyRegion(cache,
-        diskProps5);
-
-    // create the region6 which is SyncOverflowOnly, Fixed oplog
-    diskProps6.setRolling(false);
-    diskProps6.setRegionName("region6");
-    region6 = DiskRegionHelperFactory.getSyncOverFlowOnlyRegion(cache,
-        diskProps6);
-
-    // create the region7 which is AsyncOverflow, with Buffer and rolling oplog
-    diskProps7.setRolling(true);
-    diskProps7.setMaxOplogSize(1073741824L);
-    diskProps7.setBytesThreshold(10000l);
-    diskProps7.setTimeInterval(15l);
-    diskProps7.setRegionName("region7");
-    region7 = DiskRegionHelperFactory.getAsyncOverFlowOnlyRegion(cache,
-        diskProps7);
-
-    // create the region8 which is AsyncOverflow ,Time base buffer-zero byte
+    // create the regions[0] which is SyncPersistOnly and set DiskWriteAttibutes
+    diskRegionProperties[0].setRolling(true);
+    diskRegionProperties[0].setMaxOplogSize(1073741824L);
+    diskRegionProperties[0].setRegionName("regions1");
+    regions[0] = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache,
+        diskRegionProperties[0], Scope.LOCAL);
+
+    // create the regions[1] which is SyncPersistOnly and set DiskWriteAttibutes
+
+    diskRegionProperties[1].setRolling(false);
+    diskRegionProperties[1].setRegionName("regions2");
+    regions[1] = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache,
+        diskRegionProperties[1], Scope.LOCAL);
+
+    // create the regions[2] which AsyncPersistOnly, No buffer and Rolling oplog
+    diskRegionProperties[2].setRolling(true);
+    diskRegionProperties[2].setMaxOplogSize(1073741824L);
+    diskRegionProperties[2].setRegionName("regions3");
+    regions[2] = DiskRegionHelperFactory.getAsyncPersistOnlyRegion(cache,
+        diskRegionProperties[2]);
+
+    // create the regions[3] which is AsynchPersistonly, No buffer and fixed oplog
+    diskRegionProperties[3].setRolling(false);
+    diskRegionProperties[3].setRegionName("regions4");
+    regions[3] = DiskRegionHelperFactory.getAsyncPersistOnlyRegion(cache,
+        diskRegionProperties[3]);
+
+    // create the regions[4] which is SynchOverflowOnly, Rolling oplog
+    diskRegionProperties[4].setRolling(true);
+    diskRegionProperties[4].setMaxOplogSize(1073741824L);
+    diskRegionProperties[4].setRegionName("regions5");
+    regions[4] = DiskRegionHelperFactory.getSyncOverFlowOnlyRegion(cache,
+        diskRegionProperties[4]);
+
+    // create the regions[5] which is SyncOverflowOnly, Fixed oplog
+    diskRegionProperties[5].setRolling(false);
+    diskRegionProperties[5].setRegionName("regions6");
+    regions[5] = DiskRegionHelperFactory.getSyncOverFlowOnlyRegion(cache,
+        diskRegionProperties[5]);
+
+    // create the regions[6] which is AsyncOverflow, with Buffer and rolling oplog
+    diskRegionProperties[6].setRolling(true);
+    diskRegionProperties[6].setMaxOplogSize(1073741824L);
+    diskRegionProperties[6].setBytesThreshold(10000l);
+    diskRegionProperties[6].setTimeInterval(15l);
+    diskRegionProperties[6].setRegionName("regions7");
+    regions[6] = DiskRegionHelperFactory.getAsyncOverFlowOnlyRegion(cache,
+        diskRegionProperties[6]);
+
+    // create the regions[7] which is AsyncOverflow ,Time base buffer-zero byte
     // buffer
     // and Fixed oplog
-    diskProps8.setRolling(false);
-    diskProps8.setTimeInterval(15l);
-    diskProps8.setBytesThreshold(0l);
-    diskProps8.setRegionName("region8");
-    region8 = DiskRegionHelperFactory.getAsyncOverFlowOnlyRegion(cache,
-        diskProps8);
-
-    // create the region9 which is SyncPersistOverflow, Rolling oplog
-    diskProps9.setRolling(true);
-    diskProps9.setMaxOplogSize(1073741824L);
-    diskProps9.setRegionName("region9");
-    region9 = DiskRegionHelperFactory.getSyncOverFlowAndPersistRegion(cache,
-        diskProps9);
-
-    // create the region10 which is Sync PersistOverflow, fixed oplog
-    diskProps10.setRolling(false);
-    diskProps10.setRegionName("region10");
-    region10 = DiskRegionHelperFactory.getSyncOverFlowAndPersistRegion(cache,
-        diskProps10);
-    // create the region11 which is Async Overflow Persist ,with buffer and
+    diskRegionProperties[7].setRolling(false);
+    diskRegionProperties[7].setTimeInterval(15l);
+    diskRegionProperties[7].setBytesThreshold(0l);
+    diskRegionProperties[7].setRegionName("regions8");
+    regions[7] = DiskRegionHelperFactory.getAsyncOverFlowOnlyRegion(cache,
+        diskRegionProperties[7]);
+
+    // create the regions[8] which is SyncPersistOverflow, Rolling oplog
+    diskRegionProperties[8].setRolling(true);
+    diskRegionProperties[8].setMaxOplogSize(1073741824L);
+    diskRegionProperties[8].setRegionName("regions9");
+    regions[8] = DiskRegionHelperFactory.getSyncOverFlowAndPersistRegion(cache,
+        diskRegionProperties[8]);
+
+    // create the regions[9] which is Sync PersistOverflow, fixed oplog
+    diskRegionProperties[9].setRolling(false);
+    diskRegionProperties[9].setRegionName("regions10");
+    regions[9] = DiskRegionHelperFactory.getSyncOverFlowAndPersistRegion(cache,
+        diskRegionProperties[9]);
+    // create the regions[10] which is Async Overflow Persist ,with buffer and
     // rollong
     // oplog
-    diskProps11.setRolling(true);
-    diskProps11.setMaxOplogSize(1073741824L);
-    diskProps11.setBytesThreshold(10000l);
-    diskProps11.setTimeInterval(15l);
-    diskProps11.setRegionName("region11");
-    region11 = DiskRegionHelperFactory.getAsyncOverFlowAndPersistRegion(cache,
-        diskProps11);
-
-    // create the region12 which is Async Persist Overflow with time based
+    diskRegionProperties[10].setRolling(true);
+    diskRegionProperties[10].setMaxOplogSize(1073741824L);
+    diskRegionProperties[10].setBytesThreshold(10000l);
+    diskRegionProperties[10].setTimeInterval(15l);
+    diskRegionProperties[10].setRegionName("regions11");
+    regions[10] = DiskRegionHelperFactory.getAsyncOverFlowAndPersistRegion(cache,
+        diskRegionProperties[10]);
+
+    // create the regions[11] which is Async Persist Overflow with time based
     // buffer
     // and Fixed oplog
-    diskProps12.setRolling(false);
-    diskProps12.setBytesThreshold(0l);
-    diskProps12.setTimeInterval(15l);
-    diskProps12.setRegionName("region12");
-    region12 = DiskRegionHelperFactory.getAsyncOverFlowAndPersistRegion(cache,
-        diskProps12);
+    diskRegionProperties[11].setRolling(false);
+    diskRegionProperties[11].setBytesThreshold(0l);
+    diskRegionProperties[11].setTimeInterval(15l);
+    diskRegionProperties[11].setRegionName("regions12");
+    regions[11] = DiskRegionHelperFactory.getAsyncOverFlowAndPersistRegion(cache,
+        diskRegionProperties[11]);
 
 
     //cacheXmlGenerator: generates cacheXml file
@@ -246,58 +200,46 @@ public class DiskRegCachexmlGeneratorJUnitTest extends DiskRegionTestingBase
     props.setProperty(SystemConfigurationProperties.NAME, "DiskRegCachexmlGeneratorJUnitTest");
     props.setProperty(MCAST_PORT, "0");
     String path = "DiskRegCachexmlGeneratorJUnitTest.xml";
-    props.setProperty(DistributionConfig.CACHE_XML_FILE_NAME, path);
+    props.setProperty(CACHE_XML_FILE, path);
     ds = DistributedSystem.connect(props);
     // Create the cache which causes the cache-xml-file to be parsed
     cache = CacheFactory.create(ds);
 
-    // Get the region1 
-    region1 = cache.getRegion("region1");
-    verify((LocalRegion)region1, diskProps1);
+    // Get the regions[0] 
+    verify((LocalRegion) cache.getRegion("regions1"), diskRegionProperties[0]);
 
-    // Get the region2
-    Region region2 = cache.getRegion("region2");
-    verify((LocalRegion)region2, diskProps2);
+    // Get the regions[1]
+    verify((LocalRegion) cache.getRegion("regions2"), diskRegionProperties[1]);
 
-    // Get the region3 
-    Region region3 = cache.getRegion("region3");
-    verify((LocalRegion)region3, diskProps3);
+    // Get the regions[2] 
+    verify((LocalRegion) cache.getRegion("regions3"), diskRegionProperties[2]);
 
-    // Get the region4 
-    Region region4 = cache.getRegion("region4");
-    verify((LocalRegion)region4, diskProps4);
+    // Get the regions[3] 
+    verify((LocalRegion) cache.getRegion("regions4"), diskRegionProperties[3]);
     
-    // Get the region5 
-    Region region5 = cache.getRegion("region5");
-    verify((LocalRegion)region5, diskProps5);
+    // Get the regions[4] 
+    verify((LocalRegion) cache.getRegion("regions5"), diskRegionProperties[4]);
 
-    // Get the region6 
-    Region region6 = cache.getRegion("region6");
-    verify((LocalRegion)region6, diskProps6);
+    // Get the regions[5] 
+    verify((LocalRegion) cache.getRegion("regions6"), diskRegionProperties[5]);
     
-    // Get the region7 
-    Region region7 = cache.getRegion("region7");
-    verify((LocalRegion)region7, diskProps7);
+    // Get the regions[6] 
+    verify((LocalRegion) cache.getRegion("regions7"), diskRegionProperties[6]);
 
-    // Get the region8 
-    Region region8 = cache.getRegion("region8");
-    verify((LocalRegion)region8, diskProps8);
+    // Get the regions[7] 
+    verify((LocalRegion) cache.getRegion("regions8"), diskRegionProperties[7]);
 
-    // Get the region9 
-    Region region9 = cache.getRegion("region9");
-    verify((LocalRegion)region9, diskProps9);
+    // Get the regions[8] 
+    verify((LocalRegion) cache.getRegion("regions9"), diskRegionProperties[8]);
 
-    // Get the region10 
-    Region region10 = cache.getRegion("region10");
-    verify((LocalRegion)region10, diskProps10);
+    // Get the regions[9] 
+    verify((LocalRegion) cache.getRegion("regions10"), diskRegionProperties[9]);
 
-    // Get the region11
-    Region region11 = cache.getRegion("region11");
-    verify((LocalRegion)region11, diskProps11);
+    // Get the regions[10]
+    verify((LocalRegion) cache.getRegion("regions11"), diskRegionProperties[10]);
 
-    // Get the region12 
-    Region region12 = cache.getRegion("region12");
-    verify((LocalRegion)region12, diskProps12);
+    // Get the regions[11] 
+    verify((LocalRegion) cache.getRegion("regions12"), diskRegionProperties[11]);
   }
 
 }// end of DiskRegCachexmlGeneratorJUnitTest

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/cb291539/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionIllegalArguementsJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionIllegalArguementsJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionIllegalArguementsJUnitTest.java
index 09f95f8..005555a 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionIllegalArguementsJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionIllegalArguementsJUnitTest.java
@@ -16,6 +16,8 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
+import static com.gemstone.gemfire.distributed.SystemConfigurationProperties.*;
+
 import com.gemstone.gemfire.cache.Cache;
 import com.gemstone.gemfire.cache.CacheFactory;
 import com.gemstone.gemfire.cache.DiskStoreFactory;
@@ -54,10 +56,10 @@ public class DiskRegionIllegalArguementsJUnitTest
   static {
     props.setProperty(MCAST_PORT, "0");
     props.setProperty(LOCATORS, "");
-    props.setProperty(DistributionConfig.LOG_LEVEL_NAME, "config"); // to keep diskPerf logs smaller
-    props.setProperty(DistributionConfig.STATISTIC_SAMPLING_ENABLED_NAME, "true");
-    props.setProperty(DistributionConfig.ENABLE_TIME_STATISTICS_NAME, "true");
-    props.setProperty(DistributionConfig.STATISTIC_ARCHIVE_FILE_NAME, "stats.gfs");
+    props.setProperty(LOG_LEVEL, "config"); // to keep diskPerf logs smaller
+    props.setProperty(STATISTIC_SAMPLING_ENABLED, "true");
+    props.setProperty(ENABLE_TIME_STATISTICS, "true");
+    props.setProperty(STATISTIC_ARCHIVE_FILE, "stats.gfs");
   }
 
   @Before

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/cb291539/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionIllegalCacheXMLvaluesJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionIllegalCacheXMLvaluesJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionIllegalCacheXMLvaluesJUnitTest.java
index cac5087..490aa8b 100755
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionIllegalCacheXMLvaluesJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionIllegalCacheXMLvaluesJUnitTest.java
@@ -19,7 +19,6 @@ package com.gemstone.gemfire.internal.cache;
 import com.gemstone.gemfire.cache.CacheFactory;
 import com.gemstone.gemfire.cache.CacheXmlException;
 import com.gemstone.gemfire.distributed.DistributedSystem;
-import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.test.junit.categories.IntegrationTest;
 import com.gemstone.gemfire.util.test.TestUtil;
 import org.junit.Test;
@@ -28,7 +27,7 @@ import org.junit.experimental.categories.Category;
 import java.io.File;
 import java.util.Properties;
 
-import static com.gemstone.gemfire.distributed.SystemConfigurationProperties.MCAST_PORT;
+import static com.gemstone.gemfire.distributed.SystemConfigurationProperties.*;
 import static org.junit.Assert.fail;
 
 /**
@@ -52,7 +51,7 @@ public class DiskRegionIllegalCacheXMLvaluesJUnitTest
       dir.deleteOnExit();
       Properties props = new Properties();
       props.setProperty(MCAST_PORT, "0");
-      props.setProperty(DistributionConfig.CACHE_XML_FILE_NAME, TestUtil.getResourcePath(getClass(), path));
+      props.setProperty(CACHE_XML_FILE, TestUtil.getResourcePath(getClass(), path));
       ds = DistributedSystem.connect(props);
       try {
        

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/cb291539/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionTestingBase.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionTestingBase.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionTestingBase.java
index 08772a4..83a29ae 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionTestingBase.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskRegionTestingBase.java
@@ -20,6 +20,8 @@
  */
 package com.gemstone.gemfire.internal.cache;
 
+import static com.gemstone.gemfire.distributed.SystemConfigurationProperties.*;
+
 import com.gemstone.gemfire.LogWriter;
 import com.gemstone.gemfire.SystemFailure;
 import com.gemstone.gemfire.cache.*;
@@ -77,10 +79,10 @@ public class DiskRegionTestingBase
   {
     props.setProperty(MCAST_PORT, "0");
     props.setProperty(LOCATORS, "");
-    props.setProperty(DistributionConfig.LOG_LEVEL_NAME, "config"); // to keep diskPerf logs smaller
-    props.setProperty(DistributionConfig.STATISTIC_SAMPLING_ENABLED_NAME, "true");
-    props.setProperty(DistributionConfig.ENABLE_TIME_STATISTICS_NAME, "true");
-    props.setProperty(DistributionConfig.STATISTIC_ARCHIVE_FILE_NAME, "stats.gfs");
+    props.setProperty(LOG_LEVEL, "config"); // to keep diskPerf logs smaller
+    props.setProperty(STATISTIC_SAMPLING_ENABLED, "true");
+    props.setProperty(ENABLE_TIME_STATISTICS, "true");
+    props.setProperty(STATISTIC_ARCHIVE_FILE, "stats.gfs");
 
     File testingDirectory = new File("testingDirectory");
     testingDirectory.mkdir();
@@ -173,8 +175,8 @@ public class DiskRegionTestingBase
 
   protected Cache createCache() {
     // useful for debugging:
-//    props.put(DistributionConfig.LOG_FILE_NAME, "diskRegionTestingBase_system.log");
-//    props.put(DistributionConfig.LOG_LEVEL_NAME, getGemFireLogLevel());
+//    props.put(LOG_FILE, "diskRegionTestingBase_system.log");
+//    props.put(LOG_LEVEL, getGemFireLogLevel());
     cache = new CacheFactory(props).create();
     ds = cache.getDistributedSystem();
     logWriter = cache.getLogger();

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/cb291539/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskStoreFactoryJUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskStoreFactoryJUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskStoreFactoryJUnitTest.java
index f2685f6..84dcd1b 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskStoreFactoryJUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/DiskStoreFactoryJUnitTest.java
@@ -30,8 +30,7 @@ import java.io.FilenameFilter;
 import java.util.Arrays;
 import java.util.Properties;
 
-import static com.gemstone.gemfire.distributed.SystemConfigurationProperties.LOCATORS;
-import static com.gemstone.gemfire.distributed.SystemConfigurationProperties.MCAST_PORT;
+import static com.gemstone.gemfire.distributed.SystemConfigurationProperties.*;
 import static org.junit.Assert.*;
 
 /**
@@ -51,10 +50,10 @@ public class DiskStoreFactoryJUnitTest
   static {
     props.setProperty(MCAST_PORT, "0");
     props.setProperty(LOCATORS, "");
-    props.setProperty(DistributionConfig.LOG_LEVEL_NAME, "config"); // to keep diskPerf logs smaller
-    props.setProperty(DistributionConfig.STATISTIC_SAMPLING_ENABLED_NAME, "true");
-    props.setProperty(DistributionConfig.ENABLE_TIME_STATISTICS_NAME, "true");
-    props.setProperty(DistributionConfig.STATISTIC_ARCHIVE_FILE_NAME, "stats.gfs");
+    props.setProperty(LOG_LEVEL, "config"); // to keep diskPerf logs smaller
+    props.setProperty(STATISTIC_SAMPLING_ENABLED, "true");
+    props.setProperty(ENABLE_TIME_STATISTICS, "true");
+    props.setProperty(STATISTIC_ARCHIVE_FILE, "stats.gfs");
   }
 
   @Before

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/cb291539/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/FixedPRSinglehopDUnitTest.java
----------------------------------------------------------------------
diff --git a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/FixedPRSinglehopDUnitTest.java b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/FixedPRSinglehopDUnitTest.java
index c97ac23..8ffcf07 100644
--- a/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/FixedPRSinglehopDUnitTest.java
+++ b/geode-core/src/test/java/com/gemstone/gemfire/internal/cache/FixedPRSinglehopDUnitTest.java
@@ -25,7 +25,6 @@ import com.gemstone.gemfire.cache.server.CacheServer;
 import com.gemstone.gemfire.cache30.CacheTestCase;
 import com.gemstone.gemfire.distributed.DistributedSystem;
 import com.gemstone.gemfire.distributed.Locator;
-import com.gemstone.gemfire.distributed.internal.DistributionConfig;
 import com.gemstone.gemfire.internal.AvailablePort;
 import com.gemstone.gemfire.internal.cache.partitioned.fixed.QuarterPartitionResolver;
 import com.gemstone.gemfire.internal.cache.partitioned.fixed.SingleHopQuarterPartitionResolver;
@@ -38,8 +37,7 @@ import java.io.File;
 import java.io.IOException;
 import java.util.*;
 
-import static com.gemstone.gemfire.distributed.SystemConfigurationProperties.LOCATORS;
-import static com.gemstone.gemfire.distributed.SystemConfigurationProperties.MCAST_PORT;
+import static com.gemstone.gemfire.distributed.SystemConfigurationProperties.*;
 
 public class FixedPRSinglehopDUnitTest extends CacheTestCase {
 
@@ -412,7 +410,7 @@ public class FixedPRSinglehopDUnitTest extends CacheTestCase {
     File logFile = new File("locator-" + locatorPort + ".log");
 
     Properties props = new Properties();
-    props.setProperty(DistributionConfig.ENABLE_CLUSTER_CONFIGURATION_NAME, "true");
+    props.setProperty(ENABLE_CLUSTER_CONFIGURATION, "true");
     try {
       locator = Locator.startLocatorAndDS(locatorPort, logFile, null, props);
     }