Posted to commits@geode.apache.org by kl...@apache.org on 2017/05/11 21:07:12 UTC

[33/52] [abbrv] geode git commit: GEODE-2632: change dependencies on GemFireCacheImpl to InternalCache

http://git-wip-us.apache.org/repos/asf/geode/blob/21f405b8/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegionMap.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegionMap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegionMap.java
index 0f79b80..e0fc27c 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegionMap.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegionMap.java
@@ -12,12 +12,29 @@
  * or implied. See the License for the specific language governing permissions and limitations under
  * the License.
  */
-
 package org.apache.geode.internal.cache;
 
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.logging.log4j.Logger;
+
 import org.apache.geode.GemFireIOException;
 import org.apache.geode.InvalidDeltaException;
-import org.apache.geode.cache.*;
+import org.apache.geode.cache.CacheEvent;
+import org.apache.geode.cache.CacheWriter;
+import org.apache.geode.cache.CacheWriterException;
+import org.apache.geode.cache.DiskAccessException;
+import org.apache.geode.cache.EntryNotFoundException;
+import org.apache.geode.cache.Operation;
+import org.apache.geode.cache.RegionDestroyedException;
+import org.apache.geode.cache.TimeoutException;
+import org.apache.geode.cache.TransactionId;
 import org.apache.geode.cache.query.IndexMaintenanceException;
 import org.apache.geode.cache.query.QueryException;
 import org.apache.geode.cache.query.internal.index.IndexManager;
@@ -37,7 +54,12 @@ import org.apache.geode.internal.cache.tier.sockets.CacheClientNotifier;
 import org.apache.geode.internal.cache.tier.sockets.ClientProxyMembershipID;
 import org.apache.geode.internal.cache.tier.sockets.ClientUpdateMessageImpl;
 import org.apache.geode.internal.cache.tier.sockets.HAEventWrapper;
-import org.apache.geode.internal.cache.versions.*;
+import org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException;
+import org.apache.geode.internal.cache.versions.RegionVersionVector;
+import org.apache.geode.internal.cache.versions.VersionHolder;
+import org.apache.geode.internal.cache.versions.VersionSource;
+import org.apache.geode.internal.cache.versions.VersionStamp;
+import org.apache.geode.internal.cache.versions.VersionTag;
 import org.apache.geode.internal.cache.wan.GatewaySenderEventImpl;
 import org.apache.geode.internal.concurrent.MapCallbackAdapter;
 import org.apache.geode.internal.i18n.LocalizedStrings;
@@ -54,21 +76,12 @@ import org.apache.geode.internal.offheap.annotations.Unretained;
 import org.apache.geode.internal.sequencelog.EntryLogger;
 import org.apache.geode.internal.util.concurrent.CustomEntryConcurrentHashMap;
 
-import org.apache.logging.log4j.Logger;
-
-import java.util.*;
-import java.util.concurrent.atomic.AtomicInteger;
-
 /**
  * Abstract implementation of {@link RegionMap}that has all the common behavior.
  *
  * @since GemFire 3.5.1
- *
- *
  */
-
 public abstract class AbstractRegionMap implements RegionMap {
-
   private static final Logger logger = LogService.getLogger();
 
   /** The underlying map for this region. */
@@ -81,10 +94,15 @@ public abstract class AbstractRegionMap implements RegionMap {
   static Runnable testHookRunnableFor48182 = null;
 
   private RegionEntryFactory entryFactory;
+
   private Attributes attr;
-  private transient Object owner; // the region that owns this map
 
-  protected AbstractRegionMap(InternalRegionArguments internalRegionArgs) {}
+  // the region that owns this map
+  private Object owner;
+
+  protected AbstractRegionMap(InternalRegionArguments internalRegionArgs) {
+    // do nothing
+  }
 
   protected void initialize(Object owner, Attributes attr,
       InternalRegionArguments internalRegionArgs, boolean isLRU) {
@@ -93,22 +111,19 @@ public abstract class AbstractRegionMap implements RegionMap {
     _setMap(createConcurrentMap(attr.initialCapacity, attr.loadFactor, attr.concurrencyLevel, false,
         new AbstractRegionEntry.HashRegionEntryCreator()));
 
-    final GemFireCacheImpl cache;
     boolean isDisk;
-    boolean withVersioning = false;
-    boolean offHeap = false;
+    boolean withVersioning;
+    boolean offHeap;
     if (owner instanceof LocalRegion) {
       LocalRegion region = (LocalRegion) owner;
       isDisk = region.getDiskRegion() != null;
-      cache = region.getGemFireCache();
       withVersioning = region.getConcurrencyChecksEnabled();
       offHeap = region.getOffHeap();
     } else if (owner instanceof PlaceHolderDiskRegion) {
-      offHeap = ((PlaceHolderDiskRegion) owner).getOffHeap();
+      offHeap = ((RegionEntryContext) owner).getOffHeap();
       isDisk = true;
       withVersioning =
-          ((PlaceHolderDiskRegion) owner).getFlags().contains(DiskRegionFlag.IS_WITH_VERSIONING);
-      cache = GemFireCacheImpl.getInstance();
+          ((DiskRegionView) owner).getFlags().contains(DiskRegionFlag.IS_WITH_VERSIONING);
     } else {
       throw new IllegalStateException("expected LocalRegion or PlaceHolderDiskRegion");
     }
@@ -117,15 +132,15 @@ public abstract class AbstractRegionMap implements RegionMap {
         attr.statisticsEnabled, isLRU, isDisk, withVersioning, offHeap));
   }
 
-  protected CustomEntryConcurrentHashMap<Object, Object> createConcurrentMap(int initialCapacity,
+  private CustomEntryConcurrentHashMap<Object, Object> createConcurrentMap(int initialCapacity,
       float loadFactor, int concurrencyLevel, boolean isIdentityMap,
       CustomEntryConcurrentHashMap.HashEntryCreator<Object, Object> entryCreator) {
     if (entryCreator != null) {
-      return new CustomEntryConcurrentHashMap<Object, Object>(initialCapacity, loadFactor,
-          concurrencyLevel, isIdentityMap, entryCreator);
+      return new CustomEntryConcurrentHashMap<>(initialCapacity, loadFactor, concurrencyLevel,
+          isIdentityMap, entryCreator);
     } else {
-      return new CustomEntryConcurrentHashMap<Object, Object>(initialCapacity, loadFactor,
-          concurrencyLevel, isIdentityMap);
+      return new CustomEntryConcurrentHashMap<>(initialCapacity, loadFactor, concurrencyLevel,
+          isIdentityMap);
     }
   }
 
@@ -1548,7 +1563,6 @@ public abstract class AbstractRegionMap implements RegionMap {
     final boolean isDebugEnabled = logger.isDebugEnabled();
 
     final LocalRegion owner = _getOwner();
-    owner.checkBeforeEntrySync(txEvent);
 
     final boolean isRegionReady = !inTokenMode;
     final boolean hasRemoteOrigin = !((TXId) txId).getMemberId().equals(owner.getMyId());
@@ -1591,7 +1605,7 @@ public abstract class AbstractRegionMap implements RegionMap {
                 if (txEvent != null) {
                   txEvent.addDestroy(owner, re, re.getKey(), aCallbackArgument);
                 }
-                boolean clearOccured = false;
+                boolean clearOccurred = false;
                 try {
                   processAndGenerateTXVersionTag(owner, cbEvent, re, txEntryState);
                   if (inTokenMode) {
@@ -1618,10 +1632,10 @@ public abstract class AbstractRegionMap implements RegionMap {
                   EntryLogger.logTXDestroy(_getOwnerObject(), key);
                   owner.updateSizeOnRemove(key, oldSize);
                 } catch (RegionClearedException rce) {
-                  clearOccured = true;
+                  clearOccurred = true;
                 }
                 owner.txApplyDestroyPart2(re, re.getKey(), inTokenMode,
-                    clearOccured /* Clear Conflciting with the operation */);
+                    clearOccurred /* Clear Conflciting with the operation */);
                 if (invokeCallbacks) {
                   switchEventOwnerAndOriginRemote(cbEvent, hasRemoteOrigin);
                   if (pendingCallbacks == null) {
@@ -1632,7 +1646,7 @@ public abstract class AbstractRegionMap implements RegionMap {
                     cbEventInPending = true;
                   }
                 }
-                if (!clearOccured) {
+                if (!clearOccurred) {
                   lruEntryDestroy(re);
                 }
                 if (owner.concurrencyChecksEnabled && txEntryState != null && cbEvent != null) {
@@ -1856,7 +1870,7 @@ public abstract class AbstractRegionMap implements RegionMap {
     }
     boolean didInvalidate = false;
     RegionEntry invalidatedRe = null;
-    boolean clearOccured = false;
+    boolean clearOccurred = false;
     DiskRegion dr = owner.getDiskRegion();
     boolean ownerIsInitialized = owner.isInitialized();
     try {
@@ -1966,11 +1980,11 @@ public abstract class AbstractRegionMap implements RegionMap {
                           // generate versionTag for the event
                           EntryLogger.logInvalidate(event);
                           owner.recordEvent(event);
-                          clearOccured = true;
+                          clearOccurred = true;
                         }
                         owner.basicInvalidatePart2(oldRe, event,
-                            clearOccured /* conflict with clear */, invokeCallbacks);
-                        if (!clearOccured) {
+                            clearOccurred /* conflict with clear */, invokeCallbacks);
+                        if (!clearOccurred) {
                           if (isCreate) {
                             lruEntryCreate(oldRe);
                           } else {
@@ -2011,11 +2025,11 @@ public abstract class AbstractRegionMap implements RegionMap {
                     // TODO: deltaGII: do we even need RegionClearedException?
                     // generate versionTag for the event
                     owner.recordEvent(event);
-                    clearOccured = true;
+                    clearOccurred = true;
                   }
-                  owner.basicInvalidatePart2(newRe, event, clearOccured /* conflict with clear */,
+                  owner.basicInvalidatePart2(newRe, event, clearOccurred /* conflict with clear */,
                       invokeCallbacks);
-                  if (!clearOccured) {
+                  if (!clearOccurred) {
                     lruEntryCreate(newRe);
                     incEntryCount(1);
                   }
@@ -2183,7 +2197,7 @@ public abstract class AbstractRegionMap implements RegionMap {
                         // generate versionTag for the event
                         EntryLogger.logInvalidate(event);
                         _getOwner().recordEvent(event);
-                        clearOccured = true;
+                        clearOccurred = true;
                       } catch (ConcurrentCacheModificationException ccme) {
                         VersionTag tag = event.getVersionTag();
                         if (tag != null && tag.isTimeStampUpdated()) {
@@ -2192,9 +2206,9 @@ public abstract class AbstractRegionMap implements RegionMap {
                         }
                         throw ccme;
                       }
-                      owner.basicInvalidatePart2(re, event, clearOccured /* conflict with clear */,
+                      owner.basicInvalidatePart2(re, event, clearOccurred /* conflict with clear */,
                           invokeCallbacks);
-                      if (!clearOccured) {
+                      if (!clearOccurred) {
                         if (oldWasTombstone) {
                           lruEntryCreate(re);
                         } else {
@@ -2228,7 +2242,7 @@ public abstract class AbstractRegionMap implements RegionMap {
           if (invalidatedRe != null) {
             owner.basicInvalidatePart3(invalidatedRe, event, invokeCallbacks);
           }
-          if (didInvalidate && !clearOccured) {
+          if (didInvalidate && !clearOccurred) {
             try {
               lruUpdateCallback();
             } catch (DiskAccessException dae) {
@@ -2337,7 +2351,6 @@ public abstract class AbstractRegionMap implements RegionMap {
       TXEntryState txEntryState, VersionTag versionTag, long tailKey) {
     // boolean didInvalidate = false;
     final LocalRegion owner = _getOwner();
-    owner.checkBeforeEntrySync(txEvent);
 
     @Released
     EntryEventImpl cbEvent = null;
@@ -2397,7 +2410,7 @@ public abstract class AbstractRegionMap implements RegionMap {
                     }
                     oldRe.setValueResultOfSearch(false);
                     processAndGenerateTXVersionTag(owner, cbEvent, oldRe, txEntryState);
-                    boolean clearOccured = false;
+                    boolean clearOccurred = false;
                     try {
                       oldRe.setValue(owner, oldRe.prepareValueForCache(owner, newValue, true));
                       EntryLogger.logTXInvalidate(_getOwnerObject(), key);
@@ -2406,10 +2419,9 @@ public abstract class AbstractRegionMap implements RegionMap {
                         owner.unscheduleTombstone(oldRe);
                       }
                     } catch (RegionClearedException rce) {
-                      clearOccured = true;
+                      clearOccurred = true;
                     }
-                    owner.txApplyInvalidatePart2(oldRe, oldRe.getKey(), didDestroy, true,
-                        clearOccured);
+                    owner.txApplyInvalidatePart2(oldRe, oldRe.getKey(), didDestroy, true);
                     // didInvalidate = true;
                     if (invokeCallbacks) {
                       switchEventOwnerAndOriginRemote(cbEvent, hasRemoteOrigin);
@@ -2421,7 +2433,7 @@ public abstract class AbstractRegionMap implements RegionMap {
                         cbEventInPending = true;
                       }
                     }
-                    if (!clearOccured) {
+                    if (!clearOccurred) {
                       lruEntryUpdate(oldRe);
                     }
                     if (shouldPerformConcurrencyChecks(owner, cbEvent) && txEntryState != null) {
@@ -2445,7 +2457,7 @@ public abstract class AbstractRegionMap implements RegionMap {
                 cbEvent.setRegionEntry(newRe);
                 txRemoveOldIndexEntry(Operation.INVALIDATE, newRe);
                 newRe.setValueResultOfSearch(false);
-                boolean clearOccured = false;
+                boolean clearOccurred = false;
                 try {
                   processAndGenerateTXVersionTag(owner, cbEvent, newRe, txEntryState);
                   newRe.setValue(owner, newRe.prepareValueForCache(owner, newValue, true));
@@ -2453,10 +2465,9 @@ public abstract class AbstractRegionMap implements RegionMap {
                   owner.updateSizeOnCreate(newRe.getKey(), 0);// we are putting in a new invalidated
                                                               // entry
                 } catch (RegionClearedException rce) {
-                  clearOccured = true;
+                  clearOccurred = true;
                 }
-                owner.txApplyInvalidatePart2(newRe, newRe.getKey(), didDestroy, true,
-                    clearOccured);
+                owner.txApplyInvalidatePart2(newRe, newRe.getKey(), didDestroy, true);
 
                 if (invokeCallbacks) {
                   switchEventOwnerAndOriginRemote(cbEvent, hasRemoteOrigin);
@@ -2469,7 +2480,7 @@ public abstract class AbstractRegionMap implements RegionMap {
                   }
                 }
                 opCompleted = true;
-                if (!clearOccured) {
+                if (!clearOccurred) {
                   lruEntryCreate(newRe);
                   incEntryCount(1);
                 }
@@ -2517,7 +2528,7 @@ public abstract class AbstractRegionMap implements RegionMap {
                 }
                 re.setValueResultOfSearch(false);
                 processAndGenerateTXVersionTag(owner, cbEvent, re, txEntryState);
-                boolean clearOccured = false;
+                boolean clearOccurred = false;
                 try {
                   re.setValue(owner, re.prepareValueForCache(owner, newValue, true));
                   EntryLogger.logTXInvalidate(_getOwnerObject(), key);
@@ -2526,9 +2537,9 @@ public abstract class AbstractRegionMap implements RegionMap {
                   }
                   owner.updateSizeOnPut(key, oldSize, 0);
                 } catch (RegionClearedException rce) {
-                  clearOccured = true;
+                  clearOccurred = true;
                 }
-                owner.txApplyInvalidatePart2(re, re.getKey(), didDestroy, true, clearOccured);
+                owner.txApplyInvalidatePart2(re, re.getKey(), didDestroy, true);
                 // didInvalidate = true;
                 if (invokeCallbacks) {
                   switchEventOwnerAndOriginRemote(cbEvent, hasRemoteOrigin);
@@ -2540,7 +2551,7 @@ public abstract class AbstractRegionMap implements RegionMap {
                     cbEventInPending = true;
                   }
                 }
-                if (!clearOccured) {
+                if (!clearOccurred) {
                   lruEntryUpdate(re);
                 }
                 if (shouldPerformConcurrencyChecks(owner, cbEvent) && txEntryState != null) {
@@ -2646,7 +2657,7 @@ public abstract class AbstractRegionMap implements RegionMap {
       boolean requireOldValue, final boolean overwriteDestroyed)
       throws CacheWriterException, TimeoutException {
     final LocalRegion owner = _getOwner();
-    boolean clearOccured = false;
+    boolean clearOccurred = false;
     if (owner == null) {
       // "fix" for bug 32440
       Assert.assertTrue(false, "The owner for RegionMap " + this + " is null for event " + event);
@@ -2785,7 +2796,7 @@ public abstract class AbstractRegionMap implements RegionMap {
                     owner.recordEvent(event);
                     eventRecorded = true;
                   } catch (RegionClearedException rce) {
-                    clearOccured = true;
+                    clearOccurred = true;
                     owner.recordEvent(event);
                   } catch (ConcurrentCacheModificationException ccme) {
                     VersionTag tag = event.getVersionTag();
@@ -2798,10 +2809,10 @@ public abstract class AbstractRegionMap implements RegionMap {
                   if (uninitialized) {
                     event.inhibitCacheListenerNotification(true);
                   }
-                  updateLru(clearOccured, re, event);
+                  updateLru(clearOccurred, re, event);
 
                   lastModifiedTime = owner.basicPutPart2(event, re, !uninitialized,
-                      lastModifiedTime, clearOccured);
+                      lastModifiedTime, clearOccurred);
                 } finally {
                   notifyIndex(re, false);
                 }
@@ -2838,7 +2849,7 @@ public abstract class AbstractRegionMap implements RegionMap {
           } finally {
             // bug 32589, post update may throw an exception if exception occurs
             // for any recipients
-            if (!clearOccured) {
+            if (!clearOccurred) {
               try {
                 lruUpdateCallback();
               } catch (DiskAccessException dae) {
@@ -2959,8 +2970,8 @@ public abstract class AbstractRegionMap implements RegionMap {
     updateSize(event, oldSize, true/* isUpdate */, wasTombstone);
   }
 
-  private void updateLru(boolean clearOccured, RegionEntry re, EntryEventImpl event) {
-    if (!clearOccured) {
+  private void updateLru(boolean clearOccurred, RegionEntry re, EntryEventImpl event) {
+    if (!clearOccurred) {
       if (event.getOperation().isCreate()) {
         lruEntryCreate(re);
       } else {
@@ -3081,7 +3092,6 @@ public abstract class AbstractRegionMap implements RegionMap {
 
     Operation putOp = p_putOp;
 
-    owner.checkBeforeEntrySync(txEvent);
     Object newValue = nv;
 
     final boolean hasRemoteOrigin = !((TXId) txId).getMemberId().equals(owner.getMyId());
@@ -3137,7 +3147,7 @@ public abstract class AbstractRegionMap implements RegionMap {
                     cbEvent.setOldValue(re.getValueInVM(owner)); // OFFHEAP eei
                   }
 
-                  boolean clearOccured = false;
+                  boolean clearOccurred = false;
                   // Set RegionEntry updateInProgress
                   if (owner.indexMaintenanceSynchronous) {
                     re.setUpdateInProgress(true);
@@ -3170,14 +3180,14 @@ public abstract class AbstractRegionMap implements RegionMap {
                         }
                       }
                     } catch (RegionClearedException rce) {
-                      clearOccured = true;
+                      clearOccurred = true;
                     }
                     {
                       long lastMod = owner.cacheTimeMillis();
                       EntryLogger.logTXPut(_getOwnerObject(), key, nv);
                       re.updateStatsForPut(lastMod, lastMod);
-                      owner.txApplyPutPart2(re, re.getKey(), newValue, lastMod, false, didDestroy,
-                          clearOccured);
+                      owner.txApplyPutPart2(re, re.getKey(), lastMod, false, didDestroy,
+                          clearOccurred);
                     }
                   } finally {
                     if (re != null && owner.indexMaintenanceSynchronous) {
@@ -3195,13 +3205,13 @@ public abstract class AbstractRegionMap implements RegionMap {
                       cbEventInPending = true;
                     }
                   }
-                  if (!clearOccured) {
+                  if (!clearOccurred) {
                     lruEntryUpdate(re);
                   }
                 }
               }
               if (didDestroy && !opCompleted) {
-                owner.txApplyInvalidatePart2(re, re.getKey(), true, false, false /* clear */);
+                owner.txApplyInvalidatePart2(re, re.getKey(), true, false /* clear */);
               }
             }
             if (owner.concurrencyChecksEnabled && txEntryState != null && cbEvent != null) {
@@ -3232,7 +3242,7 @@ public abstract class AbstractRegionMap implements RegionMap {
                     cbEvent.setRegionEntry(oldRe);
                     cbEvent.setOldValue(oldRe.getValueInVM(owner)); // OFFHEAP eei
                   }
-                  boolean clearOccured = false;
+                  boolean clearOccurred = false;
                   // Set RegionEntry updateInProgress
                   if (owner.indexMaintenanceSynchronous) {
                     oldRe.setUpdateInProgress(true);
@@ -3270,14 +3280,14 @@ public abstract class AbstractRegionMap implements RegionMap {
                         }
                       }
                     } catch (RegionClearedException rce) {
-                      clearOccured = true;
+                      clearOccurred = true;
                     }
                     {
                       long lastMod = owner.cacheTimeMillis();
                       EntryLogger.logTXPut(_getOwnerObject(), key, nv);
                       oldRe.updateStatsForPut(lastMod, lastMod);
-                      owner.txApplyPutPart2(oldRe, oldRe.getKey(), newValue, lastMod, false,
-                          didDestroy, clearOccured);
+                      owner.txApplyPutPart2(oldRe, oldRe.getKey(), lastMod, false, didDestroy,
+                          clearOccurred);
                     }
                   } finally {
                     if (oldRe != null && owner.indexMaintenanceSynchronous) {
@@ -3299,7 +3309,7 @@ public abstract class AbstractRegionMap implements RegionMap {
                       cbEventInPending = true;
                     }
                   }
-                  if (!clearOccured) {
+                  if (!clearOccurred) {
                     lruEntryUpdate(oldRe);
                   }
                 }
@@ -3311,7 +3321,7 @@ public abstract class AbstractRegionMap implements RegionMap {
                 cbEvent.setRegionEntry(newRe);
                 cbEvent.setOldValue(null);
               }
-              boolean clearOccured = false;
+              boolean clearOccurred = false;
               // Set RegionEntry updateInProgress
               if (owner.indexMaintenanceSynchronous) {
                 newRe.setUpdateInProgress(true);
@@ -3336,14 +3346,14 @@ public abstract class AbstractRegionMap implements RegionMap {
                   owner.updateSizeOnCreate(newRe.getKey(),
                       owner.calculateRegionEntryValueSize(newRe));
                 } catch (RegionClearedException rce) {
-                  clearOccured = true;
+                  clearOccurred = true;
                 }
                 {
                   long lastMod = owner.cacheTimeMillis();
                   EntryLogger.logTXPut(_getOwnerObject(), key, nv);
                   newRe.updateStatsForPut(lastMod, lastMod);
-                  owner.txApplyPutPart2(newRe, newRe.getKey(), newValue, lastMod, true, didDestroy,
-                      clearOccured);
+                  owner.txApplyPutPart2(newRe, newRe.getKey(), lastMod, true, didDestroy,
+                      clearOccurred);
                 }
               } finally {
                 if (newRe != null && owner.indexMaintenanceSynchronous) {
@@ -3363,7 +3373,7 @@ public abstract class AbstractRegionMap implements RegionMap {
                   cbEventInPending = true;
                 }
               }
-              if (!clearOccured) {
+              if (!clearOccurred) {
                 lruEntryCreate(newRe);
                 incEntryCount(1);
               }

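The initialize() hunk above is the core of this file's change: the unused cache local (obtained from region.getGemFireCache() or the GemFireCacheImpl.getInstance() singleton) is gone, and every flag the map needs is derived from its owner through the narrower view types. A minimal sketch of that shape, using illustrative stand-in names (MapOwner, SimpleRegionMap) rather than Geode's LocalRegion/PlaceHolderDiskRegion:

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    // Illustrative owner abstraction; in the real code the answers come from
    // LocalRegion or from the RegionEntryContext/DiskRegionView views.
    interface MapOwner {
      boolean isDiskBacked();
      boolean isVersioned();
      boolean isOffHeap();
    }

    class SimpleRegionMap {
      private ConcurrentMap<Object, Object> map;
      private boolean isDisk;
      private boolean withVersioning;
      private boolean offHeap;

      void initialize(MapOwner owner, int initialCapacity, float loadFactor, int concurrencyLevel) {
        // diamond operator, as in the createConcurrentMap() hunk above
        this.map = new ConcurrentHashMap<>(initialCapacity, loadFactor, concurrencyLevel);
        // every flag comes from the owner; no cache singleton lookup is needed
        this.isDisk = owner.isDiskBacked();
        this.withVersioning = owner.isVersioned();
        this.offHeap = owner.isOffHeap();
      }
    }
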
http://git-wip-us.apache.org/repos/asf/geode/blob/21f405b8/geode-core/src/main/java/org/apache/geode/internal/cache/AddCacheServerProfileMessage.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/AddCacheServerProfileMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/AddCacheServerProfileMessage.java
index 6928ad2..ff0101b 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/AddCacheServerProfileMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/AddCacheServerProfileMessage.java
@@ -44,8 +44,9 @@ public class AddCacheServerProfileMessage extends SerialDistributionMessage
   protected void process(DistributionManager dm) {
     int oldLevel = LocalRegion.setThreadInitLevelRequirement(LocalRegion.BEFORE_INITIAL_IMAGE);
     try {
-      GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
-      if (cache != null && !cache.isClosed()) { // will be null if not initialized
+      InternalCache cache = GemFireCacheImpl.getInstance();
+      // will be null if not initialized
+      if (cache != null && !cache.isClosed()) {
         operateOnCache(cache);
       }
     } finally {
@@ -55,16 +56,16 @@ public class AddCacheServerProfileMessage extends SerialDistributionMessage
       reply.setRecipient(getSender());
       try {
         dm.putOutgoing(reply);
-      } catch (CancelException e) {
+      } catch (CancelException ignore) {
         // can't send a reply, so ignore the exception
       }
     }
   }
 
-  private void operateOnCache(GemFireCacheImpl cache) {
+  private void operateOnCache(InternalCache cache) {
     final boolean isDebugEnabled = logger.isDebugEnabled();
 
-    for (DistributedRegion r : this.getDistributedRegions(cache)) {
+    for (DistributedRegion r : getDistributedRegions(cache)) {
       CacheDistributionAdvisor cda = (CacheDistributionAdvisor) r.getDistributionAdvisor();
       CacheDistributionAdvisor.CacheProfile cp =
           (CacheDistributionAdvisor.CacheProfile) cda.getProfile(getSender());
@@ -91,16 +92,16 @@ public class AddCacheServerProfileMessage extends SerialDistributionMessage
   }
 
   /** set the hasCacheServer flags for all regions in this cache */
-  public void operateOnLocalCache(GemFireCacheImpl cache) {
+  public void operateOnLocalCache(InternalCache cache) {
     int oldLevel = LocalRegion.setThreadInitLevelRequirement(LocalRegion.BEFORE_INITIAL_IMAGE);
     try {
-      for (LocalRegion r : this.getAllRegions(cache)) {
+      for (LocalRegion r : getAllRegions(cache)) {
         FilterProfile fp = r.getFilterProfile();
         if (fp != null) {
           fp.getLocalProfile().hasCacheServer = true;
         }
       }
-      for (PartitionedRegion r : this.getPartitionedRegions(cache)) {
+      for (PartitionedRegion r : getPartitionedRegions(cache)) {
         FilterProfile fp = r.getFilterProfile();
         if (fp != null) {
           fp.getLocalProfile().hasCacheServer = true;
@@ -112,13 +113,13 @@ public class AddCacheServerProfileMessage extends SerialDistributionMessage
   }
 
 
-  private Set<LocalRegion> getAllRegions(GemFireCacheImpl gfc) {
-    return gfc.getAllRegions();
+  private Set<LocalRegion> getAllRegions(InternalCache internalCache) {
+    return internalCache.getAllRegions();
   }
 
-  private Set<DistributedRegion> getDistributedRegions(GemFireCacheImpl gfc) {
-    Set<DistributedRegion> result = new HashSet();
-    for (LocalRegion r : gfc.getAllRegions()) {
+  private Set<DistributedRegion> getDistributedRegions(InternalCache internalCache) {
+    Set<DistributedRegion> result = new HashSet<>();
+    for (LocalRegion r : internalCache.getAllRegions()) {
       if (r instanceof DistributedRegion) {
         result.add((DistributedRegion) r);
       }
@@ -126,14 +127,14 @@ public class AddCacheServerProfileMessage extends SerialDistributionMessage
     return result;
   }
 
-  private Set<PartitionedRegion> getPartitionedRegions(GemFireCacheImpl gfc) {
-    Set<PartitionedRegion> result = new HashSet(gfc.getPartitionedRegions());
-    return result;
+  private Set<PartitionedRegion> getPartitionedRegions(InternalCache internalCache) {
+    return new HashSet<>(internalCache.getPartitionedRegions());
   }
 
   /** for deserialization only */
   public AddCacheServerProfileMessage() {}
 
+  @Override
   public int getDSFID() {
     return ADD_CACHESERVER_PROFILE_UPDATE;
   }

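The process() hunk keeps the static GemFireCacheImpl.getInstance() lookup but types the result as InternalCache and guards it before use, since a late profile message can arrive when the cache is not yet created or is already closing. A rough stand-alone sketch of that guard, with illustrative types (ClosableCache, CacheHolder) rather than the real classes:

    // Illustrative stand-ins for InternalCache and the static instance lookup.
    interface ClosableCache {
      boolean isClosed();
    }

    final class CacheHolder {
      private static volatile ClosableCache instance;

      static ClosableCache getInstance() {   // may return null before initialization
        return instance;
      }

      static void setInstance(ClosableCache cache) {
        instance = cache;
      }
    }

    class ProfileMessage {
      void process() {
        ClosableCache cache = CacheHolder.getInstance();
        // will be null if not initialized; skip quietly in that case
        if (cache != null && !cache.isClosed()) {
          operateOnCache(cache);
        }
      }

      private void operateOnCache(ClosableCache cache) {
        // update the cache-server profile for the sending member here
      }
    }
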
http://git-wip-us.apache.org/repos/asf/geode/blob/21f405b8/geode-core/src/main/java/org/apache/geode/internal/cache/BucketAdvisor.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketAdvisor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketAdvisor.java
index 8b8705a..7b35cb5 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketAdvisor.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketAdvisor.java
@@ -14,6 +14,30 @@
  */
 package org.apache.geode.internal.cache;
 
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.Serializable;
+import java.util.AbstractSet;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Queue;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.apache.logging.log4j.Logger;
+
 import org.apache.geode.CancelException;
 import org.apache.geode.DataSerializer;
 import org.apache.geode.cache.CacheClosedException;
@@ -42,16 +66,6 @@ import org.apache.geode.internal.logging.LogService;
 import org.apache.geode.internal.logging.log4j.LocalizedMessage;
 import org.apache.geode.internal.logging.log4j.LogMarker;
 import org.apache.geode.internal.util.StopWatch;
-import org.apache.logging.log4j.Logger;
-
-import java.io.*;
-import java.util.*;
-import java.util.concurrent.Semaphore;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 /**
  * Specialized {@link CacheDistributionAdvisor} for {@link BucketRegion BucketRegions}. The
@@ -1452,7 +1466,7 @@ public class BucketAdvisor extends CacheDistributionAdvisor {
         for (;;) {
           // bail out if the system starts closing
           this.getAdvisee().getCancelCriterion().checkCancelInProgress(null);
-          final GemFireCacheImpl cache = (GemFireCacheImpl) getBucket().getCache();
+          final InternalCache cache = getBucket().getCache();
           if (cache != null && cache.isCacheAtShutdownAll()) {
             throw new CacheClosedException("Cache is shutting down");
           }
@@ -1727,9 +1741,9 @@ public class BucketAdvisor extends CacheDistributionAdvisor {
   @Override
   protected Profile instantiateProfile(InternalDistributedMember memberId, int version) {
     if (!this.pRegion.isShadowPR()) {
-      GemFireCacheImpl c = getProxyBucketRegion().getCache();
+      InternalCache cache = getProxyBucketRegion().getCache();
       List servers = null;
-      servers = c.getCacheServers();
+      servers = cache.getCacheServers();
 
       HashSet<BucketServerLocation66> serverLocations = new HashSet<BucketServerLocation66>();
       for (Object object : servers) {

http://git-wip-us.apache.org/repos/asf/geode/blob/21f405b8/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
index 136d7b9..62a9bc7 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
@@ -185,7 +185,6 @@ public class BucketRegion extends DistributedRegion implements Bucket {
 
   static final boolean FORCE_LOCAL_LISTENERS_INVOCATION = Boolean
       .getBoolean(DistributionConfig.GEMFIRE_PREFIX + "BucketRegion.alwaysFireLocalListeners");
-  // gemfire.BucktRegion.alwaysFireLocalListeners=true
 
   private volatile AtomicLong5 eventSeqNum = null;
 
@@ -194,7 +193,7 @@ public class BucketRegion extends DistributedRegion implements Bucket {
   }
 
   public BucketRegion(String regionName, RegionAttributes attrs, LocalRegion parentRegion,
-      GemFireCacheImpl cache, InternalRegionArguments internalRegionArgs) {
+      InternalCache cache, InternalRegionArguments internalRegionArgs) {
     super(regionName, attrs, parentRegion, cache, internalRegionArgs);
     if (PartitionedRegion.DISABLE_SECONDARY_BUCKET_ACK) {
       Assert.assertTrue(attrs.getScope().isDistributedNoAck());
@@ -270,7 +269,7 @@ public class BucketRegion extends DistributedRegion implements Bucket {
   }
 
   @Override
-  protected DiskStoreImpl findDiskStore(RegionAttributes ra,
+  protected DiskStoreImpl findDiskStore(RegionAttributes regionAttributes,
       InternalRegionArguments internalRegionArgs) {
     return internalRegionArgs.getPartitionedRegion().getDiskStore();
   }
@@ -927,8 +926,8 @@ public class BucketRegion extends DistributedRegion implements Bucket {
   }
 
   @Override
-  void basicInvalidatePart2(final RegionEntry re, final EntryEventImpl event, boolean clearConflict,
-      boolean invokeCallbacks) {
+  void basicInvalidatePart2(final RegionEntry regionEntry, final EntryEventImpl event,
+      boolean conflictWithClear, boolean invokeCallbacks) {
     // Assumed this is called with the entry synchronized
     long token = -1;
     InvalidateOperation op = null;
@@ -936,7 +935,7 @@ public class BucketRegion extends DistributedRegion implements Bucket {
     try {
       if (!event.isOriginRemote()) {
         if (event.getVersionTag() == null || event.getVersionTag().isGatewayTag()) {
-          VersionTag v = re.generateVersionTag(null, false, this, event);
+          VersionTag v = regionEntry.generateVersionTag(null, false, this, event);
           if (logger.isDebugEnabled() && v != null) {
             logger.debug("generated version tag {} in region {}", v, this.getName());
           }
@@ -954,8 +953,8 @@ public class BucketRegion extends DistributedRegion implements Bucket {
         op = new InvalidateOperation(event);
         token = op.startOperation();
       }
-      super.basicInvalidatePart2(re, event, clearConflict /* Clear conflict occurred */,
-          invokeCallbacks);
+      super.basicInvalidatePart2(regionEntry, event,
+          conflictWithClear /* Clear conflict occurred */, invokeCallbacks);
     } finally {
       if (op != null) {
         op.endOperation(token);
@@ -1018,8 +1017,8 @@ public class BucketRegion extends DistributedRegion implements Bucket {
   }
 
   @Override
-  final void performExpiryTimeout(ExpiryTask p_task) throws CacheException {
-    ExpiryTask task = p_task;
+  final void performExpiryTimeout(ExpiryTask expiryTask) throws CacheException {
+    ExpiryTask task = expiryTask;
     boolean isEvictDestroy = isEntryEvictDestroyEnabled();
     // Fix for bug 43805 - get the primary lock before
     // synchronizing on pendingSecondaryExpires, to match the lock
@@ -1382,13 +1381,9 @@ public class BucketRegion extends DistributedRegion implements Bucket {
   /**
    * is the current thread involved in destroying the PR that owns this region?
    */
-  private final boolean isInDestroyingThread() {
+  private boolean isInDestroyingThread() {
     return this.partitionedRegion.locallyDestroyingThread == Thread.currentThread();
   }
-  // public int getSerialNumber() {
-  // String s = "This should never be called on " + getClass();
-  // throw new UnsupportedOperationException(s);
-  // }
 
   @Override
   public void fillInProfile(Profile profile) {
@@ -2110,10 +2105,8 @@ public class BucketRegion extends DistributedRegion implements Bucket {
       // counters to 0.
       oldMemValue = this.bytesInMemory.getAndSet(0);
     }
-    // Gemfire PRs don't support clear. allowing it via a hack for tests
-    else if (LocalRegion.simulateClearForTests) {
-      oldMemValue = this.bytesInMemory.getAndSet(0);
-    } else {
+
+    else {
       throw new InternalGemFireError(
           "Trying to clear a bucket region that was not destroyed or in initialization.");
     }
@@ -2124,14 +2117,14 @@ public class BucketRegion extends DistributedRegion implements Bucket {
   }
 
   @Override
-  public int calculateValueSize(Object val) {
+  public int calculateValueSize(Object value) {
     // Only needed by BucketRegion
-    return calcMemSize(val);
+    return calcMemSize(value);
   }
 
   @Override
-  public int calculateRegionEntryValueSize(RegionEntry re) {
-    return calcMemSize(re._getValue()); // OFFHEAP _getValue ok
+  public int calculateRegionEntryValueSize(RegionEntry regionEntry) {
+    return calcMemSize(regionEntry._getValue()); // OFFHEAP _getValue ok
   }
 
   @Override
@@ -2181,7 +2174,7 @@ public class BucketRegion extends DistributedRegion implements Bucket {
 
   @Override
   public void initialCriticalMembers(boolean localHeapIsCritical,
-      Set<InternalDistributedMember> critialMembers) {
+      Set<InternalDistributedMember> criticalMembers) {
     // The owner Partitioned Region handles critical threshold events
   }
 

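BucketRegion (and BucketRegionQueue below) now accept InternalCache in their constructors, so anything that satisfies the interface can flow down the super() chain; the concrete GemFireCacheImpl is just one such implementation. A compact sketch of why that matters for construction, using hypothetical names (Cache, Region, BucketLikeRegion) that are not Geode API:

    // Hypothetical miniature of the constructor chain: the region only needs the
    // interface, so a stub can stand in for the concrete cache.
    interface Cache {
      String getName();
    }

    class RealCache implements Cache {
      @Override
      public String getName() {
        return "production-cache";
      }
    }

    class Region {
      protected final Cache cache;

      Region(Cache cache) {               // was: Region(RealCache cache)
        this.cache = cache;
      }
    }

    class BucketLikeRegion extends Region {
      BucketLikeRegion(Cache cache) {     // the narrower type flows through super()
        super(cache);
      }
    }

    class Demo {
      public static void main(String[] args) {
        Region bucket = new BucketLikeRegion(new RealCache());
        Region testBucket = new BucketLikeRegion(() -> "stub-cache");   // lambda as a stub
        System.out.println(bucket.cache.getName() + ", " + testBucket.cache.getName());
      }
    }
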
http://git-wip-us.apache.org/repos/asf/geode/blob/21f405b8/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegionQueue.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegionQueue.java b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegionQueue.java
index 7a21d12..46f31f5 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegionQueue.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegionQueue.java
@@ -76,29 +76,20 @@ public class BucketRegionQueue extends AbstractBucketRegionQueue {
    */
   private final BlockingQueue<Object> eventSeqNumQueue = new LinkedBlockingQueue<Object>();
 
-  // private final BlockingQueue<EventID> eventSeqNumQueueWithEventId = new
-  // LinkedBlockingQueue<EventID>();
-
   private long lastKeyRecovered;
 
-  private AtomicLong latestQueuedKey = new AtomicLong();
+  private final AtomicLong latestQueuedKey = new AtomicLong();
 
-  private AtomicLong latestAcknowledgedKey = new AtomicLong();
+  private final AtomicLong latestAcknowledgedKey = new AtomicLong();
 
-  /**
-   * @param regionName
-   * @param attrs
-   * @param parentRegion
-   * @param cache
-   * @param internalRegionArgs
-   */
   public BucketRegionQueue(String regionName, RegionAttributes attrs, LocalRegion parentRegion,
-      GemFireCacheImpl cache, InternalRegionArguments internalRegionArgs) {
+      InternalCache cache, InternalRegionArguments internalRegionArgs) {
     super(regionName, attrs, parentRegion, cache, internalRegionArgs);
     this.keySet();
-    indexes = new ConcurrentHashMap<Object, Long>();
+    this.indexes = new ConcurrentHashMap<Object, Long>();
   }
 
+  @Override
   protected void cleanUpDestroyedTokensAndMarkGIIComplete(
       InitialImageOperation.GIIStatus giiStatus) {
     // Load events from temp queued events
@@ -553,9 +544,9 @@ public class BucketRegionQueue extends AbstractBucketRegionQueue {
       logger.debug(" destroying primary key {}", key);
     }
     @Released
-    EntryEventImpl event = getPartitionedRegion().newDestroyEntryEvent(key, null);
+    EntryEventImpl event = newDestroyEntryEvent(key, null);
     try {
-      event.setEventId(new EventID(cache.getSystem()));
+      event.setEventId(new EventID(cache.getInternalDistributedSystem()));
       event.setRegion(this);
       basicDestroy(event, true, null);
       setLatestAcknowledgedKey((Long) key);
@@ -584,6 +575,10 @@ public class BucketRegionQueue extends AbstractBucketRegionQueue {
     this.notifyEntriesRemoved();
   }
 
+  public EntryEventImpl newDestroyEntryEvent(Object key, Object aCallbackArgument) {
+    return getPartitionedRegion().newDestroyEntryEvent(key, aCallbackArgument);
+  }
+
   public boolean isReadyForPeek() {
     return !this.getPartitionedRegion().isDestroyed() && !this.isEmpty()
         && !this.eventSeqNumQueue.isEmpty() && getBucketAdvisor().isPrimary();

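The new newDestroyEntryEvent(...) wrapper is a small seam: destroyKey() now calls an overridable method on the queue itself instead of reaching through getPartitionedRegion() inline, which lets a subclass or test double supply its own event. A generic sketch of that extraction, with illustrative classes rather than the real queue types:

    // Illustrative version of the "extract an overridable factory method" step.
    class Event {
      final Object key;

      Event(Object key) {
        this.key = key;
      }
    }

    class Parent {
      Event newDestroyEvent(Object key) {
        return new Event(key);
      }
    }

    class QueueRegion {
      private final Parent parent = new Parent();

      // the seam: callers inside this class go through here, and a subclass can override it
      protected Event newDestroyEntryEvent(Object key) {
        return parent.newDestroyEvent(key);
      }

      void destroyKey(Object key) {
        Event event = newDestroyEntryEvent(key);   // was: parent.newDestroyEvent(key)
        // ... apply the destroy using event ...
      }
    }
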
http://git-wip-us.apache.org/repos/asf/geode/blob/21f405b8/geode-core/src/main/java/org/apache/geode/internal/cache/CacheConfig.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/CacheConfig.java b/geode-core/src/main/java/org/apache/geode/internal/cache/CacheConfig.java
index 45b6a6c..b97cc46 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/CacheConfig.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/CacheConfig.java
@@ -14,6 +14,7 @@
  */
 package org.apache.geode.internal.cache;
 
+import org.apache.geode.cache.client.internal.InternalClientCache;
 import org.apache.geode.internal.cache.xmlcache.CacheServerCreation;
 import org.apache.geode.internal.i18n.LocalizedStrings;
 import org.apache.geode.pdx.PdxSerializer;
@@ -167,7 +168,7 @@ public class CacheConfig {
     this.cacheServerCreation = servers;
   }
 
-  public void validateCacheConfig(GemFireCacheImpl cacheInstance) {
+  public void validateCacheConfig(InternalClientCache cacheInstance) {
     // To fix bug 44961 only validate our attributes against the existing cache
     // if they have been explicitly set by the set.
     // So all the following "ifs" check that "*UserSet" is true.

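validateCacheConfig() now takes the InternalClientCache view it actually validates against; as the surrounding comment notes, each attribute is only compared when its matching *UserSet flag is true. A small sketch of that explicit-set guard, with illustrative field names rather than the real CacheConfig members:

    // Illustrative config holder: an attribute is validated against an existing
    // cache only when the user explicitly set it.
    class ExampleClientConfig {
      private boolean pdxReadSerialized;
      private boolean pdxReadSerializedUserSet;

      void setPdxReadSerialized(boolean value) {
        this.pdxReadSerialized = value;
        this.pdxReadSerializedUserSet = true;
      }

      void validateAgainst(boolean existingPdxReadSerialized) {
        if (this.pdxReadSerializedUserSet
            && this.pdxReadSerialized != existingPdxReadSerialized) {
          throw new IllegalStateException(
              "pdx-read-serialized does not match the existing cache");
        }
      }
    }
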
http://git-wip-us.apache.org/repos/asf/geode/blob/21f405b8/geode-core/src/main/java/org/apache/geode/internal/cache/CacheDistributionAdvisee.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/CacheDistributionAdvisee.java b/geode-core/src/main/java/org/apache/geode/internal/cache/CacheDistributionAdvisee.java
index b4bb00f..e4a7957 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/CacheDistributionAdvisee.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/CacheDistributionAdvisee.java
@@ -14,7 +14,6 @@
  */
 package org.apache.geode.internal.cache;
 
-import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.RegionAttributes;
 import org.apache.geode.distributed.internal.DistributionAdvisee;
 import org.apache.geode.internal.cache.CacheDistributionAdvisor.CacheProfile;
@@ -39,7 +38,7 @@ public interface CacheDistributionAdvisee extends DistributionAdvisee {
    * 
    * @return the Cache
    */
-  public Cache getCache();
+  public InternalCache getCache();
 
   /**
    * Returns the <code>RegionAttributes</code> associated with this advisee.

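Declaring getCache() as InternalCache in the advisee interface relies on Java's covariant return types: implementations that already return a more specific cache still satisfy the interface, and callers such as the BucketAdvisor shutdown check above no longer need a cast. A self-contained sketch of the mechanism with toy types (not Geode API):

    // Toy types demonstrating covariant returns: narrowing the declared return type
    // in the interface removes casts at the call sites.
    interface BasicCache {
      boolean isClosed();
    }

    interface InternalLikeCache extends BasicCache {
      boolean isCacheAtShutdownAll();
    }

    interface Advisee {
      InternalLikeCache getCache();   // was: BasicCache getCache()
    }

    class AdviseeImpl implements Advisee {
      @Override
      public InternalLikeCache getCache() {
        return new InternalLikeCache() {
          @Override
          public boolean isClosed() {
            return false;
          }

          @Override
          public boolean isCacheAtShutdownAll() {
            return false;
          }
        };
      }
    }

    class Caller {
      boolean shuttingDown(Advisee advisee) {
        // no cast to the concrete cache class needed any more
        return advisee.getCache().isCacheAtShutdownAll();
      }
    }
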
http://git-wip-us.apache.org/repos/asf/geode/blob/21f405b8/geode-core/src/main/java/org/apache/geode/internal/cache/CacheLifecycleListener.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/CacheLifecycleListener.java b/geode-core/src/main/java/org/apache/geode/internal/cache/CacheLifecycleListener.java
index c7e6a37..e0f1d99 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/CacheLifecycleListener.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/CacheLifecycleListener.java
@@ -15,7 +15,7 @@
 package org.apache.geode.internal.cache;
 
 /**
- * Defines callbacks that are invoked when a <code>GemFireCache</code> is created or closed.
+ * Defines callbacks that are invoked when an {@code InternalCache} is created or closed.
  * 
  * @see GemFireCacheImpl#addCacheLifecycleListener(CacheLifecycleListener)
  * @see GemFireCacheImpl#removeCacheLifecycleListener(CacheLifecycleListener)
@@ -23,12 +23,12 @@ package org.apache.geode.internal.cache;
 public interface CacheLifecycleListener {
 
   /**
-   * Invoked when a new <code>GemFireCache</code> is created
+   * Invoked when a new {@code InternalCache} is created
    */
-  public void cacheCreated(GemFireCacheImpl cache);
+  void cacheCreated(InternalCache cache);
 
   /**
-   * Invoked when a <code>GemFireCache</code> is closed
+   * Invoked when an {@code InternalCache} is closed
    */
-  public void cacheClosed(GemFireCacheImpl cache);
+  void cacheClosed(InternalCache cache);
 }

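For reference, a minimal listener written against the interface exactly as declared above; it assumes the class lives in (or imports from) org.apache.geode.internal.cache, and registration goes through the GemFireCacheImpl.addCacheLifecycleListener(...) method referenced in the @see tags, which is internal API:

    // Example listener implementing the two callbacks shown in the diff above.
    class LoggingCacheLifecycleListener implements CacheLifecycleListener {

      @Override
      public void cacheCreated(InternalCache cache) {
        System.out.println("cache created: " + cache);
      }

      @Override
      public void cacheClosed(InternalCache cache) {
        System.out.println("cache closed: " + cache);
      }
    }
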
http://git-wip-us.apache.org/repos/asf/geode/blob/21f405b8/geode-core/src/main/java/org/apache/geode/internal/cache/CachePerfStats.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/CachePerfStats.java b/geode-core/src/main/java/org/apache/geode/internal/cache/CachePerfStats.java
index 67e04f1..59fa5dd 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/CachePerfStats.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/CachePerfStats.java
@@ -1283,11 +1283,10 @@ public class CachePerfStats {
   public void incDeltaFullValuesRequested() {
     stats.incInt(deltaFullValuesRequestedId, 1);
   }
-  ////// Special Instance Methods /////
 
   /**
-   * Closes these stats so that they can not longer be used. The stats are closed when the
-   * {@linkplain GemFireCacheImpl#close cache} is closed.
+   * Closes these stats so that they can no longer be used. The stats are closed when the cache is
+   * closed.
    *
    * @since GemFire 3.5
    */

http://git-wip-us.apache.org/repos/asf/geode/blob/21f405b8/geode-core/src/main/java/org/apache/geode/internal/cache/CacheServerImpl.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/CacheServerImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/CacheServerImpl.java
index 367b6f4..670c697 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/CacheServerImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/CacheServerImpl.java
@@ -14,19 +14,51 @@
  */
 package org.apache.geode.internal.cache;
 
+import static java.lang.Integer.*;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.logging.log4j.Logger;
+
 import org.apache.geode.CancelCriterion;
 import org.apache.geode.GemFireIOException;
 import org.apache.geode.InternalGemFireError;
 import org.apache.geode.InvalidValueException;
-import org.apache.geode.cache.*;
+import org.apache.geode.cache.AttributesFactory;
+import org.apache.geode.cache.ClientSession;
+import org.apache.geode.cache.DataPolicy;
+import org.apache.geode.cache.DiskStore;
+import org.apache.geode.cache.DiskStoreFactory;
+import org.apache.geode.cache.DynamicRegionFactory;
+import org.apache.geode.cache.EvictionAction;
+import org.apache.geode.cache.EvictionAttributes;
+import org.apache.geode.cache.InterestRegistrationListener;
+import org.apache.geode.cache.RegionAttributes;
+import org.apache.geode.cache.RegionExistsException;
+import org.apache.geode.cache.Scope;
 import org.apache.geode.cache.server.CacheServer;
 import org.apache.geode.cache.server.ClientSubscriptionConfig;
 import org.apache.geode.cache.server.ServerLoadProbe;
 import org.apache.geode.cache.server.internal.LoadMonitor;
 import org.apache.geode.cache.wan.GatewayTransportFilter;
 import org.apache.geode.distributed.DistributedMember;
-import org.apache.geode.distributed.internal.*;
+import org.apache.geode.distributed.internal.DM;
+import org.apache.geode.distributed.internal.DistributionAdvisee;
+import org.apache.geode.distributed.internal.DistributionAdvisor;
 import org.apache.geode.distributed.internal.DistributionAdvisor.Profile;
+import org.apache.geode.distributed.internal.DistributionConfig;
+import org.apache.geode.distributed.internal.InternalDistributedSystem;
+import org.apache.geode.distributed.internal.ResourceEvent;
+import org.apache.geode.distributed.internal.ServerLocation;
 import org.apache.geode.distributed.internal.membership.MemberAttributes;
 import org.apache.geode.internal.Assert;
 import org.apache.geode.internal.OSProcess;
@@ -42,29 +74,19 @@ import org.apache.geode.internal.logging.LogService;
 import org.apache.geode.internal.logging.log4j.LocalizedMessage;
 import org.apache.geode.management.membership.ClientMembership;
 import org.apache.geode.management.membership.ClientMembershipListener;
-import org.apache.logging.log4j.Logger;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.*;
-import java.util.concurrent.atomic.AtomicInteger;
 
 /**
- * An implementation of the <code>CacheServer</code> interface that delegates most of the heavy
- * lifting to an {@link Acceptor}.
+ * An implementation of the {@code CacheServer} interface that delegates most of the heavy lifting to
+ * an {@link Acceptor}.
  * 
  * @since GemFire 4.0
  */
 @SuppressWarnings("deprecation")
 public class CacheServerImpl extends AbstractCacheServer implements DistributionAdvisee {
-
   private static final Logger logger = LogService.getLogger();
 
-  private static final int FORCE_LOAD_UPDATE_FREQUENCY =
-      Integer
-          .getInteger(
-              DistributionConfig.GEMFIRE_PREFIX + "BridgeServer.FORCE_LOAD_UPDATE_FREQUENCY", 10)
-          .intValue();
+  private static final int FORCE_LOAD_UPDATE_FREQUENCY = getInteger(
+      DistributionConfig.GEMFIRE_PREFIX + "BridgeServer.FORCE_LOAD_UPDATE_FREQUENCY", 10);
 
   /** The acceptor that does the actual serving */
   private volatile AcceptorImpl acceptor;
@@ -88,7 +110,7 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
    */
   private boolean isGatewayReceiver;
 
-  private List<GatewayTransportFilter> gatewayTransportFilters = Collections.EMPTY_LIST;
+  private List<GatewayTransportFilter> gatewayTransportFilters = Collections.emptyList();
 
   /** is this a server created by a launcher as opposed to by an application or XML? */
   private boolean isDefaultServer;
@@ -107,10 +129,10 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
   // ////////////////////// Constructors //////////////////////
 
   /**
-   * Creates a new <code>BridgeServerImpl</code> that serves the contents of the give
-   * <code>Cache</code>. It has the default configuration.
+   * Creates a new {@code BridgeServerImpl} that serves the contents of the given {@code Cache}. It
+   * has the default configuration.
    */
-  public CacheServerImpl(GemFireCacheImpl cache, boolean isGatewayReceiver) {
+  public CacheServerImpl(InternalCache cache, boolean isGatewayReceiver) {
     super(cache);
     this.isGatewayReceiver = isGatewayReceiver;
   }
@@ -251,8 +273,8 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
   }
 
   /**
-   * Sets the configuration of <b>this</b> <code>CacheServer</code> based on the configuration of
-   * <b>another</b> <code>CacheServer</code>.
+   * Sets the configuration of <b>this</b> {@code CacheServer} based on the configuration of
+   * <b>another</b> {@code CacheServer}.
    */
   public void configureFrom(CacheServer other) {
     setPort(other.getPort());
@@ -302,8 +324,8 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
     List overflowAttributesList = new LinkedList();
     ClientSubscriptionConfig csc = this.getClientSubscriptionConfig();
     overflowAttributesList.add(0, csc.getEvictionPolicy());
-    overflowAttributesList.add(1, Integer.valueOf(csc.getCapacity()));
-    overflowAttributesList.add(2, Integer.valueOf(this.port));
+    overflowAttributesList.add(1, valueOf(csc.getCapacity()));
+    overflowAttributesList.add(2, valueOf(this.port));
     String diskStoreName = csc.getDiskStoreName();
     if (diskStoreName != null) {
       overflowAttributesList.add(3, diskStoreName);
@@ -327,9 +349,7 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
     // TODO : Need to provide facility to enable/disable client health monitoring.
     // Creating ClientHealthMonitoring region.
     // Force initialization on current cache
-    if (cache instanceof GemFireCacheImpl) {
-      ClientHealthMonitoringRegion.getInstance((GemFireCacheImpl) cache);
-    }
+    ClientHealthMonitoringRegion.getInstance(this.cache);
     this.cache.getLoggerI18n()
         .config(LocalizedStrings.CacheServerImpl_CACHESERVER_CONFIGURATION___0, getConfig());
 
@@ -379,7 +399,7 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
         throw new IllegalStateException(s);
       }
     }
-    if (this.hostnameForClients == null || this.hostnameForClients.equals("")) {
+    if (this.hostnameForClients == null || this.hostnameForClients.isEmpty()) {
       if (this.acceptor != null) {
         return this.acceptor.getExternalAddress();
       } else {
@@ -518,16 +538,10 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
   /**
    * create client subscription
    * 
-   * @param cache
-   * @param ePolicy
-   * @param capacity
-   * @param port
-   * @param overFlowDir
-   * @param isDiskStore
    * @return client subscription name
    * @since GemFire 5.7
    */
-  public static String clientMessagesRegion(GemFireCacheImpl cache, String ePolicy, int capacity,
+  public static String clientMessagesRegion(InternalCache cache, String ePolicy, int capacity,
       int port, String overFlowDir, boolean isDiskStore) {
     AttributesFactory factory =
         getAttribFactoryForClientMessagesRegion(cache, ePolicy, capacity, overFlowDir, isDiskStore);
@@ -536,7 +550,7 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
     return createClientMessagesRegion(attr, cache, capacity, port);
   }
 
-  public static AttributesFactory getAttribFactoryForClientMessagesRegion(GemFireCacheImpl cache,
+  public static AttributesFactory getAttribFactoryForClientMessagesRegion(InternalCache cache,
       String ePolicy, int capacity, String overflowDir, boolean isDiskStore)
       throws InvalidValueException, GemFireIOException {
     AttributesFactory factory = new AttributesFactory();
@@ -564,9 +578,10 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
             "Could not create client subscription overflow directory: " + dir.getAbsolutePath());
       }
       File[] dirs = {dir};
+
       DiskStoreFactory dsf = cache.createDiskStoreFactory();
-      DiskStore bsi = dsf.setAutoCompact(true)
-          .setDiskDirsAndSizes(dirs, new int[] {Integer.MAX_VALUE}).create("bsi");
+      dsf.setAutoCompact(true).setDiskDirsAndSizes(dirs, new int[] {MAX_VALUE}).create("bsi");
+
       factory.setDiskStoreName("bsi");
       // backward compatibility, it was sync
       factory.setDiskSynchronous(true);
@@ -576,11 +591,12 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
     factory.setStatisticsEnabled(true);
     /* setting LIFO related eviction attributes */
     if (HARegionQueue.HA_EVICTION_POLICY_ENTRY.equals(ePolicy)) {
-      factory.setEvictionAttributes(EvictionAttributesImpl.createLIFOEntryAttributes(capacity,
-          EvictionAction.OVERFLOW_TO_DISK));
-    } else if (HARegionQueue.HA_EVICTION_POLICY_MEMORY.equals(ePolicy)) { // condition refinement
-      factory.setEvictionAttributes(EvictionAttributesImpl.createLIFOMemoryAttributes(capacity,
-          EvictionAction.OVERFLOW_TO_DISK));
+      factory.setEvictionAttributes(
+          EvictionAttributes.createLIFOEntryAttributes(capacity, EvictionAction.OVERFLOW_TO_DISK));
+    } else if (HARegionQueue.HA_EVICTION_POLICY_MEMORY.equals(ePolicy)) {
+      // condition refinement
+      factory.setEvictionAttributes(
+          EvictionAttributes.createLIFOMemoryAttributes(capacity, EvictionAction.OVERFLOW_TO_DISK));
     } else {
       // throw invalid eviction policy exception
       throw new InvalidValueException(
@@ -589,7 +605,7 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
     return factory;
   }
 
-  public static String createClientMessagesRegion(RegionAttributes attr, GemFireCacheImpl cache,
+  private static String createClientMessagesRegion(RegionAttributes attr, InternalCache cache,
       int capacity, int port) {
     // generating unique name in VM for ClientMessagesRegion
     String regionName = generateNameForClientMsgsRegion(port);
@@ -615,22 +631,9 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
     return regionName;
   }
 
-  public static String createClientMessagesRegionForTesting(GemFireCacheImpl cache, String ePolicy,
-      int capacity, int port, int expiryTime, String overFlowDir, boolean isDiskStore) {
-    AttributesFactory factory =
-        getAttribFactoryForClientMessagesRegion(cache, ePolicy, capacity, overFlowDir, isDiskStore);
-    ExpirationAttributes ea =
-        new ExpirationAttributes(expiryTime, ExpirationAction.LOCAL_INVALIDATE);
-    factory.setEntryTimeToLive(ea);
-    RegionAttributes attr = factory.create();
-
-    return createClientMessagesRegion(attr, cache, capacity, port);
-  }
-
   /**
    * Generates the name for the client subscription using the given id.
    * 
-   * @param id
    * @return String
    * @since GemFire 5.7
    */
@@ -662,9 +665,9 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
   }
 
   /**
-   * Returns the underlying <code>InternalDistributedSystem</code> connection.
+   * Returns the underlying {@code InternalDistributedSystem} connection.
    * 
-   * @return the underlying <code>InternalDistributedSystem</code>
+   * @return the underlying {@code InternalDistributedSystem}
    */
   public InternalDistributedSystem getSystem() {
     return (InternalDistributedSystem) this.cache.getDistributedSystem();
@@ -729,10 +732,10 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
   }
 
   /**
-   * Registers a new <code>InterestRegistrationListener</code> with the set of
-   * <code>InterestRegistrationListener</code>s.
+   * Registers a new {@code InterestRegistrationListener} with the set of
+   * {@code InterestRegistrationListener}s.
    * 
-   * @param listener The <code>InterestRegistrationListener</code> to register
+   * @param listener The {@code InterestRegistrationListener} to register
    * @throws IllegalStateException if the BridgeServer has not been started
    * @since GemFire 5.8Beta
    */
@@ -745,10 +748,10 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
   }
 
   /**
-   * Unregisters an existing <code>InterestRegistrationListener</code> from the set of
-   * <code>InterestRegistrationListener</code>s.
+   * Unregisters an existing {@code InterestRegistrationListener} from the set of
+   * {@code InterestRegistrationListener}s.
    * 
-   * @param listener The <code>InterestRegistrationListener</code> to unregister
+   * @param listener The{@code InterestRegistrationListener} to unregister
    * 
    * @since GemFire 5.8Beta
    */
@@ -757,11 +760,9 @@ public class CacheServerImpl extends AbstractCacheServer implements Distribution
   }
 
   /**
-   * Returns a read-only set of <code>InterestRegistrationListener</code>s registered with this
-   * notifier.
+   * Returns a read-only set of {@code InterestRegistrationListener}s registered with this notifier.
    * 
-   * @return a read-only set of <code>InterestRegistrationListener</code>s registered with this
-   *         notifier
+   * @return a read-only set of {@code InterestRegistrationListener}s registered with this notifier
    * 
    * @since GemFire 5.8Beta
    */

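Read in sequence, the hunks above leave the client-subscription overflow setup in getAttribFactoryForClientMessagesRegion reading roughly as follows. This is a condensed sketch for orientation only, not a verbatim excerpt: ePolicy, capacity, cache, and dir come from the surrounding method, and the directory-validation and invalid-policy branches are elided.

    // Condensed sketch of the overflow/eviction configuration after this change.
    AttributesFactory factory = new AttributesFactory();
    File[] dirs = {dir};
    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    // Integer.MAX_VALUE appears as a statically imported MAX_VALUE in the actual hunk.
    dsf.setAutoCompact(true).setDiskDirsAndSizes(dirs, new int[] {Integer.MAX_VALUE}).create("bsi");
    factory.setDiskStoreName("bsi");
    factory.setDiskSynchronous(true); // kept synchronous for backward compatibility
    factory.setStatisticsEnabled(true);
    if (HARegionQueue.HA_EVICTION_POLICY_ENTRY.equals(ePolicy)) {
      factory.setEvictionAttributes(
          EvictionAttributes.createLIFOEntryAttributes(capacity, EvictionAction.OVERFLOW_TO_DISK));
    } else if (HARegionQueue.HA_EVICTION_POLICY_MEMORY.equals(ePolicy)) {
      factory.setEvictionAttributes(
          EvictionAttributes.createLIFOMemoryAttributes(capacity, EvictionAction.OVERFLOW_TO_DISK));
    }
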
http://git-wip-us.apache.org/repos/asf/geode/blob/21f405b8/geode-core/src/main/java/org/apache/geode/internal/cache/CacheServerLauncher.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/CacheServerLauncher.java b/geode-core/src/main/java/org/apache/geode/internal/cache/CacheServerLauncher.java
index 9a544d2..6bfb0f3 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/CacheServerLauncher.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/CacheServerLauncher.java
@@ -15,6 +15,26 @@
 
 package org.apache.geode.internal.cache;
 
+import static org.apache.geode.distributed.ConfigurationProperties.*;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.PrintStream;
+import java.io.Serializable;
+import java.net.URL;
+import java.util.AbstractList;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.concurrent.TimeUnit;
+
 import org.apache.geode.SystemFailure;
 import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.CacheFactory;
@@ -28,26 +48,17 @@ import org.apache.geode.distributed.internal.InternalDistributedSystem;
 import org.apache.geode.i18n.LogWriterI18n;
 import org.apache.geode.internal.OSProcess;
 import org.apache.geode.internal.PureJavaMode;
-import org.apache.geode.internal.net.SocketCreator;
 import org.apache.geode.internal.cache.tier.sockets.CacheServerHelper;
 import org.apache.geode.internal.i18n.LocalizedStrings;
+import org.apache.geode.internal.net.SocketCreator;
 import org.apache.geode.internal.process.StartupStatus;
 import org.apache.geode.internal.process.StartupStatusListener;
 import org.apache.geode.internal.util.IOUtils;
 import org.apache.geode.internal.util.JavaCommandBuilder;
 
-import java.io.*;
-import java.net.URL;
-import java.util.*;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.geode.distributed.ConfigurationProperties.LOG_FILE;
-import static org.apache.geode.distributed.ConfigurationProperties.SERVER_BIND_ADDRESS;
-
 /**
  * Launcher program to start a cache server.
  *
- *
  * @since GemFire 2.0.2
  */
 public class CacheServerLauncher {
@@ -669,7 +680,7 @@ public class CacheServerLauncher {
     // redirect output to the log file
     OSProcess.redirectOutput(system.getConfig().getLogFile());
 
-    Cache cache = this.createCache(system, options);
+    InternalCache cache = createCache(system, options);
     cache.setIsServer(true);
     startAdditionalServices(cache, options);
 
@@ -678,7 +689,7 @@ public class CacheServerLauncher {
     clearLogListener();
 
     if (ASSIGN_BUCKETS) {
-      for (PartitionedRegion region : ((GemFireCacheImpl) cache).getPartitionedRegions()) {
+      for (PartitionedRegion region : cache.getPartitionedRegions()) {
         PartitionRegionHelper.assignBucketsToPartitions(region);
       }
     }
@@ -823,9 +834,9 @@ public class CacheServerLauncher {
     return -1.0f;
   }
 
-  protected Cache createCache(InternalDistributedSystem system, Map<String, Object> options)
+  protected InternalCache createCache(InternalDistributedSystem system, Map<String, Object> options)
       throws IOException {
-    Cache cache = CacheFactory.create(system);
+    InternalCache cache = (InternalCache) CacheFactory.create(system);
 
     float threshold = getCriticalHeapPercent(options);
     if (threshold > 0.0f) {

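With createCache now returning InternalCache, the launcher's start path no longer needs the GemFireCacheImpl cast. A condensed sketch of that flow under the same assumptions as the hunks above (system, options, and ASSIGN_BUCKETS come from the surrounding launcher code):

    // Condensed sketch of the launcher's cache bring-up after this change.
    InternalCache cache = (InternalCache) CacheFactory.create(system);
    cache.setIsServer(true);
    startAdditionalServices(cache, options);
    if (ASSIGN_BUCKETS) {
      // getPartitionedRegions() is declared on InternalCache, so no cast is needed here.
      for (PartitionedRegion region : cache.getPartitionedRegions()) {
        PartitionRegionHelper.assignBucketsToPartitions(region);
      }
    }
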
http://git-wip-us.apache.org/repos/asf/geode/blob/21f405b8/geode-core/src/main/java/org/apache/geode/internal/cache/CachedDeserializableFactory.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/CachedDeserializableFactory.java b/geode-core/src/main/java/org/apache/geode/internal/cache/CachedDeserializableFactory.java
index 037e589..c332db3 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/CachedDeserializableFactory.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/CachedDeserializableFactory.java
@@ -84,11 +84,8 @@ public class CachedDeserializableFactory {
   }
 
   private static boolean cachePrefersPdx() {
-    GemFireCacheImpl gfc = GemFireCacheImpl.getInstance();
-    if (gfc != null) {
-      return gfc.getPdxReadSerialized();
-    }
-    return false;
+    InternalCache internalCache = GemFireCacheImpl.getInstance();
+    return internalCache != null && internalCache.getPdxReadSerialized();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/geode/blob/21f405b8/geode-core/src/main/java/org/apache/geode/internal/cache/ColocationHelper.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/ColocationHelper.java b/geode-core/src/main/java/org/apache/geode/internal/cache/ColocationHelper.java
index d0f3ec4..ed08175 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/ColocationHelper.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/ColocationHelper.java
@@ -15,6 +15,21 @@
 
 package org.apache.geode.internal.cache;
 
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.logging.log4j.Logger;
+
+import org.apache.geode.cache.DiskStore;
 import org.apache.geode.cache.EntryDestroyedException;
 import org.apache.geode.cache.Region;
 import org.apache.geode.cache.RegionDestroyedException;
@@ -29,23 +44,13 @@ import org.apache.geode.internal.cache.persistence.PRPersistentConfig;
 import org.apache.geode.internal.cache.wan.parallel.ParallelGatewaySenderQueue;
 import org.apache.geode.internal.i18n.LocalizedStrings;
 import org.apache.geode.internal.logging.LogService;
-import org.apache.geode.internal.logging.log4j.LocalizedMessage;
-
-import org.apache.logging.log4j.Logger;
-
-import java.io.Serializable;
-import java.util.*;
-import java.util.Map.Entry;
 
 /**
 * A utility class to retrieve colocated regions in a colocation hierarchy in various scenarios
- * 
- * 
+ *
  * @since GemFire 6.0
  */
 public class ColocationHelper {
-
-  /** Logging mechanism for debugging */
   private static final Logger logger = LogService.getLogger();
 
   /**
@@ -58,7 +63,6 @@ public class ColocationHelper {
   /**
   * A utility method to retrieve the colocated region of a given partitioned region
    *
-   * @param partitionedRegion
    * @return colocated PartitionedRegion
    * @throws IllegalStateException for missing colocated region
    * @since GemFire 5.8Beta
@@ -124,7 +128,7 @@ public class ColocationHelper {
         String prName = (String) itr.next();
         try {
           prConf = (PartitionRegionConfig) prRoot.get(prName);
-        } catch (EntryDestroyedException ede) {
+        } catch (EntryDestroyedException ignore) {
           continue;
         }
         if (prConf == null) {
@@ -134,7 +138,8 @@ public class ColocationHelper {
         }
         if (prConf.getColocatedWith() != null) {
           if (prConf.getColocatedWith().equals(tempToBeColocatedWith.getFullPath())
-              || ("/" + prConf.getColocatedWith()).equals(tempToBeColocatedWith.getFullPath())) {
+              || (Region.SEPARATOR + prConf.getColocatedWith())
+                  .equals(tempToBeColocatedWith.getFullPath())) {
             colocatedRegions.add(prConf);
             tempcolocatedRegions.add(prConf);
           }
@@ -149,11 +154,7 @@ public class ColocationHelper {
       if (colocatedWithRegionName == null)
         break;
       else {
-        try {
-          prConf = (PartitionRegionConfig) prRoot.get(getRegionIdentifier(colocatedWithRegionName));
-        } catch (EntryDestroyedException ede) {
-          throw ede;
-        }
+        prConf = (PartitionRegionConfig) prRoot.get(getRegionIdentifier(colocatedWithRegionName));
         if (prConf == null) {
           break;
         }
@@ -193,12 +194,13 @@ public class ColocationHelper {
     boolean hasOfflineChildren = false;
     int oldLevel = LocalRegion.setThreadInitLevelRequirement(LocalRegion.ANY_INIT);
     try {
-      GemFireCacheImpl cache = region.getCache();
-      Collection<DiskStoreImpl> stores = cache.listDiskStores();
+      InternalCache cache = region.getCache();
+      Collection<DiskStore> stores = cache.listDiskStores();
       // Look through all of the disk stores for offline colocated child regions
-      for (DiskStoreImpl diskStore : stores) {
+      for (DiskStore diskStore : stores) {
         // Look at all of the partitioned regions.
-        for (Map.Entry<String, PRPersistentConfig> entry : diskStore.getAllPRs().entrySet()) {
+        for (Map.Entry<String, PRPersistentConfig> entry : ((DiskStoreImpl) diskStore).getAllPRs()
+            .entrySet()) {
 
           PRPersistentConfig config = entry.getValue();
           String childName = entry.getKey();
@@ -275,7 +277,6 @@ public class ColocationHelper {
    * getAllColocationRegions(orderPR) --> List{customerPR, shipmentPR}<br>
    * getAllColocationRegions(shipmentPR) --> List{customerPR, orderPR}<br>
    * 
-   * @param partitionedRegion
    * @return List of all partitioned regions (excluding self) in a colocated chain
    * @since GemFire 5.8Beta
    */
@@ -309,7 +310,6 @@ public class ColocationHelper {
   /**
    * gets local data of colocated regions on a particular data store
    * 
-   * @param partitionedRegion
    * @return map of region name to local colocated regions
    * @since GemFire 5.8Beta
    */
@@ -367,7 +367,6 @@ public class ColocationHelper {
    * getColocatedChildRegions(orderPR) will return List{shipmentPR}<br>
    * getColocatedChildRegions(shipmentPR) will return empty List{}<br>
    * 
-   * @param partitionedRegion
    * @return list of all child partitioned regions colocated with the region
    * @since GemFire 5.8Beta
    */
@@ -387,7 +386,7 @@ public class ColocationHelper {
         }
         try {
           prConf = (PartitionRegionConfig) prRoot.get(prName);
-        } catch (EntryDestroyedException ede) {
+        } catch (EntryDestroyedException ignore) {
           continue;
         }
         if (prConf == null) {

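The hasOfflineColocatedChildRegions hunk above now iterates the public DiskStore type and casts only where the internal getAllPRs() map is needed. A minimal sketch of that scan, with the inner checks against the region elided:

    // Sketch of the offline-child scan after the type change; inner comparisons elided.
    InternalCache cache = region.getCache();
    for (DiskStore diskStore : cache.listDiskStores()) {
      // getAllPRs() lives only on the implementation class, hence the narrow cast.
      for (Map.Entry<String, PRPersistentConfig> entry : ((DiskStoreImpl) diskStore).getAllPRs()
          .entrySet()) {
        String childName = entry.getKey();
        PRPersistentConfig config = entry.getValue();
        // ... compare childName/config against the region to find offline colocated children
      }
    }
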
http://git-wip-us.apache.org/repos/asf/geode/blob/21f405b8/geode-core/src/main/java/org/apache/geode/internal/cache/CreateRegionProcessor.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/CreateRegionProcessor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/CreateRegionProcessor.java
index b1dd003..815b526 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/CreateRegionProcessor.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/CreateRegionProcessor.java
@@ -12,7 +12,6 @@
  * or implied. See the License for the specific language governing permissions and limitations under
  * the License.
  */
-
 package org.apache.geode.internal.cache;
 
 import java.io.DataInput;
@@ -29,6 +28,7 @@ import org.apache.geode.CancelException;
 import org.apache.geode.DataSerializer;
 import org.apache.geode.SystemFailure;
 import org.apache.geode.cache.CacheFactory;
+import org.apache.geode.cache.DynamicRegionFactory;
 import org.apache.geode.cache.PartitionAttributes;
 import org.apache.geode.cache.RegionDestroyedException;
 import org.apache.geode.cache.Scope;
@@ -48,6 +48,7 @@ import org.apache.geode.internal.Assert;
 import org.apache.geode.internal.InternalDataSerializer;
 import org.apache.geode.internal.cache.CacheDistributionAdvisor.CacheProfile;
 import org.apache.geode.internal.cache.CacheDistributionAdvisor.InitialImageAdvice;
+import org.apache.geode.internal.cache.partitioned.Bucket;
 import org.apache.geode.internal.cache.partitioned.PRLocallyDestroyedException;
 import org.apache.geode.internal.cache.partitioned.RegionAdvisor;
 import org.apache.geode.internal.cache.partitioned.RegionAdvisor.PartitionProfile;
@@ -96,7 +97,6 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
 
       CreateRegionReplyProcessor replyProc = new CreateRegionReplyProcessor(recps);
 
-
       boolean useMcast = false; // multicast is disabled for this message for now
       CreateRegionMessage msg = getCreateRegionMessage(recps, replyProc, useMcast);
 
@@ -118,10 +118,6 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
         // This isn't right. We should disable region creation in general, not just
         // the remote case here...
         // // Similarly, don't allow new regions to be created if the cache is closing
-        // GemFireCache cache = (GemFireCache)this.newRegion.getCache();
-        // if (cache.isClosing()) {
-        // throw new CacheClosedException("Cannot create a region when the cache is closing");
-        // }
         try {
           replyProc.waitForRepliesUninterruptibly();
           if (!replyProc.needRetry()) {
@@ -166,15 +162,13 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
     return recps;
   }
 
-
-
   public InitialImageAdvice getInitialImageAdvice(InitialImageAdvice previousAdvice) {
     return newRegion.getCacheDistributionAdvisor().adviseInitialImage(previousAdvice);
   }
 
   private Set getAdvice() {
     if (this.newRegion instanceof BucketRegion) {
-      return ((BucketRegion) this.newRegion).getBucketAdvisor().adviseProfileExchange();
+      return ((Bucket) this.newRegion).getBucketAdvisor().adviseProfileExchange();
     } else {
       DistributionAdvisee rgn = this.newRegion.getParentAdvisee();
       DistributionAdvisor advisor = rgn.getDistributionAdvisor();
@@ -195,7 +189,7 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
   }
 
   public void setOnline(InternalDistributedMember target) {
-
+    // nothing
   }
 
   class CreateRegionReplyProcessor extends ReplyProcessor21 {
@@ -319,6 +313,7 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
     protected String regionPath;
     protected CacheProfile profile;
     protected int processorId;
+
     private transient boolean incompatible = false;
     private transient ReplyException replyException;
     private transient CacheProfile replyProfile;
@@ -327,7 +322,6 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
     protected transient boolean severeAlertCompatible;
     private transient boolean skippedCompatibilityChecks;
 
-
     @Override
     public int getProcessorId() {
       return this.processorId;
@@ -354,7 +348,7 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
         // get the region from the path, but do NOT wait on initialization,
         // otherwise we could have a distributed deadlock
 
-        GemFireCacheImpl cache = (GemFireCacheImpl) CacheFactory.getInstance(dm.getSystem());
+        InternalCache cache = (InternalCache) CacheFactory.getInstance(dm.getSystem());
 
         // Fix for bug 42051 - Discover any regions that are in the process
         // of being destroyed
@@ -389,15 +383,15 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
             }
           }
         }
-      } catch (PRLocallyDestroyedException fre) {
+      } catch (PRLocallyDestroyedException ignore) {
         if (logger.isDebugEnabled()) {
           logger.debug("<Region Locally Destroyed> {}", this);
         }
-      } catch (RegionDestroyedException e) {
+      } catch (RegionDestroyedException ignore) {
         if (logger.isDebugEnabled()) {
           logger.debug("<RegionDestroyed> {}", this);
         }
-      } catch (CancelException e) {
+      } catch (CancelException ignore) {
         if (logger.isDebugEnabled()) {
           logger.debug("<CancelException> {}", this);
         }
@@ -445,8 +439,6 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
         dm.putOutgoing(replyMsg);
         if (lclRgn instanceof PartitionedRegion)
           ((PartitionedRegion) lclRgn).sendIndexCreationMsg(this.getSender());
-
-
       }
     }
 
@@ -549,15 +541,13 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
           && this.concurrencyChecksEnabled != otherCCEnabled) {
         result =
             LocalizedStrings.CreateRegionProcessor_CANNOT_CREATE_REGION_0_CCENABLED_1_BECAUSE_ANOTHER_CACHE_HAS_THE_SAME_REGION_CCENABLED_2
-                .toLocalizedString(
-                    new Object[] {regionPath, Boolean.valueOf(this.concurrencyChecksEnabled), myId,
-                        Boolean.valueOf(otherCCEnabled)});
+                .toLocalizedString(regionPath, this.concurrencyChecksEnabled, myId, otherCCEnabled);
       }
 
       Set<String> otherGatewaySenderIds = ((LocalRegion) rgn).getGatewaySenderIds();
       Set<String> myGatewaySenderIds = profile.gatewaySenderIds;
       if (!otherGatewaySenderIds.equals(myGatewaySenderIds)) {
-        if (!rgn.getFullPath().contains(DynamicRegionFactoryImpl.dynamicRegionListName)) {
+        if (!rgn.getFullPath().contains(DynamicRegionFactory.dynamicRegionListName)) {
           result =
               LocalizedStrings.CreateRegionProcessor_CANNOT_CREATE_REGION_0_WITH_1_GATEWAY_SENDER_IDS_BECAUSE_ANOTHER_CACHE_HAS_THE_SAME_REGION_WITH_2_GATEWAY_SENDER_IDS
                   .toLocalizedString(this.regionPath, myGatewaySenderIds, otherGatewaySenderIds);
@@ -588,8 +578,7 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
         if (profile.scope != otherScope) {
           result =
               LocalizedStrings.CreateRegionProcessor_CANNOT_CREATE_REGION_0_WITH_1_SCOPE_BECAUSE_ANOTHER_CACHE_HAS_SAME_REGION_WITH_2_SCOPE
-                  .toLocalizedString(
-                      new Object[] {this.regionPath, profile.scope, myId, otherScope});
+                  .toLocalizedString(this.regionPath, profile.scope, myId, otherScope);
         }
       }
 
@@ -605,8 +594,7 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
           && profile.isOffHeap != otherIsOffHeap) {
         result =
             LocalizedStrings.CreateRegionProcessor_CANNOT_CREATE_REGION_0_WITH_OFF_HEAP_EQUALS_1_BECAUSE_ANOTHER_CACHE_2_HAS_SAME_THE_REGION_WITH_OFF_HEAP_EQUALS_3
-                .toLocalizedString(
-                    new Object[] {this.regionPath, profile.isOffHeap, myId, otherIsOffHeap});
+                .toLocalizedString(this.regionPath, profile.isOffHeap, myId, otherIsOffHeap);
       }
 
       String cspResult = null;
@@ -652,47 +640,6 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
             profile, result);
       }
 
-      // if (profile.membershipAttributes != null) {
-      // // check to see if:
-      // // 1. we do not have DataPolicy that will take queued msgs
-      // // 2. the profile has queuing turned on
-      // // 3. we are playing one of the queued roles
-      // if (!rgn.getAttributes().getDataPolicy().withQueuedMessages()) {
-      // if (profile.membershipAttributes.getLossAction().isAllAccessWithQueuing()) {
-      // Set myRoles = rgn.getSystem().getDistributedMember().getRoles();
-      // if (!myRoles.isEmpty()) {
-      // Set intersection = new HashSet(myRoles);
-      // intersection.retainAll(profile.membershipAttributes.getRequiredRoles());
-      // if (!intersection.isEmpty()) {
-      // result = "Cannot create region " + regionPath
-      // + " with queuing because the region already exists"
-      // + " with a data-policy " + rgn.getAttributes().getDataPolicy()
-      // + " that does not allow queued messages with the roles "
-      // + intersection;
-      // }
-      // }
-      // }
-      // }
-      // } else {
-      // // see if we are queuing on this region
-      // MembershipAttributes ra = rgn.getMembershipAttributes();
-      // if (ra != null && ra.hasRequiredRoles()
-      // && ra.getLossAction().isAllAccessWithQueuing()) {
-      // // we are queuing so make sure this other guy allows queued messages
-      // // if he is playing a role we queue for.
-      // if (!profile.dataPolicy.withQueuedMessages()) {
-      // Set intersection = new HashSet(ra.getRequiredRoles());
-      // intersection.retainAll(profile.getDistributedMember().getRoles());
-      // if (!intersection.isEmpty()) {
-      // result = "Cannot create region " + regionPath
-      // + " with a data-policy " + profile.dataPolicy
-      // + " that does not allow queued messages because the region"
-      // + " already exists with queuing enabled for roles " + intersection;
-      // }
-      // }
-      // }
-      // }
-
       return result;
     }
 
@@ -808,16 +755,16 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
 
     @Override
     public String toString() {
-      StringBuffer buff = new StringBuffer();
-      buff.append("CreateRegionMessage (region='");
-      buff.append(this.regionPath);
-      buff.append("'; processorId=");
-      buff.append(this.processorId);
-      buff.append("; concurrencyChecksEnabled=").append(this.concurrencyChecksEnabled);
-      buff.append("; profile=");
-      buff.append(this.profile);
-      buff.append(")");
-      return buff.toString();
+      StringBuilder sb = new StringBuilder();
+      sb.append("CreateRegionMessage (region='");
+      sb.append(this.regionPath);
+      sb.append("'; processorId=");
+      sb.append(this.processorId);
+      sb.append("; concurrencyChecksEnabled=").append(this.concurrencyChecksEnabled);
+      sb.append("; profile=");
+      sb.append(this.profile);
+      sb.append(")");
+      return sb.toString();
     }
   }
 
@@ -848,8 +795,6 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
     public void fromData(DataInput in) throws IOException, ClassNotFoundException {
       super.fromData(in);
       if (in.readBoolean()) {
-        // this.profile = new CacheProfile();
-        // this.profile.fromData(in);
         this.profile = (CacheProfile) DataSerializer.readObject(in);
       }
       int size = in.readInt();
@@ -879,7 +824,6 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
       super.toData(out);
       out.writeBoolean(this.profile != null);
       if (this.profile != null) {
-        // this.profile.toData(out);
         DataSerializer.writeObject(this.profile, out);
       }
       if (this.bucketProfiles == null) {
@@ -914,7 +858,7 @@ public class CreateRegionProcessor implements ProfileExchangeProcessor {
 
     @Override
     public String toString() {
-      StringBuffer buff = new StringBuffer();
+      StringBuilder buff = new StringBuilder();
       buff.append("CreateRegionReplyMessage");
       buff.append("(sender=").append(getSender());
       buff.append("; processorId=");

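The process() hunk above standardizes on resolving the cache as InternalCache rather than GemFireCacheImpl; the same lookup recurs throughout this commit. A minimal sketch of the pattern (dm is the DistributionManager handed to process(), and the rest of the region resolution is not repeated here):

    // Sketch: resolve the cache for the message's distributed system, then continue with
    // the non-blocking region lookup shown in the hunk above.
    InternalCache cache = (InternalCache) CacheFactory.getInstance(dm.getSystem());
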
http://git-wip-us.apache.org/repos/asf/geode/blob/21f405b8/geode-core/src/main/java/org/apache/geode/internal/cache/DestroyPartitionedRegionMessage.java
----------------------------------------------------------------------
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DestroyPartitionedRegionMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DestroyPartitionedRegionMessage.java
index b36cd2a..5914ab5 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DestroyPartitionedRegionMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DestroyPartitionedRegionMessage.java
@@ -169,7 +169,7 @@ public final class DestroyPartitionedRegionMessage extends PartitionMessage {
   }
 
   @Override
-  protected void appendFields(StringBuffer buff) {
+  protected void appendFields(StringBuilder buff) {
     super.appendFields(buff);
     buff.append("; cbArg=").append(this.cbArg).append("; op=").append(this.op);
     buff.append("; prSerial=" + prSerial);