You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@geode.apache.org by pr...@apache.org on 2018/04/03 23:34:03 UTC

[geode] branch develop updated: GEODE-4902: Refactor LogMarker class and usage (#1678)

This is an automated email from the ASF dual-hosted git repository.

prhomberg pushed a commit to branch develop
in repository https://gitbox.apache.org/repos/asf/geode.git


The following commit(s) were added to refs/heads/develop by this push:
     new 912a1b2  GEODE-4902: Refactor LogMarker class and usage (#1678)
912a1b2 is described below

commit 912a1b211633b63cdaa49b6811a90ac1591c959c
Author: Patrick Rhomberg <pr...@pivotal.io>
AuthorDate: Tue Apr 3 16:33:58 2018 -0700

    GEODE-4902: Refactor LogMarker class and usage (#1678)
    
    * GEODE-4902: Refactor LogMarker and usage
    * Distinguish those markers at the TRACE and DEBUG levels (now Markers named *_VERBOSE) from those that log at a coarser granularity (now Markers named *_MARKER)
    * Suppress only *_VERBOSE markers by default, from previous behavior of suppressing all markers.
    * Refactor some pathological log checks, e.g., change a log from debug to trace when it is gated behind an ifTraceEnabled block.
    * Remove unused Markers.
    * Remove any log marker logic from AdminWaiters exception message generation.  Exceptions now always include a full message.
    * Remove marker-gated print to System.out in PersistentOplogSet -- direct calls to System.out should not exist in that class.
    * Add marker EVENT_ID_TO_STRING_VERBOSE to explicitly distinguish instance of marker used in logic not immediately consumed by a logger.  The consumption of EventId::toString should only exist at logging in any event.
    * Remove nearby dead code and dead code detected during marker removal.
    * Expand wildcard imports in touched files.
    * Remove TODO comments, at least one of which was a decade old.
    * Collapse nearby, redundant catch blocks
    * Remove empty finally blocks
    * Update documentation page to refer to GEODE_VERBOSE, from deprecated GEMFIRE_VERBOSE
    * The change in log marker names causes one class to compile larger.  Updated sanctionedDataSerializables.
---
 .../main/java/org/apache/geode/DataSerializer.java | 419 ++++++++++---------
 .../admin/internal/AdminDistributedSystemImpl.java |   4 +-
 .../internal/DisabledManagedEntityController.java  |  26 +-
 .../internal/ManagedEntityControllerFactory.java   |   4 +-
 .../apache/geode/admin/jmx/internal/AgentImpl.java |  13 +-
 .../geode/cache/client/internal/AbstractOp.java    |  11 +-
 .../cache/client/internal/OpExecutorImpl.java      |  12 +-
 .../internal/ClusterDistributionManager.java       |  29 +-
 .../distributed/internal/DistributionAdvisor.java  | 135 +++---
 .../distributed/internal/DistributionMessage.java  |   8 +-
 .../internal/StartupResponseMessage.java           |   4 +-
 .../distributed/internal/direct/DirectChannel.java |   4 +-
 .../distributed/internal/locks/DLockGrantor.java   | 460 ++++++++-------------
 .../internal/locks/DLockQueryProcessor.java        |  23 +-
 .../locks/DLockRecoverGrantorProcessor.java        |  42 +-
 .../internal/locks/DLockReleaseProcessor.java      |  30 +-
 .../internal/locks/DLockRequestProcessor.java      | 147 +++----
 .../distributed/internal/locks/DLockService.java   | 294 +++++++------
 .../distributed/internal/locks/DLockToken.java     |  24 +-
 .../internal/locks/DeposeGrantorProcessor.java     |   5 +-
 .../internal/locks/ElderInitProcessor.java         |  24 +-
 .../distributed/internal/locks/ElderState.java     |  62 ++-
 .../internal/locks/GrantorRequestProcessor.java    |  49 +--
 .../locks/NonGrantorDestroyedProcessor.java        |  17 +-
 .../membership/gms/mgr/GMSMembershipManager.java   |   7 +-
 .../geode/internal/InternalDataSerializer.java     | 147 +++----
 .../geode/internal/admin/remote/AdminWaiters.java  |  25 +-
 .../admin/remote/AlertLevelChangeMessage.java      |   4 +-
 .../admin/remote/RemoteGfManagerAgent.java         |  65 ++-
 .../internal/cache/AbstractBucketRegionQueue.java  |  34 +-
 .../geode/internal/cache/AbstractDiskRegion.java   |  72 ++--
 .../geode/internal/cache/AbstractRegionMap.java    |  15 +-
 .../apache/geode/internal/cache/BucketAdvisor.java |  15 +-
 .../internal/cache/BucketPersistenceAdvisor.java   |  29 +-
 .../apache/geode/internal/cache/BucketRegion.java  |  49 ++-
 .../internal/cache/CacheDistributionAdvisor.java   |   2 +-
 .../cache/DestroyPartitionedRegionMessage.java     |  12 +-
 .../apache/geode/internal/cache/DiskInitFile.java  |  68 ++-
 .../geode/internal/cache/DiskStoreMonitor.java     |  68 ++-
 .../geode/internal/cache/DistTXCommitMessage.java  |   9 +-
 .../internal/cache/DistTXPrecommitMessage.java     |   9 +-
 .../internal/cache/DistTXRollbackMessage.java      |   8 +-
 .../internal/cache/DistributedCacheOperation.java  |  16 +-
 .../internal/cache/DistributedPutAllOperation.java |  15 +-
 .../DistributedRegionFunctionStreamingMessage.java |   8 +-
 .../geode/internal/cache/EntryEventImpl.java       |   6 +-
 .../org/apache/geode/internal/cache/EventID.java   |   2 +-
 .../apache/geode/internal/cache/FilterProfile.java |  26 +-
 .../internal/cache/InitialImageOperation.java      |  86 ++--
 .../cache/LatestLastAccessTimeReplyProcessor.java  |   4 +-
 .../apache/geode/internal/cache/LocalRegion.java   |  54 ++-
 .../org/apache/geode/internal/cache/Oplog.java     | 269 +++++-------
 .../apache/geode/internal/cache/OverflowOplog.java |  25 +-
 .../geode/internal/cache/PartitionedRegion.java    |   4 +-
 .../geode/internal/cache/PersistentOplogSet.java   |   3 -
 .../geode/internal/cache/StateFlushOperation.java  |  59 ++-
 .../internal/cache/TXRegionLockRequestImpl.java    |   4 +-
 .../internal/cache/TXRemoteCommitMessage.java      |   8 +-
 .../geode/internal/cache/TombstoneService.java     |  29 +-
 .../geode/internal/cache/VMLRURegionMap.java       |  88 ++--
 .../cache/entries/AbstractRegionEntry.java         |  10 +-
 .../cache/event/DistributedEventTracker.java       |   8 +-
 .../cache/eviction/AbstractEvictionList.java       |  16 +-
 .../internal/cache/eviction/CountLRUEviction.java  |   3 +-
 .../cache/eviction/LRUListWithAsyncSorting.java    |   8 +-
 .../cache/eviction/LRUListWithSyncSorting.java     |  17 +-
 .../geode/internal/cache/ha/HARegionQueue.java     |  34 +-
 .../geode/internal/cache/map/RegionMapDestroy.java |   4 +-
 .../partitioned/BecomePrimaryBucketMessage.java    |  20 +-
 .../cache/partitioned/BucketBackupMessage.java     |   5 +-
 .../cache/partitioned/BucketSizeMessage.java       |  15 +-
 .../cache/partitioned/ContainsKeyValueMessage.java |  12 +-
 .../cache/partitioned/CreateBucketMessage.java     |  21 +-
 .../partitioned/DeposePrimaryBucketMessage.java    |  20 +-
 .../internal/cache/partitioned/DestroyMessage.java |  29 +-
 .../DestroyRegionOnDataStoreMessage.java           |   5 +-
 .../cache/partitioned/DumpAllPRConfigMessage.java  |  14 +-
 .../internal/cache/partitioned/DumpB2NRegion.java  |  16 +-
 .../cache/partitioned/DumpBucketsMessage.java      |  14 +-
 .../cache/partitioned/FetchBulkEntriesMessage.java |  20 +-
 .../cache/partitioned/FetchEntriesMessage.java     |  21 +-
 .../cache/partitioned/FetchEntryMessage.java       |  21 +-
 .../cache/partitioned/FetchKeysMessage.java        |  20 +-
 .../partitioned/FetchPartitionDetailsMessage.java  |  23 +-
 .../internal/cache/partitioned/GetMessage.java     |  17 +-
 .../cache/partitioned/IdentityRequestMessage.java  |  23 +-
 .../cache/partitioned/IdentityUpdateMessage.java   |   4 +-
 .../cache/partitioned/IndexCreationMsg.java        |   6 +-
 .../cache/partitioned/InterestEventMessage.java    |  17 +-
 .../cache/partitioned/InvalidateMessage.java       |  16 +-
 .../partitioned/ManageBackupBucketMessage.java     |  21 +-
 .../cache/partitioned/ManageBucketMessage.java     |  21 +-
 .../cache/partitioned/MoveBucketMessage.java       |  16 +-
 .../cache/partitioned/PRTombstoneMessage.java      |   4 +-
 .../partitioned/PRUpdateEntryVersionMessage.java   |   4 +-
 .../cache/partitioned/PartitionMessage.java        |  10 +-
 .../PartitionedRegionFunctionStreamingMessage.java |   4 +-
 .../cache/partitioned/PrimaryRequestMessage.java   |  17 +-
 .../cache/partitioned/PutAllPRMessage.java         |   8 +-
 .../internal/cache/partitioned/PutMessage.java     |  41 +-
 .../internal/cache/partitioned/QueryMessage.java   |   4 +-
 .../internal/cache/partitioned/RegionAdvisor.java  |  54 +--
 .../cache/partitioned/RemoveAllPRMessage.java      |   8 +-
 .../cache/partitioned/RemoveBucketMessage.java     |  16 +-
 .../cache/partitioned/RemoveIndexesMessage.java    |   5 +-
 .../internal/cache/partitioned/SizeMessage.java    |  15 +-
 .../cache/persistence/DiskInitFileParser.java      | 149 ++++---
 .../cache/persistence/PersistenceAdvisorImpl.java  | 123 +++---
 .../cache/tier/sockets/CacheClientProxy.java       |  42 +-
 .../cache/tier/sockets/CacheClientUpdater.java     |   4 +-
 .../cache/tier/sockets/ObjectPartList.java         |   6 -
 .../cache/tier/sockets/VersionedObjectList.java    |  44 +-
 .../internal/cache/tx/RemoteClearMessage.java      |   8 +-
 .../cache/tx/RemoteContainsKeyValueMessage.java    |  19 +-
 .../internal/cache/tx/RemoteDestroyMessage.java    |  16 +-
 .../internal/cache/tx/RemoteFetchEntryMessage.java |  13 +-
 .../internal/cache/tx/RemoteFetchKeysMessage.java  |  12 +-
 .../cache/tx/RemoteFetchVersionMessage.java        |  20 +-
 .../geode/internal/cache/tx/RemoteGetMessage.java  |  16 +-
 .../internal/cache/tx/RemoteInvalidateMessage.java |  20 +-
 .../internal/cache/tx/RemoteOperationMessage.java  |   4 +-
 .../internal/cache/tx/RemotePutAllMessage.java     |  12 +-
 .../geode/internal/cache/tx/RemotePutMessage.java  |   8 +-
 .../internal/cache/tx/RemoteRemoveAllMessage.java  |  12 +-
 .../geode/internal/cache/tx/RemoteSizeMessage.java |  14 +-
 .../cache/versions/RegionVersionHolder.java        |  31 +-
 .../cache/versions/RegionVersionVector.java        |   8 +-
 .../geode/internal/cache/versions/VersionTag.java  |  10 +-
 .../internal/cache/xmlcache/CacheXmlParser.java    |  34 +-
 .../geode/internal/logging/LogWriterFactory.java   |   6 +-
 .../geode/internal/logging/log4j/LogMarker.java    | 182 ++++----
 .../internal/statistics/GemFireStatSampler.java    |   4 +-
 .../geode/internal/statistics/HostStatSampler.java |  18 +-
 .../statistics/LocalStatisticsFactory.java         |   6 +-
 .../geode/internal/statistics/SampleCollector.java | 122 +++---
 .../internal/statistics/SimpleStatSampler.java     |   2 +-
 .../internal/statistics/StatArchiveHandler.java    |  51 +--
 .../internal/statistics/StatArchiveWriter.java     |  46 +--
 .../internal/statistics/StatMonitorHandler.java    |  14 +-
 .../geode/internal/tcp/DirectReplySender.java      |   5 +-
 .../org/apache/geode/internal/tcp/TCPConduit.java  |  12 +-
 .../internal/beans/MemberMBeanBridge.java          |   2 +-
 .../cli/functions/ChangeLogLevelFunction.java      |   7 +-
 .../DistributedLockServiceDUnitTest.java           |  31 +-
 .../cache/ClientServerGetAllDUnitTest.java         |  10 +-
 .../cache/partitioned/Bug47388DUnitTest.java       |   7 +-
 .../logging/DistributedSystemLogFileJUnitTest.java |  27 +-
 .../log4j/FastLoggerIntegrationJUnitTest.java      |   5 +-
 .../codeAnalysis/sanctionedDataSerializables.txt   |   2 +-
 .../logging/configuring_log4j2.html.md.erb         |   6 +-
 150 files changed, 2328 insertions(+), 2759 deletions(-)

diff --git a/geode-core/src/main/java/org/apache/geode/DataSerializer.java b/geode-core/src/main/java/org/apache/geode/DataSerializer.java
index 1bc8792..8093ffd 100644
--- a/geode-core/src/main/java/org/apache/geode/DataSerializer.java
+++ b/geode-core/src/main/java/org/apache/geode/DataSerializer.java
@@ -205,8 +205,8 @@ public abstract class DataSerializer {
 
     InternalDataSerializer.checkOut(out);
 
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing Class {}", c);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing Class {}", c);
     }
 
     if (c == null || c.isPrimitive()) {
@@ -236,8 +236,8 @@ public abstract class DataSerializer {
 
     InternalDataSerializer.checkOut(out);
 
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing Class name {}", className);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing Class name {}", className);
     }
 
     writeString(InternalDataSerializer.processOutgoingClassName(className, out), out);
@@ -334,8 +334,8 @@ public abstract class DataSerializer {
 
     InternalDataSerializer.checkOut(out);
 
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing Date {}", date);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing Date {}", date);
     }
 
     long v;
@@ -366,8 +366,8 @@ public abstract class DataSerializer {
       date = new Date(time);
     }
 
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Read Date {}", date);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read Date {}", date);
     }
 
     return date;
@@ -390,8 +390,8 @@ public abstract class DataSerializer {
 
     InternalDataSerializer.checkOut(out);
 
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing File {}", file);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing File {}", file);
     }
 
     writeString((file != null) ? file.getCanonicalPath() : null, out);
@@ -411,8 +411,8 @@ public abstract class DataSerializer {
     if (s != null) {
       file = new File(s);
     }
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Read File {}", file);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read File {}", file);
     }
 
     return file;
@@ -438,8 +438,8 @@ public abstract class DataSerializer {
 
     InternalDataSerializer.checkOut(out);
 
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing InetAddress {}", address);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing InetAddress {}", address);
     }
 
     writeByteArray((address != null) ? address.getAddress() : null, out);
@@ -465,8 +465,8 @@ public abstract class DataSerializer {
 
     try {
       InetAddress addr = InetAddress.getByAddress(address);
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER, "Read InetAddress {}", addr);
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read InetAddress {}", addr);
       }
       return addr;
     } catch (UnknownHostException ex) {
@@ -492,14 +492,14 @@ public abstract class DataSerializer {
 
     InternalDataSerializer.checkOut(out);
 
-    final boolean isDebugEnabled = logger.isTraceEnabled(LogMarker.SERIALIZER);
-    if (isDebugEnabled) {
-      logger.trace(LogMarker.SERIALIZER, "Writing String \"{}\"", value);
+    final boolean isTraceSerialzerVerbose = logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE);
+    if (isTraceSerialzerVerbose) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing String \"{}\"", value);
     }
 
     if (value == null) {
-      if (isDebugEnabled) {
-        logger.trace(LogMarker.SERIALIZER, "Writing NULL_STRING");
+      if (isTraceSerialzerVerbose) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing NULL_STRING");
       }
       out.writeByte(DSCODE.NULL_STRING);
 
@@ -531,30 +531,30 @@ public abstract class DataSerializer {
       boolean writeUTF = utfLen > len;
       if (writeUTF) {
         if (utfLen > 0xFFFF) {
-          if (isDebugEnabled) {
-            logger.trace(LogMarker.SERIALIZER, "Writing utf HUGE_STRING of len={}", len);
+          if (isTraceSerialzerVerbose) {
+            logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing utf HUGE_STRING of len={}", len);
           }
           out.writeByte(DSCODE.HUGE_STRING);
           out.writeInt(len);
           out.writeChars(value);
         } else {
-          if (isDebugEnabled) {
-            logger.trace(LogMarker.SERIALIZER, "Writing utf STRING of len={}", len);
+          if (isTraceSerialzerVerbose) {
+            logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing utf STRING of len={}", len);
           }
           out.writeByte(DSCODE.STRING);
           out.writeUTF(value);
         }
       } else {
         if (len > 0xFFFF) {
-          if (isDebugEnabled) {
-            logger.trace(LogMarker.SERIALIZER, "Writing HUGE_STRING_BYTES of len={}", len);
+          if (isTraceSerialzerVerbose) {
+            logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing HUGE_STRING_BYTES of len={}", len);
           }
           out.writeByte(DSCODE.HUGE_STRING_BYTES);
           out.writeInt(len);
           out.writeBytes(value);
         } else {
-          if (isDebugEnabled) {
-            logger.trace(LogMarker.SERIALIZER, "Writing STRING_BYTES of len={}", len);
+          if (isTraceSerialzerVerbose) {
+            logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing STRING_BYTES of len={}", len);
           }
           out.writeByte(DSCODE.STRING_BYTES);
           out.writeShort(len);
@@ -588,8 +588,8 @@ public abstract class DataSerializer {
 
     InternalDataSerializer.checkOut(out);
 
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing Boolean {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing Boolean {}", value);
     }
 
     out.writeBoolean(value.booleanValue());
@@ -604,8 +604,8 @@ public abstract class DataSerializer {
     InternalDataSerializer.checkIn(in);
 
     Boolean value = Boolean.valueOf(in.readBoolean());
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Read Boolean {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read Boolean {}", value);
     }
     return value;
   }
@@ -622,8 +622,8 @@ public abstract class DataSerializer {
 
     InternalDataSerializer.checkOut(out);
 
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing Character {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing Character {}", value);
     }
 
     out.writeChar(value.charValue());
@@ -639,8 +639,8 @@ public abstract class DataSerializer {
     InternalDataSerializer.checkIn(in);
 
     Character value = Character.valueOf(in.readChar());
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Read Character {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read Character {}", value);
     }
     return value;
   }
@@ -657,8 +657,8 @@ public abstract class DataSerializer {
 
     InternalDataSerializer.checkOut(out);
 
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing Byte {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing Byte {}", value);
     }
 
     out.writeByte(value.byteValue());
@@ -673,8 +673,8 @@ public abstract class DataSerializer {
     InternalDataSerializer.checkIn(in);
 
     Byte value = Byte.valueOf(in.readByte());
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Read Byte {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read Byte {}", value);
     }
     return value;
   }
@@ -691,8 +691,8 @@ public abstract class DataSerializer {
 
     InternalDataSerializer.checkOut(out);
 
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing Short {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing Short {}", value);
     }
 
     out.writeShort(value.shortValue());
@@ -707,8 +707,8 @@ public abstract class DataSerializer {
     InternalDataSerializer.checkIn(in);
 
     Short value = Short.valueOf(in.readShort());
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Read Short {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read Short {}", value);
     }
     return value;
   }
@@ -725,8 +725,8 @@ public abstract class DataSerializer {
 
     InternalDataSerializer.checkOut(out);
 
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing Integer {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing Integer {}", value);
     }
 
     out.writeInt(value.intValue());
@@ -741,8 +741,8 @@ public abstract class DataSerializer {
     InternalDataSerializer.checkIn(in);
 
     Integer value = Integer.valueOf(in.readInt());
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Read Integer {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read Integer {}", value);
     }
     return value;
   }
@@ -759,8 +759,8 @@ public abstract class DataSerializer {
 
     InternalDataSerializer.checkOut(out);
 
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing Long {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing Long {}", value);
     }
 
     out.writeLong(value.longValue());
@@ -775,8 +775,8 @@ public abstract class DataSerializer {
     InternalDataSerializer.checkIn(in);
 
     Long value = Long.valueOf(in.readLong());
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Read Long {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read Long {}", value);
     }
     return value;
   }
@@ -793,8 +793,8 @@ public abstract class DataSerializer {
 
     InternalDataSerializer.checkOut(out);
 
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing Float {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing Float {}", value);
     }
 
     out.writeFloat(value.floatValue());
@@ -809,8 +809,8 @@ public abstract class DataSerializer {
     InternalDataSerializer.checkIn(in);
 
     Float value = Float.valueOf(in.readFloat());
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Read Float {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read Float {}", value);
     }
     return value;
   }
@@ -827,8 +827,8 @@ public abstract class DataSerializer {
 
     InternalDataSerializer.checkOut(out);
 
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing Double {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing Double {}", value);
     }
 
     out.writeDouble(value.doubleValue());
@@ -843,8 +843,8 @@ public abstract class DataSerializer {
     InternalDataSerializer.checkIn(in);
 
     Double value = Double.valueOf(in.readDouble());
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Read Double {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read Double {}", value);
     }
     return value;
   }
@@ -861,8 +861,8 @@ public abstract class DataSerializer {
 
     InternalDataSerializer.checkOut(out);
 
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing Boolean {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing Boolean {}", value);
     }
 
     out.writeBoolean(value);
@@ -879,8 +879,8 @@ public abstract class DataSerializer {
     InternalDataSerializer.checkIn(in);
 
     boolean value = in.readBoolean();
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Read Boolean {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read Boolean {}", value);
     }
     return value;
   }
@@ -897,8 +897,8 @@ public abstract class DataSerializer {
 
     InternalDataSerializer.checkOut(out);
 
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing Byte {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing Byte {}", value);
     }
 
     out.writeByte(value);
@@ -915,8 +915,8 @@ public abstract class DataSerializer {
     InternalDataSerializer.checkIn(in);
 
     byte value = in.readByte();
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Read Byte {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read Byte {}", value);
     }
     return value;
   }
@@ -933,8 +933,8 @@ public abstract class DataSerializer {
 
     InternalDataSerializer.checkOut(out);
 
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing Char {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing Char {}", value);
     }
 
     out.writeChar(value);
@@ -951,8 +951,8 @@ public abstract class DataSerializer {
     InternalDataSerializer.checkIn(in);
 
     char value = in.readChar();
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Read Char {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read Char {}", value);
     }
     return value;
   }
@@ -969,8 +969,8 @@ public abstract class DataSerializer {
 
     InternalDataSerializer.checkOut(out);
 
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing Short {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing Short {}", value);
     }
 
     out.writeShort(value);
@@ -987,8 +987,8 @@ public abstract class DataSerializer {
     InternalDataSerializer.checkIn(in);
 
     short value = in.readShort();
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Read Short {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read Short {}", value);
     }
     return value;
   }
@@ -1006,8 +1006,8 @@ public abstract class DataSerializer {
 
     InternalDataSerializer.checkOut(out);
 
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing Unsigned Byte {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing Unsigned Byte {}", value);
     }
 
     out.writeByte(value);
@@ -1024,8 +1024,8 @@ public abstract class DataSerializer {
     InternalDataSerializer.checkIn(in);
 
     int value = in.readUnsignedByte();
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Read Unsigned Byte {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read Unsigned Byte {}", value);
     }
     return value;
   }
@@ -1043,8 +1043,8 @@ public abstract class DataSerializer {
 
     InternalDataSerializer.checkOut(out);
 
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing Unsigned Short {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing Unsigned Short {}", value);
     }
 
     out.writeShort(value);
@@ -1061,8 +1061,8 @@ public abstract class DataSerializer {
     InternalDataSerializer.checkIn(in);
 
     int value = in.readUnsignedShort();
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Read Unsigned Short {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read Unsigned Short {}", value);
     }
     return value;
   }
@@ -1078,8 +1078,8 @@ public abstract class DataSerializer {
 
     InternalDataSerializer.checkOut(out);
 
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing Integer {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing Integer {}", value);
     }
 
     out.writeInt(value);
@@ -1096,8 +1096,8 @@ public abstract class DataSerializer {
     InternalDataSerializer.checkIn(in);
 
     int value = in.readInt();
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Read Integer {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read Integer {}", value);
     }
     return value;
   }
@@ -1114,8 +1114,8 @@ public abstract class DataSerializer {
 
     InternalDataSerializer.checkOut(out);
 
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing Long {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing Long {}", value);
     }
 
     out.writeLong(value);
@@ -1132,8 +1132,8 @@ public abstract class DataSerializer {
     InternalDataSerializer.checkIn(in);
 
     long value = in.readLong();
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Read Long {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read Long {}", value);
     }
     return value;
   }
@@ -1150,8 +1150,8 @@ public abstract class DataSerializer {
 
     InternalDataSerializer.checkOut(out);
 
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing Float {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing Float {}", value);
     }
 
     out.writeFloat(value);
@@ -1168,8 +1168,8 @@ public abstract class DataSerializer {
     InternalDataSerializer.checkIn(in);
 
     float value = in.readFloat();
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Read Float {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read Float {}", value);
     }
     return value;
   }
@@ -1186,8 +1186,8 @@ public abstract class DataSerializer {
 
     InternalDataSerializer.checkOut(out);
 
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing Double {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing Double {}", value);
     }
 
     out.writeDouble(value);
@@ -1204,8 +1204,8 @@ public abstract class DataSerializer {
     InternalDataSerializer.checkIn(in);
 
     double value = in.readDouble();
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Read Double {}", value);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read Double {}", value);
     }
     return value;
   }
@@ -1251,8 +1251,8 @@ public abstract class DataSerializer {
       }
     }
     InternalDataSerializer.writeArrayLength(length, out);
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing byte array of length {}", length);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing byte array of length {}", length);
     }
     if (length > 0) {
       out.write(array, 0, length);
@@ -1279,8 +1279,8 @@ public abstract class DataSerializer {
     if (obj instanceof CachedDeserializable) {
       if (obj instanceof StoredObject) {
         StoredObject so = (StoredObject) obj;
-        if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-          logger.trace(LogMarker.SERIALIZER, "writeObjectAsByteArray StoredObject");
+        if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+          logger.trace(LogMarker.SERIALIZER_VERBOSE, "writeObjectAsByteArray StoredObject");
         }
         so.sendAsByteArray(out);
         return;
@@ -1288,11 +1288,11 @@ public abstract class DataSerializer {
         object = ((CachedDeserializable) obj).getSerializedValue();
       }
     }
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
       if (object == null) {
-        logger.trace(LogMarker.SERIALIZER, "writeObjectAsByteArray null");
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "writeObjectAsByteArray null");
       } else {
-        logger.trace(LogMarker.SERIALIZER, "writeObjectAsByteArray obj.getClass={}",
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "writeObjectAsByteArray obj.getClass={}",
             object.getClass());
       }
     }
@@ -1345,8 +1345,8 @@ public abstract class DataSerializer {
       byte[] array = new byte[length];
       in.readFully(array, 0, length);
 
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER, "Read byte array of length {}", length);
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read byte array of length {}", length);
       }
 
       return array;
@@ -1373,8 +1373,8 @@ public abstract class DataSerializer {
       length = array.length;
     }
     InternalDataSerializer.writeArrayLength(length, out);
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing String array of length {}", length);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing String array of length {}", length);
     }
     if (length > 0) {
       for (int i = 0; i < length; i++) {
@@ -1403,8 +1403,8 @@ public abstract class DataSerializer {
         array[i] = readString(in);
       }
 
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER, "Read String array of length {}", length);
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read String array of length {}", length);
       }
 
       return array;
@@ -1430,8 +1430,8 @@ public abstract class DataSerializer {
       length = array.length;
     }
     InternalDataSerializer.writeArrayLength(length, out);
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing short array of length {}", length);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing short array of length {}", length);
     }
     if (length > 0) {
       for (int i = 0; i < length; i++) {
@@ -1460,8 +1460,8 @@ public abstract class DataSerializer {
         array[i] = in.readShort();
       }
 
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER, "Read short array of length {}", length);
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read short array of length {}", length);
       }
 
       return array;
@@ -1502,8 +1502,8 @@ public abstract class DataSerializer {
         array[i] = in.readChar();
       }
 
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER, "Read char array of length {}", length);
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read char array of length {}", length);
       }
 
       return array;
@@ -1529,8 +1529,8 @@ public abstract class DataSerializer {
       length = array.length;
     }
     InternalDataSerializer.writeArrayLength(length, out);
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing boolean array of length {}", length);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing boolean array of length {}", length);
     }
     if (length > 0) {
       for (int i = 0; i < length; i++) {
@@ -1560,8 +1560,8 @@ public abstract class DataSerializer {
         array[i] = in.readBoolean();
       }
 
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER, "Read boolean array of length {}", length);
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read boolean array of length {}", length);
       }
 
       return array;
@@ -1588,8 +1588,8 @@ public abstract class DataSerializer {
     }
     InternalDataSerializer.writeArrayLength(length, out);
 
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing int array of length {}", length);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing int array of length {}", length);
     }
     if (length > 0) {
       for (int i = 0; i < length; i++) {
@@ -1618,8 +1618,8 @@ public abstract class DataSerializer {
         array[i] = in.readInt();
       }
 
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER, "Read int array of length {}", length);
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read int array of length {}", length);
       }
 
       return array;
@@ -1645,8 +1645,8 @@ public abstract class DataSerializer {
       length = array.length;
     }
     InternalDataSerializer.writeArrayLength(length, out);
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing long array of length {}", length);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing long array of length {}", length);
     }
     if (length > 0) {
       for (int i = 0; i < length; i++) {
@@ -1675,8 +1675,8 @@ public abstract class DataSerializer {
         array[i] = in.readLong();
       }
 
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER, "Read long array of length {}", length);
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read long array of length {}", length);
       }
 
       return array;
@@ -1702,8 +1702,8 @@ public abstract class DataSerializer {
       length = array.length;
     }
     InternalDataSerializer.writeArrayLength(length, out);
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing float array of length {}", length);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing float array of length {}", length);
     }
     if (length > 0) {
       for (int i = 0; i < length; i++) {
@@ -1732,8 +1732,8 @@ public abstract class DataSerializer {
         array[i] = in.readFloat();
       }
 
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER, "Read float array of length {}", length);
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read float array of length {}", length);
       }
 
       return array;
@@ -1759,8 +1759,8 @@ public abstract class DataSerializer {
       length = array.length;
     }
     InternalDataSerializer.writeArrayLength(length, out);
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing double array of length {}", length);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing double array of length {}", length);
     }
     if (length > 0) {
       for (int i = 0; i < length; i++) {
@@ -1789,8 +1789,8 @@ public abstract class DataSerializer {
         array[i] = in.readDouble();
       }
 
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER, "Read double array of length {}", length);
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read double array of length {}", length);
       }
 
       return array;
@@ -1886,8 +1886,8 @@ public abstract class DataSerializer {
         throw cnfEx;
       }
 
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER, "Read Object array of length {}", length);
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read Object array of length {}", length);
       }
 
       return array;
@@ -1910,8 +1910,8 @@ public abstract class DataSerializer {
       length = array.length;
     }
     InternalDataSerializer.writeArrayLength(length, out);
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing byte[][] of length {}", length);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing byte[][] of length {}", length);
     }
     if (length >= 0) {
       for (int i = 0; i < length; i++) {
@@ -1939,8 +1939,8 @@ public abstract class DataSerializer {
         array[i] = readByteArray(in);
       }
 
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER, "Read byte[][] of length {}", length);
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read byte[][] of length {}", length);
       }
 
       return array;
@@ -1971,8 +1971,9 @@ public abstract class DataSerializer {
       size = list.size();
     }
     InternalDataSerializer.writeArrayLength(size, out);
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing ArrayList with {} elements: {}", size, list);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing ArrayList with {} elements: {}", size,
+          list);
     }
     if (size > 0) {
       for (int i = 0; i < size; i++) {
@@ -2007,8 +2008,9 @@ public abstract class DataSerializer {
         list.add(element);
       }
 
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER, "Read ArrayList with {} elements: {}", size, list);
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read ArrayList with {} elements: {}", size,
+            list);
       }
 
       return list;
@@ -2038,8 +2040,8 @@ public abstract class DataSerializer {
       size = list.size();
     }
     InternalDataSerializer.writeArrayLength(size, out);
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing Vector with {} elements: {}", size, list);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing Vector with {} elements: {}", size, list);
     }
     if (size > 0) {
       for (int i = 0; i < size; i++) {
@@ -2072,8 +2074,8 @@ public abstract class DataSerializer {
         list.add(element);
       }
 
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER, "Read Vector with {} elements: {}", size, list);
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read Vector with {} elements: {}", size, list);
       }
 
       return list;
@@ -2103,8 +2105,8 @@ public abstract class DataSerializer {
       size = list.size();
     }
     InternalDataSerializer.writeArrayLength(size, out);
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing Stack with {} elements: {}", size, list);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing Stack with {} elements: {}", size, list);
     }
     if (size > 0) {
       for (int i = 0; i < size; i++) {
@@ -2137,8 +2139,8 @@ public abstract class DataSerializer {
         list.add(element);
       }
 
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER, "Read Stack with {} elements: {}", size, list);
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read Stack with {} elements: {}", size, list);
       }
 
       return list;
@@ -2168,8 +2170,9 @@ public abstract class DataSerializer {
       size = list.size();
     }
     InternalDataSerializer.writeArrayLength(size, out);
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing LinkedList with {} elements: {}", size, list);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing LinkedList with {} elements: {}", size,
+          list);
     }
     if (size > 0) {
       for (Object e : list) {
@@ -2202,8 +2205,9 @@ public abstract class DataSerializer {
         list.add(element);
       }
 
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER, "Read LinkedList with {} elements: {}", size, list);
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read LinkedList with {} elements: {}", size,
+            list);
       }
 
       return list;
@@ -2250,8 +2254,8 @@ public abstract class DataSerializer {
         set.add(element);
       }
 
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER, "Read HashSet with {} elements: {}", size, set);
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read HashSet with {} elements: {}", size, set);
       }
 
       return set;
@@ -2299,8 +2303,9 @@ public abstract class DataSerializer {
         set.add(element);
       }
 
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER, "Read LinkedHashSet with {} elements: {}", size, set);
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read LinkedHashSet with {} elements: {}", size,
+            set);
       }
 
       return set;
@@ -2330,8 +2335,8 @@ public abstract class DataSerializer {
       size = map.size();
     }
     InternalDataSerializer.writeArrayLength(size, out);
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing HashMap with {} elements: {}", size, map);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing HashMap with {} elements: {}", size, map);
     }
     if (size > 0) {
       for (Map.Entry<?, ?> entry : map.entrySet()) {
@@ -2366,8 +2371,8 @@ public abstract class DataSerializer {
         map.put(key, value);
       }
 
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER, "Read HashMap with {} elements: {}", size, map);
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read HashMap with {} elements: {}", size, map);
       }
 
       return map;
@@ -2398,8 +2403,9 @@ public abstract class DataSerializer {
       size = map.size();
     }
     InternalDataSerializer.writeArrayLength(size, out);
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing IdentityHashMap with {} elements: {}", size, map);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing IdentityHashMap with {} elements: {}",
+          size, map);
     }
     if (size > 0) {
       for (Map.Entry<?, ?> entry : map.entrySet()) {
@@ -2435,8 +2441,9 @@ public abstract class DataSerializer {
         map.put(key, value);
       }
 
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER, "Read IdentityHashMap with {} elements: {}", size, map);
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read IdentityHashMap with {} elements: {}",
+            size, map);
       }
 
       return map;
@@ -2476,9 +2483,9 @@ public abstract class DataSerializer {
       size = entrySnapshot.size();
     }
     InternalDataSerializer.writeArrayLength(size, out);
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing ConcurrentHashMap with {} elements: {}", size,
-          entrySnapshot);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing ConcurrentHashMap with {} elements: {}",
+          size, entrySnapshot);
     }
     if (size > 0) {
       for (Map.Entry<?, ?> entry : entrySnapshot) {
@@ -2514,9 +2521,9 @@ public abstract class DataSerializer {
         map.put(key, value);
       }
 
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER, "Read ConcurrentHashMap with {} elements: {}", size,
-            map);
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read ConcurrentHashMap with {} elements: {}",
+            size, map);
       }
 
       return map;
@@ -2546,8 +2553,9 @@ public abstract class DataSerializer {
       size = map.size();
     }
     InternalDataSerializer.writeArrayLength(size, out);
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing Hashtable with {} elements: {}", size, map);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing Hashtable with {} elements: {}", size,
+          map);
     }
     if (size > 0) {
       for (Map.Entry<?, ?> entry : map.entrySet()) {
@@ -2583,8 +2591,9 @@ public abstract class DataSerializer {
         map.put(key, value);
       }
 
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER, "Read Hashtable with {} elements: {}", size, map);
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read Hashtable with {} elements: {}", size,
+            map);
       }
 
       return map;
@@ -2616,8 +2625,8 @@ public abstract class DataSerializer {
       size = map.size();
     }
     InternalDataSerializer.writeArrayLength(size, out);
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing TreeMap with {} elements: {}", size, map);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing TreeMap with {} elements: {}", size, map);
     }
     if (size >= 0) {
       writeObject(map.comparator(), out);
@@ -2656,8 +2665,8 @@ public abstract class DataSerializer {
         map.put(key, value);
       }
 
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER, "Read TreeMap with {} elements: {}", size, map);
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read TreeMap with {} elements: {}", size, map);
       }
 
       return map;
@@ -2687,8 +2696,9 @@ public abstract class DataSerializer {
       size = map.size();
     }
     InternalDataSerializer.writeArrayLength(size, out);
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing LinkedHashMap with {} elements: {}", size, map);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing LinkedHashMap with {} elements: {}", size,
+          map);
     }
     if (size > 0) {
       for (Map.Entry<?, ?> entry : map.entrySet()) {
@@ -2722,8 +2732,9 @@ public abstract class DataSerializer {
         map.put(key, value);
       }
 
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER, "Read LinkedHashMap with {} elements: {}", size, map);
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read LinkedHashMap with {} elements: {}", size,
+            map);
       }
 
       return map;
@@ -2756,8 +2767,8 @@ public abstract class DataSerializer {
       size = set.size();
     }
     InternalDataSerializer.writeArrayLength(size, out);
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing TreeSet with {} elements: {}", size, set);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing TreeSet with {} elements: {}", size, set);
     }
     if (size >= 0) {
       writeObject(set.comparator(), out);
@@ -2793,8 +2804,8 @@ public abstract class DataSerializer {
         set.add(element);
       }
 
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER, "Read TreeSet with {} elements: {}", size, set);
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read TreeSet with {} elements: {}", size, set);
       }
 
       return set;
@@ -2829,8 +2840,9 @@ public abstract class DataSerializer {
       size = s.size();
     }
     InternalDataSerializer.writeArrayLength(size, out);
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing Properties with {} elements: {}", size, props);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing Properties with {} elements: {}", size,
+          props);
     }
     if (size > 0) {
       for (Map.Entry<Object, Object> entry : s) {
@@ -2869,8 +2881,9 @@ public abstract class DataSerializer {
         Object value = readObject(in);
         props.put(key, value);
       }
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER, "Read Properties with {} elements: {}", size, props);
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read Properties with {} elements: {}", size,
+            props);
       }
       return props;
     }
@@ -3142,8 +3155,6 @@ public abstract class DataSerializer {
    * gets the enum constants for the given class. {@link Class#getEnumConstants()} uses reflection,
    * so we keep around the class to enumConstants mapping in the {@link #knownEnums} map
    *
-   * @param <E>
-   * @param clazz
    * @return enum constants for the given class
    */
   private static <E extends Enum> E[] getEnumConstantsForClass(Class<E> clazz) {
@@ -3175,8 +3186,8 @@ public abstract class DataSerializer {
           LocalizedStrings.DataSerializer_ENUM_TO_SERIALIZE_IS_NULL.toLocalizedString());
     }
 
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing enum {}", e);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing enum {}", e);
     }
     InternalDataSerializer.writeArrayLength(e.ordinal(), out);
   }
diff --git a/geode-core/src/main/java/org/apache/geode/admin/internal/AdminDistributedSystemImpl.java b/geode-core/src/main/java/org/apache/geode/admin/internal/AdminDistributedSystemImpl.java
index 84f6f75..67c5719 100755
--- a/geode-core/src/main/java/org/apache/geode/admin/internal/AdminDistributedSystemImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/admin/internal/AdminDistributedSystemImpl.java
@@ -1089,7 +1089,7 @@ public class AdminDistributedSystemImpl implements org.apache.geode.admin.AdminD
             synchronized (this.cacheServerSet) {
               future = new AdminFutureTask(vm.getId(), new Callable() {
                 public Object call() throws Exception {
-                  logger.info(LogMarker.DM,
+                  logger.info(LogMarker.DM_MARKER,
                       LocalizedMessage.create(
                           LocalizedStrings.AdminDistributedSystemImpl_ADDING_NEW_CACHESERVER_FOR__0,
                           vm));
@@ -1104,7 +1104,7 @@ public class AdminDistributedSystemImpl implements org.apache.geode.admin.AdminD
             synchronized (this.applicationSet) {
               future = new AdminFutureTask(vm.getId(), new Callable() {
                 public Object call() throws Exception {
-                  logger.info(LogMarker.DM,
+                  logger.info(LogMarker.DM_MARKER,
                       LocalizedMessage.create(
                           LocalizedStrings.AdminDistributedSystemImpl_ADDING_NEW_APPLICATION_FOR__0,
                           vm));
diff --git a/geode-core/src/main/java/org/apache/geode/admin/internal/DisabledManagedEntityController.java b/geode-core/src/main/java/org/apache/geode/admin/internal/DisabledManagedEntityController.java
index e4eb49f..f869821 100755
--- a/geode-core/src/main/java/org/apache/geode/admin/internal/DisabledManagedEntityController.java
+++ b/geode-core/src/main/java/org/apache/geode/admin/internal/DisabledManagedEntityController.java
@@ -40,8 +40,8 @@ class DisabledManagedEntityController implements ManagedEntityController {
 
   @Override
   public void start(InternalManagedEntity entity) {
-    if (logger.isTraceEnabled(LogMarker.MANAGED_ENTITY)) {
-      logger.warn(LogMarker.MANAGED_ENTITY, "DisabledManagedEntityController#start {}",
+    if (logger.isTraceEnabled(LogMarker.MANAGED_ENTITY_VERBOSE)) {
+      logger.trace(LogMarker.MANAGED_ENTITY_VERBOSE, "DisabledManagedEntityController#start {}",
           EXCEPTION_MESSAGE);
     }
     throw new UnsupportedOperationException(EXCEPTION_MESSAGE);
@@ -49,8 +49,8 @@ class DisabledManagedEntityController implements ManagedEntityController {
 
   @Override
   public void stop(InternalManagedEntity entity) {
-    if (logger.isTraceEnabled(LogMarker.MANAGED_ENTITY)) {
-      logger.warn(LogMarker.MANAGED_ENTITY, "DisabledManagedEntityController#stop {}",
+    if (logger.isTraceEnabled(LogMarker.MANAGED_ENTITY_VERBOSE)) {
+      logger.trace(LogMarker.MANAGED_ENTITY_VERBOSE, "DisabledManagedEntityController#stop {}",
           EXCEPTION_MESSAGE);
     }
     throw new UnsupportedOperationException(EXCEPTION_MESSAGE);
@@ -58,8 +58,8 @@ class DisabledManagedEntityController implements ManagedEntityController {
 
   @Override
   public boolean isRunning(InternalManagedEntity entity) {
-    if (logger.isTraceEnabled(LogMarker.MANAGED_ENTITY)) {
-      logger.warn(LogMarker.MANAGED_ENTITY, "DisabledManagedEntityController#isRunning {}",
+    if (logger.isTraceEnabled(LogMarker.MANAGED_ENTITY_VERBOSE)) {
+      logger.trace(LogMarker.MANAGED_ENTITY_VERBOSE, "DisabledManagedEntityController#isRunning {}",
           EXCEPTION_MESSAGE);
     }
     throw new UnsupportedOperationException(EXCEPTION_MESSAGE);
@@ -67,8 +67,8 @@ class DisabledManagedEntityController implements ManagedEntityController {
 
   @Override
   public String getLog(DistributionLocatorImpl locator) {
-    if (logger.isTraceEnabled(LogMarker.MANAGED_ENTITY)) {
-      logger.warn(LogMarker.MANAGED_ENTITY, "DisabledManagedEntityController#getLog {}",
+    if (logger.isTraceEnabled(LogMarker.MANAGED_ENTITY_VERBOSE)) {
+      logger.trace(LogMarker.MANAGED_ENTITY_VERBOSE, "DisabledManagedEntityController#getLog {}",
           EXCEPTION_MESSAGE);
     }
     throw new UnsupportedOperationException(EXCEPTION_MESSAGE);
@@ -76,17 +76,17 @@ class DisabledManagedEntityController implements ManagedEntityController {
 
   @Override
   public String buildSSLArguments(DistributedSystemConfig config) {
-    if (logger.isTraceEnabled(LogMarker.MANAGED_ENTITY)) {
-      logger.warn(LogMarker.MANAGED_ENTITY, "DisabledManagedEntityController#buildSSLArguments {}",
-          EXCEPTION_MESSAGE);
+    if (logger.isTraceEnabled(LogMarker.MANAGED_ENTITY_VERBOSE)) {
+      logger.trace(LogMarker.MANAGED_ENTITY_VERBOSE,
+          "DisabledManagedEntityController#buildSSLArguments {}", EXCEPTION_MESSAGE);
     }
     throw new UnsupportedOperationException(EXCEPTION_MESSAGE);
   }
 
   @Override
   public String getProductExecutable(InternalManagedEntity entity, String executable) {
-    if (logger.isTraceEnabled(LogMarker.MANAGED_ENTITY)) {
-      logger.warn(LogMarker.MANAGED_ENTITY,
+    if (logger.isTraceEnabled(LogMarker.MANAGED_ENTITY_VERBOSE)) {
+      logger.trace(LogMarker.MANAGED_ENTITY_VERBOSE,
           "DisabledManagedEntityController#getProductExecutable {}", EXCEPTION_MESSAGE);
     }
     throw new UnsupportedOperationException(EXCEPTION_MESSAGE);
diff --git a/geode-core/src/main/java/org/apache/geode/admin/internal/ManagedEntityControllerFactory.java b/geode-core/src/main/java/org/apache/geode/admin/internal/ManagedEntityControllerFactory.java
index 005a300..6a9bb64 100755
--- a/geode-core/src/main/java/org/apache/geode/admin/internal/ManagedEntityControllerFactory.java
+++ b/geode-core/src/main/java/org/apache/geode/admin/internal/ManagedEntityControllerFactory.java
@@ -37,11 +37,11 @@ public class ManagedEntityControllerFactory {
   static ManagedEntityController createManagedEntityController(
       final AdminDistributedSystem system) {
     if (isEnabledManagedEntityController()) {
-      logger.info(LogMarker.CONFIG,
+      logger.info(LogMarker.CONFIG_MARKER,
           "Local and remote OS command invocations are enabled for the Admin API.");
       return createEnabledManagedEntityController(system);
     } else {
-      logger.info(LogMarker.CONFIG,
+      logger.info(LogMarker.CONFIG_MARKER,
           "Local and remote OS command invocations are disabled for the Admin API.");
       return new DisabledManagedEntityController();
     }
diff --git a/geode-core/src/main/java/org/apache/geode/admin/jmx/internal/AgentImpl.java b/geode-core/src/main/java/org/apache/geode/admin/jmx/internal/AgentImpl.java
index b2f01e0..b64ca6b 100644
--- a/geode-core/src/main/java/org/apache/geode/admin/jmx/internal/AgentImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/admin/jmx/internal/AgentImpl.java
@@ -894,10 +894,11 @@ public class AgentImpl implements org.apache.geode.admin.jmx.Agent,
     logger.info(Banner.getString(this.agentConfig.getOriginalArgs()));
 
     // LOG:CONFIG: changed next three statements from config to info
-    logger.info(LogMarker.CONFIG, LocalizedStrings.AgentImpl_AGENT_CONFIG_PROPERTY_FILE_NAME_0
-        .toLocalizedString(AgentConfigImpl.retrievePropertyFile()));
-    logger.info(LogMarker.CONFIG, this.agentConfig.getPropertyFileDescription());
-    logger.info(LogMarker.CONFIG, this.agentConfig.toPropertiesAsString());
+    logger.info(LogMarker.CONFIG_MARKER,
+        LocalizedStrings.AgentImpl_AGENT_CONFIG_PROPERTY_FILE_NAME_0
+            .toLocalizedString(AgentConfigImpl.retrievePropertyFile()));
+    logger.info(LogMarker.CONFIG_MARKER, this.agentConfig.getPropertyFileDescription());
+    logger.info(LogMarker.CONFIG_MARKER, this.agentConfig.toPropertiesAsString());
   }
 
   /**
@@ -1301,7 +1302,7 @@ public class AgentImpl implements org.apache.geode.admin.jmx.Agent,
       // validate and set host and port values...
       if (this.agentConfig.getHttpPort() > 0) {
         this.httpAdaptor.setPort(this.agentConfig.getHttpPort());
-        logger.info(LogMarker.CONFIG,
+        logger.info(LogMarker.CONFIG_MARKER,
             LocalizedMessage.create(LocalizedStrings.AgentImpl_HTTP_ADAPTOR_LISTENING_ON_PORT__0,
                 this.agentConfig.getHttpPort()));
       } else {
@@ -1311,7 +1312,7 @@ public class AgentImpl implements org.apache.geode.admin.jmx.Agent,
 
       if (this.agentConfig.getHttpBindAddress() != null) {
         String host = this.agentConfig.getHttpBindAddress();
-        logger.info(LogMarker.CONFIG, LocalizedMessage
+        logger.info(LogMarker.CONFIG_MARKER, LocalizedMessage
             .create(LocalizedStrings.AgentImpl_HTTP_ADAPTOR_LISTENING_ON_ADDRESS__0, host));
         this.httpAdaptor.setHost(host);
       } else {
diff --git a/geode-core/src/main/java/org/apache/geode/cache/client/internal/AbstractOp.java b/geode-core/src/main/java/org/apache/geode/cache/client/internal/AbstractOp.java
index 29b1c62..e0ea42c 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/client/internal/AbstractOp.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/client/internal/AbstractOp.java
@@ -81,10 +81,9 @@ public abstract class AbstractOp implements Op {
    */
   protected void attemptSend(Connection cnx) throws Exception {
     setMsgTransactionId();
-    if (logger.isTraceEnabled(LogMarker.DISTRIBUTION_BRIDGE_SERVER)) {
-      if (logger.isDebugEnabled()) {
-        logger.debug("Sending op={} using {}", getShortClassName(), cnx);
-      }
+    if (logger.isTraceEnabled(LogMarker.DISTRIBUTION_BRIDGE_SERVER_VERBOSE)) {
+      logger.trace(LogMarker.DISTRIBUTION_BRIDGE_SERVER_VERBOSE, "Sending op={} using {}",
+          getShortClassName(), cnx);
     }
     getMessage().setComms(cnx.getSocket(), cnx.getInputStream(), cnx.getOutputStream(),
         cnx.getCommBuffer(), cnx.getStats());
@@ -148,8 +147,8 @@ public abstract class AbstractOp implements Op {
     if (cnx.getServer().getRequiresCredentials()) {
       if (!message.isSecureMode()) {
         // This can be seen during shutdown
-        if (logger.isDebugEnabled()) {
-          logger.trace(LogMarker.BRIDGE_SERVER,
+        if (logger.isTraceEnabled(LogMarker.BRIDGE_SERVER_VERBOSE)) {
+          logger.trace(LogMarker.BRIDGE_SERVER_VERBOSE,
               "Response message from {} for {} has no secure part.", cnx, this);
         }
         return;
diff --git a/geode-core/src/main/java/org/apache/geode/cache/client/internal/OpExecutorImpl.java b/geode-core/src/main/java/org/apache/geode/cache/client/internal/OpExecutorImpl.java
index 1793c71..4116dde 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/client/internal/OpExecutorImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/client/internal/OpExecutorImpl.java
@@ -539,8 +539,8 @@ public class OpExecutorImpl implements ExecutablePool {
     QueueConnections connections = queueManager.getAllConnections();
 
     List backups = connections.getBackups();
-    if (logger.isTraceEnabled(LogMarker.BRIDGE_SERVER)) {
-      logger.trace(LogMarker.BRIDGE_SERVER, "sending {} to backups: {}", op, backups);
+    if (logger.isTraceEnabled(LogMarker.BRIDGE_SERVER_VERBOSE)) {
+      logger.trace(LogMarker.BRIDGE_SERVER_VERBOSE, "sending {} to backups: {}", op, backups);
     }
     for (int i = backups.size() - 1; i >= 0; i--) {
       Connection conn = (Connection) backups.get(i);
@@ -555,13 +555,13 @@ public class OpExecutorImpl implements ExecutablePool {
     HashSet attemptedPrimaries = new HashSet();
     while (true) {
       try {
-        if (logger.isTraceEnabled(LogMarker.BRIDGE_SERVER)) {
-          logger.trace(LogMarker.BRIDGE_SERVER, "sending {} to primary: {}", op, primary);
+        if (logger.isTraceEnabled(LogMarker.BRIDGE_SERVER_VERBOSE)) {
+          logger.trace(LogMarker.BRIDGE_SERVER_VERBOSE, "sending {} to primary: {}", op, primary);
         }
         return executeWithPossibleReAuthentication(primary, op);
       } catch (Exception e) {
-        if (logger.isTraceEnabled(LogMarker.BRIDGE_SERVER)) {
-          logger.trace(LogMarker.BRIDGE_SERVER, "caught exception sending to primary {}",
+        if (logger.isTraceEnabled(LogMarker.BRIDGE_SERVER_VERBOSE)) {
+          logger.trace(LogMarker.BRIDGE_SERVER_VERBOSE, "caught exception sending to primary {}",
               e.getMessage(), e);
         }
         boolean finalAttempt = !attemptedPrimaries.add(primary.getServer());
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/ClusterDistributionManager.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/ClusterDistributionManager.java
index f190e72..14251b7 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/ClusterDistributionManager.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/ClusterDistributionManager.java
@@ -619,10 +619,10 @@ public class ClusterDistributionManager implements DistributionManager {
         Object[] logArgs = new Object[] {distributionManager.getDistributionManagerId(), transport,
             Integer.valueOf(distributionManager.getOtherDistributionManagerIds().size()),
             distributionManager.getOtherDistributionManagerIds(),
-            (logger.isInfoEnabled(LogMarker.DM) ? " (VERBOSE, took " + delta + " ms)" : ""),
+            (logger.isInfoEnabled(LogMarker.DM_MARKER) ? " (VERBOSE, took " + delta + " ms)" : ""),
             ((distributionManager.getDMType() == ADMIN_ONLY_DM_TYPE) ? " (admin only)"
                 : (distributionManager.getDMType() == LOCATOR_DM_TYPE) ? " (locator)" : "")};
-        logger.info(LogMarker.DM,
+        logger.info(LogMarker.DM_MARKER,
             LocalizedMessage.create(
                 LocalizedStrings.DistributionManager_DISTRIBUTIONMANAGER_0_STARTED_ON_1_THERE_WERE_2_OTHER_DMS_3_4_5,
                 logArgs));
@@ -681,13 +681,15 @@ public class ClusterDistributionManager implements DistributionManager {
     try {
 
       if (MULTI_SERIAL_EXECUTORS) {
-        if (logger.isInfoEnabled(LogMarker.DM)) {
-          logger.info(LogMarker.DM, "Serial Queue info :" + " THROTTLE_PERCENT: " + THROTTLE_PERCENT
-              + " SERIAL_QUEUE_BYTE_LIMIT :" + SERIAL_QUEUE_BYTE_LIMIT + " SERIAL_QUEUE_THROTTLE :"
-              + SERIAL_QUEUE_THROTTLE + " TOTAL_SERIAL_QUEUE_BYTE_LIMIT :"
-              + TOTAL_SERIAL_QUEUE_BYTE_LIMIT + " TOTAL_SERIAL_QUEUE_THROTTLE :"
-              + TOTAL_SERIAL_QUEUE_THROTTLE + " SERIAL_QUEUE_SIZE_LIMIT :" + SERIAL_QUEUE_SIZE_LIMIT
-              + " SERIAL_QUEUE_SIZE_THROTTLE :" + SERIAL_QUEUE_SIZE_THROTTLE);
+        if (logger.isInfoEnabled(LogMarker.DM_MARKER)) {
+          logger.info(LogMarker.DM_MARKER,
+              "Serial Queue info :" + " THROTTLE_PERCENT: " + THROTTLE_PERCENT
+                  + " SERIAL_QUEUE_BYTE_LIMIT :" + SERIAL_QUEUE_BYTE_LIMIT
+                  + " SERIAL_QUEUE_THROTTLE :" + SERIAL_QUEUE_THROTTLE
+                  + " TOTAL_SERIAL_QUEUE_BYTE_LIMIT :" + TOTAL_SERIAL_QUEUE_BYTE_LIMIT
+                  + " TOTAL_SERIAL_QUEUE_THROTTLE :" + TOTAL_SERIAL_QUEUE_THROTTLE
+                  + " SERIAL_QUEUE_SIZE_LIMIT :" + SERIAL_QUEUE_SIZE_LIMIT
+                  + " SERIAL_QUEUE_SIZE_THROTTLE :" + SERIAL_QUEUE_SIZE_THROTTLE);
         }
         // when TCP/IP is disabled we can't throttle the serial queue or we run the risk of
         // distributed deadlock when we block the UDP reader thread
@@ -1034,8 +1036,9 @@ public class ClusterDistributionManager implements DistributionManager {
       sb.append(" ms)");
 
       logger.info(LocalizedMessage.create(
-          LocalizedStrings.DistributionManager_STARTING_DISTRIBUTIONMANAGER_0_1, new Object[] {
-              this.localAddress, (logger.isInfoEnabled(LogMarker.DM) ? sb.toString() : "")}));
+          LocalizedStrings.DistributionManager_STARTING_DISTRIBUTIONMANAGER_0_1,
+          new Object[] {this.localAddress,
+              (logger.isInfoEnabled(LogMarker.DM_MARKER) ? sb.toString() : "")}));
 
       this.description = "Distribution manager on " + this.localAddress + " started at "
           + (new Date(System.currentTimeMillis())).toString();
@@ -3956,8 +3959,8 @@ public class ClusterDistributionManager implements DistributionManager {
 
         // If not used mark this as unused.
         if (!isUsed) {
-          if (logger.isInfoEnabled(LogMarker.DM))
-            logger.info(LogMarker.DM,
+          if (logger.isInfoEnabled(LogMarker.DM_MARKER))
+            logger.info(LogMarker.DM_MARKER,
                 LocalizedMessage.create(
                     LocalizedStrings.DistributionManager_MARKING_THE_SERIALQUEUEDEXECUTOR_WITH_ID__0__USED_BY_THE_MEMBER__1__TO_BE_UNUSED,
                     new Object[] {queueId, member}));
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionAdvisor.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionAdvisor.java
index 955342c..1eb7f91 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionAdvisor.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionAdvisor.java
@@ -534,8 +534,9 @@ public class DistributionAdvisor {
     try {
       return doPutProfile(newProfile, forceProfile);
     } finally {
-      if (logger.isTraceEnabled(LogMarker.DA)) {
-        logger.trace(LogMarker.DA, "putProfile exiting {}", toStringWithProfiles());
+      if (logger.isTraceEnabled(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE)) {
+        logger.trace(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE, "putProfile exiting {}",
+            toStringWithProfiles());
       }
     }
   }
@@ -568,8 +569,9 @@ public class DistributionAdvisor {
     if (!forceProfile) {
       // ensure member is in distributed system view
       if (!isCurrentMember(newProfile)) {
-        if (logger.isTraceEnabled(LogMarker.DA)) {
-          logger.trace(LogMarker.DA, "putProfile: ignoring {}; not in current view for {}",
+        if (logger.isTraceEnabled(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE)) {
+          logger.trace(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE,
+              "putProfile: ignoring {}; not in current view for {}",
               newProfile.getDistributedMember(), getAdvisee().getFullPath());
         }
 
@@ -583,8 +585,8 @@ public class DistributionAdvisor {
     if (removedSerialNumber != null
         && !isNewerSerialNumber(newProfile.getSerialNumber(), removedSerialNumber.intValue())) {
       // removedProfile exists and newProfile is NOT newer so do nothing
-      if (logger.isTraceEnabled(LogMarker.DA)) {
-        logger.trace(LogMarker.DA,
+      if (logger.isTraceEnabled(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE)) {
+        logger.trace(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE,
             "putProfile: Skipping putProfile: {} is not newer than serialNumber {} for {}",
             newProfile, removedSerialNumber, getAdvisee().getFullPath());
       }
@@ -593,16 +595,17 @@ public class DistributionAdvisor {
 
     // compare newProfile to oldProfile if one is found
     Profile oldProfile = getProfile(newProfile.getId());
-    final boolean isDebugEnabled_DA = logger.isTraceEnabled(LogMarker.DA);
-    if (isDebugEnabled_DA) {
-      logger.trace(LogMarker.DA,
+    final boolean isTraceEnabled_DistributionAdvisor =
+        logger.isTraceEnabled(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE);
+    if (isTraceEnabled_DistributionAdvisor) {
+      logger.trace(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE,
           "putProfile: Updating existing profile: {} with new profile: {} for {}", oldProfile,
           newProfile, getAdvisee().getFullPath());
     }
     if (oldProfile != null && !isNewerProfile(newProfile, oldProfile)) {
       // oldProfile exists and newProfile is NOT newer so do nothing
-      if (isDebugEnabled_DA) {
-        logger.trace(LogMarker.DA,
+      if (isTraceEnabled_DistributionAdvisor) {
+        logger.trace(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE,
             "putProfile: Ignoring {} because it's older than or same as {} for {}", newProfile,
             oldProfile, getAdvisee().getFullPath());
       }
@@ -616,9 +619,9 @@ public class DistributionAdvisor {
       } else {
         if (!membershipClosed) {
           membershipVersion++;
-          if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) {
-            logger.trace(LogMarker.STATE_FLUSH_OP, "StateFlush incremented membership version: {}",
-                membershipVersion);
+          if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP_VERBOSE)) {
+            logger.trace(LogMarker.STATE_FLUSH_OP_VERBOSE,
+                "StateFlush incremented membership version: {}", membershipVersion);
           }
           newProfile.initialMembershipVersion = membershipVersion;
           synchronized (this.opCountLock) {
@@ -631,8 +634,9 @@ public class DistributionAdvisor {
       forceNewMembershipVersion();
     }
 
-    if (isDebugEnabled_DA) {
-      logger.trace(LogMarker.DA, "DistributionAdvisor ({}) putProfile: {}", this, newProfile);
+    if (isTraceEnabled_DistributionAdvisor) {
+      logger.trace(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE,
+          "DistributionAdvisor ({}) putProfile: {}", this, newProfile);
     }
     boolean doAddOrUpdate = evaluateProfiles(newProfile, oldProfile);
     if (!doAddOrUpdate) {
@@ -652,8 +656,6 @@ public class DistributionAdvisor {
   /**
    * A callback to sub-classes for extra validation logic
    *
-   * @param oldProfile
-   * @param newProfile
    * @return true if the change from old to new is valid
    */
   protected boolean evaluateProfiles(Profile newProfile, Profile oldProfile) {
@@ -724,15 +726,15 @@ public class DistributionAdvisor {
   public synchronized void forceNewMembershipVersion() {
     if (!membershipClosed) {
       membershipVersion++;
-      if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) {
-        logger.trace(LogMarker.STATE_FLUSH_OP, "StateFlush forced new membership version: {}",
-            membershipVersion);
+      if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP_VERBOSE)) {
+        logger.trace(LogMarker.STATE_FLUSH_OP_VERBOSE,
+            "StateFlush forced new membership version: {}", membershipVersion);
       }
       synchronized (this.opCountLock) {
         previousVersionOpCount += currentVersionOpCount;
         currentVersionOpCount = 0;
-        if (logger.isTraceEnabled(LogMarker.DISTRIBUTION_STATE_FLUSH_OP)) {
-          logger.trace(LogMarker.DISTRIBUTION_STATE_FLUSH_OP,
+        if (logger.isTraceEnabled(LogMarker.DISTRIBUTION_STATE_FLUSH_VERBOSE)) {
+          logger.trace(LogMarker.DISTRIBUTION_STATE_FLUSH_VERBOSE,
               "advisor for {} forced new membership version to {} previousOpCount={}", getAdvisee(),
               membershipVersion, previousVersionOpCount);
         }
@@ -749,15 +751,15 @@ public class DistributionAdvisor {
    * @since GemFire 5.1
    */
   public synchronized long startOperation() {
-    if (logger.isTraceEnabled(LogMarker.DISTRIBUTION_STATE_FLUSH_OP)) {
-      logger.trace(LogMarker.DISTRIBUTION_STATE_FLUSH_OP,
+    if (logger.isTraceEnabled(LogMarker.DISTRIBUTION_STATE_FLUSH_VERBOSE)) {
+      logger.trace(LogMarker.DISTRIBUTION_STATE_FLUSH_VERBOSE,
           "startOperation() op count is now {} in view version {}", currentVersionOpCount + 1,
           membershipVersion);
     }
     synchronized (this.opCountLock) {
       currentVersionOpCount++;
-      if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) {
-        logger.trace(LogMarker.STATE_FLUSH_OP, "StateFlush current opcount incremented: {}",
+      if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP_VERBOSE)) {
+        logger.trace(LogMarker.STATE_FLUSH_OP_VERBOSE, "StateFlush current opcount incremented: {}",
             currentVersionOpCount);
       }
     }
@@ -775,15 +777,15 @@ public class DistributionAdvisor {
     synchronized (this.opCountLock) {
       if (version == membershipVersion) {
         currentVersionOpCount--;
-        if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) {
-          logger.trace(LogMarker.STATE_FLUSH_OP, "StateFlush current opcount deccremented: {}",
-              currentVersionOpCount);
+        if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP_VERBOSE)) {
+          logger.trace(LogMarker.STATE_FLUSH_OP_VERBOSE,
+              "StateFlush current opcount deccremented: {}", currentVersionOpCount);
         }
       } else {
         previousVersionOpCount--;
-        if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) {
-          logger.trace(LogMarker.STATE_FLUSH_OP, "StateFlush previous opcount incremented: {}",
-              previousVersionOpCount);
+        if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP_VERBOSE)) {
+          logger.trace(LogMarker.STATE_FLUSH_OP_VERBOSE,
+              "StateFlush previous opcount incremented: {}", previousVersionOpCount);
         }
       }
     }
@@ -812,7 +814,8 @@ public class DistributionAdvisor {
     long warnTime = startTime + timeout;
     long quitTime = warnTime + timeout - 1000L;
     boolean warned = false;
-    final boolean isDebugEnabled_STATE_FLUSH_OP = logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP);
+    final boolean isDebugEnabled_STATE_FLUSH_OP =
+        logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP_VERBOSE);
     while (true) {
       long opCount;
       synchronized (this.opCountLock) {
@@ -825,8 +828,8 @@ public class DistributionAdvisor {
       // must not terminate due to cache closure until that happens.
       // See bug 34361 comment 79
       if (isDebugEnabled_STATE_FLUSH_OP) {
-        logger.trace(LogMarker.STATE_FLUSH_OP, "Waiting for current operations to finish({})",
-            opCount);
+        logger.trace(LogMarker.STATE_FLUSH_OP_VERBOSE,
+            "Waiting for current operations to finish({})", opCount);
       }
       try {
         Thread.sleep(50);
@@ -847,7 +850,7 @@ public class DistributionAdvisor {
     }
     if (this.membershipClosed) {
       if (isDebugEnabled_STATE_FLUSH_OP) {
-        logger.trace(LogMarker.STATE_FLUSH_OP,
+        logger.trace(LogMarker.STATE_FLUSH_OP_VERBOSE,
             "State Flush stopped waiting for operations to distribute because advisor has been closed");
       }
     }
@@ -884,21 +887,23 @@ public class DistributionAdvisor {
    * @return true if it was being tracked
    */
   private boolean basicRemoveId(ProfileId memberId, boolean crashed, boolean destroyed) {
-    final boolean isDebugEnabled = logger.isTraceEnabled(LogMarker.DA);
+    final boolean isDebugEnabled = logger.isTraceEnabled(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE);
     if (isDebugEnabled) {
-      logger.trace(LogMarker.DA, "DistributionAdvisor ({}) removeId {}", this, memberId);
+      logger.trace(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE, "DistributionAdvisor ({}) removeId {}",
+          this, memberId);
     }
 
     Profile profileRemoved = basicRemoveMemberId(memberId);
     if (profileRemoved == null) {
       if (isDebugEnabled) {
-        logger.trace(LogMarker.DA, "DistributionAdvisor.removeId: no profile to remove for {}",
-            memberId);
+        logger.trace(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE,
+            "DistributionAdvisor.removeId: no profile to remove for {}", memberId);
       }
       return false;
     }
     if (isDebugEnabled) {
-      logger.trace(LogMarker.DA, "DistributionAdvisor.removeId: removed profile for {}", memberId);
+      logger.trace(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE,
+          "DistributionAdvisor.removeId: removed profile for {}", memberId);
     }
     profileRemoved(profileRemoved);
     notifyListenersProfileRemoved(profileRemoved, destroyed);
@@ -933,8 +938,9 @@ public class DistributionAdvisor {
     try {
       result = doRemoveId(memberId, crashed, destroyed, fromMembershipListener);
     } finally {
-      if (logger.isTraceEnabled(LogMarker.DA)) {
-        logger.trace(LogMarker.DA, "removeId {} exiting {}", memberId, toStringWithProfiles());
+      if (logger.isTraceEnabled(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE)) {
+        logger.trace(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE, "removeId {} exiting {}", memberId,
+            toStringWithProfiles());
       }
     }
     return result;
@@ -942,10 +948,10 @@ public class DistributionAdvisor {
 
   private boolean doRemoveId(ProfileId memberId, boolean crashed, boolean destroyed,
       boolean fromMembershipListener) {
-    final boolean isDebugEnabled_DA = logger.isTraceEnabled(LogMarker.DA);
+    final boolean isDebugEnabled_DA = logger.isTraceEnabled(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE);
     if (isDebugEnabled_DA) {
-      logger.trace(LogMarker.DA, "removeId: removing member {} from resource {}", memberId,
-          getAdvisee().getFullPath());
+      logger.trace(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE,
+          "removeId: removing member {} from resource {}", memberId, getAdvisee().getFullPath());
     }
     synchronized (this) {
       // If the member has disappeared, completely remove
@@ -958,7 +964,8 @@ public class DistributionAdvisor {
         while (profileToRemove != null) {
           result = true;
           if (isDebugEnabled_DA) {
-            logger.trace(LogMarker.DA, "removeId: tracking removal of {}", profileToRemove);
+            logger.trace(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE, "removeId: tracking removal of {}",
+                profileToRemove);
           }
           this.removedProfiles.put(profileToRemove.getDistributedMember(),
               Integer.valueOf(profileToRemove.getSerialNumber()));
@@ -987,8 +994,8 @@ public class DistributionAdvisor {
    */
   public boolean removeIdWithSerial(InternalDistributedMember memberId, int serialNum,
       boolean regionDestroyed) {
-    if (logger.isTraceEnabled(LogMarker.DA)) {
-      logger.trace(LogMarker.DA,
+    if (logger.isTraceEnabled(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE)) {
+      logger.trace(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE,
           "removeIdWithSerial: removing member {} with serial {} from resource {}", memberId,
           serialNum, getAdvisee().getName());
     }
@@ -1006,9 +1013,9 @@ public class DistributionAdvisor {
    */
   private synchronized boolean updateRemovedProfiles(InternalDistributedMember memberId,
       int serialNum, boolean regionDestroyed) {
-    final boolean isDebugEnabled_DA = logger.isTraceEnabled(LogMarker.DA);
+    final boolean isDebugEnabled_DA = logger.isTraceEnabled(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE);
     if (isDebugEnabled_DA) {
-      logger.trace(LogMarker.DA,
+      logger.trace(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE,
           "updateRemovedProfiles: ensure member {} with serial {} is removed from region {}",
           memberId, serialNum, getAdvisee().getFullPath());
     }
@@ -1022,7 +1029,7 @@ public class DistributionAdvisor {
       if (profileToRemove != null) {
         if (isNewerSerialNumber(profileToRemove.serialNumber, serialNum)) {
           if (isDebugEnabled_DA) {
-            logger.trace(LogMarker.DA,
+            logger.trace(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE,
                 "updateRemovedProfiles: member {} has profile {} which is newer than serial {}",
                 memberId, profileToRemove, serialNum);
           }
@@ -1042,7 +1049,7 @@ public class DistributionAdvisor {
         Integer oldSerial = (Integer) this.removedProfiles.get(memberId);
         if (oldSerial != null && isNewerSerialNumber(oldSerial.intValue(), serialNum)) {
           if (isDebugEnabled_DA) {
-            logger.trace(LogMarker.DA,
+            logger.trace(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE,
                 "updateRemovedProfiles: member {} sent removal of serial {} but we hae already removed {}",
                 memberId, serialNum, oldSerial);
           }
@@ -1052,7 +1059,7 @@ public class DistributionAdvisor {
 
       if (isNews) {
         if (isDebugEnabled_DA) {
-          logger.trace(LogMarker.DA,
+          logger.trace(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE,
               "updateRemovedProfiles: adding serial {} for member {} to removedProfiles", serialNum,
               memberId);
         }
@@ -1069,7 +1076,8 @@ public class DistributionAdvisor {
     else {
       // If the member has disappeared, completely remove (garbage collect)
       if (isDebugEnabled_DA) {
-        logger.trace(LogMarker.DA, "updateRemovedProfiles: garbage collecting member {}", memberId);
+        logger.trace(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE,
+            "updateRemovedProfiles: garbage collecting member {}", memberId);
       }
       this.removedProfiles.remove(memberId);
 
@@ -1078,7 +1086,8 @@ public class DistributionAdvisor {
     }
 
     if (isDebugEnabled_DA) {
-      logger.trace(LogMarker.DA, "updateRemovedProfiles: removedId = {}", removedId);
+      logger.trace(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE, "updateRemovedProfiles: removedId = {}",
+          removedId);
     }
 
     return removedId;
@@ -1094,18 +1103,6 @@ public class DistributionAdvisor {
     return (indexOfMemberId(memberId) > -1);
   }
 
-  // /**
-  // * get the profile for a specific member
-  // * @since GemFire 5.1
-  // * @return the Profile or null
-  // */
-  // public synchronized Profile getProfile(InternalDistributedMember memberId) {
-  // int index = indexOfMemberId(memberId);
-  // if (index >= 0) {
-  // return profiles[index];
-  // }
-  // return null;
-  // }
 
   public synchronized int getNumProfiles() {
     return this.numActiveProfiles;
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionMessage.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionMessage.java
index 0c47b88..23b2c5e 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionMessage.java
@@ -335,10 +335,6 @@ public abstract class DistributionMessage implements DataSerializableFixedID, Cl
     return dm.getExecutor(getProcessorType(), sender);
   }
 
-  // private Executor getExecutor(DistributionManager dm, Class clazz) {
-  // return dm.getExecutor(getProcessorType());
-  // }
-
   public abstract int getProcessorType();
 
   /**
@@ -352,8 +348,8 @@ public abstract class DistributionMessage implements DataSerializableFixedID, Cl
    * Scheduled action to take when on this message when we are ready to process it.
    */
   protected void scheduleAction(final ClusterDistributionManager dm) {
-    if (logger.isTraceEnabled(LogMarker.DM)) {
-      logger.trace(LogMarker.DM, "Processing '{}'", this);
+    if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+      logger.trace(LogMarker.DM_VERBOSE, "Processing '{}'", this);
     }
     String reason = dm.getCancelCriterion().cancelInProgress();
     if (reason != null) {
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/StartupResponseMessage.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/StartupResponseMessage.java
index 06b835b..bff2d5e 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/StartupResponseMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/StartupResponseMessage.java
@@ -182,8 +182,8 @@ public class StartupResponseMessage extends HighPriorityDistributionMessage
           proc.setReceivedAcceptance(true);
         }
         proc.process(this);
-        if (logger.isTraceEnabled(LogMarker.DM)) {
-          logger.trace(LogMarker.DM, "{} Processed {}", proc, this);
+        if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+          logger.trace(LogMarker.DM_VERBOSE, "{} Processed {}", proc, this);
         }
       }
     } // proc != null
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/direct/DirectChannel.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/direct/DirectChannel.java
index af1f488..a149bf3 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/direct/DirectChannel.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/direct/DirectChannel.java
@@ -545,8 +545,8 @@ public class DirectChannel {
       if (!mgr.memberExists(destination) || mgr.shutdownInProgress()
           || mgr.isShunned(destination)) {
         // This should only happen if the member is no longer in the view.
-        if (logger.isTraceEnabled(LogMarker.DM)) {
-          logger.trace(LogMarker.DM, "Not a member: {}", destination);
+        if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+          logger.trace(LogMarker.DM_VERBOSE, "Not a member: {}", destination);
         }
         if (ce == null)
           ce = new ConnectExceptions();
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockGrantor.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockGrantor.java
index 34f2e6b..97f9068 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockGrantor.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockGrantor.java
@@ -15,7 +15,16 @@
 
 package org.apache.geode.distributed.internal.locks;
 
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.atomic.AtomicLong;
 
@@ -468,15 +477,15 @@ public class DLockGrantor {
     handler.waitForInProcessDepartures();
 
     synchronized (this.batchLocks) { // assures serial processing
-      waitWhileInitializing(); // calcWaitMillisFromNow
+      waitWhileInitializing();
       if (request.checkForTimeout()) {
         cleanupSuspendState(request);
         return;
       }
 
-      final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS);
-      if (isDebugEnabled_DLS) {
-        logger.trace(LogMarker.DLS, "[DLockGrantor.handleLockBatch]");
+      final boolean isTraceEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS_VERBOSE);
+      if (isTraceEnabled_DLS) {
+        logger.trace(LogMarker.DLS_VERBOSE, "[DLockGrantor.handleLockBatch]");
       }
       if (!acquireDestroyReadLock(0)) {
         waitUntilDestroyed();
@@ -484,60 +493,19 @@ public class DLockGrantor {
       }
       try {
         checkDestroyed();
-        if (isDebugEnabled_DLS) {
-          logger.trace(LogMarker.DLS, "[DLockGrantor.handleLockBatch] request: {}", request);
+        if (isTraceEnabled_DLS) {
+          logger.trace(LogMarker.DLS_VERBOSE, "[DLockGrantor.handleLockBatch] request: {}",
+              request);
         }
 
         DLockBatch batch = (DLockBatch) request.getObjectName();
         this.resMgr.makeReservation((IdentityArrayList) batch.getReqs());
-        if (isDebugEnabled_DLS) {
-          logger.trace(LogMarker.DLS, "[DLockGrantor.handleLockBatch] granting {}",
+        if (isTraceEnabled_DLS) {
+          logger.trace(LogMarker.DLS_VERBOSE, "[DLockGrantor.handleLockBatch] granting {}",
               batch.getBatchId());
         }
         this.batchLocks.put(batch.getBatchId(), batch);
         request.respondWithGrant(Long.MAX_VALUE);
-        // // try-lock every lock in batch...
-        // Object name = null;
-        // Set lockNames = batch.getLockNames();
-        // Set acquiredLocks = new HashSet();
-        // long leaseExpireTime = -1;
-
-        // for (Iterator iter = lockNames.iterator(); iter.hasNext();) {
-        // name = iter.next();
-        // DLockGrantToken grant = getOrCreateGrant(
-        // this.dlock.getOrCreateToken(name));
-
-        // // calc lease expire time just once...
-        // if (leaseExpireTime == -1) {
-        // leaseExpireTime = grant.calcLeaseExpireTime(request.getLeaseTime());
-        // }
-
-        // // try to grant immediately else fail...
-        // if (grant.grantBatchLock(request.getSender(), leaseExpireTime)) {
-        // acquiredLocks.add(grant);
-        // } else {
-        // // fail out and release all..
-        // break;
-        // }
-        // } // for-loop
-
-        // if (acquiredLocks.size() == lockNames.size()) {
-        // // got the locks!
-        // logFine("[DLockGrantor.handleLockBatch] granting " +
-        // batch.getBatchId() + "; leaseExpireTime=" + leaseExpireTime);
-
-        // // save the batch for later release...
-        // this.batchLocks.put(batch.getBatchId(), batch);
-        // request.respondWithGrant(leaseExpireTime);
-        // }
-        // else {
-        // // failed... release them all...
-        // for (Iterator iter = acquiredLocks.iterator(); iter.hasNext();) {
-        // DLockGrantToken grant = (DLockGrantToken) iter.next();
-        // grant.release();
-        // }
-        // request.respondWithTryLockFailed(name);
-        // }
       } catch (CommitConflictException ex) {
         request.respondWithTryLockFailed(ex.getMessage());
       } finally {
@@ -583,9 +551,9 @@ public class DLockGrantor {
    */
   public DLockBatch getLockBatch(Object batchId) throws InterruptedException {
     DLockBatch ret = null;
-    final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS);
-    if (isDebugEnabled_DLS) {
-      logger.trace(LogMarker.DLS, "[DLockGrantor.getLockBatch] enter: {}", batchId);
+    final boolean isTraceEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS_VERBOSE);
+    if (isTraceEnabled_DLS) {
+      logger.trace(LogMarker.DLS_VERBOSE, "[DLockGrantor.getLockBatch] enter: {}", batchId);
     }
     synchronized (this.batchLocks) {
       waitWhileInitializing();
@@ -600,8 +568,8 @@ public class DLockGrantor {
         releaseDestroyReadLock();
       }
     }
-    if (isDebugEnabled_DLS) {
-      logger.trace(LogMarker.DLS, "[DLockGrantor.getLockBatch] exit: {}", batchId);
+    if (isTraceEnabled_DLS) {
+      logger.trace(LogMarker.DLS_VERBOSE, "[DLockGrantor.getLockBatch] exit: {}", batchId);
     }
     return ret;
   }
@@ -620,9 +588,9 @@ public class DLockGrantor {
    * @see org.apache.geode.internal.cache.locks.TXLockBatch#getBatchId()
    */
   public void updateLockBatch(Object batchId, DLockBatch newBatch) throws InterruptedException {
-    final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS);
-    if (isDebugEnabled_DLS) {
-      logger.trace(LogMarker.DLS, "[DLockGrantor.updateLockBatch] enter: {}", batchId);
+    final boolean isTraceEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS_VERBOSE);
+    if (isTraceEnabled_DLS) {
+      logger.trace(LogMarker.DLS_VERBOSE, "[DLockGrantor.updateLockBatch] enter: {}", batchId);
     }
     synchronized (this.batchLocks) {
       waitWhileInitializing();
@@ -640,8 +608,8 @@ public class DLockGrantor {
         releaseDestroyReadLock();
       }
     }
-    if (isDebugEnabled_DLS) {
-      logger.trace(LogMarker.DLS, "[DLockGrantor.updateLockBatch] exit: {}", batchId);
+    if (isTraceEnabled_DLS) {
+      logger.trace(LogMarker.DLS_VERBOSE, "[DLockGrantor.updateLockBatch] exit: {}", batchId);
     }
   }
 
@@ -656,8 +624,8 @@ public class DLockGrantor {
    */
   public void releaseLockBatch(Object batchId, InternalDistributedMember owner)
       throws InterruptedException {
-    if (logger.isTraceEnabled(LogMarker.DLS)) {
-      logger.trace(LogMarker.DLS, "[DLockGrantor.releaseLockBatch]");
+    if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+      logger.trace(LogMarker.DLS_VERBOSE, "[DLockGrantor.releaseLockBatch]");
     }
     synchronized (this.batchLocks) {
       waitWhileInitializing();
@@ -671,13 +639,6 @@ public class DLockGrantor {
         if (batch != null) {
           this.resMgr.releaseReservation((IdentityArrayList) batch.getReqs());
         }
-        // Set lockNames = batch.getLockNames();
-        // for (Iterator iter = lockNames.iterator(); iter.hasNext();) {
-        // Object name = iter.next();
-        // DLockGrantToken grant = getOrCreateGrant(
-        // this.dlock.getOrCreateToken(name));
-        // grant.releaseIfLockedBy(owner);
-        // }
       } finally {
         releaseDestroyReadLock();
       }
@@ -720,8 +681,8 @@ public class DLockGrantor {
    * @return DLockGrantToken for the lock or null
    */
   DLockGrantToken handleLockQuery(DLockQueryMessage query) throws InterruptedException {
-    if (logger.isTraceEnabled(LogMarker.DLS)) {
-      logger.trace(LogMarker.DLS, "[DLockGrantor.handleLockQuery] {}", query);
+    if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+      logger.trace(LogMarker.DLS_VERBOSE, "[DLockGrantor.handleLockQuery] {}", query);
     }
     if (acquireDestroyReadLock(0)) {
       try {
@@ -752,22 +713,23 @@ public class DLockGrantor {
 
     waitWhileInitializing(); // calcWaitMillisFromNow
 
-    final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS);
-    if (isDebugEnabled_DLS) {
-      logger.trace(LogMarker.DLS, "[DLockGrantor.handleLockRequest] {}", request);
+    final boolean isTraceEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS_VERBOSE);
+    if (isTraceEnabled_DLS) {
+      logger.trace(LogMarker.DLS_VERBOSE, "[DLockGrantor.handleLockRequest] {}", request);
     }
 
     if (!acquireDestroyReadLock(0)) {
       if (isLocalRequest(request) && this.dlock.isDestroyed()) {
-        if (isDebugEnabled_DLS) {
-          logger.trace(LogMarker.DLS, "[DLockGrantor.handleLockRequest] about to throwIfDestroyed");
+        if (isTraceEnabled_DLS) {
+          logger.trace(LogMarker.DLS_VERBOSE,
+              "[DLockGrantor.handleLockRequest] about to throwIfDestroyed");
         }
         // this special case is one fix for deadlock between waitUntilDestroyed
         // and dlock waitForGrantorCallsInProgress (when request is local)
         throwIfDestroyed(true);
       } else {
-        if (isDebugEnabled_DLS) {
-          logger.trace(LogMarker.DLS,
+        if (isTraceEnabled_DLS) {
+          logger.trace(LogMarker.DLS_VERBOSE,
               "[DLockGrantor.handleLockRequest] about to waitUntilDestroyed");
         }
         // is there still a deadlock when an explicit become is destroying
@@ -805,8 +767,8 @@ public class DLockGrantor {
    *        {@link #acquireDestroyReadLock(long)}
    */
   private void handlePermittedLockRequest(final DLockRequestMessage request) {
-    if (logger.isTraceEnabled(LogMarker.DLS)) {
-      logger.trace(LogMarker.DLS, "[DLockGrantor.handlePermittedLockRequest] {}", request);
+    if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+      logger.trace(LogMarker.DLS_VERBOSE, "[DLockGrantor.handlePermittedLockRequest] {}", request);
     }
     Assert.assertTrue(request.getRemoteThread() != null);
     DLockGrantToken grant = getOrCreateGrant(request.getObjectName());
@@ -866,7 +828,7 @@ public class DLockGrantor {
       synchronized (this.grantTokens) {
         Set members = this.dlock.getDistributionManager().getDistributionManagerIds();
 
-        final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS);
+        final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS_VERBOSE);
         for (Iterator iter = tokens.iterator(); iter.hasNext();) {
           DLockRemoteToken token = (DLockRemoteToken) iter.next();
           DLockGrantToken grantToken = getOrCreateGrant(token.getName());
@@ -876,7 +838,7 @@ public class DLockGrantor {
             if (!members.contains(owner)) {
               // skipping because member is no longer in view
               if (isDebugEnabled_DLS) {
-                logger.trace(LogMarker.DLS,
+                logger.trace(LogMarker.DLS_VERBOSE,
                     "Initialization of held locks is skipping {} because owner {} is not in view: ",
                     token, owner, members);
               }
@@ -889,7 +851,7 @@ public class DLockGrantor {
 
             synchronized (grantToken) {
               if (grantToken.isLeaseHeld()) {
-                logger.error(LogMarker.DLS,
+                logger.error(LogMarker.DLS_MARKER,
                     LocalizedMessage.create(
                         LocalizedStrings.DLockGrantor_INITIALIZATION_OF_HELD_LOCKS_IS_SKIPPING_0_BECAUSE_LOCK_IS_ALREADY_HELD_1,
                         new Object[] {token, grantToken}));
@@ -960,9 +922,9 @@ public class DLockGrantor {
       // to fix GEODE-678 no longer call request.checkForTimeout
       DLockGrantToken grant = getGrantToken(request.getObjectName());
       if (grant == null) {
-        if (logger.isTraceEnabled(LogMarker.DLS)) {
-          logger.trace(LogMarker.DLS, "[DLockGrantor.reenterLock] no grantToken found for {}",
-              request.getObjectName());
+        if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+          logger.trace(LogMarker.DLS_VERBOSE,
+              "[DLockGrantor.reenterLock] no grantToken found for {}", request.getObjectName());
         }
         return 0;
       }
@@ -972,8 +934,8 @@ public class DLockGrantor {
           return 0;
         }
         if (!grant.isLockedBy(request.getSender(), request.getLockId())) {
-          if (logger.isTraceEnabled(LogMarker.DLS)) {
-            logger.trace(LogMarker.DLS,
+          if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+            logger.trace(LogMarker.DLS_VERBOSE,
                 "[DLockGrantor.reenterLock] grant is not locked by sender={} lockId={} grant={}",
                 request.getSender(), request.getLockId(), grant);
           }
@@ -1089,12 +1051,12 @@ public class DLockGrantor {
     // bug 32657 has another cause in this method... interrupted thread from
     // connection/channel layer caused acquireDestroyReadLock to fail...
     // fixed by Darrel in org.apache.geode.internal.tcp.Connection
-    final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS);
+    final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS_VERBOSE);
     if (acquireDestroyReadLock(0)) {
       try {
         if (isDestroyed()) {
           if (isDebugEnabled_DLS) {
-            logger.trace(LogMarker.DLS,
+            logger.trace(LogMarker.DLS_VERBOSE,
                 "[DLockGrantor.handleDepartureOf] grantor is destroyed; ignoring {}", owner);
           }
           return;
@@ -1102,14 +1064,15 @@ public class DLockGrantor {
         try {
           DLockLessorDepartureHandler handler = this.dlock.getDLockLessorDepartureHandler();
           if (isDebugEnabled_DLS) {
-            logger.trace(LogMarker.DLS, "[DLockGrantor.handleDepartureOf] handler = {}", handler);
+            logger.trace(LogMarker.DLS_VERBOSE, "[DLockGrantor.handleDepartureOf] handler = {}",
+                handler);
           }
           if (handler != null) {
             handler.handleDepartureOf(owner, this);
           }
         } catch (CancelException e) {
           if (isDebugEnabled_DLS) {
-            logger.trace(LogMarker.DLS,
+            logger.trace(LogMarker.DLS_VERBOSE,
                 "[DlockGrantor.handleDepartureOf] ignored cancellation (1)");
           }
         } finally {
@@ -1127,7 +1090,7 @@ public class DLockGrantor {
                 postReleaseLock((RemoteThread) it.next(), null);
               } catch (CancelException e) {
                 if (isDebugEnabled_DLS) {
-                  logger.trace(LogMarker.DLS,
+                  logger.trace(LogMarker.DLS_VERBOSE,
                       "[DlockGrantor.handleDepartureOf] ignored cancellation (2)");
                 }
               }
@@ -1146,7 +1109,7 @@ public class DLockGrantor {
                 grant.checkDepartureOf(owner, grantsReferencingMember);
               } catch (CancelException e) {
                 if (isDebugEnabled_DLS) {
-                  logger.trace(LogMarker.DLS,
+                  logger.trace(LogMarker.DLS_VERBOSE,
                       "[DlockGrantor.handleDepartureOf] ignored cancellation (3)");
                 }
               }
@@ -1160,7 +1123,7 @@ public class DLockGrantor {
                 grant.handleDepartureOf(owner, grantsToRemoveIfUnused);
               } catch (CancelException e) {
                 if (isDebugEnabled_DLS) {
-                  logger.trace(LogMarker.DLS,
+                  logger.trace(LogMarker.DLS_VERBOSE,
                       "[DlockGrantor.handleDepartureOf] ignored cancellation (4)");
                 }
               }
@@ -1174,7 +1137,7 @@ public class DLockGrantor {
                 removeGrantIfUnused(grant);
               } catch (CancelException e) {
                 if (isDebugEnabled_DLS) {
-                  logger.trace(LogMarker.DLS,
+                  logger.trace(LogMarker.DLS_VERBOSE,
                       "[DlockGrantor.handleDepartureOf] ignored cancellation (5)");
                 }
               }
@@ -1196,9 +1159,9 @@ public class DLockGrantor {
     synchronized (this) {
       if (isDestroyed())
         return;
-      final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS);
+      final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS_VERBOSE);
       if (isDebugEnabled_DLS) {
-        logger.trace(LogMarker.DLS, "[simpleDestroy]");
+        logger.trace(LogMarker.DLS_VERBOSE, "[simpleDestroy]");
       }
       // wait for the destroy write lock ignoring interrupts...
       boolean acquired = false;
@@ -1226,7 +1189,7 @@ public class DLockGrantor {
             }
           }
           if (isDebugEnabled_DLS) {
-            logger.trace(LogMarker.DLS, "[simpleDestroy] {} locks held",
+            logger.trace(LogMarker.DLS_VERBOSE, "[simpleDestroy] {} locks held",
                 (locksHeld ? "with" : "without"));
           }
         } finally {
@@ -1264,40 +1227,21 @@ public class DLockGrantor {
       Collection grants = this.grantTokens.values();
       for (Iterator iter = grants.iterator(); iter.hasNext();) {
         DLockGrantToken grant = (DLockGrantToken) iter.next();
-        try {
-          grant.handleGrantorDestruction();
-        }
-        // catch (VirtualMachineError err) {
-        // SystemFailure.initiateFailure(err);
-        // // If this ever returns, rethrow the error. We're poisoned
-        // // now, so don't let this thread continue.
-        // throw err;
-        // }
-        // catch (Throwable t) {
-        // // Whenever you catch Error or Throwable, you must also
-        // // catch VirtualMachineError (see above). However, there is
-        // // _still_ a possibility that you are dealing with a cascading
-        // // error condition, so you also need to check to see if the JVM
-        // // is still usable:
-        // SystemFailure.checkFailure();
-        // }
-        finally {
-
-        }
+        grant.handleGrantorDestruction();
       }
     }
 
     synchronized (suspendLock) {
-      final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS);
+      final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS_VERBOSE);
       if (isDebugEnabled_DLS) {
-        logger.trace(LogMarker.DLS,
+        logger.trace(LogMarker.DLS_VERBOSE,
             "[DLockGrantor.destroyAndRemove] responding to {} permitted requests.",
             permittedRequests.size());
       }
       respondWithNotGrantor(permittedRequests.iterator());
 
       if (isDebugEnabled_DLS) {
-        logger.trace(LogMarker.DLS,
+        logger.trace(LogMarker.DLS_VERBOSE,
             "[DLockGrantor.destroyAndRemove] responding to {} requests awaiting permission.",
             suspendQueue.size());
       }
@@ -1306,7 +1250,7 @@ public class DLockGrantor {
       for (Iterator iter = permittedRequestsDrain.iterator(); iter.hasNext();) {
         final List drain = (List) iter.next();
         if (isDebugEnabled_DLS) {
-          logger.trace(LogMarker.DLS,
+          logger.trace(LogMarker.DLS_VERBOSE,
               "[DLockGrantor.destroyAndRemove] responding to {} drained permitted requests.",
               drain.size());
         }
@@ -1325,40 +1269,11 @@ public class DLockGrantor {
   private void respondWithNotGrantor(Iterator requests) {
     while (requests.hasNext()) {
       final DLockRequestMessage request = (DLockRequestMessage) requests.next();
-      try {
-        request.respondWithNotGrantor();
-      }
-      // catch (VirtualMachineError err) {
-      // SystemFailure.initiateFailure(err);
-      // // If this ever returns, rethrow the error. We're poisoned
-      // // now, so don't let this thread continue.
-      // throw err;
-      // }
-      // catch (Throwable t) {
-      // // Whenever you catch Error or Throwable, you must also
-      // // catch VirtualMachineError (see above). However, there is
-      // // _still_ a possibility that you are dealing with a cascading
-      // // error condition, so you also need to check to see if the JVM
-      // // is still usable:
-      // SystemFailure.checkFailure();
-      // }
-      finally {
-
-      }
+      request.respondWithNotGrantor();
     }
   }
 
   /**
-   * TEST HOOK: Log additional debugging info about this grantor.
-   */
-  void debug() {
-    logger.info(LogMarker.DLS,
-        LocalizedMessage.create(LocalizedStrings.TESTING,
-            "[DLockGrantor.debug] svc=" + this.dlock.getName() + "; state=" + this.state
-                + "; initLatch.ct=" + this.whileInitializing.getCount()));
-  }
-
-  /**
    * Make this grantor ready for handling lock requests.
    * <p>
    * Synchronizes on this grantor.
@@ -1376,13 +1291,13 @@ public class DLockGrantor {
       }
     }
     assertInitializing();
-    if (logger.isTraceEnabled(LogMarker.DLS)) {
+    if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
       StringBuffer sb =
           new StringBuffer("DLockGrantor " + this.dlock.getName() + " initialized with:");
       for (Iterator tokens = grantTokens.values().iterator(); tokens.hasNext();) {
         sb.append("\n\t" + tokens.next());
       }
-      logger.trace(LogMarker.DLS, sb.toString());
+      logger.trace(LogMarker.DLS_VERBOSE, sb.toString());
     }
     this.state = READY;
     this.whileInitializing.countDown();
@@ -1409,10 +1324,8 @@ public class DLockGrantor {
       checkDestroyed();
       drainPermittedRequests();
       grantLock(objectName);
-    } catch (LockServiceDestroyedException e) {
+    } catch (LockServiceDestroyedException | LockGrantorDestroyedException e) {
       // ignore... service was destroyed and that's ok
-    } catch (LockGrantorDestroyedException e) {
-      // ignore... grantor was destroyed and that's ok
     } finally {
       releaseDestroyReadLock();
     }
@@ -1510,8 +1423,9 @@ public class DLockGrantor {
     try {
       this.thread.shutdown();
       this.state = DESTROYED;
-      if (logger.isTraceEnabled(LogMarker.DLS)) {
-        logger.trace(LogMarker.DLS, "DLockGrantor {} state is DESTROYED", this.dlock.getName());
+      if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+        logger.trace(LogMarker.DLS_VERBOSE, "DLockGrantor {} state is DESTROYED",
+            this.dlock.getName());
       }
       if (this.untilDestroyed.getCount() > 0) {
         this.untilDestroyed.countDown();
@@ -1669,9 +1583,9 @@ public class DLockGrantor {
     if (removed != null) {
       Assert.assertTrue(removed == grantToken);
       grantToken.destroy();
-      if (logger.isTraceEnabled(LogMarker.DLS)) {
-        logger.trace(LogMarker.DLS, "[DLockGrantor.basicRemoveGrantToken] removed {}; removed={}",
-            grantToken, removed);
+      if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+        logger.trace(LogMarker.DLS_VERBOSE,
+            "[DLockGrantor.basicRemoveGrantToken] removed {}; removed={}", grantToken, removed);
       }
     }
   }
@@ -1783,7 +1697,7 @@ public class DLockGrantor {
     }
     if (localDebugHandleSuspendTimeouts > 0) {
       try {
-        logger.info(LogMarker.DLS,
+        logger.info(LogMarker.DLS_MARKER,
             LocalizedMessage.create(
                 LocalizedStrings.DLockGrantor_DEBUGHANDLESUSPENDTIMEOUTS_SLEEPING_FOR__0,
                 localDebugHandleSuspendTimeouts));
@@ -1877,9 +1791,9 @@ public class DLockGrantor {
     if (DEBUG_SUSPEND_LOCK) {
       Assert.assertHoldsLock(this.suspendLock, true);
     }
-    if (logger.isTraceEnabled(LogMarker.DLS)) {
-      logger.trace(LogMarker.DLS, "Suspend locking of {} by {} with lockId of {}", this.dlock,
-          myRThread, lockId);
+    if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+      logger.trace(LogMarker.DLS_VERBOSE, "Suspend locking of {} by {} with lockId of {}",
+          this.dlock, myRThread, lockId);
     }
     Assert.assertTrue(myRThread != null, "Attempted to suspend locking for null RemoteThread");
     Assert.assertTrue(this.lockingSuspendedBy == null || this.lockingSuspendedBy.equals(myRThread),
@@ -1898,8 +1812,8 @@ public class DLockGrantor {
     if (DEBUG_SUSPEND_LOCK) {
       Assert.assertHoldsLock(this.suspendLock, true);
     }
-    if (logger.isTraceEnabled(LogMarker.DLS)) {
-      logger.trace(LogMarker.DLS, "Resume locking of {}", this.dlock);
+    if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+      logger.trace(LogMarker.DLS_VERBOSE, "Resume locking of {}", this.dlock);
     }
     this.lockingSuspendedBy = null;
     this.suspendedLockId = INVALID_LOCK_ID;
@@ -1983,9 +1897,9 @@ public class DLockGrantor {
   private void postReleaseSuspendLock(RemoteThread rThread, Object lock) {
     if (!isLockingSuspendedBy(rThread)) {
       // hit bug related to 35749
-      if (logger.isTraceEnabled(LogMarker.DLS)) {
-        logger.trace(LogMarker.DLS, "[postReleaseSuspendLock] locking is no longer suspended by {}",
-            rThread);
+      if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+        logger.trace(LogMarker.DLS_VERBOSE,
+            "[postReleaseSuspendLock] locking is no longer suspended by {}", rThread);
       }
       return;
     }
@@ -2025,8 +1939,8 @@ public class DLockGrantor {
         permittedRequests.add(suspendQueue.removeFirst());
       }
     }
-    if (logger.isTraceEnabled(LogMarker.DLS)) {
-      logger.trace(LogMarker.DLS, "[postReleaseSuspendLock] new status {}",
+    if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+      logger.trace(LogMarker.DLS_VERBOSE, "[postReleaseSuspendLock] new status {}",
           displayStatus(rThread, null));
     }
   }
@@ -2035,7 +1949,7 @@ public class DLockGrantor {
    * guarded.By {@link #suspendLock}
    */
   private void postReleaseReadLock(RemoteThread rThread, Object lock) {
-    final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS);
+    final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS_VERBOSE);
 
     // handle release of regular lock
     // boolean permitSuspend = false;
@@ -2045,8 +1959,8 @@ public class DLockGrantor {
     if (readLockCount < 1) {
       // hit bug 35749
       if (isDebugEnabled_DLS) {
-        logger.trace(LogMarker.DLS, "[postReleaseReadLock] no locks are currently held by {}",
-            rThread);
+        logger.trace(LogMarker.DLS_VERBOSE,
+            "[postReleaseReadLock] no locks are currently held by {}", rThread);
       }
       return;
     }
@@ -2061,7 +1975,7 @@ public class DLockGrantor {
 
     if (totalReadLockCount < 0) {
       if (isDebugEnabled_DLS) {
-        logger.trace(LogMarker.DLS, "Total readlock count has dropped to {} for {}",
+        logger.trace(LogMarker.DLS_VERBOSE, "Total readlock count has dropped to {} for {}",
             totalReadLockCount, this);
       }
     }
@@ -2086,7 +2000,7 @@ public class DLockGrantor {
       }
     }
     if (isDebugEnabled_DLS) {
-      logger.trace(LogMarker.DLS, "[postReleaseReadLock] new status {}",
+      logger.trace(LogMarker.DLS_VERBOSE, "[postReleaseReadLock] new status {}",
           displayStatus(rThread, null));
     }
     checkTotalReadLockCount();
@@ -2104,8 +2018,8 @@ public class DLockGrantor {
     Assert.assertTrue(rThread != null);
     synchronized (suspendLock) {
       checkDestroyed();
-      if (logger.isTraceEnabled(LogMarker.DLS)) {
-        logger.trace(LogMarker.DLS,
+      if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+        logger.trace(LogMarker.DLS_VERBOSE,
             "[postReleaseLock] rThread={} lock={} permittedRequests={} suspendQueue={}", rThread,
             lock, permittedRequests, suspendQueue);
       }
@@ -2151,9 +2065,9 @@ public class DLockGrantor {
       this.permittedRequests = new ArrayList();
     } // suspendLock sync
 
-    final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS);
+    final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS_VERBOSE);
     if (isDebugEnabled_DLS) {
-      logger.trace(LogMarker.DLS, "[drainPermittedRequests] draining {}", drain);
+      logger.trace(LogMarker.DLS_VERBOSE, "[drainPermittedRequests] draining {}", drain);
     }
 
     // iterate and attempt to grantOrSchedule each request
@@ -2165,8 +2079,8 @@ public class DLockGrantor {
       } catch (LockGrantorDestroyedException e) {
         try {
           if (isDebugEnabled_DLS) {
-            logger.trace(LogMarker.DLS, "LockGrantorDestroyedException respondWithNotGrantor to {}",
-                request);
+            logger.trace(LogMarker.DLS_VERBOSE,
+                "LockGrantorDestroyedException respondWithNotGrantor to {}", request);
           }
           request.respondWithNotGrantor();
         } finally {
@@ -2175,8 +2089,8 @@ public class DLockGrantor {
       } catch (LockServiceDestroyedException e) {
         try {
           if (isDebugEnabled_DLS) {
-            logger.trace(LogMarker.DLS, "LockServiceDestroyedException respondWithNotGrantor to {}",
-                request);
+            logger.trace(LogMarker.DLS_VERBOSE,
+                "LockServiceDestroyedException respondWithNotGrantor to {}", request);
           }
           request.respondWithNotGrantor();
         } finally {
@@ -2209,34 +2123,34 @@ public class DLockGrantor {
     synchronized (suspendLock) {
       checkDestroyed();
       if (!dm.isCurrentMember(request.getSender())) {
-        logger.info(LogMarker.DLS, LocalizedMessage
+        logger.info(LogMarker.DLS_MARKER, LocalizedMessage
             .create(LocalizedStrings.DLockGrantor_IGNORING_LOCK_REQUEST_FROM_NONMEMBER_0, request));
         return false;
       }
       Integer integer = (Integer) readLockCountMap.get(rThread);
       int readLockCount = integer == null ? 0 : integer.intValue();
       boolean othersHaveReadLocks = totalReadLockCount > readLockCount;
-      final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS);
+      final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS_VERBOSE);
       if (isLockingSuspended() || writeLockWaiters > 0 || othersHaveReadLocks) {
         writeLockWaiters++;
         suspendQueue.addLast(request);
         this.thread.checkTimeToWait(calcWaitMillisFromNow(request), false);
         checkWriteLockWaiters();
         if (isDebugEnabled_DLS) {
-          logger.trace(LogMarker.DLS,
+          logger.trace(LogMarker.DLS_VERBOSE,
               "[DLockGrantor.acquireSuspend] added '{}' to end of suspendQueue.", request);
         }
       } else {
         permitLockRequest = true;
         suspendLocking(rThread, request.getLockId());
         if (isDebugEnabled_DLS) {
-          logger.trace(LogMarker.DLS,
+          logger.trace(LogMarker.DLS_VERBOSE,
               "[DLockGrantor.acquireSuspendLockPermission] permitted and suspended for {}",
               request);
         }
       }
       if (isDebugEnabled_DLS) {
-        logger.trace(LogMarker.DLS,
+        logger.trace(LogMarker.DLS_VERBOSE,
             "[DLockGrantor.acquireSuspendLockPermission] new status  permitLockRequest = {}{}",
             permitLockRequest, displayStatus(rThread, null));
       }
@@ -2254,19 +2168,19 @@ public class DLockGrantor {
     synchronized (suspendLock) {
       checkDestroyed();
       if (!dm.isCurrentMember(request.getSender())) {
-        logger.info(LogMarker.DLS, LocalizedMessage
+        logger.info(LogMarker.DLS_MARKER, LocalizedMessage
             .create(LocalizedStrings.DLockGrantor_IGNORING_LOCK_REQUEST_FROM_NONMEMBER_0, request));
         return false;
       }
       Integer integer = (Integer) readLockCountMap.get(rThread);
       int readLockCount = integer == null ? 0 : integer.intValue();
       boolean threadHoldsLock = readLockCount > 0 || isLockingSuspendedBy(rThread);
-      final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS);
+      final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS_VERBOSE);
       if (!threadHoldsLock && (isLockingSuspended() || writeLockWaiters > 0)) {
         suspendQueue.addLast(request);
         this.thread.checkTimeToWait(calcWaitMillisFromNow(request), false);
         if (isDebugEnabled_DLS) {
-          logger.trace(LogMarker.DLS,
+          logger.trace(LogMarker.DLS_VERBOSE,
               "[DLockGrantor.acquireReadLockPermission] added {} to end of suspendQueue.", request);
         }
       } else {
@@ -2275,12 +2189,12 @@ public class DLockGrantor {
         totalReadLockCount++;
         permitLockRequest = true;
         if (isDebugEnabled_DLS) {
-          logger.trace(LogMarker.DLS, "[DLockGrantor.acquireReadLockPermission] permitted {}",
-              request);
+          logger.trace(LogMarker.DLS_VERBOSE,
+              "[DLockGrantor.acquireReadLockPermission] permitted {}", request);
         }
       }
       if (isDebugEnabled_DLS) {
-        logger.trace(LogMarker.DLS,
+        logger.trace(LogMarker.DLS_VERBOSE,
             "[DLockGrantor.acquireReadLockPermission] new status  threadHoldsLock = {} permitLockRequest = {}{}",
             threadHoldsLock, permitLockRequest, displayStatus(rThread, null));
       }
@@ -2298,8 +2212,8 @@ public class DLockGrantor {
    * @param request the lock request to acquire permission for
    */
   private boolean acquireLockPermission(final DLockRequestMessage request) {
-    if (logger.isTraceEnabled(LogMarker.DLS)) {
-      logger.trace(LogMarker.DLS, "[DLockGrantor.acquireLockPermission] {}", request);
+    if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+      logger.trace(LogMarker.DLS_VERBOSE, "[DLockGrantor.acquireLockPermission] {}", request);
     }
 
     boolean permitLockRequest = false;
@@ -2344,8 +2258,8 @@ public class DLockGrantor {
         DLockGrantToken token = (DLockGrantToken) entry.getValue();
         buffer.append(token.toString()).append("\n");
       }
-      logger.info(LogMarker.DLS, LocalizedMessage.create(LocalizedStrings.TESTING, buffer));
-      logger.info(LogMarker.DLS, LocalizedMessage.create(LocalizedStrings.TESTING,
+      logger.info(LogMarker.DLS_MARKER, LocalizedMessage.create(LocalizedStrings.TESTING, buffer));
+      logger.info(LogMarker.DLS_MARKER, LocalizedMessage.create(LocalizedStrings.TESTING,
           "\nreadLockCountMap:\n" + readLockCountMap));
     }
   }
@@ -2501,8 +2415,9 @@ public class DLockGrantor {
       }
 
       // add the request to the sorted set...
-      if (logger.isTraceEnabled(LogMarker.DLS)) {
-        logger.trace(LogMarker.DLS, "[DLockGrantToken.schedule] {} scheduling: {}", this, request);
+      if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+        logger.trace(LogMarker.DLS_VERBOSE, "[DLockGrantToken.schedule] {} scheduling: {}", this,
+            request);
       }
       if (this.pendingRequests == null) {
         this.pendingRequests = new LinkedList();
@@ -2588,8 +2503,9 @@ public class DLockGrantor {
         return false;
       }
 
-      if (logger.isTraceEnabled(LogMarker.DLS)) {
-        logger.trace(LogMarker.DLS, "[DLockGrantToken.grantLockToRequest] granting: {}", request);
+      if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+        logger.trace(LogMarker.DLS_VERBOSE, "[DLockGrantToken.grantLockToRequest] granting: {}",
+            request);
       }
 
       long newLeaseExpireTime = grantAndRespondToRequest(request);
@@ -2630,9 +2546,10 @@ public class DLockGrantor {
       }
       if (released) {
         // don't bother synchronizing requests for this log statement...
-        if (logger.isTraceEnabled(LogMarker.DLS)) {
+        if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
           synchronized (this) {
-            logger.trace(LogMarker.DLS, "[DLockGrantToken.releaseIfLockedBy] pending requests: {}",
+            logger.trace(LogMarker.DLS_VERBOSE,
+                "[DLockGrantToken.releaseIfLockedBy] pending requests: {}",
                 (this.pendingRequests == null ? "none" : "" + this.pendingRequests.size()));
           }
         }
@@ -2711,7 +2628,7 @@ public class DLockGrantor {
         final ArrayList grantsToRemoveIfUnused) {
       boolean released = false;
       RemoteThread rThread = null;
-      final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS);
+      final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS_VERBOSE);
       try {
         synchronized (this) {
           try {
@@ -2731,7 +2648,7 @@ public class DLockGrantor {
                   this.grantor.cleanupSuspendState(req);
                 } catch (CancelException e) {
                   if (isDebugEnabled_DLS) {
-                    logger.trace(LogMarker.DLS,
+                    logger.trace(LogMarker.DLS_VERBOSE,
                         "[DLockGrantToken.handleDepartureOf] ignored cancellation (1)");
                   }
                 }
@@ -2757,7 +2674,7 @@ public class DLockGrantor {
               if (releasedToken) {
                 released = true;
                 if (isDebugEnabled_DLS) {
-                  logger.trace(LogMarker.DLS,
+                  logger.trace(LogMarker.DLS_VERBOSE,
                       "[DLockGrantToken.handleDepartureOf] pending requests: {}",
                       (this.pendingRequests == null ? "none" : "" + this.pendingRequests.size()));
                 }
@@ -2772,7 +2689,7 @@ public class DLockGrantor {
             this.grantor.postReleaseLock(rThread, getName());
           } catch (CancelException e) {
             if (isDebugEnabled_DLS) {
-              logger.trace(LogMarker.DLS,
+              logger.trace(LogMarker.DLS_VERBOSE,
                   "[DLockGrantToken.handleDepartureOf] ignored cancellation (2)");
             }
           }
@@ -2838,9 +2755,9 @@ public class DLockGrantor {
      * @return true if the lock was granted to next request
      */
     protected boolean grantLockToNextRequest() {
-      final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS);
+      final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS_VERBOSE);
       if (isDebugEnabled_DLS) {
-        logger.trace(LogMarker.DLS,
+        logger.trace(LogMarker.DLS_VERBOSE,
             "[DLockGrantToken.grantLock] {} isGranted={} hasWaitingRequests={}", getName(),
             isLeaseHeld(), hasWaitingRequests());
       }
@@ -2861,8 +2778,8 @@ public class DLockGrantor {
           }
 
           if (isDebugEnabled_DLS) {
-            logger.trace(LogMarker.DLS, "[DLockGrantToken.grantLock] granting {} to {}", getName(),
-                request.getSender());
+            logger.trace(LogMarker.DLS_VERBOSE, "[DLockGrantToken.grantLock] granting {} to {}",
+                getName(), request.getSender());
           }
 
           long newLeaseExpireTime = grantAndRespondToRequest(request);
@@ -2960,8 +2877,8 @@ public class DLockGrantor {
       if (newLeaseExpireTime < leaseTime) { // rolled over MAX_VALUE...
         newLeaseExpireTime = Long.MAX_VALUE;
       }
-      if (logger.isTraceEnabled(LogMarker.DLS)) {
-        logger.trace(LogMarker.DLS,
+      if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+        logger.trace(LogMarker.DLS_VERBOSE,
             "[DLockGrantToken.calcLeaseExpireTime] currentTime={} newLeaseExpireTime={}",
             currentTime, newLeaseExpireTime);
       }
@@ -3201,19 +3118,15 @@ public class DLockGrantor {
           this.lesseeThread = null;
           this.leaseExpireTime = -1;
 
-          if (logger.isTraceEnabled(LogMarker.DLS)) {
-            logger.trace(LogMarker.DLS, "[checkForExpiration] Expired token at {}: {}", currentTime,
-                toString(true));
+          if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+            logger.trace(LogMarker.DLS_VERBOSE, "[checkForExpiration] Expired token at {}: {}",
+                currentTime, toString(true));
           }
 
           this.grantor.postReleaseLock(rThread, this.lockName);
 
           return true;
         }
-        /*
-         * else if (this.log.fineEnabled()) { this.log.fine("[checkForExpiration] not expired: " +
-         * this); }
-         */
       }
       return false;
     }
@@ -3253,8 +3166,8 @@ public class DLockGrantor {
       this.leaseExpireTime = newLeaseExpireTime;
       this.leaseId = lockId;
       this.lesseeThread = remoteThread;
-      if (logger.isTraceEnabled(LogMarker.DLS)) {
-        logger.trace(LogMarker.DLS, "[DLockGrantToken.grantLock.grantor] Granting {}",
+      if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+        logger.trace(LogMarker.DLS_VERBOSE, "[DLockGrantToken.grantLock.grantor] Granting {}",
             toString(false));
       }
     }
@@ -3298,7 +3211,6 @@ public class DLockGrantor {
       if (this.destroyed) {
         String s = "Attempting to use destroyed grant token: " + this;
         IllegalStateException e = new IllegalStateException(s);
-        // log.warning(e); -enable for debugging
         throw e;
       }
     }
@@ -3319,9 +3231,9 @@ public class DLockGrantor {
       checkDestroyed();
 
       if (isLeaseHeldBy(member, lockId)) {
-        if (logger.isTraceEnabled(LogMarker.DLS)) {
-          logger.trace(LogMarker.DLS, "[DLockGrantToken.releaseLock] releasing ownership: {}",
-              this);
+        if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+          logger.trace(LogMarker.DLS_VERBOSE,
+              "[DLockGrantToken.releaseLock] releasing ownership: {}", this);
         }
 
         this.lessee = null;
@@ -3331,9 +3243,9 @@ public class DLockGrantor {
 
         return true;
       }
-      if (logger.isTraceEnabled(LogMarker.DLS)) {
-        logger.trace(LogMarker.DLS, "[DLockGrantToken.releaseLock] {} attempted to release: {}",
-            member, this);
+      if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+        logger.trace(LogMarker.DLS_VERBOSE,
+            "[DLockGrantToken.releaseLock] {} attempted to release: {}", member, this);
       }
       return false;
     }
@@ -3423,45 +3335,21 @@ public class DLockGrantor {
               this.goIntoWait = true;
               this.lock.notify();
             }
-            /*
-             * if (this.log.fineEnabled()) { this.log.fine("[DLockGrantorThread.checkTimeToWait.k2]"
-             * + " newTimeToWait=" + newTimeToWait + " expire=" + expire + " newWakeupTimeStamp=" +
-             * newWakeupTimeStamp + " expectedWakeupTimeStamp=" + expectedWakeupTimeStamp +
-             * " nextExpire=" + this.nextExpire + " nextTimeout=" + this.nextTimeout +
-             * " timeToWait=" + this.timeToWait + " goIntoWait=" + this.goIntoWait ); }
-             */
           } else {
             this.timeToWait = newTimeToWait;
             this.requireTimeToWait = true;
-            /*
-             * if (this.log.fineEnabled()) { this.log.fine("[DLockGrantorThread.checkTimeToWait.k3]"
-             * + " newTimeToWait=" + newTimeToWait + " expire=" + expire +
-             * " expectedWakeupTimeStamp=" + expectedWakeupTimeStamp + " nextExpire=" +
-             * this.nextExpire + " nextTimeout=" + this.nextTimeout + " timeToWait=" +
-             * this.timeToWait + " goIntoWait=" + this.goIntoWait ); }
-             */
           }
         } // end if newTimeToWait
-
-        /*
-         * else if (this.log.fineEnabled()) {
-         * this.log.fine("[DLockGrantorThread.checkTimeToWait.k4]" + " newTimeToWait=" +
-         * newTimeToWait + " expire=" + expire + " expectedWakeupTimeStamp=" +
-         * expectedWakeupTimeStamp + " nextExpire=" + this.nextExpire + " nextTimeout=" +
-         * this.nextTimeout + " timeToWait=" + this.timeToWait + " goIntoWait=" + this.goIntoWait );
-         * }
-         */
       } // end sync this.lock
     }
 
     @Override
     public void run() {
-      final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS);
+      final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS_VERBOSE);
 
       DistributedLockStats stats = this.grantor.dlock.getStats();
       boolean recalcTimeToWait = false;
       while (!this.shutdown) {
-        // SystemFailure.checkFailure(); stopper checks this
         if (stopper.isCancelInProgress()) {
           break; // done
         }
@@ -3488,21 +3376,22 @@ public class DLockGrantor {
                 if (this.timeToWait < 0)
                   this.timeToWait = 0;
                 if (isDebugEnabled_DLS) {
-                  logger.trace(LogMarker.DLS,
+                  logger.trace(LogMarker.DLS_VERBOSE,
                       "DLockGrantorThread will wait for {} ms. nextExpire={} nextTimeout={} now={}",
                       this.timeToWait, this.nextExpire, this.nextTimeout, now);
                 }
               } else {
                 this.timeToWait = Long.MAX_VALUE;
                 if (isDebugEnabled_DLS) {
-                  logger.trace(LogMarker.DLS, "DLockGrantorThread will wait until rescheduled.");
+                  logger.trace(LogMarker.DLS_VERBOSE,
+                      "DLockGrantorThread will wait until rescheduled.");
                 }
               }
             }
             if (this.timeToWait > 0) {
               if (isDebugEnabled_DLS) {
-                logger.trace(LogMarker.DLS, "DLockGrantorThread is about to wait for {} ms.",
-                    this.timeToWait);
+                logger.trace(LogMarker.DLS_VERBOSE,
+                    "DLockGrantorThread is about to wait for {} ms.", this.timeToWait);
               }
               if (this.timeToWait != Long.MAX_VALUE) {
                 this.expectedWakeupTimeStamp = now() + this.timeToWait;
@@ -3533,7 +3422,7 @@ public class DLockGrantor {
                 }
               }
               if (isDebugEnabled_DLS) {
-                logger.trace(LogMarker.DLS, "DLockGrantorThread has woken up...");
+                logger.trace(LogMarker.DLS_VERBOSE, "DLockGrantorThread has woken up...");
               }
               if (this.shutdown)
                 break;
@@ -3553,7 +3442,8 @@ public class DLockGrantor {
               return;
             }
             if (isDebugEnabled_DLS) {
-              logger.trace(LogMarker.DLS, "DLockGrantorThread about to expireAndGrantLocks...");
+              logger.trace(LogMarker.DLS_VERBOSE,
+                  "DLockGrantorThread about to expireAndGrantLocks...");
             }
             {
               long smallestExpire = this.grantor.expireAndGrantLocks(grants.iterator());
@@ -3571,7 +3461,8 @@ public class DLockGrantor {
               return;
             }
             if (isDebugEnabled_DLS) {
-              logger.trace(LogMarker.DLS, "DLockGrantorThread about to handleRequestTimeouts...");
+              logger.trace(LogMarker.DLS_VERBOSE,
+                  "DLockGrantorThread about to handleRequestTimeouts...");
             }
             {
               long smallestRequestTimeout = this.grantor.handleRequestTimeouts(grants.iterator());
@@ -3592,7 +3483,8 @@ public class DLockGrantor {
               return;
             }
             if (isDebugEnabled_DLS) {
-              logger.trace(LogMarker.DLS, "DLockGrantorThread about to removeUnusedGrants...");
+              logger.trace(LogMarker.DLS_VERBOSE,
+                  "DLockGrantorThread about to removeUnusedGrants...");
             }
             this.grantor.removeUnusedGrants(grants.iterator());
             stats.endGrantorThreadRemoveUnusedTokens(timing);
@@ -3621,24 +3513,6 @@ public class DLockGrantor {
             stopper.checkCancelInProgress(e);
           }
         }
-        // catch (VirtualMachineError err) {
-        // SystemFailure.initiateFailure(err);
-        // // If this ever returns, rethrow the error. We're poisoned
-        // // now, so don't let this thread continue.
-        // throw err;
-        // }
-        // catch (Throwable e) {
-        // // Whenever you catch Error or Throwable, you must also
-        // // catch VirtualMachineError (see above). However, there is
-        // // _still_ a possibility that you are dealing with a cascading
-        // // error condition, so you also need to check to see if the JVM
-        // // is still usable:
-        // SystemFailure.checkFailure();
-        // this.log.warning(LocalizedStrings.DLockGrantor_DLOCKGRANTORTHREAD_CAUGHT_EXCEPTION, e);
-        // }
-        finally {
-
-        }
       }
     }
   }
@@ -3667,10 +3541,10 @@ public class DLockGrantor {
       if (distMgr.getCancelCriterion().isCancelInProgress()) {
         return;
       }
-      final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS);
+      final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS_VERBOSE);
       try {
         if (isDebugEnabled_DLS) {
-          logger.trace(LogMarker.DLS,
+          logger.trace(LogMarker.DLS_VERBOSE,
               "[DLockGrantor.memberDeparted] waiting thread pool will process id={}", id);
         }
         distMgr.getWaitingThreadPool().execute(new Runnable() {
@@ -3680,23 +3554,23 @@ public class DLockGrantor {
             } catch (InterruptedException e) {
               // ignore
               if (isDebugEnabled_DLS) {
-                logger.trace(LogMarker.DLS, "Ignored interrupt processing departed member");
+                logger.trace(LogMarker.DLS_VERBOSE, "Ignored interrupt processing departed member");
               }
             }
           }
         });
       } catch (RejectedExecutionException e) {
         if (isDebugEnabled_DLS) {
-          logger.trace(LogMarker.DLS, "[DLockGrantor.memberDeparted] rejected handling of id={}",
-              id);
+          logger.trace(LogMarker.DLS_VERBOSE,
+              "[DLockGrantor.memberDeparted] rejected handling of id={}", id);
         }
       }
     }
 
     protected void processMemberDeparted(InternalDistributedMember id, boolean crashed,
         DLockGrantor me) throws InterruptedException {
-      if (logger.isTraceEnabled(LogMarker.DLS)) {
-        logger.trace(LogMarker.DLS, "[DLockGrantor.processMemberDeparted] id={}", id);
+      if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+        logger.trace(LogMarker.DLS_VERBOSE, "[DLockGrantor.processMemberDeparted] id={}", id);
       }
       try {
         me.waitWhileInitializing();
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockQueryProcessor.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockQueryProcessor.java
index 28a37ef..61dbefd 100755
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockQueryProcessor.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockQueryProcessor.java
@@ -113,8 +113,8 @@ public class DLockQueryProcessor extends ReplyProcessor21 {
   public void process(DistributionMessage msg) {
     try {
       DLockQueryReplyMessage myReply = (DLockQueryReplyMessage) msg;
-      if (logger.isTraceEnabled(LogMarker.DLS)) {
-        logger.trace(LogMarker.DLS, "Handling: {}", myReply);
+      if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+        logger.trace(LogMarker.DLS_VERBOSE, "Handling: {}", myReply);
       }
       this.reply = myReply;
     } finally {
@@ -180,8 +180,8 @@ public class DLockQueryProcessor extends ReplyProcessor21 {
       } finally {
         if (failed) {
           // above code failed so now ensure reply is sent
-          if (logger.isTraceEnabled(LogMarker.DLS)) {
-            logger.trace(LogMarker.DLS, "DLockQueryMessage.process failed for <{}>", this);
+          if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+            logger.trace(LogMarker.DLS_VERBOSE, "DLockQueryMessage.process failed for <{}>", this);
           }
           DLockQueryReplyMessage replyMsg = new DLockQueryReplyMessage();
           replyMsg.setProcessorId(this.processorId);
@@ -214,8 +214,8 @@ public class DLockQueryProcessor extends ReplyProcessor21 {
       final DLockQueryMessage msg = this;
       dm.getWaitingThreadPool().execute(new Runnable() {
         public void run() {
-          if (logger.isTraceEnabled(LogMarker.DLS)) {
-            logger.trace(LogMarker.DLS, "[executeBasicProcess] {}", msg);
+          if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+            logger.trace(LogMarker.DLS_VERBOSE, "[executeBasicProcess] {}", msg);
           }
           basicProcess(dm, true);
         }
@@ -228,9 +228,9 @@ public class DLockQueryProcessor extends ReplyProcessor21 {
      * this.svc and this.grantor must be set before calling this method.
      */
     protected void basicProcess(final DistributionManager dm, final boolean waitForGrantor) {
-      final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS);
+      final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS_VERBOSE);
       if (isDebugEnabled_DLS) {
-        logger.trace(LogMarker.DLS, "[basicProcess] {}", this);
+        logger.trace(LogMarker.DLS_VERBOSE, "[basicProcess] {}", this);
       }
       final DLockQueryReplyMessage replyMsg = new DLockQueryReplyMessage();
       replyMsg.setProcessorId(this.processorId);
@@ -280,12 +280,11 @@ public class DLockQueryProcessor extends ReplyProcessor21 {
         }
 
         replyMsg.replyCode = DLockQueryReplyMessage.OK;
-      } catch (LockGrantorDestroyedException ignore) {
-      } catch (LockServiceDestroyedException ignore) {
+      } catch (LockGrantorDestroyedException | LockServiceDestroyedException ignore) {
       } catch (RuntimeException e) {
         replyMsg.setException(new ReplyException(e));
         if (isDebugEnabled_DLS) {
-          logger.trace(LogMarker.DLS, "[basicProcess] caught RuntimeException", e);
+          logger.trace(LogMarker.DLS_VERBOSE, "[basicProcess] caught RuntimeException", e);
         }
       } catch (VirtualMachineError err) {
         SystemFailure.initiateFailure(err);
@@ -301,7 +300,7 @@ public class DLockQueryProcessor extends ReplyProcessor21 {
         SystemFailure.checkFailure();
         replyMsg.setException(new ReplyException(e));
         if (isDebugEnabled_DLS) {
-          logger.trace(LogMarker.DLS, "[basicProcess] caught Error", e);
+          logger.trace(LogMarker.DLS_VERBOSE, "[basicProcess] caught Error", e);
         }
       } finally {
         if (dm.getId().equals(getSender())) {
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockRecoverGrantorProcessor.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockRecoverGrantorProcessor.java
index d8ed969..7bcd143 100755
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockRecoverGrantorProcessor.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockRecoverGrantorProcessor.java
@@ -140,15 +140,17 @@ public class DLockRecoverGrantorProcessor extends ReplyProcessor21 {
       // build grantTokens from each reply...
       switch (reply.replyCode) {
         case DLockRecoverGrantorReplyMessage.GRANTOR_DISPUTE:
-          if (logger.isTraceEnabled(LogMarker.DLS)) {
-            logger.trace(LogMarker.DLS, "Failed DLockRecoverGrantorReplyMessage: '{}'", reply);
+          if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+            logger.trace(LogMarker.DLS_VERBOSE, "Failed DLockRecoverGrantorReplyMessage: '{}'",
+                reply);
           }
           this.error = true;
           break;
         case DLockRecoverGrantorReplyMessage.OK:
           // collect results...
-          if (logger.isTraceEnabled(LogMarker.DLS)) {
-            logger.trace(LogMarker.DLS, "Processing DLockRecoverGrantorReplyMessage: '{}'", reply);
+          if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+            logger.trace(LogMarker.DLS_VERBOSE, "Processing DLockRecoverGrantorReplyMessage: '{}'",
+                reply);
           }
 
           Set lockSet = new HashSet();
@@ -170,8 +172,8 @@ public class DLockRecoverGrantorProcessor extends ReplyProcessor21 {
       }
       // maybe build up another reply to indicate lock recovery status?
     } catch (IllegalStateException e) {
-      if (logger.isTraceEnabled(LogMarker.DLS)) {
-        logger.trace(LogMarker.DLS,
+      if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+        logger.trace(LogMarker.DLS_VERBOSE,
             "Processing of DLockRecoverGrantorReplyMessage {} resulted in {}", msg, e.getMessage(),
             e);
       }
@@ -418,25 +420,7 @@ public class DLockRecoverGrantorProcessor extends ReplyProcessor21 {
                 LocalizedStrings.DLOCKRECOVERGRANTORPROCESSOR_DLOCKRECOVERGRANTORMESSAGE_PROCESS_THROWABLE),
             e);
         replyException = new ReplyException(e);
-      }
-      // catch (VirtualMachineError err) {
-      // SystemFailure.initiateFailure(err);
-      // // If this ever returns, rethrow the error. We're poisoned
-      // // now, so don't let this thread continue.
-      // throw err;
-      // }
-      // catch (Throwable t) {
-      // // Whenever you catch Error or Throwable, you must also
-      // // catch VirtualMachineError (see above). However, there is
-      // // _still_ a possibility that you are dealing with a cascading
-      // // error condition, so you also need to check to see if the JVM
-      // // is still usable:
-      // SystemFailure.checkFailure();
-      // log.warning(LocalizedStrings.DLockRecoverGrantorProcessor_DLOCKRECOVERGRANTORMESSAGEPROCESS_THROWABLE,
-      // t);
-      // replyException = new ReplyException(t);
-      // }
-      finally {
+      } finally {
         DLockRecoverGrantorReplyMessage replyMsg = new DLockRecoverGrantorReplyMessage();
         replyMsg.replyCode = replyCode;
         replyMsg.heldLocks = heldLocks;
@@ -445,15 +429,15 @@ public class DLockRecoverGrantorProcessor extends ReplyProcessor21 {
         replyMsg.setException(replyException);
         if (msg.getSender().equals(dm.getId())) {
           // process in-line in this VM
-          if (logger.isTraceEnabled(LogMarker.DLS)) {
-            logger.trace(LogMarker.DLS,
+          if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+            logger.trace(LogMarker.DLS_VERBOSE,
                 "[DLockRecoverGrantorMessage.process] locally process reply");
           }
           replyMsg.setSender(dm.getId());
           replyMsg.dmProcess(dm);
         } else {
-          if (logger.isTraceEnabled(LogMarker.DLS)) {
-            logger.trace(LogMarker.DLS, "[DLockRecoverGrantorMessage.process] send reply");
+          if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+            logger.trace(LogMarker.DLS_VERBOSE, "[DLockRecoverGrantorMessage.process] send reply");
           }
           dm.putOutgoing(replyMsg);
         }
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockReleaseProcessor.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockReleaseProcessor.java
index b0041c1..d1d7f36 100755
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockReleaseProcessor.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockReleaseProcessor.java
@@ -102,30 +102,27 @@ public class DLockReleaseProcessor extends ReplyProcessor21 {
           "DLockReleaseProcessor is unable to process message of type " + msg.getClass());
 
       DLockReleaseReplyMessage myReply = (DLockReleaseReplyMessage) msg;
-      final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS);
+      final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS_VERBOSE);
       if (isDebugEnabled_DLS) {
-        logger.trace(LogMarker.DLS, "Handling: {}", myReply);
+        logger.trace(LogMarker.DLS_VERBOSE, "Handling: {}", myReply);
       }
       this.reply = myReply;
 
       if (isDebugEnabled_DLS) {
         // grantor acknowledged release of lock...
         if (myReply.replyCode == DLockReleaseReplyMessage.OK) {
-          logger.trace(LogMarker.DLS, "Successfully released {} in {}", this.objectName,
+          logger.trace(LogMarker.DLS_VERBOSE, "Successfully released {} in {}", this.objectName,
               myReply.serviceName);
         }
         // sender denies being the grantor...
         else if (myReply.replyCode == DLockReleaseReplyMessage.NOT_GRANTOR) {
-          logger.trace(LogMarker.DLS,
+          logger.trace(LogMarker.DLS_VERBOSE,
               "{} has responded DLockReleaseReplyMessage.NOT_GRANTOR for {}", myReply.getSender(),
               myReply.serviceName);
         }
       }
     } finally {
       super.process(msg);
-      /*
-       * if (this.log.fineEnabled()) { this.log.fine("Finished handling: " + msg); }
-       */
     }
   }
 
@@ -190,8 +187,9 @@ public class DLockReleaseProcessor extends ReplyProcessor21 {
       } finally {
         if (failed) {
           // above code failed so now ensure reply is sent
-          if (logger.isTraceEnabled(LogMarker.DLS)) {
-            logger.trace(LogMarker.DLS, "DLockReleaseMessage.process failed for <{}>", this);
+          if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+            logger.trace(LogMarker.DLS_VERBOSE, "DLockReleaseMessage.process failed for <{}>",
+                this);
           }
           int replyCode = DLockReleaseReplyMessage.NOT_GRANTOR;
           DLockReleaseReplyMessage replyMsg = new DLockReleaseReplyMessage();
@@ -227,8 +225,8 @@ public class DLockReleaseProcessor extends ReplyProcessor21 {
       final DLockReleaseMessage msg = this;
       dm.getWaitingThreadPool().execute(new Runnable() {
         public void run() {
-          if (logger.isTraceEnabled(LogMarker.DLS)) {
-            logger.trace(LogMarker.DLS, "[executeBasicProcess] waitForGrantor {}", msg);
+          if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+            logger.trace(LogMarker.DLS_VERBOSE, "[executeBasicProcess] waitForGrantor {}", msg);
           }
           basicProcess(dm, true);
         }
@@ -241,9 +239,9 @@ public class DLockReleaseProcessor extends ReplyProcessor21 {
      * this.svc and this.grantor must be set before calling this method.
      */
     protected void basicProcess(final DistributionManager dm, final boolean waitForGrantor) {
-      final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS);
+      final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS_VERBOSE);
       if (isDebugEnabled_DLS) {
-        logger.trace(LogMarker.DLS, "[basicProcess] {}", this);
+        logger.trace(LogMarker.DLS_VERBOSE, "[basicProcess] {}", this);
       }
       int replyCode = DLockReleaseReplyMessage.NOT_GRANTOR;
       ReplyException replyException = null;
@@ -281,7 +279,7 @@ public class DLockReleaseProcessor extends ReplyProcessor21 {
       } catch (RuntimeException e) {
         replyException = new ReplyException(e);
         if (isDebugEnabled_DLS) {
-          logger.trace(LogMarker.DLS, "[basicProcess] caught RuntimeException", e);
+          logger.trace(LogMarker.DLS_VERBOSE, "[basicProcess] caught RuntimeException", e);
         }
       } catch (VirtualMachineError err) {
         SystemFailure.initiateFailure(err);
@@ -297,7 +295,7 @@ public class DLockReleaseProcessor extends ReplyProcessor21 {
         SystemFailure.checkFailure();
         replyException = new ReplyException(e);
         if (isDebugEnabled_DLS) {
-          logger.trace(LogMarker.DLS, "[basicProcess] caught Error", e);
+          logger.trace(LogMarker.DLS_VERBOSE, "[basicProcess] caught Error", e);
         }
       } finally {
         DLockReleaseReplyMessage replyMsg = new DLockReleaseReplyMessage();
@@ -327,7 +325,7 @@ public class DLockReleaseProcessor extends ReplyProcessor21 {
         } // grantor != null
         else {
           if (DLockGrantor.DEBUG_SUSPEND_LOCK && isDebugEnabled_DLS) {
-            logger.trace(LogMarker.DLS,
+            logger.trace(LogMarker.DLS_VERBOSE,
                 "DLockReleaseMessage, omitted postRemoteRelease lock on " + objectName
                     + "; grantor = " + grantor + ", lockBatch = " + lockBatch + ", replyMsg = "
                     + replyMsg);
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockRequestProcessor.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockRequestProcessor.java
index 9fd6c4b..5be9d48 100755
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockRequestProcessor.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockRequestProcessor.java
@@ -197,7 +197,7 @@ public class DLockRequestProcessor extends ReplyProcessor21 {
   }
 
   protected boolean requestLock(boolean interruptible, int lockId) throws InterruptedException {
-    final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS);
+    final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS_VERBOSE);
 
     Assert.assertTrue(lockId > -1, "lockId is < 0: " + this);
     this.request.lockId = lockId;
@@ -205,7 +205,8 @@ public class DLockRequestProcessor extends ReplyProcessor21 {
     // local grantor... don't use messaging... fake it
     if (isLockGrantor()) {
       if (isDebugEnabled_DLS) {
-        logger.trace(LogMarker.DLS, "DLockRequestProcessor processing lock request directly");
+        logger.trace(LogMarker.DLS_VERBOSE,
+            "DLockRequestProcessor processing lock request directly");
       }
       this.request.setSender(this.dm.getDistributionManagerId());
 
@@ -227,7 +228,7 @@ public class DLockRequestProcessor extends ReplyProcessor21 {
           throw (InterruptedException) ex.getCause();
         }
         if (isDebugEnabled_DLS) {
-          logger.trace(LogMarker.DLS, "DLockRequestProcessor caught ReplyException", ex);
+          logger.trace(LogMarker.DLS_VERBOSE, "DLockRequestProcessor caught ReplyException", ex);
         }
         return false;
       }
@@ -239,14 +240,14 @@ public class DLockRequestProcessor extends ReplyProcessor21 {
           throw (InterruptedException) ex.getCause();
         }
         if (isDebugEnabled_DLS) {
-          logger.trace(LogMarker.DLS, "DLockRequestProcessor caught ReplyException", ex);
+          logger.trace(LogMarker.DLS_VERBOSE, "DLockRequestProcessor caught ReplyException", ex);
         }
         return false;
       }
     }
 
     if (isDebugEnabled_DLS) {
-      logger.trace(LogMarker.DLS, "DLockRequestProcessor {} for {}",
+      logger.trace(LogMarker.DLS_VERBOSE, "DLockRequestProcessor {} for {}",
           (this.gotLock ? "got lock" : "failed to get lock"), this.request);
     }
     return this.gotLock;
@@ -257,12 +258,6 @@ public class DLockRequestProcessor extends ReplyProcessor21 {
     return true;
   }
 
-
-
-  // private synchronized void setDoneProcessing(boolean value) {
-  // this.doneProcessing = value;
-  // }
-
   private boolean isLockGrantor() {
     return this.dm.getDistributionManagerId().equals(this.grantor);
   }
@@ -279,13 +274,13 @@ public class DLockRequestProcessor extends ReplyProcessor21 {
 
   @Override
   public void process(DistributionMessage msg) {
-    final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS);
+    final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS_VERBOSE);
     try {
       Assert.assertTrue(msg instanceof DLockResponseMessage,
           "DLockRequestProcessor is unable to process message of type " + msg.getClass());
 
       if (isDebugEnabled_DLS) {
-        logger.trace(LogMarker.DLS, "Processing DLockResponseMessage: '{}'", msg);
+        logger.trace(LogMarker.DLS_VERBOSE, "Processing DLockResponseMessage: '{}'", msg);
       }
       final DLockResponseMessage reply = (DLockResponseMessage) msg;
       this.response = reply;
@@ -294,7 +289,7 @@ public class DLockRequestProcessor extends ReplyProcessor21 {
         // Ignore this response since it was sent for a lockId that
         // must have timed out.
         if (isDebugEnabled_DLS) {
-          logger.trace(LogMarker.DLS,
+          logger.trace(LogMarker.DLS_VERBOSE,
               "Failed to find processor for lockId {} processor ids must have wrapped.",
               this.response.getLockId());
         }
@@ -303,42 +298,33 @@ public class DLockRequestProcessor extends ReplyProcessor21 {
 
       switch (reply.responseCode) {
         case DLockResponseMessage.GRANT:
-          /*
-           * // If a different version of the lock service requested the lock, we need // to turn
-           * around and release it InternalDistributedLockService dls =
-           * (InternalDistributedLockService)
-           * DistributedLockService.getServiceNamed(reply.serviceName); boolean different = dls ==
-           * null; different = different || dls.getSerialNumber() != reply.dlsSerialNumber; if
-           * (different) { // Back at ya, dude, we don't want this lock!
-           * reply.releaseOrphanedGrant(this.dm); this.gotLock = false; // KIRK never set true
-           * except in else } else {
-           */
           // grantor has granted the lock request...
           if (isDebugEnabled_DLS) {
-            logger.trace(LogMarker.DLS, "{} has granted lock for {} in {}", reply.getSender(),
-                reply.objectName, reply.serviceName);
+            logger.trace(LogMarker.DLS_VERBOSE, "{} has granted lock for {} in {}",
+                reply.getSender(), reply.objectName, reply.serviceName);
           }
           this.gotLock = true;
-          // }
           break;
         case DLockResponseMessage.NOT_GRANTOR:
           // target was not the grantor! who is the grantor?!
           if (isDebugEnabled_DLS) {
-            logger.trace(LogMarker.DLS, "{} has responded DLockResponseMessage.NOT_GRANTOR for {}",
-                reply.getSender(), reply.serviceName);
+            logger.trace(LogMarker.DLS_VERBOSE,
+                "{} has responded DLockResponseMessage.NOT_GRANTOR for {}", reply.getSender(),
+                reply.serviceName);
           }
           break;
         case DLockResponseMessage.DESTROYED:
           // grantor claims we sent him a NonGrantorDestroyedMessage
           if (isDebugEnabled_DLS) {
-            logger.trace(LogMarker.DLS, "{} has responded DLockResponseMessage.DESTROYED for {}",
-                reply.getSender(), reply.serviceName);
+            logger.trace(LogMarker.DLS_VERBOSE,
+                "{} has responded DLockResponseMessage.DESTROYED for {}", reply.getSender(),
+                reply.serviceName);
           }
           break;
         case DLockResponseMessage.TIMEOUT:
           // grantor told us the lock request has timed out...
           if (isDebugEnabled_DLS) {
-            logger.trace(LogMarker.DLS,
+            logger.trace(LogMarker.DLS_VERBOSE,
                 "{} has responded DLockResponseMessage.TIMEOUT for {} in {}", reply.getSender(),
                 reply.objectName, reply.serviceName);
           }
@@ -346,7 +332,7 @@ public class DLockRequestProcessor extends ReplyProcessor21 {
         case DLockResponseMessage.SUSPENDED:
           // grantor told us that locking has been suspended for the service...
           if (isDebugEnabled_DLS) {
-            logger.trace(LogMarker.DLS,
+            logger.trace(LogMarker.DLS_VERBOSE,
                 "{} has responded DLockResponseMessage.SUSPENDED for {} in {}", reply.getSender(),
                 reply.objectName, reply.serviceName);
           }
@@ -354,7 +340,7 @@ public class DLockRequestProcessor extends ReplyProcessor21 {
         case DLockResponseMessage.NOT_HOLDER:
           // tried to reenter lock but grantor says we're not the lock holder...
           if (isDebugEnabled_DLS) {
-            logger.trace(LogMarker.DLS,
+            logger.trace(LogMarker.DLS_VERBOSE,
                 "{} has responded DLockResponseMessage.NOT_HOLDER for {} in {}", reply.getSender(),
                 reply.objectName, reply.serviceName);
           }
@@ -362,7 +348,7 @@ public class DLockRequestProcessor extends ReplyProcessor21 {
         case DLockResponseMessage.TRY_LOCK_FAILED:
           // tried to acquire try-lock but grantor says it's held and we failed...
           if (isDebugEnabled_DLS) {
-            logger.trace(LogMarker.DLS,
+            logger.trace(LogMarker.DLS_VERBOSE,
                 "{} has responded DLockResponseMessage.TRY_LOCK_FAILED for {} in {}",
                 reply.getSender(), reply.objectName, reply.serviceName);
           }
@@ -376,10 +362,9 @@ public class DLockRequestProcessor extends ReplyProcessor21 {
     } finally {
       super.process(msg);
       if (isDebugEnabled_DLS) {
-        logger.trace(LogMarker.DLS, "Finished processing DLockResponseMessage: '{}'", msg);
+        logger.trace(LogMarker.DLS_VERBOSE, "Finished processing DLockResponseMessage: '{}'", msg);
       }
       ((DLockResponseMessage) msg).processed = true;
-      // setDoneProcessing(true);
     }
   }
 
@@ -560,8 +545,9 @@ public class DLockRequestProcessor extends ReplyProcessor21 {
       } finally {
         if (failed) {
           // above code failed so now ensure reply is sent
-          if (logger.isTraceEnabled(LogMarker.DLS)) {
-            logger.trace(LogMarker.DLS, "DLockRequestMessage.process failed for <{}>", this);
+          if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+            logger.trace(LogMarker.DLS_VERBOSE, "DLockRequestMessage.process failed for <{}>",
+                this);
           }
           this.response = createResponse();
           this.response.setProcessorId(getProcessorId());
@@ -591,8 +577,8 @@ public class DLockRequestProcessor extends ReplyProcessor21 {
       final DLockRequestMessage msg = this;
       dm.getWaitingThreadPool().execute(new Runnable() {
         public void run() {
-          if (logger.isTraceEnabled(LogMarker.DLS)) {
-            logger.trace(LogMarker.DLS, "calling waitForGrantor {}", msg);
+          if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+            logger.trace(LogMarker.DLS_VERBOSE, "calling waitForGrantor {}", msg);
           }
           basicProcess(dm, true);
         }
@@ -600,11 +586,12 @@ public class DLockRequestProcessor extends ReplyProcessor21 {
     }
 
     protected void basicProcess(final DistributionManager dm, final boolean waitForGrantor) {
-      final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS);
+      final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS_VERBOSE);
       try {
         this.receivingDM = dm;
         if (isDebugEnabled_DLS) {
-          logger.trace(LogMarker.DLS, "DLockRequestMessage.basicProcess processing <{}>", this);
+          logger.trace(LogMarker.DLS_VERBOSE, "DLockRequestMessage.basicProcess processing <{}>",
+              this);
         }
         this.response = createResponse();
         this.response.setProcessorId(getProcessorId());
@@ -624,7 +611,7 @@ public class DLockRequestProcessor extends ReplyProcessor21 {
 
         if (this.svc == null || this.grantor == null) {
           if (isDebugEnabled_DLS) {
-            logger.trace(LogMarker.DLS, "respondWithNotGrantor this.svc={} this.grantor={}",
+            logger.trace(LogMarker.DLS_VERBOSE, "respondWithNotGrantor this.svc={} this.grantor={}",
                 this.svc, this.grantor);
           }
           respondWithNotGrantor();
@@ -632,20 +619,20 @@ public class DLockRequestProcessor extends ReplyProcessor21 {
 
         else if (this.grantor.isDestroyed()) {
           if (isDebugEnabled_DLS) {
-            logger.trace(LogMarker.DLS, "respondWithNotGrantor grantor was destroyed {}",
+            logger.trace(LogMarker.DLS_VERBOSE, "respondWithNotGrantor grantor was destroyed {}",
                 this.grantor);
           }
           respondWithNotGrantor();
         } else if (this.grantor.getVersionId() != this.grantorVersion) {
           if (isDebugEnabled_DLS) {
-            logger.trace(LogMarker.DLS,
+            logger.trace(LogMarker.DLS_VERBOSE,
                 "respondWithNotGrantor current version is {}; request was for {}",
                 this.grantor.getVersionId(), this.grantorVersion);
           }
           respondWithNotGrantor();
         } else if (this.svc.getSerialNumber() != this.grantorSerialNumber) {
           if (isDebugEnabled_DLS) {
-            logger.trace(LogMarker.DLS,
+            logger.trace(LogMarker.DLS_VERBOSE,
                 "respondWithNotGrantor current serial number is {}; request was for {}",
                 this.svc.getSerialNumber(), this.grantorSerialNumber);
           }
@@ -655,10 +642,10 @@ public class DLockRequestProcessor extends ReplyProcessor21 {
         // this is the grantor, so the request will be processed...
         else {
           this.svc.checkDestroyed();
-          if (!this.svc.isLockGrantor()) { // TODO: verify this ok
+          if (!this.svc.isLockGrantor()) {
             if (isDebugEnabled_DLS) {
-              logger.trace(LogMarker.DLS, "respondWithNotGrantor service !isLockGrantor svc={}",
-                  this.svc);
+              logger.trace(LogMarker.DLS_VERBOSE,
+                  "respondWithNotGrantor service !isLockGrantor svc={}", this.svc);
             }
             respondWithNotGrantor();
           }
@@ -682,41 +669,39 @@ public class DLockRequestProcessor extends ReplyProcessor21 {
           // queue up this request to be granted...
           else {
             if (isDebugEnabled_DLS) {
-              logger.trace(LogMarker.DLS, "Handling lock request: <{}>", this);
+              logger.trace(LogMarker.DLS_VERBOSE, "Handling lock request: <{}>", this);
             }
             if (this.grantor.isDestroyed()) {
               if (isDebugEnabled_DLS) {
-                logger.trace(LogMarker.DLS,
+                logger.trace(LogMarker.DLS_VERBOSE,
                     "respondWithNotGrantor grantor was destroyed grantor={}", this.grantor);
               }
               respondWithNotGrantor();
             } else {
               try {
                 this.grantor.handleLockRequest(this);
-              } catch (InterruptedException e) {
+              } catch (InterruptedException | LockGrantorDestroyedException e) {
                 // just fail it
                 respondWithNotGrantor();
-              } catch (LockGrantorDestroyedException e) {
-                respondWithNotGrantor();
               }
             }
           }
         }
       } catch (LockGrantorDestroyedException e) {
         if (isDebugEnabled_DLS) {
-          logger.trace(LogMarker.DLS, "LockGrantorDestroyedException respondWithNotGrantor svc={}",
-              this.svc);
+          logger.trace(LogMarker.DLS_VERBOSE,
+              "LockGrantorDestroyedException respondWithNotGrantor svc={}", this.svc);
         }
         respondWithNotGrantor();
       } catch (LockServiceDestroyedException e) {
         if (isDebugEnabled_DLS) {
-          logger.trace(LogMarker.DLS, "LockServiceDestroyedException respondWithNotGrantor svc={}",
-              this.svc);
+          logger.trace(LogMarker.DLS_VERBOSE,
+              "LockServiceDestroyedException respondWithNotGrantor svc={}", this.svc);
         }
         respondWithNotGrantor();
       } catch (CancelException e) {
         if (isDebugEnabled_DLS) {
-          logger.trace(LogMarker.DLS,
+          logger.trace(LogMarker.DLS_VERBOSE,
               "CacheClosedException respondWithNotGrantor svc={} exception = {}", this.svc, e);
         }
         if (isLocal()) {
@@ -725,30 +710,12 @@ public class DLockRequestProcessor extends ReplyProcessor21 {
           respondWithNotGrantor();
         }
       } catch (RuntimeException e) {
-        logger.warn(LogMarker.DLS,
+        logger.warn(LogMarker.DLS_MARKER,
             LocalizedMessage.create(
                 LocalizedStrings.DLockRequestProcessor_DLOCKREQUESTMESSAGEPROCESS_CAUGHT_THROWABLE),
             e);
         respondWithException(e);
       }
-      // catch (VirtualMachineError err) {
-      // SystemFailure.initiateFailure(err);
-      // // If this ever returns, rethrow the error. We're poisoned
-      // // now, so don't let this thread continue.
-      // throw err;
-      // }
-      // catch (Throwable t) {
-      // // Whenever you catch Error or Throwable, you must also
-      // // catch VirtualMachineError (see above). However, there is
-      // // _still_ a possibility that you are dealing with a cascading
-      // // error condition, so you also need to check to see if the JVM
-      // // is still usable:
-      // SystemFailure.checkFailure();
-      // respondWithException(t);
-      // }
-      finally {
-
-      }
     }
 
     synchronized void respondWithNotGrantor() {
@@ -768,8 +735,8 @@ public class DLockRequestProcessor extends ReplyProcessor21 {
 
     /** Callers must be synchronized on this */
     private void respondWithTimeout() {
-      if (logger.isTraceEnabled(LogMarker.DLS)) {
-        logger.trace(LogMarker.DLS, "Request {} timed out; grantor status = {}", this,
+      if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+        logger.trace(LogMarker.DLS_VERBOSE, "Request {} timed out; grantor status = {}", this,
             this.grantor.displayStatus(rThread, objectName));
       }
       this.response.responseCode = DLockResponseMessage.TIMEOUT;
@@ -783,7 +750,6 @@ public class DLockRequestProcessor extends ReplyProcessor21 {
     }
 
     synchronized void respondWithGrant(long leaseExpireTime) {
-      // TODO: trim reply objectName down to just DLockBatchId for batches
       this.response.responseCode = DLockResponseMessage.GRANT;
       this.response.leaseExpireTime = leaseExpireTime;
       this.response.dlsSerialNumber = this.dlsSerialNumber;
@@ -794,12 +760,13 @@ public class DLockRequestProcessor extends ReplyProcessor21 {
       try {
         if (this.response.getException() == null) {
           this.response.setException(new ReplyException(t));
-          if (logger.isTraceEnabled(LogMarker.DLS)) {
-            logger.trace(LogMarker.DLS, "While processing <{}>, got exception, returning to sender",
-                this, this.response.getException());
+          if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+            logger.trace(LogMarker.DLS_VERBOSE,
+                "While processing <{}>, got exception, returning to sender", this,
+                this.response.getException());
           }
         } else {
-          logger.warn(LogMarker.DLS,
+          logger.warn(LogMarker.DLS_VERBOSE,
               LocalizedMessage.create(
                   LocalizedStrings.DLockRequestProcessor_MORE_THAN_ONE_EXCEPTION_THROWN_IN__0,
                   this),
@@ -835,8 +802,8 @@ public class DLockRequestProcessor extends ReplyProcessor21 {
       if (now < this.startTime)
         now = this.startTime;
       if (this.waitMillis + this.startTime - now <= 0) {
-        if (logger.isTraceEnabled(LogMarker.DLS)) {
-          logger.trace(LogMarker.DLS,
+        if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+          logger.trace(LogMarker.DLS_VERBOSE,
               "DLockRequestProcessor request timed out: waitMillis={} now={} startTime={}",
               this.waitMillis, now, this.startTime);
         }
@@ -893,12 +860,12 @@ public class DLockRequestProcessor extends ReplyProcessor21 {
           ReplyProcessor21 processor = ReplyProcessor21.getProcessor(processorId);
           if (processor == null) {
             // lock request was probably interrupted so we need to release it...
-            logger.warn(LogMarker.DLS,
+            logger.warn(LogMarker.DLS_MARKER,
                 LocalizedMessage.create(
                     LocalizedStrings.DLockRequestProcessor_FAILED_TO_FIND_PROCESSOR_FOR__0,
                     this.response));
             if (this.response.responseCode == DLockResponseMessage.GRANT) {
-              logger.info(LogMarker.DLS,
+              logger.info(LogMarker.DLS_MARKER,
                   LocalizedMessage.create(
                       LocalizedStrings.DLockRequestProcessor_RELEASING_LOCAL_ORPHANED_GRANT_FOR_0,
                       this));
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockService.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockService.java
index 71d6501..3e0b201 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockService.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockService.java
@@ -267,7 +267,7 @@ public class DLockService extends DistributedLockService {
    * to the elder to fetch this information.
    */
   public LockGrantorId getLockGrantorId() {
-    final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS);
+    final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS_VERBOSE);
     boolean ownLockGrantorFutureResult = false;
     FutureResult lockGrantorFutureResultRef = null;
 
@@ -287,7 +287,8 @@ public class DLockService extends DistributedLockService {
             ownLockGrantorFutureResult = true;
             lockGrantorFutureResultRef = new FutureResult(this.dm.getCancelCriterion());
             if (isDebugEnabled_DLS) {
-              logger.trace(LogMarker.DLS, "[getLockGrantorId] creating lockGrantorFutureResult");
+              logger.trace(LogMarker.DLS_VERBOSE,
+                  "[getLockGrantorId] creating lockGrantorFutureResult");
             }
             this.lockGrantorFutureResult = lockGrantorFutureResultRef;
           }
@@ -312,7 +313,7 @@ public class DLockService extends DistributedLockService {
             new LockGrantorId(this.dm, gi.getId(), gi.getVersionId(), gi.getSerialNumber());
 
         if (isDebugEnabled_DLS) {
-          logger.trace(LogMarker.DLS, "[getLockGrantorId] elder says grantor is {}",
+          logger.trace(LogMarker.DLS_VERBOSE, "[getLockGrantorId] elder says grantor is {}",
               theLockGrantorId);
         }
 
@@ -321,7 +322,7 @@ public class DLockService extends DistributedLockService {
           boolean needsRecovery = gi.needsRecovery();
           if (!needsRecovery) {
             if (isDebugEnabled_DLS) {
-              logger.trace(LogMarker.DLS, "[getLockGrantorId] needsRecovery is false");
+              logger.trace(LogMarker.DLS_VERBOSE, "[getLockGrantorId] needsRecovery is false");
             }
             synchronized (this.lockGrantorIdLock) {
               // either no previous grantor or grantor is newer
@@ -384,15 +385,15 @@ public class DLockService extends DistributedLockService {
       LockGrantorId myLockGrantorId) {
     DLockGrantor myGrantor =
         DLockGrantor.createGrantor(this, myLockGrantorId.getLockGrantorVersion());
-    if (logger.isTraceEnabled(LogMarker.DLS)) {
-      logger.trace(LogMarker.DLS, "[createLocalGrantor] Calling makeLocalGrantor");
+    if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+      logger.trace(LogMarker.DLS_VERBOSE, "[createLocalGrantor] Calling makeLocalGrantor");
     }
     return makeLocalGrantor(elder, needsRecovery, myLockGrantorId, myGrantor);
   }
 
   private boolean makeLocalGrantor(InternalDistributedMember elder, boolean needsRecovery,
       LockGrantorId myLockGrantorId, DLockGrantor myGrantor) {
-    final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS);
+    final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS_VERBOSE);
     boolean success = false;
     try {
       synchronized (this.lockGrantorIdLock) {
@@ -404,8 +405,9 @@ public class DLockService extends DistributedLockService {
         if (!currentElder.equals(elder)) {
           // abort because elder changed
           if (isDebugEnabled_DLS) {
-            logger.trace(LogMarker.DLS, "Failed to create {} because elder changed from {} to {}",
-                myLockGrantorId, elder, currentElder);
+            logger.trace(LogMarker.DLS_VERBOSE,
+                "Failed to create {} because elder changed from {} to {}", myLockGrantorId, elder,
+                currentElder);
           }
           return false; // exit
         }
@@ -413,7 +415,7 @@ public class DLockService extends DistributedLockService {
         if (this.deposingLockGrantorId != null) {
           if (this.deposingLockGrantorId.isNewerThan(myLockGrantorId)) {
             if (isDebugEnabled_DLS) {
-              logger.trace(LogMarker.DLS, "Failed to create {} because I was deposed by {}",
+              logger.trace(LogMarker.DLS_VERBOSE, "Failed to create {} because I was deposed by {}",
                   myLockGrantorId, this.deposingLockGrantorId);
             }
             this.deposingLockGrantorId = null;
@@ -421,8 +423,8 @@ public class DLockService extends DistributedLockService {
           }
 
           if (isDebugEnabled_DLS) {
-            logger.trace(LogMarker.DLS, "{} failed to depose {}", this.deposingLockGrantorId,
-                myLockGrantorId);
+            logger.trace(LogMarker.DLS_VERBOSE, "{} failed to depose {}",
+                this.deposingLockGrantorId, myLockGrantorId);
           }
           // older grantor couldn't depose us, so null him out...
           this.deposingLockGrantorId = null;
@@ -430,7 +432,7 @@ public class DLockService extends DistributedLockService {
 
         if (!setLockGrantorId(myLockGrantorId, myGrantor)) {
           if (isDebugEnabled_DLS) {
-            logger.trace(LogMarker.DLS,
+            logger.trace(LogMarker.DLS_VERBOSE,
                 "[getLockGrantorId] failed to create {} because current grantor is {}",
                 myLockGrantorId, this.lockGrantorId);
           }
@@ -462,7 +464,7 @@ public class DLockService extends DistributedLockService {
         if (this.deposingLockGrantorId != null) {
           if (this.deposingLockGrantorId.isNewerThan(myLockGrantorId)) {
             if (isDebugEnabled_DLS) {
-              logger.trace(LogMarker.DLS, "Failed to create {} because I was deposed by {}",
+              logger.trace(LogMarker.DLS_VERBOSE, "Failed to create {} because I was deposed by {}",
                   myLockGrantorId, this.deposingLockGrantorId);
             }
             this.deposingLockGrantorId = null;
@@ -470,8 +472,8 @@ public class DLockService extends DistributedLockService {
           }
 
           if (isDebugEnabled_DLS) {
-            logger.trace(LogMarker.DLS, "{} failed to depose {}", this.deposingLockGrantorId,
-                myLockGrantorId);
+            logger.trace(LogMarker.DLS_VERBOSE, "{} failed to depose {}",
+                this.deposingLockGrantorId, myLockGrantorId);
           }
           this.deposingLockGrantorId = null;
         }
@@ -495,12 +497,12 @@ public class DLockService extends DistributedLockService {
       // is still usable:
       SystemFailure.checkFailure();
       if (isDebugEnabled_DLS) {
-        logger.trace(LogMarker.DLS, "[makeLocalGrantor] throwing Error", e);
+        logger.trace(LogMarker.DLS_VERBOSE, "[makeLocalGrantor] throwing Error", e);
       }
       throw e;
     } catch (RuntimeException e) {
       if (isDebugEnabled_DLS) {
-        logger.trace(LogMarker.DLS, "[makeLocalGrantor] throwing RuntimeException", e);
+        logger.trace(LogMarker.DLS_VERBOSE, "[makeLocalGrantor] throwing RuntimeException", e);
       }
       throw e;
     } finally {
@@ -509,8 +511,8 @@ public class DLockService extends DistributedLockService {
         // abort if unsuccessful or if lock service was destroyed
         if (!success || isDestroyed()) {
           if (isDebugEnabled_DLS) {
-            logger.trace(LogMarker.DLS, "[makeLocalGrantor] aborting {} and {}", myLockGrantorId,
-                myGrantor);
+            logger.trace(LogMarker.DLS_VERBOSE, "[makeLocalGrantor] aborting {} and {}",
+                myLockGrantorId, myGrantor);
           }
           nullLockGrantorId(myLockGrantorId);
           if (!myGrantor.isDestroyed()) {
@@ -521,11 +523,11 @@ public class DLockService extends DistributedLockService {
         // assertion: grantor should now be either ready or destroyed!
 
         if (myGrantor.isInitializing() && !dm.getCancelCriterion().isCancelInProgress()) {
-          logger.error(LogMarker.DLS,
+          logger.error(LogMarker.DLS_MARKER,
               LocalizedMessage.create(LocalizedStrings.DLockService_GRANTOR_IS_STILL_INITIALIZING));
         }
         if (!success && !myGrantor.isDestroyed() && !dm.getCancelCriterion().isCancelInProgress()) {
-          logger.error(LogMarker.DLS, LocalizedMessage.create(
+          logger.error(LogMarker.DLS_MARKER, LocalizedMessage.create(
               LocalizedStrings.DLockService_GRANTOR_CREATION_WAS_ABORTED_BUT_GRANTOR_WAS_NOT_DESTROYED));
         }
       }
@@ -548,8 +550,8 @@ public class DLockService extends DistributedLockService {
       return true;
     } else if (newLockGrantorId.isRemote() && this.lockGrantorId != null
         && this.lockGrantorId.hasLockGrantorVersion()) {
-      if (logger.isTraceEnabled(LogMarker.DLS)) {
-        logger.trace(LogMarker.DLS, "[setLockGrantorId] tried to replace {} with {}",
+      if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+        logger.trace(LogMarker.DLS_VERBOSE, "[setLockGrantorId] tried to replace {} with {}",
             this.lockGrantorId, newLockGrantorId);
       }
       return false;
@@ -588,10 +590,10 @@ public class DLockService extends DistributedLockService {
    */
   void deposeOlderLockGrantorId(LockGrantorId newLockGrantorId) {
     LockGrantorId deposedLockGrantorId = null;
-    final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS);
+    final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS_VERBOSE);
     synchronized (this.lockGrantorIdLock) {
       if (isDebugEnabled_DLS) {
-        logger.trace(LogMarker.DLS, "[deposeOlderLockGrantorId] pre-deposing {} for new {}",
+        logger.trace(LogMarker.DLS_VERBOSE, "[deposeOlderLockGrantorId] pre-deposing {} for new {}",
             deposedLockGrantorId, newLockGrantorId);
       }
       this.deposingLockGrantorId = newLockGrantorId;
@@ -600,8 +602,9 @@ public class DLockService extends DistributedLockService {
     if (deposedLockGrantorId != null && deposedLockGrantorId.hasLockGrantorVersion()
         && newLockGrantorId.isNewerThan(deposedLockGrantorId)) {
       if (isDebugEnabled_DLS) {
-        logger.trace(LogMarker.DLS, "[deposeOlderLockGrantorId] post-deposing {} for new {}",
-            deposedLockGrantorId, newLockGrantorId);
+        logger.trace(LogMarker.DLS_VERBOSE,
+            "[deposeOlderLockGrantorId] post-deposing {} for new {}", deposedLockGrantorId,
+            newLockGrantorId);
       }
       nullLockGrantorId(deposedLockGrantorId);
     }
@@ -640,8 +643,9 @@ public class DLockService extends DistributedLockService {
       }
     } finally {
       if (grantorToDestroy != null) {
-        if (logger.isTraceEnabled(LogMarker.DLS)) {
-          logger.trace(LogMarker.DLS, "[nullLockGrantorId] destroying {}", grantorToDestroy);
+        if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+          logger.trace(LogMarker.DLS_VERBOSE, "[nullLockGrantorId] destroying {}",
+              grantorToDestroy);
         }
         grantorToDestroy.destroy();
       }
@@ -666,7 +670,6 @@ public class DLockService extends DistributedLockService {
   /**
    * Returns true if <code>someLockGrantor</code> equals the current {@link #lockGrantorId}.
    *
-   * @param someLockGrantor
    * @return true if someLockGrantor equals the current lockGrantorId
    */
   private boolean equalsLockGrantorId(LockGrantorId someLockGrantor) {
@@ -781,8 +784,8 @@ public class DLockService extends DistributedLockService {
   private void notLockGrantorId(LockGrantorId notLockGrantorId, long timeToWait,
       final TimeUnit timeUnit) {
     if (notLockGrantorId.isLocal(getSerialNumber())) {
-      if (logger.isTraceEnabled(LogMarker.DLS)) {
-        logger.trace(LogMarker.DLS,
+      if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+        logger.trace(LogMarker.DLS_VERBOSE,
             "notLockGrantorId {} returning early because notGrantor {} was equal to the local dm {}",
             this.serviceName, notLockGrantorId, this.dm.getId());
       }
@@ -838,9 +841,9 @@ public class DLockService extends DistributedLockService {
       } else {
         // elder says another member is the grantor
         nullLockGrantorId(notLockGrantorId);
-        if (logger.isTraceEnabled(LogMarker.DLS)) {
-          logger.trace(LogMarker.DLS, "notLockGrantorId cleared lockGrantorId for service {}",
-              this.serviceName);
+        if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+          logger.trace(LogMarker.DLS_VERBOSE,
+              "notLockGrantorId cleared lockGrantorId for service {}", this.serviceName);
         }
       }
     } finally {
@@ -942,7 +945,7 @@ public class DLockService extends DistributedLockService {
     boolean ownLockGrantorFutureResult = false;
     FutureResult lockGrantorFutureResultRef = null;
 
-    final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS);
+    final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS_VERBOSE);
     LockGrantorId myLockGrantorId = null;
     try { // finally handles lockGrantorFutureResult
 
@@ -963,7 +966,8 @@ public class DLockService extends DistributedLockService {
             ownLockGrantorFutureResult = true;
             lockGrantorFutureResultRef = new FutureResult(this.dm.getCancelCriterion());
             if (isDebugEnabled_DLS) {
-              logger.trace(LogMarker.DLS, "[becomeLockGrantor] creating lockGrantorFutureResult");
+              logger.trace(LogMarker.DLS_VERBOSE,
+                  "[becomeLockGrantor] creating lockGrantorFutureResult");
             }
             this.lockGrantorFutureResult = lockGrantorFutureResultRef;
           }
@@ -991,7 +995,7 @@ public class DLockService extends DistributedLockService {
         }
 
         if (isDebugEnabled_DLS) {
-          logger.trace(LogMarker.DLS, "become set lockGrantorId to {} for service {}",
+          logger.trace(LogMarker.DLS_VERBOSE, "become set lockGrantorId to {} for service {}",
               this.lockGrantorId, this.serviceName);
         }
 
@@ -1009,7 +1013,7 @@ public class DLockService extends DistributedLockService {
             new LockGrantorId(this.dm, this.dm.getId(), myGrantorVersion, getSerialNumber());
 
         if (isDebugEnabled_DLS) {
-          logger.trace(LogMarker.DLS, "[becomeLockGrantor] Calling makeLocalGrantor");
+          logger.trace(LogMarker.DLS_VERBOSE, "[becomeLockGrantor] Calling makeLocalGrantor");
         }
         if (!makeLocalGrantor(elder, needsRecovery, myLockGrantorId, myGrantor)) {
           return;
@@ -1105,8 +1109,8 @@ public class DLockService extends DistributedLockService {
       if (token != null) {
         synchronized (token) {
           if (!token.isBeingUsed()) {
-            if (logger.isTraceEnabled(LogMarker.DLS)) {
-              logger.trace(LogMarker.DLS, "Freeing {} in {}", token, this);
+            if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+              logger.trace(LogMarker.DLS_VERBOSE, "Freeing {} in {}", token, this);
             }
             removeTokenFromMap(name);
             token.destroy();
@@ -1140,8 +1144,8 @@ public class DLockService extends DistributedLockService {
         DLockToken token = (DLockToken) iter.next();
         synchronized (token) {
           if (!token.isBeingUsed()) {
-            if (logger.isTraceEnabled(LogMarker.DLS)) {
-              logger.trace(LogMarker.DLS, "Freeing {} in {}", token, this);
+            if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+              logger.trace(LogMarker.DLS_VERBOSE, "Freeing {} in {}", token, this);
             }
             if (unusedTokens == Collections.EMPTY_SET) {
               unusedTokens = new HashSet();
@@ -1248,7 +1252,7 @@ public class DLockService extends DistributedLockService {
     } catch (InterruptedException ex) { // LOST INTERRUPT
       Thread.currentThread().interrupt();
       // fail assertion
-      logger.error(LogMarker.DLS,
+      logger.error(LogMarker.DLS_MARKER,
           LocalizedMessage.create(LocalizedStrings.DLockService_LOCK_WAS_INTERRUPTED), ex);
       Assert.assertTrue(false, "lock() was interrupted: " + ex.getMessage());
     }
@@ -1390,7 +1394,9 @@ public class DLockService extends DistributedLockService {
       if (waitLimit < 0)
         waitLimit = Long.MAX_VALUE;
 
-      logger.trace(LogMarker.DLS, "{}, name: {} - entering lock()", this, name);
+      if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+        logger.trace(LogMarker.DLS_VERBOSE, "{}, name: {} - entering lock()", this, name);
+      }
 
       DLockToken token = getOrCreateToken(name);
       boolean gotLock = false;
@@ -1427,8 +1433,10 @@ public class DLockService extends DistributedLockService {
           synchronized (token) {
             token.checkForExpiration();
             if (token.isLeaseHeldByCurrentThread()) {
-              logger.trace(LogMarker.DLS, "{} , name: {} - lock() is reentrant: {}", this, name,
-                  token);
+              if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+                logger.trace(LogMarker.DLS_VERBOSE, "{} , name: {} - lock() is reentrant: {}", this,
+                    name, token);
+              }
               reentrant = true;
               if (reentrant && disallowReentrant) {
                 throw new IllegalStateException(
@@ -1474,8 +1482,10 @@ public class DLockService extends DistributedLockService {
           gotLock = processor.requestLock(interruptible, lockId); // can throw
                                                                   // InterruptedException
 
-          logger.trace(LogMarker.DLS, "Grantor {} replied {}", theLockGrantorId,
-              processor.getResponseCodeString());
+          if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+            logger.trace(LogMarker.DLS_VERBOSE, "Grantor {} replied {}", theLockGrantorId,
+                processor.getResponseCodeString());
+          }
 
           if (gotLock) {
             leaseExpireTime = processor.getLeaseExpireTime();
@@ -1486,7 +1496,10 @@ public class DLockService extends DistributedLockService {
               continue;
             }
 
-            logger.trace(LogMarker.DLS, "{}, name: {} - granted lock: {}", this, name, token);
+            if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+              logger.trace(LogMarker.DLS_VERBOSE, "{}, name: {} - granted lock: {}", this, name,
+                  token);
+            }
             keepTrying = false;
           } else if (processor.repliedDestroyed()) {
             checkDestroyed(); // throws LockServiceDestroyedException
@@ -1509,7 +1522,7 @@ public class DLockService extends DistributedLockService {
               if (token.isLeaseHeldByCurrentThread()) {
                 // THIS SHOULDN'T HAPPEN -- some sort of weird consistency
                 // problem. Do what the grantor says and release the lock...
-                logger.warn(LogMarker.DLS,
+                logger.warn(LogMarker.DLS_MARKER,
                     LocalizedMessage.create(
                         LocalizedStrings.DLockService_GRANTOR_REPORTS_REENTRANT_LOCK_NOT_HELD_0,
                         token));
@@ -1523,7 +1536,6 @@ public class DLockService extends DistributedLockService {
             } // token sync
           } // grantor replied NOT_HOLDER for reentrant lock
 
-          // TODO: figure out when this else case can actually happen...
           else {
             // either dlock service is suspended or tryLock failed
             // fixed the math here... bug 32765
@@ -1558,12 +1570,16 @@ public class DLockService extends DistributedLockService {
         blockedOn.set(null);
       }
 
-      logger.trace(LogMarker.DLS, "{}, name: {} - exiting lock() returning {}", this, name,
-          gotLock);
+      if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+        logger.trace(LogMarker.DLS_VERBOSE, "{}, name: {} - exiting lock() returning {}", this,
+            name, gotLock);
+      }
       return gotLock;
     } finally {
-      logger.trace(LogMarker.DLS, "{}, name: {} - exiting lock() without returning value", this,
-          name);
+      if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+        logger.trace(LogMarker.DLS_VERBOSE, "{}, name: {} - exiting lock() without returning value",
+            this, name);
+      }
       if (interrupted) {
         Thread.currentThread().interrupt();
       }
@@ -1579,13 +1595,18 @@ public class DLockService extends DistributedLockService {
     synchronized (this.lockGrantorIdLock) {
       if (!checkLockGrantorId(theLockGrantorId)) {
         // race: grantor changed
-        logger.trace(LogMarker.DLS, "Cannot honor grant from {} because {} is now a grantor.",
-            theLockGrantorId, this.lockGrantorId);
+        if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+          logger.trace(LogMarker.DLS_VERBOSE,
+              "Cannot honor grant from {} because {} is now a grantor.", theLockGrantorId,
+              this.lockGrantorId);
+        }
       } else if (isDestroyed()) {
         // race: dls was destroyed
-        logger.trace(LogMarker.DLS,
-            "Cannot honor grant from {} because this lock service has been destroyed.",
-            theLockGrantorId);
+        if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+          logger.trace(LogMarker.DLS_VERBOSE,
+              "Cannot honor grant from {} because this lock service has been destroyed.",
+              theLockGrantorId);
+        }
         needToReleaseOrphanedGrant = true;
       } else {
         synchronized (this.tokens) {
@@ -1704,22 +1725,12 @@ public class DLockService extends DistributedLockService {
       if (waitLimit < 0)
         waitLimit = Long.MAX_VALUE;
 
-      // try {
-      // we're now using a tryLock, but we need to keep trying until wait time
-      // is used up or we're interrupted...
       while (!gotToken && keepTrying) {
         gotToken =
             lockInterruptibly(suspendToken, waitTimeMillis, -1, tryLock, interruptible, false);
         keepTrying = !gotToken && waitLimit > System.currentTimeMillis();
       }
       return gotToken;
-      // }
-      // finally {
-      // synchronized(this.lockingSuspendedMonitor) {
-      // this.lockingSuspendedMonitor.notifyAll();
-      // }
-      // }
-
     } finally {
       if (wasInterrupted) {
         Thread.currentThread().interrupt();
@@ -1729,18 +1740,18 @@ public class DLockService extends DistributedLockService {
 
   @Override
   public void unlock(Object name) throws LockNotHeldException, LeaseExpiredException {
-    final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS);
+    final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS_VERBOSE);
 
     if (this.ds.isDisconnectListenerThread()) {
       if (isDebugEnabled_DLS) {
-        logger.trace(LogMarker.DLS, "{}, name: {} - disconnect listener thread is exiting unlock()",
-            this, name);
+        logger.trace(LogMarker.DLS_VERBOSE,
+            "{}, name: {} - disconnect listener thread is exiting unlock()", this, name);
       }
       return;
     }
 
     if (isDebugEnabled_DLS) {
-      logger.trace(LogMarker.DLS, "{}, name: {} - entering unlock()", this, name);
+      logger.trace(LogMarker.DLS_VERBOSE, "{}, name: {} - entering unlock()", this, name);
     }
 
     long statStart = getStats().startLockRelease();
@@ -1758,7 +1769,7 @@ public class DLockService extends DistributedLockService {
         token = basicGetToken(name);
         if (token == null) {
           if (isDebugEnabled_DLS) {
-            logger.trace(LogMarker.DLS, "{}, [unlock] no token found for: {}", this, name);
+            logger.trace(LogMarker.DLS_VERBOSE, "{}, [unlock] no token found for: {}", this, name);
           }
           throw new LockNotHeldException(
               LocalizedStrings.DLockService_ATTEMPTING_TO_UNLOCK_0_1_BUT_THIS_THREAD_DOESNT_OWN_THE_LOCK
@@ -1771,8 +1782,8 @@ public class DLockService extends DistributedLockService {
           if (!token.isLeaseHeldByCurrentOrRemoteThread(rThread)) {
             token.throwIfCurrentThreadHadExpiredLease();
             if (isDebugEnabled_DLS) {
-              logger.trace(LogMarker.DLS, "{}, [unlock] {} not leased by this thread.", this,
-                  token);
+              logger.trace(LogMarker.DLS_VERBOSE, "{}, [unlock] {} not leased by this thread.",
+                  this, token);
             }
             throw new LockNotHeldException(
                 LocalizedStrings.DLockService_ATTEMPTING_TO_UNLOCK_0_1_BUT_THIS_THREAD_DOESNT_OWN_THE_LOCK_2
@@ -1836,7 +1847,7 @@ public class DLockService extends DistributedLockService {
           freeResources(name);
         }
         if (isDebugEnabled_DLS) {
-          logger.trace(LogMarker.DLS, "{}, name: {} - exiting unlock()", this, name);
+          logger.trace(LogMarker.DLS_VERBOSE, "{}, name: {} - exiting unlock()", this, name);
         }
       }
     }
@@ -1850,33 +1861,26 @@ public class DLockService extends DistributedLockService {
    * @throws LockServiceDestroyedException if local instance of lock service has been destroyed
    */
   public DLockRemoteToken queryLock(final Object name) {
-    // long statStart = getStats().startLockRelease();
-    try {
 
-      DLockQueryReplyMessage queryReply = null;
-      while (queryReply == null || queryReply.repliedNotGrantor()) {
-        checkDestroyed();
-        // TODO: consider using peekLockGrantor instead...
-        LockGrantorId theLockGrantorId = getLockGrantorId();
-        try {
-          queryReply = DLockQueryProcessor.query(theLockGrantorId.getLockGrantorMember(),
-              this.serviceName, name, false /* lockBatch */, this.dm);
-        } catch (LockGrantorDestroyedException e) {
-          // loop back around to get next lock grantor
-        } finally {
-          if (queryReply != null && queryReply.repliedNotGrantor()) {
-            notLockGrantorId(theLockGrantorId, 0, TimeUnit.MILLISECONDS);
-          }
+    DLockQueryReplyMessage queryReply = null;
+    while (queryReply == null || queryReply.repliedNotGrantor()) {
+      checkDestroyed();
+      LockGrantorId theLockGrantorId = getLockGrantorId();
+      try {
+        queryReply = DLockQueryProcessor.query(theLockGrantorId.getLockGrantorMember(),
+            this.serviceName, name, false /* lockBatch */, this.dm);
+      } catch (LockGrantorDestroyedException e) {
+        // loop back around to get next lock grantor
+      } finally {
+        if (queryReply != null && queryReply.repliedNotGrantor()) {
+          notLockGrantorId(theLockGrantorId, 0, TimeUnit.MILLISECONDS);
         }
-      } // while querying
+      }
+    } // while querying
 
-      return DLockRemoteToken.create(name, queryReply.getLesseeThread(), queryReply.getLeaseId(),
-          queryReply.getLeaseExpireTime());
+    return DLockRemoteToken.create(name, queryReply.getLesseeThread(), queryReply.getLeaseId(),
+        queryReply.getLeaseExpireTime());
 
-    } // try
-    finally {
-      // getStats().endLockRelease(statStart);
-    }
   }
 
   // -------------------------------------------------------------------------
@@ -1897,8 +1901,9 @@ public class DLockService extends DistributedLockService {
       throws IllegalArgumentException {
     Assert.assertHoldsLock(services, true);
 
-    if (logger.isTraceEnabled(LogMarker.DLS)) {
-      logger.trace(LogMarker.DLS, "About to create DistributedLockService <{}>", serviceName);
+    if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+      logger.trace(LogMarker.DLS_VERBOSE, "About to create DistributedLockService <{}>",
+          serviceName);
     }
 
     DLockService svc = new DLockService(serviceName, ds, isDistributed, destroyOnDisconnect,
@@ -1915,8 +1920,9 @@ public class DLockService extends DistributedLockService {
       getStats().incServices(1);
       this.ds.addDisconnectListener(disconnectListener);
       success = true;
-      if (logger.isTraceEnabled(LogMarker.DLS)) {
-        logger.trace(LogMarker.DLS, "Created DistributedLockService <{}>", this.serviceName);
+      if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+        logger.trace(LogMarker.DLS_VERBOSE, "Created DistributedLockService <{}>",
+            this.serviceName);
       }
     } finally {
       if (!success) {
@@ -1925,7 +1931,6 @@ public class DLockService extends DistributedLockService {
       }
     }
 
-    /** Added For M&M **/
     ds.handleResourceEvent(ResourceEvent.LOCKSERVICE_CREATE, this);
 
     return success;
@@ -1976,9 +1981,9 @@ public class DLockService extends DistributedLockService {
   }
 
   public void releaseTryLocks(DLockBatchId batchId, Callable<Boolean> untilCondition) {
-    final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS);
+    final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS_VERBOSE);
     if (isDebugEnabled_DLS) {
-      logger.trace(LogMarker.DLS, "[DLockService.releaseTryLocks] enter: {}", batchId);
+      logger.trace(LogMarker.DLS_VERBOSE, "[DLockService.releaseTryLocks] enter: {}", batchId);
     }
 
     long statStart = getStats().startLockRelease();
@@ -2016,7 +2021,7 @@ public class DLockService extends DistributedLockService {
       decActiveLocks();
       getStats().endLockRelease(statStart);
       if (isDebugEnabled_DLS) {
-        logger.trace(LogMarker.DLS, "[DLockService.releaseTryLocks] exit: {}", batchId);
+        logger.trace(LogMarker.DLS_VERBOSE, "[DLockService.releaseTryLocks] exit: {}", batchId);
       }
     }
   }
@@ -2034,9 +2039,9 @@ public class DLockService extends DistributedLockService {
 
     long startTime = getLockTimeStamp(dm);
 
-    final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS);
+    final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS_VERBOSE);
     if (isDebugEnabled_DLS) {
-      logger.trace(LogMarker.DLS, "[acquireTryLocks] acquiring {}", dlockBatch);
+      logger.trace(LogMarker.DLS_VERBOSE, "[acquireTryLocks] acquiring {}", dlockBatch);
     }
 
     long requestWaitTime = waitTimeMillis;
@@ -2092,7 +2097,7 @@ public class DLockService extends DistributedLockService {
           keyIfFailed[0] = processor.getKeyIfFailed();
           if (keyIfFailed[0] == null) {
             if (isDebugEnabled_DLS) {
-              logger.trace(LogMarker.DLS,
+              logger.trace(LogMarker.DLS_VERBOSE,
                   "[acquireTryLocks] lock request failed but provided no conflict key; responseCode=",
                   processor.getResponseCodeString());
             }
@@ -2118,17 +2123,10 @@ public class DLockService extends DistributedLockService {
       }
 
       if (isDebugEnabled_DLS) {
-        logger.trace(LogMarker.DLS, "[acquireTryLocks] {} locks for {}",
+        logger.trace(LogMarker.DLS_VERBOSE, "[acquireTryLocks] {} locks for {}",
             (gotLocks ? "acquired" : "failed to acquire"), dlockBatch);
       }
-    }
-    // catch (Error e) {
-    // gotLocks = false;
-    // }
-    // catch (RuntimeException e) {
-    // gotLocks = false;
-    // }
-    finally {
+    } finally {
       getStats().endLockWait(statStart, gotLocks);
     }
     return gotLocks;
@@ -2230,9 +2228,9 @@ public class DLockService extends DistributedLockService {
       try {
         svc.destroyAndRemove();
       } catch (CancelException e) {
-        if (logger.isTraceEnabled(LogMarker.DLS)) {
-          logger.trace(LogMarker.DLS, "destroyAndRemove of {} terminated due to cancellation: ",
-              svc, e);
+        if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+          logger.trace(LogMarker.DLS_VERBOSE,
+              "destroyAndRemove of {} terminated due to cancellation: ", svc, e);
         }
       }
     }
@@ -2302,9 +2300,9 @@ public class DLockService extends DistributedLockService {
       boolean maybeHasActiveLocks) {
     Assert.assertHoldsLock(services, false);
     // synchronized (this.serviceLock) {
-    final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS);
+    final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS_VERBOSE);
     if (isDebugEnabled_DLS) {
-      logger.trace(LogMarker.DLS,
+      logger.trace(LogMarker.DLS_VERBOSE,
           "[DLockService.basicDestroy] Destroying {}, isCurrentlyLockGrantor={}, isMakingLockGrantor={}",
           this, isCurrentlyLockGrantor, isMakingLockGrantor);
     }
@@ -2329,7 +2327,7 @@ public class DLockService extends DistributedLockService {
         if (DEBUG_NONGRANTOR_DESTROY_LOOP) {
           nonGrantorDestroyLoopCount++;
           if (nonGrantorDestroyLoopCount >= DEBUG_NONGRANTOR_DESTROY_LOOP_COUNT) {
-            logger.fatal(LogMarker.DLS,
+            logger.fatal(LogMarker.DLS_MARKER,
                 LocalizedMessage.create(
                     LocalizedStrings.DLockService_FAILED_TO_NOTIFY_GRANTOR_OF_DESTRUCTION_WITHIN_0_ATTEMPTS,
                     Integer.valueOf(DEBUG_NONGRANTOR_DESTROY_LOOP_COUNT)));
@@ -2355,7 +2353,8 @@ public class DLockService extends DistributedLockService {
           Thread.currentThread().interrupt();
         } catch (DistributedSystemDisconnectedException e) {
           if (isDebugEnabled_DLS) {
-            logger.trace(LogMarker.DLS, "No longer waiting for grantor because of disconnect.", e);
+            logger.trace(LogMarker.DLS_VERBOSE,
+                "No longer waiting for grantor because of disconnect.", e);
           }
         }
       }
@@ -2365,9 +2364,7 @@ public class DLockService extends DistributedLockService {
   }
 
   protected void postDestroyAction() {
-    /** Added for M&M **/
     ds.handleResourceEvent(ResourceEvent.LOCKSERVICE_REMOVE, this);
-
   }
 
   // -------------------------------------------------------------------------
@@ -2403,7 +2400,7 @@ public class DLockService extends DistributedLockService {
       nullLockGrantorId(currentLockGrantorId);
     }
 
-    final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS);
+    final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS_VERBOSE);
     synchronized (this.lockGrantorIdLock) {
       synchronized (this.tokens) {
         // build up set of currently held locks
@@ -2416,7 +2413,8 @@ public class DLockService extends DistributedLockService {
               if (token.ignoreForRecovery()) {
                 // unlock of token must be in progress... ignore for recovery
                 if (isDebugEnabled_DLS) {
-                  logger.trace(LogMarker.DLS, "getLockTokensForRecovery is skipping {}", token);
+                  logger.trace(LogMarker.DLS_VERBOSE, "getLockTokensForRecovery is skipping {}",
+                      token);
                 }
               }
 
@@ -2486,8 +2484,8 @@ public class DLockService extends DistributedLockService {
       synchronized (token) {
         if (createNewToken) {
           this.tokens.put(name, token);
-          if (logger.isTraceEnabled(LogMarker.DLS)) {
-            logger.trace(LogMarker.DLS, "Creating {} in {}", token, this);
+          if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+            logger.trace(LogMarker.DLS_VERBOSE, "Creating {} in {}", token, this);
           }
           getStats().incTokens(1);
         }
@@ -2537,7 +2535,7 @@ public class DLockService extends DistributedLockService {
         DLockToken token = (DLockToken) entry.getValue();
         buffer.append(token.toString()).append("\n");
       }
-      logger.info(LogMarker.DLS, LocalizedMessage.create(LocalizedStrings.ONE_ARG, buffer));
+      logger.info(LogMarker.DLS_MARKER, LocalizedMessage.create(LocalizedStrings.ONE_ARG, buffer));
     }
   }
 
@@ -2592,9 +2590,9 @@ public class DLockService extends DistributedLockService {
         }
 
         public void onDisconnect(final InternalDistributedSystem sys) {
-          final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS);
+          final boolean isDebugEnabled_DLS = logger.isTraceEnabled(LogMarker.DLS_VERBOSE);
           if (isDebugEnabled_DLS) {
-            logger.trace(LogMarker.DLS, "Shutting down Distributed Lock Services");
+            logger.trace(LogMarker.DLS_VERBOSE, "Shutting down Distributed Lock Services");
           }
           long start = System.currentTimeMillis();
           try {
@@ -2603,7 +2601,8 @@ public class DLockService extends DistributedLockService {
             closeStats();
             long delta = System.currentTimeMillis() - start;
             if (isDebugEnabled_DLS) {
-              logger.trace(LogMarker.DLS, "Distributed Lock Services stopped (took {} ms)", delta);
+              logger.trace(LogMarker.DLS_VERBOSE, "Distributed Lock Services stopped (took {} ms)",
+                  delta);
             }
           }
         }
@@ -2890,7 +2889,7 @@ public class DLockService extends DistributedLockService {
   public static void dumpAllServices() { // used by: distributed/DistributedLockServiceTest
     StringBuffer buffer = new StringBuffer();
     synchronized (services) {
-      logger.info(LogMarker.DLS, LocalizedMessage.create(LocalizedStrings.TESTING,
+      logger.info(LogMarker.DLS_MARKER, LocalizedMessage.create(LocalizedStrings.TESTING,
           "DLockService.dumpAllServices() - " + services.size() + " services:\n"));
       Iterator entries = services.entrySet().iterator();
       while (entries.hasNext()) {
@@ -2987,8 +2986,8 @@ public class DLockService extends DistributedLockService {
   /** Provide way to peek at current lock grantor id when dls does not exist */
   static GrantorInfo checkLockGrantorInfo(String serviceName, InternalDistributedSystem system) {
     GrantorInfo gi = GrantorRequestProcessor.peekGrantor(serviceName, system);
-    if (logger.isTraceEnabled(LogMarker.DLS)) {
-      logger.trace(LogMarker.DLS, "[checkLockGrantorId] returning {}", gi);
+    if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+      logger.trace(LogMarker.DLS_VERBOSE, "[checkLockGrantorId] returning {}", gi);
     }
     return gi;
   }
@@ -3054,11 +3053,6 @@ public class DLockService extends DistributedLockService {
     private final DLockService dls;
 
     /**
-     * True if this stopper initiated cancellation for DLS destroy.
-     */
-    // private boolean stoppedByDLS = false; // used by single thread
-
-    /**
      * Creates a new DLockStopper for the specified DLockService and DM.
      *
      * @param dm the DM to check for shutdown
@@ -3094,10 +3088,6 @@ public class DLockService extends DistributedLockService {
         return null;
       }
       return this.dls.generateLockServiceDestroyedException(reason);
-      // if (this.stoppedByDLS) { // set and checked by same thread
-      // return this.dls.generateLockServiceDestroyedException(reason);
-      // }
-      // return new DistributedSystemDisconnectedException(reason, e);
     }
 
   }
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockToken.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockToken.java
index 1a1a68d..e9b5871 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockToken.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DLockToken.java
@@ -280,9 +280,9 @@ public class DLockToken {
 
       long currentTime = getCurrentTime();
       if (currentTime > this.leaseExpireTime) {
-        if (logger.isTraceEnabled(LogMarker.DLS)) {
-          logger.trace(LogMarker.DLS, "[checkForExpiration] Expiring token at {}: {}", currentTime,
-              this);
+        if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+          logger.trace(LogMarker.DLS_VERBOSE, "[checkForExpiration] Expiring token at {}: {}",
+              currentTime, this);
         }
         noteExpiredLease();
         basicReleaseLock();
@@ -318,8 +318,8 @@ public class DLockToken {
     this.recursion = newRecursion;
     this.thread = Thread.currentThread();
 
-    if (logger.isTraceEnabled(LogMarker.DLS)) {
-      logger.trace(LogMarker.DLS, "[DLockToken.grantLock.client] granted {}", this);
+    if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+      logger.trace(LogMarker.DLS_VERBOSE, "[DLockToken.grantLock.client] granted {}", this);
     }
   }
 
@@ -415,8 +415,9 @@ public class DLockToken {
     else if (decRecursion && getRecursion() > 0) {
       incRecursion(-1);
       decUsage();
-      if (logger.isTraceEnabled(LogMarker.DLS)) {
-        logger.trace(LogMarker.DLS, "[DLockToken.releaseLock] decremented recursion: {}", this);
+      if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+        logger.trace(LogMarker.DLS_VERBOSE, "[DLockToken.releaseLock] decremented recursion: {}",
+            this);
       }
       return true;
     }
@@ -433,8 +434,9 @@ public class DLockToken {
    * token.
    */
   private void basicReleaseLock() {
-    if (logger.isTraceEnabled(LogMarker.DLS)) {
-      logger.trace(LogMarker.DLS, "[DLockToken.basicReleaseLock] releasing ownership: {}", this);
+    if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+      logger.trace(LogMarker.DLS_VERBOSE, "[DLockToken.basicReleaseLock] releasing ownership: {}",
+          this);
     }
 
     this.leaseId = -1;
@@ -510,8 +512,8 @@ public class DLockToken {
    * lock token.
    */
   private void noteExpiredLease() {
-    if (logger.isTraceEnabled(LogMarker.DLS)) {
-      logger.trace(LogMarker.DLS, "[noteExpiredLease] {}", this.thread);
+    if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+      logger.trace(LogMarker.DLS_VERBOSE, "[noteExpiredLease] {}", this.thread);
     }
     if (this.expiredLeases == null) {
       this.expiredLeases = new WeakHashMap();
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DeposeGrantorProcessor.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DeposeGrantorProcessor.java
index ff22f2c..9f067ef 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DeposeGrantorProcessor.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/DeposeGrantorProcessor.java
@@ -117,8 +117,9 @@ public class DeposeGrantorProcessor extends ReplyProcessor21 {
       msg.newGrantorSerialNumber = newGrantorSerialNumber;
       msg.processorId = proc.getProcessorId();
       msg.setRecipient(oldGrantor);
-      if (logger.isTraceEnabled(LogMarker.DLS)) {
-        logger.trace(LogMarker.DLS, "DeposeGrantorMessage sending {} to {}", msg, oldGrantor);
+      if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+        logger.trace(LogMarker.DLS_VERBOSE, "DeposeGrantorMessage sending {} to {}", msg,
+            oldGrantor);
       }
       dm.putOutgoing(msg);
     }
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/ElderInitProcessor.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/ElderInitProcessor.java
index 5d28831..3dbf43a 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/ElderInitProcessor.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/ElderInitProcessor.java
@@ -152,8 +152,8 @@ public class ElderInitProcessor extends ReplyProcessor21 {
       ElderInitMessage msg = new ElderInitMessage();
       msg.processorId = proc.getProcessorId();
       msg.setRecipients(others);
-      if (logger.isTraceEnabled(LogMarker.DLS)) {
-        logger.trace(LogMarker.DLS, "ElderInitMessage sending {} to {}", msg, others);
+      if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+        logger.trace(LogMarker.DLS_VERBOSE, "ElderInitMessage sending {} to {}", msg, others);
       }
       dm.putOutgoing(msg);
     }
@@ -179,22 +179,18 @@ public class ElderInitProcessor extends ReplyProcessor21 {
         GrantorRequestProcessor.readyForElderRecovery(dm.getSystem(), this.getSender(), null);
         DLockService.recoverRmtElder(grantors, grantorVersions, grantorSerialNumbers, nonGrantors);
         reply(dm, grantors, grantorVersions, grantorSerialNumbers, nonGrantors);
-      } else if (dm.getOtherNormalDistributionManagerIds().isEmpty()) { // bug 38690
-                                                                        // Either we're alone (and
-                                                                        // received a message from
-                                                                        // an unknown member)
-                                                                        // or else we haven't yet
-                                                                        // processed a view, In
-                                                                        // either case, we clearly
-                                                                        // don't have any grantors,
-                                                                        // so we return empty lists.
-        logger.info(LogMarker.DLS,
+      } else if (dm.getOtherNormalDistributionManagerIds().isEmpty()) {
+        // Either we're alone (and received a message from an unknown member) or else we haven't
+        // yet processed a view. In either case, we clearly don't have any grantors,
+        // so we return empty lists.
+
+        logger.info(LogMarker.DLS_MARKER,
             LocalizedMessage.create(
                 LocalizedStrings.ElderInitProcessor__0_RETURNING_EMPTY_LISTS_BECAUSE_I_KNOW_OF_NO_OTHER_MEMBERS,
                 this));
         reply(dm, grantors, grantorVersions, grantorSerialNumbers, nonGrantors);
-      } else { // TODO make this fine level?
-        logger.info(LogMarker.DLS, LocalizedMessage.create(
+      } else {
+        logger.info(LogMarker.DLS_MARKER, LocalizedMessage.create(
             LocalizedStrings.ElderInitProcessor_0_DISREGARDING_REQUEST_FROM_DEPARTED_MEMBER, this));
       }
     }
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/ElderState.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/ElderState.java
index 994ddc1..0b019d9 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/ElderState.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/ElderState.java
@@ -54,47 +54,39 @@ public class ElderState {
     try {
       this.dm.getStats().incElders(1);
       ElderInitProcessor.init(this.dm, this.nameToInfo);
-    } catch (NullPointerException e) {
+    } catch (NullPointerException | InternalGemFireError e) {
       try {
         checkForProblem(dm);
       } finally {
-        if (true)
-          throw e; // conditional prevents eclipse warning
-      }
-    } catch (InternalGemFireError e) {
-      try {
-        checkForProblem(dm);
-      } finally {
-        if (true)
-          throw e; // conditional prevents eclipse warning
+        throw e;
       }
     } finally {
-      if (logger.isTraceEnabled(LogMarker.DLS)) {
+      if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
         StringBuffer sb = new StringBuffer("ElderState initialized with:");
         for (Iterator grantors = this.nameToInfo.keySet().iterator(); grantors.hasNext();) {
           Object key = grantors.next();
           // key=dlock svc name, value=GrantorInfo object
           sb.append("\n\t" + key + ": " + this.nameToInfo.get(key));
         }
-        logger.trace(LogMarker.DLS, sb.toString());
+        logger.trace(LogMarker.DLS_VERBOSE, sb.toString());
       }
     }
   }
 
   private void checkForProblem(DistributionManager checkDM) {
     if (checkDM.getSystem() == null) {
-      logger.warn(LogMarker.DLS, LocalizedMessage
+      logger.warn(LogMarker.DLS_MARKER, LocalizedMessage
           .create(LocalizedStrings.ElderState_ELDERSTATE_PROBLEM_SYSTEM_0, checkDM.getSystem()));
       return;
     }
     if (checkDM.getSystem().getDistributionManager() == null) {
-      logger.warn(LogMarker.DLS,
+      logger.warn(LogMarker.DLS_MARKER,
           LocalizedMessage.create(
               LocalizedStrings.ElderState_ELDERSTATE_PROBLEM_SYSTEM_DISTRIBUTIONMANAGER_0,
               checkDM.getSystem().getDistributionManager()));
     }
     if (checkDM != checkDM.getSystem().getDistributionManager()) {
-      logger.warn(LogMarker.DLS,
+      logger.warn(LogMarker.DLS_MARKER,
           LocalizedMessage.create(
               LocalizedStrings.ElderState_ELDERSTATE_PROBLEM_DM_0_BUT_SYSTEM_DISTRIBUTIONMANAGER_1,
               new Object[] {checkDM, checkDM.getSystem().getDistributionManager()}));
@@ -123,8 +115,8 @@ public class ElderState {
             && this.dm.getDistributionManagerIds().contains(currentGrantor)) {
           return gi;
         } else {
-          if (logger.isTraceEnabled(LogMarker.DLS)) {
-            logger.trace(LogMarker.DLS, "Elder setting grantor for {} to {} because {} ",
+          if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+            logger.trace(LogMarker.DLS_VERBOSE, "Elder setting grantor for {} to {} because {} ",
                 serviceName, requestor, (currentGrantor != null ? "current grantor crashed"
                     : "of unclean grantor shutdown"));
           }
@@ -135,8 +127,8 @@ public class ElderState {
           return new GrantorInfo(requestor, myVersion, dlsSerialNumberRequestor, true);
         }
       } else {
-        if (logger.isTraceEnabled(LogMarker.DLS)) {
-          logger.trace(LogMarker.DLS,
+        if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+          logger.trace(LogMarker.DLS_VERBOSE,
               "Elder setting grantor for {} to {} because of clean grantor shutdown", serviceName,
               requestor);
         }
@@ -209,8 +201,8 @@ public class ElderState {
 
               // problem: specified oldTurk is not previousGrantor...
               if (oldTurk != null && !oldTurk.equals(previousGrantor)) {
-                if (logger.isTraceEnabled(LogMarker.DLS)) {
-                  logger.trace(LogMarker.DLS,
+                if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+                  logger.trace(LogMarker.DLS_VERBOSE,
                       "Elder did not become grantor for {} to {} because oldT was {} and the current grantor is {}",
                       serviceName, newGrantor, oldTurk, previousGrantor);
                 }
@@ -219,8 +211,8 @@ public class ElderState {
               // no oldTurk or oldTurk matches previousGrantor... transfer might occur
               else {
                 // install new grantor
-                if (logger.isTraceEnabled(LogMarker.DLS)) {
-                  logger.trace(LogMarker.DLS, "Elder forced to set grantor for {} to {}",
+                if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+                  logger.trace(LogMarker.DLS_VERBOSE, "Elder forced to set grantor for {} to {}",
                       serviceName, newGrantor);
                 }
                 long myVersion = gi.getVersionId() + 1;
@@ -245,8 +237,8 @@ public class ElderState {
 
             // problem: oldTurk was specified but there is no previousGrantor...
             if (oldTurk != null) {
-              if (logger.isTraceEnabled(LogMarker.DLS)) {
-                logger.trace(LogMarker.DLS,
+              if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+                logger.trace(LogMarker.DLS_VERBOSE,
                     "Elder did not become grantor for {} to {} because oldT was {} and the current grantor {} had crashed",
                     serviceName, newGrantor, oldTurk, previousGrantor);
               }
@@ -254,8 +246,8 @@ public class ElderState {
 
             // no oldTurk was specified...
             else {
-              if (logger.isTraceEnabled(LogMarker.DLS)) {
-                logger.trace(LogMarker.DLS,
+              if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+                logger.trace(LogMarker.DLS_VERBOSE,
                     "Elder forced to set grantor for {} to {} and noticed previous grantor had crashed",
                     serviceName, newGrantor);
               }
@@ -272,8 +264,8 @@ public class ElderState {
         else {
           // problem: no oldTurk was specified
           if (oldTurk != null) {
-            if (logger.isTraceEnabled(LogMarker.DLS)) {
-              logger.trace(LogMarker.DLS,
+            if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+              logger.trace(LogMarker.DLS_VERBOSE,
                   "Elder did not become grantor for {} to {} because oldT was {} and elder had no current grantor",
                   serviceName, newGrantor, oldTurk);
             }
@@ -281,8 +273,8 @@ public class ElderState {
 
           // no oldTurk was specified
           else {
-            if (logger.isTraceEnabled(LogMarker.DLS)) {
-              logger.trace(LogMarker.DLS,
+            if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+              logger.trace(LogMarker.DLS_VERBOSE,
                   "Elder forced to set grantor for {} to {} because of clean grantor shutdown",
                   serviceName, newGrantor);
             }
@@ -340,14 +332,14 @@ public class ElderState {
         InternalDistributedMember currentGrantor = gi.getId();
         if (!oldGrantor.equals(currentGrantor)) { // fix for 32603
           this.nameToInfo.put(serviceName, gi);
-          if (logger.isTraceEnabled(LogMarker.DLS)) {
-            logger.trace(LogMarker.DLS,
+          if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+            logger.trace(LogMarker.DLS_VERBOSE,
                 "Elder not making {} grantor shutdown for {} by {} because the current grantor is {}",
                 (locksHeld ? "unclean" : "clean"), serviceName, oldGrantor, currentGrantor);
           }
         } else {
-          if (logger.isTraceEnabled(LogMarker.DLS)) {
-            logger.trace(LogMarker.DLS, "Elder making {} grantor shutdown for {} by {}",
+          if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+            logger.trace(LogMarker.DLS_VERBOSE, "Elder making {} grantor shutdown for {} by {}",
                 (locksHeld ? "unclean" : "clean"), serviceName, oldGrantor);
           }
         }
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/GrantorRequestProcessor.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/GrantorRequestProcessor.java
index 828068c..f6e8ce8 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/GrantorRequestProcessor.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/GrantorRequestProcessor.java
@@ -183,12 +183,6 @@ public class GrantorRequestProcessor extends ReplyProcessor21 {
     }
   }
 
-  /**
-   * elderSyncWait
-   *
-   * @param newElder
-   * @param dls
-   */
   private static void elderSyncWait(InternalDistributedSystem sys,
       InternalDistributedMember newElder, DLockService dls) {
     GrantorRequestContext grc = sys.getGrantorRequestContext();
@@ -197,7 +191,7 @@ public class GrantorRequestProcessor extends ReplyProcessor21 {
         LocalizedStrings.GrantorRequestProcessor_GRANTORREQUESTPROCESSOR_ELDERSYNCWAIT_THE_CURRENT_ELDER_0_IS_WAITING_FOR_THE_NEW_ELDER_1,
         new Object[] {grc.currentElder, newElder});
     while (grc.waitingToChangeElder) {
-      logger.info(LogMarker.DLS, message);
+      logger.info(LogMarker.DLS_MARKER, message);
       boolean interrupted = Thread.interrupted();
       try {
         grc.elderLockCondition.await(sys.getConfig().getMemberTimeout());
@@ -377,8 +371,8 @@ public class GrantorRequestProcessor extends ReplyProcessor21 {
             boolean sent = GrantorRequestMessage.send(grantorVersion, dlsSerialNumber, serviceName,
                 grc.currentElder, dm, processor, oldTurk, opCode);
             if (!sent) {
-              if (logger.isTraceEnabled(LogMarker.DLS)) {
-                logger.trace(LogMarker.DLS, "Unable to communicate with elder {}",
+              if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+                logger.trace(LogMarker.DLS_VERBOSE, "Unable to communicate with elder {}",
                     grc.currentElder);
               }
             }
@@ -466,29 +460,11 @@ public class GrantorRequestProcessor extends ReplyProcessor21 {
     private InternalDistributedMember oldTurk;
 
     /**
-     *
-     * @param serviceName
-     * @param elder
-     * @param dm
-     * @param proc
-     * @param oldTurk
-     * @param opCode
      * @return true if the message was sent
      */
     protected static boolean send(long grantorVersion, int dlsSerialNumber, String serviceName,
         InternalDistributedMember elder, DistributionManager dm, ReplyProcessor21 proc,
         InternalDistributedMember oldTurk, byte opCode) {
-      // bug36361: the following assertion doesn't work, since the client that sent us
-      // the request might have a different notion of the elder (no view synchrony on the
-      // current notion of the elder).
-      // InternalDistributedMember moi = dm.getDistributionManagerId();
-      // Assert.assertTrue(!(
-      // // Sending a message to ourself is REALLY WEIRD, so
-      // // we make that the first test...
-      // moi.equals(dm.getElderId())
-      // && !moi.equals(elder)
-      // && dm.getDistributionManagerIds().contains(elder)
-      // ));
 
       GrantorRequestMessage msg = new GrantorRequestMessage();
       msg.grantorVersion = grantorVersion;
@@ -498,8 +474,8 @@ public class GrantorRequestProcessor extends ReplyProcessor21 {
       msg.opCode = opCode;
       msg.processorId = proc.getProcessorId();
       msg.setRecipient(elder);
-      if (logger.isTraceEnabled(LogMarker.DLS)) {
-        logger.trace(LogMarker.DLS, "GrantorRequestMessage sending {} to {}", msg, elder);
+      if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+        logger.trace(LogMarker.DLS_VERBOSE, "GrantorRequestMessage sending {} to {}", msg, elder);
       }
       Set failures = dm.putOutgoing(msg);
       return failures == null || failures.size() == 0;
@@ -520,24 +496,9 @@ public class GrantorRequestProcessor extends ReplyProcessor21 {
 
     @Override
     protected void process(ClusterDistributionManager dm) {
-      // executeBasicProcess(dm); // TODO change to this after things are stable
       basicProcess(dm);
     }
 
-    // private void executeBasicProcess(final DM dm) {
-    // final GrantorRequestMessage msg = this;
-    // try {
-    // dm.getWaitingThreadPool().execute(new Runnable() {
-    // public void run() {
-    // basicProcess(dm);
-    // }
-    // });
-    // }
-    // catch (RejectedExecutionException e) { {
-    // logger.debug("Rejected processing of <{}>", this, e);
-    // }
-    // }
-
     protected void basicProcess(final DistributionManager dm) {
       // we should be in the elder
       ElderState es = dm.getElderState(true, false);
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/NonGrantorDestroyedProcessor.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/NonGrantorDestroyedProcessor.java
index 5d7821a..a7505c2 100755
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/NonGrantorDestroyedProcessor.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/locks/NonGrantorDestroyedProcessor.java
@@ -117,8 +117,9 @@ public class NonGrantorDestroyedProcessor extends ReplyProcessor21 {
       msg.processorId = proc.getProcessorId();
       msg.setRecipient(grantor);
 
-      if (logger.isTraceEnabled(LogMarker.DLS)) {
-        logger.trace(LogMarker.DLS, "NonGrantorDestroyedMessage sending {} to {}", msg, grantor);
+      if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+        logger.trace(LogMarker.DLS_VERBOSE, "NonGrantorDestroyedMessage sending {} to {}", msg,
+            grantor);
       }
 
       if (grantor.equals(dm.getId())) {
@@ -165,19 +166,19 @@ public class NonGrantorDestroyedProcessor extends ReplyProcessor21 {
         }
       } catch (InterruptedException e) {
         Thread.currentThread().interrupt();
-        if (logger.isTraceEnabled(LogMarker.DLS)) {
-          logger.trace(LogMarker.DLS,
+        if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+          logger.trace(LogMarker.DLS_VERBOSE,
               "Processing of NonGrantorDestroyedMessage resulted in InterruptedException", e);
         }
       } catch (LockServiceDestroyedException e) {
-        if (logger.isTraceEnabled(LogMarker.DLS)) {
-          logger.trace(LogMarker.DLS,
+        if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+          logger.trace(LogMarker.DLS_VERBOSE,
               "Processing of NonGrantorDestroyedMessage resulted in LockServiceDestroyedException",
               e);
         }
       } catch (LockGrantorDestroyedException e) {
-        if (logger.isTraceEnabled(LogMarker.DLS)) {
-          logger.trace(LogMarker.DLS,
+        if (logger.isTraceEnabled(LogMarker.DLS_VERBOSE)) {
+          logger.trace(LogMarker.DLS_VERBOSE,
               "Processing of NonGrantorDestroyedMessage resulted in LockGrantorDestroyedException",
               e);
         }
diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/gms/mgr/GMSMembershipManager.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/gms/mgr/GMSMembershipManager.java
index f8ed9b6..f393924 100644
--- a/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/gms/mgr/GMSMembershipManager.java
+++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/membership/gms/mgr/GMSMembershipManager.java
@@ -664,7 +664,7 @@ public class GMSMembershipManager implements MembershipManager, Manager {
 
         long delta = System.currentTimeMillis() - start;
 
-        logger.info(LogMarker.DISTRIBUTION, LocalizedMessage
+        logger.info(LogMarker.DISTRIBUTION_MARKER, LocalizedMessage
             .create(LocalizedStrings.GroupMembershipService_JOINED_TOOK__0__MS, delta));
 
         NetView initialView = services.getJoinLeave().getView();
@@ -1083,7 +1083,6 @@ public class GMSMembershipManager implements MembershipManager, Manager {
 
     // If this member is shunned or new, grab the latestViewWriteLock: update the appropriate data
     // structure.
-    // synchronized (latestViewLock) {
     if (isShunnedOrNew(m)) {
       latestViewWriteLock.lock();
       try {
@@ -1111,8 +1110,8 @@ public class GMSMembershipManager implements MembershipManager, Manager {
     if (shunned) { // bug #41538 - shun notification must be outside synchronization to avoid
       // hanging
       warnShun(m);
-      if (logger.isTraceEnabled(LogMarker.DISTRIBUTION_VIEWS)) {
-        logger.trace(LogMarker.DISTRIBUTION_VIEWS,
+      if (logger.isTraceEnabled(LogMarker.DISTRIBUTION_VIEWS_VERBOSE)) {
+        logger.trace(LogMarker.DISTRIBUTION_VIEWS_VERBOSE,
             "Membership: Ignoring message from shunned member <{}>:{}", m, msg);
       }
       throw new MemberShunnedException(m);
diff --git a/geode-core/src/main/java/org/apache/geode/internal/InternalDataSerializer.java b/geode-core/src/main/java/org/apache/geode/internal/InternalDataSerializer.java
index b0b0930..133c592 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/InternalDataSerializer.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/InternalDataSerializer.java
@@ -1148,8 +1148,8 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
       Part.encodeInt(dataSerializer.getId(), idBytes);
       serializedDataSerializer[1] = idBytes;
     } catch (IOException ignored) {
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER,
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE,
             "InternalDataSerializer encountered an IOException while serializing DataSerializer :{}",
             dataSerializer);
       }
@@ -1229,7 +1229,7 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
           }
           return serializer;
         } catch (ClassNotFoundException ignored) {
-          logger.info(LogMarker.SERIALIZER,
+          logger.info(LogMarker.SERIALIZER_MARKER,
               LocalizedMessage.create(
                   LocalizedStrings.InternalDataSerializer_COULD_NOT_LOAD_DATASERIALIZER_CLASS_0,
                   dsClass));
@@ -1277,7 +1277,7 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
           }
           return ds;
         } catch (ClassNotFoundException ignored) {
-          logger.info(LogMarker.SERIALIZER,
+          logger.info(LogMarker.SERIALIZER_MARKER,
               LocalizedMessage.create(
                   LocalizedStrings.InternalDataSerializer_COULD_NOT_LOAD_DATASERIALIZER_CLASS_0,
                   dsClass));
@@ -1323,7 +1323,7 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
           supportedClassesToHolders.remove(clazz.getName());
         }
       } catch (ClassNotFoundException ignored) {
-        logger.info(LogMarker.SERIALIZER, LocalizedMessage.create(
+        logger.info(LogMarker.SERIALIZER_MARKER, LocalizedMessage.create(
             LocalizedStrings.InternalDataSerializer_COULD_NOT_LOAD_DATASERIALIZER_CLASS_0, name));
       }
     }
@@ -1384,7 +1384,7 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
         try {
           dsClass = getCachedClass(iterator.next().getKey());
         } catch (ClassNotFoundException ignored) {
-          logger.info(LogMarker.SERIALIZER,
+          logger.info(LogMarker.SERIALIZER_MARKER,
               LocalizedMessage.create(
                   LocalizedStrings.InternalDataSerializer_COULD_NOT_LOAD_DATASERIALIZER_CLASS_0,
                   dsClass));
@@ -1507,8 +1507,10 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
 
   public static void writeDSFID(DataSerializableFixedID o, DataOutput out) throws IOException {
     int dsfid = o.getDSFID();
-    if (dsfidToClassMap != null && logger.isTraceEnabled(LogMarker.DEBUG_DSFID)) {
-      logger.trace(LogMarker.DEBUG_DSFID, "writeDSFID {} class={}", dsfid, o.getClass());
+    if (dsfidToClassMap != null
+        && logger.isTraceEnabled(LogMarker.SERIALIZER_WRITE_DSFID_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_WRITE_DSFID_VERBOSE, "writeDSFID {} class={}", dsfid,
+          o.getClass());
       if (dsfid != DataSerializableFixedID.NO_FIXED_ID
           && dsfid != DataSerializableFixedID.ILLEGAL) {
         // consistency check to make sure that the same DSFID is not used
@@ -1516,8 +1518,8 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
         String newClassName = o.getClass().getName();
         String existingClassName = (String) dsfidToClassMap.putIfAbsent(dsfid, newClassName);
         if (existingClassName != null && !existingClassName.equals(newClassName)) {
-          logger.trace(LogMarker.DEBUG_DSFID, "dsfid={} is used for class {} and class {}", dsfid,
-              existingClassName, newClassName);
+          logger.trace(LogMarker.SERIALIZER_WRITE_DSFID_VERBOSE,
+              "dsfid={} is used for class {} and class {}", dsfid, existingClassName, newClassName);
         }
       }
     }
@@ -1808,8 +1810,8 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
       size = set.size();
     }
     writeArrayLength(size, out);
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing HashSet with {} elements: {}", size, set);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing HashSet with {} elements: {}", size, set);
     }
     if (size > 0) {
       for (Object element : set) {
@@ -1854,8 +1856,8 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
         c.add(element);
       }
 
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER, "Read Collection with {} elements: {}", size, c);
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read Collection with {} elements: {}", size, c);
       }
       return true;
     }
@@ -2039,8 +2041,8 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
             LocalizedStrings.DataSerializer_UNKNOWN_TIMEUNIT_TYPE_0.toLocalizedString(type));
     }
 
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Read TimeUnit: {}", unit);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read TimeUnit: {}", unit);
     }
 
     return unit;
@@ -2049,8 +2051,8 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
   private static void writeTimestamp(Timestamp o, DataOutput out) throws IOException {
     InternalDataSerializer.checkOut(out);
 
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing Timestamp: {}", o);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing Timestamp: {}", o);
     }
     DataSerializer.writePrimitiveLong(o.getTime(), out);
   }
@@ -2058,8 +2060,8 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
   private static Timestamp readTimestamp(DataInput in) throws IOException {
     InternalDataSerializer.checkIn(in);
     Timestamp result = new Timestamp(DataSerializer.readPrimitiveLong(in));
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Read Timestamp: {}", result);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read Timestamp: {}", result);
     }
     return result;
   }
@@ -2067,8 +2069,8 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
   private static void writeUUID(java.util.UUID o, DataOutput out) throws IOException {
     InternalDataSerializer.checkOut(out);
 
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing UUID: {}", o);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing UUID: {}", o);
     }
     DataSerializer.writePrimitiveLong(o.getMostSignificantBits(), out);
     DataSerializer.writePrimitiveLong(o.getLeastSignificantBits(), out);
@@ -2079,8 +2081,8 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
     long mb = DataSerializer.readPrimitiveLong(in);
     long lb = DataSerializer.readPrimitiveLong(in);
     UUID result = new UUID(mb, lb);
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Read UUID: {}", result);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read UUID: {}", result);
     }
     return result;
   }
@@ -2088,8 +2090,8 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
   private static void writeBigDecimal(BigDecimal o, DataOutput out) throws IOException {
     InternalDataSerializer.checkOut(out);
 
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing BigDecimal: {}", o);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing BigDecimal: {}", o);
     }
     DataSerializer.writeString(o.toString(), out);
   }
@@ -2097,8 +2099,8 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
   private static BigDecimal readBigDecimal(DataInput in) throws IOException {
     InternalDataSerializer.checkIn(in);
     BigDecimal result = new BigDecimal(DataSerializer.readString(in));
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Read BigDecimal: {}", result);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read BigDecimal: {}", result);
     }
     return result;
   }
@@ -2106,8 +2108,8 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
   private static void writeBigInteger(BigInteger o, DataOutput out) throws IOException {
     InternalDataSerializer.checkOut(out);
 
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing BigInteger: {}", o);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing BigInteger: {}", o);
     }
     DataSerializer.writeByteArray(o.toByteArray(), out);
   }
@@ -2115,14 +2117,15 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
   private static BigInteger readBigInteger(DataInput in) throws IOException {
     InternalDataSerializer.checkIn(in);
     BigInteger result = new BigInteger(DataSerializer.readByteArray(in));
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Read BigInteger: {}", result);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read BigInteger: {}", result);
     }
     return result;
   }
 
   private static final ConcurrentMap dsfidToClassMap =
-      logger.isTraceEnabled(LogMarker.DEBUG_DSFID) ? new ConcurrentHashMap() : null;
+      logger.isTraceEnabled(LogMarker.SERIALIZER_WRITE_DSFID_VERBOSE) ? new ConcurrentHashMap()
+          : null;
 
   public static void writeUserDataSerializableHeader(int classId, DataOutput out)
       throws IOException {
@@ -2153,8 +2156,8 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
       length = -1;
     }
     writeArrayLength(length, out);
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing char array of length {}", length);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing char array of length {}", length);
     }
     if (length > 0) {
       for (int i = 0; i < length; i++) {
@@ -2176,9 +2179,9 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
       throws IOException {
     checkOut(out);
 
-    final boolean isDebugEnabled_SERIALIZER = logger.isTraceEnabled(LogMarker.SERIALIZER);
+    final boolean isDebugEnabled_SERIALIZER = logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE);
     if (isDebugEnabled_SERIALIZER) {
-      logger.trace(LogMarker.SERIALIZER, "basicWriteObject: {}", o);
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "basicWriteObject: {}", o);
     }
 
     // Handle special objects first
@@ -2202,7 +2205,7 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
           .getForPdx("PDX registry is unavailable because the Cache has been closed."), o, null);
     } else if (o instanceof DataSerializable) {
       if (isDebugEnabled_SERIALIZER) {
-        logger.trace(LogMarker.SERIALIZER, "Writing DataSerializable: {}", o);
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing DataSerializable: {}", o);
       }
       checkPdxCompatible(o, ensurePdxCompatibility);
 
@@ -2227,9 +2230,9 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
       // Nothing more to do...
     } else {
       checkPdxCompatible(o, ensurePdxCompatibility);
-      if (logger.isTraceEnabled(LogMarker.DUMP_SERIALIZED)) {
-        logger.trace(LogMarker.DUMP_SERIALIZED, "DataSerializer Serializing an instance of {}",
-            o.getClass().getName());
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_ANNOUNCE_TYPE_WRITTEN_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_ANNOUNCE_TYPE_WRITTEN_VERBOSE,
+            "DataSerializer Serializing an instance of {}", o.getClass().getName());
       }
 
       /*
@@ -2261,8 +2264,8 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
         GemFireCacheImpl.getForPdx("PDX registry is unavailable because the Cache has been closed.")
             .getPdxRegistry();
     int eId = tr.getEnumId(e);
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "write PdxEnum id={} enum={}", eId, e);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "write PdxEnum id={} enum={}", eId, e);
     }
     writePdxEnumId(eId, out);
   }
@@ -2280,8 +2283,8 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
     int dsId = in.readByte();
     int tmp = readArrayLength(in);
     int enumId = dsId << 24 | tmp & 0xFFFFFF;
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "read PdxEnum id={}", enumId);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "read PdxEnum id={}", enumId);
     }
     InternalCache internalCache = GemFireCacheImpl
         .getForPdx("PDX registry is unavailable because the Cache has been closed.");
@@ -2518,8 +2521,8 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
       Assert.assertTrue(o instanceof DataSerializable);
       invokeFromData(o, in);
 
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER, "Read DataSerializable {}", o);
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read DataSerializable {}", o);
       }
 
       return o;
@@ -2548,8 +2551,8 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
 
         invokeFromData(o, in);
 
-        if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-          logger.trace(LogMarker.SERIALIZER, "Read DataSerializableFixedID {}", o);
+        if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+          logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read DataSerializableFixedID {}", o);
         }
 
         return o;
@@ -2696,8 +2699,8 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
   public static Object readDSFID(final DataInput in) throws IOException, ClassNotFoundException {
     checkIn(in);
     byte header = in.readByte();
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "readDSFID: header={}", header);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "readDSFID: header={}", header);
     }
     if (header == DS_FIXED_ID_BYTE) {
       return DSFIDFactory.create(in.readByte(), in);
@@ -2740,34 +2743,34 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
   public static String readString(DataInput in, byte header) throws IOException {
     if (header == DSCODE.STRING_BYTES) {
       int len = in.readUnsignedShort();
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER, "Reading STRING_BYTES of len={}", len);
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Reading STRING_BYTES of len={}", len);
       }
       byte[] buf = new byte[len];
       in.readFully(buf, 0, len);
       return new String(buf, 0); // intentionally using deprecated constructor
     } else if (header == DSCODE.STRING) {
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER, "Reading utf STRING");
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Reading utf STRING");
       }
       return in.readUTF();
     } else if (header == DSCODE.NULL_STRING) {
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER, "Reading NULL_STRING");
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Reading NULL_STRING");
       }
       return null;
     } else if (header == DSCODE.HUGE_STRING_BYTES) {
       int len = in.readInt();
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER, "Reading HUGE_STRING_BYTES of len={}", len);
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Reading HUGE_STRING_BYTES of len={}", len);
       }
       byte[] buf = new byte[len];
       in.readFully(buf, 0, len);
       return new String(buf, 0); // intentionally using deprecated constructor
     } else if (header == DSCODE.HUGE_STRING) {
       int len = in.readInt();
-      if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-        logger.trace(LogMarker.SERIALIZER, "Reading HUGE_STRING of len={}", len);
+      if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+        logger.trace(LogMarker.SERIALIZER_VERBOSE, "Reading HUGE_STRING of len={}", len);
       }
       char[] buf = new char[len];
       for (int i = 0; i < len; i++) {
@@ -2854,8 +2857,8 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
 
     // Read the header byte
     byte header = in.readByte();
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "basicReadObject: header={}", header);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "basicReadObject: header={}", header);
     }
     switch (header) {
       case DS_FIXED_ID_BYTE:
@@ -2984,7 +2987,8 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
         return readDataSerializable(in);
 
       case SERIALIZABLE: {
-        final boolean isDebugEnabled_SERIALIZER = logger.isTraceEnabled(LogMarker.SERIALIZER);
+        final boolean isDebugEnabled_SERIALIZER =
+            logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE);
         Object serializableResult;
         if (in instanceof DSObjectInputStream) {
           serializableResult = ((DSObjectInputStream) in).readObject();
@@ -3018,11 +3022,12 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
           serializableResult = ois.readObject();
 
           if (isDebugEnabled_SERIALIZER) {
-            logger.trace(LogMarker.SERIALIZER, "Read Serializable object: {}", serializableResult);
+            logger.trace(LogMarker.SERIALIZER_VERBOSE, "Read Serializable object: {}",
+                serializableResult);
           }
         }
         if (isDebugEnabled_SERIALIZER) {
-          logger.trace(LogMarker.SERIALIZER, "deserialized instanceof {}",
+          logger.trace(LogMarker.SERIALIZER_VERBOSE, "deserialized instanceof {}",
               serializableResult.getClass());
         }
         return serializableResult;
@@ -3053,7 +3058,7 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
       throws IOException {
     Instantiator instantiator = InternalInstantiator.getInstantiator(classId);
     if (instantiator == null) {
-      logger.error(LogMarker.SERIALIZER,
+      logger.error(LogMarker.SERIALIZER_MARKER,
           LocalizedMessage.create(
               LocalizedStrings.DataSerializer_NO_INSTANTIATOR_HAS_BEEN_REGISTERED_FOR_CLASS_WITH_ID_0,
               classId));
@@ -3207,8 +3212,8 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
     InternalCache internalCache = GemFireCacheImpl
         .getForPdx("PDX registry is unavailable because the Cache has been closed.");
     PdxType pdxType = internalCache.getPdxRegistry().getType(typeId);
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "readPdxSerializable pdxType={}", pdxType);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "readPdxSerializable pdxType={}", pdxType);
     }
     if (pdxType == null) {
       throw new IllegalStateException("Unknown pdx type=" + typeId);
@@ -3676,8 +3681,8 @@ public abstract class InternalDataSerializer extends DataSerializer implements D
       length = array.length;
     }
     InternalDataSerializer.writeArrayLength(length, out);
-    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
-      logger.trace(LogMarker.SERIALIZER, "Writing Object array of length {}", length);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing Object array of length {}", length);
     }
     if (length >= 0) {
       writeClass(array.getClass().getComponentType(), out);
diff --git a/geode-core/src/main/java/org/apache/geode/internal/admin/remote/AdminWaiters.java b/geode-core/src/main/java/org/apache/geode/internal/admin/remote/AdminWaiters.java
index d19885a..d358c01 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/admin/remote/AdminWaiters.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/admin/remote/AdminWaiters.java
@@ -27,7 +27,6 @@ import org.apache.geode.distributed.internal.ReplyProcessor21;
 import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
 import org.apache.geode.internal.i18n.LocalizedStrings;
 import org.apache.geode.internal.logging.LogService;
-import org.apache.geode.internal.logging.log4j.LogMarker;
 
 /**
  * Used by {@link AdminRequest} to wait for a {@link AdminResponse}. Prior to GemFire 4.0, a
@@ -42,10 +41,6 @@ import org.apache.geode.internal.logging.log4j.LogMarker;
  * class can go away.
  */
 public class AdminWaiters {
-  private static final Logger logger = LogService.getLogger();
-
-  // private static final long TIMEOUT = 10000L;
-
   /**
    * Sends <code>msg</code> using <code>dm</code> and waits for the response.
    *
@@ -74,10 +69,7 @@ public class AdminWaiters {
         if (failures != null && failures.size() > 0) { // didn't go out
           if (dm.getDistributionManagerIds().contains(msg.getRecipient())) {
             // it's still in the view
-            String s = "";
-            if (logger.isTraceEnabled(LogMarker.DM)) {
-              s += " (" + msg + ")";
-            }
+            String s = " (" + msg + ")";
             throw new RuntimeAdminException(
                 LocalizedStrings.AdminWaiters_COULD_NOT_SEND_REQUEST_0.toLocalizedString(s));
           }
@@ -103,10 +95,7 @@ public class AdminWaiters {
             throw new RuntimeAdminException(sb.toString());
           } // still here?
           // recipient vanished
-          String s = "";
-          if (logger.isTraceEnabled(LogMarker.DM)) {
-            s = " (" + msg + ")";
-          }
+          String s = " (" + msg + ")";
           throw new OperationCancelledException(
               LocalizedStrings.AdminWaiters_REQUEST_SENT_TO_0_FAILED_SINCE_MEMBER_DEPARTED_1
                   .toLocalizedString(new Object[] {msg.getRecipient(), s}));
@@ -118,17 +107,12 @@ public class AdminWaiters {
       Thread.currentThread().interrupt();
       dm.getCancelCriterion().checkCancelInProgress(ex);
       String s = LocalizedStrings.AdminWaiters_REQUEST_WAIT_WAS_INTERRUPTED.toLocalizedString();
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        s += " (" + msg + ")";
-      }
+      s += " (" + msg + ")";
       throw new RuntimeAdminException(s, ex);
     }
 
     if (result == null) {
-      String s = "";
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        s += " (" + msg + ")";
-      }
+      String s = " (" + msg + ")";
       throw new OperationCancelledException(
           LocalizedStrings.AdminWaiters_REQUEST_SEND_TO_0_WAS_CANCELLED_1
               .toLocalizedString(new Object[] {msg.getRecipient(), s}));
@@ -150,7 +134,6 @@ public class AdminWaiters {
 
     if (processor == null) {
       return; // must've been cancelled
-
     } else {
       processor.process(msg);
     }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/admin/remote/AlertLevelChangeMessage.java b/geode-core/src/main/java/org/apache/geode/internal/admin/remote/AlertLevelChangeMessage.java
index 3117654..7898d0b 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/admin/remote/AlertLevelChangeMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/admin/remote/AlertLevelChangeMessage.java
@@ -63,8 +63,8 @@ public class AlertLevelChangeMessage extends SerialDistributionMessage {
 
     if (this.newLevel != Alert.OFF) {
       AlertAppender.getInstance().addAlertListener(this.getSender(), this.newLevel);
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM, "Added new AlertListener to application log writer");
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE, "Added new AlertListener to application log writer");
       }
     }
   }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/admin/remote/RemoteGfManagerAgent.java b/geode-core/src/main/java/org/apache/geode/internal/admin/remote/RemoteGfManagerAgent.java
index bed3249..07e41fc 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/admin/remote/RemoteGfManagerAgent.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/admin/remote/RemoteGfManagerAgent.java
@@ -854,8 +854,8 @@ class RemoteGfManagerAgent implements GfManagerAgent {
         try {
           handleJoined(member);
         } catch (OperationCancelledException ex) {
-          if (logger.isTraceEnabled(LogMarker.DM)) {
-            logger.trace(LogMarker.DM, "join cancelled by departure");
+          if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+            logger.trace(LogMarker.DM_VERBOSE, "join cancelled by departure");
           }
         }
       }
@@ -864,16 +864,6 @@ class RemoteGfManagerAgent implements GfManagerAgent {
     } // sync
   }
 
-  // /**
-  // * Returns whether or not there are any members of the distributed
-  // * system.
-  // */
-  // private boolean membersExist() {
-  // // removed synchronized(members) {
-  // // removed synchronized (managers) {
-  // return this.members.size() > 0 || this.managers.size() > 0;
-  // }
-
   /**
    * Returns the thread group in which admin threads should run. This thread group handles uncaught
    * exceptions nicely.
@@ -1172,8 +1162,8 @@ class RemoteGfManagerAgent implements GfManagerAgent {
     }
 
     public void shutDown() {
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM, "JoinProcessor: shutting down");
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE, "JoinProcessor: shutting down");
       }
       this.shutDown = true;
       this.interrupt();
@@ -1184,14 +1174,10 @@ class RemoteGfManagerAgent implements GfManagerAgent {
     }
 
     private void resumeHandling() {
-      // if (this.shutDown) {
-      // return;
-      // }
-
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM, "JoinProcessor: resuming.  Is alive? {}", this.isAlive());
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE, "JoinProcessor: resuming.  Is alive? {}",
+            this.isAlive());
       }
-      // Assert.assertTrue(this.isAlive());
 
       // unpause if paused during a cancel...
       this.paused = false;
@@ -1220,7 +1206,6 @@ class RemoteGfManagerAgent implements GfManagerAgent {
       boolean noPendingJoins = false;
       OUTER: while (!this.shutDown) {
         SystemFailure.checkFailure();
-        // Thread.interrupted(); // clear the interrupted flag
         try {
           if (!RemoteGfManagerAgent.this.isListening()) {
             shutDown();
@@ -1233,16 +1218,15 @@ class RemoteGfManagerAgent implements GfManagerAgent {
 
           // if paused OR no pendingJoins then just wait...
           if (this.paused || noPendingJoins) {
-            if (logger.isTraceEnabled(LogMarker.DM)) {
-              logger.trace(LogMarker.DM, "JoinProcessor is about to wait...");
+            if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+              logger.trace(LogMarker.DM_VERBOSE, "JoinProcessor is about to wait...");
             }
-            // Thread.interrupted(); // clear the interrupted flag
             synchronized (this.lock) {
               this.lock.wait();
             }
           }
-          if (logger.isTraceEnabled(LogMarker.DM)) {
-            logger.trace(LogMarker.DM, "JoinProcessor has woken up...");
+          if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+            logger.trace(LogMarker.DM_VERBOSE, "JoinProcessor has woken up...");
           }
           if (this.paused)
             continue;
@@ -1250,13 +1234,15 @@ class RemoteGfManagerAgent implements GfManagerAgent {
           // if no join was already in process or if aborted, get a new one...
           if (this.id == null) {
             List pendingJoinsRef = RemoteGfManagerAgent.this.pendingJoins;
-            if (logger.isTraceEnabled(LogMarker.DM)) {
-              logger.trace(LogMarker.DM, "JoinProcessor pendingJoins: {}", pendingJoinsRef.size());
+            if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+              logger.trace(LogMarker.DM_VERBOSE, "JoinProcessor pendingJoins: {}",
+                  pendingJoinsRef.size());
             }
             if (pendingJoinsRef.size() > 0) {
               this.id = (InternalDistributedMember) pendingJoinsRef.get(0);
-              if (logger.isTraceEnabled(LogMarker.DM)) {
-                logger.trace(LogMarker.DM, "JoinProcessor got a membership event for {}", this.id);
+              if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+                logger.trace(LogMarker.DM_VERBOSE, "JoinProcessor got a membership event for {}",
+                    this.id);
               }
             }
           }
@@ -1265,8 +1251,8 @@ class RemoteGfManagerAgent implements GfManagerAgent {
 
           // process the join...
           if (this.id != null) {
-            if (logger.isTraceEnabled(LogMarker.DM)) {
-              logger.trace(LogMarker.DM, "JoinProcessor handling join for {}", this.id);
+            if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+              logger.trace(LogMarker.DM_VERBOSE, "JoinProcessor handling join for {}", this.id);
             }
             try {
               RemoteGfManagerAgent.this.handleJoined(this.id);
@@ -1281,8 +1267,8 @@ class RemoteGfManagerAgent implements GfManagerAgent {
           break;
         } catch (InterruptedException ignore) {
           // When this thread is "paused", it is interrupted
-          if (logger.isTraceEnabled(LogMarker.DM)) {
-            logger.trace(LogMarker.DM, "JoinProcessor has been interrupted...");
+          if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+            logger.trace(LogMarker.DM_VERBOSE, "JoinProcessor has been interrupted...");
           }
           if (shutDown) {
             break;
@@ -1303,12 +1289,11 @@ class RemoteGfManagerAgent implements GfManagerAgent {
             noPendingJoins = false;
             continue;
           }
-          // logger.warning("Unexpected thread interrupt", ignore);
           break; // Panic!
 
         } catch (OperationCancelledException ex) {
-          if (logger.isTraceEnabled(LogMarker.DM)) {
-            logger.trace(LogMarker.DM, "join cancelled by departure");
+          if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+            logger.trace(LogMarker.DM_VERBOSE, "join cancelled by departure");
           }
           continue;
 
@@ -1326,8 +1311,8 @@ class RemoteGfManagerAgent implements GfManagerAgent {
           SystemFailure.checkFailure();
           for (Throwable cause = e.getCause(); cause != null; cause = cause.getCause()) {
             if (cause instanceof InterruptedException) {
-              if (logger.isTraceEnabled(LogMarker.DM)) {
-                logger.trace(LogMarker.DM, "JoinProcessor has been interrupted...");
+              if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+                logger.trace(LogMarker.DM_VERBOSE, "JoinProcessor has been interrupted...");
               }
               continue OUTER;
             }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractBucketRegionQueue.java b/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractBucketRegionQueue.java
index d6822da..32dfb80 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractBucketRegionQueue.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractBucketRegionQueue.java
@@ -14,14 +14,24 @@
  */
 package org.apache.geode.internal.cache;
 
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.logging.log4j.Logger;
 
-import org.apache.geode.cache.*;
+import org.apache.geode.cache.CacheWriterException;
+import org.apache.geode.cache.EntryNotFoundException;
+import org.apache.geode.cache.Operation;
+import org.apache.geode.cache.RegionAttributes;
+import org.apache.geode.cache.RegionDestroyedException;
+import org.apache.geode.cache.TimeoutException;
 import org.apache.geode.distributed.internal.DistributionConfig;
 import org.apache.geode.internal.cache.versions.RegionVersionVector;
 import org.apache.geode.internal.cache.versions.VersionSource;
@@ -171,26 +181,6 @@ public abstract class AbstractBucketRegionQueue extends BucketRegion {
     return initializationLock;
   }
 
-  /**
-   * Does a get that attempts to not fault values in from disk or make the entry the most recent in
-   * the LRU.
-   */
-  /*
-   * protected Object optimalGet(Object k) { // Get the object at that key (to remove the index).
-   * Object object = null; try { object = getValueInVM(k); // OFFHEAP deserialize if (object ==
-   * null) { // must be on disk // fault it in w/o putting it back in the region object =
-   * getValueOnDiskOrBuffer(k); if (object == null) { // try memory one more time in case it was
-   * already faulted back in object = getValueInVM(k); // OFFHEAP deserialize if (object == null) {
-   * // if we get this far give up and just do a get object = get(k); } else { if (object instanceof
-   * CachedDeserializable) { object = ((CachedDeserializable)object).getDeserializedValue( this,
-   * this.getRegionEntry(k)); } } } } else { if (object instanceof CachedDeserializable) { object =
-   * ((CachedDeserializable)object).getDeserializedValue(this, this.getRegionEntry(k)); } } } catch
-   * (EntryNotFoundException ok) { // just return null; } if (object == Token.TOMBSTONE) { object =
-   * null; }
-   *
-   * return object; }
-   */
-
   public void destroyKey(Object key) throws ForceReattemptException {
     if (logger.isDebugEnabled()) {
       logger.debug(" destroying primary key {}", key);
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractDiskRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractDiskRegion.java
index 1d7a79f..2ad305f 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractDiskRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractDiskRegion.java
@@ -539,8 +539,8 @@ public abstract class AbstractDiskRegion implements DiskRegionView {
   @Override
   public void memberOffline(PersistentMemberID persistentID) {
     this.ds.memberOffline(this, persistentID);
-    if (logger.isDebugEnabled()) {
-      logger.trace(LogMarker.PERSIST, "PersistentView {} - {} - member offline {}",
+    if (logger.isTraceEnabled(LogMarker.PERSIST_VERBOSE)) {
+      logger.trace(LogMarker.PERSIST_VERBOSE, "PersistentView {} - {} - member offline {}",
           getDiskStoreID().abbrev(), this.getName(), persistentID);
     }
   }
@@ -548,17 +548,18 @@ public abstract class AbstractDiskRegion implements DiskRegionView {
   @Override
   public void memberOfflineAndEqual(PersistentMemberID persistentID) {
     this.ds.memberOfflineAndEqual(this, persistentID);
-    if (logger.isDebugEnabled()) {
-      logger.trace(LogMarker.PERSIST, "PersistentView {} - {} - member offline and equal {}",
-          getDiskStoreID().abbrev(), this.getName(), persistentID);
+    if (logger.isTraceEnabled(LogMarker.PERSIST_VERBOSE)) {
+      logger.trace(LogMarker.PERSIST_VERBOSE,
+          "PersistentView {} - {} - member offline and equal {}", getDiskStoreID().abbrev(),
+          this.getName(), persistentID);
     }
   }
 
   @Override
   public void memberOnline(PersistentMemberID persistentID) {
     this.ds.memberOnline(this, persistentID);
-    if (logger.isDebugEnabled()) {
-      logger.trace(LogMarker.PERSIST, "PersistentView {} - {} - member online {}",
+    if (logger.isTraceEnabled(LogMarker.PERSIST_VERBOSE)) {
+      logger.trace(LogMarker.PERSIST_VERBOSE, "PersistentView {} - {} - member online {}",
           getDiskStoreID().abbrev(), this.getName(), persistentID);
     }
   }
@@ -566,8 +567,8 @@ public abstract class AbstractDiskRegion implements DiskRegionView {
   @Override
   public void memberRemoved(PersistentMemberID persistentID) {
     this.ds.memberRemoved(this, persistentID);
-    if (logger.isDebugEnabled()) {
-      logger.trace(LogMarker.PERSIST, "PersistentView {} - {} - member removed {}",
+    if (logger.isTraceEnabled(LogMarker.PERSIST_VERBOSE)) {
+      logger.trace(LogMarker.PERSIST_VERBOSE, "PersistentView {} - {} - member removed {}",
           getDiskStoreID().abbrev(), this.getName(), persistentID);
     }
   }
@@ -575,8 +576,8 @@ public abstract class AbstractDiskRegion implements DiskRegionView {
   @Override
   public void memberRevoked(PersistentMemberPattern revokedPattern) {
     this.ds.memberRevoked(revokedPattern);
-    if (logger.isDebugEnabled()) {
-      logger.trace(LogMarker.PERSIST, "PersistentView {} - {} - member revoked {}",
+    if (logger.isTraceEnabled(LogMarker.PERSIST_VERBOSE)) {
+      logger.trace(LogMarker.PERSIST_VERBOSE, "PersistentView {} - {} - member revoked {}",
           getDiskStoreID().abbrev(), this.getName(), revokedPattern);
     }
   }
@@ -584,8 +585,8 @@ public abstract class AbstractDiskRegion implements DiskRegionView {
   @Override
   public void setInitializing(PersistentMemberID newId) {
     this.ds.setInitializing(this, newId);
-    if (logger.isDebugEnabled()) {
-      logger.trace(LogMarker.PERSIST, "PersistentView {} - {} - initializing local id: {}",
+    if (logger.isTraceEnabled(LogMarker.PERSIST_VERBOSE)) {
+      logger.trace(LogMarker.PERSIST_VERBOSE, "PersistentView {} - {} - initializing local id: {}",
           getDiskStoreID().abbrev(), this.getName(), getMyInitializingID());
     }
   }
@@ -593,8 +594,8 @@ public abstract class AbstractDiskRegion implements DiskRegionView {
   @Override
   public void setInitialized() {
     this.ds.setInitialized(this);
-    if (logger.isDebugEnabled()) {
-      logger.trace(LogMarker.PERSIST, "PersistentView {} - {} - initialized local id: {}",
+    if (logger.isTraceEnabled(LogMarker.PERSIST_VERBOSE)) {
+      logger.trace(LogMarker.PERSIST_VERBOSE, "PersistentView {} - {} - initialized local id: {}",
           getDiskStoreID().abbrev(), this.getName(), getMyPersistentID());
     }
   }
@@ -644,14 +645,15 @@ public abstract class AbstractDiskRegion implements DiskRegionView {
 
     if (aboutToDestroyDataStorage) {
       ds.endDestroyDataStorage(region, (DiskRegion) this);
-      if (logger.isDebugEnabled()) {
-        logger.trace(LogMarker.PERSIST, "PersistentView {} - {} - endDestroyDataStorage: {}",
-            getDiskStoreID().abbrev(), this.getName(), getMyPersistentID());
+      if (logger.isTraceEnabled(LogMarker.PERSIST_VERBOSE)) {
+        logger.trace(LogMarker.PERSIST_VERBOSE,
+            "PersistentView {} - {} - endDestroyDataStorage: {}", getDiskStoreID().abbrev(),
+            this.getName(), getMyPersistentID());
       }
     } else {
       ds.endDestroyRegion(region, (DiskRegion) this);
-      if (logger.isDebugEnabled()) {
-        logger.trace(LogMarker.PERSIST, "PersistentView {} - {} - endDestroy: {}",
+      if (logger.isTraceEnabled(LogMarker.PERSIST_VERBOSE)) {
+        logger.trace(LogMarker.PERSIST_VERBOSE, "PersistentView {} - {} - endDestroy: {}",
             getDiskStoreID().abbrev(), this.getName(), getMyPersistentID());
       }
     }
@@ -663,8 +665,8 @@ public abstract class AbstractDiskRegion implements DiskRegionView {
   @Override
   public void beginDestroy(LocalRegion region) {
     beginDestroyRegion(region);
-    if (logger.isDebugEnabled()) {
-      logger.trace(LogMarker.PERSIST, "PersistentView {} - {} - beginDestroy: {}",
+    if (logger.isTraceEnabled(LogMarker.PERSIST_VERBOSE)) {
+      logger.trace(LogMarker.PERSIST_VERBOSE, "PersistentView {} - {} - beginDestroy: {}",
           getDiskStoreID().abbrev(), this.getName(), getMyPersistentID());
     }
     if (this.myInitializedId == null) {
@@ -679,9 +681,10 @@ public abstract class AbstractDiskRegion implements DiskRegionView {
   @Override
   public void beginDestroyDataStorage() {
     this.ds.beginDestroyDataStorage((DiskRegion) this);
-    if (logger.isDebugEnabled()) {
-      logger.trace(LogMarker.PERSIST, "PersistentView {} - {} - beginDestroyDataStorage: {}",
-          getDiskStoreID().abbrev(), this.getName(), getMyPersistentID());
+    if (logger.isTraceEnabled(LogMarker.PERSIST_VERBOSE)) {
+      logger.trace(LogMarker.PERSIST_VERBOSE,
+          "PersistentView {} - {} - beginDestroyDataStorage: {}", getDiskStoreID().abbrev(),
+          this.getName(), getMyPersistentID());
     }
   }
 
@@ -878,7 +881,7 @@ public abstract class AbstractDiskRegion implements DiskRegionView {
 
   public void dump(PrintStream printStream) {
     String name = getName();
-    if (isBucket() && !logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
+    if (isBucket() && !logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
       name = getPrName();
     }
     String msg = name + ":" + " -lru=" + getEvictionAttributes().getAlgorithm();
@@ -892,7 +895,7 @@ public abstract class AbstractDiskRegion implements DiskRegionView {
         + getInitialCapacity() + " -loadFactor=" + getLoadFactor() + " -offHeap=" + getOffHeap()
         + " -compressor=" + (getCompressorClassName() == null ? "none" : getCompressorClassName())
         + " -statisticsEnabled=" + getStatisticsEnabled();
-    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
+    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
       msg += " drId=" + getId() + " isBucket=" + isBucket() + " clearEntryId="
           + getClearOplogEntryId() + " MyInitializingID=<" + getMyInitializingID() + ">"
           + " MyPersistentID=<" + getMyPersistentID() + ">" + " onlineMembers=" + getOnlineMembers()
@@ -906,7 +909,7 @@ public abstract class AbstractDiskRegion implements DiskRegionView {
     final String lineSeparator = System.getProperty("line.separator");
     StringBuffer sb = new StringBuffer();
     String name = getName();
-    if (isBucket() && logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
+    if (isBucket() && logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
       name = getPrName();
     }
     String msg = name + ":" + " -lru=" + getEvictionAttributes().getAlgorithm();
@@ -940,7 +943,7 @@ public abstract class AbstractDiskRegion implements DiskRegionView {
     sb.append("-statisticsEnabled=" + getStatisticsEnabled());
     sb.append(lineSeparator);
 
-    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
+    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
       sb.append("drId=" + getId());
       sb.append(lineSeparator);
       sb.append("isBucket=" + isBucket());
@@ -965,10 +968,6 @@ public abstract class AbstractDiskRegion implements DiskRegionView {
 
   public void dumpMetadata() {
     String name = getName();
-    // TODO: make this a flag
-    // if (isBucket() && !DiskStoreImpl.TRACE_RECOVERY) {
-    // name = getPrName();
-    // }
 
     StringBuilder msg = new StringBuilder(name);
 
@@ -981,8 +980,6 @@ public abstract class AbstractDiskRegion implements DiskRegionView {
 
   /**
    * Dump the (bucket specific) persistent view to the string builder
-   *
-   * @param msg
    */
   public void dumpPersistentView(StringBuilder msg) {
     msg.append("\n\tMyInitializingID=<").append(getMyInitializingID()).append(">");
@@ -1006,8 +1003,6 @@ public abstract class AbstractDiskRegion implements DiskRegionView {
 
   /**
    * Dump the attributes which are common across the PR to the string builder.
-   *
-   * @param msg
    */
   public void dumpCommonAttributes(StringBuilder msg) {
     msg.append("\n\tlru=").append(getEvictionAttributes().getAlgorithm());
@@ -1056,9 +1051,6 @@ public abstract class AbstractDiskRegion implements DiskRegionView {
   }
 
   public void recordRecoveredGCVersion(VersionSource member, long gcVersion) {
-    // TODO - RVV - I'm not sure about this recordGCVersion method. It seems
-    // like it's not doing the right thing if the current member is the member
-    // we just recovered.
     this.versionVector.recordGCVersion(member, gcVersion);
 
   }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegionMap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegionMap.java
index 08eacf8..3dd3cdf 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegionMap.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegionMap.java
@@ -2212,8 +2212,9 @@ public abstract class AbstractRegionMap
       // "fix" for bug 32440
       Assert.assertTrue(false, "The owner for RegionMap " + this + " is null for event " + event);
     }
-    if (logger.isTraceEnabled(LogMarker.LRU_TOMBSTONE_COUNT) && !(owner instanceof HARegion)) {
-      logger.trace(LogMarker.LRU_TOMBSTONE_COUNT,
+    if (logger.isTraceEnabled(LogMarker.LRU_TOMBSTONE_COUNT_VERBOSE)
+        && !(owner instanceof HARegion)) {
+      logger.trace(LogMarker.LRU_TOMBSTONE_COUNT_VERBOSE,
           "ARM.basicPut called for {} expectedOldValue={} requireOldValue={} ifNew={} ifOld={} initialized={} overwriteDestroyed={}",
           event, expectedOldValue, requireOldValue, ifNew, ifOld, owner.isInitialized(),
           overwriteDestroyed);
@@ -3320,23 +3321,23 @@ public abstract class AbstractRegionMap
       synchronized (re) {
         int entryVersion = re.getVersionStamp().getEntryVersion();
         if (!re.isTombstone() || entryVersion > destroyedVersion) {
-          if (logger.isTraceEnabled(LogMarker.TOMBSTONE_COUNT)) {
-            logger.trace(LogMarker.TOMBSTONE_COUNT,
+          if (logger.isTraceEnabled(LogMarker.TOMBSTONE_COUNT_VERBOSE)) {
+            logger.trace(LogMarker.TOMBSTONE_COUNT_VERBOSE,
                 "tombstone for {} was resurrected with v{}; destroyed version was v{}; count is {}; entryMap size is {}",
                 re.getKey(), re.getVersionStamp().getEntryVersion(), destroyedVersion,
                 this._getOwner().getTombstoneCount(), size());
           }
         } else {
-          if (logger.isTraceEnabled(LogMarker.TOMBSTONE_COUNT)) {
+          if (logger.isTraceEnabled(LogMarker.TOMBSTONE_COUNT_VERBOSE)) {
             if (entryVersion == destroyedVersion) {
               // logging this can put tremendous pressure on the log writer in tests
               // that "wait for silence"
-              logger.trace(LogMarker.TOMBSTONE_COUNT,
+              logger.trace(LogMarker.TOMBSTONE_COUNT_VERBOSE,
                   "removing tombstone for {} with v{} rv{}; count is {}", re.getKey(),
                   destroyedVersion, version.getRegionVersion(),
                   (this._getOwner().getTombstoneCount() - 1));
             } else {
-              logger.trace(LogMarker.TOMBSTONE_COUNT,
+              logger.trace(LogMarker.TOMBSTONE_COUNT_VERBOSE,
                   "removing entry (v{}) that is older than an expiring tombstone (v{} rv{}) for {}",
                   entryVersion, destroyedVersion, version.getRegionVersion(), re.getKey());
             }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketAdvisor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketAdvisor.java
index 074a60d..bfe7472 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketAdvisor.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketAdvisor.java
@@ -41,7 +41,6 @@ import org.apache.logging.log4j.Logger;
 
 import org.apache.geode.CancelException;
 import org.apache.geode.DataSerializer;
-import org.apache.geode.cache.CacheClosedException;
 import org.apache.geode.cache.RegionDestroyedException;
 import org.apache.geode.cache.client.internal.locator.SerializationHelper;
 import org.apache.geode.cache.partition.PartitionListener;
@@ -528,16 +527,17 @@ public class BucketAdvisor extends CacheDistributionAdvisor {
     // Only hosting buckets will be initializing, the isInitializing boolean is to
     // allow for early entry into the advisor for GII purposes
     if (!bp.isHosting && !bp.isInitializing) {
-      if (logger.isTraceEnabled(LogMarker.DA)) {
-        logger.trace(LogMarker.DA, "BucketAdvisor#putProfile early out");
+      if (logger.isTraceEnabled(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE)) {
+        logger.trace(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE, "BucketAdvisor#putProfile early out");
       }
       return false; // Do not allow introduction of proxy profiles, they don't provide anything
                     // useful
       // isHosting = false, isInitializing = false
     }
-    if (logger.isTraceEnabled(LogMarker.DA)) {
-      logger.trace(LogMarker.DA, "BucketAdvisor#putProfile profile=<{}> force={}; profile = {}",
-          profile, forceProfile, bp);
+    if (logger.isTraceEnabled(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE)) {
+      logger.trace(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE,
+          "BucketAdvisor#putProfile profile=<{}> force={}; profile = {}", profile, forceProfile,
+          bp);
     }
     // isHosting = false, isInitializing = true
     // isHosting = true, isInitializing = false
@@ -552,8 +552,7 @@ public class BucketAdvisor extends CacheDistributionAdvisor {
       applied = super.putProfile(profile, forceProfile);
       // skip following block if isPrimary to avoid race where we process late
       // arriving OTHER_PRIMARY profile after we've already become primary
-      if (applied && !isPrimary()) { // TODO is it safe to change the bucket state if the profile
-                                     // was not applied? -- mthomas 2/13/08
+      if (applied && !isPrimary()) {
         if (bp.isPrimary) {
           setPrimaryMember(bp.getDistributedMember());
         } else {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketPersistenceAdvisor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketPersistenceAdvisor.java
index 1760e02..42d758d 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketPersistenceAdvisor.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketPersistenceAdvisor.java
@@ -25,7 +25,6 @@ import java.util.concurrent.CountDownLatch;
 
 import org.apache.logging.log4j.Logger;
 
-import org.apache.geode.cache.CacheClosedException;
 import org.apache.geode.cache.persistence.PartitionOfflineException;
 import org.apache.geode.distributed.DistributedLockService;
 import org.apache.geode.distributed.internal.ReplyException;
@@ -128,13 +127,13 @@ public class BucketPersistenceAdvisor extends PersistenceAdvisorImpl {
   protected void logWaitingForMember(Set<PersistentMemberID> allMembersToWaitFor,
       Set<PersistentMemberID> offlineMembersToWaitFor) {
     // We only log the bucket level information at fine level.
-    if (logger.isDebugEnabled(LogMarker.PERSIST_ADVISOR)) {
+    if (logger.isDebugEnabled(LogMarker.PERSIST_ADVISOR_VERBOSE)) {
       Set<String> membersToWaitForPrettyFormat = new HashSet<String>();
 
       if (offlineMembersToWaitFor != null && !offlineMembersToWaitFor.isEmpty()) {
         TransformUtils.transform(offlineMembersToWaitFor, membersToWaitForPrettyFormat,
             TransformUtils.persistentMemberIdToLogEntryTransformer);
-        logger.info(LogMarker.PERSIST_ADVISOR, LocalizedMessage.create(
+        logger.debug(LogMarker.PERSIST_ADVISOR_VERBOSE, LocalizedMessage.create(
             LocalizedStrings.BucketPersistenceAdvisor_WAITING_FOR_LATEST_MEMBER,
             new Object[] {proxyBucket.getPartitionedRegion().getFullPath(),
                 proxyBucket.getBucketId(),
@@ -144,10 +143,10 @@ public class BucketPersistenceAdvisor extends PersistenceAdvisorImpl {
         TransformUtils.transform(allMembersToWaitFor, membersToWaitForPrettyFormat,
             TransformUtils.persistentMemberIdToLogEntryTransformer);
         if (logger.isDebugEnabled()) {
-          logger.debug(LogMarker.PERSIST_ADVISOR,
+          logger.debug(LogMarker.PERSIST_ADVISOR_VERBOSE,
               "All persistent members being waited on are online, but they have not yet initialized");
         }
-        logger.info(LogMarker.PERSIST_ADVISOR, LocalizedMessage.create(
+        logger.debug(LogMarker.PERSIST_ADVISOR_VERBOSE, LocalizedMessage.create(
             LocalizedStrings.BucketPersistenceAdvisor_WAITING_FOR_LATEST_MEMBER,
             new Object[] {proxyBucket.getPartitionedRegion().getFullPath(),
                 proxyBucket.getBucketId(),
@@ -167,8 +166,8 @@ public class BucketPersistenceAdvisor extends PersistenceAdvisorImpl {
       // the delock.
       if (!proxyBucket.hasPersistentChildRegion()
           && !proxyBucket.checkBucketRedundancyBeforeGrab(null, false)) {
-        if (logger.isDebugEnabled(LogMarker.PERSIST_ADVISOR)) {
-          logger.debug(LogMarker.PERSIST_ADVISOR,
+        if (logger.isDebugEnabled(LogMarker.PERSIST_ADVISOR_VERBOSE)) {
+          logger.debug(LogMarker.PERSIST_ADVISOR_VERBOSE,
               "{}-{}: After reacquiring dlock, we detected that redundancy is already satisfied",
               shortDiskStoreId(), regionPath);
         }
@@ -406,8 +405,8 @@ public class BucketPersistenceAdvisor extends PersistenceAdvisorImpl {
   @Override
   public void setInitializing(PersistentMemberID newId) {
     if (atomicCreation) {
-      if (logger.isDebugEnabled(LogMarker.PERSIST_ADVISOR)) {
-        logger.debug(LogMarker.PERSIST_ADVISOR,
+      if (logger.isDebugEnabled(LogMarker.PERSIST_ADVISOR_VERBOSE)) {
+        logger.debug(LogMarker.PERSIST_ADVISOR_VERBOSE,
             "{}-{}: {} Deferring setInitializing until the EndBucketCreation phase for {}",
             shortDiskStoreId(), regionPath, regionPath, newId);
       }
@@ -428,8 +427,8 @@ public class BucketPersistenceAdvisor extends PersistenceAdvisorImpl {
     // is called, we will pass the "wasAtomicCreation" flag down to the super
     // class to ensure that it knows its coming online as part of an atomic creation.
     if (this.atomicCreation) {
-      if (logger.isDebugEnabled(LogMarker.PERSIST_ADVISOR)) {
-        logger.debug(LogMarker.PERSIST_ADVISOR,
+      if (logger.isDebugEnabled(LogMarker.PERSIST_ADVISOR_VERBOSE)) {
+        logger.debug(LogMarker.PERSIST_ADVISOR_VERBOSE,
             "{}-{}: {} Deferring setOnline until the EndBucketCreation phase for {}",
             shortDiskStoreId(), regionPath, regionPath, newId);
       }
@@ -445,16 +444,16 @@ public class BucketPersistenceAdvisor extends PersistenceAdvisorImpl {
   public void endBucketCreation(PersistentMemberID newId) {
     synchronized (lock) {
       if (!atomicCreation) {
-        if (logger.isDebugEnabled(LogMarker.PERSIST_ADVISOR)) {
-          logger.debug(LogMarker.PERSIST_ADVISOR,
+        if (logger.isDebugEnabled(LogMarker.PERSIST_ADVISOR_VERBOSE)) {
+          logger.debug(LogMarker.PERSIST_ADVISOR_VERBOSE,
               "{}-{}: {} In endBucketCreation - already online, skipping (possible concurrent endBucketCreation)",
               shortDiskStoreId(), regionPath, regionPath);
         }
         return;
       }
 
-      if (logger.isDebugEnabled(LogMarker.PERSIST_ADVISOR)) {
-        logger.debug(LogMarker.PERSIST_ADVISOR,
+      if (logger.isDebugEnabled(LogMarker.PERSIST_ADVISOR_VERBOSE)) {
+        logger.debug(LogMarker.PERSIST_ADVISOR_VERBOSE,
             "{}-{}: {} In endBucketCreation - now persisting the id {}", shortDiskStoreId(),
             regionPath, regionPath, newId);
       }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
index 2909537..c5b8327 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java
@@ -17,14 +17,37 @@ package org.apache.geode.internal.cache;
 import java.io.DataOutput;
 import java.io.IOException;
 import java.io.InputStream;
-import java.util.*;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.Lock;
 
 import org.apache.logging.log4j.Logger;
 
-import org.apache.geode.*;
-import org.apache.geode.cache.*;
+import org.apache.geode.CancelException;
+import org.apache.geode.CopyHelper;
+import org.apache.geode.DataSerializer;
+import org.apache.geode.DeltaSerializationException;
+import org.apache.geode.InternalGemFireError;
+import org.apache.geode.InvalidDeltaException;
+import org.apache.geode.SystemFailure;
+import org.apache.geode.cache.CacheException;
+import org.apache.geode.cache.CacheWriter;
+import org.apache.geode.cache.CacheWriterException;
+import org.apache.geode.cache.DiskAccessException;
+import org.apache.geode.cache.EntryNotFoundException;
+import org.apache.geode.cache.EvictionAction;
+import org.apache.geode.cache.EvictionAlgorithm;
+import org.apache.geode.cache.EvictionAttributes;
+import org.apache.geode.cache.ExpirationAction;
+import org.apache.geode.cache.Operation;
+import org.apache.geode.cache.RegionAttributes;
+import org.apache.geode.cache.RegionDestroyedException;
+import org.apache.geode.cache.TimeoutException;
 import org.apache.geode.cache.partition.PartitionListener;
 import org.apache.geode.distributed.DistributedMember;
 import org.apache.geode.distributed.DistributedSystem;
@@ -518,9 +541,9 @@ public class BucketRegion extends DistributedRegion implements Bucket {
       // to members with bucket copies that may not have seen the event. Their
       // EventTrackers will keep them from applying the event a second time if
       // they've already seen it.
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM, "BR.virtualPut: this cache has already seen this event {}",
-            event);
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE,
+            "BR.virtualPut: this cache has already seen this event {}", event);
       }
       if (!getConcurrencyChecksEnabled() || event.hasValidVersionTag()) {
         distributeUpdateOperation(event, lastModified);
@@ -909,8 +932,8 @@ public class BucketRegion extends DistributedRegion implements Bucket {
         }
         return;
       } else {
-        if (logger.isTraceEnabled(LogMarker.DM)) {
-          logger.trace(LogMarker.DM,
+        if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+          logger.trace(LogMarker.DM_VERBOSE,
               "LR.basicInvalidate: this cache has already seen this event {}", event);
         }
         if (!getConcurrencyChecksEnabled() || event.hasValidVersionTag()) {
@@ -1196,9 +1219,9 @@ public class BucketRegion extends DistributedRegion implements Bucket {
     DestroyOperation op = null;
 
     try {
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM, "BR.basicDestroy: this cache has already seen this event {}",
-            event);
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE,
+            "BR.basicDestroy: this cache has already seen this event {}", event);
       }
       if (!event.isOriginRemote() && getBucketAdvisor().isPrimary()) {
         if (event.isBulkOpInProgress()) {
@@ -1324,8 +1347,8 @@ public class BucketRegion extends DistributedRegion implements Bucket {
         if (!hasSeenEvent(event)) {
           this.entries.updateEntryVersion(event);
         } else {
-          if (logger.isTraceEnabled(LogMarker.DM)) {
-            logger.trace(LogMarker.DM,
+          if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+            logger.trace(LogMarker.DM_VERBOSE,
                 "BR.basicUpdateEntryVersion: this cache has already seen this event {}", event);
           }
         }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/CacheDistributionAdvisor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/CacheDistributionAdvisor.java
index 7e4910d..d190e8c 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/CacheDistributionAdvisor.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/CacheDistributionAdvisor.java
@@ -370,7 +370,7 @@ public class CacheDistributionAdvisor extends DistributionAdvisor {
       boolean persistent) {
     initializationGate();
 
-    if (logger.isTraceEnabled(LogMarker.DA)) {
+    if (logger.isTraceEnabled(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE)) {
       dumpProfiles("AdviseInitialImage");
     }
 
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DestroyPartitionedRegionMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DestroyPartitionedRegionMessage.java
index 838fa83..fb1b3ff 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DestroyPartitionedRegionMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DestroyPartitionedRegionMessage.java
@@ -142,27 +142,23 @@ public class DestroyPartitionedRegionMessage extends PartitionMessage {
       if (ok) {
         RegionAdvisor ra = r.getRegionAdvisor();
         ra.removeIdAndBuckets(this.sender, this.prSerial, this.bucketSerials, !this.op.isClose());
-        // r.getRegionAdvisor().removeId(this.sender);
       }
 
       sendReply(getSender(), getProcessorId(), dm, null, r, startTime);
-      /*
-       * } finally { isClosingWriteLock.unlock(); }
-       */
       return false;
     }
 
     // If region's isDestroyed flag is true, we can check if local destroy is done or not and if
-    // NOT,
-    // we can invoke destroyPartitionedRegionLocally method.
+    // NOT, we can invoke destroyPartitionedRegionLocally method.
     if (r.isDestroyed()) {
       boolean isClose = this.op.isClose();
       r.destroyPartitionedRegionLocally(!isClose);
       return true;
     }
 
-    if (logger.isTraceEnabled(LogMarker.DM)) {
-      logger.trace(LogMarker.DM, "{} operateOnRegion: {}", getClass().getName(), r.getFullPath());
+    if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+      logger.trace(LogMarker.DM_VERBOSE, "{} operateOnRegion: {}", getClass().getName(),
+          r.getFullPath());
     }
     RegionEventImpl event = new RegionEventImpl(r, this.op, this.cbArg, true, r.getMyId());
     r.basicDestroyRegion(event, false, false, true);
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskInitFile.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskInitFile.java
index 06514e2..9a6137d 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskInitFile.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskInitFile.java
@@ -55,7 +55,6 @@ import org.apache.geode.CancelCriterion;
 import org.apache.geode.CancelException;
 import org.apache.geode.DataSerializer;
 import org.apache.geode.Instantiator;
-import org.apache.geode.InternalGemFireException;
 import org.apache.geode.cache.DiskAccessException;
 import org.apache.geode.cache.EvictionAction;
 import org.apache.geode.cache.EvictionAlgorithm;
@@ -446,8 +445,8 @@ public class DiskInitFile implements DiskInitFileInterpreter {
 
         this.gotEOF = parser.gotEOF();
         this.nextSeekPosition = dis.getCount();
-        if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-          logger.trace(LogMarker.PERSIST_RECOVERY, "liveRecordCount={} totalRecordCount={}",
+        if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+          logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE, "liveRecordCount={} totalRecordCount={}",
               this.ifLiveRecordCount, this.ifTotalRecordCount);
         }
       } finally {
@@ -583,8 +582,8 @@ public class DiskInitFile implements DiskInitFileInterpreter {
       this.ifLiveRecordCount++;
       this.ifTotalRecordCount++;
     } else {
-      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-        logger.trace(LogMarker.PERSIST_RECOVERY, "bad disk region id!");
+      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+        logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE, "bad disk region id!");
       } else {
         throw new IllegalStateException("bad disk region id");
       }
@@ -637,8 +636,8 @@ public class DiskInitFile implements DiskInitFileInterpreter {
       this.ifLiveRecordCount--;
       this.ifTotalRecordCount++;
     } else {
-      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-        logger.trace(LogMarker.PERSIST_RECOVERY, "bad disk region id!");
+      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+        logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE, "bad disk region id!");
       } else {
         throw new IllegalStateException("bad disk region id");
       }
@@ -655,8 +654,8 @@ public class DiskInitFile implements DiskInitFileInterpreter {
       this.ifLiveRecordCount++;
       this.ifTotalRecordCount++;
     } else {
-      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-        logger.trace(LogMarker.PERSIST_RECOVERY, "bad disk region id!");
+      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+        logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE, "bad disk region id!");
       } else {
         throw new IllegalStateException("bad disk region id");
       }
@@ -681,8 +680,8 @@ public class DiskInitFile implements DiskInitFileInterpreter {
       this.ifLiveRecordCount++;
       this.ifTotalRecordCount++;
     } else {
-      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-        logger.trace(LogMarker.PERSIST_RECOVERY, "bad disk region id!");
+      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+        logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE, "bad disk region id!");
       } else {
         throw new IllegalStateException("bad disk region id");
       }
@@ -699,8 +698,8 @@ public class DiskInitFile implements DiskInitFileInterpreter {
       this.ifLiveRecordCount++;
       this.ifTotalRecordCount++;
     } else {
-      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-        logger.trace(LogMarker.PERSIST_RECOVERY, "bad disk region id!");
+      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+        logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE, "bad disk region id!");
       } else {
         throw new IllegalStateException("bad disk region id");
       }
@@ -1146,8 +1145,8 @@ public class DiskInitFile implements DiskInitFileInterpreter {
       }
       this.ifTotalRecordCount++;
     } else {
-      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-        logger.trace(LogMarker.PERSIST_RECOVERY, "bad disk region id!");
+      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+        logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE, "bad disk region id!");
       } else {
         throw new IllegalStateException("bad disk region id");
       }
@@ -1162,8 +1161,8 @@ public class DiskInitFile implements DiskInitFileInterpreter {
     if (dr != null) {
       dr.markInitialized();
     } else {
-      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-        logger.trace(LogMarker.PERSIST_RECOVERY, "bad disk region id!");
+      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+        logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE, "bad disk region id!");
       } else {
         throw new IllegalStateException("bad disk region id");
       }
@@ -1176,8 +1175,8 @@ public class DiskInitFile implements DiskInitFileInterpreter {
     if (dr != null) {
       dr.markBeginDestroyRegion();
     } else {
-      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-        logger.trace(LogMarker.PERSIST_RECOVERY, "bad disk region id!");
+      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+        logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE, "bad disk region id!");
       } else {
         throw new IllegalStateException("bad disk region id");
       }
@@ -1216,8 +1215,8 @@ public class DiskInitFile implements DiskInitFileInterpreter {
 
       dr.markEndDestroyRegion();
     } else {
-      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-        logger.trace(LogMarker.PERSIST_RECOVERY, "bad disk region id!");
+      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+        logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE, "bad disk region id!");
       } else {
         throw new IllegalStateException("bad disk region id");
       }
@@ -1375,18 +1374,13 @@ public class DiskInitFile implements DiskInitFileInterpreter {
 
   private void writeIFRecord(ByteBuffer bb, boolean doStats) throws IOException {
     assert lock.isHeldByCurrentThread();
-    // TODO soplog - this behavior isn't right.
-    // it should throw an exception or something.
     if (this.closed) {
       throw new DiskAccessException("The disk store is closed", parent);
     }
-    // if (this.closed) {
-    // throw new DiskAccessException("Init file is closed!", parent);
-    // }
 
     this.ifRAF.write(bb.array(), 0, bb.position());
-    if (logger.isTraceEnabled(LogMarker.PERSIST_WRITES)) {
-      logger.trace(LogMarker.PERSIST_WRITES, "DiskInitFile writeIFRecord bb[0] = {}",
+    if (logger.isTraceEnabled(LogMarker.PERSIST_WRITES_VERBOSE)) {
+      logger.trace(LogMarker.PERSIST_WRITES_VERBOSE, "DiskInitFile writeIFRecord bb[0] = {}",
           bb.array()[0]);
     }
     if (doStats) {
@@ -1402,8 +1396,8 @@ public class DiskInitFile implements DiskInitFileInterpreter {
       throw new DiskAccessException("The disk store is closed", parent);
     }
     hdos.sendTo(this.ifRAF);
-    if (logger.isTraceEnabled(LogMarker.PERSIST_WRITES)) {
-      logger.trace(LogMarker.PERSIST_WRITES, "DiskInitFile writeIFRecord HDOS");
+    if (logger.isTraceEnabled(LogMarker.PERSIST_WRITES_VERBOSE)) {
+      logger.trace(LogMarker.PERSIST_WRITES_VERBOSE, "DiskInitFile writeIFRecord HDOS");
     }
     if (doStats) {
       this.ifLiveRecordCount++;
@@ -2048,8 +2042,8 @@ public class DiskInitFile implements DiskInitFileInterpreter {
         cmnEndDestroyRegion(dr);
         writeIFRecord(IFREC_END_DESTROY_REGION_ID, dr);
         if (logger.isDebugEnabled()) {
-          logger.trace(LogMarker.PERSIST_WRITES, "DiskInitFile IFREC_END_DESTROY_REGION_ID drId={}",
-              dr.getId());
+          logger.trace(LogMarker.PERSIST_WRITES_VERBOSE,
+              "DiskInitFile IFREC_END_DESTROY_REGION_ID drId={}", dr.getId());
         }
       }
     } finally {
@@ -2431,8 +2425,8 @@ public class DiskInitFile implements DiskInitFileInterpreter {
 
   @Override
   public void cmnDiskStoreID(DiskStoreID diskStoreID) {
-    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-      logger.trace(LogMarker.PERSIST_RECOVERY, "diskStoreId={}", diskStoreID);
+    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+      logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE, "diskStoreId={}", diskStoreID);
     }
     this.parent.setDiskStoreID(diskStoreID);
   }
@@ -2450,7 +2444,7 @@ public class DiskInitFile implements DiskInitFileInterpreter {
   }
 
   public void dump() {
-    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
+    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
       System.out.println("expectedCrfs=" + Arrays.toString(this.crfIds.toArray()));
       System.out.println("expectedDrfs=" + Arrays.toString(this.drfIds.toArray()));
       System.out.println("dataSerializerIds=" + Arrays.toString(this.dsIds.toArray()));
@@ -2460,8 +2454,6 @@ public class DiskInitFile implements DiskInitFileInterpreter {
 
   /**
    * Returns a map of region_name->(pr_buckets|replicated_region)
-   *
-   * @param regName
    */
   private Map<String, List<PlaceHolderDiskRegion>> getRegionsToDump(String regName) {
     if (regName == null) {
@@ -2515,7 +2507,7 @@ public class DiskInitFile implements DiskInitFileInterpreter {
         .entrySet()) {
       printStream.print("  ");
       List<PlaceHolderDiskRegion> regions = regionEntry.getValue();
-      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
+      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
         for (PlaceHolderDiskRegion region : regions) {
           region.dump(printStream);
         }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreMonitor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreMonitor.java
index 55fcc5a..241e3b0 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreMonitor.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreMonitor.java
@@ -105,12 +105,12 @@ public class DiskStoreMonitor {
     disks = new ConcurrentHashMap<DiskStoreImpl, Set<DirectoryHolderUsage>>();
     logDisk = new LogUsage(getLogDir(logFile));
 
-    if (logger.isTraceEnabled(LogMarker.DISK_STORE_MONITOR)) {
-      logger.trace(LogMarker.DISK_STORE_MONITOR, "Disk monitoring is {}",
+    if (logger.isTraceEnabled(LogMarker.DISK_STORE_MONITOR_VERBOSE)) {
+      logger.trace(LogMarker.DISK_STORE_MONITOR_VERBOSE, "Disk monitoring is {}",
           (disableMonitor ? "disabled" : "enabled"));
-      logger.trace(LogMarker.DISK_STORE_MONITOR, "Log directory usage warning is set to {}%",
-          LOG_WARNING_THRESHOLD_PCT);
-      logger.trace(LogMarker.DISK_STORE_MONITOR, "Scheduling disk usage checks every {} ms",
+      logger.trace(LogMarker.DISK_STORE_MONITOR_VERBOSE,
+          "Log directory usage warning is set to {}%", LOG_WARNING_THRESHOLD_PCT);
+      logger.trace(LogMarker.DISK_STORE_MONITOR_VERBOSE, "Scheduling disk usage checks every {} ms",
           USAGE_CHECK_INTERVAL);
     }
 
@@ -147,8 +147,9 @@ public class DiskStoreMonitor {
   }
 
   public void addDiskStore(DiskStoreImpl ds) {
-    if (logger.isTraceEnabled(LogMarker.DISK_STORE_MONITOR)) {
-      logger.trace(LogMarker.DISK_STORE_MONITOR, "Now monitoring disk store {}", ds.getName());
+    if (logger.isTraceEnabled(LogMarker.DISK_STORE_MONITOR_VERBOSE)) {
+      logger.trace(LogMarker.DISK_STORE_MONITOR_VERBOSE, "Now monitoring disk store {}",
+          ds.getName());
     }
 
     Set<DirectoryHolderUsage> du = new HashSet<DirectoryHolderUsage>();
@@ -159,8 +160,8 @@ public class DiskStoreMonitor {
   }
 
   public void removeDiskStore(DiskStoreImpl ds) {
-    if (logger.isTraceEnabled(LogMarker.DISK_STORE_MONITOR)) {
-      logger.trace(LogMarker.DISK_STORE_MONITOR, "No longer monitoring disk store {}",
+    if (logger.isTraceEnabled(LogMarker.DISK_STORE_MONITOR_VERBOSE)) {
+      logger.trace(LogMarker.DISK_STORE_MONITOR_VERBOSE, "No longer monitoring disk store {}",
           ds.getName());
     }
 
@@ -183,22 +184,14 @@ public class DiskStoreMonitor {
 
   public void close() {
     // only shutdown if we're not waiting for the critical disk to return to normal
-    if (exec != null /* && criticalDisk == null */) {
+    if (exec != null) {
       exec.shutdownNow();
     }
     disks.clear();
   }
 
   private void checkUsage() {
-    // // 1) Check critical disk if needed
-    // if (criticalDisk != null) {
-    // criticalDisk.update(
-    // criticalDisk.disk.getDiskUsageWarningPercentage(),
-    // criticalDisk.disk.getDiskUsageCriticalPercentage());
-    // return;
-    // }
-
-    // 2) Check disk stores / dirs
+    // Check disk stores / dirs
     for (Entry<DiskStoreImpl, Set<DirectoryHolderUsage>> entry : disks.entrySet()) {
       DiskStoreImpl ds = entry.getKey();
       for (DiskUsage du : entry.getValue()) {
@@ -210,7 +203,7 @@ public class DiskStoreMonitor {
       }
     }
 
-    // 3) Check log dir
+    // Check log dir
     logDisk.update(LOG_WARNING_THRESHOLD_PCT, 100);
   }
 
@@ -250,17 +243,17 @@ public class DiskStoreMonitor {
       }
 
       if (!dir().exists()) {
-        if (logger.isTraceEnabled(LogMarker.DISK_STORE_MONITOR)) {
-          logger.trace(LogMarker.DISK_STORE_MONITOR, "Skipping check of non-existent directory {}",
-              dir().getAbsolutePath());
+        if (logger.isTraceEnabled(LogMarker.DISK_STORE_MONITOR_VERBOSE)) {
+          logger.trace(LogMarker.DISK_STORE_MONITOR_VERBOSE,
+              "Skipping check of non-existent directory {}", dir().getAbsolutePath());
         }
         return current;
       }
 
       long minMegabytes = getMinimumSpace();
       final long minBytes = minMegabytes * 1024 * 1024;
-      if (logger.isTraceEnabled(LogMarker.DISK_STORE_MONITOR)) {
-        logger.trace(LogMarker.DISK_STORE_MONITOR,
+      if (logger.isTraceEnabled(LogMarker.DISK_STORE_MONITOR_VERBOSE)) {
+        logger.trace(LogMarker.DISK_STORE_MONITOR_VERBOSE,
             "Checking usage for directory {}, minimum free space is {} MB", dir().getAbsolutePath(),
             minMegabytes);
       }
@@ -274,8 +267,8 @@ public class DiskStoreMonitor {
       recordStats(total, remaining, elapsed);
 
       String pct = Math.round(use) + "%";
-      if (logger.isTraceEnabled(LogMarker.DISK_STORE_MONITOR)) {
-        logger.trace(LogMarker.DISK_STORE_MONITOR,
+      if (logger.isTraceEnabled(LogMarker.DISK_STORE_MONITOR_VERBOSE)) {
+        logger.trace(LogMarker.DISK_STORE_MONITOR_VERBOSE,
             "Directory {} has {} bytes free out of {} ({} usage)", dir().getAbsolutePath(),
             remaining, total, pct);
       }
@@ -324,13 +317,13 @@ public class DiskStoreMonitor {
       Object[] args = new Object[] {dir.getAbsolutePath(), pct};
       switch (next) {
         case NORMAL:
-          logger.info(LogMarker.DISK_STORE_MONITOR,
+          logger.info(LogMarker.DISK_STORE_MONITOR_MARKER,
               LocalizedMessage.create(LocalizedStrings.DiskStoreMonitor_LOG_DISK_NORMAL, args));
           break;
 
         case WARN:
         case CRITICAL:
-          logger.warn(LogMarker.DISK_STORE_MONITOR,
+          logger.warn(LogMarker.DISK_STORE_MONITOR_MARKER,
               LocalizedMessage.create(LocalizedStrings.DiskStoreMonitor_LOG_DISK_WARNING, args));
           break;
       }
@@ -361,8 +354,8 @@ public class DiskStoreMonitor {
 
     protected void handleStateChange(DiskState next, String pct, String criticalMessage) {
       if (_testAction != null) {
-        logger.info(LogMarker.DISK_STORE_MONITOR, "Invoking test handler for state change to {}",
-            next);
+        logger.info(LogMarker.DISK_STORE_MONITOR_MARKER,
+            "Invoking test handler for state change to {}", next);
         _testAction.handleDiskStateChange(next);
       }
 
@@ -370,22 +363,15 @@ public class DiskStoreMonitor {
 
       switch (next) {
         case NORMAL:
-          logger.warn(LogMarker.DISK_STORE_MONITOR,
+          logger.warn(LogMarker.DISK_STORE_MONITOR_MARKER,
               LocalizedMessage.create(LocalizedStrings.DiskStoreMonitor_DISK_NORMAL, args));
-
-          // // try to restart cache after we return to normal operations
-          // if (AUTO_RECONNECT && this == criticalDisk) {
-          // performReconnect(msg);
-          // }
           break;
-
         case WARN:
-          logger.warn(LogMarker.DISK_STORE_MONITOR,
+          logger.warn(LogMarker.DISK_STORE_MONITOR_MARKER,
               LocalizedMessage.create(LocalizedStrings.DiskStoreMonitor_DISK_WARNING, args));
           break;
-
         case CRITICAL:
-          logger.error(LogMarker.DISK_STORE_MONITOR,
+          logger.error(LogMarker.DISK_STORE_MONITOR_MARKER,
               LocalizedMessage.create(LocalizedStrings.DiskStoreMonitor_DISK_CRITICAL, args));
           String msg = "Critical disk usage threshold exceeded for volume "
               + dir.getDir().getAbsolutePath() + ": " + criticalMessage;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DistTXCommitMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DistTXCommitMessage.java
index fd31de7..ed7207c 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DistTXCommitMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DistTXCommitMessage.java
@@ -220,15 +220,16 @@ public class DistTXCommitMessage extends TXMessage {
     @Override
     public void process(final DistributionManager dm, ReplyProcessor21 processor) {
       final long startTime = getTimestamp();
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM,
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE,
             "DistTXCommitPhaseTwoReplyMessage process invoking reply processor with processorId:{}",
             this.processorId);
       }
 
       if (processor == null) {
-        if (logger.isTraceEnabled(LogMarker.DM)) {
-          logger.trace(LogMarker.DM, "DistTXCommitPhaseTwoReplyMessage processor not found");
+        if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+          logger.trace(LogMarker.DM_VERBOSE,
+              "DistTXCommitPhaseTwoReplyMessage processor not found");
         }
         return;
       }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DistTXPrecommitMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DistTXPrecommitMessage.java
index ecaac7e..b7ba749 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DistTXPrecommitMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DistTXPrecommitMessage.java
@@ -226,15 +226,16 @@ public class DistTXPrecommitMessage extends TXMessage {
     @Override
     public void process(final DistributionManager dm, ReplyProcessor21 processor) {
       final long startTime = getTimestamp();
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM,
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE,
             "DistTXPhaseOneCommitReplyMessage process invoking reply processor with processorId:{}",
             this.processorId);
       }
 
       if (processor == null) {
-        if (logger.isTraceEnabled(LogMarker.DM)) {
-          logger.trace(LogMarker.DM, "DistTXPhaseOneCommitReplyMessage processor not found");
+        if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+          logger.trace(LogMarker.DM_VERBOSE,
+              "DistTXPhaseOneCommitReplyMessage processor not found");
         }
         return;
       }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DistTXRollbackMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DistTXRollbackMessage.java
index 3c43b98..16a15cd 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DistTXRollbackMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DistTXRollbackMessage.java
@@ -181,15 +181,15 @@ public class DistTXRollbackMessage extends TXMessage {
     @Override
     public void process(final DistributionManager dm, ReplyProcessor21 processor) {
       final long startTime = getTimestamp();
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM,
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE,
             "DistTXRollbackReplyMessage process invoking reply processor with processorId:{}",
             this.processorId);
       }
 
       if (processor == null) {
-        if (logger.isTraceEnabled(LogMarker.DM)) {
-          logger.trace(LogMarker.DM, "DistTXRollbackReplyMessage processor not found");
+        if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+          logger.trace(LogMarker.DM_VERBOSE, "DistTXRollbackReplyMessage processor not found");
         }
         return;
       }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedCacheOperation.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedCacheOperation.java
index ad1fd75..f723f62 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedCacheOperation.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedCacheOperation.java
@@ -256,15 +256,16 @@ public abstract class DistributedCacheOperation {
     if (this.containsRegionContentChange()) {
       viewVersion = region.getDistributionAdvisor().startOperation();
     }
-    if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) {
-      logger.trace(LogMarker.STATE_FLUSH_OP, "dispatching operation in view version {}",
+    if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP_VERBOSE)) {
+      logger.trace(LogMarker.STATE_FLUSH_OP_VERBOSE, "dispatching operation in view version {}",
           viewVersion);
     }
     try {
       _distribute();
     } catch (InvalidVersionException e) {
-      if (logger.isTraceEnabled()) {
-        logger.trace(LogMarker.DM, "PutAll failed since versions were missing; retrying again", e);
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE,
+            "PutAll failed since versions were missing; retrying again", e);
       }
 
       if (test_InvalidVersionAction != null) {
@@ -284,8 +285,8 @@ public abstract class DistributedCacheOperation {
     if (viewVersion != -1) {
       region.getDistributionAdvisor().endOperation(viewVersion);
       if (logger.isTraceEnabled()) {
-        logger.trace(LogMarker.STATE_FLUSH_OP, "done dispatching operation in view version {}",
-            viewVersion);
+        logger.trace(LogMarker.STATE_FLUSH_OP_VERBOSE,
+            "done dispatching operation in view version {}", viewVersion);
       }
     }
   }
@@ -1180,7 +1181,8 @@ public abstract class DistributedCacheOperation {
             if (!rgn.isEventTrackerInitialized()
                 && (rgn.getDataPolicy().withReplication() || rgn.getDataPolicy().withPreloaded())) {
               if (logger.isTraceEnabled()) {
-                logger.trace(LogMarker.DM_BRIDGE_SERVER, "Ignoring possible duplicate event");
+                logger.trace(LogMarker.DM_BRIDGE_SERVER_VERBOSE,
+                    "Ignoring possible duplicate event");
               }
               return;
             }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedPutAllOperation.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedPutAllOperation.java
index f15e9be..81ddbd4 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedPutAllOperation.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedPutAllOperation.java
@@ -679,9 +679,9 @@ public class DistributedPutAllOperation extends AbstractUpdateOperation {
         }
       }
 
-      if (logger.isTraceEnabled(LogMarker.GII_VERSIONED_ENTRY)) {
-        logger.trace(LogMarker.GII_VERSIONED_ENTRY, "serializing {} with flags 0x{}", this,
-            Integer.toHexString(flags));
+      if (logger.isTraceEnabled(LogMarker.INITIAL_IMAGE_VERSIONED_VERBOSE)) {
+        logger.trace(LogMarker.INITIAL_IMAGE_VERSIONED_VERBOSE, "serializing {} with flags 0x{}",
+            this, Integer.toHexString(flags));
       }
 
       out.writeByte(flags);
@@ -723,15 +723,16 @@ public class DistributedPutAllOperation extends AbstractUpdateOperation {
       boolean hasTags = (flags & 0x04) == 0x04;
       boolean persistent = (flags & 0x20) == 0x20;
 
-      if (logger.isTraceEnabled(LogMarker.GII_VERSIONED_ENTRY)) {
-        logger.debug("deserializing a InitialImageVersionedObjectList with flags 0x{}",
+      if (logger.isTraceEnabled(LogMarker.INITIAL_IMAGE_VERSIONED_VERBOSE)) {
+        logger.trace(LogMarker.INITIAL_IMAGE_VERSIONED_VERBOSE,
+            "deserializing a InitialImageVersionedObjectList with flags 0x{}",
             Integer.toHexString(flags));
       }
 
       if (hasTags) {
         int size = (int) InternalDataSerializer.readUnsignedVL(in);
-        if (logger.isTraceEnabled(LogMarker.GII_VERSIONED_ENTRY)) {
-          logger.trace(LogMarker.GII_VERSIONED_ENTRY, "reading {} version tags", size);
+        if (logger.isTraceEnabled(LogMarker.INITIAL_IMAGE_VERSIONED_VERBOSE)) {
+          logger.trace(LogMarker.INITIAL_IMAGE_VERSIONED_VERBOSE, "reading {} version tags", size);
         }
         List<VersionSource> ids = new ArrayList<VersionSource>(size);
         for (int i = 0; i < size; i++) {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegionFunctionStreamingMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegionFunctionStreamingMessage.java
index ee39e41d..6b84bf8 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegionFunctionStreamingMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRegionFunctionStreamingMessage.java
@@ -206,8 +206,8 @@ public class DistributedRegionFunctionStreamingMessage extends DistributionMessa
       }
       if (this.processorId == 0) {
         logger.debug("{} exception while processing message: {}", this, t.getMessage(), t);
-      } else if (logger.isTraceEnabled(LogMarker.DM) && (t instanceof RuntimeException)) {
-        logger.trace(LogMarker.DM, "Exception caught while processing message", t);
+      } else if (logger.isTraceEnabled(LogMarker.DM_VERBOSE) && (t instanceof RuntimeException)) {
+        logger.trace(LogMarker.DM_VERBOSE, "Exception caught while processing message", t);
       }
     } finally {
       cleanupTransaction(tx);
@@ -246,8 +246,8 @@ public class DistributedRegionFunctionStreamingMessage extends DistributionMessa
     }
 
 
-    if (logger.isTraceEnabled(LogMarker.DM)) {
-      logger.trace(LogMarker.DM, "FunctionMessage operateOnRegion: {}", r.getFullPath());
+    if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+      logger.trace(LogMarker.DM_VERBOSE, "FunctionMessage operateOnRegion: {}", r.getFullPath());
     }
     try {
       r.executeOnRegion(this, this.functionObject, this.args, this.processorId, this.filter,
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/EntryEventImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/EntryEventImpl.java
index f3c536d..abe5e33 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/EntryEventImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/EntryEventImpl.java
@@ -651,9 +651,9 @@ public class EntryEventImpl implements InternalEntryEvent, InternalCacheEvent,
     Assert.assertTrue(this.eventID == null, "Double setting event id");
     EventID newID = new EventID(sys);
     if (this.eventID != null) {
-      if (logger.isTraceEnabled(LogMarker.BRIDGE_SERVER)) {
-        logger.trace(LogMarker.BRIDGE_SERVER, "Replacing event ID with {} in event {}", newID,
-            this);
+      if (logger.isTraceEnabled(LogMarker.BRIDGE_SERVER_VERBOSE)) {
+        logger.trace(LogMarker.BRIDGE_SERVER_VERBOSE, "Replacing event ID with {} in event {}",
+            newID, this);
       }
     }
     this.eventID = newID;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/EventID.java b/geode-core/src/main/java/org/apache/geode/internal/cache/EventID.java
index cb9da65..46130ed 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/EventID.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/EventID.java
@@ -504,7 +504,7 @@ public class EventID implements DataSerializableFixedID, Serializable, Externali
 
   @Override
   public String toString() {
-    if (logger.isTraceEnabled(LogMarker.DM_BRIDGE_SERVER)) {
+    if (logger.isTraceEnabled(LogMarker.EVENT_ID_TO_STRING_VERBOSE)) {
       return expensiveToString();
     } else {
       return cheapToString();
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/FilterProfile.java b/geode-core/src/main/java/org/apache/geode/internal/cache/FilterProfile.java
index a0523cf..2edac38 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/FilterProfile.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/FilterProfile.java
@@ -1296,8 +1296,8 @@ public class FilterProfile implements DataSerializableFixedID {
     Set clientsInv = Collections.emptySet();
     Set clients = Collections.emptySet();
 
-    if (logger.isTraceEnabled(LogMarker.BRIDGE_SERVER)) {
-      logger.trace(LogMarker.BRIDGE_SERVER, "finding interested clients for {}", event);
+    if (logger.isTraceEnabled(LogMarker.BRIDGE_SERVER_VERBOSE)) {
+      logger.trace(LogMarker.BRIDGE_SERVER_VERBOSE, "finding interested clients for {}", event);
     }
 
     FilterRoutingInfo frInfo = filterRoutingInfo;
@@ -1315,8 +1315,8 @@ public class FilterProfile implements DataSerializableFixedID {
         continue;
       }
 
-      if (logger.isTraceEnabled(LogMarker.BRIDGE_SERVER)) {
-        logger.trace(LogMarker.BRIDGE_SERVER, "Processing {}", pf);
+      if (logger.isTraceEnabled(LogMarker.BRIDGE_SERVER_VERBOSE)) {
+        logger.trace(LogMarker.BRIDGE_SERVER_VERBOSE, "Processing {}", pf);
       }
 
       if (!pf.hasInterest()) {
@@ -1620,17 +1620,9 @@ public class FilterProfile implements DataSerializableFixedID {
 
   @Override
   public String toString() {
-    final boolean isDebugEnabled = logger.isTraceEnabled(LogMarker.BRIDGE_SERVER);
-    return "FilterProfile(id=" + (this.isLocalProfile ? "local" : this.memberID)
-    // + "; allKeys: " + this.allKeyClients
-    // + "; keys: " + this.keysOfInterest
-    // + "; patterns: " + this.patternsOfInterest
-    // + "; filters: " + this.filtersOfInterest
-    // + "; allKeysInv: " + this.allKeyClientsInv
-    // + "; keysInv: " + this.keysOfInterestInv
-    // + "; patternsInv: " + this.patternsOfInterestInv
-    // + "; filtersInv: " + this.filtersOfInterestInv
-        + ";  numCQs: " + ((this.cqCount == null) ? 0 : this.cqCount.get())
+    final boolean isDebugEnabled = logger.isTraceEnabled(LogMarker.BRIDGE_SERVER_VERBOSE);
+    return "FilterProfile(id=" + (this.isLocalProfile ? "local" : this.memberID) + ";  numCQs: "
+        + ((this.cqCount == null) ? 0 : this.cqCount.get())
         + (isDebugEnabled ? (";  " + getClientMappingString()) : "")
         + (isDebugEnabled ? (";  " + getCqMappingString()) : "") + ")";
   }
@@ -2039,8 +2031,8 @@ public class FilterProfile implements DataSerializableFixedID {
             this.wireIDs.put(result, realId);
           }
         }
-        if (logger.isTraceEnabled(LogMarker.BRIDGE_SERVER)) {
-          logger.trace(LogMarker.BRIDGE_SERVER, "Profile for {} mapped {} to {}",
+        if (logger.isTraceEnabled(LogMarker.BRIDGE_SERVER_VERBOSE)) {
+          logger.trace(LogMarker.BRIDGE_SERVER_VERBOSE, "Profile for {} mapped {} to {}",
               region.getFullPath(), realId, result);
         }
       }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/InitialImageOperation.java b/geode-core/src/main/java/org/apache/geode/internal/cache/InitialImageOperation.java
index e0b9a4f..306254c 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/InitialImageOperation.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/InitialImageOperation.java
@@ -514,8 +514,9 @@ public class InitialImageOperation {
               if (re == null) {
                 continue;
               }
-              if (logger.isTraceEnabled(LogMarker.GII)) {
-                logger.trace(LogMarker.GII, "Processing unfinished operation:entry={}", re);
+              if (logger.isTraceEnabled(LogMarker.INITIAL_IMAGE_VERBOSE)) {
+                logger.trace(LogMarker.INITIAL_IMAGE_VERBOSE,
+                    "Processing unfinished operation:entry={}", re);
               }
               DiskEntry de = (DiskEntry) re;
               synchronized (de) {
@@ -1083,8 +1084,8 @@ public class InitialImageOperation {
           remoteRVV.recordVersion(id, stamp.getRegionVersion());
 
           if (count < 10) {
-            if (logger.isTraceEnabled(LogMarker.GII)) {
-              logger.trace(LogMarker.GII,
+            if (logger.isTraceEnabled(LogMarker.INITIAL_IMAGE_VERBOSE)) {
+              logger.trace(LogMarker.INITIAL_IMAGE_VERBOSE,
                   "Region:{} found unfinished operation key={},member={},region version={}",
                   region.getFullPath(), mapEntry.getKey(), stamp.getMemberID(),
                   stamp.getRegionVersion());
@@ -1094,8 +1095,8 @@ public class InitialImageOperation {
         }
       }
       if (!keys.isEmpty()) {
-        if (logger.isTraceEnabled(LogMarker.GII)) {
-          logger.trace(LogMarker.GII, "Region:{} found {} unfinished operations",
+        if (logger.isTraceEnabled(LogMarker.INITIAL_IMAGE_VERBOSE)) {
+          logger.trace(LogMarker.INITIAL_IMAGE_VERBOSE, "Region:{} found {} unfinished operations",
               region.getFullPath(), keys.size());
         }
       }
@@ -1111,9 +1112,9 @@ public class InitialImageOperation {
     // concurrent clear() doesn't prevent the new region's RVV from being
     // initialized and that any vector entries that are no longer represented
     // by stamps in the region are not lost
-    if (logger.isTraceEnabled(LogMarker.GII)) {
-      logger.trace(LogMarker.GII, "Applying received version vector {} to {}", rvv.fullToString(),
-          region.getName());
+    if (logger.isTraceEnabled(LogMarker.INITIAL_IMAGE_VERBOSE)) {
+      logger.trace(LogMarker.INITIAL_IMAGE_VERBOSE, "Applying received version vector {} to {}",
+          rvv.fullToString(), region.getName());
     }
     // TODO - RVV - Our current RVV might reflect some operations
     // that are concurrent updates. We want to keep those updates. However
@@ -1124,8 +1125,8 @@ public class InitialImageOperation {
       region.getDiskRegion().writeRVV(region, false);
       region.getDiskRegion().writeRVVGC(region);
     }
-    if (logger.isTraceEnabled(LogMarker.GII)) {
-      logger.trace(LogMarker.GII, "version vector is now {}",
+    if (logger.isTraceEnabled(LogMarker.INITIAL_IMAGE_VERBOSE)) {
+      logger.trace(LogMarker.INITIAL_IMAGE_VERBOSE, "version vector is now {}",
           region.getVersionVector().fullToString());
     }
   }
@@ -1606,7 +1607,7 @@ public class InitialImageOperation {
 
     @Override
     protected void process(final ClusterDistributionManager dm) {
-      final boolean isGiiDebugEnabled = logger.isTraceEnabled(LogMarker.GII);
+      final boolean isGiiDebugEnabled = logger.isTraceEnabled(LogMarker.INITIAL_IMAGE_VERBOSE);
 
       Throwable thr = null;
       final boolean lclAbortTest = abortTest;
@@ -1631,7 +1632,7 @@ public class InitialImageOperation {
         if (this.versionVector != null) {
           if (this.versionVector.isForSynchronization() && !rgn.getConcurrencyChecksEnabled()) {
             if (isGiiDebugEnabled) {
-              logger.trace(LogMarker.GII,
+              logger.trace(LogMarker.INITIAL_IMAGE_VERBOSE,
                   "ignoring synchronization request as this region has no version vector");
             }
             replyNoData(dm, true, Collections.EMPTY_MAP);
@@ -1651,7 +1652,7 @@ public class InitialImageOperation {
             // vector doesn't have anything that the other region needs
             if (this.unfinishedKeys == null || this.unfinishedKeys.isEmpty()) {
               if (isGiiDebugEnabled) {
-                logger.trace(LogMarker.GII,
+                logger.trace(LogMarker.INITIAL_IMAGE_VERBOSE,
                     "version vector reports that I have nothing that the requester hasn't already seen");
               }
               replyNoData(dm, true, rgn.getVersionVector().getMemberToGCVersion());
@@ -1660,7 +1661,7 @@ public class InitialImageOperation {
             }
           } else {
             if (isGiiDebugEnabled) {
-              logger.trace(LogMarker.GII,
+              logger.trace(LogMarker.INITIAL_IMAGE_VERBOSE,
                   "version vector reports that I have updates the requester hasn't seen, remote rvv is {}",
                   this.versionVector);
             }
@@ -1672,8 +1673,8 @@ public class InitialImageOperation {
 
         // chunkEntries returns false if didn't finish
         if (isGiiDebugEnabled) {
-          logger.trace(LogMarker.GII, "RequestImageMessage: Starting chunkEntries for {}",
-              rgn.getFullPath());
+          logger.trace(LogMarker.INITIAL_IMAGE_VERBOSE,
+              "RequestImageMessage: Starting chunkEntries for {}", rgn.getFullPath());
         }
 
         final InitialImageFlowControl flowControl =
@@ -1700,14 +1701,14 @@ public class InitialImageOperation {
                 RegionVersionHolder holderOfRequest =
                     this.versionVector.getHolderForMember(this.lostMemberVersionID);
                 if (holderToSync.isNewerThanOrCanFillExceptionsFor(holderOfRequest)) {
-                  logger.trace(LogMarker.GII,
+                  logger.trace(LogMarker.INITIAL_IMAGE_VERBOSE,
                       "synchronizeWith detected mismatch region version holder for lost member {}. Old is {}, new is {}",
                       lostMemberVersionID, holderOfRequest, holderToSync);
                 }
               }
             } catch (TimeoutException e) {
               if (isGiiDebugEnabled) {
-                logger.trace(LogMarker.GII,
+                logger.trace(LogMarker.INITIAL_IMAGE_VERBOSE,
                     "timed out waiting for the departure of {} before processing delta GII request",
                     this.lostMemberID);
               }
@@ -1733,7 +1734,7 @@ public class InitialImageOperation {
             synchronized (rgn.getCache().getTombstoneService().getBlockGCLock()) {
               if (goWithFullGII(rgn, this.versionVector)) {
                 if (isGiiDebugEnabled) {
-                  logger.trace(LogMarker.GII, "have to do fullGII");
+                  logger.trace(LogMarker.INITIAL_IMAGE_VERBOSE, "have to do fullGII");
                 }
                 this.versionVector = null; // full GII
               } else {
@@ -1741,7 +1742,8 @@ public class InitialImageOperation {
                 int count = rgn.getCache().getTombstoneService().incrementGCBlockCount();
                 markedOngoingGII = true;
                 if (isGiiDebugEnabled) {
-                  logger.trace(LogMarker.GII, "There're {} Delta GII on going", count);
+                  logger.trace(LogMarker.INITIAL_IMAGE_VERBOSE, "There're {} Delta GII on going",
+                      count);
                 }
               }
             }
@@ -1792,7 +1794,7 @@ public class InitialImageOperation {
 
 
           if (isGiiDebugEnabled) {
-            logger.trace(LogMarker.GII,
+            logger.trace(LogMarker.INITIAL_IMAGE_VERBOSE,
                 "RequestImageMessage: ended chunkEntries for {}; finished = {}", rgn.getFullPath(),
                 finished);
           }
@@ -1813,7 +1815,7 @@ public class InitialImageOperation {
             if (count == 0) {
               markedOngoingGII = false;
               if (isGiiDebugEnabled) {
-                logger.trace(LogMarker.GII, "Delta GII count is reset");
+                logger.trace(LogMarker.INITIAL_IMAGE_VERBOSE, "Delta GII count is reset");
               }
             }
           }
@@ -1832,15 +1834,18 @@ public class InitialImageOperation {
       } catch (RegionDestroyedException e) {
         // thr = e; Don't marshal an exception here; just return null
         if (isGiiDebugEnabled) {
-          logger.trace(LogMarker.GII, "{}; Region destroyed: aborting image provision", this);
+          logger.trace(LogMarker.INITIAL_IMAGE_VERBOSE,
+              "{}; Region destroyed: aborting image provision", this);
         }
       } catch (IllegalStateException e) {
         // thr = e; Don't marshal an exception here; just return null
-        logger.trace(LogMarker.GII, "{}; disk region deleted? aborting image provision", this, e);
+        logger.trace(LogMarker.INITIAL_IMAGE_VERBOSE,
+            "{}; disk region deleted? aborting image provision", this, e);
       } catch (CancelException e) {
         // thr = e; Don't marshal an exception here; just return null
         if (isGiiDebugEnabled) {
-          logger.trace(LogMarker.GII, "{}; Cache Closed: aborting image provision", this);
+          logger.trace(LogMarker.INITIAL_IMAGE_VERBOSE,
+              "{}; Cache Closed: aborting image provision", this);
         }
       } catch (VirtualMachineError err) {
         sendFailureMessage = false; // Don't try to respond!
@@ -1971,8 +1976,9 @@ public class InitialImageOperation {
                     fillRes = mapEntry.fillInValue(rgn, entry, in, rgn.getDistributionManager(),
                         sender.getVersionObject());
                     if (versionVector != null) {
-                      if (logger.isTraceEnabled(LogMarker.GII)) {
-                        logger.trace(LogMarker.GII, "chunkEntries:entry={},stamp={}", entry, stamp);
+                      if (logger.isTraceEnabled(LogMarker.INITIAL_IMAGE_VERBOSE)) {
+                        logger.trace(LogMarker.INITIAL_IMAGE_VERBOSE,
+                            "chunkEntries:entry={},stamp={}", entry, stamp);
                       }
                     }
                   }
@@ -2342,7 +2348,7 @@ public class InitialImageOperation {
 
     @Override
     public void process(DistributionMessage msg) {
-      final boolean isGiiDebugEnabled = logger.isTraceEnabled(LogMarker.GII);
+      final boolean isGiiDebugEnabled = logger.isTraceEnabled(LogMarker.INITIAL_IMAGE_VERBOSE);
 
       ReplyMessage reply = (ReplyMessage) msg;
       try {
@@ -2350,7 +2356,7 @@ public class InitialImageOperation {
         if (reply == null) {
           // if remote member is shutting down, the reply will be null
           if (isGiiDebugEnabled) {
-            logger.trace(LogMarker.GII,
+            logger.trace(LogMarker.INITIAL_IMAGE_VERBOSE,
                 "Did not received RVVReply from {}. Remote member might be down.",
                 Arrays.toString(getMembers()));
           }
@@ -2358,8 +2364,8 @@ public class InitialImageOperation {
         }
         if (reply.getException() != null) {
           if (isGiiDebugEnabled) {
-            logger.trace(LogMarker.GII, "Failed to get RVV from {} due to {}", reply.getSender(),
-                reply.getException());
+            logger.trace(LogMarker.INITIAL_IMAGE_VERBOSE, "Failed to get RVV from {} due to {}",
+                reply.getSender(), reply.getException());
           }
           return;
         }
@@ -2370,7 +2376,7 @@ public class InitialImageOperation {
       } finally {
         if (received_rvv == null) {
           if (isGiiDebugEnabled) {
-            logger.trace(LogMarker.GII,
+            logger.trace(LogMarker.INITIAL_IMAGE_VERBOSE,
                 "{} did not send back rvv. Maybe it's non-persistent proxy region or remote region {} not found or not initialized. Nothing to do.",
                 reply.getSender(), region.getFullPath());
           }
@@ -3234,9 +3240,9 @@ public class InitialImageOperation {
         flags |= 0x08;
       }
 
-      if (logger.isTraceEnabled(LogMarker.GII_VERSIONED_ENTRY)) {
-        logger.trace(LogMarker.GII_VERSIONED_ENTRY, "serializing {} with flags 0x{}", this,
-            Integer.toHexString(flags));
+      if (logger.isTraceEnabled(LogMarker.INITIAL_IMAGE_VERSIONED_VERBOSE)) {
+        logger.trace(LogMarker.INITIAL_IMAGE_VERSIONED_VERBOSE, "serializing {} with flags 0x{}",
+            this, Integer.toHexString(flags));
       }
 
       out.writeByte(flags);
@@ -3281,7 +3287,7 @@ public class InitialImageOperation {
     @Override
     public void fromData(DataInput in) throws IOException, ClassNotFoundException {
       final boolean isGiiVersionEntryDebugEnabled =
-          logger.isTraceEnabled(LogMarker.GII_VERSIONED_ENTRY);
+          logger.isTraceEnabled(LogMarker.INITIAL_IMAGE_VERSIONED_VERBOSE);
 
       int flags = in.readByte();
       boolean hasEntries = (flags & 0x02) == 0x02;
@@ -3290,14 +3296,14 @@ public class InitialImageOperation {
       boolean persistent = (flags & 0x20) == 0x20;
 
       if (isGiiVersionEntryDebugEnabled) {
-        logger.trace(LogMarker.GII_VERSIONED_ENTRY,
+        logger.trace(LogMarker.INITIAL_IMAGE_VERSIONED_VERBOSE,
             "deserializing a InitialImageVersionedObjectList with flags 0x{}",
             Integer.toHexString(flags));
       }
       if (hasEntries) {
         int size = (int) InternalDataSerializer.readUnsignedVL(in);
         if (isGiiVersionEntryDebugEnabled) {
-          logger.trace(LogMarker.GII_VERSIONED_ENTRY, "reading {} keys", size);
+          logger.trace(LogMarker.INITIAL_IMAGE_VERSIONED_VERBOSE, "reading {} keys", size);
         }
         for (int i = 0; i < size; i++) {
           super.add((Entry) DataSerializer.readObject(in));
@@ -3307,7 +3313,7 @@ public class InitialImageOperation {
       if (hasTags) {
         int size = (int) InternalDataSerializer.readUnsignedVL(in);
         if (isGiiVersionEntryDebugEnabled) {
-          logger.trace(LogMarker.GII_VERSIONED_ENTRY, "reading {} version tags", size);
+          logger.trace(LogMarker.INITIAL_IMAGE_VERSIONED_VERBOSE, "reading {} version tags", size);
         }
         this.versionTags = new ArrayList<VersionTag>(size);
         List<VersionSource> ids = new ArrayList<VersionSource>(size);
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/LatestLastAccessTimeReplyProcessor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/LatestLastAccessTimeReplyProcessor.java
index 3d73c60..6c82007 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/LatestLastAccessTimeReplyProcessor.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/LatestLastAccessTimeReplyProcessor.java
@@ -48,8 +48,8 @@ public class LatestLastAccessTimeReplyProcessor extends ReplyProcessor21 {
       ReplyMessage reply = (ReplyMessage) msg;
       long replyTime = (long) reply.getReturnValue();
       updateLatestLastAccessTime(replyTime);
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM, "LatestLastAccessTimeReplyMessage return value is {}",
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE, "LatestLastAccessTimeReplyMessage return value is {}",
             replyTime);
       }
     } finally {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
index ebc91f8..34728cd 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
@@ -2560,8 +2560,8 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
     }
 
     getEventTracker().stop();
-    if (logger.isTraceEnabled(LogMarker.RVV) && getVersionVector() != null) {
-      logger.trace(LogMarker.RVV, "version vector for {} is {}", getName(),
+    if (logger.isTraceEnabled(LogMarker.RVV_VERBOSE) && getVersionVector() != null) {
+      logger.trace(LogMarker.RVV_VERBOSE, "version vector for {} is {}", getName(),
           getVersionVector().fullToString());
     }
     cancelTTLExpiryTask();
@@ -3240,17 +3240,12 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
       incTombstoneCount(1);
     }
 
-    if (logger.isTraceEnabled(LogMarker.TOMBSTONE_COUNT)) {
-      logger.trace(LogMarker.TOMBSTONE_COUNT,
+    if (logger.isTraceEnabled(LogMarker.TOMBSTONE_COUNT_VERBOSE)) {
+      logger.trace(LogMarker.TOMBSTONE_COUNT_VERBOSE,
           "{} tombstone for {} version={} count is {} entryMap size is {}",
           reschedule ? "rescheduling" : "scheduling", entry.getKey(),
           entry.getVersionStamp().asVersionTag(), this.tombstoneCount.get(),
           this.entries.size()/* , new Exception("stack trace") */);
-      // this can be useful for debugging tombstone count problems if there aren't a lot of
-      // concurrent threads
-      // if (TombstoneService.DEBUG_TOMBSTONE_COUNT && this.entries instanceof AbstractRegionMap) {
-      // ((AbstractRegionMap)this.entries).verifyTombstoneCount(tombstoneCount);
-      // }
     }
     getGemFireCache().getTombstoneService().scheduleTombstone(this, entry, destroyedVersion);
   }
@@ -3267,12 +3262,12 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
 
   private void unscheduleTombstone(RegionEntry entry, boolean validate) {
     incTombstoneCount(-1);
-    if (logger.isTraceEnabled(LogMarker.TOMBSTONE)) {
-      logger.trace(LogMarker.TOMBSTONE,
+    if (logger.isTraceEnabled(LogMarker.TOMBSTONE_VERBOSE)) {
+      logger.trace(LogMarker.TOMBSTONE_VERBOSE,
           "unscheduling tombstone for {} count is {} entryMap size is {}", entry.getKey(),
           this.tombstoneCount.get(), this.entries.size()/* , new Exception("stack trace") */);
     }
-    if (logger.isTraceEnabled(LogMarker.TOMBSTONE_COUNT) && validate) {
+    if (logger.isTraceEnabled(LogMarker.TOMBSTONE_COUNT_VERBOSE) && validate) {
       if (this.entries instanceof AbstractRegionMap) {
         ((AbstractRegionMap) this.entries).verifyTombstoneCount(this.tombstoneCount);
       }
@@ -4945,9 +4940,9 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
     }
 
     if (hasSeenEvent(event)) {
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM, "LR.basicInvalidate: this cache has already seen this event {}",
-            event);
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE,
+            "LR.basicInvalidate: this cache has already seen this event {}", event);
       }
       if (this.getConcurrencyChecksEnabled() && event.getVersionTag() != null
           && !event.getVersionTag().isRecorded()) {
@@ -5568,9 +5563,9 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
 
   void basicUpdateEntryVersion(EntryEventImpl event) throws EntryNotFoundException {
     if (hasSeenEvent(event)) {
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM, "LR.basicDestroy: this cache has already seen this event {}",
-            event);
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE,
+            "LR.basicDestroy: this cache has already seen this event {}", event);
       }
       if (this.getConcurrencyChecksEnabled() && event.getVersionTag() != null
           && !event.getVersionTag().isRecorded()) {
@@ -6416,9 +6411,9 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
 
     if (hasSeenEvent(event)) {
       assert getJTAEnlistedTX() == null;
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM, "LR.basicDestroy: this cache has already seen this event {}",
-            event);
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE,
+            "LR.basicDestroy: this cache has already seen this event {}", event);
       }
       if (this.getConcurrencyChecksEnabled() && event.getVersionTag() != null
           && !event.getVersionTag().isRecorded()) {
@@ -9004,7 +8999,7 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
    */
   void clearRegionLocally(RegionEventImpl regionEvent, boolean cacheWrite,
       RegionVersionVector vector) {
-    final boolean isRvvDebugEnabled = logger.isTraceEnabled(LogMarker.RVV);
+    final boolean isRvvDebugEnabled = logger.isTraceEnabled(LogMarker.RVV_VERBOSE);
 
     RegionVersionVector rvv = vector;
     if (this.serverRegionProxy != null) {
@@ -9013,13 +9008,14 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
     }
     if (rvv != null && this.getDataPolicy().withStorage()) {
       if (isRvvDebugEnabled) {
-        logger.trace(LogMarker.RVV, "waiting for my version vector to dominate{}mine={}{} other={}",
-            getLineSeparator(), getLineSeparator(), this.versionVector.fullToString(), rvv);
+        logger.trace(LogMarker.RVV_VERBOSE,
+            "waiting for my version vector to dominate{}mine={}{} other={}", getLineSeparator(),
+            getLineSeparator(), this.versionVector.fullToString(), rvv);
       }
       boolean result = this.versionVector.waitToDominate(rvv, this);
       if (!result) {
         if (isRvvDebugEnabled) {
-          logger.trace(LogMarker.RVV, "incrementing clearTimeouts for {} rvv={}", getName(),
+          logger.trace(LogMarker.RVV_VERBOSE, "incrementing clearTimeouts for {} rvv={}", getName(),
               this.versionVector.fullToString());
         }
         getCachePerfStats().incClearTimeouts();
@@ -9047,7 +9043,7 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
     RegionVersionVector myVector = getVersionVector();
     if (myVector != null) {
       if (isRvvDebugEnabled) {
-        logger.trace(LogMarker.RVV, "processing version information for {}", regionEvent);
+        logger.trace(LogMarker.RVV_VERBOSE, "processing version information for {}", regionEvent);
       }
       if (!regionEvent.isOriginRemote() && !regionEvent.getOperation().isLocal()) {
         // generate a new version for the operation
@@ -9055,14 +9051,14 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
         tag.setVersionTimeStamp(cacheTimeMillis());
         tag.setRegionVersion(myVector.getNextVersionWhileLocked());
         if (isRvvDebugEnabled) {
-          logger.trace(LogMarker.RVV, "generated version tag for clear: {}", tag);
+          logger.trace(LogMarker.RVV_VERBOSE, "generated version tag for clear: {}", tag);
         }
         regionEvent.setVersionTag(tag);
       } else {
         VersionTag tag = regionEvent.getVersionTag();
         if (tag != null) {
           if (isRvvDebugEnabled) {
-            logger.trace(LogMarker.RVV, "recording version tag for clear: {}", tag);
+            logger.trace(LogMarker.RVV_VERBOSE, "recording version tag for clear: {}", tag);
           }
           // clear() events always have the ID in the tag
           myVector.recordVersion(tag.getMemberID(), tag);
@@ -9097,7 +9093,7 @@ public class LocalRegion extends AbstractRegion implements LoaderHelperFactory,
       if (this.getDataPolicy().withPersistence()) {
         // null means not to change dr.rvvTrust
         if (isRvvDebugEnabled) {
-          logger.trace(LogMarker.RVV, "Clear: Saved current rvv: {}",
+          logger.trace(LogMarker.RVV_VERBOSE, "Clear: Saved current rvv: {}",
               this.diskRegion.getRegionVersionVector());
         }
         this.diskRegion.writeRVV(this, null);
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/Oplog.java b/geode-core/src/main/java/org/apache/geode/internal/cache/Oplog.java
index 00fb08e..93d856d 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/Oplog.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/Oplog.java
@@ -81,7 +81,6 @@ import org.apache.geode.internal.cache.DiskInitFile.DiskRegionFlag;
 import org.apache.geode.internal.cache.DiskStoreImpl.OplogCompactor;
 import org.apache.geode.internal.cache.DiskStoreImpl.OplogEntryIdSet;
 import org.apache.geode.internal.cache.DistributedRegion.DiskPosition;
-import org.apache.geode.internal.cache.backup.BackupService;
 import org.apache.geode.internal.cache.entries.DiskEntry;
 import org.apache.geode.internal.cache.entries.DiskEntry.Helper.Flushable;
 import org.apache.geode.internal.cache.entries.DiskEntry.Helper.ValueWrapper;
@@ -112,8 +111,6 @@ import org.apache.geode.internal.offheap.annotations.Retained;
 import org.apache.geode.internal.sequencelog.EntryLogger;
 import org.apache.geode.internal.shared.NativeCalls;
 import org.apache.geode.internal.util.BlobHelper;
-import org.apache.geode.internal.util.IOUtils;
-import org.apache.geode.internal.util.TransformUtils;
 import org.apache.geode.pdx.internal.PdxWriterImpl;
 
 /**
@@ -721,8 +718,8 @@ public class Oplog implements CompactableOplog, Flushable {
         // write
         // the krf. If we don't, we will recover the wrong (old) value.
         getOrCreateDRI(dr).replaceLive(old, repl);
-        if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-          logger.trace(LogMarker.PERSIST_RECOVERY,
+        if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+          logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE,
               "replacing incompatible entry key = {} old = {} new = {} oldDiskId = {} new diskId = {} tag = {} in child oplog #{}",
               old.getKey(), System.identityHashCode(old), System.identityHashCode(repl),
               old.getDiskId(), repl.getDiskId(), old.getVersionStamp(), this.getOplogId());
@@ -1432,8 +1429,8 @@ public class Oplog implements CompactableOplog, Flushable {
    */
   public long calcModEntryId(long delta) {
     long oplogKeyId = this.recoverModEntryId + delta;
-    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-      logger.trace(LogMarker.PERSIST_RECOVERY,
+    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+      logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE,
           "calcModEntryId delta={} recoverModEntryId={}  oplogKeyId={}", delta,
           this.recoverModEntryId, oplogKeyId);
     }
@@ -1449,8 +1446,8 @@ public class Oplog implements CompactableOplog, Flushable {
    */
   public long calcDelEntryId(long delta) {
     long oplogKeyId = this.recoverDelEntryId + delta;
-    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-      logger.trace(LogMarker.PERSIST_RECOVERY,
+    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+      logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE,
           "calcDelEntryId delta={} recoverModEntryId={}  oplogKeyId={}", delta,
           this.recoverModEntryId, oplogKeyId);
     }
@@ -1504,8 +1501,8 @@ public class Oplog implements CompactableOplog, Flushable {
             }
             readLastRecord = false;
             byte opCode = dis.readByte();
-            if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-              logger.trace(LogMarker.PERSIST_RECOVERY, "drf byte={} location={}", opCode,
+            if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+              logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE, "drf byte={} location={}", opCode,
                   Long.toHexString(dis.getCount()));
             }
             switch (opCode) {
@@ -1745,8 +1742,8 @@ public class Oplog implements CompactableOplog, Flushable {
             oplogKeyIdHWM = oplogKeyId;
           }
           if (okToSkipModifyRecord(deletedIds, drId, drs, oplogKeyId, true, tag).skip()) {
-            if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-              logger.trace(LogMarker.PERSIST_RECOVERY,
+            if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+              logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE,
                   "readNewEntry skipping oplogKeyId=<{}> drId={} userBits={} oplogOffset={} valueLen={}",
                   oplogKeyId, drId, userBits, oplogOffset, valueLength);
             }
@@ -1772,8 +1769,8 @@ public class Oplog implements CompactableOplog, Flushable {
             }
             DiskEntry de = drs.getDiskEntry(key);
             if (de == null) {
-              if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-                logger.trace(LogMarker.PERSIST_RECOVERY,
+              if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+                logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE,
                     "readNewEntry oplogKeyId=<{}> drId={} userBits={} oplogOffset={} valueLen={}",
                     oplogKeyId, drId, userBits, oplogOffset, valueLength);
               }
@@ -1789,8 +1786,8 @@ public class Oplog implements CompactableOplog, Flushable {
             } else {
               DiskId curdid = de.getDiskId();
               // assert curdid.getOplogId() != getOplogId();
-              if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-                logger.trace(LogMarker.PERSIST_RECOVERY,
+              if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+                logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE,
                     "ignore readNewEntry because getOplogId()={} != curdid.getOplogId()={} for drId={} key={}",
                     getOplogId(), curdid.getOplogId(), drId, key);
               }
@@ -1840,8 +1837,8 @@ public class Oplog implements CompactableOplog, Flushable {
   private void validateOpcode(DataInputStream dis, byte expect) throws IOException {
     byte opCode = dis.readByte();
     if (opCode != expect) {
-      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-        logger.trace(LogMarker.PERSIST_RECOVERY, "expected opcode id absent: {}", expect);
+      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+        logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE, "expected opcode id absent: {}", expect);
       }
       throw new IllegalStateException();
     }
@@ -1870,15 +1867,14 @@ public class Oplog implements CompactableOplog, Flushable {
             this.crf.f.length());
         boolean endOfLog = false;
         while (!endOfLog) {
-          // long startPosition = byteCount;
           if (dis.atEndOfFile()) {
             endOfLog = true;
             break;
           }
           readLastRecord = false;
           byte opCode = dis.readByte();
-          if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-            logger.trace(LogMarker.PERSIST_RECOVERY, "Oplog opCode={}", opCode);
+          if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+            logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE, "Oplog opCode={}", opCode);
           }
           switch (opCode) {
             case OPLOG_EOF_ID:
@@ -1891,8 +1887,8 @@ public class Oplog implements CompactableOplog, Flushable {
               break;
             case OPLOG_NEW_ENTRY_BASE_ID: {
               long newEntryBase = dis.readLong();
-              if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-                logger.trace(LogMarker.PERSIST_RECOVERY, "newEntryBase={}", newEntryBase);
+              if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+                logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE, "newEntryBase={}", newEntryBase);
               }
               readEndOfRecord(dis);
               setRecoverNewEntryId(newEntryBase);
@@ -2015,8 +2011,8 @@ public class Oplog implements CompactableOplog, Flushable {
     long leastSigBits = dis.readLong();
     long mostSigBits = dis.readLong();
     DiskStoreID readDSID = new DiskStoreID(mostSigBits, leastSigBits);
-    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-      logger.trace(LogMarker.PERSIST_RECOVERY, "diskStoreId={}", readDSID);
+    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+      logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE, "diskStoreId={}", readDSID);
     }
     readEndOfRecord(dis);
     DiskStoreID dsid = getParent().getDiskStoreID();
@@ -2036,20 +2032,20 @@ public class Oplog implements CompactableOplog, Flushable {
     dis.readFully(seq);
     for (int i = 0; i < OPLOG_TYPE.getLen(); i++) {
       if (seq[i] != type.getBytes()[i]) {
-        if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-          logger.trace(LogMarker.PERSIST_RECOVERY,
+        if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+          logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE,
               "oplog magic code mismatched at byte:{}, value:{}", (i + 1), seq[i]);
         }
         throw new DiskAccessException("Invalid oplog (" + type.name() + ") file provided: " + f,
             getParent());
       }
     }
-    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
+    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
       StringBuffer sb = new StringBuffer();
       for (int i = 0; i < OPLOG_TYPE.getLen(); i++) {
         sb.append(" ").append(seq[i]);
       }
-      logger.trace(LogMarker.PERSIST_RECOVERY, "oplog magic code: {}", sb);
+      logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE, "oplog magic code: {}", sb);
     }
     readEndOfRecord(dis);
   }
@@ -2103,7 +2099,9 @@ public class Oplog implements CompactableOplog, Flushable {
           LocalizedStrings.Oplog_UNEXPECTED_PRODUCT_VERSION_0.toLocalizedString(ver), e,
           getParent());
     }
-    logger.trace(LogMarker.PERSIST_RECOVERY, "version={}", recoveredGFVersion);
+    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+      logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE, "version={}", recoveredGFVersion);
+    }
     readEndOfRecord(dis);
     return recoveredGFVersion;
   }
@@ -2112,19 +2110,20 @@ public class Oplog implements CompactableOplog, Flushable {
     long recoveredCount = InternalDataSerializer.readUnsignedVL(dis);
     this.totalCount.set(recoveredCount);
 
-    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-      logger.trace(LogMarker.PERSIST_RECOVERY, "totalCount={}", totalCount);
+    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+      logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE, "totalCount={}", totalCount);
     }
     readEndOfRecord(dis);
   }
 
   private void readRVVRecord(DataInput dis, File f, boolean gcRVV, boolean latestOplog)
       throws IOException {
-    final boolean isPersistRecoveryDebugEnabled = logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY);
+    final boolean isPersistRecoveryDebugEnabled =
+        logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE);
 
     long numRegions = InternalDataSerializer.readUnsignedVL(dis);
     if (isPersistRecoveryDebugEnabled) {
-      logger.trace(LogMarker.PERSIST_RECOVERY, "readRVV entry numRegions={}", numRegions);
+      logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE, "readRVV entry numRegions={}", numRegions);
     }
     for (int region = 0; region < numRegions; region++) {
       long drId = InternalDataSerializer.readUnsignedVL(dis);
@@ -2132,7 +2131,7 @@ public class Oplog implements CompactableOplog, Flushable {
       // recovering
       DiskRecoveryStore drs = getOplogSet().getCurrentlyRecovering(drId);
       if (isPersistRecoveryDebugEnabled) {
-        logger.trace(LogMarker.PERSIST_RECOVERY, "readRVV drId={} region={}", drId, drs);
+        logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE, "readRVV drId={} region={}", drId, drs);
       }
 
       if (gcRVV) {
@@ -2149,12 +2148,12 @@ public class Oplog implements CompactableOplog, Flushable {
             Object member = getParent().getDiskInitFile().getCanonicalObject((int) memberId);
             drs.recordRecoveredGCVersion((VersionSource) member, gcVersion);
             if (isPersistRecoveryDebugEnabled) {
-              logger.trace(LogMarker.PERSIST_RECOVERY,
+              logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE,
                   "adding gcRVV entry drId={}, member={}, version={}", drId, memberId, gcVersion);
             }
           } else {
             if (isPersistRecoveryDebugEnabled) {
-              logger.trace(LogMarker.PERSIST_RECOVERY,
+              logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE,
                   "skipping gcRVV entry drId={}, member={}, version={}", drId, memberId, gcVersion);
             }
           }
@@ -2166,8 +2165,8 @@ public class Oplog implements CompactableOplog, Flushable {
             // only set rvvtrust based on the newest oplog recovered
             drs.setRVVTrusted(rvvTrusted);
             if (isPersistRecoveryDebugEnabled) {
-              logger.trace(LogMarker.PERSIST_RECOVERY, "marking RVV trusted drId={},tvvTrusted={}",
-                  drId, rvvTrusted);
+              logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE,
+                  "marking RVV trusted drId={},tvvTrusted={}", drId, rvvTrusted);
             }
           }
         }
@@ -2182,13 +2181,13 @@ public class Oplog implements CompactableOplog, Flushable {
             Object member = getParent().getDiskInitFile().getCanonicalObject((int) memberId);
             drs.recordRecoveredVersonHolder((VersionSource) member, versionHolder, latestOplog);
             if (isPersistRecoveryDebugEnabled) {
-              logger.trace(LogMarker.PERSIST_RECOVERY,
+              logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE,
                   "adding RVV entry drId={},member={},versionHolder={},latestOplog={},oplogId={}",
                   drId, memberId, versionHolder, latestOplog, getOplogId());
             }
           } else {
             if (isPersistRecoveryDebugEnabled) {
-              logger.trace(LogMarker.PERSIST_RECOVERY,
+              logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE,
                   "skipping RVV entry drId={}, member={}, versionHolder={}", drId, memberId,
                   versionHolder);
             }
@@ -2434,7 +2433,8 @@ public class Oplog implements CompactableOplog, Flushable {
   private void readNewEntry(CountingDataInputStream dis, byte opcode, OplogEntryIdSet deletedIds,
       boolean recoverValue, final LocalRegion currentRegion, Version version, ByteArrayDataInput in,
       HeapDataOutputStream hdos) throws IOException {
-    final boolean isPersistRecoveryDebugEnabled = logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY);
+    final boolean isPersistRecoveryDebugEnabled =
+        logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE);
 
     long oplogOffset = -1;
     byte userBits = dis.readByte();
@@ -2534,7 +2534,7 @@ public class Oplog implements CompactableOplog, Flushable {
         }
 
         if (isPersistRecoveryDebugEnabled) {
-          logger.trace(LogMarker.PERSIST_RECOVERY,
+          logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE,
               "readNewEntry SKIPPING oplogKeyId=<{}> drId={} userBits={} keyLen={} valueLen={} tag={}",
               oplogKeyId, drId, userBits, len, valueLength, tag);
         }
@@ -2565,8 +2565,8 @@ public class Oplog implements CompactableOplog, Flushable {
             getOplogSet().getChild().copyForwardForOfflineCompact(oplogKeyId, p2cr.getKeyBytes(),
                 objValue, userBits, drId, tag);
             if (isPersistRecoveryDebugEnabled) {
-              logger.trace(LogMarker.PERSIST_RECOVERY, "readNewEntry copyForward oplogKeyId=<{}>",
-                  oplogKeyId);
+              logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE,
+                  "readNewEntry copyForward oplogKeyId=<{}>", oplogKeyId);
             }
             // add it to the deletedIds set so we will ignore it in earlier
             // oplogs
@@ -2584,7 +2584,7 @@ public class Oplog implements CompactableOplog, Flushable {
           DiskEntry de = drs.getDiskEntry(key);
           if (de == null) {
             if (isPersistRecoveryDebugEnabled) {
-              logger.trace(LogMarker.PERSIST_RECOVERY,
+              logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE,
                   "readNewEntry oplogKeyId=<{}> drId={} key={} userBits={} oplogOffset={} valueLen={} tag={}",
                   oplogKeyId, drId, key, userBits, oplogOffset, valueLength, tag);
             }
@@ -2601,7 +2601,7 @@ public class Oplog implements CompactableOplog, Flushable {
             DiskId curdid = de.getDiskId();
             assert curdid.getOplogId() != getOplogId();
             if (isPersistRecoveryDebugEnabled) {
-              logger.trace(LogMarker.PERSIST_RECOVERY,
+              logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE,
                   "ignore readNewEntry because getOplogId()={} != curdid.getOplogId()={} for drId={} key={}",
                   getOplogId(), curdid.getOplogId(), drId, key);
             }
@@ -2620,21 +2620,14 @@ public class Oplog implements CompactableOplog, Flushable {
   private void readModifyEntry(CountingDataInputStream dis, byte opcode, OplogEntryIdSet deletedIds,
       boolean recoverValue, LocalRegion currentRegion, Version version, ByteArrayDataInput in,
       HeapDataOutputStream hdos) throws IOException {
-    final boolean isPersistRecoveryDebugEnabled = logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY);
+    final boolean isPersistRecoveryDebugEnabled =
+        logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE);
 
     long oplogOffset = -1;
     byte userBits = dis.readByte();
 
     int idByteCount = (opcode - OPLOG_MOD_ENTRY_1ID) + 1;
-    // long debugRecoverModEntryId = this.recoverModEntryId;
     long oplogKeyId = getModEntryId(dis, idByteCount);
-    // long debugOplogKeyId = dis.readLong();
-    // //assert oplogKeyId == debugOplogKeyId
-    // // : "expected=" + debugOplogKeyId + " actual=" + oplogKeyId
-    // assert debugRecoverModEntryId == debugOplogKeyId
-    // : "expected=" + debugOplogKeyId + " actual=" + debugRecoverModEntryId
-    // + " idByteCount=" + idByteCount
-    // + " delta=" + this.lastDelta;
     long drId = DiskInitFile.readDiskRegionID(dis);
     DiskRecoveryStore drs = getOplogSet().getCurrentlyRecovering(drId);
     // read versions
@@ -2727,7 +2720,7 @@ public class Oplog implements CompactableOplog, Flushable {
         }
       }
       if (isPersistRecoveryDebugEnabled) {
-        logger.trace(LogMarker.PERSIST_RECOVERY,
+        logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE,
             "readModifyEntry oplogKeyId=<{}> drId={} key=<{}> userBits={} oplogOffset={} tag={} valueLen={}",
             oplogKeyId, drId, key, userBits, oplogOffset, tag, valueLength);
       }
@@ -2745,8 +2738,8 @@ public class Oplog implements CompactableOplog, Flushable {
           getOplogSet().getChild().copyForwardForOfflineCompact(oplogKeyId, p2cr.getKeyBytes(),
               objValue, userBits, drId, tag);
           if (isPersistRecoveryDebugEnabled) {
-            logger.trace(LogMarker.PERSIST_RECOVERY, "readModifyEntry copyForward oplogKeyId=<{}>",
-                oplogKeyId);
+            logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE,
+                "readModifyEntry copyForward oplogKeyId=<{}>", oplogKeyId);
           }
           // add it to the deletedIds set so we will ignore it in earlier oplogs
           deletedIds.add(oplogKeyId);
@@ -2767,7 +2760,7 @@ public class Oplog implements CompactableOplog, Flushable {
             re.setVersionTag(tag);
           }
           if (isPersistRecoveryDebugEnabled) {
-            logger.trace(LogMarker.PERSIST_RECOVERY,
+            logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE,
                 "readModEntryWK init oplogKeyId=<{}> drId={} key=<{}> oplogOffset={} userBits={} valueLen={} tag={}",
                 oplogKeyId, drId, key, oplogOffset, userBits, valueLength, tag);
           }
@@ -2790,8 +2783,8 @@ public class Oplog implements CompactableOplog, Flushable {
       }
     } else {
       if (isPersistRecoveryDebugEnabled) {
-        logger.trace(LogMarker.PERSIST_RECOVERY, "skipping readModifyEntry oplogKeyId=<{}> drId={}",
-            oplogKeyId, drId);
+        logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE,
+            "skipping readModifyEntry oplogKeyId=<{}> drId={}", oplogKeyId, drId);
       }
     }
   }
@@ -2803,8 +2796,9 @@ public class Oplog implements CompactableOplog, Flushable {
     DiskRecoveryStore drs = getOplogSet().getCurrentlyRecovering(drId);
     // read versions
     VersionTag tag = readVersionsFromOplog(dis);
-    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-      logger.trace(LogMarker.PERSIST_RECOVERY, "readVersionTagOnlyEntry drId={} tag={}", drId, tag);
+    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+      logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE, "readVersionTagOnlyEntry drId={} tag={}",
+          drId, tag);
     }
     readEndOfRecord(dis);
 
@@ -2847,15 +2841,7 @@ public class Oplog implements CompactableOplog, Flushable {
     byte userBits = dis.readByte();
 
     int idByteCount = (opcode - OPLOG_MOD_ENTRY_WITH_KEY_1ID) + 1;
-    // long debugRecoverModEntryId = this.recoverModEntryId;
     long oplogKeyId = getModEntryId(dis, idByteCount);
-    // long debugOplogKeyId = dis.readLong();
-    // //assert oplogKeyId == debugOplogKeyId
-    // // : "expected=" + debugOplogKeyId + " actual=" + oplogKeyId
-    // assert debugRecoverModEntryId == debugOplogKeyId
-    // : "expected=" + debugOplogKeyId + " actual=" + debugRecoverModEntryId
-    // + " idByteCount=" + idByteCount
-    // + " delta=" + this.lastDelta;
     long drId = DiskInitFile.readDiskRegionID(dis);
     DiskRecoveryStore drs = getOplogSet().getCurrentlyRecovering(drId);
 
@@ -2940,8 +2926,8 @@ public class Oplog implements CompactableOplog, Flushable {
         skippedKeyBytes.put(oplogKeyId, keyBytes);
       }
       readEndOfRecord(dis);
-      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-        logger.trace(LogMarker.PERSIST_RECOVERY,
+      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+        logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE,
             "skipping readModEntryWK init oplogKeyId=<{}> drId={}", oplogKeyId, drId);
       }
     } else {
@@ -2971,8 +2957,8 @@ public class Oplog implements CompactableOplog, Flushable {
           Assert.assertTrue(p2cr != null, "First pass did not find create a compaction record");
           getOplogSet().getChild().copyForwardForOfflineCompact(oplogKeyId, p2cr.getKeyBytes(),
               objValue, userBits, drId, tag);
-          if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-            logger.trace(LogMarker.PERSIST_RECOVERY,
+          if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+            logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE,
                 "readModifyEntryWithKey copyForward oplogKeyId=<{}>", oplogKeyId);
           }
           // add it to the deletedIds set so we will ignore it in earlier oplogs
@@ -2996,8 +2982,8 @@ public class Oplog implements CompactableOplog, Flushable {
           if (tag != null) {
             re.setVersionTag(tag);
           }
-          if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-            logger.trace(LogMarker.PERSIST_RECOVERY,
+          if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+            logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE,
                 "readModEntryWK init oplogKeyId=<{}> drId={} key={} oplogOffset={} userBits={} valueLen={} tag={}",
                 oplogKeyId, drId, key, oplogOffset, userBits, valueLength, tag);
           }
@@ -3011,14 +2997,11 @@ public class Oplog implements CompactableOplog, Flushable {
               .getOplogId() != getOplogId() : "Mutiple ModEntryWK in the same oplog for getOplogId()="
                   + getOplogId() + " , curdid.getOplogId()=" + curdid.getOplogId() + " , for drId="
                   + drId + " , key=" + key;
-          if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-            logger.trace(LogMarker.PERSIST_RECOVERY,
+          if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+            logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE,
                 "ignore readModEntryWK because getOplogId()={} != curdid.getOplogId()={} for drId={} key={}",
                 getOplogId(), curdid.getOplogId(), drId, key);
           }
-          // de = drs.updateRecoveredEntry(key, re);
-          // updateRecoveredEntry(drv, de, re);
-          // this.stats.incRecoveredEntryUpdates();
         }
       }
     }
@@ -3034,19 +3017,13 @@ public class Oplog implements CompactableOplog, Flushable {
   private void readDelEntry(CountingDataInputStream dis, byte opcode, OplogEntryIdSet deletedIds,
       DiskStoreImpl parent) throws IOException {
     int idByteCount = (opcode - OPLOG_DEL_ENTRY_1ID) + 1;
-    // long debugRecoverDelEntryId = this.recoverDelEntryId;
     long oplogKeyId = getDelEntryId(dis, idByteCount);
-    // long debugOplogKeyId = dis.readLong();
     readEndOfRecord(dis);
-    // assert debugRecoverDelEntryId == debugOplogKeyId
-    // : "expected=" + debugOplogKeyId + " actual=" + debugRecoverDelEntryId
-    // + " idByteCount=" + idByteCount
-    // + " delta=" + this.lastDelta;
     deletedIds.add(oplogKeyId);
     setHasDeletes(true);
     this.stats.incRecoveredEntryDestroys();
-    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-      logger.trace(LogMarker.PERSIST_RECOVERY, "readDelEntry oplogKeyId=<{}>", oplogKeyId);
+    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+      logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE, "readDelEntry oplogKeyId=<{}>", oplogKeyId);
     }
   }
 
@@ -3093,32 +3070,16 @@ public class Oplog implements CompactableOplog, Flushable {
   private OkToSkipResult okToSkipModifyRecord(OplogEntryIdSet deletedIds, long drId,
       DiskRecoveryStore drs, long oplogEntryId, boolean checkRecoveryMap, VersionTag tag) {
     if (deletedIds.contains(oplogEntryId)) {
-      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-        logger.trace(LogMarker.PERSIST_RECOVERY,
+      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+        logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE,
             "okToSkip because oplogEntryId={} was deleted for drId={}", oplogEntryId, drId);
       }
       return OkToSkipResult.SKIP_RECORD;
     }
-    // if (dr == null || !dr.isReadyForRecovery()) {
-    // // Region has not yet been created (it is not in the diskStore drMap).
-    // // or it is not ready for recovery (i.e. it is a ProxyBucketRegion).
-    // if (getParent().getDiskInitFile().regionExists(drId)
-    // || (dr != null && !dr.isReadyForRecovery())) {
-    // // Prevent compactor from removing this oplog.
-    // // It needs to be in this state until all the regions stored it in
-    // // are recovered.
-    // addUnrecoveredRegion(drId);
-    // } else {
-    // // someone must have deleted the region from the initFile (with our
-    // public tool?)
-    // // so skip this record and don't count it as live so that the compactor
-    // can gc it.
-    // }
-    // return true;
-    // } else
     if (drs == null) { // we are not currently recovering this region
-      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-        logger.trace(LogMarker.PERSIST_RECOVERY, "okToSkip because drs is null for drId={}", drId);
+      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+        logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE, "okToSkip because drs is null for drId={}",
+            drId);
       }
       // Now when the diskStore is created we recover all the regions
       // immediately.
@@ -3139,8 +3100,8 @@ public class Oplog implements CompactableOplog, Flushable {
           DiskId curdid = de.getDiskId();
           if (curdid != null) {
             if (curdid.getOplogId() != getOplogId()) {
-              if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-                logger.trace(LogMarker.PERSIST_RECOVERY,
+              if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+                logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE,
                     "okToSkip because getOplogId()={} != curdid.getOplogId()={} for drId={} key={}",
                     getOplogId(), curdid.getOplogId(), drId, key);
               }
@@ -3160,15 +3121,15 @@ public class Oplog implements CompactableOplog, Flushable {
   private OkToSkipResult okToSkipRegion(DiskRegionView drv, long oplogKeyId, VersionTag tag) {
     long lastClearKeyId = drv.getClearOplogEntryId();
     if (lastClearKeyId != DiskStoreImpl.INVALID_ID) {
-      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-        logger.trace(LogMarker.PERSIST_RECOVERY, "lastClearKeyId={} oplogKeyId={}", lastClearKeyId,
-            oplogKeyId);
+      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+        logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE, "lastClearKeyId={} oplogKeyId={}",
+            lastClearKeyId, oplogKeyId);
       }
       if (lastClearKeyId >= 0) {
 
         if (oplogKeyId <= lastClearKeyId) {
-          if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-            logger.trace(LogMarker.PERSIST_RECOVERY,
+          if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+            logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE,
                 "okToSkip because oplogKeyId={} <= lastClearKeyId={} for drId={}", oplogKeyId,
                 lastClearKeyId, drv.getId());
           }
@@ -3183,8 +3144,8 @@ public class Oplog implements CompactableOplog, Flushable {
           // (assume clear happened after we wrapped around to negative).
           // If oplogKeyId < 0 then it happened before the clear
           // if it is < lastClearKeyId
-          if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-            logger.trace(LogMarker.PERSIST_RECOVERY,
+          if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+            logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE,
                 "okToSkip because oplogKeyId={} <= lastClearKeyId={} for drId={}", oplogKeyId,
                 lastClearKeyId, drv.getId());
           }
@@ -3194,12 +3155,12 @@ public class Oplog implements CompactableOplog, Flushable {
     }
     RegionVersionVector clearRVV = drv.getClearRVV();
     if (clearRVV != null) {
-      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-        logger.trace(LogMarker.PERSIST_RECOVERY, "clearRVV={} tag={}", clearRVV, tag);
+      if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+        logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE, "clearRVV={} tag={}", clearRVV, tag);
       }
       if (clearRVV.contains(tag.getMemberID(), tag.getRegionVersion())) {
-        if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-          logger.trace(LogMarker.PERSIST_RECOVERY,
+        if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY_VERBOSE)) {
+          logger.trace(LogMarker.PERSIST_RECOVERY_VERBOSE,
               "okToSkip because tag={} <= clearRVV={} for drId={}", tag, clearRVV, drv.getId());
         }
         // For an RVV clear, we can only skip the value during recovery
@@ -3219,7 +3180,7 @@ public class Oplog implements CompactableOplog, Flushable {
     return calcDelEntryId(getEntryIdDelta(dis, idByteCount));
   }
 
-  private/* HACK DEBUG */static long getEntryIdDelta(CountingDataInputStream dis, int idByteCount)
+  private static long getEntryIdDelta(CountingDataInputStream dis, int idByteCount)
       throws IOException {
     assert idByteCount >= 1 && idByteCount <= 8 : idByteCount;
 
@@ -3626,12 +3587,12 @@ public class Oplog implements CompactableOplog, Flushable {
           // we do not require a lock on DiskID, as concurrent access for
           // value will not occur.
           startPosForSynchOp += getOpStateValueOffset();
-          if (logger.isTraceEnabled(LogMarker.PERSIST_WRITES)) {
+          if (logger.isTraceEnabled(LogMarker.PERSIST_WRITES_VERBOSE)) {
             VersionTag tag = null;
             if (entry.getVersionStamp() != null) {
               tag = entry.getVersionStamp().asVersionTag();
             }
-            logger.trace(LogMarker.PERSIST_WRITES,
+            logger.trace(LogMarker.PERSIST_WRITES_VERBOSE,
                 "basicCreate: id=<{}> key=<{}> valueOffset={} userBits={} valueLen={} valueBytes={} drId={} versionTag={} oplog#{}",
                 abs(id.getKeyId()), entry.getKey(), startPosForSynchOp, userBits,
                 (value != null ? value.getLength() : 0), value.getBytesAsString(), dr.getId(), tag,
@@ -3920,8 +3881,8 @@ public class Oplog implements CompactableOplog, Flushable {
         }
       }
     }
-    if (logger.isTraceEnabled(LogMarker.PERSIST_WRITES)) {
-      logger.trace(LogMarker.PERSIST_WRITES,
+    if (logger.isTraceEnabled(LogMarker.PERSIST_WRITES_VERBOSE)) {
+      logger.trace(LogMarker.PERSIST_WRITES_VERBOSE,
           "krf oplogId={} key={} oplogKeyId={} de={} vo={} vl={} diskRegionId={} version tag={}",
           oplogId, deKey, oplogKeyId, System.identityHashCode(de), valueOffset, valueLength,
           diskRegionId, tag);
@@ -4604,12 +4565,12 @@ public class Oplog implements CompactableOplog, Flushable {
             startPosForSynchOp = writeOpLogBytes(this.crf, async, true);
             this.crf.currSize = temp;
             startPosForSynchOp += getOpStateValueOffset();
-            if (logger.isTraceEnabled(LogMarker.PERSIST_WRITES)) {
+            if (logger.isTraceEnabled(LogMarker.PERSIST_WRITES_VERBOSE)) {
               VersionTag tag = null;
               if (entry.getVersionStamp() != null) {
                 tag = entry.getVersionStamp().asVersionTag();
               }
-              logger.trace(LogMarker.PERSIST_WRITES,
+              logger.trace(LogMarker.PERSIST_WRITES_VERBOSE,
                   "basicModify: id=<{}> key=<{}> valueOffset={} userBits={} valueLen={} valueBytes=<{}> drId={} versionStamp={} oplog#{}",
                   abs(id.getKeyId()), entry.getKey(), startPosForSynchOp, userBits,
                   value.getLength(), value.getBytesAsString(), dr.getId(), tag, getOplogId());
@@ -4728,8 +4689,8 @@ public class Oplog implements CompactableOplog, Flushable {
             this.firstRecord = false;
             writeOpLogBytes(this.crf, async, true);
             this.crf.currSize = temp;
-            if (logger.isTraceEnabled(LogMarker.PERSIST_WRITES)) {
-              logger.trace(LogMarker.PERSIST_WRITES,
+            if (logger.isTraceEnabled(LogMarker.PERSIST_WRITES_VERBOSE)) {
+              logger.trace(LogMarker.PERSIST_WRITES_VERBOSE,
                   "basicSaveConflictVersionTag: drId={} versionStamp={} oplog#{}", dr.getId(), tag,
                   getOplogId());
             }
@@ -4787,8 +4748,8 @@ public class Oplog implements CompactableOplog, Flushable {
           getOplogSet().getChild().writeOneKeyEntryForKRF(keyBytes, userBits, valueBytes.length,
               drId, oplogKeyId, startPosForSynchOp, tag);
 
-          if (logger.isTraceEnabled(LogMarker.PERSIST_WRITES)) {
-            logger.trace(LogMarker.PERSIST_WRITES,
+          if (logger.isTraceEnabled(LogMarker.PERSIST_WRITES_VERBOSE)) {
+            logger.trace(LogMarker.PERSIST_WRITES_VERBOSE,
                 "basicCopyForwardForOfflineCompact: id=<{}> keyBytes=<{}> valueOffset={} userBits={} valueLen={} valueBytes=<{}> drId={} oplog#{}",
                 oplogKeyId, baToString(keyBytes), startPosForSynchOp, userBits, valueBytes.length,
                 baToString(valueBytes), drId, getOplogId());
@@ -5073,7 +5034,7 @@ public class Oplog implements CompactableOplog, Flushable {
             // because we might be killed right after we do this write.
             startPosForSynchOp = writeOpLogBytes(this.drf, async, true);
             setHasDeletes(true);
-            if (logger.isDebugEnabled(LogMarker.PERSIST_WRITES)) {
+            if (logger.isDebugEnabled(LogMarker.PERSIST_WRITES_VERBOSE)) {
               logger.debug("basicRemove: id=<{}> key=<{}> drId={} oplog#{}", abs(id.getKeyId()),
                   entry.getKey(), dr.getId(), getOplogId());
             }
@@ -5384,16 +5345,9 @@ public class Oplog implements CompactableOplog, Flushable {
     try {
       // No need to get the backup lock prior to synchronizing (correct lock order) since the
       // synchronized block does not attempt to get the backup lock (incorrect lock order)
-      synchronized (this.lock/* crf */) {
-        // if (this.closed || this.deleted.get()) {
-        // throw new DiskAccessException("attempting get on "
-        // + (this.deleted.get() ? "destroyed" : "closed")
-        // + " oplog #" + getOplogId(), this.owner);
-        // }
+      synchronized (this.lock) {
         this.beingRead = true;
-        if (/*
-             * !getParent().isSync() since compactor groups writes &&
-             */(offsetInOplog + valueLength) > this.crf.bytesFlushed && !this.closed) {
+        if ((offsetInOplog + valueLength) > this.crf.bytesFlushed && !this.closed) {
           flushAllNoSync(true); // fix for bug 41205
         }
         try {
@@ -5458,11 +5412,6 @@ public class Oplog implements CompactableOplog, Flushable {
           }
         } finally {
           this.beingRead = false;
-          // if (this.closed || this.deleted.get()) {
-          // throw new DiskAccessException("attempting get on "
-          // + (this.deleted.get() ? "destroyed" : "closed")
-          // + " oplog #" + getOplogId(), this.owner);
-          // }
         }
       } // sync
     } finally {
@@ -5608,11 +5557,6 @@ public class Oplog implements CompactableOplog, Flushable {
               this.crf.raf.seek(writePosition);
               this.stats.incOplogSeeks();
             }
-            // if (this.closed || this.deleted.get()) {
-            // throw new DiskAccessException("attempting get on "
-            // + (this.deleted.get() ? "destroyed" : "closed")
-            // + " oplog #" + getOplogId(), this.owner);
-            // }
           }
         }
       } catch (IOException ex) {
@@ -5653,15 +5597,12 @@ public class Oplog implements CompactableOplog, Flushable {
       // If compaction is possible then we need to leave this
       // oplog registered with the parent and allow the compactor to unregister
       // it.
-      // }
 
       deleteCRF();
       if (!crfOnly || !getHasDeletes()) {
         setHasDeletes(false);
         deleteDRF();
         // no need to call removeDrf since parent removeOplog did it
-        // getParent().removeDrf(this);
-        // getParent().oplogSetRemove(this);
       }
 
       // Fix for bug 42495 - Don't remove the oplog from this list
@@ -5674,7 +5615,6 @@ public class Oplog implements CompactableOplog, Flushable {
       setHasDeletes(false);
       deleteDRF();
       getOplogSet().removeDrf(this);
-      // getParent().oplogSetRemove(this);
     }
 
   }
@@ -6300,9 +6240,10 @@ public class Oplog implements CompactableOplog, Flushable {
       AbstractDiskRegion dr = regionEntry.getValue();
 
       RegionVersionVector rvv = dr.getRegionVersionVector();
-      if (logger.isTraceEnabled(LogMarker.PERSIST_WRITES)) {
-        logger.trace(LogMarker.PERSIST_WRITES, "serializeRVVs: isGCRVV={} drId={} rvv={} oplog#{}",
-            gcRVV, diskRegionID, rvv.fullToString(), getOplogId());
+      if (logger.isTraceEnabled(LogMarker.PERSIST_WRITES_VERBOSE)) {
+        logger.trace(LogMarker.PERSIST_WRITES_VERBOSE,
+            "serializeRVVs: isGCRVV={} drId={} rvv={} oplog#{}", gcRVV, diskRegionID,
+            rvv.fullToString(), getOplogId());
       }
 
       // Write the disk region id
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/OverflowOplog.java b/geode-core/src/main/java/org/apache/geode/internal/cache/OverflowOplog.java
index 381be2b..2d6ac53 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/OverflowOplog.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/OverflowOplog.java
@@ -767,17 +767,17 @@ class OverflowOplog implements CompactableOplog, Flushable {
           olf.bytesFlushed = startPos;
           this.stats.incOplogSeeks();
         }
-        if (logger.isTraceEnabled(LogMarker.PERSIST_WRITES)) {
-          logger.trace(LogMarker.PERSIST_WRITES, "writeOpLogBytes startPos={} oplog#{}", startPos,
-              getOplogId());
+        if (logger.isTraceEnabled(LogMarker.PERSIST_WRITES_VERBOSE)) {
+          logger.trace(LogMarker.PERSIST_WRITES_VERBOSE, "writeOpLogBytes startPos={} oplog#{}",
+              startPos, getOplogId());
         }
         long oldBytesFlushed = olf.bytesFlushed;
         long bytesWritten = this.opState.write();
         if ((startPos + bytesWritten) > olf.currSize) {
           olf.currSize = startPos + bytesWritten;
         }
-        if (logger.isTraceEnabled(LogMarker.PERSIST_WRITES)) {
-          logger.trace(LogMarker.PERSIST_WRITES,
+        if (logger.isTraceEnabled(LogMarker.PERSIST_WRITES_VERBOSE)) {
+          logger.trace(LogMarker.PERSIST_WRITES_VERBOSE,
               "writeOpLogBytes bytesWritten={} oldBytesFlushed={} byteFlushed={} oplog#{}",
               bytesWritten, oldBytesFlushed, olf.bytesFlushed, getOplogId());
         }
@@ -792,7 +792,6 @@ class OverflowOplog implements CompactableOplog, Flushable {
         // // Moved the set of lastWritePos to after write
         // // so if write throws an exception it will not be updated.
         // // This fixes bug 40449.
-        // this.lastWritePos = startPos;
       }
     }
     return startPos;
@@ -809,9 +808,7 @@ class OverflowOplog implements CompactableOplog, Flushable {
         writePosition = myRAF.getFilePointer();
         bb = attemptWriteBufferGet(writePosition, offsetInOplog, valueLength, userBits);
         if (bb == null) {
-          if (/*
-               * !getParent().isSync() since compactor groups writes &&
-               */ (offsetInOplog + valueLength) > this.crf.bytesFlushed && !this.closed) {
+          if ((offsetInOplog + valueLength) > this.crf.bytesFlushed && !this.closed) {
             flushAll(); // fix for bug 41205
             writePosition = myRAF.getFilePointer();
           }
@@ -823,16 +820,6 @@ class OverflowOplog implements CompactableOplog, Flushable {
           this.stats.incOplogSeeks();
           byte[] valueBytes = new byte[valueLength];
           myRAF.readFully(valueBytes);
-          // if (EntryBits.isSerialized(userBits)) {
-          // try {
-          // org.apache.geode.internal.util.BlobHelper.deserializeBlob(valueBytes);
-          // } catch (IOException ex) {
-          // throw new RuntimeException("DEBUG readPos=" + readPosition + " len=" + valueLength +
-          // "doneApp=" + doneAppending + " userBits=" + userBits, ex);
-          // } catch (ClassNotFoundException ex2) {
-          // throw new RuntimeException(ex2);
-          // }
-          // }
           this.stats.incOplogReads();
           bb = new BytesAndBits(valueBytes, userBits);
         } finally {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
index a1cba79..51de306 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
@@ -7667,8 +7667,8 @@ public class PartitionedRegion extends LocalRegion
         if (!ev.getInvokePRCallbacks()) {
           if (this.getSubscriptionAttributes()
               .getInterestPolicy() == InterestPolicy.CACHE_CONTENT) {
-            if (logger.isTraceEnabled()) {
-              logger.trace(LogMarker.DM_BRIDGE_SERVER,
+            if (logger.isTraceEnabled(LogMarker.DM_BRIDGE_SERVER_VERBOSE)) {
+              logger.trace(LogMarker.DM_BRIDGE_SERVER_VERBOSE,
                   "not dispatching PR event in this member as there is no interest in it");
             }
             return;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PersistentOplogSet.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PersistentOplogSet.java
index c3cd934..d6f080f 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PersistentOplogSet.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PersistentOplogSet.java
@@ -390,9 +390,6 @@ public class PersistentOplogSet implements OplogSet {
           if (parent.isValidating()) {
             if (drs instanceof ValidatingDiskRegion) {
               ValidatingDiskRegion vdr = ((ValidatingDiskRegion) drs);
-              if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
-                vdr.dump(System.out);
-              }
               if (vdr.isBucket()) {
                 String prName = vdr.getPrName();
                 if (prSizes.containsKey(prName)) {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/StateFlushOperation.java b/geode-core/src/main/java/org/apache/geode/internal/cache/StateFlushOperation.java
index dcd5130..21b2367 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/StateFlushOperation.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/StateFlushOperation.java
@@ -121,13 +121,13 @@ public class StateFlushOperation {
       ReplyProcessor21 processor = new ReplyProcessor21(dm, target);
       gr.processorId = processor.getProcessorId();
       gr.channelState = dm.getMembershipManager().getMessageState(target, false);
-      if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)
+      if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP_VERBOSE)
           && ((gr.channelState != null) && (gr.channelState.size() > 0))) {
-        logger.trace(LogMarker.STATE_FLUSH_OP, "channel states: {}",
+        logger.trace(LogMarker.STATE_FLUSH_OP_VERBOSE, "channel states: {}",
             gr.channelStateDescription(gr.channelState));
       }
-      if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) {
-        logger.trace(LogMarker.STATE_FLUSH_OP, "Sending {}", gr);
+      if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP_VERBOSE)) {
+        logger.trace(LogMarker.STATE_FLUSH_OP_VERBOSE, "Sending {}", gr);
       }
       dm.putOutgoing(gr);
       processors.add(processor);
@@ -219,14 +219,15 @@ public class StateFlushOperation {
       smm.severeAlertEnabled = true;
       gfprocessor.enableSevereAlertProcessing();
     }
-    if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) {
-      logger.trace(LogMarker.STATE_FLUSH_OP, "Sending {} with processor {}", smm, gfprocessor);
+    if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP_VERBOSE)) {
+      logger.trace(LogMarker.STATE_FLUSH_OP_VERBOSE, "Sending {} with processor {}", smm,
+          gfprocessor);
     }
     Set failures = this.dm.putOutgoing(smm);
     if (failures != null) {
       if (failures.contains(target)) {
-        if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) {
-          logger.trace(LogMarker.STATE_FLUSH_OP,
+        if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP_VERBOSE)) {
+          logger.trace(LogMarker.STATE_FLUSH_OP_VERBOSE,
               "failed to send StateMarkerMessage to target {}; returning from flush without waiting for replies",
               target);
         }
@@ -236,12 +237,9 @@ public class StateFlushOperation {
     }
 
     try {
-      // try { Thread.sleep(100); } catch (InterruptedException e) {
-      // Thread.currentThread().interrupt(); } // DEBUGGING - stall before getting membership to
-      // increase odds that target has left
       gfprocessor.waitForReplies();
-      if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) {
-        logger.trace(LogMarker.STATE_FLUSH_OP, "Finished processing {}", smm);
+      if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP_VERBOSE)) {
+        logger.trace(LogMarker.STATE_FLUSH_OP_VERBOSE, "Finished processing {}", smm);
       }
     } catch (ReplyException re) {
       logger.warn(LocalizedMessage
@@ -344,7 +342,7 @@ public class StateFlushOperation {
 
     @Override
     protected void process(ClusterDistributionManager dm) {
-      logger.trace(LogMarker.STATE_FLUSH_OP, "Processing {}", this);
+      logger.trace(LogMarker.STATE_FLUSH_OP_VERBOSE, "Processing {}", this);
       if (dm.getDistributionManagerId().equals(relayRecipient)) {
         try {
           // wait for inflight operations to the aeqs even if the recipient is the primary
@@ -372,8 +370,8 @@ public class StateFlushOperation {
           ga.sendingMember = relayRecipient;
           ga.setRecipient(this.getSender());
           ga.setProcessorId(processorId);
-          if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) {
-            logger.trace(LogMarker.STATE_FLUSH_OP, "Sending {}", ga);
+          if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP_VERBOSE)) {
+            logger.trace(LogMarker.STATE_FLUSH_OP_VERBOSE, "Sending {}", ga);
           }
           dm.putOutgoing(ga);
         }
@@ -392,8 +390,9 @@ public class StateFlushOperation {
           Set<DistributedRegion> regions = getRegions(dm);
           for (DistributedRegion r : regions) {
             if (r == null) {
-              if (logger.isTraceEnabled(LogMarker.DM)) {
-                logger.trace(LogMarker.DM, "Region not found - skipping channel state assessment");
+              if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+                logger.trace(LogMarker.DM_VERBOSE,
+                    "Region not found - skipping channel state assessment");
               }
             }
             if (r != null) {
@@ -413,9 +412,9 @@ public class StateFlushOperation {
                 } else {
                   gr.channelState = channelStates;
                 }
-                if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)
+                if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP_VERBOSE)
                     && ((gr.channelState != null) && (gr.channelState.size() > 0))) {
-                  logger.trace(LogMarker.STATE_FLUSH_OP, "channel states: {}",
+                  logger.trace(LogMarker.STATE_FLUSH_OP_VERBOSE, "channel states: {}",
                       gr.channelStateDescription(gr.channelState));
                 }
               }
@@ -429,8 +428,8 @@ public class StateFlushOperation {
               LocalizedStrings.StateFlushOperation_0__EXCEPTION_CAUGHT_WHILE_DETERMINING_CHANNEL_STATE,
               this), e);
         } finally {
-          if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) {
-            logger.trace(LogMarker.STATE_FLUSH_OP, "Sending {}", gr);
+          if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP_VERBOSE)) {
+            logger.trace(LogMarker.STATE_FLUSH_OP_VERBOSE, "Sending {}", gr);
           }
           dm.putOutgoing(gr);
         }
@@ -556,14 +555,14 @@ public class StateFlushOperation {
       // in the waiting pool to avoid blocking those connections
       dm.getWaitingThreadPool().execute(new Runnable() {
         public void run() {
-          if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) {
-            logger.trace(LogMarker.STATE_FLUSH_OP, "Processing {}", this);
+          if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP_VERBOSE)) {
+            logger.trace(LogMarker.STATE_FLUSH_OP_VERBOSE, "Processing {}", this);
           }
           try {
             if (channelState != null) {
-              if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)
+              if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP_VERBOSE)
                   && ((channelState != null) && (channelState.size() > 0))) {
-                logger.trace(LogMarker.STATE_FLUSH_OP, "Waiting for channel states:  {}",
+                logger.trace(LogMarker.STATE_FLUSH_OP_VERBOSE, "Waiting for channel states:  {}",
                     channelStateDescription(channelState));
               }
               for (;;) {
@@ -609,8 +608,8 @@ public class StateFlushOperation {
               ga.sendingMember = getSender();
             }
             ga.setProcessorId(processorId);
-            if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) {
-              logger.trace(LogMarker.STATE_FLUSH_OP, "Sending {}", ga);
+            if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP_VERBOSE)) {
+              logger.trace(LogMarker.STATE_FLUSH_OP_VERBOSE, "Sending {}", ga);
             }
             if (requestingMember.equals(dm.getDistributionManagerId())) {
               ga.dmProcess(dm);
@@ -679,8 +678,8 @@ public class StateFlushOperation {
 
     @Override
     public void process(final DistributionManager dm, final ReplyProcessor21 processor) {
-      if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) {
-        logger.trace(LogMarker.STATE_FLUSH_OP, "Processing {}", this);
+      if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP_VERBOSE)) {
+        logger.trace(LogMarker.STATE_FLUSH_OP_VERBOSE, "Processing {}", this);
       }
       super.process(dm, processor);
     }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/TXRegionLockRequestImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/TXRegionLockRequestImpl.java
index 5b5425d..01c49ee 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/TXRegionLockRequestImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/TXRegionLockRequestImpl.java
@@ -121,8 +121,8 @@ public class TXRegionLockRequestImpl implements TXRegionLockRequest {
   private Set<Object> readEntryKeySet(final int size, final DataInput in)
       throws IOException, ClassNotFoundException {
 
-    if (logger.isTraceEnabled()) {
-      logger.trace(LogMarker.SERIALIZER, "Reading HashSet with size {}", size);
+    if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) {
+      logger.trace(LogMarker.SERIALIZER_VERBOSE, "Reading HashSet with size {}", size);
     }
 
     final HashSet<Object> set = new HashSet<Object>(size);
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/TXRemoteCommitMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/TXRemoteCommitMessage.java
index cbbbbf4..c4e2b8e 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/TXRemoteCommitMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/TXRemoteCommitMessage.java
@@ -187,15 +187,15 @@ public class TXRemoteCommitMessage extends TXMessage {
     @Override
     public void process(final DistributionManager dm, ReplyProcessor21 processor) {
       final long startTime = getTimestamp();
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM,
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE,
             "TXRemoteCommitReply process invoking reply processor with processorId:{}",
             this.processorId);
       }
 
       if (processor == null) {
-        if (logger.isTraceEnabled(LogMarker.DM)) {
-          logger.trace(LogMarker.DM, "TXRemoteCommitReply processor not found");
+        if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+          logger.trace(LogMarker.DM_VERBOSE, "TXRemoteCommitReply processor not found");
         }
         return;
       }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/TombstoneService.java b/geode-core/src/main/java/org/apache/geode/internal/cache/TombstoneService.java
index e73c3ef..14c1d53 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/TombstoneService.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/TombstoneService.java
@@ -380,8 +380,8 @@ public class TombstoneService {
 
     @Override
     protected void expireTombstone(Tombstone tombstone) {
-      if (logger.isTraceEnabled(LogMarker.TOMBSTONE)) {
-        logger.trace(LogMarker.TOMBSTONE, "removing expired tombstone {}", tombstone);
+      if (logger.isTraceEnabled(LogMarker.TOMBSTONE_VERBOSE)) {
+        logger.trace(LogMarker.TOMBSTONE_VERBOSE, "removing expired tombstone {}", tombstone);
       }
       updateMemoryEstimate(-tombstone.getSize());
       tombstone.region.getRegionMap().removeTombstone(tombstone.entry, tombstone, false, true);
@@ -679,8 +679,9 @@ public class TombstoneService {
 
     @Override
     protected void expireTombstone(Tombstone tombstone) {
-      if (logger.isTraceEnabled(LogMarker.TOMBSTONE)) {
-        logger.trace(LogMarker.TOMBSTONE, "adding expired tombstone {} to batch", tombstone);
+      if (logger.isTraceEnabled(LogMarker.TOMBSTONE_VERBOSE)) {
+        logger.trace(LogMarker.TOMBSTONE_VERBOSE, "adding expired tombstone {} to batch",
+            tombstone);
       }
       synchronized (expiredTombstonesLock) {
         expiredTombstones.add(tombstone);
@@ -880,8 +881,8 @@ public class TombstoneService {
     }
 
     public void run() {
-      if (logger.isTraceEnabled(LogMarker.TOMBSTONE)) {
-        logger.trace(LogMarker.TOMBSTONE,
+      if (logger.isTraceEnabled(LogMarker.TOMBSTONE_VERBOSE)) {
+        logger.trace(LogMarker.TOMBSTONE_VERBOSE,
             "Destroyed entries sweeper starting with sleep interval of {} milliseconds",
             EXPIRY_TIME);
       }
@@ -919,8 +920,8 @@ public class TombstoneService {
       }
       beforeSleepChecks();
       sleepTime = Math.min(sleepTime, MAX_SLEEP_TIME);
-      if (logger.isTraceEnabled(LogMarker.TOMBSTONE)) {
-        logger.trace(LogMarker.TOMBSTONE, "sleeping for {}", sleepTime);
+      if (logger.isTraceEnabled(LogMarker.TOMBSTONE_VERBOSE)) {
+        logger.trace(LogMarker.TOMBSTONE_VERBOSE, "sleeping for {}", sleepTime);
       }
       synchronized (this) {
         if (isStopped) {
@@ -951,8 +952,8 @@ public class TombstoneService {
       boolean removedObsoleteTombstone = removeIf(tombstone -> {
         if (tombstone.region.getRegionMap().isTombstoneNotNeeded(tombstone.entry,
             tombstone.getEntryVersion())) {
-          if (logger.isTraceEnabled(LogMarker.TOMBSTONE)) {
-            logger.trace(LogMarker.TOMBSTONE, "removing obsolete tombstone: {}", tombstone);
+          if (logger.isTraceEnabled(LogMarker.TOMBSTONE_VERBOSE)) {
+            logger.trace(LogMarker.TOMBSTONE_VERBOSE, "removing obsolete tombstone: {}", tombstone);
           }
           return true;
         }
@@ -978,14 +979,14 @@ public class TombstoneService {
       Tombstone oldest = tombstones.peek();
       try {
         if (oldest == null) {
-          if (logger.isTraceEnabled(LogMarker.TOMBSTONE)) {
-            logger.trace(LogMarker.TOMBSTONE, "queue is empty - will sleep");
+          if (logger.isTraceEnabled(LogMarker.TOMBSTONE_VERBOSE)) {
+            logger.trace(LogMarker.TOMBSTONE_VERBOSE, "queue is empty - will sleep");
           }
           handleNoUnexpiredTombstones();
           sleepTime = EXPIRY_TIME;
         } else {
-          if (logger.isTraceEnabled(LogMarker.TOMBSTONE)) {
-            logger.trace(LogMarker.TOMBSTONE, "oldest unexpired tombstone is {}", oldest);
+          if (logger.isTraceEnabled(LogMarker.TOMBSTONE_VERBOSE)) {
+            logger.trace(LogMarker.TOMBSTONE_VERBOSE, "oldest unexpired tombstone is {}", oldest);
           }
           long msTillHeadTombstoneExpires = oldest.getVersionTimeStamp() + EXPIRY_TIME - now;
           if (hasExpired(msTillHeadTombstoneExpires)) {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/VMLRURegionMap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/VMLRURegionMap.java
index 236c154..92d3a90 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/VMLRURegionMap.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/VMLRURegionMap.java
@@ -22,11 +22,8 @@ import org.apache.logging.log4j.Logger;
 
 import org.apache.geode.InternalGemFireException;
 import org.apache.geode.cache.EvictionAction;
-import org.apache.geode.cache.EvictionAlgorithm;
-import org.apache.geode.cache.Region;
 import org.apache.geode.cache.RegionDestroyedException;
 import org.apache.geode.internal.Assert;
-import org.apache.geode.internal.cache.RegionMap.Attributes;
 import org.apache.geode.internal.cache.control.InternalResourceManager;
 import org.apache.geode.internal.cache.entries.DiskEntry;
 import org.apache.geode.internal.cache.eviction.AbstractEvictionController;
@@ -45,7 +42,6 @@ import org.apache.geode.internal.logging.LogService;
 import org.apache.geode.internal.logging.log4j.LocalizedMessage;
 import org.apache.geode.internal.logging.log4j.LogMarker;
 import org.apache.geode.internal.offheap.StoredObject;
-import org.apache.geode.internal.size.ReflectionSingleObjectSizer;
 
 /**
  * Internal implementation of {@link RegionMap} for regions stored in normal VM memory that maintain
@@ -255,8 +251,8 @@ public class VMLRURegionMap extends AbstractRegionMap {
       synchronized (entry) {
         if (entry.isInUseByTransaction()) {
           entry.unsetEvicted();
-          if (logger.isTraceEnabled(LogMarker.LRU)) {
-            logger.trace(LogMarker.LRU, "No eviction of transactional entry for key={}",
+          if (logger.isTraceEnabled(LogMarker.LRU_VERBOSE)) {
+            logger.trace(LogMarker.LRU_VERBOSE, "No eviction of transactional entry for key={}",
                 entry.getKey());
           }
           return 0;
@@ -265,16 +261,17 @@ public class VMLRURegionMap extends AbstractRegionMap {
         // Do the following check while synchronized to fix bug 31761
         Token entryVal = entry.getValueAsToken();
         if (entryVal == null) {
-          if (logger.isTraceEnabled(LogMarker.LRU)) {
-            logger.trace(LogMarker.LRU, "no need to evict already evicted key={}", entry.getKey());
+          if (logger.isTraceEnabled(LogMarker.LRU_VERBOSE)) {
+            logger.trace(LogMarker.LRU_VERBOSE, "no need to evict already evicted key={}",
+                entry.getKey());
           }
           return 0;
         }
         if (Token.isInvalidOrRemoved(entryVal)) {
           // no need to evict these; it will not save any space
           // and the destroyed token needs to stay in memory
-          if (logger.isTraceEnabled(LogMarker.LRU)) {
-            logger.trace(LogMarker.LRU, "no need to evict {} token for key={}", entryVal,
+          if (logger.isTraceEnabled(LogMarker.LRU_VERBOSE)) {
+            logger.trace(LogMarker.LRU_VERBOSE, "no need to evict {} token for key={}", entryVal,
                 entry.getKey());
           }
           return 0;
@@ -289,16 +286,14 @@ public class VMLRURegionMap extends AbstractRegionMap {
         if (_getOwner() instanceof BucketRegion) {
           BucketRegion bucketRegion = (BucketRegion) _getOwner();
           bucketRegion.updateCounter(change);
-          // if(bucketRegion.getBucketAdvisor().isPrimary()){
           stats.updateCounter(change);
-          // }
         } else {
           stats.updateCounter(change);
         }
 
       } else {
-        if (logger.isTraceEnabled(LogMarker.LRU)) {
-          logger.trace(LogMarker.LRU,
+        if (logger.isTraceEnabled(LogMarker.LRU_VERBOSE)) {
+          logger.trace(LogMarker.LRU_VERBOSE,
               "no need to evict token for key={} because moving its value to disk resulted in a net change of {} bytes.",
               entry.getKey(), change);
         }
@@ -326,8 +321,8 @@ public class VMLRURegionMap extends AbstractRegionMap {
     getEvictionList().getStatistics().updateCounter(delta);
 
     if (delta > 0) {
-      if (logger.isTraceEnabled(LogMarker.LRU)) {
-        logger.trace(LogMarker.LRU, "total lru size is now: {}", getTotalEntrySize());
+      if (logger.isTraceEnabled(LogMarker.LRU_VERBOSE)) {
+        logger.trace(LogMarker.LRU_VERBOSE, "total lru size is now: {}", getTotalEntrySize());
       }
     }
   }
@@ -371,7 +366,7 @@ public class VMLRURegionMap extends AbstractRegionMap {
 
   @Override
   public void lruUpdateCallback() {
-    final boolean isDebugEnabled_LRU = logger.isTraceEnabled(LogMarker.LRU);
+    final boolean isDebugEnabled_LRU = logger.isTraceEnabled(LogMarker.LRU_VERBOSE);
 
     if (getCallbackDisabled()) {
       return;
@@ -380,7 +375,7 @@ public class VMLRURegionMap extends AbstractRegionMap {
     int bytesToEvict = delta;
     resetThreadLocals();
     if (isDebugEnabled_LRU && _isOwnerALocalRegion()) {
-      logger.trace(LogMarker.LRU,
+      logger.trace(LogMarker.LRU_VERBOSE,
           "lruUpdateCallback; list size is: {}; actual size is: {}; map size is: {}; delta is: {}; limit is: {}; tombstone count={}",
           getTotalEntrySize(), this.getEvictionList().size(), size(), delta, getLimit(),
           _getOwner().getTombstoneCount());
@@ -441,7 +436,7 @@ public class VMLRURegionMap extends AbstractRegionMap {
               if (sizeOfValue != 0) {
                 bytesToEvict -= sizeOfValue;
                 if (isDebugEnabled_LRU) {
-                  logger.trace(LogMarker.LRU,
+                  logger.trace(LogMarker.LRU_VERBOSE,
                       "evicted entry key={} total entry size is now: {} bytesToEvict :{}",
                       removalEntry.getKey(), getTotalEntrySize(), bytesToEvict);
                 }
@@ -450,14 +445,14 @@ public class VMLRURegionMap extends AbstractRegionMap {
                   _getOwner().incBucketEvictions();
                 }
                 if (isDebugEnabled_LRU) {
-                  logger.trace(LogMarker.LRU, "evictions={}", stats.getEvictions());
+                  logger.trace(LogMarker.LRU_VERBOSE, "evictions={}", stats.getEvictions());
                 }
               }
 
             } else {
               if (getTotalEntrySize() != 0) {
                 if (isDebugEnabled_LRU) {
-                  logger.trace(LogMarker.LRU, "leaving evict loop early");
+                  logger.trace(LogMarker.LRU_VERBOSE, "leaving evict loop early");
                 }
               }
               break;
@@ -466,7 +461,8 @@ public class VMLRURegionMap extends AbstractRegionMap {
         }
       } catch (RegionClearedException e) {
         if (isDebugEnabled_LRU) {
-          logger.trace(LogMarker.LRU, "exception ={}", e.getCause().getMessage(), e.getCause());
+          logger.trace(LogMarker.LRU_VERBOSE, "exception ={}", e.getCause().getMessage(),
+              e.getCause());
         }
       }
     } else {
@@ -478,7 +474,7 @@ public class VMLRURegionMap extends AbstractRegionMap {
           if (removalEntry != null) {
             if (evictEntry(removalEntry, stats) != 0) {
               if (isDebugEnabled_LRU) {
-                logger.trace(LogMarker.LRU,
+                logger.trace(LogMarker.LRU_VERBOSE,
                     "evicted entry key(2)={} total entry size is now: {} bytesToEvict: {}",
                     removalEntry.getKey(), getTotalEntrySize(), bytesToEvict);
               }
@@ -487,7 +483,7 @@ public class VMLRURegionMap extends AbstractRegionMap {
                 _getOwner().incBucketEvictions();
               }
               if (isDebugEnabled_LRU) {
-                logger.trace(LogMarker.LRU, "evictions={}", stats.getEvictions());
+                logger.trace(LogMarker.LRU_VERBOSE, "evictions={}", stats.getEvictions());
               }
 
             }
@@ -495,7 +491,7 @@ public class VMLRURegionMap extends AbstractRegionMap {
           } else {
             if (getTotalEntrySize() != 0) {
               if (isDebugEnabled_LRU) {
-                logger.trace(LogMarker.LRU, "leaving evict loop early");
+                logger.trace(LogMarker.LRU_VERBOSE, "leaving evict loop early");
               }
             }
             break;
@@ -509,7 +505,7 @@ public class VMLRURegionMap extends AbstractRegionMap {
       }
     }
     if (isDebugEnabled_LRU) {
-      logger.trace(LogMarker.LRU, "callback complete.  LRU size is now {}",
+      logger.trace(LogMarker.LRU_VERBOSE, "callback complete.  LRU size is now {}",
           getEvictionController().getCounters().getCounter());
     }
     // If in transaction context (either local or message)
@@ -525,7 +521,7 @@ public class VMLRURegionMap extends AbstractRegionMap {
 
   @Override
   public int centralizedLruUpdateCallback() {
-    final boolean isDebugEnabled_LRU = logger.isTraceEnabled(LogMarker.LRU);
+    final boolean isDebugEnabled_LRU = logger.isTraceEnabled(LogMarker.LRU_VERBOSE);
 
     int evictedBytes = 0;
     if (getCallbackDisabled()) {
@@ -534,8 +530,9 @@ public class VMLRURegionMap extends AbstractRegionMap {
     getDelta();
     resetThreadLocals();
     if (isDebugEnabled_LRU) {
-      logger.trace(LogMarker.LRU, "centralLruUpdateCallback: lru size is now {}, limit is: {}",
-          getTotalEntrySize(), getLimit());
+      logger.trace(LogMarker.LRU_VERBOSE,
+          "centralLruUpdateCallback: lru size is now {}, limit is: {}", getTotalEntrySize(),
+          getLimit());
     }
     EvictionCounters stats = getEvictionList().getStatistics();
     try {
@@ -553,7 +550,7 @@ public class VMLRURegionMap extends AbstractRegionMap {
         } else {
           if (getTotalEntrySize() != 0) {
             if (isDebugEnabled_LRU) {
-              logger.trace(LogMarker.LRU, "leaving evict loop early");
+              logger.trace(LogMarker.LRU_VERBOSE, "leaving evict loop early");
             }
           }
           break;
@@ -562,11 +559,12 @@ public class VMLRURegionMap extends AbstractRegionMap {
     } catch (RegionClearedException e) {
       // Ignore
       if (isDebugEnabled_LRU) {
-        logger.trace(LogMarker.LRU, "exception ={}", e.getCause().getMessage(), e.getCause());
+        logger.trace(LogMarker.LRU_VERBOSE, "exception ={}", e.getCause().getMessage(),
+            e.getCause());
       }
     }
     if (isDebugEnabled_LRU) {
-      logger.trace(LogMarker.LRU, "callback complete");
+      logger.trace(LogMarker.LRU_VERBOSE, "callback complete");
     }
     // If in transaction context (either local or message)
     // reset the tx thread local
@@ -584,9 +582,9 @@ public class VMLRURegionMap extends AbstractRegionMap {
   public void updateEvictionCounter() {
     final int delta = getDelta();
     resetThreadLocals();
-    if (logger.isTraceEnabled(LogMarker.LRU)) {
-      logger.trace(LogMarker.LRU, "updateStats - delta is: {} total is: {} limit is: {}", delta,
-          getTotalEntrySize(), getLimit());
+    if (logger.isTraceEnabled(LogMarker.LRU_VERBOSE)) {
+      logger.trace(LogMarker.LRU_VERBOSE, "updateStats - delta is: {} total is: {} limit is: {}",
+          delta, getTotalEntrySize(), getLimit());
     }
 
     if (delta != 0) {
@@ -660,14 +658,12 @@ public class VMLRURegionMap extends AbstractRegionMap {
   @Override
   protected void lruEntryCreate(RegionEntry re) {
     EvictableEntry e = (EvictableEntry) re;
-    // Assert.assertFalse(e._getValue() instanceof DiskEntry.RecoveredEntry)
-    if (logger.isTraceEnabled(LogMarker.LRU)) {
-      logger.trace(LogMarker.LRU,
+    if (logger.isTraceEnabled(LogMarker.LRU_VERBOSE)) {
+      logger.trace(LogMarker.LRU_VERBOSE,
           "lruEntryCreate for key={}; list size is: {}; actual size is: {}; map size is: {}; entry size: {}; in lru clock: {}",
           re.getKey(), getTotalEntrySize(), this.getEvictionList().size(), size(), e.getEntrySize(),
           !e.isEvicted());
     }
-    // this.lruCreatedKey = re.getKey(); // [ bruce ] for DEBUGGING only
     e.unsetEvicted();
     EvictionList lruList = getEvictionList();
     DiskRegion disk = _getOwner().getDiskRegion();
@@ -723,20 +719,14 @@ public class VMLRURegionMap extends AbstractRegionMap {
   @Override
   public void lruEntryDestroy(RegionEntry regionEntry) {
     final EvictableEntry e = (EvictableEntry) regionEntry;
-    if (logger.isTraceEnabled(LogMarker.LRU)) {
-      logger.trace(LogMarker.LRU,
+    if (logger.isTraceEnabled(LogMarker.LRU_VERBOSE)) {
+      logger.trace(LogMarker.LRU_VERBOSE,
           "lruEntryDestroy for key={}; list size is: {}; actual size is: {}; map size is: {}; entry size: {}; in lru clock: {}",
           regionEntry.getKey(), getTotalEntrySize(), this.getEvictionList().size(), size(),
           e.getEntrySize(), !e.isEvicted());
     }
 
-    // if (this.lruCreatedKey == re.getKey()) {
-    // String method = Thread.currentThread().getStackTrace()[5].getMethodName();
-    // }
-    // boolean wasEvicted = e.testEvicted();
-    /* boolean removed = */
     getEvictionList().destroyEntry(e);
-    // if (removed || wasEvicted) { // evicted entries have already been removed from the list
     changeTotalEntrySize(-1 * e.getEntrySize());// subtract the size.
     Token vTok = regionEntry.getValueAsToken();
     if (vTok == Token.DESTROYED || vTok == Token.TOMBSTONE) {
@@ -745,10 +735,6 @@ public class VMLRURegionMap extends AbstractRegionMap {
       // staying in the map and may be resurrected
       e.updateEntrySize(getEvictionController());
     }
-    // } else if (debug) {
-    // debugLogging("entry not removed from LRU list");
-    // }
-
   }
 
   /**
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/AbstractRegionEntry.java b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/AbstractRegionEntry.java
index 761cb43..7f8b6d4 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/AbstractRegionEntry.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/AbstractRegionEntry.java
@@ -1734,7 +1734,7 @@ public abstract class AbstractRegionEntry implements HashRegionEntry<Object, Obj
 
         if (stampDsId != 0 && stampDsId != tagDsId && stampDsId != -1) {
           StringBuilder verbose = null;
-          if (logger.isTraceEnabled(LogMarker.TOMBSTONE)) {
+          if (logger.isTraceEnabled(LogMarker.TOMBSTONE_VERBOSE)) {
             verbose = new StringBuilder();
             verbose.append("processing tag for key ").append(getKey()).append(", stamp=")
                 .append(stamp.asVersionTag()).append(", tag=").append(tag);
@@ -1745,7 +1745,7 @@ public abstract class AbstractRegionEntry implements HashRegionEntry<Object, Obj
               && tag.getDistributedSystemId() >= stamp.getDistributedSystemId()))) {
             if (verbose != null) {
               verbose.append(" - allowing event");
-              logger.trace(LogMarker.TOMBSTONE, verbose);
+              logger.trace(LogMarker.TOMBSTONE_VERBOSE, verbose);
             }
             // Update the stamp with event's version information.
             applyVersionTag(r, stamp, tag, originator);
@@ -1755,7 +1755,7 @@ public abstract class AbstractRegionEntry implements HashRegionEntry<Object, Obj
           if (stampTime > 0) {
             if (verbose != null) {
               verbose.append(" - disallowing event");
-              logger.trace(LogMarker.TOMBSTONE, verbose);
+              logger.trace(LogMarker.TOMBSTONE_VERBOSE, verbose);
             }
             r.getCachePerfStats().incConflatedEventsCount();
             persistConflictingTag(r, tag);
@@ -1810,7 +1810,7 @@ public abstract class AbstractRegionEntry implements HashRegionEntry<Object, Obj
       VersionStamp stamp = getVersionStamp();
 
       StringBuilder verbose = null;
-      if (logger.isTraceEnabled(LogMarker.TOMBSTONE)) {
+      if (logger.isTraceEnabled(LogMarker.TOMBSTONE_VERBOSE)) {
         VersionTag stampTag = stamp.asVersionTag();
         if (stampTag.hasValidVersion() && checkForConflict) {
           // only be verbose here if there's a possibility we might reject the operation
@@ -1850,7 +1850,7 @@ public abstract class AbstractRegionEntry implements HashRegionEntry<Object, Obj
         throw e;
       } finally {
         if (verbose != null) {
-          logger.trace(LogMarker.TOMBSTONE, verbose);
+          logger.trace(LogMarker.TOMBSTONE_VERBOSE, verbose);
         }
       }
 
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/event/DistributedEventTracker.java b/geode-core/src/main/java/org/apache/geode/internal/cache/event/DistributedEventTracker.java
index fc11a04..68afe19 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/event/DistributedEventTracker.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/event/DistributedEventTracker.java
@@ -351,8 +351,8 @@ public class DistributedEventTracker implements EventTracker {
       }
       // log at fine because partitioned regions can send event multiple times
       // during normal operation during bucket region initialization
-      if (logger.isTraceEnabled(LogMarker.DISTRIBUTION_BRIDGE_SERVER)) {
-        logger.trace(LogMarker.DISTRIBUTION_BRIDGE_SERVER,
+      if (logger.isTraceEnabled(LogMarker.DISTRIBUTION_BRIDGE_SERVER_VERBOSE)) {
+        logger.trace(LogMarker.DISTRIBUTION_BRIDGE_SERVER_VERBOSE,
             "Cache encountered replay of event with ID {}.  Highest recorded for this source is {}",
             eventID, evh.getLastSequenceNumber());
       }
@@ -391,9 +391,9 @@ public class DistributedEventTracker implements EventTracker {
       }
       // log at fine because partitioned regions can send event multiple times
       // during normal operation during bucket region initialization
-      if (logger.isTraceEnabled(LogMarker.DISTRIBUTION_BRIDGE_SERVER)
+      if (logger.isTraceEnabled(LogMarker.DISTRIBUTION_BRIDGE_SERVER_VERBOSE)
           && evh.getVersionTag() == null) {
-        logger.trace(LogMarker.DISTRIBUTION_BRIDGE_SERVER,
+        logger.trace(LogMarker.DISTRIBUTION_BRIDGE_SERVER_VERBOSE,
             "Could not recover version tag.  Found event holder with no version tag for {}",
             eventID);
       }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/eviction/AbstractEvictionList.java b/geode-core/src/main/java/org/apache/geode/internal/cache/eviction/AbstractEvictionList.java
index 7cee271..67811a8 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/eviction/AbstractEvictionList.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/eviction/AbstractEvictionList.java
@@ -102,8 +102,8 @@ abstract class AbstractEvictionList implements EvictionList {
       return;
     }
 
-    if (logger.isTraceEnabled(LogMarker.LRU_CLOCK)) {
-      logger.trace(LogMarker.LRU_CLOCK, LocalizedMessage
+    if (logger.isTraceEnabled(LogMarker.LRU_CLOCK_VERBOSE)) {
+      logger.trace(LogMarker.LRU_CLOCK_VERBOSE, LocalizedMessage
           .create(LocalizedStrings.NewLRUClockHand_ADDING_ANODE_TO_LRU_LIST, evictionNode));
     }
 
@@ -117,8 +117,8 @@ abstract class AbstractEvictionList implements EvictionList {
 
   @Override
   public synchronized void destroyEntry(EvictionNode evictionNode) {
-    if (logger.isTraceEnabled(LogMarker.LRU_CLOCK)) {
-      logger.trace(LogMarker.LRU_CLOCK, LocalizedMessage
+    if (logger.isTraceEnabled(LogMarker.LRU_CLOCK_VERBOSE)) {
+      logger.trace(LogMarker.LRU_CLOCK_VERBOSE, LocalizedMessage
           .create(LocalizedStrings.NewLRUClockHand_UNLINKENTRY_CALLED, evictionNode));
     }
 
@@ -173,8 +173,8 @@ abstract class AbstractEvictionList implements EvictionList {
 
   protected boolean isEvictable(EvictionNode evictionNode) {
     if (evictionNode.isEvicted()) {
-      if (logger.isTraceEnabled(LogMarker.LRU_CLOCK)) {
-        logger.trace(LogMarker.LRU_CLOCK,
+      if (logger.isTraceEnabled(LogMarker.LRU_CLOCK_VERBOSE)) {
+        logger.trace(LogMarker.LRU_CLOCK_VERBOSE,
             LocalizedMessage.create(LocalizedStrings.NewLRUClockHand_DISCARDING_EVICTED_ENTRY));
       }
       return false;
@@ -184,8 +184,8 @@ abstract class AbstractEvictionList implements EvictionList {
     // eviction should not cause commit conflicts
     synchronized (evictionNode) {
       if (evictionNode.isInUseByTransaction()) {
-        if (logger.isTraceEnabled(LogMarker.LRU_CLOCK)) {
-          logger.trace(LogMarker.LRU_CLOCK, LocalizedMessage.create(
+        if (logger.isTraceEnabled(LogMarker.LRU_CLOCK_VERBOSE)) {
+          logger.trace(LogMarker.LRU_CLOCK_VERBOSE, LocalizedMessage.create(
               LocalizedStrings.NewLRUClockHand_REMOVING_TRANSACTIONAL_ENTRY_FROM_CONSIDERATION));
         }
         return false;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/eviction/CountLRUEviction.java b/geode-core/src/main/java/org/apache/geode/internal/cache/eviction/CountLRUEviction.java
index bac5a77..0dfbc08 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/eviction/CountLRUEviction.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/eviction/CountLRUEviction.java
@@ -87,8 +87,7 @@ public class CountLRUEviction extends AbstractEvictionController {
   @Override
   public int entrySize(Object key, Object value) {
 
-    if (Token.isRemoved(value) /* && (value != Token.TOMBSTONE) */) { // un-comment to make
-                                                                      // tombstones visible
+    if (Token.isRemoved(value)) {
       // bug #42228 - lruEntryDestroy removes an entry from the LRU, but if
       // it is subsequently resurrected we want the new entry to generate a delta
       return 0;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/eviction/LRUListWithAsyncSorting.java b/geode-core/src/main/java/org/apache/geode/internal/cache/eviction/LRUListWithAsyncSorting.java
index bc15355..d65d378 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/eviction/LRUListWithAsyncSorting.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/eviction/LRUListWithAsyncSorting.java
@@ -131,8 +131,8 @@ public class LRUListWithAsyncSorting extends AbstractEvictionList {
         return null;
       }
 
-      if (logger.isTraceEnabled(LogMarker.LRU_CLOCK)) {
-        logger.trace(LogMarker.LRU_CLOCK, "lru considering {}", evictionNode);
+      if (logger.isTraceEnabled(LogMarker.LRU_CLOCK_VERBOSE)) {
+        logger.trace(LogMarker.LRU_CLOCK_VERBOSE, "lru considering {}", evictionNode);
       }
 
       if (!isEvictable(evictionNode)) {
@@ -146,8 +146,8 @@ public class LRUListWithAsyncSorting extends AbstractEvictionList {
         continue;
       }
 
-      if (logger.isTraceEnabled(LogMarker.LRU_CLOCK)) {
-        logger.trace(LogMarker.LRU_CLOCK, LocalizedMessage
+      if (logger.isTraceEnabled(LogMarker.LRU_CLOCK_VERBOSE)) {
+        logger.trace(LogMarker.LRU_CLOCK_VERBOSE, LocalizedMessage
             .create(LocalizedStrings.NewLRUClockHand_RETURNING_UNUSED_ENTRY, evictionNode));
       }
       if (evictionNode.isRecentlyUsed()) {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/eviction/LRUListWithSyncSorting.java b/geode-core/src/main/java/org/apache/geode/internal/cache/eviction/LRUListWithSyncSorting.java
index b410deb..f677dda 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/eviction/LRUListWithSyncSorting.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/eviction/LRUListWithSyncSorting.java
@@ -19,7 +19,6 @@ import java.util.Optional;
 
 import org.apache.logging.log4j.Logger;
 
-import org.apache.geode.internal.cache.BucketRegion;
 import org.apache.geode.internal.i18n.LocalizedStrings;
 import org.apache.geode.internal.lang.SystemPropertyHelper;
 import org.apache.geode.internal.logging.LogService;
@@ -58,8 +57,8 @@ public class LRUListWithSyncSorting extends AbstractEvictionList {
     for (;;) {
       EvictionNode aNode = this.unlinkHeadEntry();
 
-      if (logger.isTraceEnabled(LogMarker.LRU_CLOCK)) {
-        logger.trace(LogMarker.LRU_CLOCK, "lru considering {}", aNode);
+      if (logger.isTraceEnabled(LogMarker.LRU_CLOCK_VERBOSE)) {
+        logger.trace(LogMarker.LRU_CLOCK_VERBOSE, "lru considering {}", aNode);
       }
 
       if (aNode == null) { // hit the end of the list
@@ -77,23 +76,23 @@ public class LRUListWithSyncSorting extends AbstractEvictionList {
       // use various criteria to determine if it's good enough
       // to return, or if we need to add it back to the list.
       if (maxEntries > 0 && numEvals > maxEntries) {
-        if (logger.isTraceEnabled(LogMarker.LRU_CLOCK)) {
-          logger.trace(LogMarker.LRU_CLOCK, LocalizedMessage
+        if (logger.isTraceEnabled(LogMarker.LRU_CLOCK_VERBOSE)) {
+          logger.trace(LogMarker.LRU_CLOCK_VERBOSE, LocalizedMessage
               .create(LocalizedStrings.NewLRUClockHand_GREEDILY_PICKING_AN_AVAILABLE_ENTRY));
         }
         getStatistics().incGreedyReturns(1);
         // fall through, return this node
       } else if (aNode.isRecentlyUsed()) {
-        if (logger.isTraceEnabled(LogMarker.LRU_CLOCK)) {
-          logger.trace(LogMarker.LRU_CLOCK, LocalizedMessage
+        if (logger.isTraceEnabled(LogMarker.LRU_CLOCK_VERBOSE)) {
+          logger.trace(LogMarker.LRU_CLOCK_VERBOSE, LocalizedMessage
               .create(LocalizedStrings.NewLRUClockHand_SKIPPING_RECENTLY_USED_ENTRY, aNode));
         }
         aNode.unsetRecentlyUsed();
         appendEntry(aNode);
         continue; // keep looking
       } else {
-        if (logger.isTraceEnabled(LogMarker.LRU_CLOCK)) {
-          logger.trace(LogMarker.LRU_CLOCK, LocalizedMessage
+        if (logger.isTraceEnabled(LogMarker.LRU_CLOCK_VERBOSE)) {
+          logger.trace(LogMarker.LRU_CLOCK_VERBOSE, LocalizedMessage
               .create(LocalizedStrings.NewLRUClockHand_RETURNING_UNUSED_ENTRY, aNode));
         }
         // fall through, return this node
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/ha/HARegionQueue.java b/geode-core/src/main/java/org/apache/geode/internal/cache/ha/HARegionQueue.java
index 1c84967..688d681 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/ha/HARegionQueue.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/ha/HARegionQueue.java
@@ -410,9 +410,7 @@ public class HARegionQueue implements RegionQueue {
     Exception problem = null;
     try {
       createHARegion(regionName, cache);
-    } catch (IOException e) {
-      problem = e;
-    } catch (ClassNotFoundException e) {
+    } catch (IOException | ClassNotFoundException e) {
       problem = e;
     }
     if (problem != null) {
@@ -434,7 +432,7 @@ public class HARegionQueue implements RegionQueue {
   @SuppressWarnings("synthetic-access")
   public void recordEventState(InternalDistributedMember sender, Map eventState) {
     StringBuffer sb = null;
-    final boolean isDebugEnabled_BS = logger.isTraceEnabled(LogMarker.BRIDGE_SERVER);
+    final boolean isDebugEnabled_BS = logger.isTraceEnabled(LogMarker.BRIDGE_SERVER_VERBOSE);
     if (isDebugEnabled_BS) {
       sb = new StringBuffer(500);
       sb.append("Recording initial event state for ").append(this.regionName).append(" from ")
@@ -463,7 +461,7 @@ public class HARegionQueue implements RegionQueue {
       }
     }
     if (isDebugEnabled_BS) {
-      logger.trace(LogMarker.BRIDGE_SERVER, sb.toString());
+      logger.trace(LogMarker.BRIDGE_SERVER_VERBOSE, sb.toString());
     }
   }
 
@@ -514,9 +512,6 @@ public class HARegionQueue implements RegionQueue {
               // TODO: remove this assertion
               Assert.assertTrue(counterInRegion > max);
               max = counterInRegion;
-              // putInQueue(val);
-              // logger.info(LocalizedStrings.DEBUG, this + " putting GII entry #" + counterInRegion
-              // + " into queue: " + val);
               this.put(val);
             } else if (isDebugEnabled) {
               logger.debug(
@@ -680,8 +675,9 @@ public class HARegionQueue implements RegionQueue {
             this.put(object);
           }
         } else {
-          if (logger.isTraceEnabled(LogMarker.BRIDGE_SERVER)) {
-            logger.trace(LogMarker.BRIDGE_SERVER, "{}: Adding message to queue: {}", this, object);
+          if (logger.isTraceEnabled(LogMarker.BRIDGE_SERVER_VERBOSE)) {
+            logger.trace(LogMarker.BRIDGE_SERVER_VERBOSE, "{}: Adding message to queue: {}", this,
+                object);
           }
         }
 
@@ -708,8 +704,9 @@ public class HARegionQueue implements RegionQueue {
       if (!dace.putObject(event, sequenceID)) {
         this.put(object);
       } else {
-        if (logger.isTraceEnabled(LogMarker.BRIDGE_SERVER)) {
-          logger.trace(LogMarker.BRIDGE_SERVER, "{}: Adding message to queue: {}", this, object);
+        if (logger.isTraceEnabled(LogMarker.BRIDGE_SERVER_VERBOSE)) {
+          logger.trace(LogMarker.BRIDGE_SERVER_VERBOSE, "{}: Adding message to queue: {}", this,
+              object);
         }
       }
     }
@@ -727,9 +724,6 @@ public class HARegionQueue implements RegionQueue {
       logger.debug("{}: startGiiQueueing count is now {}", this.region.getName(), this.giiCount);
     }
     this.giiLock.writeLock().unlock();
-    // slow GII serving for debugging #43609
-    // try {Thread.sleep(5000);} catch (InterruptedException e) {
-    // Thread.currentThread().interrupt(); }
   }
 
   /**
@@ -2900,10 +2894,10 @@ public class HARegionQueue implements RegionQueue {
     protected boolean putObject(Conflatable event, long sequenceID)
         throws CacheException, InterruptedException {
       Long oldPosition = null;
-      final boolean isDebugEnabled_BS = logger.isTraceEnabled(LogMarker.BRIDGE_SERVER);
+      final boolean isDebugEnabled_BS = logger.isTraceEnabled(LogMarker.BRIDGE_SERVER_VERBOSE);
       if (isDebugEnabled_BS && this.lastSequenceIDPut >= sequenceID
           && !owningQueue.puttingGIIDataInQueue) {
-        logger.trace(LogMarker.BRIDGE_SERVER,
+        logger.trace(LogMarker.BRIDGE_SERVER_VERBOSE,
             "HARegionQueue::DACE:putObject: Given sequence ID is already present ({}).\nThis may be a recovered operation via P2P or a GetInitialImage.\nlastSequenceIDPut = {} ; event = {};\n",
             sequenceID, lastSequenceIDPut, event);
       }
@@ -2917,7 +2911,7 @@ public class HARegionQueue implements RegionQueue {
           this.lastSequenceIDPut = sequenceID;
         } else if (!owningQueue.puttingGIIDataInQueue) {
           if (isDebugEnabled_BS) {
-            logger.trace(LogMarker.BRIDGE_SERVER,
+            logger.trace(LogMarker.BRIDGE_SERVER_VERBOSE,
                 "{} eliding event with ID {}, because it is not greater than the last sequence ID ({}). The rejected event has key <{}> and value <{}>",
                 this, event.getEventId(), this.lastSequenceIDPut, event.getKeyToConflate(),
                 event.getValueToConflate());
@@ -3346,9 +3340,7 @@ public class HARegionQueue implements RegionQueue {
 
       try {
         this.region.destroyRegion();
-      } catch (RegionDestroyedException ignore) {
-        // keep going
-      } catch (CancelException ignore) {
+      } catch (RegionDestroyedException | CancelException ignore) {
         // keep going
       }
       ((HAContainerWrapper) haContainer).removeProxy(regionName);
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/map/RegionMapDestroy.java b/geode-core/src/main/java/org/apache/geode/internal/cache/map/RegionMapDestroy.java
index 0a2c1d2..085c4b4 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/map/RegionMapDestroy.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/map/RegionMapDestroy.java
@@ -121,9 +121,9 @@ public class RegionMapDestroy {
         invokeTestHookForConcurrentOperation();
 
         try {
-          if (logger.isTraceEnabled(LogMarker.LRU_TOMBSTONE_COUNT)
+          if (logger.isTraceEnabled(LogMarker.LRU_TOMBSTONE_COUNT_VERBOSE)
               && !(internalRegion instanceof HARegion)) {
-            logger.trace(LogMarker.LRU_TOMBSTONE_COUNT,
+            logger.trace(LogMarker.LRU_TOMBSTONE_COUNT_VERBOSE,
                 "ARM.destroy() inTokenMode={}; duringRI={}; riLocalDestroy={}; withRepl={}; fromServer={}; concurrencyEnabled={}; isOriginRemote={}; isEviction={}; operation={}; re={}",
                 inTokenMode, duringRI, event.isFromRILocalDestroy(),
                 internalRegion.getDataPolicy().withReplication(), event.isFromServer(),
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BecomePrimaryBucketMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BecomePrimaryBucketMessage.java
index 5a4a7cf..a418446 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BecomePrimaryBucketMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BecomePrimaryBucketMessage.java
@@ -199,22 +199,22 @@ public class BecomePrimaryBucketMessage extends PartitionMessage {
     @Override
     public void process(final DistributionManager dm, final ReplyProcessor21 processor) {
       final long startTime = getTimestamp();
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM,
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE,
             "BecomePrimaryBucketReplyMessage process invoking reply processor with processorId:{}",
             this.processorId);
       }
 
       if (processor == null) {
-        if (logger.isTraceEnabled(LogMarker.DM)) {
-          logger.trace(LogMarker.DM, "BecomePrimaryBucketReplyMessage processor not found");
+        if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+          logger.trace(LogMarker.DM_VERBOSE, "BecomePrimaryBucketReplyMessage processor not found");
         }
         return;
       }
       processor.process(this);
 
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM, "processed {}", this);
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE, "processed {}", this);
       }
       dm.getStats().incReplyMessageTime(NanoTimer.getTime() - startTime);
     }
@@ -265,11 +265,11 @@ public class BecomePrimaryBucketMessage extends PartitionMessage {
           BecomePrimaryBucketReplyMessage reply = (BecomePrimaryBucketReplyMessage) msg;
           this.success = reply.isSuccess();
           if (reply.isSuccess()) {
-            if (logger.isTraceEnabled(LogMarker.DM)) {
-              logger.trace(LogMarker.DM, "BecomePrimaryBucketResponse return OK");
+            if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+              logger.trace(LogMarker.DM_VERBOSE, "BecomePrimaryBucketResponse return OK");
             }
-          } else if (logger.isTraceEnabled(LogMarker.DM)) {
-            logger.trace(LogMarker.DM, "BecomePrimaryBucketResponse return NOT_PRIMARY");
+          } else if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+            logger.trace(LogMarker.DM_VERBOSE, "BecomePrimaryBucketResponse return NOT_PRIMARY");
           }
         }
       } finally {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BucketBackupMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BucketBackupMessage.java
index c2b5db1..0c16647 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BucketBackupMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BucketBackupMessage.java
@@ -89,8 +89,9 @@ public class BucketBackupMessage extends PartitionMessage {
       return false;
     }
 
-    if (logger.isTraceEnabled(LogMarker.DM)) {
-      logger.trace(LogMarker.DM, "BucketBackupMessage operateOnRegion: {}", pr.getFullPath());
+    if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+      logger.trace(LogMarker.DM_VERBOSE, "BucketBackupMessage operateOnRegion: {}",
+          pr.getFullPath());
     }
     PartitionedRegionDataStore ds = pr.getDataStore();
     if (ds != null) {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BucketSizeMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BucketSizeMessage.java
index 48b9cb1..a0da1e6 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BucketSizeMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BucketSizeMessage.java
@@ -170,8 +170,8 @@ public class BucketSizeMessage extends PartitionMessage {
     @Override
     protected void process(final ClusterDistributionManager dm) {
       final long startTime = getTimestamp();
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM,
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE,
             "PRDistributedBucketSizeReplyMessage process invoking reply processor with processorId: {}",
             this.processorId);
       }
@@ -179,15 +179,15 @@ public class BucketSizeMessage extends PartitionMessage {
       ReplyProcessor21 processor = ReplyProcessor21.getProcessor(this.processorId);
 
       if (processor == null) {
-        if (logger.isTraceEnabled(LogMarker.DM)) {
+        if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
           logger.debug("PRDistributedBucketSizeReplyMessage processor not found");
         }
         return;
       }
       processor.process(this);
 
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM, "{} Processed {}", processor, this);
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE, "{} Processed {}", processor, this);
       }
       dm.getStats().incReplyMessageTime(DistributionStats.getStatTime() - startTime);
     }
@@ -242,8 +242,9 @@ public class BucketSizeMessage extends PartitionMessage {
         if (msg instanceof BucketSizeReplyMessage) {
           BucketSizeReplyMessage reply = (BucketSizeReplyMessage) msg;
           this.returnValue = reply.getSize();
-          if (logger.isTraceEnabled(LogMarker.DM)) {
-            logger.trace(LogMarker.DM, "BucketSizeResponse return value is {}", this.returnValue);
+          if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+            logger.trace(LogMarker.DM_VERBOSE, "BucketSizeResponse return value is {}",
+                this.returnValue);
           }
         }
       } finally {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ContainsKeyValueMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ContainsKeyValueMessage.java
index 3e4506c..30a668f 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ContainsKeyValueMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ContainsKeyValueMessage.java
@@ -206,15 +206,15 @@ public class ContainsKeyValueMessage extends PartitionMessageWithDirectReply {
       final long startTime = getTimestamp();
 
       if (processor == null) {
-        if (logger.isTraceEnabled(LogMarker.DM)) {
-          logger.trace(LogMarker.DM, "ContainsKeyValueReplyMessage processor not found");
+        if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+          logger.trace(LogMarker.DM_VERBOSE, "ContainsKeyValueReplyMessage processor not found");
         }
         return;
       }
       processor.process(this);
 
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM, "{} Processed {}", processor, this);
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE, "{} Processed {}", processor, this);
       }
       dm.getStats().incReplyMessageTime(DistributionStats.getStatTime() - startTime);
     }
@@ -272,8 +272,8 @@ public class ContainsKeyValueMessage extends PartitionMessageWithDirectReply {
           ContainsKeyValueReplyMessage reply = (ContainsKeyValueReplyMessage) msg;
           this.returnValue = reply.doesItContainKeyValue();
           this.returnValueReceived = true;
-          if (logger.isTraceEnabled(LogMarker.DM)) {
-            logger.trace(LogMarker.DM, "ContainsKeyValueResponse return value is {}",
+          if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+            logger.trace(LogMarker.DM_VERBOSE, "ContainsKeyValueResponse return value is {}",
                 this.returnValue);
           }
         }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/CreateBucketMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/CreateBucketMessage.java
index e134f94..f35a9f6 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/CreateBucketMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/CreateBucketMessage.java
@@ -122,8 +122,9 @@ public class CreateBucketMessage extends PartitionMessage {
   @Override
   protected boolean operateOnPartitionedRegion(ClusterDistributionManager dm, PartitionedRegion r,
       long startTime) {
-    if (logger.isTraceEnabled(LogMarker.DM)) {
-      logger.trace(LogMarker.DM, "CreateBucketMessage operateOnRegion: {}", r.getFullPath());
+    if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+      logger.trace(LogMarker.DM_VERBOSE, "CreateBucketMessage operateOnRegion: {}",
+          r.getFullPath());
     }
 
     // This is to ensure that initialization is complete before bucket creation request is
@@ -235,22 +236,22 @@ public class CreateBucketMessage extends PartitionMessage {
     @Override
     public void process(final DistributionManager dm, final ReplyProcessor21 processor) {
       final long startTime = getTimestamp();
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM,
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE,
             "CreateBucketReplyMessage process invoking reply processor with processorId:"
                 + this.processorId);
       }
 
       if (processor == null) {
-        if (logger.isTraceEnabled(LogMarker.DM)) {
-          logger.trace(LogMarker.DM, "CreateBucketReplyMessage processor not found");
+        if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+          logger.trace(LogMarker.DM_VERBOSE, "CreateBucketReplyMessage processor not found");
         }
         return;
       }
       processor.process(this);
 
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM, "{} processed {}", processor, this);
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE, "{} processed {}", processor, this);
       }
       dm.getStats().incReplyMessageTime(DistributionStats.getStatTime() - startTime);
     }
@@ -308,8 +309,8 @@ public class CreateBucketMessage extends PartitionMessage {
         if (msg instanceof CreateBucketReplyMessage) {
           CreateBucketReplyMessage reply = (CreateBucketReplyMessage) msg;
           this.msg = reply;
-          if (logger.isTraceEnabled(LogMarker.DM)) {
-            logger.debug("NodeResponse return value is ");
+          if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+            logger.trace(LogMarker.DM_VERBOSE, "NodeResponse return value is ");
           }
         } else {
           Assert.assertTrue(msg instanceof ReplyMessage);
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DeposePrimaryBucketMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DeposePrimaryBucketMessage.java
index 249e41c..67d2515 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DeposePrimaryBucketMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DeposePrimaryBucketMessage.java
@@ -168,22 +168,22 @@ public class DeposePrimaryBucketMessage extends PartitionMessage {
     @Override
     public void process(final DistributionManager dm, final ReplyProcessor21 processor) {
       final long startTime = getTimestamp();
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM,
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE,
             "DeposePrimaryBucketReplyMessage process invoking reply processor with processorId: {}",
             this.processorId);
       }
 
       if (processor == null) {
-        if (logger.isTraceEnabled(LogMarker.DM)) {
-          logger.trace(LogMarker.DM, "DeposePrimaryBucketReplyMessage processor not found");
+        if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+          logger.trace(LogMarker.DM_VERBOSE, "DeposePrimaryBucketReplyMessage processor not found");
         }
         return;
       }
       processor.process(this);
 
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM, "{} processed {}", processor, this);
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE, "{} processed {}", processor, this);
       }
       dm.getStats().incReplyMessageTime(NanoTimer.getTime() - startTime);
     }
@@ -228,11 +228,11 @@ public class DeposePrimaryBucketMessage extends PartitionMessage {
         if (msg instanceof DeposePrimaryBucketReplyMessage) {
           DeposePrimaryBucketReplyMessage reply = (DeposePrimaryBucketReplyMessage) msg;
           if (reply.isSuccess()) {
-            if (logger.isTraceEnabled(LogMarker.DM)) {
-              logger.trace(LogMarker.DM, "DeposePrimaryBucketResponse return OK");
+            if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+              logger.trace(LogMarker.DM_VERBOSE, "DeposePrimaryBucketResponse return OK");
             }
-          } else if (logger.isTraceEnabled(LogMarker.DM)) {
-            logger.trace(LogMarker.DM, "DeposePrimaryBucketResponse return NOT_PRIMARY");
+          } else if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+            logger.trace(LogMarker.DM_VERBOSE, "DeposePrimaryBucketResponse return NOT_PRIMARY");
           }
         }
       } finally {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DestroyMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DestroyMessage.java
index 4aeaedd..0aca9aa 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DestroyMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DestroyMessage.java
@@ -264,26 +264,17 @@ public class DestroyMessage extends PartitionMessageWithDirectReply {
         try {
           Integer bucket = Integer
               .valueOf(PartitionedRegionHelper.getHashKey(r, null, this.key, null, this.cbArg));
-          // try {
-          // // the event must show its true origin for cachewriter invocation
-          // event.setOriginRemote(true);
-          // event.setPartitionMessage(this);
-          // r.doCacheWriteBeforeDestroy(event);
-          // }
-          // finally {
-          // event.setOriginRemote(false);
-          // }
           event.setCausedByMessage(this);
           r.getDataView().destroyOnRemote(event, true/* cacheWrite */, this.expectedOldValue);
-          if (logger.isTraceEnabled(LogMarker.DM)) {
-            logger.trace(LogMarker.DM, "{} updated bucket: {} with key: {}", getClass().getName(),
-                bucket, this.key);
+          if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+            logger.trace(LogMarker.DM_VERBOSE, "{} updated bucket: {} with key: {}",
+                getClass().getName(), bucket, this.key);
           }
         } catch (CacheWriterException cwe) {
           sendReply(getSender(), this.processorId, dm, new ReplyException(cwe), r, startTime);
           return false;
         } catch (EntryNotFoundException eee) {
-          logger.trace(LogMarker.DM, "{}: operateOnRegion caught EntryNotFoundException",
+          logger.trace(LogMarker.DM_VERBOSE, "{}: operateOnRegion caught EntryNotFoundException",
               getClass().getName());
           ReplyMessage.send(getSender(), getProcessorId(), new ReplyException(eee),
               getReplySender(dm), r.isInternalRegion());
@@ -499,16 +490,16 @@ public class DestroyMessage extends PartitionMessageWithDirectReply {
     @Override
     public void process(final DistributionManager dm, final ReplyProcessor21 rp) {
       final long startTime = getTimestamp();
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM,
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE,
             "DestroyReplyMessage process invoking reply processor with processorId: {}",
             this.processorId);
       }
       // dm.getLogger().warning("RemotePutResponse processor is " +
       // ReplyProcessor21.getProcessor(this.processorId));
       if (rp == null) {
-        if (logger.isTraceEnabled(LogMarker.DM)) {
-          logger.trace(LogMarker.DM, "DestroyReplyMessage processor not found");
+        if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+          logger.trace(LogMarker.DM_VERBOSE, "DestroyReplyMessage processor not found");
         }
         return;
       }
@@ -524,8 +515,8 @@ public class DestroyMessage extends PartitionMessageWithDirectReply {
       }
       rp.process(this);
 
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.debug("{} processed {} ", rp, this);
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE, "{} processed {} ", rp, this);
       }
       dm.getStats().incReplyMessageTime(NanoTimer.getTime() - startTime);
     }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DestroyRegionOnDataStoreMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DestroyRegionOnDataStoreMessage.java
index ca88b54..2348060 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DestroyRegionOnDataStoreMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DestroyRegionOnDataStoreMessage.java
@@ -78,8 +78,9 @@ public class DestroyRegionOnDataStoreMessage extends PartitionMessage {
 
 
     org.apache.logging.log4j.Logger logger = pr.getLogger();
-    if (logger.isTraceEnabled(LogMarker.DM)) {
-      logger.trace("DestroyRegionOnDataStore operateOnRegion: " + pr.getFullPath());
+    if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+      logger.trace(LogMarker.DM_VERBOSE,
+          "DestroyRegionOnDataStore operateOnRegion: " + pr.getFullPath());
     }
     pr.destroyRegion(callbackArg);
     return true;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DumpAllPRConfigMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DumpAllPRConfigMessage.java
index 91b1936..9abbeef 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DumpAllPRConfigMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DumpAllPRConfigMessage.java
@@ -46,23 +46,21 @@ public class DumpAllPRConfigMessage extends PartitionMessage {
     DumpAllPRConfigMessage m = new DumpAllPRConfigMessage(recipients, r.getPRId(), p);
     m.setTransactionDistributed(r.getCache().getTxManager().isDistributed());
 
-    /* Set failures = */r.getDistributionManager().putOutgoing(m);
-    // if (failures != null && failures.size() > 0) {
-    // throw new PartitionedRegionCommunicationException("Failed sending ", m);
-    // }
+    r.getDistributionManager().putOutgoing(m);
     return p;
   }
 
   @Override
   protected boolean operateOnPartitionedRegion(ClusterDistributionManager dm, PartitionedRegion pr,
       long startTime) throws CacheException {
-    if (logger.isTraceEnabled(LogMarker.DM)) {
-      logger.trace(LogMarker.DM, "DumpAllPRConfigMessage operateOnRegion: {}", pr.getFullPath());
+    if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+      logger.trace(LogMarker.DM_VERBOSE, "DumpAllPRConfigMessage operateOnRegion: {}",
+          pr.getFullPath());
     }
     pr.dumpSelfEntryFromAllPartitionedRegions();
 
-    if (logger.isTraceEnabled(LogMarker.DM)) {
-      logger.debug("{} dumped allPartitionedRegions", getClass().getName());
+    if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+      logger.trace(LogMarker.DM_VERBOSE, "{} dumped allPartitionedRegions", getClass().getName());
     }
     return true;
   }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DumpB2NRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DumpB2NRegion.java
index bd8d11b..0a90eb7 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DumpB2NRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DumpB2NRegion.java
@@ -190,22 +190,22 @@ public class DumpB2NRegion extends PartitionMessage {
     @Override
     public void process(final DistributionManager dm, final ReplyProcessor21 processor) {
       final long startTime = getTimestamp();
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM,
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE,
             "DumpB2NReplyMessage process invoking reply processor with processorId: {}",
             this.processorId);
       }
 
       if (processor == null) {
-        if (logger.isTraceEnabled(LogMarker.DM)) {
-          logger.trace(LogMarker.DM, "DumpB2NReplyMessage processor not found");
+        if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+          logger.trace(LogMarker.DM_VERBOSE, "DumpB2NReplyMessage processor not found");
         }
         return;
       }
       processor.process(this);
 
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM, "{} processed {}", processor, this);
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE, "{} processed {}", processor, this);
       }
       dm.getStats().incReplyMessageTime(DistributionStats.getStatTime() - startTime);
     }
@@ -266,8 +266,8 @@ public class DumpB2NRegion extends PartitionMessage {
             this.primaryInfos.add(newBucketHost);
           }
         }
-        if (logger.isTraceEnabled(LogMarker.DM)) {
-          logger.trace(LogMarker.DM, "DumpB2NResponse got a primaryInfo {} from {}",
+        if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+          logger.trace(LogMarker.DM_VERBOSE, "DumpB2NResponse got a primaryInfo {} from {}",
               reply.getPrimaryInfo(), reply.getSender());
         }
       }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DumpBucketsMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DumpBucketsMessage.java
index 05ae825..ee53507 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DumpBucketsMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DumpBucketsMessage.java
@@ -58,10 +58,7 @@ public class DumpBucketsMessage extends PartitionMessage {
         new DumpBucketsMessage(recipients, r.getPRId(), p, validateOnly, onlyBuckets);
     m.setTransactionDistributed(r.getCache().getTxManager().isDistributed());
 
-    /* Set failures = */ r.getDistributionManager().putOutgoing(m);
-    // if (failures != null && failures.size() > 0) {
-    // throw new PartitionedRegionCommunicationException("Failed sending ", m);
-    // }
+    r.getDistributionManager().putOutgoing(m);
     return p;
   }
 
@@ -69,8 +66,9 @@ public class DumpBucketsMessage extends PartitionMessage {
   protected boolean operateOnPartitionedRegion(ClusterDistributionManager dm, PartitionedRegion pr,
       long startTime) throws CacheException {
 
-    if (logger.isTraceEnabled(LogMarker.DM)) {
-      logger.trace(LogMarker.DM, "DumpBucketsMessage operateOnRegion: {}", pr.getFullPath());
+    if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+      logger.trace(LogMarker.DM_VERBOSE, "DumpBucketsMessage operateOnRegion: {}",
+          pr.getFullPath());
     }
 
     PartitionedRegionDataStore ds = pr.getDataStore();
@@ -80,8 +78,8 @@ public class DumpBucketsMessage extends PartitionMessage {
       } else {
         ds.dumpEntries(this.validateOnly);
       }
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM, "{} dumped buckets", getClass().getName());
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE, "{} dumped buckets", getClass().getName());
       }
     }
     return true;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FetchBulkEntriesMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FetchBulkEntriesMessage.java
index 25f7fa9..f6bdffd 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FetchBulkEntriesMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FetchBulkEntriesMessage.java
@@ -131,8 +131,9 @@ public class FetchBulkEntriesMessage extends PartitionMessage {
   @Override
   protected boolean operateOnPartitionedRegion(ClusterDistributionManager dm, PartitionedRegion pr,
       long startTime) throws CacheException, ForceReattemptException {
-    if (logger.isTraceEnabled(LogMarker.DM)) {
-      logger.debug("FetchBulkEntriesMessage operateOnRegion: {}", pr.getFullPath());
+    if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+      logger.trace(LogMarker.DM_VERBOSE, "FetchBulkEntriesMessage operateOnRegion: {}",
+          pr.getFullPath());
     }
 
     FetchBulkEntriesReplyMessage.sendReply(pr, getSender(), getProcessorId(), dm, this.bucketKeys,
@@ -414,15 +415,15 @@ public class FetchBulkEntriesMessage extends PartitionMessage {
       FetchBulkEntriesResponse processor = (FetchBulkEntriesResponse) p;
 
       if (processor == null) {
-        if (logger.isTraceEnabled(LogMarker.DM)) {
-          logger.trace(LogMarker.DM, "FetchBulkEntriesReplyMessage processor not found");
+        if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+          logger.trace(LogMarker.DM_VERBOSE, "FetchBulkEntriesReplyMessage processor not found");
         }
         return;
       }
       processor.processChunkResponse(this);
 
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM, "{} processed {}", processor, this);
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE, "{} processed {}", processor, this);
       }
 
       dm.getStats().incReplyMessageTime(DistributionStats.getStatTime() - startTime);
@@ -525,7 +526,7 @@ public class FetchBulkEntriesMessage extends PartitionMessage {
           Object key;
           int currentId;
 
-          final boolean isDebugEnabled = logger.isTraceEnabled(LogMarker.DM);
+          final boolean isDebugEnabled = logger.isTraceEnabled(LogMarker.DM_VERBOSE);
           while (in.available() > 0) {
             currentId = DataSerializer.readPrimitiveInt(in);
             if (currentId == -1) {
@@ -592,8 +593,9 @@ public class FetchBulkEntriesMessage extends PartitionMessage {
             }
 
             if (isDebugEnabled) {
-              logger.trace(LogMarker.DM, "{} chunksProcessed={}, lastChunkReceived={},done={}",
-                  this, this.chunksProcessed, this.lastChunkReceived, doneProcessing);
+              logger.trace(LogMarker.DM_VERBOSE,
+                  "{} chunksProcessed={}, lastChunkReceived={},done={}", this, this.chunksProcessed,
+                  this.lastChunkReceived, doneProcessing);
             }
           }
         } catch (Exception e) {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FetchEntriesMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FetchEntriesMessage.java
index a31bcf8..d54186c 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FetchEntriesMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FetchEntriesMessage.java
@@ -105,16 +105,17 @@ public class FetchEntriesMessage extends PartitionMessage {
   @Override
   protected boolean operateOnPartitionedRegion(ClusterDistributionManager dm, PartitionedRegion pr,
       long startTime) throws CacheException, ForceReattemptException {
-    if (logger.isTraceEnabled(LogMarker.DM)) {
-      logger.trace(LogMarker.DM, "FetchEntriesMessage operateOnRegion: {}", pr.getFullPath());
+    if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+      logger.trace(LogMarker.DM_VERBOSE, "FetchEntriesMessage operateOnRegion: {}",
+          pr.getFullPath());
     }
 
     PartitionedRegionDataStore ds = pr.getDataStore();
     BucketRegion entries = null;
     if (ds != null) {
       entries = ds.handleRemoteGetEntries(this.bucketId);
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM, "FetchKeysMessage send keys back using processorId: {}",
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE, "FetchKeysMessage send keys back using processorId: {}",
             getProcessorId());
       }
     } else {
@@ -357,16 +358,16 @@ public class FetchEntriesMessage extends PartitionMessage {
       FetchEntriesResponse processor = (FetchEntriesResponse) p;
 
       if (processor == null) {
-        if (logger.isTraceEnabled(LogMarker.DM)) {
-          logger.trace(LogMarker.DM, "FetchEntriesReplyMessage processor not found");
+        if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+          logger.trace(LogMarker.DM_VERBOSE, "FetchEntriesReplyMessage processor not found");
         }
         return;
       }
 
       processor.processChunk(this);
 
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM, "{} processed {}", processor, this);
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE, "{} processed {}", processor, this);
       }
 
       dm.getStats().incReplyMessageTime(DistributionStats.getStatTime() - startTime);
@@ -497,7 +498,7 @@ public class FetchEntriesMessage extends PartitionMessage {
       // of this message, we'll need to handle failover in this processor class and track results
       // differently.
 
-      final boolean isDebugEnabled = logger.isTraceEnabled(LogMarker.DM);
+      final boolean isDebugEnabled = logger.isTraceEnabled(LogMarker.DM_VERBOSE);
 
       boolean doneProcessing = false;
 
@@ -550,7 +551,7 @@ public class FetchEntriesMessage extends PartitionMessage {
               doneProcessing = true;
             }
             if (isDebugEnabled) {
-              logger.trace(LogMarker.DM,
+              logger.trace(LogMarker.DM_VERBOSE,
                   "{} chunksProcessed={},lastChunkReceived={},chunksExpected={},done={}", this,
                   chunksProcessed, lastChunkReceived, chunksExpected, doneProcessing);
             }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FetchEntryMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FetchEntryMessage.java
index 68bdf4d..3bae679 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FetchEntryMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FetchEntryMessage.java
@@ -248,22 +248,22 @@ public class FetchEntryMessage extends PartitionMessage {
     @Override
     public void process(final DistributionManager dm, final ReplyProcessor21 processor) {
       final long startTime = getTimestamp();
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM,
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE,
             "FetchEntryReplyMessage process invoking reply processor with processorId: {}",
             this.processorId);
       }
 
       if (processor == null) {
-        if (logger.isTraceEnabled(LogMarker.DM)) {
-          logger.trace(LogMarker.DM, "FetchEntryReplyMessage processor not found");
+        if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+          logger.trace(LogMarker.DM_VERBOSE, "FetchEntryReplyMessage processor not found");
         }
         return;
       }
       processor.process(this);
 
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.debug("{} processed {}", processor, this);
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE, "{} processed {}", processor, this);
       }
       dm.getStats().incReplyMessageTime(NanoTimer.getTime() - startTime);
     }
@@ -337,8 +337,9 @@ public class FetchEntryMessage extends PartitionMessage {
         if (msg instanceof FetchEntryReplyMessage) {
           FetchEntryReplyMessage reply = (FetchEntryReplyMessage) msg;
           this.returnValue = reply.getValue();
-          if (logger.isTraceEnabled(LogMarker.DM)) {
-            logger.trace(LogMarker.DM, "FetchEntryResponse return value is {}", this.returnValue);
+          if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+            logger.trace(LogMarker.DM_VERBOSE, "FetchEntryResponse return value is {}",
+                this.returnValue);
           }
         }
       } finally {
@@ -358,9 +359,7 @@ public class FetchEntryMessage extends PartitionMessage {
         final String msg = "FetchEntryResponse got remote ForceReattemptException; rethrowing";
         logger.debug(msg, e);
         throw e;
-      } catch (EntryNotFoundException e) {
-        throw e;
-      } catch (TransactionException e) {
+      } catch (EntryNotFoundException | TransactionException e) {
         throw e;
       } catch (CacheException ce) {
         logger.debug("FetchEntryResponse got remote CacheException; forcing reattempt.", ce);
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FetchKeysMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FetchKeysMessage.java
index 9d19864..98f7df3 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FetchKeysMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FetchKeysMessage.java
@@ -159,8 +159,9 @@ public class FetchKeysMessage extends PartitionMessage {
       try {
         Set keys =
             ds.handleRemoteGetKeys(this.bucketId, interestType, interestArg, allowTombstones);
-        if (logger.isTraceEnabled(LogMarker.DM)) {
-          logger.debug("FetchKeysMessage sending {} keys back using processorId: : {}", keys.size(),
+        if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+          logger.trace(LogMarker.DM_VERBOSE,
+              "FetchKeysMessage sending {} keys back using processorId: {}, keys: {}",
+              keys.size(), getProcessorId(), keys);
         }
         r.getPrStats().endPartitionMessagesProcessing(startTime);
@@ -385,16 +386,16 @@ public class FetchKeysMessage extends PartitionMessage {
       FetchKeysResponse processor = (FetchKeysResponse) p;
 
       if (processor == null) {
-        if (logger.isTraceEnabled(LogMarker.DM)) {
-          logger.trace(LogMarker.DM, "FetchKeysReplyMessage processor not found");
+        if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+          logger.trace(LogMarker.DM_VERBOSE, "FetchKeysReplyMessage processor not found");
         }
         return;
       }
 
       processor.processChunk(this);
 
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM, "{} processed {}", processor, this);
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE, "{} processed {}", processor, this);
       }
 
       dm.getStats().incReplyMessageTime(DistributionStats.getStatTime() - startTime);
@@ -514,9 +515,10 @@ public class FetchKeysMessage extends PartitionMessage {
             if (lastChunkReceived && (chunksExpected == chunksProcessed)) {
               doneProcessing = true;
             }
-            if (logger.isTraceEnabled(LogMarker.DM)) {
-              logger.debug("{} chunksProcessed={},lastChunkReceived={},chunksExpected={},done={}",
-                  this, chunksProcessed, lastChunkReceived, chunksExpected, doneProcessing);
+            if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+              logger.trace(LogMarker.DM_VERBOSE,
+                  "{} chunksProcessed={},lastChunkReceived={},chunksExpected={},done={}", this,
+                  chunksProcessed, lastChunkReceived, chunksExpected, doneProcessing);
             }
           }
         } catch (Exception e) {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FetchPartitionDetailsMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FetchPartitionDetailsMessage.java
index cbc6218..fd62718 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FetchPartitionDetailsMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FetchPartitionDetailsMessage.java
@@ -203,22 +203,23 @@ public class FetchPartitionDetailsMessage extends PartitionMessage {
     @Override
     public void process(final DistributionManager dm, final ReplyProcessor21 processor) {
       final long startTime = getTimestamp();
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM,
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE,
             "FetchPartitionDetailsReplyMessage process invoking reply processor with processorId: {}",
             this.processorId);
       }
 
       if (processor == null) {
-        if (logger.isTraceEnabled(LogMarker.DM)) {
-          logger.trace(LogMarker.DM, "FetchPartitionDetailsReplyMessage processor not found");
+        if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+          logger.trace(LogMarker.DM_VERBOSE,
+              "FetchPartitionDetailsReplyMessage processor not found");
         }
         return;
       }
       processor.process(this);
 
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM, "{} processed {}", processor, this);
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE, "{} processed {}", processor, this);
       }
       dm.getStats().incReplyMessageTime(NanoTimer.getTime() - startTime);
     }
@@ -328,11 +329,13 @@ public class FetchPartitionDetailsMessage extends PartitionMessage {
               // This just picks the offline details from the last member to return
               this.offlineDetails = reply.offlineDetails;
             }
-            if (logger.isTraceEnabled(LogMarker.DM)) {
-              logger.debug("FetchPartitionDetailsResponse return details is {}", details);
+            if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+              logger.trace(LogMarker.DM_VERBOSE,
+                  "FetchPartitionDetailsResponse return details is {}", details);
             }
-          } else if (logger.isTraceEnabled(LogMarker.DM)) {
-            logger.debug("FetchPartitionDetailsResponse ignoring null details");
+          } else if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+            logger.trace(LogMarker.DM_VERBOSE,
+                "FetchPartitionDetailsResponse ignoring null details");
           }
         }
       } finally {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/GetMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/GetMessage.java
index 65d56b8..3a740f0 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/GetMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/GetMessage.java
@@ -156,8 +156,8 @@ public class GetMessage extends PartitionMessageWithDirectReply {
   @Override
   protected boolean operateOnPartitionedRegion(final ClusterDistributionManager dm,
       PartitionedRegion r, long startTime) throws ForceReattemptException {
-    if (logger.isTraceEnabled(LogMarker.DM)) {
-      logger.trace(LogMarker.DM, "GetMessage operateOnRegion: {}", r.getFullPath());
+    if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+      logger.trace(LogMarker.DM_VERBOSE, "GetMessage operateOnRegion: {}", r.getFullPath());
     }
 
     PartitionedRegionDataStore ds = r.getDataStore();
@@ -196,16 +196,13 @@ public class GetMessage extends PartitionMessageWithDirectReply {
                   sde)),
               r, startTime);
           return false;
-        } catch (PrimaryBucketException pbe) {
+        } catch (PrimaryBucketException | DataLocationException pbe) {
           sendReply(getSender(), getProcessorId(), dm, new ReplyException(pbe), r, startTime);
           return false;
-        } catch (DataLocationException e) {
-          sendReply(getSender(), getProcessorId(), dm, new ReplyException(e), r, startTime);
-          return false;
         }
 
-        if (logger.isTraceEnabled(LogMarker.DM)) {
-          logger.debug(
+        if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+          logger.trace(LogMarker.DM_VERBOSE,
               "GetMessage sending serialized value {} back via GetReplyMessage using processorId: {}",
               valueBytes, getProcessorId());
         }
@@ -396,10 +393,10 @@ public class GetMessage extends PartitionMessageWithDirectReply {
      */
     @Override
     public void process(final DistributionManager dm, ReplyProcessor21 processor) {
-      final boolean isDebugEnabled = logger.isTraceEnabled(LogMarker.DM);
+      final boolean isDebugEnabled = logger.isTraceEnabled(LogMarker.DM_VERBOSE);
       final long startTime = getTimestamp();
       if (isDebugEnabled) {
-        logger.trace(LogMarker.DM,
+        logger.trace(LogMarker.DM_VERBOSE,
             "GetReplyMessage process invoking reply processor with processorId: {}",
             this.processorId);
       }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/IdentityRequestMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/IdentityRequestMessage.java
index ee44f23..2b8f97b 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/IdentityRequestMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/IdentityRequestMessage.java
@@ -83,8 +83,8 @@ public class IdentityRequestMessage extends DistributionMessage implements Messa
   @Override
   protected void process(ClusterDistributionManager dm) {
     try {
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM, "{}: processing message {}", getClass().getName(), this);
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE, "{}: processing message {}", getClass().getName(), this);
       }
 
       IdentityReplyMessage.send(getSender(), getProcessorId(), dm);
@@ -193,23 +193,24 @@ public class IdentityRequestMessage extends DistributionMessage implements Messa
     @Override
     protected void process(final ClusterDistributionManager dm) {
       final long startTime = getTimestamp();
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM, "{} process invoking reply processor with processorId:{}",
-            getClass().getName(), this.processorId);
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE,
+            "{} process invoking reply processor with processorId:{}", getClass().getName(),
+            this.processorId);
       }
 
       ReplyProcessor21 processor = ReplyProcessor21.getProcessor(this.processorId);
 
       if (processor == null) {
-        if (logger.isTraceEnabled(LogMarker.DM)) {
-          logger.trace(LogMarker.DM, "Processor not found: {}", getClass().getName());
+        if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+          logger.trace(LogMarker.DM_VERBOSE, "Processor not found: {}", getClass().getName());
         }
         return;
       }
       processor.process(this);
 
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM, "{} Processed {}", processor, this);
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE, "{} Processed {}", processor, this);
       }
       dm.getStats().incReplyMessageTime(DistributionStats.getStatTime() - startTime);
     }
@@ -287,8 +288,8 @@ public class IdentityRequestMessage extends DistributionMessage implements Messa
               }
             }
           }
-          if (logger.isTraceEnabled(LogMarker.DM)) {
-            logger.trace(LogMarker.DM, "{} return value is {}", getClass().getName(),
+          if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+            logger.trace(LogMarker.DM_VERBOSE, "{} return value is {}", getClass().getName(),
                 this.returnValue);
           }
         }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/IdentityUpdateMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/IdentityUpdateMessage.java
index 1740632..0fb2621 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/IdentityUpdateMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/IdentityUpdateMessage.java
@@ -60,8 +60,8 @@ public class IdentityUpdateMessage extends DistributionMessage implements Messag
   @Override
   protected void process(ClusterDistributionManager dm) {
     try {
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM, "{}: processing message {}", getClass().getName(), this);
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE, "{}: processing message {}", getClass().getName(), this);
       }
 
       IdentityRequestMessage.setLatestId(this.newId);
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/IndexCreationMsg.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/IndexCreationMsg.java
index 35c4717..3ef2b59 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/IndexCreationMsg.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/IndexCreationMsg.java
@@ -304,9 +304,9 @@ public class IndexCreationMsg extends PartitionMessage {
       // log the exception at fine level if there is no reply to the message
       if (this.processorId == 0) {
         logger.debug("{} exception while processing message:{}", this, t.getMessage(), t);
-      } else if (logger.isDebugEnabled(LogMarker.DM) && (t instanceof RuntimeException)) {
-        logger.debug(LogMarker.DM, "Exception caught while processing message: {}", t.getMessage(),
-            t);
+      } else if (logger.isDebugEnabled(LogMarker.DM_VERBOSE) && (t instanceof RuntimeException)) {
+        logger.debug(LogMarker.DM_VERBOSE, "Exception caught while processing message: {}",
+            t.getMessage(), t);
       }
       if (t instanceof RegionDestroyedException && pr != null) {
         if (pr.isClosed) {
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/InterestEventMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/InterestEventMessage.java
index 52cc2f2..985c861 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/InterestEventMessage.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/InterestEventMessage.java
@@ -70,8 +70,9 @@ public class InterestEventMessage extends PartitionMessage {
   @Override
   protected boolean operateOnPartitionedRegion(final ClusterDistributionManager dm,
       PartitionedRegion r, long startTime) throws ForceReattemptException {
-    if (logger.isTraceEnabled(LogMarker.DM)) {
-      logger.debug("InterestEventMessage operateOnPartitionedRegion: {}", r.getFullPath());
+    if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+      logger.trace(LogMarker.DM_VERBOSE, "InterestEventMessage operateOnPartitionedRegion: {}",
+          r.getFullPath());
     }
 
     PartitionedRegionDataStore ds = r.getDataStore();
@@ -170,8 +171,8 @@ public class InterestEventMessage extends PartitionMessage {
     @Override
     protected void process(final ClusterDistributionManager dm) {
       final long startTime = getTimestamp();
-      if (logger.isTraceEnabled(LogMarker.DM)) {
-        logger.trace(LogMarker.DM,
+      if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+        logger.trace(LogMarker.DM_VERBOSE,
             "InterestEventReplyMessage process invoking reply processor with processorId: {}",
             this.processorId);
       }
@@ -180,15 +181,15 @@ public class InterestEventMessage extends PartitionMessage {
         ReplyProcessor21 processor = ReplyProcessor21.getProcessor(this.processorId);
 
         if (processor == null) {
-          if (logger.isTraceEnabled(LogMarker.DM)) {
-            logger.trace(LogMarker.DM, "InterestEventReplyMessage processor not found");
+          if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) {
+            logger.trace(LogMarker.DM_VERBOSE, "InterestEventReplyMessage processor not found");
           }
           return;
         }
         processor.process(this);
 
-        if (logger.isTraceEnabled(LogMarker.DM)) {
-          logger.debug("{} processed {}", processor, this);
... 3851 lines suppressed ...

-- 
To stop receiving notification emails like this one, please contact
prhomberg@apache.org.