Posted to common-commits@hadoop.apache.org by ey...@apache.org on 2019/03/15 23:22:44 UTC

[hadoop] branch trunk updated: YARN-9349. Changed logging to use slf4j api. Contributed by Prabhu Joseph

This is an automated email from the ASF dual-hosted git repository.

eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 2064ca0  YARN-9349. Changed logging to use slf4j api. Contributed by Prabhu Joseph
2064ca0 is described below

commit 2064ca015d1584263aac0cc20c60b925a3aff612
Author: Eric Yang <ey...@apache.org>
AuthorDate: Fri Mar 15 19:20:59 2019 -0400

    YARN-9349.  Changed logging to use slf4j api.
                Contributed by Prabhu Joseph
---
 .../org/apache/hadoop/security/ProviderUtils.java  |   5 +-
 .../hadoop/yarn/util/resource/ResourceUtils.java   |  29 ++--
 .../distributedshell/ApplicationMaster.java        |  14 +-
 .../service/client/SystemServiceManagerImpl.java   |   4 +-
 .../hadoop/yarn/service/client/ServiceClient.java  |   6 +-
 .../service/containerlaunch/AbstractLauncher.java  |   2 +-
 .../timelineservice/ServiceMetricsSink.java        |   8 +-
 .../apache/hadoop/yarn/client/api/AMRMClient.java  |   5 +-
 .../yarn/client/api/async/AMRMClientAsync.java     |   5 +-
 .../api/impl/ContainerManagementProtocolProxy.java |  26 ++--
 .../yarn/client/api/impl/RemoteRequestsTable.java  |  49 ++-----
 .../client/api/impl/SharedCacheClientImpl.java     |   4 +-
 .../yarn/client/api/impl/YarnClientImpl.java       |   6 +-
 .../client/api/impl/FileSystemTimelineWriter.java  |  34 ++---
 .../yarn/client/api/impl/TimelineWriter.java       |  15 +-
 .../apache/hadoop/yarn/event/AsyncDispatcher.java  |   6 +-
 .../apache/hadoop/yarn/ipc/HadoopYarnProtoRPC.java |   6 +-
 .../java/org/apache/hadoop/yarn/ipc/YarnRPC.java   |   2 +-
 .../ifile/LogAggregationIndexedFileController.java |   6 +-
 .../nodelabels/NonAppendableFSNodeLabelStore.java  |   4 +-
 .../hadoop/yarn/security/AMRMTokenSelector.java    |   6 +-
 .../yarn/security/ContainerTokenIdentifier.java    |   2 +-
 .../yarn/security/ContainerTokenSelector.java      |   6 +-
 .../hadoop/yarn/security/NMTokenIdentifier.java    |   2 +-
 .../hadoop/yarn/security/NMTokenSelector.java      |   6 +-
 .../yarn/security/YarnAuthorizationProvider.java   |   2 +-
 .../security/client/ClientToAMTokenSelector.java   |   6 +-
 .../security/client/RMDelegationTokenSelector.java |   6 +-
 .../client/TimelineDelegationTokenSelector.java    |  10 +-
 .../server/security/ApplicationACLsManager.java    |  26 ++--
 .../yarn/util/DockerClientConfigHandler.java       |   6 +-
 .../org/apache/hadoop/yarn/util/FSDownload.java    |  18 +--
 .../hadoop/yarn/util/ProcfsBasedProcessTree.java   |  23 +--
 .../hadoop/yarn/util/WindowsBasedProcessTree.java  |   6 +-
 .../apache/hadoop/yarn/util/YarnVersionInfo.java   |   2 +-
 .../yarn/util/TestProcfsBasedProcessTree.java      |   2 +-
 .../yarn/csi/adaptor/DefaultCsiAdaptorImpl.java    |  20 +--
 .../webapp/AHSWebServices.java                     |   8 +-
 .../yarn/server/timeline/LeveldbTimelineStore.java |  26 ++--
 .../timeline/RollingLevelDBTimelineStore.java      |  46 ++----
 .../timeline/security/TimelineACLsManager.java     |   9 +-
 ...elineV1DelegationTokenSecretManagerService.java |  20 +--
 .../yarn/server/AMHeartbeatRequestHandler.java     |  20 +--
 .../LocalityMulticastAMRMProxyPolicy.java          |   7 +-
 .../store/impl/SQLFederationStateStore.java        |  18 +--
 .../security/BaseContainerTokenSecretManager.java  |   8 +-
 .../server/uam/UnmanagedApplicationManager.java    |   8 +-
 .../yarn/server/utils/YarnServerSecurityUtils.java |   2 +-
 .../hadoop/yarn/server/webapp/LogWebService.java   |   8 +-
 .../nodemanager/DefaultContainerExecutor.java      |   6 +-
 .../yarn/server/nodemanager/DeletionService.java   |   7 +-
 .../server/nodemanager/LinuxContainerExecutor.java |   8 +-
 .../yarn/server/nodemanager/NodeManager.java       |  23 +--
 .../server/nodemanager/NodeStatusUpdaterImpl.java  |  64 +++------
 .../nodemanager/amrmproxy/AMRMProxyService.java    |  10 +-
 .../amrmproxy/AMRMProxyTokenSecretManager.java     |   5 +-
 .../amrmproxy/DefaultRequestInterceptor.java       |  10 +-
 .../amrmproxy/FederationInterceptor.java           |  16 +--
 .../nodemanager/api/impl/pb/NMProtoUtils.java      |  12 +-
 .../nodemanager/containermanager/AuxServices.java  |   4 +-
 .../containermanager/ContainerManagerImpl.java     |  29 ++--
 .../application/ApplicationImpl.java               |   5 +-
 .../containermanager/container/ContainerImpl.java  |   4 +-
 .../deletion/task/DockerContainerDeletionTask.java |   5 +-
 .../deletion/task/FileDeletionTask.java            |  18 +--
 .../launcher/ContainerCleanup.java                 |  34 ++---
 .../containermanager/launcher/ContainerLaunch.java |  33 ++---
 .../linux/resources/CGroupsHandlerImpl.java        |  11 +-
 .../resources/NetworkPacketTaggingHandlerImpl.java |   4 +-
 .../linux/resources/ResourceHandlerModule.java     |  16 +--
 .../TrafficControlBandwidthHandlerImpl.java        |  10 +-
 .../linux/resources/TrafficController.java         |  20 +--
 .../runtime/DelegatingLinuxContainerRuntime.java   |   4 +-
 .../linux/runtime/DockerLinuxContainerRuntime.java |  40 ++----
 .../runtime/docker/DockerCommandExecutor.java      |  19 +--
 .../localizer/LocalizedResource.java               |  12 +-
 .../localizer/ResourceLocalizationService.java     |  37 ++---
 .../localizer/security/LocalizerTokenSelector.java |   4 +-
 .../logaggregation/AppLogAggregatorImpl.java       |   8 +-
 .../loghandler/NonAggregatingLogHandler.java       |   6 +-
 .../monitor/ContainersMonitorImpl.java             |  26 ++--
 .../com/nvidia/NvidiaGPUPluginForRuntimeV2.java    |  18 +--
 .../resourceplugin/fpga/IntelFpgaOpenclPlugin.java |  10 +-
 .../resourceplugin/gpu/GpuDiscoverer.java          |   4 +-
 .../AllocationBasedResourceUtilizationTracker.java |   8 +-
 .../scheduler/ContainerScheduler.java              |   6 +-
 .../recovery/NMLeveldbStateStoreService.java       |  77 +++-------
 .../scheduler/DistributedScheduler.java            |   4 +-
 .../security/NMTokenSecretManagerInNM.java         |  16 +--
 .../timelineservice/NMTimelinePublisher.java       |  56 +++-----
 .../util/CgroupsLCEResourcesHandler.java           |  14 +-
 .../nodemanager/util/NodeManagerHardwareUtils.java |   8 +-
 .../nodemanager/util/ProcessIdFileReader.java      |  10 +-
 .../nodemanager/webapp/ContainerLogsPage.java      |   4 +-
 .../server/nodemanager/webapp/NMWebServices.java   |  10 +-
 .../ActiveStandbyElectorBasedElectorService.java   |   6 +-
 .../server/resourcemanager/ClientRMService.java    |   6 +-
 .../DecommissioningNodesWatcher.java               |  10 +-
 .../server/resourcemanager/NodesListManager.java   |   6 +-
 .../yarn/server/resourcemanager/RMAppManager.java  |   4 +-
 .../server/resourcemanager/ResourceManager.java    |   4 +-
 .../resourcemanager/ResourceTrackerService.java    |  33 ++---
 .../blacklist/SimpleBlacklistManager.java          |   8 +-
 .../metrics/TimelineServiceV2Publisher.java        |   4 +-
 .../monitor/capacity/FifoCandidatesSelector.java   |   6 +-
 .../capacity/FifoIntraQueuePreemptionPlugin.java   |  10 +-
 .../capacity/IntraQueueCandidatesSelector.java     |  26 ++--
 .../capacity/PreemptableResourceCalculator.java    |  12 +-
 .../ProportionalCapacityPreemptionPolicy.java      |   8 +-
 .../QueuePriorityContainerCandidateSelector.java   |  26 ++--
 .../ReservedContainerCandidatesSelector.java       |  15 +-
 .../nodelabels/RMDelegatedNodeLabelsUpdater.java   |   6 +-
 .../recovery/FileSystemRMStateStore.java           |  21 +--
 .../recovery/LeveldbRMStateStore.java              |  67 +++------
 .../resourcemanager/recovery/RMStateStore.java     |  13 +-
 .../resourcemanager/recovery/ZKRMStateStore.java   | 155 +++++++--------------
 .../reservation/AbstractSchedulerPlanFollower.java |  12 +-
 .../resource/DynamicResourceConfiguration.java     |  15 +-
 .../server/resourcemanager/rmapp/RMAppImpl.java    |   9 +-
 .../rmapp/attempt/RMAppAttemptImpl.java            |  29 ++--
 .../rmcontainer/RMContainerImpl.java               |   6 +-
 .../server/resourcemanager/rmnode/RMNodeImpl.java  |   9 +-
 .../scheduler/AbstractYarnScheduler.java           |  23 ++-
 .../scheduler/ActiveUsersManager.java              |  12 +-
 .../scheduler/AppSchedulingInfo.java               |  11 +-
 .../scheduler/SchedulerAppUtils.java               |  14 +-
 .../resourcemanager/scheduler/SchedulerUtils.java  |   6 +-
 .../capacity/AbstractAutoCreatedLeafQueue.java     |   2 +-
 .../scheduler/capacity/AbstractCSQueue.java        |  21 ++-
 .../capacity/AbstractManagedParentQueue.java       |   4 +-
 .../scheduler/capacity/CapacityScheduler.java      | 125 ++++++-----------
 .../capacity/CapacitySchedulerConfiguration.java   |  42 +++---
 .../scheduler/capacity/LeafQueue.java              |  62 +++------
 .../scheduler/capacity/ParentQueue.java            |   5 +-
 .../scheduler/capacity/UsersManager.java           |  31 ++---
 .../allocator/RegularContainerAllocator.java       |  28 ++--
 .../scheduler/common/fica/FiCaSchedulerApp.java    |  67 +++------
 .../MemoryPlacementConstraintManager.java          |  12 +-
 .../constraint/PlacementConstraintsUtil.java       |  11 +-
 .../algorithm/LocalAllocationTagsManager.java      |   4 +-
 .../distributed/NodeQueueLoadMonitor.java          |  20 ++-
 .../scheduler/fair/FSAppAttempt.java               |  45 ++----
 .../scheduler/fair/FSLeafQueue.java                |   6 +-
 .../resourcemanager/scheduler/fair/FSQueue.java    |   4 +-
 .../scheduler/fair/FairScheduler.java              |  30 ++--
 .../security/AMRMTokenSecretManager.java           |   4 +-
 .../security/DelegationTokenRenewer.java           |   5 +-
 .../RMTimelineCollectorManager.java                |  12 +-
 .../yarn/server/resourcemanager/Application.java   |   6 +-
 .../yarn/server/resourcemanager/NodeManager.java   |  16 +--
 .../clientrm/FederationClientInterceptor.java      |   2 +-
 .../metrics/ClientSCMMetrics.java                  |   2 +-
 .../metrics/SharedCacheUploaderMetrics.java        |   2 +-
 .../server/ContainerTokenIdentifierForTest.java    |   2 +-
 .../yarn/server/NMTokenIdentifierNewForTest.java   |   2 +-
 .../storage/HBaseTimelineReaderImpl.java           |   4 +-
 .../storage/common/ColumnRWHelper.java             |   4 +-
 .../storage/reader/TimelineEntityReader.java       |   8 +-
 .../timelineservice/storage/flow/FlowScanner.java  |  12 +-
 .../collector/NodeTimelineCollectorManager.java    |  16 +--
 .../collector/TimelineCollector.java               |  16 +--
 .../reader/TimelineReaderWebServices.java          |   6 +-
 .../storage/NoOpTimelineReaderImpl.java            |  12 +-
 .../storage/NoOpTimelineWriterImpl.java            |  16 +--
 .../hadoop/yarn/server/webproxy/ProxyCA.java       |   4 +-
 .../hadoop/yarn/server/webproxy/ProxyUtils.java    |   4 +-
 .../yarn/server/webproxy/WebAppProxyServlet.java   |   8 +-
 .../yarn/server/webproxy/amfilter/AmIpFilter.java  |  27 ++--
 168 files changed, 873 insertions(+), 1748 deletions(-)
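
For readers scanning the diff below, nearly every hunk follows the same shape: a
debug statement built by string concatenation, often wrapped in an explicit
LOG.isDebugEnabled() guard, is replaced by an slf4j parameterized call using {}
placeholders. The following minimal before/after sketch is not part of the
patch; the class name is hypothetical and the message text is only borrowed
from the ApplicationMaster hunk for illustration:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LoggingPatternSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(LoggingPatternSketch.class);

      void before(String containerId) {
        // Old style: the guard avoids building the message string when
        // DEBUG is disabled.
        if (LOG.isDebugEnabled()) {
          LOG.debug("Succeeded to stop Container " + containerId);
        }
      }

      void after(String containerId) {
        // New style: slf4j substitutes the {} placeholder only when DEBUG
        // is enabled, so the guard and the concatenation can be dropped.
        LOG.debug("Succeeded to stop Container {}", containerId);
      }
    }

Because the {} placeholders defer argument formatting until slf4j knows that
DEBUG is enabled, most of the explicit isDebugEnabled() checks in the files
below become unnecessary, which accounts for the large deletion count in the
stat above.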

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ProviderUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ProviderUtils.java
index 4eb3622..d7a6821 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ProviderUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ProviderUtils.java
@@ -167,9 +167,8 @@ public final class ProviderUtils {
         }
         if (clazz != null) {
           if (fileSystemClass.isAssignableFrom(clazz)) {
-            LOG.debug("Filesystem based provider" +
-                " excluded from provider path due to recursive dependency: "
-                + provider);
+            LOG.debug("Filesystem based provider excluded from provider " +
+                "path due to recursive dependency: {}", provider);
           } else {
             if (newProviderPath.length() > 0) {
               newProviderPath.append(",");
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
index b6cb581..13a872c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
@@ -138,20 +138,17 @@ public class ResourceUtils {
       Map<String, ResourceInformation> res) {
     ResourceInformation ri;
     if (!res.containsKey(MEMORY)) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Adding resource type - name = " + MEMORY + ", units = "
-            + ResourceInformation.MEMORY_MB.getUnits() + ", type = "
-            + ResourceTypes.COUNTABLE);
-      }
+      LOG.debug("Adding resource type - name = {}, units = {}, type = {}",
+          MEMORY, ResourceInformation.MEMORY_MB.getUnits(),
+          ResourceTypes.COUNTABLE);
       ri = ResourceInformation.newInstance(MEMORY,
           ResourceInformation.MEMORY_MB.getUnits());
       res.put(MEMORY, ri);
     }
     if (!res.containsKey(VCORES)) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Adding resource type - name = " + VCORES
-            + ", units = , type = " + ResourceTypes.COUNTABLE);
-      }
+      LOG.debug("Adding resource type - name = {}, units = {}, type = {}",
+          VCORES, ResourceInformation.VCORES.getUnits(),
+          ResourceTypes.COUNTABLE);
       ri = ResourceInformation.newInstance(VCORES);
       res.put(VCORES, ri);
     }
@@ -189,9 +186,9 @@ public class ResourceUtils {
       String resourceTypesKey, String schedulerKey, long schedulerDefault) {
     long value = conf.getLong(resourceTypesKey, -1L);
     if (value == -1) {
-      LOG.debug("Mandatory Resource '" + resourceTypesKey + "' is not "
+      LOG.debug("Mandatory Resource '{}' is not "
           + "configured in resource-types config file. Setting allocation "
-          + "specified using '" + schedulerKey + "'");
+          + "specified using '{}'", resourceTypesKey, schedulerKey);
       value = conf.getLong(schedulerKey, schedulerDefault);
     }
     return value;
@@ -450,9 +447,7 @@ public class ResourceUtils {
       Configuration conf) {
     try {
       InputStream ris = getConfInputStream(resourceFile, conf);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Found " + resourceFile + ", adding to configuration");
-      }
+      LOG.debug("Found {}, adding to configuration", resourceFile);
       conf.addResource(ris);
     } catch (FileNotFoundException fe) {
       LOG.info("Unable to find '" + resourceFile + "'.");
@@ -575,10 +570,8 @@ public class ResourceUtils {
       }
       nodeResources.get(resourceType).setValue(resourceValue);
       nodeResources.get(resourceType).setUnits(units);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Setting value for resource type " + resourceType + " to "
-            + resourceValue + " with units " + units);
-      }
+      LOG.debug("Setting value for resource type {} to {} with units {}",
+          resourceType, resourceValue, units);
     }
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
index 333e00c..5d437c9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
@@ -1269,19 +1269,15 @@ public class ApplicationMaster {
 
     @Override
     public void onContainerStopped(ContainerId containerId) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Succeeded to stop Container " + containerId);
-      }
+      LOG.debug("Succeeded to stop Container {}", containerId);
       containers.remove(containerId);
     }
 
     @Override
     public void onContainerStatusReceived(ContainerId containerId,
         ContainerStatus containerStatus) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Container Status: id=" + containerId + ", status=" +
-            containerStatus);
-      }
+      LOG.debug("Container Status: id={}, status={}", containerId,
+          containerStatus);
 
       // If promote_opportunistic_after_start is set, automatically promote
       // opportunistic containers to guaranteed.
@@ -1305,9 +1301,7 @@ public class ApplicationMaster {
     @Override
     public void onContainerStarted(ContainerId containerId,
         Map<String, ByteBuffer> allServiceResponse) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Succeeded to start Container " + containerId);
-      }
+      LOG.debug("Succeeded to start Container {}", containerId);
       Container container = containers.get(containerId);
       if (container != null) {
         applicationMaster.nmClientAsync.getContainerStatusAsync(
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java
index 08ad1b6..db11f40 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java
@@ -361,9 +361,7 @@ public class SystemServiceManagerImpl extends AbstractService
   private Service getServiceDefinition(Path filePath) {
     Service service = null;
     try {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Loading service definition from FS: " + filePath);
-      }
+      LOG.debug("Loading service definition from FS: {}", filePath);
       service = jsonSerDeser.load(fs, filePath);
     } catch (IOException e) {
       LOG.info("Error while loading service definition from FS: {}", e);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
index 5cdb8c9..08352a8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
@@ -1189,7 +1189,7 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
           .append(entry.getValue().getResource().getFile())
           .append(System.lineSeparator());
     }
-    LOG.debug(builder.toString());
+    LOG.debug("{}", builder);
   }
 
   private String buildCommandLine(Service app, Configuration conf,
@@ -1249,7 +1249,7 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
     }
     if (!UserGroupInformation.isSecurityEnabled()) {
       String userName = UserGroupInformation.getCurrentUser().getUserName();
-      LOG.debug("Run as user " + userName);
+      LOG.debug("Run as user {}", userName);
       // HADOOP_USER_NAME env is used by UserGroupInformation when log in
       // This env makes AM run as this user
       env.put("HADOOP_USER_NAME", userName);
@@ -1405,7 +1405,7 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
       if (LOG.isDebugEnabled()) {
         if (tokens != null && tokens.length != 0) {
           for (Token<?> token : tokens) {
-            LOG.debug("Got DT: " + token);
+            LOG.debug("Got DT: {}", token);
           }
         }
       }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/AbstractLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/AbstractLauncher.java
index aff07e5..d1dda38 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/AbstractLauncher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/AbstractLauncher.java
@@ -196,7 +196,7 @@ public class AbstractLauncher {
 
         String key = entry.getKey();
         LocalResource val = entry.getValue();
-        log.debug(key + "=" + ServiceUtils.stringify(val.getResource()));
+        log.debug("{} = {}", key, ServiceUtils.stringify(val.getResource()));
       }
     }
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceMetricsSink.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceMetricsSink.java
index cf4e836..ff4556f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceMetricsSink.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceMetricsSink.java
@@ -77,16 +77,12 @@ public class ServiceMetricsSink implements MetricsSink {
     }
 
     if (isServiceMetrics && appId != null) {
-      if (log.isDebugEnabled()) {
-        log.debug("Publishing service metrics. " + record);
-      }
+      log.debug("Publishing service metrics. {}", record);
       serviceTimelinePublisher.publishMetrics(record.metrics(), appId,
           ServiceTimelineEntityType.SERVICE_ATTEMPT.toString(),
           record.timestamp());
     } else if (isComponentMetrics) {
-      if (log.isDebugEnabled()) {
-        log.debug("Publishing Component metrics. " + record);
-      }
+      log.debug("Publishing Component metrics. {}", record);
       serviceTimelinePublisher.publishMetrics(record.metrics(), record.name(),
           ServiceTimelineEntityType.COMPONENT.toString(), record.timestamp());
     }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
index 954b9f6..3840fa5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
@@ -857,10 +857,7 @@ public abstract class AMRMClient<T extends AMRMClient.ContainerRequest> extends
 
     int loggingCounter = logInterval;
     do {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Check the condition for main loop.");
-      }
-
+      LOG.debug("Check the condition for main loop.");
       boolean result = check.get();
       if (result) {
         LOG.info("Exits the main loop.");
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java
index c9f4e5f..688c843 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java
@@ -465,10 +465,7 @@ extends AbstractService {
 
     int loggingCounter = logInterval;
     do {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Check the condition for main loop.");
-      }
-
+      LOG.debug("Check the condition for main loop.");
       boolean result = check.get();
       if (result) {
         LOG.info("Exits the main loop.");
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java
index c35b018..ed544f2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java
@@ -80,10 +80,8 @@ public class ContainerManagementProtocolProxy {
               + " (" + maxConnectedNMs + ") can not be less than 0.");
     }
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(YarnConfiguration.NM_CLIENT_MAX_NM_PROXIES + " : " +
-          maxConnectedNMs);
-    }
+    LOG.debug("{} : {}", YarnConfiguration.NM_CLIENT_MAX_NM_PROXIES,
+        maxConnectedNMs);
 
     if (maxConnectedNMs > 0) {
       cmProxy =
@@ -110,10 +108,8 @@ public class ContainerManagementProtocolProxy {
     while (proxy != null
         && !proxy.token.getIdentifier().equals(
             nmTokenCache.getToken(containerManagerBindAddr).getIdentifier())) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Refreshing proxy as NMToken got updated for node : "
-            + containerManagerBindAddr);
-      }
+      LOG.debug("Refreshing proxy as NMToken got updated for node : {}",
+          containerManagerBindAddr);
       // Token is updated. check if anyone has already tried closing it.
       if (!proxy.scheduledForClose) {
         // try closing the proxy. Here if someone is already using it
@@ -149,10 +145,8 @@ public class ContainerManagementProtocolProxy {
   private void addProxyToCache(String containerManagerBindAddr,
       ContainerManagementProtocolProxyData proxy) {
     while (cmProxy.size() >= maxConnectedNMs) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Cleaning up the proxy cache, size=" + cmProxy.size()
-            + " max=" + maxConnectedNMs);
-      }
+      LOG.debug("Cleaning up the proxy cache, size={} max={}", cmProxy.size(),
+          maxConnectedNMs);
       boolean removedProxy = false;
       for (ContainerManagementProtocolProxyData otherProxy : cmProxy.values()) {
         removedProxy = removeProxy(otherProxy);
@@ -193,9 +187,7 @@ public class ContainerManagementProtocolProxy {
       ContainerManagementProtocolProxyData proxy) {
     proxy.activeCallers--;
     if (proxy.scheduledForClose && proxy.activeCallers < 0) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Closing proxy : " + proxy.containerManagerBindAddr);
-      }
+      LOG.debug("Closing proxy : {}", proxy.containerManagerBindAddr);
       cmProxy.remove(proxy.containerManagerBindAddr);
       try {
         rpc.stopProxy(proxy.getContainerManagementProtocol(), conf);
@@ -265,9 +257,7 @@ public class ContainerManagementProtocolProxy {
       
       final InetSocketAddress cmAddr =
           NetUtils.createSocketAddr(containerManagerBindAddr);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Opening proxy : " + containerManagerBindAddr);
-      }
+      LOG.debug("Opening proxy : {}", containerManagerBindAddr);
       // the user in createRemoteUser in this context has to be ContainerID
       UserGroupInformation user =
           UserGroupInformation.createRemoteUser(containerId
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/RemoteRequestsTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/RemoteRequestsTable.java
index e1b7bb2..b95b793 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/RemoteRequestsTable.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/RemoteRequestsTable.java
@@ -137,27 +137,21 @@ class RemoteRequestsTable<T> implements Iterable<ResourceRequestInfo>{
     if (locationMap == null) {
       locationMap = new HashMap<>();
       this.remoteRequestsTable.put(priority, locationMap);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Added priority=" + priority);
-      }
+      LOG.debug("Added priority={}", priority);
     }
     Map<ExecutionType, TreeMap<Resource, ResourceRequestInfo>>
         execTypeMap = locationMap.get(resourceName);
     if (execTypeMap == null) {
       execTypeMap = new HashMap<>();
       locationMap.put(resourceName, execTypeMap);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Added resourceName=" + resourceName);
-      }
+      LOG.debug("Added resourceName={}", resourceName);
     }
     TreeMap<Resource, ResourceRequestInfo> capabilityMap =
         execTypeMap.get(execType);
     if (capabilityMap == null) {
       capabilityMap = new TreeMap<>(new AMRMClientImpl.ResourceReverseComparator());
       execTypeMap.put(execType, capabilityMap);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Added Execution Type=" + execType);
-      }
+      LOG.debug("Added Execution Type={}", execType);
     }
     capabilityMap.put(capability, resReqInfo);
   }
@@ -168,25 +162,19 @@ class RemoteRequestsTable<T> implements Iterable<ResourceRequestInfo>{
     Map<String, Map<ExecutionType, TreeMap<Resource,
         ResourceRequestInfo>>> locationMap = remoteRequestsTable.get(priority);
     if (locationMap == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("No such priority=" + priority);
-      }
+      LOG.debug("No such priority={}", priority);
       return null;
     }
     Map<ExecutionType, TreeMap<Resource, ResourceRequestInfo>>
         execTypeMap = locationMap.get(resourceName);
     if (execTypeMap == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("No such resourceName=" + resourceName);
-      }
+      LOG.debug("No such resourceName={}", resourceName);
       return null;
     }
     TreeMap<Resource, ResourceRequestInfo> capabilityMap =
         execTypeMap.get(execType);
     if (capabilityMap == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("No such Execution Type=" + execType);
-      }
+      LOG.debug("No such Execution Type={}", execType);
       return null;
     }
     retVal = capabilityMap.remove(capability);
@@ -286,9 +274,8 @@ class RemoteRequestsTable<T> implements Iterable<ResourceRequestInfo>{
     if (ResourceRequest.ANY.equals(resourceName)) {
       resourceRequestInfo.remoteRequest.setNodeLabelExpression(labelExpression);
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Adding request to ask " + resourceRequestInfo.remoteRequest);
-    }
+    LOG.debug("Adding request to ask {}", resourceRequestInfo.remoteRequest);
+
     return resourceRequestInfo;
   }
 
@@ -298,22 +285,16 @@ class RemoteRequestsTable<T> implements Iterable<ResourceRequestInfo>{
         execTypeReq.getExecutionType(), capability);
 
     if (resourceRequestInfo == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Not decrementing resource as ResourceRequestInfo with" +
-            "priority=" + priority + ", " +
-            "resourceName=" + resourceName + ", " +
-            "executionType=" + execTypeReq + ", " +
-            "capability=" + capability + " is not present in request table");
-      }
+      LOG.debug("Not decrementing resource as ResourceRequestInfo with"
+          + " priority={} resourceName={} executionType={} capability={} is"
+          + " not present in request table", priority, resourceName,
+          execTypeReq, capability);
       return null;
     }
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("BEFORE decResourceRequest:" + " applicationId="
-          + " priority=" + priority.getPriority()
-          + " resourceName=" + resourceName + " numContainers="
-          + resourceRequestInfo.remoteRequest.getNumContainers());
-    }
+    LOG.debug("BEFORE decResourceRequest: applicationId= priority={}"
+        +" resourceName={} numContainers={}", priority.getPriority(),
+        resourceName, resourceRequestInfo.remoteRequest.getNumContainers());
 
     resourceRequestInfo.remoteRequest.setNumContainers(
         resourceRequestInfo.remoteRequest.getNumContainers() - 1);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
index 51dbf37..4763b53 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
@@ -84,9 +84,7 @@ public class SharedCacheClientImpl extends SharedCacheClient {
   @Override
   protected void serviceStart() throws Exception {
     this.scmClient = createClientProxy();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Connecting to Shared Cache Manager at " + this.scmAddress);
-    }
+    LOG.debug("Connecting to Shared Cache Manager at {}", this.scmAddress);
     super.serviceStart();
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
index 86bda98..14133ba 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
@@ -397,10 +397,8 @@ public class YarnClientImpl extends YarnClient {
       return;
     }
     credentials.addToken(timelineService, timelineDelegationToken);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Add timeline delegation token into credentials: "
-          + timelineDelegationToken);
-    }
+    LOG.debug("Add timeline delegation token into credentials: {}",
+        timelineDelegationToken);
     DataOutputBuffer dob = new DataOutputBuffer();
     credentials.writeTokenStorageToStream(dob);
     tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
index ace5fdf..e605184 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
@@ -224,10 +224,8 @@ public class FileSystemTimelineWriter extends TimelineWriter{
     if (!entitiesToSummaryCache.isEmpty()) {
       Path summaryLogPath =
           new Path(attemptDir, SUMMARY_LOG_PREFIX + appAttemptId.toString());
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Writing summary log for " + appAttemptId.toString() + " to "
-            + summaryLogPath);
-      }
+      LOG.debug("Writing summary log for {} to {}", appAttemptId,
+          summaryLogPath);
       this.logFDsCache.writeSummaryEntityLogs(fs, summaryLogPath, objMapper,
           appAttemptId, entitiesToSummaryCache, isAppendSupported);
     }
@@ -235,10 +233,7 @@ public class FileSystemTimelineWriter extends TimelineWriter{
     if (!entitiesToEntityCache.isEmpty()) {
       Path entityLogPath =
           new Path(attemptDir, ENTITY_LOG_PREFIX + groupId.toString());
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Writing entity log for " + groupId.toString() + " to "
-            + entityLogPath);
-      }
+      LOG.debug("Writing entity log for {} to {}", groupId, entityLogPath);
       this.logFDsCache.writeEntityLogs(fs, entityLogPath, objMapper,
           appAttemptId, groupId, entitiesToEntityCache, isAppendSupported);
     }
@@ -293,8 +288,7 @@ public class FileSystemTimelineWriter extends TimelineWriter{
         new Path(attemptDirCache.getAppAttemptDir(appAttemptId),
             DOMAIN_LOG_PREFIX + appAttemptId.toString());
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Writing domains for " + appAttemptId.toString() + " to "
-          + domainLogPath);
+      LOG.debug("Writing domains for {} to {}", appAttemptId, domainLogPath);
     }
     this.logFDsCache.writeDomainLog(
         fs, domainLogPath, objMapper, domain, isAppendSupported);
@@ -324,9 +318,7 @@ public class FileSystemTimelineWriter extends TimelineWriter{
       if (writerClosed()) {
         prepareForWrite();
       }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Writing entity list of size " + entities.size());
-      }
+      LOG.debug("Writing entity list of size {}", entities.size());
       for (TimelineEntity entity : entities) {
         getObjectMapper().writeValue(getJsonGenerator(), entity);
       }
@@ -558,9 +550,7 @@ public class FileSystemTimelineWriter extends TimelineWriter{
         try {
           flush();
         } catch (Exception e) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(e.toString());
-          }
+          LOG.debug("{}", e);
         }
       }
     }
@@ -997,9 +987,7 @@ public class FileSystemTimelineWriter extends TimelineWriter{
       Path attemptDir = new Path(appDir, appAttemptId.toString());
       if (FileSystem.mkdirs(fs, attemptDir,
           new FsPermission(APP_LOG_DIR_PERMISSIONS))) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("New attempt directory created - " + attemptDir);
-        }
+        LOG.debug("New attempt directory created - {}", attemptDir);
       }
       return attemptDir;
     }
@@ -1009,9 +997,7 @@ public class FileSystemTimelineWriter extends TimelineWriter{
       Path appDir = new Path(appRootDir, appId.toString());
       if (FileSystem.mkdirs(fs, appDir,
           new FsPermission(APP_LOG_DIR_PERMISSIONS))) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("New app directory created - " + appDir);
-        }
+        LOG.debug("New app directory created - {}", appDir);
       }
       return appDir;
     }
@@ -1023,9 +1009,7 @@ public class FileSystemTimelineWriter extends TimelineWriter{
       Path userDir = new Path(activePath, user);
       if (FileSystem.mkdirs(fs, userDir,
           new FsPermission(APP_LOG_DIR_PERMISSIONS))) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("New user directory created - " + userDir);
-        }
+        LOG.debug("New user directory created - {}", userDir);
       }
       return userDir;
     }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineWriter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineWriter.java
index f52479d..1f4f201 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineWriter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineWriter.java
@@ -133,11 +133,8 @@ public abstract class TimelineWriter implements Flushable {
       LOG.error(msg);
       if (resp != null) {
         msg += " HTTP error code: " + resp.getStatus();
-        if (LOG.isDebugEnabled()) {
-          String output = resp.getEntity(String.class);
-          LOG.debug("HTTP error code: " + resp.getStatus()
-              + " Server response : \n" + output);
-        }
+        LOG.debug("HTTP error code: {} Server response : \n{}",
+            resp.getStatus(), resp.getEntity(String.class));
       }
       throw new YarnException(msg);
     }
@@ -149,18 +146,14 @@ public abstract class TimelineWriter implements Flushable {
   public ClientResponse doPostingObject(Object object, String path) {
     WebResource webResource = client.resource(resURI);
     if (path == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("POST to " + resURI);
-      }
+      LOG.debug("POST to {}", resURI);
       ClientResponse r = webResource.accept(MediaType.APPLICATION_JSON)
           .type(MediaType.APPLICATION_JSON)
           .post(ClientResponse.class, object);
       r.bufferEntity();
       return r;
     } else if (path.equals("domain")) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("PUT to " + resURI +"/" + path);
-      }
+      LOG.debug("PUT to {}/{}", resURI, path);
       ClientResponse r = webResource.path(path).accept(MediaType.APPLICATION_JSON)
           .type(MediaType.APPLICATION_JSON)
           .put(ClientResponse.class, object);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
index 333faa5..5019369 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
@@ -189,10 +189,8 @@ public class AsyncDispatcher extends AbstractService implements Dispatcher {
   @SuppressWarnings("unchecked")
   protected void dispatch(Event event) {
     //all events go thru this loop
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Dispatching the event " + event.getClass().getName() + "."
-          + event.toString());
-    }
+    LOG.debug("Dispatching the event {}.{}", event.getClass().getName(),
+        event);
 
     Class<? extends Enum> type = event.getType().getDeclaringClass();
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/HadoopYarnProtoRPC.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/HadoopYarnProtoRPC.java
index d9d999f..e14ed44 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/HadoopYarnProtoRPC.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/HadoopYarnProtoRPC.java
@@ -45,7 +45,7 @@ public class HadoopYarnProtoRPC extends YarnRPC {
   @Override
   public Object getProxy(Class protocol, InetSocketAddress addr,
       Configuration conf) {
-    LOG.debug("Creating a HadoopYarnProtoRpc proxy for protocol " + protocol);
+    LOG.debug("Creating a HadoopYarnProtoRpc proxy for protocol {}", protocol);
     return RpcFactoryProvider.getClientFactory(conf).getClient(protocol, 1,
         addr, conf);
   }
@@ -60,8 +60,8 @@ public class HadoopYarnProtoRPC extends YarnRPC {
       InetSocketAddress addr, Configuration conf,
       SecretManager<? extends TokenIdentifier> secretManager,
       int numHandlers, String portRangeConfig) {
-    LOG.debug("Creating a HadoopYarnProtoRpc server for protocol " + protocol + 
-        " with " + numHandlers + " handlers");
+    LOG.debug("Creating a HadoopYarnProtoRpc server for protocol {} with {}"
+        + " handlers", protocol, numHandlers);
     
     return RpcFactoryProvider.getServerFactory(conf).getServer(protocol, 
         instance, addr, conf, secretManager, numHandlers, portRangeConfig);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/YarnRPC.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/YarnRPC.java
index 436445f..54e3c58 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/YarnRPC.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/YarnRPC.java
@@ -57,7 +57,7 @@ public abstract class YarnRPC {
   }
   
   public static YarnRPC create(Configuration conf) {
-    LOG.debug("Creating YarnRPC for " + 
+    LOG.debug("Creating YarnRPC for {}",
         conf.get(YarnConfiguration.IPC_RPC_IMPL));
     String clazzName = conf.get(YarnConfiguration.IPC_RPC_IMPL);
     if (clazzName == null) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
index 78b0c13..9ab3e37 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
@@ -850,10 +850,10 @@ public class LogAggregationIndexedFileController
       }
       if (uuidReadLen != UUID_LENGTH || !Arrays.equals(this.uuid, uuidRead)) {
         if (LOG.isDebugEnabled()) {
-          LOG.debug("the length of loaded UUID:" + uuidReadLen);
-          LOG.debug("the loaded UUID:" + new String(uuidRead,
+          LOG.debug("the length of loaded UUID:{}", uuidReadLen);
+          LOG.debug("the loaded UUID:{}", new String(uuidRead,
               Charset.forName("UTF-8")));
-          LOG.debug("the expected UUID:" + new String(this.uuid,
+          LOG.debug("the expected UUID:{}", new String(this.uuid,
               Charset.forName("UTF-8")));
         }
         throw new IOException("The UUID from "
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NonAppendableFSNodeLabelStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NonAppendableFSNodeLabelStore.java
index afacac7..0686f12 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NonAppendableFSNodeLabelStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NonAppendableFSNodeLabelStore.java
@@ -61,9 +61,7 @@ public class NonAppendableFSNodeLabelStore extends FileSystemNodeLabelsStore {
         fs.delete(oldMirrorPath, false);
       } catch (IOException e) {
         // do nothing
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Exception while removing old mirror", e);
-        }
+        LOG.debug("Exception while removing old mirror", e);
       }
       
       // rename new to old
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AMRMTokenSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AMRMTokenSelector.java
index a041334..b9e6e2c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AMRMTokenSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AMRMTokenSelector.java
@@ -43,10 +43,10 @@ public class AMRMTokenSelector implements
     if (service == null) {
       return null;
     }
-    LOG.debug("Looking for a token with service " + service.toString());
+    LOG.debug("Looking for a token with service {}", service);
     for (Token<? extends TokenIdentifier> token : tokens) {
-      LOG.debug("Token kind is " + token.getKind().toString()
-          + " and the token's service name is " + token.getService());
+      LOG.debug("Token kind is {} and the token's service name is {}",
+          token.getKind(), token.getService());
       if (AMRMTokenIdentifier.KIND_NAME.equals(token.getKind())
           && checkService(service, token)) {
         return (Token<AMRMTokenIdentifier>) token;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
index 7852b9c..d6ec20b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
@@ -326,7 +326,7 @@ public class ContainerTokenIdentifier extends TokenIdentifier {
 
   @Override
   public void write(DataOutput out) throws IOException {
-    LOG.debug("Writing ContainerTokenIdentifier to RPC layer: " + this);
+    LOG.debug("Writing ContainerTokenIdentifier to RPC layer: {}", this);
     out.write(proto.toByteArray());
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenSelector.java
index 65c59ac..a6d094f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenSelector.java
@@ -45,10 +45,8 @@ public class ContainerTokenSelector implements
       return null;
     }
     for (Token<? extends TokenIdentifier> token : tokens) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Looking for service: " + service + ". Current token is "
-            + token);
-      }
+      LOG.debug("Looking for service: {}. Current token is {}", service,
+          token);
       if (ContainerTokenIdentifier.KIND.equals(token.getKind()) && 
           service.equals(token.getService())) {
         return (Token<ContainerTokenIdentifier>) token;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/NMTokenIdentifier.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/NMTokenIdentifier.java
index 1a7323f..7e87c88 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/NMTokenIdentifier.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/NMTokenIdentifier.java
@@ -98,7 +98,7 @@ public class NMTokenIdentifier extends TokenIdentifier {
   
   @Override
   public void write(DataOutput out) throws IOException {
-    LOG.debug("Writing NMTokenIdentifier to RPC layer: " + this);
+    LOG.debug("Writing NMTokenIdentifier to RPC layer: {}", this);
     out.write(proto.toByteArray());
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/NMTokenSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/NMTokenSelector.java
index ecc65c5..9eac75a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/NMTokenSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/NMTokenSelector.java
@@ -41,10 +41,8 @@ public class NMTokenSelector implements
       return null;
     }
     for (Token<? extends TokenIdentifier> token : tokens) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Looking for service: " + service + ". Current token is "
-            + token);
-      }
+      LOG.debug("Looking for service: {}. Current token is {}", service,
+          token);
       if (NMTokenIdentifier.KIND.equals(token.getKind()) && 
           service.equals(token.getService())) {
         return (Token<NMTokenIdentifier>) token;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/YarnAuthorizationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/YarnAuthorizationProvider.java
index 7f39fa4..d608779 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/YarnAuthorizationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/YarnAuthorizationProvider.java
@@ -70,7 +70,7 @@ public abstract class YarnAuthorizationProvider {
   public static void destroy() {
     synchronized (YarnAuthorizationProvider.class) {
       if (authorizer != null) {
-        LOG.debug(authorizer.getClass().getName() + " is destroyed.");
+        LOG.debug("{} is destroyed.", authorizer.getClass().getName());
         authorizer = null;
       }
     }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMTokenSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMTokenSelector.java
index 5718965..a515cdd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMTokenSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMTokenSelector.java
@@ -39,10 +39,10 @@ public class ClientToAMTokenSelector implements
     if (service == null) {
       return null;
     }
-    LOG.debug("Looking for a token with service " + service.toString());
+    LOG.debug("Looking for a token with service {}", service);
     for (Token<? extends TokenIdentifier> token : tokens) {
-      LOG.debug("Token kind is " + token.getKind().toString()
-          + " and the token's service name is " + token.getService());
+      LOG.debug("Token kind is {} and the token's service name is {}",
+          token.getKind(), token.getService());
       if (ClientToAMTokenIdentifier.KIND_NAME.equals(token.getKind())
           && service.equals(token.getService())) {
         return (Token<ClientToAMTokenIdentifier>) token;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/RMDelegationTokenSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/RMDelegationTokenSelector.java
index cfeb62f..6aeac86 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/RMDelegationTokenSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/RMDelegationTokenSelector.java
@@ -51,10 +51,10 @@ public class RMDelegationTokenSelector implements
     if (service == null) {
       return null;
     }
-    LOG.debug("Looking for a token with service " + service.toString());
+    LOG.debug("Looking for a token with service {}", service);
     for (Token<? extends TokenIdentifier> token : tokens) {
-      LOG.debug("Token kind is " + token.getKind().toString()
-          + " and the token's service name is " + token.getService());
+      LOG.debug("Token kind is {} and the token's service name is {}",
+          token.getKind(), token.getService());
       if (RMDelegationTokenIdentifier.KIND_NAME.equals(token.getKind())
           && checkService(service, token)) {
         return (Token<RMDelegationTokenIdentifier>) token;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/TimelineDelegationTokenSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/TimelineDelegationTokenSelector.java
index b75f288..ee2e0f1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/TimelineDelegationTokenSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/TimelineDelegationTokenSelector.java
@@ -43,14 +43,10 @@ public class TimelineDelegationTokenSelector
     if (service == null) {
       return null;
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Looking for a token with service " + service.toString());
-    }
+    LOG.debug("Looking for a token with service {}", service);
     for (Token<? extends TokenIdentifier> token : tokens) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Token kind is " + token.getKind().toString()
-            + " and the token's service name is " + token.getService());
-      }
+      LOG.debug("Token kind is {} and the token's service name is {}",
+          token.getKind(), token.getService());
       if (TimelineDelegationTokenIdentifier.KIND_NAME.equals(token.getKind())
           && service.equals(token.getService())) {
         return (Token<TimelineDelegationTokenIdentifier>) token;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/security/ApplicationACLsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/security/ApplicationACLsManager.java
index 8cf34e9..952ad77 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/security/ApplicationACLsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/security/ApplicationACLsManager.java
@@ -98,11 +98,8 @@ public class ApplicationACLsManager {
       ApplicationAccessType applicationAccessType, String applicationOwner,
       ApplicationId applicationId) {
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Verifying access-type " + applicationAccessType + " for "
-          + callerUGI + " on application " + applicationId + " owned by "
-          + applicationOwner);
-    }
+    LOG.debug("Verifying access-type {} for {} on application {} owned by {}",
+            applicationAccessType, callerUGI, applicationId, applicationOwner);
 
     String user = callerUGI.getShortUserName();
     if (!areACLsEnabled()) {
@@ -112,21 +109,18 @@ public class ApplicationACLsManager {
     Map<ApplicationAccessType, AccessControlList> acls = this.applicationACLS
         .get(applicationId);
     if (acls == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("ACL not found for application "
-            + applicationId + " owned by "
-            + applicationOwner + ". Using default ["
-            + YarnConfiguration.DEFAULT_YARN_APP_ACL + "]");
-      }
+      LOG.debug("ACL not found for application {} owned by {}."
+          + " Using default [{}]", applicationId, applicationOwner,
+          YarnConfiguration.DEFAULT_YARN_APP_ACL);
     } else {
       AccessControlList applicationACLInMap = acls.get(applicationAccessType);
       if (applicationACLInMap != null) {
         applicationACL = applicationACLInMap;
-      } else if (LOG.isDebugEnabled()) {
-        LOG.debug("ACL not found for access-type " + applicationAccessType
-            + " for application " + applicationId + " owned by "
-            + applicationOwner + ". Using default ["
-            + YarnConfiguration.DEFAULT_YARN_APP_ACL + "]");
+      } else {
+        LOG.debug("ACL not found for access-type {} for application {}"
+            + " owned by {}. Using default [{}]", applicationAccessType,
+            applicationId, applicationOwner,
+            YarnConfiguration.DEFAULT_YARN_APP_ACL);
       }
     }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/DockerClientConfigHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/DockerClientConfigHandler.java
index 8ec4deb..ac2ce2c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/DockerClientConfigHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/DockerClientConfigHandler.java
@@ -141,7 +141,7 @@ public final class DockerClientConfigHandler {
     tokens.rewind();
     if (LOG.isDebugEnabled()) {
       for (Token token : credentials.getAllTokens()) {
-        LOG.debug("Token read from token storage: " + token.toString());
+        LOG.debug("Token read from token storage: {}", token);
       }
     }
     return credentials;
@@ -172,9 +172,7 @@ public final class DockerClientConfigHandler {
           registryUrlNode.put(ti.getRegistryUrl(), registryCredNode);
           registryCredNode.put(CONFIG_AUTH_KEY,
               new String(tk.getPassword(), Charset.forName("UTF-8")));
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Prepared token for write: " + tk.toString());
-          }
+          LOG.debug("Prepared token for write: {}", tk);
         }
       }
       if (foundDockerCred) {
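The hunk above keeps the isDebugEnabled() guard where the debug path itself does work (iterating every token in the credentials) and only converts the statement inside the loop. Parameterized logging removes the per-message formatting cost, not the cost of reaching the log call. A small sketch of that shape, with a hypothetical token list standing in for the Credentials object:

import java.util.Arrays;
import java.util.List;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class GuardedLoopSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(GuardedLoopSketch.class);

  public static void main(String[] args) {
    List<String> tokens = Arrays.asList("token-1", "token-2", "token-3");

    // The guard skips the whole iteration when DEBUG is off; inside the loop,
    // each message still uses the lazy "{}" form.
    if (LOG.isDebugEnabled()) {
      for (String token : tokens) {
        LOG.debug("Token read from token storage: {}", token);
      }
    }
  }
}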
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java
index 08a5724..e7369a9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/FSDownload.java
@@ -394,12 +394,8 @@ public class FSDownload implements Callable<Path> {
       throw new IOException("Invalid resource", e);
     }
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(String.format("Starting to download %s %s %s",
-          sCopy,
-          resource.getType(),
-          resource.getPattern()));
-    }
+    LOG.debug("Starting to download {} {} {}", sCopy,
+        resource.getType(), resource.getPattern());
 
     final Path destinationTmp = new Path(destDirPath + "_tmp");
     createDir(destinationTmp, cachePerms);
@@ -420,10 +416,8 @@ public class FSDownload implements Callable<Path> {
       changePermissions(dFinal.getFileSystem(conf), dFinal);
       files.rename(destinationTmp, destDirPath, Rename.OVERWRITE);
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(String.format("File has been downloaded to %s from %s",
-            new Path(destDirPath, sCopy.getName()), sCopy));
-      }
+      LOG.debug("File has been downloaded to {} from {}",
+          new Path(destDirPath, sCopy.getName()), sCopy);
     } catch (Exception e) {
       try {
         files.delete(destDirPath, true);
@@ -470,9 +464,7 @@ public class FSDownload implements Callable<Path> {
       perm = isDir ? PRIVATE_DIR_PERMS : PRIVATE_FILE_PERMS;
     }
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Changing permissions for path " + path + " to perm " + perm);
-    }
+    LOG.debug("Changing permissions for path {} to perm {}", path, perm);
 
     final FsPermission fPerm = perm;
     if (null == userUgi) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
index 0bfd40b..8bf54b5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
@@ -264,7 +264,7 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
         }
       }
 
-      LOG.debug(this.toString());
+      LOG.debug("{}", this);
 
       if (smapsEnabled) {
         // Update smaps info
@@ -403,13 +403,10 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
               // memory reclaimable by killing the process
               total += info.anonymous;
 
-              if (LOG.isDebugEnabled()) {
-                LOG.debug(" total(" + olderThanAge + "): PID : " + p.getPid()
-                    + ", info : " + info.toString()
-                    + ", total : " + (total * KB_TO_BYTES));
-              }
+              LOG.debug(" total({}): PID : {}, info : {}, total : {}",
+                  olderThanAge, p.getPid(), info, (total * KB_TO_BYTES));
             }
-            LOG.debug(procMemInfo.toString());
+            LOG.debug("{}", procMemInfo);
           }
         }
       }
@@ -468,9 +465,7 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
   @Override
   public float getCpuUsagePercent() {
     BigInteger processTotalJiffies = getTotalProcessJiffies();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Process " + pid + " jiffies:" + processTotalJiffies);
-    }
+    LOG.debug("Process {} jiffies:{}", pid, processTotalJiffies);
     cpuTimeTracker.updateElapsedJiffies(processTotalJiffies,
         clock.getTime());
     return cpuTimeTracker.getCpuTrackerUsagePercent();
@@ -793,9 +788,7 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
           if (memInfo.find()) {
             String key = memInfo.group(1).trim();
             String value = memInfo.group(2).replace(KB, "").trim();
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("MemInfo : " + key + " : Value  : " + value);
-            }
+            LOG.debug("MemInfo : {} : Value  : {}", key, value);
 
             if (memoryMappingInfo != null) {
               memoryMappingInfo.setMemInfo(key, value);
@@ -941,9 +934,7 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
       if (info == null) {
         return;
       }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("setMemInfo : memInfo : " + info);
-      }
+      LOG.debug("setMemInfo : memInfo : {}", info);
       switch (info) {
       case SIZE:
         size = val;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java
index d7a92f0..2eb043e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java
@@ -133,11 +133,11 @@ public class WindowsBasedProcessTree extends ResourceCalculatorProcessTree {
             pInfo.cpuTimeMs = Long.parseLong(procInfo[3]);
             allProcs.put(pInfo.pid, pInfo);
           } catch (NumberFormatException nfe) {
-            LOG.debug("Error parsing procInfo." + nfe);
+            LOG.debug("Error parsing procInfo.", nfe);
           }
         } else {
-          LOG.debug("Expected split length of proc info to be "
-              + procInfoSplitCount + ". Got " + procInfo.length);
+          LOG.debug("Expected split length of proc info to be {}. Got {}",
+              procInfoSplitCount, procInfo.length);
         }
       }
     }
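The WindowsBasedProcessTree change is slightly more than a mechanical rewrite: concatenating the exception ("..." + nfe) only appended its toString() to the message, whereas passing it as the final argument selects the Logger.debug(String, Throwable) overload and records the full stack trace. A short sketch, using a made-up unparsable value to trigger the exception:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ThrowableArgumentSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(ThrowableArgumentSketch.class);

  public static void main(String[] args) {
    try {
      Long.parseLong("not-a-number");   // hypothetical bad procInfo field
    } catch (NumberFormatException nfe) {
      // Old form: the message ends with nfe.toString(); no stack trace.
      LOG.debug("Error parsing procInfo." + nfe);
      // New form: nfe is the throwable argument, so the stack trace is logged.
      LOG.debug("Error parsing procInfo.", nfe);
    }
  }
}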
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/YarnVersionInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/YarnVersionInfo.java
index c6399d9..1865492 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/YarnVersionInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/YarnVersionInfo.java
@@ -102,7 +102,7 @@ public class YarnVersionInfo extends VersionInfo {
   }
   
   public static void main(String[] args) {
-    LOG.debug("version: "+ getVersion());
+    LOG.debug("version: {}", getVersion());
     System.out.println("YARN " + getVersion());
     System.out.println("Subversion " + getUrl() + " -r " + getRevision());
     System.out.println("Compiled by " + getUser() + " on " + getDate());
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
index 56baa89..8065f40 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
@@ -271,7 +271,7 @@ public class TestProcfsBasedProcessTree {
       fReader = new FileReader(pidFileName);
       pidFile = new BufferedReader(fReader);
     } catch (FileNotFoundException f) {
-      LOG.debug("PidFile doesn't exist : " + pidFileName);
+      LOG.debug("PidFile doesn't exist : {}", pidFileName);
       return pid;
     }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/adaptor/DefaultCsiAdaptorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/adaptor/DefaultCsiAdaptorImpl.java
index a203587..26ffae4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/adaptor/DefaultCsiAdaptorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/adaptor/DefaultCsiAdaptorImpl.java
@@ -99,16 +99,12 @@ public class DefaultCsiAdaptorImpl implements CsiAdaptorPlugin {
   @Override
   public NodePublishVolumeResponse nodePublishVolume(
       NodePublishVolumeRequest request) throws YarnException, IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Received nodePublishVolume call, request: {}",
-          request.toString());
-    }
+    LOG.debug("Received nodePublishVolume call, request: {}",
+        request);
     Csi.NodePublishVolumeRequest req = ProtoTranslatorFactory
         .getTranslator(NodePublishVolumeRequest.class,
             Csi.NodePublishVolumeRequest.class).convertTo(request);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Translate to CSI proto message: {}", req.toString());
-    }
+    LOG.debug("Translate to CSI proto message: {}", req);
     csiClient.nodePublishVolume(req);
     return NodePublishVolumeResponse.newInstance();
   }
@@ -116,16 +112,12 @@ public class DefaultCsiAdaptorImpl implements CsiAdaptorPlugin {
   @Override
   public NodeUnpublishVolumeResponse nodeUnpublishVolume(
       NodeUnpublishVolumeRequest request) throws YarnException, IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Received nodeUnpublishVolume call, request: {}",
-          request.toString());
-    }
+    LOG.debug("Received nodeUnpublishVolume call, request: {}",
+        request);
     Csi.NodeUnpublishVolumeRequest req = ProtoTranslatorFactory
         .getTranslator(NodeUnpublishVolumeRequest.class,
             Csi.NodeUnpublishVolumeRequest.class).convertTo(request);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Translate to CSI proto message: {}", req.toString());
-    }
+    LOG.debug("Translate to CSI proto message: {}", req);
     csiClient.nodeUnpublishVolume(req);
     return NodeUnpublishVolumeResponse.newInstance();
   }
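The CSI adaptor hunks also drop the explicit request.toString() calls. Passing the object itself lets the formatter invoke toString() only when the message is actually rendered, and slf4j prints "null" rather than throwing if the reference is ever null, while an explicit .toString() on the argument would fail before the log level is even checked. A hedged illustration, with a stand-in request type that is not part of the real CSI API:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LazyToStringSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(LazyToStringSketch.class);

  /** Hypothetical stand-in for a volume request object. */
  static class VolumeRequest {
    @Override
    public String toString() {
      return "VolumeRequest{volumeId=vol-0001}";
    }
  }

  public static void main(String[] args) {
    VolumeRequest request = null;

    // request.toString() would throw NullPointerException here regardless of
    // the log level; the placeholder form simply renders "null".
    LOG.debug("Received nodePublishVolume call, request: {}", request);

    request = new VolumeRequest();
    // toString() runs only if DEBUG is enabled and the message is built.
    LOG.debug("Translate to CSI proto message: {}", request);
  }
}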
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
index d94605f..e30d6c6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
@@ -275,9 +275,7 @@ public class AHSWebServices extends WebServices {
         try {
           nodeHttpAddress = getNMWebAddressFromRM(conf, nmId);
         } catch (Exception ex) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(ex.getMessage());
-          }
+          LOG.debug("{}", ex);
         }
       }
       if (nodeHttpAddress == null || nodeHttpAddress.isEmpty()) {
@@ -420,9 +418,7 @@ public class AHSWebServices extends WebServices {
         try {
           nodeHttpAddress = getNMWebAddressFromRM(conf, nmId);
         } catch (Exception ex) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(ex.getMessage());
-          }
+          LOG.debug("{}", ex);
         }
       }
       if (nodeHttpAddress == null || nodeHttpAddress.isEmpty()) {
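One conversion in this file is worth a second look: the original statements logged only ex.getMessage(), while LOG.debug("{}", ex) passes the exception itself as the sole argument. How slf4j treats a lone trailing Throwable paired with a "{}" anchor has differed between releases: it may fill the placeholder with ex.toString(), or it may be extracted as the logged throwable (leaving the literal "{}" in the message). Either way the output changes slightly from the old behaviour. A small check against whatever slf4j version is on the classpath (the exception text is made up):

import org.slf4j.helpers.FormattingTuple;
import org.slf4j.helpers.MessageFormatter;

public class TrailingThrowableCheck {
  public static void main(String[] args) {
    Exception ex = new IllegalStateException("NM web address lookup failed");

    // MessageFormatter is the helper slf4j uses to bind arguments to "{}"
    // anchors; inspecting its output shows which behaviour you get.
    FormattingTuple tuple = MessageFormatter.format("{}", ex);
    System.out.println("rendered message   : " + tuple.getMessage());
    System.out.println("extracted throwable: " + tuple.getThrowable());
  }
}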
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
index c9ce936..5d1a81a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
@@ -1424,9 +1424,7 @@ public class LeveldbTimelineStore extends AbstractService
 
       writeBatch = db.createWriteBatch();
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Deleting entity type:" + entityType + " id:" + entityId);
-      }
+      LOG.debug("Deleting entity type:{} id:{}", entityType, entityId);
       // remove start time from cache and db
       writeBatch.delete(createStartTimeLookupKey(entityId, entityType));
       EntityIdentifier entityIdentifier =
@@ -1452,11 +1450,8 @@ public class LeveldbTimelineStore extends AbstractService
           Object value = GenericObjectMapper.read(key, kp.getOffset());
           deleteKeysWithPrefix(writeBatch, addPrimaryFilterToKey(name, value,
               deletePrefix), pfIterator);
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Deleting entity type:" + entityType + " id:" +
-                entityId + " primary filter entry " + name + " " +
-                value);
-          }
+          LOG.debug("Deleting entity type:{} id:{} primary filter entry {} {}",
+              entityType, entityId, name, value);
         } else if (key[prefixlen] == RELATED_ENTITIES_COLUMN[0]) {
           kp = new KeyParser(key,
               prefixlen + RELATED_ENTITIES_COLUMN.length);
@@ -1471,11 +1466,9 @@ public class LeveldbTimelineStore extends AbstractService
           }
           writeBatch.delete(createReverseRelatedEntityKey(id, type,
               relatedEntityStartTime, entityId, entityType));
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Deleting entity type:" + entityType + " id:" +
-                entityId + " from invisible reverse related entity " +
-                "entry of type:" + type + " id:" + id);
-          }
+          LOG.debug("Deleting entity type:{} id:{} from invisible reverse"
+              + " related entity entry of type:{} id:{}", entityType,
+              entityId, type, id);
         } else if (key[prefixlen] ==
             INVISIBLE_REVERSE_RELATED_ENTITIES_COLUMN[0]) {
           kp = new KeyParser(key, prefixlen +
@@ -1491,11 +1484,8 @@ public class LeveldbTimelineStore extends AbstractService
           }
           writeBatch.delete(createRelatedEntityKey(id, type,
               relatedEntityStartTime, entityId, entityType));
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Deleting entity type:" + entityType + " id:" +
-                entityId + " from related entity entry of type:" +
-                type + " id:" + id);
-          }
+          LOG.debug("Deleting entity type:{} id:{} from related entity entry"
+              +" of type:{} id:{}", entityType, entityId, type, id);
         }
       }
       WriteOptions writeOptions = new WriteOptions();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
index 255547b..9ebcc23 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
@@ -413,9 +413,7 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
       EnumSet<Field> fields) throws IOException {
     Long revStartTime = getStartTimeLong(entityId, entityType);
     if (revStartTime == null) {
-      if ( LOG.isDebugEnabled()) {
-        LOG.debug("Could not find start time for {} {} ", entityType, entityId);
-      }
+      LOG.debug("Could not find start time for {} {} ", entityType, entityId);
       return null;
     }
     byte[] prefix = KeyBuilder.newInstance().add(entityType)
@@ -424,9 +422,7 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
 
     DB db = entitydb.getDBForStartTime(revStartTime);
     if (db == null) {
-      if ( LOG.isDebugEnabled()) {
-        LOG.debug("Could not find db for {} {} ", entityType, entityId);
-      }
+      LOG.debug("Could not find db for {} {} ", entityType, entityId);
       return null;
     }
     try (DBIterator iterator = db.iterator()) {
@@ -1163,9 +1159,7 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
 
   @Override
   public TimelinePutResponse put(TimelineEntities entities) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Starting put");
-    }
+    LOG.debug("Starting put");
     TimelinePutResponse response = new TimelinePutResponse();
     TreeMap<Long, RollingWriteBatch> entityUpdates =
         new TreeMap<Long, RollingWriteBatch>();
@@ -1199,11 +1193,9 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
         indexRollingWriteBatch.close();
       }
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Put " + entityCount + " new leveldb entity entries and "
-          + indexCount + " new leveldb index entries from "
-          + entities.getEntities().size() + " timeline entities");
-    }
+    LOG.debug("Put {} new leveldb entity entries and {} new leveldb index"
+        + " entries from {} timeline entities", entityCount, indexCount,
+        entities.getEntities().size());
     return response;
   }
 
@@ -1521,16 +1513,11 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
 
           // a large delete will hold the lock for too long
           if (batchSize >= writeBatchSize) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Preparing to delete a batch of " + batchSize
-                  + " old start times");
-            }
+            LOG.debug("Preparing to delete a batch of {} old start times",
+                batchSize);
             starttimedb.write(writeBatch);
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Deleted batch of " + batchSize
-                  + ". Total start times deleted so far this cycle: "
-                  + startTimesCount);
-            }
+            LOG.debug("Deleted batch of {}. Total start times deleted"
+                + " so far this cycle: {}", batchSize, startTimesCount);
             IOUtils.cleanupWithLogger(LOG, writeBatch);
             writeBatch = starttimedb.createWriteBatch();
             batchSize = 0;
@@ -1538,16 +1525,11 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
         }
         ++totalCount;
       }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Preparing to delete a batch of " + batchSize
-            + " old start times");
-      }
+      LOG.debug("Preparing to delete a batch of {} old start times",
+          batchSize);
       starttimedb.write(writeBatch);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Deleted batch of " + batchSize
-            + ". Total start times deleted so far this cycle: "
-            + startTimesCount);
-      }
+      LOG.debug("Deleted batch of {}. Total start times deleted so far"
+          + " this cycle: {}", batchSize, startTimesCount);
       LOG.info("Deleted " + startTimesCount + "/" + totalCount
           + " start time entities earlier than " + minStartTime);
     } finally {
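A general caveat that these timeline-store hunks rely on: parameterized logging defers string formatting, but the argument expressions themselves are still evaluated before the call, and a call with more than two arguments goes through the Object... overload, which allocates a small varargs array. For cheap values like the counters and list size here that is negligible, which is why the guards can go; the multi-line format strings joined with + are compile-time constants, so that concatenation costs nothing at runtime either. A sketch of the distinction, with made-up numbers:

import java.util.Arrays;
import java.util.List;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class EagerArgumentSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(EagerArgumentSketch.class);

  public static void main(String[] args) {
    int entityCount = 12;                       // hypothetical counters
    int indexCount = 34;
    List<String> entities = Arrays.asList("e1", "e2", "e3");

    // Cheap arguments: evaluated eagerly, but the cost is trivial, so the
    // isDebugEnabled() guard adds nothing.
    LOG.debug("Put {} new leveldb entity entries and {} new leveldb index"
        + " entries from {} timeline entities", entityCount, indexCount,
        entities.size());

    // Expensive argument: still computed even when DEBUG is off, so a guard
    // remains worthwhile in hot paths.
    if (LOG.isDebugEnabled()) {
      LOG.debug("All entities: {}", String.join(",", entities));
    }
  }
}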
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineACLsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineACLsManager.java
index 6c32eec..47f075f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineACLsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineACLsManager.java
@@ -127,12 +127,9 @@ public class TimelineACLsManager {
     String owner = aclExt.owner;
     AccessControlList domainACL = aclExt.acls.get(applicationAccessType);
     if (domainACL == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("ACL not found for access-type " + applicationAccessType
-            + " for domain " + entity.getDomainId() + " owned by "
-            + owner + ". Using default ["
-            + YarnConfiguration.DEFAULT_YARN_APP_ACL + "]");
-      }
+      LOG.debug("ACL not found for access-type {} for domain {} owned by {}."
+          + " Using default [{}]", applicationAccessType,
+          entity.getDomainId(), owner, YarnConfiguration.DEFAULT_YARN_APP_ACL);
       domainACL =
           new AccessControlList(YarnConfiguration.DEFAULT_YARN_APP_ACL);
     }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineV1DelegationTokenSecretManagerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineV1DelegationTokenSecretManagerService.java
index 85d8cca..3d4d3c5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineV1DelegationTokenSecretManagerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineV1DelegationTokenSecretManagerService.java
@@ -139,9 +139,7 @@ public class TimelineV1DelegationTokenSecretManagerService extends
 
     @Override
     protected void storeNewMasterKey(DelegationKey key) throws IOException {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Storing master key " + key.getKeyId());
-      }
+      LOG.debug("Storing master key {}", key.getKeyId());
       try {
         if (stateStore != null) {
           stateStore.storeTokenMasterKey(key);
@@ -153,9 +151,7 @@ public class TimelineV1DelegationTokenSecretManagerService extends
 
     @Override
     protected void removeStoredMasterKey(DelegationKey key) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Removing master key " + key.getKeyId());
-      }
+      LOG.debug("Removing master key {}", key.getKeyId());
       try {
         if (stateStore != null) {
           stateStore.removeTokenMasterKey(key);
@@ -168,9 +164,7 @@ public class TimelineV1DelegationTokenSecretManagerService extends
     @Override
     protected void storeNewToken(TimelineDelegationTokenIdentifier tokenId,
         long renewDate) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Storing token " + tokenId.getSequenceNumber());
-      }
+      LOG.debug("Storing token {}", tokenId.getSequenceNumber());
       try {
         if (stateStore != null) {
           stateStore.storeToken(tokenId, renewDate);
@@ -183,9 +177,7 @@ public class TimelineV1DelegationTokenSecretManagerService extends
     @Override
     protected void removeStoredToken(TimelineDelegationTokenIdentifier tokenId)
         throws IOException {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Storing token " + tokenId.getSequenceNumber());
-      }
+      LOG.debug("Storing token {}", tokenId.getSequenceNumber());
       try {
         if (stateStore != null) {
           stateStore.removeToken(tokenId);
@@ -198,9 +190,7 @@ public class TimelineV1DelegationTokenSecretManagerService extends
     @Override
     protected void updateStoredToken(TimelineDelegationTokenIdentifier tokenId,
         long renewDate) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Updating token " + tokenId.getSequenceNumber());
-      }
+      LOG.debug("Updating token {}", tokenId.getSequenceNumber());
       try {
         if (stateStore != null) {
           stateStore.updateToken(tokenId, renewDate);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java
index 1534354..7e8addd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java
@@ -105,11 +105,9 @@ public class AMHeartbeatRequestHandler extends Thread {
         if (request == null) {
           throw new YarnException("Null allocateRequest from requestInfo");
         }
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Sending Heartbeat to RM. AskList:"
-              + ((request.getAskList() == null) ? " empty"
-                  : request.getAskList().size()));
-        }
+        LOG.debug("Sending Heartbeat to RM. AskList:{}",
+            ((request.getAskList() == null) ? " empty" :
+            request.getAskList().size()));
 
         request.setResponseId(lastResponseId);
         AllocateResponse response = rmProxyRelayer.allocate(request);
@@ -125,20 +123,16 @@ public class AMHeartbeatRequestHandler extends Thread {
               userUgi, conf);
         }
 
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Received Heartbeat reply from RM. Allocated Containers:"
-              + ((response.getAllocatedContainers() == null) ? " empty"
-                  : response.getAllocatedContainers().size()));
-        }
+        LOG.debug("Received Heartbeat reply from RM. Allocated Containers:{}",
+            ((response.getAllocatedContainers() == null) ? " empty"
+            : response.getAllocatedContainers().size()));
 
         if (requestInfo.getCallback() == null) {
           throw new YarnException("Null callback from requestInfo");
         }
         requestInfo.getCallback().callback(response);
       } catch (InterruptedException ex) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Interrupted while waiting for queue", ex);
-        }
+        LOG.debug("Interrupted while waiting for queue", ex);
       } catch (Throwable ex) {
         LOG.warn(
             "Error occurred while processing heart beat for " + applicationId,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
index 47d23e0..75f0cee 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
@@ -265,11 +265,8 @@ public class LocalityMulticastAMRMProxyPolicy extends AbstractAMRMProxyPolicy {
       // any cluster. Pick a random sub-cluster from active and enabled ones.
       targetId = getSubClusterForUnResolvedRequest(bookkeeper,
           rr.getAllocationRequestId());
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("ERROR resolving sub-cluster for resourceName: "
-            + rr.getResourceName() + ", picked a random subcluster to forward:"
-            + targetId);
-      }
+      LOG.debug("ERROR resolving sub-cluster for resourceName: {}, picked a "
+          + "random subcluster to forward:{}", rr.getResourceName(), targetId);
       if (targetIds != null && targetIds.size() > 0) {
         bookkeeper.addRackRR(targetId, rr);
       } else {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java
index 273118a..07dc7e4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java
@@ -436,10 +436,8 @@ public class SQLFederationStateStore implements FederationStateStore {
             "SubCluster " + subClusterId.toString() + " does not exist";
         FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg);
       }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Got the information about the specified SubCluster "
-            + subClusterInfo.toString());
-      }
+      LOG.debug("Got the information about the specified SubCluster {}",
+          subClusterInfo);
     } catch (SQLException e) {
       FederationStateStoreClientMetrics.failedStateStoreCall();
       FederationStateStoreUtils.logAndThrowRetriableException(LOG,
@@ -700,10 +698,8 @@ public class SQLFederationStateStore implements FederationStateStore {
         FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg);
       }
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Got the information about the specified application  "
-            + request.getApplicationId() + ". The AM is running in " + homeRM);
-      }
+      LOG.debug("Got the information about the specified application {}."
+          + " The AM is running in {}", request.getApplicationId(), homeRM);
 
       FederationStateStoreClientMetrics
           .succeededStateStoreCall(stopTime - startTime);
@@ -852,10 +848,8 @@ public class SQLFederationStateStore implements FederationStateStore {
         subClusterPolicyConfiguration =
             SubClusterPolicyConfiguration.newInstance(request.getQueue(),
                 cstmt.getString(2), ByteBuffer.wrap(cstmt.getBytes(3)));
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Selected from StateStore the policy for the queue: "
-              + subClusterPolicyConfiguration.toString());
-        }
+        LOG.debug("Selected from StateStore the policy for the queue: {}",
+            subClusterPolicyConfiguration);
       } else {
         LOG.warn("Policy for queue: {} does not exist.", request.getQueue());
         return null;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/BaseContainerTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/BaseContainerTokenSecretManager.java
index 420c899..a5590fb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/BaseContainerTokenSecretManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/BaseContainerTokenSecretManager.java
@@ -112,11 +112,9 @@ public class BaseContainerTokenSecretManager extends
   protected byte[] retrievePasswordInternal(ContainerTokenIdentifier identifier,
       MasterKeyData masterKey)
       throws org.apache.hadoop.security.token.SecretManager.InvalidToken {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Retrieving password for {} for user {} to be run on NM {}",
-          identifier.getContainerID(), identifier.getUser(),
-          identifier.getNmHostAddress());
-    }
+    LOG.debug("Retrieving password for {} for user {} to be run on NM {}",
+        identifier.getContainerID(), identifier.getUser(),
+        identifier.getNmHostAddress());
     return createPassword(identifier.getBytes(), masterKey.getSecretKey());
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java
index cc1d21f..47d7830 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java
@@ -225,12 +225,12 @@ public class UnmanagedApplicationManager {
     this.heartbeatHandler.resetLastResponseId();
 
     for (Container container : response.getContainersFromPreviousAttempts()) {
-      LOG.debug("RegisterUAM returned existing running container "
-          + container.getId());
+      LOG.debug("RegisterUAM returned existing running container {}",
+          container.getId());
     }
     for (NMToken nmToken : response.getNMTokensFromPreviousAttempts()) {
-      LOG.debug("RegisterUAM returned existing NM token for node "
-          + nmToken.getNodeId());
+      LOG.debug("RegisterUAM returned existing NM token for node {}",
+          nmToken.getNodeId());
     }
     LOG.info(
         "RegisterUAM returned {} existing running container and {} NM tokens",
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/YarnServerSecurityUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/YarnServerSecurityUtils.java
index e61798d..c5ae56f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/YarnServerSecurityUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/YarnServerSecurityUtils.java
@@ -153,7 +153,7 @@ public final class YarnServerSecurityUtils {
       credentials.readTokenStorageStream(buf);
       if (LOG.isDebugEnabled()) {
         for (Token<? extends TokenIdentifier> tk : credentials.getAllTokens()) {
-          LOG.debug(tk.getService() + " = " + tk.toString());
+          LOG.debug("{}={}", tk.getService(), tk);
         }
       }
     }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/LogWebService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/LogWebService.java
index 246ee9c..b513751 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/LogWebService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/LogWebService.java
@@ -179,9 +179,7 @@ import java.security.PrivilegedExceptionAction;
           nodeHttpAddress =
               LogWebServiceUtils.getNMWebAddressFromRM(yarnConf, nmId);
         } catch (Exception ex) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(ex.getMessage());
-          }
+          LOG.debug("{}", ex);
         }
       }
       if (nodeHttpAddress == null || nodeHttpAddress.isEmpty()) {
@@ -384,9 +382,7 @@ import java.security.PrivilegedExceptionAction;
           nodeHttpAddress =
               LogWebServiceUtils.getNMWebAddressFromRM(yarnConf, nmId);
         } catch (Exception ex) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(ex.getMessage());
-          }
+          LOG.debug("{}", ex);
         }
       }
       if (nodeHttpAddress == null || nodeHttpAddress.isEmpty()) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
index 90b12e9..f151528 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
@@ -573,10 +573,8 @@ public class DefaultContainerExecutor extends ContainerExecutor {
     String user = ctx.getUser();
     String pid = ctx.getPid();
     Signal signal = ctx.getSignal();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Sending signal " + signal.getValue() + " to pid " + pid
-          + " as user " + user);
-    }
+    LOG.debug("Sending signal {} to pid {} as user {}",
+        signal.getValue(), pid, user);
     if (!containerIsAlive(pid)) {
       return false;
     }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
index e665c5a..77bc123 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
@@ -85,11 +85,8 @@ public class DeletionService extends AbstractService {
 
   public void delete(DeletionTask deletionTask) {
     if (debugDelay != -1) {
-      if (LOG.isDebugEnabled()) {
-        String msg = String.format("Scheduling DeletionTask (delay %d) : %s",
-            debugDelay, deletionTask.toString());
-        LOG.debug(msg);
-      }
+      LOG.debug("Scheduling DeletionTask (delay {}) : {}", debugDelay,
+          deletionTask);
       recordDeletionTaskInStateStore(deletionTask);
       sched.schedule(deletionTask, debugDelay, TimeUnit.SECONDS);
     }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index 600f802..039a510 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -314,12 +314,10 @@ public class LinuxContainerExecutor extends ContainerExecutor {
     try {
       resourceHandlerChain = ResourceHandlerModule
           .getConfiguredResourceHandlerChain(conf, nmContext);
-      if (LOG.isDebugEnabled()) {
-        final boolean enabled = resourceHandlerChain != null;
-        LOG.debug("Resource handler chain enabled = " + enabled);
-      }
+      LOG.debug("Resource handler chain enabled = {}",
+          (resourceHandlerChain != null));
       if (resourceHandlerChain != null) {
-        LOG.debug("Bootstrapping resource handler chain: " +
+        LOG.debug("Bootstrapping resource handler chain: {}",
             resourceHandlerChain);
         resourceHandlerChain.bootstrap(conf);
       }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
index c6719d1..1ed1fda 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
@@ -200,11 +200,8 @@ public class NodeManager extends CompositeService
                 + e.getMessage(), e);
       }
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Distributed Node Attributes is enabled"
-          + " with provider class as : "
-          + attributesProvider.getClass().toString());
-    }
+    LOG.debug("Distributed Node Attributes is enabled with provider class"
+        + " as : {}", attributesProvider.getClass());
     return attributesProvider;
   }
 
@@ -238,10 +235,8 @@ public class NodeManager extends CompositeService
             "Failed to create NodeLabelsProvider : " + e.getMessage(), e);
       }
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Distributed Node Labels is enabled"
-          + " with provider class as : " + provider.getClass().toString());
-    }
+    LOG.debug("Distributed Node Labels is enabled"
+        + " with provider class as : {}", provider.getClass());
     return provider;
   }
 
@@ -617,14 +612,10 @@ public class NodeManager extends CompositeService
           && !ApplicationState.FINISHED.equals(app.getApplicationState())) {
         registeringCollectors.putIfAbsent(entry.getKey(), entry.getValue());
         AppCollectorData data = entry.getValue();
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(entry.getKey() + " : " + data.getCollectorAddr() + "@<"
-              + data.getRMIdentifier() + ", " + data.getVersion() + ">");
-        }
+        LOG.debug("{} : {}@<{}, {}>", entry.getKey(), data.getCollectorAddr(),
+            data.getRMIdentifier(), data.getVersion());
       } else {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Remove collector data for done app " + entry.getKey());
-        }
+        LOG.debug("Remove collector data for done app {}", entry.getKey());
       }
     }
     knownCollectors.clear();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
index 1f13eb8..8022a07 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
@@ -243,10 +243,8 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
       LOG.error(message);
       throw new YarnException(message);
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(YARN_NODEMANAGER_DURATION_TO_TRACK_STOPPED_CONTAINERS + " :"
-        + durationToTrackStoppedContainers);
-    }
+    LOG.debug("{} :{}", YARN_NODEMANAGER_DURATION_TO_TRACK_STOPPED_CONTAINERS,
+        durationToTrackStoppedContainers);
     super.serviceInit(conf);
     LOG.info("Initialized nodemanager with :" +
         " physical-memory=" + memoryMb + " virtual-memory=" + virtualMemoryMb +
@@ -406,10 +404,8 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
         List<LogAggregationReport> logAggregationReports =
             context.getNMLogAggregationStatusTracker()
                 .pullCachedLogAggregationReports();
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("The cache log aggregation status size:"
-              + logAggregationReports.size());
-        }
+        LOG.debug("The cache log aggregation status size:{}",
+            logAggregationReports.size());
         if (logAggregationReports != null
             && !logAggregationReports.isEmpty()) {
           request.setLogAggregationReportsForApps(logAggregationReports);
@@ -519,10 +515,9 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
     nodeHealthStatus.setIsNodeHealthy(healthChecker.isHealthy());
     nodeHealthStatus.setLastHealthReportTime(healthChecker
       .getLastHealthReportTime());
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Node's health-status : " + nodeHealthStatus.getIsNodeHealthy()
-          + ", " + nodeHealthStatus.getHealthReport());
-    }
+    LOG.debug("Node's health-status : {}, {}",
+        nodeHealthStatus.getIsNodeHealthy(),
+        nodeHealthStatus.getHealthReport());
     List<ContainerStatus> containersStatuses = getContainerStatuses();
     ResourceUtilization containersUtilization = getContainersUtilization();
     ResourceUtilization nodeUtilization = getNodeUtilization();
@@ -603,10 +598,8 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
           container.cloneAndGetContainerStatus();
       if (containerStatus.getState() == ContainerState.COMPLETE) {
         if (isApplicationStopped(applicationId)) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(applicationId + " is completing, " + " remove "
-                + containerId + " from NM context.");
-          }
+          LOG.debug("{} is completing, remove {} from NM context.",
+              applicationId, containerId);
           context.getContainers().remove(containerId);
           pendingCompletedContainers.put(containerId, containerStatus);
         } else {
@@ -624,11 +617,9 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
     }
 
     containerStatuses.addAll(pendingCompletedContainers.values());
+    LOG.debug("Sending out {} container statuses: {}",
+        containerStatuses.size(), containerStatuses);
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Sending out " + containerStatuses.size()
-          + " container statuses: " + containerStatuses);
-    }
     return containerStatuses;
   }
 
@@ -815,8 +806,8 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
     }
     if (LOG.isDebugEnabled()) {
       for (Map.Entry<ApplicationId, Credentials> entry : map.entrySet()) {
-        LOG.debug("Retrieved credentials form RM for " + entry.getKey() + ": "
-            + entry.getValue().getAllTokens());
+        LOG.debug("Retrieved credentials form RM for {}: {}",
+            entry.getKey(), entry.getValue().getAllTokens());
       }
     }
     return map;
@@ -1126,10 +1117,8 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
         NodeHeartbeatResponse response) {
       if (isValueSented()) {
         if (response.getAreNodeAttributesAcceptedByRM()) {
-          if(LOG.isDebugEnabled()){
-            LOG.debug("Node attributes {" + getPreviousValue()
-                + "} were Accepted by RM ");
-          }
+          LOG.debug("Node attributes {{}} were Accepted by RM ",
+              getPreviousValue());
         } else {
           // case where updated node attributes from NodeAttributesProvider
           // is sent to RM and RM rejected the attributes
@@ -1279,11 +1268,8 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
         NodeHeartbeatResponse response) {
       if (isValueSented()) {
         if (response.getAreNodeLabelsAcceptedByRM()) {
-          if(LOG.isDebugEnabled()){
-            LOG.debug(
-                "Node Labels {" + StringUtils.join(",", getPreviousValue())
-                    + "} were Accepted by RM ");
-          }
+          LOG.debug("Node Labels {{}} were Accepted by RM",
+              StringUtils.join(",", getPreviousValue()));
         } else {
           // case where updated labels from NodeLabelsProvider is sent to RM and
           // RM rejected the labels
@@ -1410,10 +1396,7 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
           Resource newResource = response.getResource();
           if (newResource != null) {
             updateNMResource(newResource);
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Node's resource is updated to " +
-                  newResource.toString());
-            }
+            LOG.debug("Node's resource is updated to {}", newResource);
           }
           if (timelineServiceV2Enabled) {
             updateTimelineCollectorData(response);
@@ -1453,9 +1436,7 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
       Map<ApplicationId, AppCollectorData> incomingCollectorsMap =
           response.getAppCollectors();
       if (incomingCollectorsMap == null) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("No collectors to update RM");
-        }
+        LOG.debug("No collectors to update RM");
         return;
       }
       Map<ApplicationId, AppCollectorData> knownCollectors =
@@ -1472,11 +1453,8 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
           // the known data (updates the known data).
           AppCollectorData existingData = knownCollectors.get(appId);
           if (AppCollectorData.happensBefore(existingData, collectorData)) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Sync a new collector address: "
-                  + collectorData.getCollectorAddr()
-                  + " for application: " + appId + " from RM.");
-            }
+            LOG.debug("Sync a new collector address: {} for application: {}"
+                + " from RM.", collectorData.getCollectorAddr(), appId);
             // Update information for clients.
             NMTimelinePublisher nmTimelinePublisher =
                 context.getNMTimelinePublisher();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
index 02ff432..52f3313 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
@@ -247,11 +247,11 @@ public class AMRMProxyService extends CompositeService implements
         // Retrieve the AM container credentials from NM context
         Credentials amCred = null;
         for (Container container : this.nmContext.getContainers().values()) {
-          LOG.debug("From NM Context container " + container.getContainerId());
+          LOG.debug("From NM Context container {}", container.getContainerId());
           if (container.getContainerId().getApplicationAttemptId().equals(
               attemptId) && container.getContainerTokenIdentifier() != null) {
-            LOG.debug("Container type "
-                + container.getContainerTokenIdentifier().getContainerType());
+            LOG.debug("Container type {}",
+                container.getContainerTokenIdentifier().getContainerType());
             if (container.getContainerTokenIdentifier()
                 .getContainerType() == ContainerType.APPLICATION_MASTER) {
               LOG.info("AM container {} found in context, has credentials: {}",
@@ -764,9 +764,7 @@ public class AMRMProxyService extends CompositeService implements
           AMRMProxyService.this.stopApplication(event.getApplicationID());
           break;
         default:
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("AMRMProxy is ignoring event: " + event.getType());
-          }
+          LOG.debug("AMRMProxy is ignoring event: {}", event.getType());
           break;
         }
       } else {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyTokenSecretManager.java
index f36d4da..711682d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyTokenSecretManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyTokenSecretManager.java
@@ -248,10 +248,7 @@ public class AMRMProxyTokenSecretManager extends
     try {
       ApplicationAttemptId applicationAttemptId =
           identifier.getApplicationAttemptId();
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Trying to retrieve password for "
-            + applicationAttemptId);
-      }
+      LOG.debug("Trying to retrieve password for {}", applicationAttemptId);
       if (!appAttemptSet.contains(applicationAttemptId)) {
         throw new InvalidToken(applicationAttemptId
             + " not found in AMRMProxyTokenSecretManager.");
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/DefaultRequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/DefaultRequestInterceptor.java
index 3ba4d20..d6deca0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/DefaultRequestInterceptor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/DefaultRequestInterceptor.java
@@ -129,9 +129,7 @@ public final class DefaultRequestInterceptor extends
   @Override
   public AllocateResponse allocate(final AllocateRequest request)
       throws YarnException, IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Forwarding allocate request to the real YARN RM");
-    }
+    LOG.debug("Forwarding allocate request to the real YARN RM");
     AllocateResponse allocateResponse = rmClient.allocate(request);
     if (allocateResponse.getAMRMToken() != null) {
       YarnServerSecurityUtils.updateAMRMToken(allocateResponse.getAMRMToken(),
@@ -161,10 +159,8 @@ public final class DefaultRequestInterceptor extends
   public DistributedSchedulingAllocateResponse allocateForDistributedScheduling(
       DistributedSchedulingAllocateRequest request)
       throws YarnException, IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Forwarding allocateForDistributedScheduling request" +
-          "to the real YARN RM");
-    }
+    LOG.debug("Forwarding allocateForDistributedScheduling request" +
+        "to the real YARN RM");
     if (getApplicationContext().getNMCotext()
         .isDistributedSchedulingEnabled()) {
       DistributedSchedulingAllocateResponse allocateResponse =
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
index ccbb035..992dc82 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
@@ -401,7 +401,7 @@ public class FederationInterceptor extends AbstractRequestInterceptor {
             amrmToken.decodeFromUrlString(
                 new String(entry.getValue(), STRING_TO_BYTE_FORMAT));
             uamMap.put(scId, amrmToken);
-            LOG.debug("Recovered UAM in " + scId + " from NMSS");
+            LOG.debug("Recovered UAM in {} from NMSS", scId);
           }
         }
         LOG.info("Found {} existing UAMs for application {} in NMStateStore",
@@ -443,8 +443,8 @@ public class FederationInterceptor extends AbstractRequestInterceptor {
               .getContainersFromPreviousAttempts()) {
             containerIdToSubClusterIdMap.put(container.getId(), subClusterId);
             containers++;
-            LOG.debug("  From subcluster " + subClusterId
-                + " running container " + container.getId());
+            LOG.debug("  From subcluster {} running container {}",
+                subClusterId, container.getId());
           }
           LOG.info("Recovered {} running containers from UAM in {}",
               response.getContainersFromPreviousAttempts().size(),
@@ -471,8 +471,8 @@ public class FederationInterceptor extends AbstractRequestInterceptor {
         containerIdToSubClusterIdMap.put(container.getContainerId(),
             this.homeSubClusterId);
         containers++;
-        LOG.debug("  From home RM " + this.homeSubClusterId
-            + " running container " + container.getContainerId());
+        LOG.debug("  From home RM {} running container {}",
+            this.homeSubClusterId, container.getContainerId());
       }
       LOG.info("{} running containers including AM recovered from home RM {}",
           response.getContainerList().size(), this.homeSubClusterId);
@@ -797,10 +797,8 @@ public class FederationInterceptor extends AbstractRequestInterceptor {
         try {
           Future<FinishApplicationMasterResponseInfo> future = compSvc.take();
           FinishApplicationMasterResponseInfo uamResponse = future.get();
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Received finish application response from RM: "
-                + uamResponse.getSubClusterId());
-          }
+          LOG.debug("Received finish application response from RM: {}",
+              uamResponse.getSubClusterId());
           if (uamResponse.getResponse() == null
               || !uamResponse.getResponse().getIsUnregistered()) {
             failedToUnRegister = true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/NMProtoUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/NMProtoUtils.java
index dd129f5..d61112f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/NMProtoUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/NMProtoUtils.java
@@ -52,22 +52,16 @@ public final class NMProtoUtils {
     int taskId = proto.getId();
     if (proto.hasTaskType() && proto.getTaskType() != null) {
       if (proto.getTaskType().equals(DeletionTaskType.FILE.name())) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Converting recovered FileDeletionTask");
-        }
+        LOG.debug("Converting recovered FileDeletionTask");
         return convertProtoToFileDeletionTask(proto, deletionService, taskId);
       } else if (proto.getTaskType().equals(
           DeletionTaskType.DOCKER_CONTAINER.name())) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Converting recovered DockerContainerDeletionTask");
-        }
+        LOG.debug("Converting recovered DockerContainerDeletionTask");
         return convertProtoToDockerContainerDeletionTask(proto, deletionService,
             taskId);
       }
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Unable to get task type, trying FileDeletionTask");
-    }
+    LOG.debug("Unable to get task type, trying FileDeletionTask");
     return convertProtoToFileDeletionTask(proto, deletionService, taskId);
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
index 01611e9..74cd84f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
@@ -638,9 +638,7 @@ public class AuxServices extends AbstractService
             .getName());
         loadedAuxServices.add(service.getName());
         if (existingService != null && existingService.equals(service)) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Auxiliary service already loaded: " + service.getName());
-          }
+          LOG.debug("Auxiliary service already loaded: {}", service.getName());
           continue;
         }
         foundChanges = true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index f3f5f8b..c43b825 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -368,9 +368,7 @@ public class ContainerManagerImpl extends CompositeService implements
                appsState.getIterator()) {
         while (rasIterator.hasNext()) {
           ContainerManagerApplicationProto proto = rasIterator.next();
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Recovering application with state: " + proto.toString());
-          }
+          LOG.debug("Recovering application with state: {}", proto);
           recoverApplication(proto);
         }
       }
@@ -379,9 +377,7 @@ public class ContainerManagerImpl extends CompositeService implements
                stateStore.getContainerStateIterator()) {
         while (rcsIterator.hasNext()) {
           RecoveredContainerState rcs = rcsIterator.next();
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Recovering container with state: " + rcs);
-          }
+          LOG.debug("Recovering container with state: {}", rcs);
           recoverContainer(rcs);
         }
       }
@@ -428,20 +424,16 @@ public class ContainerManagerImpl extends CompositeService implements
       FlowContextProto fcp = p.getFlowContext();
       fc = new FlowContext(fcp.getFlowName(), fcp.getFlowVersion(),
           fcp.getFlowRunId());
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(
-            "Recovering Flow context: " + fc + " for an application " + appId);
-      }
+      LOG.debug(
+          "Recovering Flow context: {} for an application {}", fc, appId);
     } else {
       // in upgrade situations, where there is no prior existing flow context,
       // default would be used.
       fc = new FlowContext(TimelineUtils.generateDefaultFlowName(null, appId),
           YarnConfiguration.DEFAULT_FLOW_VERSION, appId.getClusterTimestamp());
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(
-            "No prior existing flow context found. Using default Flow context: "
-                + fc + " for an application " + appId);
-      }
+      LOG.debug(
+          "No prior existing flow context found. Using default Flow context: "
+          + "{} for an application {}", fc, appId);
     }
 
     LOG.info("Recovering application " + appId);
@@ -1206,11 +1198,8 @@ public class ContainerManagerImpl extends CompositeService implements
         flowRunId = Long.parseLong(flowRunIdStr);
       }
       flowContext = new FlowContext(flowName, flowVersion, flowRunId);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(
-            "Flow context: " + flowContext + " created for an application "
-                + applicationID);
-      }
+      LOG.debug("Flow context: {} created for an application {}",
+          flowContext, applicationID);
     }
     return flowContext;
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
index 5f02e33..f3d4e51 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
@@ -639,10 +639,7 @@ public class ApplicationImpl implements Application {
 
     try {
       ApplicationId applicationID = event.getApplicationID();
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(
-            "Processing " + applicationID + " of type " + event.getType());
-      }
+      LOG.debug("Processing {} of type {}", applicationID, event.getType());
       ApplicationState oldState = stateMachine.getCurrentState();
       ApplicationState newState = null;
       try {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index d25206c..00e6aa7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -2110,9 +2110,7 @@ public class ContainerImpl implements Container {
     this.writeLock.lock();
     try {
       ContainerId containerID = event.getContainerID();
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Processing " + containerID + " of type " + event.getType());
-      }
+      LOG.debug("Processing {} of type {}", containerID, event.getType());
       ContainerState oldState = stateMachine.getCurrentState();
       ContainerState newState = null;
       try {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/DockerContainerDeletionTask.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/DockerContainerDeletionTask.java
index 70b918a..8882b45 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/DockerContainerDeletionTask.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/DockerContainerDeletionTask.java
@@ -52,10 +52,7 @@ public class DockerContainerDeletionTask extends DeletionTask
    */
   @Override
   public void run() {
-    if (LOG.isDebugEnabled()) {
-      String msg = String.format("Running DeletionTask : %s", toString());
-      LOG.debug(msg);
-    }
+    LOG.debug("Running DeletionTask : {}", this);
     LinuxContainerExecutor exec = ((LinuxContainerExecutor)
         getDeletionService().getContainerExecutor());
     exec.removeDockerContainer(containerId);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/FileDeletionTask.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/FileDeletionTask.java
index fd07f16..a8aab72 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/FileDeletionTask.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/FileDeletionTask.java
@@ -95,16 +95,11 @@ public class FileDeletionTask extends DeletionTask implements Runnable {
    */
   @Override
   public void run() {
-    if (LOG.isDebugEnabled()) {
-      String msg = String.format("Running DeletionTask : %s", toString());
-      LOG.debug(msg);
-    }
+    LOG.debug("Running DeletionTask : {}", this);
     boolean error = false;
     if (null == getUser()) {
       if (baseDirs == null || baseDirs.size() == 0) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("NM deleting absolute path : " + subDir);
-        }
+        LOG.debug("NM deleting absolute path : {}", subDir);
         try {
           lfs.delete(subDir, true);
         } catch (IOException e) {
@@ -114,9 +109,7 @@ public class FileDeletionTask extends DeletionTask implements Runnable {
       } else {
         for (Path baseDir : baseDirs) {
           Path del = subDir == null? baseDir : new Path(baseDir, subDir);
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("NM deleting path : " + del);
-          }
+          LOG.debug("NM deleting path : {}", del);
           try {
             lfs.delete(del, true);
           } catch (IOException e) {
@@ -127,10 +120,7 @@ public class FileDeletionTask extends DeletionTask implements Runnable {
       }
     } else {
       try {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(
-              "Deleting path: [" + subDir + "] as user: [" + getUser() + "]");
-        }
+        LOG.debug("Deleting path: [{}] as user [{}]", subDir, getUser());
         if (baseDirs == null || baseDirs.size() == 0) {
           getDeletionService().getContainerExecutor().deleteAsUser(
               new DeletionAsUserContext.Builder()
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerCleanup.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerCleanup.java
index 5800ef5..b63becf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerCleanup.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerCleanup.java
@@ -102,19 +102,14 @@ public class ContainerCleanup implements Runnable {
           + " No cleanup needed to be done");
       return;
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Marking container " + containerIdStr + " as inactive");
-    }
+    LOG.debug("Marking container {} as inactive", containerIdStr);
     // this should ensure that if the container process has not launched
     // by this time, it will never be launched
     exec.deactivateContainer(containerId);
     Path pidFilePath = launch.getPidFilePath();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Getting pid for container {} to kill"
-              + " from pid file {}", containerIdStr, pidFilePath != null ?
-          pidFilePath : "null");
-    }
-
+    LOG.debug("Getting pid for container {} to kill"
+        + " from pid file {}", containerIdStr, pidFilePath != null ?
+        pidFilePath : "null");
     // however the container process may have already started
     try {
 
@@ -194,20 +189,17 @@ public class ContainerCleanup implements Runnable {
 
   private void signalProcess(String processId, String user,
       String containerIdStr) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Sending signal to pid " + processId + " as user " + user
-          + " for container " + containerIdStr);
-    }
+    LOG.debug("Sending signal to pid {} as user {} for container {}",
+        processId, user, containerIdStr);
     final ContainerExecutor.Signal signal =
         sleepDelayBeforeSigKill > 0 ? ContainerExecutor.Signal.TERM :
             ContainerExecutor.Signal.KILL;
 
     boolean result = sendSignal(user, processId, signal);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Sent signal " + signal + " to pid " + processId + " as user "
-          + user + " for container " + containerIdStr + ", result="
-          + (result ? "success" : "failed"));
-    }
+    LOG.debug("Sent signal {} to pid {} as user {} for container {},"
+        + " result={}", signal, processId, user, containerIdStr,
+        (result ? "success" : "failed"));
+
     if (sleepDelayBeforeSigKill > 0) {
       new ContainerExecutor.DelayedProcessKiller(container, user, processId,
           sleepDelayBeforeSigKill, ContainerExecutor.Signal.KILL, exec).start();
@@ -232,9 +224,7 @@ public class ContainerCleanup implements Runnable {
             .setContainer(container)
             .setUser(container.getUser())
             .build());
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Sent signal to docker container " + containerIdStr
-          + " as user " + user + ", result=" + (result ? "success" : "failed"));
-    }
+    LOG.debug("Sent signal to docker container {} as user {}, result={}",
+        containerIdStr, user, (result ? "success" : "failed"));
   }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index 9b6fae7..8516543 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -647,11 +647,8 @@ public class ContainerLaunch implements Callable<Integer> {
 
   protected void handleContainerExitCode(int exitCode, Path containerLogDir) {
     ContainerId containerId = container.getContainerId();
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Container " + containerId + " completed with exit code "
-          + exitCode);
-    }
+    LOG.debug("Container {} completed with exit code {}", containerId,
+        exitCode);
 
     StringBuilder diagnosticInfo =
         new StringBuilder("Container exited with a non-zero exit code ");
@@ -840,22 +837,17 @@ public class ContainerLaunch implements Callable<Integer> {
       return;
     }
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Getting pid for container " + containerIdStr
-          + " to send signal to from pid file "
-          + (pidFilePath != null ? pidFilePath.toString() : "null"));
-    }
+    LOG.debug("Getting pid for container {} to send signal to from pid"
+        + " file {}", containerIdStr,
+        (pidFilePath != null ? pidFilePath.toString() : "null"));
 
     try {
       // get process id from pid file if available
       // else if shell is still active, get it from the shell
       String processId = getContainerPid();
       if (processId != null) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Sending signal to pid " + processId
-              + " as user " + user
-              + " for container " + containerIdStr);
-        }
+        LOG.debug("Sending signal to pid {} as user {} for container {}",
+            processId, user, containerIdStr);
 
         boolean result = exec.signalContainer(
             new ContainerSignalContext.Builder()
@@ -1013,10 +1005,8 @@ public class ContainerLaunch implements Callable<Integer> {
     String containerIdStr = 
         container.getContainerId().toString();
     String processId;
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Accessing pid for container " + containerIdStr
-          + " from pid file " + pidFilePath);
-    }
+    LOG.debug("Accessing pid for container {} from pid file {}",
+        containerIdStr, pidFilePath);
     int sleepCounter = 0;
     final int sleepInterval = 100;
 
@@ -1025,10 +1015,7 @@ public class ContainerLaunch implements Callable<Integer> {
     while (true) {
       processId = ProcessIdFileReader.getProcessId(pidFilePath);
       if (processId != null) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(
-              "Got pid " + processId + " for container " + containerIdStr);
-        }
+        LOG.debug("Got pid {} for container {}", processId, containerIdStr);
         break;
       }
       else if ((sleepCounter*sleepInterval) > maxKillWaitTime) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
index 4fa6c02..20e0fc1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
@@ -464,10 +464,7 @@ class CGroupsHandlerImpl implements CGroupsHandler {
   public String createCGroup(CGroupController controller, String cGroupId)
       throws ResourceHandlerException {
     String path = getPathForCGroup(controller, cGroupId);
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("createCgroup: " + path);
-    }
+    LOG.debug("createCgroup: {}", path);
 
     if (!new File(path).mkdir()) {
       throw new ResourceHandlerException("Failed to create cgroup at " + path);
@@ -487,7 +484,7 @@ class CGroupsHandlerImpl implements CGroupsHandler {
               + "/tasks"), "UTF-8"))) {
         str = inl.readLine();
         if (str != null) {
-          LOG.debug("First line in cgroup tasks file: " + cgf + " " + str);
+          LOG.debug("First line in cgroup tasks file: {} {}", cgf, str);
         }
       } catch (IOException e) {
         LOG.warn("Failed to read cgroup tasks file. ", e);
@@ -537,9 +534,7 @@ class CGroupsHandlerImpl implements CGroupsHandler {
     boolean deleted = false;
     String cGroupPath = getPathForCGroup(controller, cGroupId);
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("deleteCGroup: " + cGroupPath);
-    }
+    LOG.debug("deleteCGroup: {}", cGroupPath);
 
     long start = clock.getTime();
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/NetworkPacketTaggingHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/NetworkPacketTaggingHandlerImpl.java
index 9ef89f3..082eb09 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/NetworkPacketTaggingHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/NetworkPacketTaggingHandlerImpl.java
@@ -153,9 +153,7 @@ public class NetworkPacketTaggingHandlerImpl
   @Override
   public List<PrivilegedOperation> teardown()
       throws ResourceHandlerException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("teardown(): Nothing to do");
-    }
+    LOG.debug("teardown(): Nothing to do");
 
     return null;
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java
index c2f0402..2019417 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourceHandlerModule.java
@@ -84,9 +84,7 @@ public class ResourceHandlerModule {
         if (cGroupsHandler == null) {
           cGroupsHandler = new CGroupsHandlerImpl(conf,
               PrivilegedOperationExecutor.getInstance(conf));
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Value of CGroupsHandler is: " + cGroupsHandler);
-          }
+          LOG.debug("Value of CGroupsHandler is: {}", cGroupsHandler);
         }
       }
     }
@@ -318,16 +316,12 @@ public class ResourceHandlerModule {
 
     Map<String, ResourcePlugin> pluginMap = pluginManager.getNameToPlugins();
     if (pluginMap == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("List of plugins of ResourcePluginManager was empty " +
-            "while trying to add ResourceHandlers from configuration!");
-      }
+      LOG.debug("List of plugins of ResourcePluginManager was empty " +
+          "while trying to add ResourceHandlers from configuration!");
       return;
     } else {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("List of plugins of ResourcePluginManager: " +
-            pluginManager.getNameToPlugins());
-      }
+      LOG.debug("List of plugins of ResourcePluginManager: {}",
+          pluginManager.getNameToPlugins());
     }
 
     for (ResourcePlugin plugin : pluginMap.values()) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficControlBandwidthHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficControlBandwidthHandlerImpl.java
index d1dac4b..efe9db3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficControlBandwidthHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficControlBandwidthHandlerImpl.java
@@ -185,10 +185,8 @@ public class TrafficControlBandwidthHandlerImpl
       throws ResourceHandlerException {
     String containerIdStr = containerId.toString();
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Attempting to reacquire classId for container: " +
-          containerIdStr);
-    }
+    LOG.debug("Attempting to reacquire classId for container: {}",
+        containerIdStr);
 
     String classIdStrFromFile = cGroupsHandler.getCGroupParam(
         CGroupsHandler.CGroupController.NET_CLS, containerIdStr,
@@ -277,9 +275,7 @@ public class TrafficControlBandwidthHandlerImpl
   @Override
   public List<PrivilegedOperation> teardown()
       throws ResourceHandlerException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("teardown(): Nothing to do");
-    }
+    LOG.debug("teardown(): Nothing to do");
 
     return null;
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficController.java
index 83db5fc..b171ed0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficController.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TrafficController.java
@@ -222,9 +222,7 @@ import java.util.regex.Pattern;
       Pattern pattern = Pattern.compile(regex, Pattern.MULTILINE);
 
       if (pattern.matcher(state).find()) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Matched regex: " + regex);
-        }
+        LOG.debug("Matched regex: {}", regex);
       } else {
         String logLine = new StringBuffer("Failed to match regex: ")
               .append(regex).append(" Current state: ").append(state).toString();
@@ -258,9 +256,7 @@ import java.util.regex.Pattern;
       String output =
           privilegedOperationExecutor.executePrivilegedOperation(op, true);
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("TC state: %n" + output);
-      }
+      LOG.debug("TC state: {}" + output);
 
       return output;
     } catch (PrivilegedOperationException e) {
@@ -332,15 +328,11 @@ import java.util.regex.Pattern;
       String output =
           privilegedOperationExecutor.executePrivilegedOperation(op, true);
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("TC stats output:" + output);
-      }
+      LOG.debug("TC stats output:{}", output);
 
       Map<Integer, Integer> classIdBytesStats = parseStatsString(output);
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("classId -> bytes sent %n" + classIdBytesStats);
-      }
+      LOG.debug("classId -> bytes sent {}", classIdBytesStats);
 
       return classIdBytesStats;
     } catch (PrivilegedOperationException e) {
@@ -467,9 +459,7 @@ import java.util.regex.Pattern;
     //e.g 4325381 -> 00420005
     String classIdStr = String.format("%08x", Integer.parseInt(input));
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("ClassId hex string : " + classIdStr);
-    }
+    LOG.debug("ClassId hex string : {}", classIdStr);
 
     //extract and return 4 digits
     //e.g 00420005 -> 0005
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java
index 9842c38..acbfe9c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java
@@ -129,10 +129,8 @@ public class DelegatingLinuxContainerRuntime implements LinuxContainerRuntime {
       }
     }
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Using container runtime: " + runtime.getClass()
+    LOG.debug("Using container runtime: {}", runtime.getClass()
           .getSimpleName());
-    }
 
     return runtime;
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index f1da846..384bc5e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -511,11 +511,8 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
             + ", please check error message in log to understand "
             + "why this happens.";
     LOG.error(message);
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("All docker volumes in the system, command="
-          + dockerVolumeInspectCommand.toString());
-    }
+    LOG.debug("All docker volumes in the system, command={}",
+        dockerVolumeInspectCommand);
 
     throw new ContainerExecutionException(message);
   }
@@ -630,30 +627,22 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
   protected void addCGroupParentIfRequired(String resourcesOptions,
       String containerIdStr, DockerRunCommand runCommand) {
     if (cGroupsHandler == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("cGroupsHandler is null. cgroups are not in use. nothing to"
+      LOG.debug("cGroupsHandler is null. cgroups are not in use. nothing to"
             + " do.");
-      }
       return;
     }
 
     if (resourcesOptions.equals(PrivilegedOperation.CGROUP_ARG_PREFIX
             + PrivilegedOperation.CGROUP_ARG_NO_TASKS)) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("no resource restrictions specified. not using docker's "
-            + "cgroup options");
-      }
+      LOG.debug("no resource restrictions specified. not using docker's "
+          + "cgroup options");
     } else {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("using docker's cgroups options");
-      }
+      LOG.debug("using docker's cgroups options");
 
       String cGroupPath = "/"
           + cGroupsHandler.getRelativePathForCGroup(containerIdStr);
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("using cgroup parent: " + cGroupPath);
-      }
+      LOG.debug("using cgroup parent: {}", cGroupPath);
 
       runCommand.setCGroupParent(cGroupPath);
     }
@@ -1368,9 +1357,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
     if (tcCommandFile != null) {
       launchOp.appendArgs(tcCommandFile);
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Launching container with cmd: " + command);
-    }
+    LOG.debug("Launching container with cmd: {}", command);
 
     return launchOp;
   }
@@ -1391,8 +1378,8 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
       throws ContainerExecutionException {
     long start = System.currentTimeMillis();
     DockerPullCommand dockerPullCommand = new DockerPullCommand(imageName);
-    LOG.debug("now pulling docker image." + " image name: " + imageName + ","
-        + " container: " + containerIdStr);
+    LOG.debug("now pulling docker image. image name: {}, container: {}",
+        imageName, containerIdStr);
 
     DockerCommandExecutor.executeDockerCommand(dockerPullCommand,
         containerIdStr, null,
@@ -1400,10 +1387,9 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
 
     long end = System.currentTimeMillis();
     long pullImageTimeMs = end - start;
-    LOG.debug("pull docker image done with "
-        + String.valueOf(pullImageTimeMs) + "ms spent."
-        + " image name: " + imageName + ","
-        + " container: " + containerIdStr);
+
+    LOG.debug("pull docker image done with {}ms specnt. image name: {},"
+        + " container: {}", pullImageTimeMs, imageName, containerIdStr);
   }
 
   private void executeLivelinessCheck(ContainerRuntimeContext ctx)
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommandExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommandExecutor.java
index f449f73..18f6c05 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommandExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommandExecutor.java
@@ -83,9 +83,8 @@ public final class DockerCommandExecutor {
     if (disableFailureLogging) {
       dockerOp.disableFailureLogging();
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Running docker command: " + dockerCommand);
-    }
+    LOG.debug("Running docker command: {}", dockerCommand);
+
     try {
       String result = privilegedOperationExecutor
           .executePrivilegedOperation(null, dockerOp, null,
@@ -118,17 +117,13 @@ public final class DockerCommandExecutor {
           privilegedOperationExecutor, nmContext);
       DockerContainerStatus dockerContainerStatus = parseContainerStatus(
           currentContainerStatus);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Container Status: " + dockerContainerStatus.getName()
-            + " ContainerId: " + containerId);
-      }
+      LOG.debug("Container Status: {} ContainerId: {}",
+          dockerContainerStatus.getName(), containerId);
+
       return dockerContainerStatus;
     } catch (ContainerExecutionException e) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Container Status: "
-            + DockerContainerStatus.NONEXISTENT.getName()
-            + " ContainerId: " + containerId);
-      }
+      LOG.debug("Container Status: {} ContainerId: {}",
+          DockerContainerStatus.NONEXISTENT.getName(), containerId);
       return DockerContainerStatus.NONEXISTENT;
     }
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java
index 25990d6..279efd0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalizedResource.java
@@ -190,9 +190,7 @@ public class LocalizedResource implements EventHandler<ResourceEvent> {
     this.writeLock.lock();
     try {
       Path resourcePath = event.getLocalResourceRequest().getPath();
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Processing " + resourcePath + " of type " + event.getType());
-      }
+      LOG.debug("Processing {} of type {}", resourcePath, event.getType());
       ResourceState oldState = this.stateMachine.getCurrentState();
       ResourceState newState = null;
       try {
@@ -201,11 +199,9 @@ public class LocalizedResource implements EventHandler<ResourceEvent> {
         LOG.warn("Can't handle this event at current state", e);
       }
       if (newState != null && oldState != newState) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Resource " + resourcePath + (localPath != null ?
-              "(->" + localPath + ")": "") + " size : " + getSize()
-              + " transitioned from " + oldState + " to " + newState);
-        }
+        LOG.debug("Resource {}{} size : {} transitioned from {} to {}",
+            resourcePath, (localPath != null ? "(->" + localPath + ")": ""),
+            getSize(), oldState, newState);
       }
     } finally {
       this.writeLock.unlock();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
index 8944ba9..0494c2d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -345,10 +345,8 @@ public class ResourceLocalizationService extends CompositeService
         LocalizedResourceProto proto = it.next();
         LocalResource rsrc = new LocalResourcePBImpl(proto.getResource());
         LocalResourceRequest req = new LocalResourceRequest(rsrc);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Recovering localized resource " + req + " at "
-              + proto.getLocalPath());
-        }
+        LOG.debug("Recovering localized resource {} at {}",
+            req, proto.getLocalPath());
         tracker.handle(new ResourceRecoveredEvent(req,
             new Path(proto.getLocalPath()), proto.getSize()));
       }
@@ -514,10 +512,8 @@ public class ResourceLocalizationService extends CompositeService
                   .getApplicationId());
       for (LocalResourceRequest req : e.getValue()) {
         tracker.handle(new ResourceRequestEvent(req, e.getKey(), ctxt));
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Localizing " + req.getPath() +
-              " for container " + c.getContainerId());
-        }
+        LOG.debug("Localizing {} for container {}",
+            req.getPath(), c.getContainerId());
       }
     }
   }
@@ -930,17 +926,13 @@ public class ResourceLocalizationService extends CompositeService
                 + " Either queue is full or threadpool is shutdown.", re);
           }
         } else {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Skip downloading resource: " + key + " since it's in"
-                + " state: " + rsrc.getState());
-          }
+          LOG.debug("Skip downloading resource: {} since it's in"
+                + " state: {}", key, rsrc.getState());
           rsrc.unlock();
         }
       } else {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Skip downloading resource: " + key + " since it is locked"
-              + " by other threads");
-        }
+        LOG.debug("Skip downloading resource: {} since it is locked"
+              + " by other threads", key);
       }
     }
 
@@ -1302,10 +1294,10 @@ public class ResourceLocalizationService extends CompositeService
       if (systemCredentials == null) {
         return null;
       }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Adding new framework-token for " + appId
-            + " for localization: " + systemCredentials.getAllTokens());
-      }
+
+      LOG.debug("Adding new framework-token for {} for localization: {}",
+          appId, systemCredentials.getAllTokens());
+
       return systemCredentials;
     }
     
@@ -1328,11 +1320,10 @@ public class ResourceLocalizationService extends CompositeService
         LOG.info("Writing credentials to the nmPrivate file "
             + nmPrivateCTokensPath.toString());
         if (LOG.isDebugEnabled()) {
-          LOG.debug("Credentials list in " + nmPrivateCTokensPath.toString()
-              + ": ");
+          LOG.debug("Credentials list in {}: " + nmPrivateCTokensPath);
           for (Token<? extends TokenIdentifier> tk : credentials
               .getAllTokens()) {
-            LOG.debug(tk + " : " + buildTokenFingerprint(tk));
+            LOG.debug("{} : {}", tk, buildTokenFingerprint(tk));
           }
         }
         if (UserGroupInformation.isSecurityEnabled()) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerTokenSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerTokenSelector.java
index 89787af..81723fb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerTokenSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerTokenSelector.java
@@ -41,9 +41,7 @@ public class LocalizerTokenSelector implements
     LOG.debug("Using localizerTokenSelector.");
 
     for (Token<? extends TokenIdentifier> token : tokens) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Token of kind " + token.getKind() + " is found");
-      }
+      LOG.debug("Token of kind {} is found", token.getKind());
       if (LocalizerTokenIdentifier.KIND.equals(token.getKind())) {
         return (Token<LocalizerTokenIdentifier>) token;
       }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
index 04503ef..fdac2e4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
@@ -383,11 +383,9 @@ public class AppLogAggregatorImpl implements AppLogAggregator {
       Credentials systemCredentials =
           context.getSystemCredentialsForApps().get(appId);
       if (systemCredentials != null) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Adding new framework-token for " + appId
-              + " for log-aggregation: " + systemCredentials.getAllTokens()
-              + "; userUgi=" + userUgi);
-        }
+        LOG.debug("Adding new framework-token for {} for log-aggregation:"
+            + " {}; userUgi={}", appId, systemCredentials.getAllTokens(),
+            userUgi);
         // this will replace old token
         userUgi.addCredentials(systemCredentials);
       }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java
index d66aa12..9898f8f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java
@@ -132,10 +132,8 @@ public class NonAggregatingLogHandler extends AbstractService implements
         ApplicationId appId = entry.getKey();
         LogDeleterProto proto = entry.getValue();
         long deleteDelayMsec = proto.getDeletionTime() - now;
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Scheduling deletion of " + appId + " logs in "
-              + deleteDelayMsec + " msec");
-        }
+        LOG.debug("Scheduling deletion of {} logs in {} msec", appId,
+            deleteDelayMsec);
         LogDeleterRunnable logDeleter =
             new LogDeleterRunnable(proto.getUser(), appId);
         try {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
index 525b598..b46e620 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
@@ -468,8 +468,8 @@ public class ContainersMonitorImpl extends AbstractService implements
             tmp.append(p.getPID());
             tmp.append(" ");
           }
-          LOG.debug("Current ProcessTree list : "
-              + tmp.substring(0, tmp.length()) + "]");
+          LOG.debug("Current ProcessTree list : {}",
+              tmp.substring(0, tmp.length()) + "]");
         }
 
         // Temporary structure to calculate the total resource utilization of
@@ -495,10 +495,8 @@ public class ContainersMonitorImpl extends AbstractService implements
             if (pId == null || !isResourceCalculatorAvailable()) {
               continue; // processTree cannot be tracked
             }
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Constructing ProcessTree for : PID = " + pId
-                  + " ContainerId = " + containerId);
-            }
+            LOG.debug("Constructing ProcessTree for : PID = {}"
+                +" ContainerId = {}", pId, containerId);
             ResourceCalculatorProcessTree pTree = ptInfo.getProcessTree();
             pTree.updateProcessTree();    // update process-tree
             long currentVmemUsage = pTree.getVirtualMemorySize();
@@ -536,13 +534,11 @@ public class ContainersMonitorImpl extends AbstractService implements
                 + "while monitoring resource of {}", containerId, e);
           }
         }
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Total Resource Usage stats in NM by all containers : "
-              + "Virtual Memory= " + vmemUsageByAllContainers
-              + ", Physical Memory= " + pmemByAllContainers
-              + ", Total CPU usage(% per core)= "
-              + cpuUsagePercentPerCoreByAllContainers);
-        }
+        LOG.debug("Total Resource Usage stats in NM by all containers : "
+            + "Virtual Memory= {}, Physical Memory= {}, "
+            + "Total CPU usage(% per core)= {}", vmemUsageByAllContainers,
+            pmemByAllContainers, cpuUsagePercentPerCoreByAllContainers);
+
 
         // Save the aggregated utilization of the containers
         setContainersUtilization(trackedContainersUtilization);
@@ -587,9 +583,7 @@ public class ContainersMonitorImpl extends AbstractService implements
         if (pId != null) {
           // pId will be null, either if the container is not spawned yet
           // or if the container's pid is removed from ContainerExecutor
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Tracking ProcessTree " + pId + " for the first time");
-          }
+          LOG.debug("Tracking ProcessTree {} for the first time", pId);
           ResourceCalculatorProcessTree pt =
               getResourceCalculatorProcessTree(pId);
           ptInfo.setPid(pId);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nvidia/NvidiaGPUPluginForRuntimeV2.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nvidia/NvidiaGPUPluginForRuntimeV2.java
index ee3f54b..2ee44b2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nvidia/NvidiaGPUPluginForRuntimeV2.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/com/nvidia/NvidiaGPUPluginForRuntimeV2.java
@@ -159,9 +159,7 @@ public class NvidiaGPUPluginForRuntimeV2 implements DevicePlugin,
       lastTimeFoundDevices = r;
       return r;
     } catch (IOException e) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Failed to get output from " + pathOfGpuBinary);
-      }
+      LOG.debug("Failed to get output from {}", pathOfGpuBinary);
       throw new YarnException(e);
     }
   }
@@ -169,10 +167,8 @@ public class NvidiaGPUPluginForRuntimeV2 implements DevicePlugin,
   @Override
   public DeviceRuntimeSpec onDevicesAllocated(Set<Device> allocatedDevices,
       YarnRuntimeType yarnRuntime) throws Exception {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Generating runtime spec for allocated devices: "
-          + allocatedDevices + ", " + yarnRuntime.getName());
-    }
+    LOG.debug("Generating runtime spec for allocated devices: {}, {}",
+        allocatedDevices, yarnRuntime.getName());
     if (yarnRuntime == YarnRuntimeType.RUNTIME_DOCKER) {
       String nvidiaRuntime = "nvidia";
       String nvidiaVisibleDevices = "NVIDIA_VISIBLE_DEVICES";
@@ -201,14 +197,10 @@ public class NvidiaGPUPluginForRuntimeV2 implements DevicePlugin,
     String output = null;
     // output "major:minor" in hex
     try {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Get major numbers from /dev/" + devName);
-      }
+      LOG.debug("Get major numbers from /dev/{}", devName);
       output = shellExecutor.getMajorMinorInfo(devName);
       String[] strs = output.trim().split(":");
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("stat output:" + output);
-      }
+      LOG.debug("stat output:{}", output);
       output = Integer.toString(Integer.parseInt(strs[0], 16));
     } catch (IOException e) {
       String msg =
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/IntelFpgaOpenclPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/IntelFpgaOpenclPlugin.java
index 33c39ae..f6a8cff 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/IntelFpgaOpenclPlugin.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/IntelFpgaOpenclPlugin.java
@@ -164,10 +164,10 @@ public class IntelFpgaOpenclPlugin implements AbstractFpgaVendorPlugin {
       Shell.ShellCommandExecutor shexec = new Shell.ShellCommandExecutor(
           new String[]{"stat", "-c", "%t:%T", "/dev/" + devName});
       try {
-        LOG.debug("Get FPGA major-minor numbers from /dev/" + devName);
+        LOG.debug("Get FPGA major-minor numbers from /dev/{}", devName);
         shexec.execute();
         String[] strs = shexec.getOutput().trim().split(":");
-        LOG.debug("stat output:" + shexec.getOutput());
+        LOG.debug("stat output:{}", shexec.getOutput());
         output = Integer.parseInt(strs[0], 16) + ":" +
             Integer.parseInt(strs[1], 16);
       } catch (IOException e) {
@@ -192,7 +192,7 @@ public class IntelFpgaOpenclPlugin implements AbstractFpgaVendorPlugin {
             "Failed to execute " + binary + " diagnose, exception message:" + e
                 .getMessage() +", output:" + output + ", continue ...";
         LOG.warn(msg);
-        LOG.debug(shexec.getOutput());
+        LOG.debug("{}", shexec.getOutput());
       }
       return shexec.getOutput();
     }
@@ -241,7 +241,7 @@ public class IntelFpgaOpenclPlugin implements AbstractFpgaVendorPlugin {
 
       if (aocxPath.isPresent()) {
         ipFilePath = aocxPath.get().toUri().toString();
-        LOG.debug("Found: " + ipFilePath);
+        LOG.debug("Found: {}", ipFilePath);
       }
     } else {
       LOG.warn("Localized resource is null!");
@@ -278,7 +278,7 @@ public class IntelFpgaOpenclPlugin implements AbstractFpgaVendorPlugin {
     try {
       shexec.execute();
       if (0 == shexec.getExitCode()) {
-        LOG.debug(shexec.getOutput());
+        LOG.debug("{}", shexec.getOutput());
         LOG.info("Intel aocl program " + ipPath + " to " +
             aclName + " successfully");
       } else {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
index 6cf6a8d..0c55478 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
@@ -129,9 +129,7 @@ public class GpuDiscoverer {
     } catch (IOException e) {
       numOfErrorExecutionSinceLastSucceed++;
       String msg = getErrorMessageOfScriptExecution(e.getMessage());
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(msg);
-      }
+      LOG.debug(msg);
       throw new YarnException(msg, e);
     } catch (YarnException e) {
       numOfErrorExecutionSinceLastSucceed++;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/AllocationBasedResourceUtilizationTracker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/AllocationBasedResourceUtilizationTracker.java
index 4343b45..100676d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/AllocationBasedResourceUtilizationTracker.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/AllocationBasedResourceUtilizationTracker.java
@@ -118,11 +118,9 @@ public class AllocationBasedResourceUtilizationTracker implements
       return false;
     }
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("before cpuCheck [asked={} > allowed={}]",
-          this.containersAllocation.getCPU(),
-          getContainersMonitor().getVCoresAllocatedForContainers());
-    }
+    LOG.debug("before cpuCheck [asked={} > allowed={}]",
+        this.containersAllocation.getCPU(),
+        getContainersMonitor().getVCoresAllocatedForContainers());
     // Check CPU.
     if (this.containersAllocation.getCPU() + cpuVcores >
         getContainersMonitor().getVCoresAllocatedForContainers()) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
index 560ebd4..cfbde87 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
@@ -137,10 +137,8 @@ public class ContainerScheduler extends AbstractService implements
         resourceHandlerChain = ResourceHandlerModule
             .getConfiguredResourceHandlerChain(conf, context);
       }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Resource handler chain enabled = " + (resourceHandlerChain
-            != null));
-      }
+      LOG.debug("Resource handler chain enabled = {}",
+          (resourceHandlerChain != null));
       if (resourceHandlerChain != null) {
         LOG.debug("Bootstrapping resource handler chain");
         resourceHandlerChain.bootstrap(conf);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
index 1d7771a..951adbe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
@@ -447,10 +447,8 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
   public void storeContainer(ContainerId containerId, int containerVersion,
       long startTime, StartContainerRequest startRequest) throws IOException {
     String idStr = containerId.toString();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("storeContainer: containerId= " + idStr
-          + ", startRequest= " + startRequest);
-    }
+    LOG.debug("storeContainer: containerId= {}, startRequest= {}",
+        idStr, startRequest);
     final String keyVersion = getContainerVersionKey(idStr);
     final String keyRequest =
         getContainerKey(idStr, CONTAINER_REQUEST_KEY_SUFFIX);
@@ -488,9 +486,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
 
   @Override
   public void storeContainerQueued(ContainerId containerId) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("storeContainerQueued: containerId=" + containerId);
-    }
+    LOG.debug("storeContainerQueued: containerId={}", containerId);
 
     String key = CONTAINERS_KEY_PREFIX + containerId.toString()
         + CONTAINER_QUEUED_KEY_SUFFIX;
@@ -504,9 +500,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
 
   private void removeContainerQueued(ContainerId containerId)
       throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("removeContainerQueued: containerId=" + containerId);
-    }
+    LOG.debug("removeContainerQueued: containerId={}", containerId);
 
     String key = CONTAINERS_KEY_PREFIX + containerId.toString()
         + CONTAINER_QUEUED_KEY_SUFFIX;
@@ -520,9 +514,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
 
   @Override
   public void storeContainerPaused(ContainerId containerId) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("storeContainerPaused: containerId=" + containerId);
-    }
+    LOG.debug("storeContainerPaused: containerId={}", containerId);
 
     String key = CONTAINERS_KEY_PREFIX + containerId.toString()
         + CONTAINER_PAUSED_KEY_SUFFIX;
@@ -537,9 +529,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
   @Override
   public void removeContainerPaused(ContainerId containerId)
       throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("removeContainerPaused: containerId=" + containerId);
-    }
+    LOG.debug("removeContainerPaused: containerId={}", containerId);
 
     String key = CONTAINERS_KEY_PREFIX + containerId.toString()
         + CONTAINER_PAUSED_KEY_SUFFIX;
@@ -554,10 +544,8 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
   @Override
   public void storeContainerDiagnostics(ContainerId containerId,
       StringBuilder diagnostics) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("storeContainerDiagnostics: containerId=" + containerId
-          + ", diagnostics=" + diagnostics);
-    }
+    LOG.debug("storeContainerDiagnostics: containerId={}, diagnostics=",
+        containerId, diagnostics);
 
     String key = CONTAINERS_KEY_PREFIX + containerId.toString()
         + CONTAINER_DIAGS_KEY_SUFFIX;
@@ -572,9 +560,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
   @Override
   public void storeContainerLaunched(ContainerId containerId)
       throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("storeContainerLaunched: containerId=" + containerId);
-    }
+    LOG.debug("storeContainerLaunched: containerId={}", containerId);
 
     // Removing the container if queued for backward compatibility reasons
     removeContainerQueued(containerId);
@@ -591,9 +577,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
   @Override
   public void storeContainerUpdateToken(ContainerId containerId,
       ContainerTokenIdentifier containerTokenIdentifier) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("storeContainerUpdateToken: containerId=" + containerId);
-    }
+    LOG.debug("storeContainerUpdateToken: containerId={}", containerId);
 
     String keyUpdateToken = CONTAINERS_KEY_PREFIX + containerId.toString()
         + CONTAINER_UPDATE_TOKEN_SUFFIX;
@@ -621,9 +605,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
   @Override
   public void storeContainerKilled(ContainerId containerId)
       throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("storeContainerKilled: containerId=" + containerId);
-    }
+    LOG.debug("storeContainerKilled: containerId={}", containerId);
 
     String key = CONTAINERS_KEY_PREFIX + containerId.toString()
         + CONTAINER_KILLED_KEY_SUFFIX;
@@ -638,9 +620,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
   @Override
   public void storeContainerCompleted(ContainerId containerId,
       int exitCode) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("storeContainerCompleted: containerId=" + containerId);
-    }
+    LOG.debug("storeContainerCompleted: containerId={}", containerId);
 
     String key = CONTAINERS_KEY_PREFIX + containerId.toString()
         + CONTAINER_EXIT_CODE_KEY_SUFFIX;
@@ -706,9 +686,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
   @Override
   public void removeContainer(ContainerId containerId)
       throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("removeContainer: containerId=" + containerId);
-    }
+    LOG.debug("removeContainer: containerId={}", containerId);
 
     String keyPrefix = CONTAINERS_KEY_PREFIX + containerId.toString();
     try {
@@ -789,10 +767,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
   @Override
   public void storeApplication(ApplicationId appId,
       ContainerManagerApplicationProto p) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("storeApplication: appId=" + appId
-          + ", proto=" + p);
-    }
+    LOG.debug("storeApplication: appId={}, proto={}", appId, p);
 
     String key = APPLICATIONS_KEY_PREFIX + appId;
     try {
@@ -806,9 +781,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
   @Override
   public void removeApplication(ApplicationId appId)
       throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("removeApplication: appId=" + appId);
-    }
+    LOG.debug("removeApplication: appId={}", appId);
 
     try {
       WriteBatch batch = db.createWriteBatch();
@@ -917,9 +890,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
         return null;
       }
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Loading completed resource from " + key);
-      }
+      LOG.debug("Loading completed resource from {}", key);
       nextCompletedResource = LocalizedResourceProto.parseFrom(
           entry.getValue());
     }
@@ -952,9 +923,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
       }
 
       Path localPath = new Path(key.substring(keyPrefix.length()));
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Loading in-progress resource at " + localPath);
-      }
+      LOG.debug("Loading in-progress resource at {}", localPath);
       nextStartedResource = new SimpleEntry<LocalResourceProto, Path>(
           LocalResourceProto.parseFrom(entry.getValue()), localPath);
     }
@@ -1042,9 +1011,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
     String localPath = proto.getLocalPath();
     String startedKey = getResourceStartedKey(user, appId, localPath);
     String completedKey = getResourceCompletedKey(user, appId, localPath);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Storing localized resource to " + completedKey);
-    }
+    LOG.debug("Storing localized resource to {}", completedKey);
     try {
       WriteBatch batch = db.createWriteBatch();
       try {
@@ -1066,9 +1033,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
     String localPathStr = localPath.toString();
     String startedKey = getResourceStartedKey(user, appId, localPathStr);
     String completedKey = getResourceCompletedKey(user, appId, localPathStr);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Removing local resource at " + localPathStr);
-    }
+    LOG.debug("Removing local resource at {}", localPathStr);
     try {
       WriteBatch batch = db.createWriteBatch();
       try {
@@ -1505,9 +1470,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
             break;
           }
           batch.delete(key);
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("cleanup " + keyStr + " from leveldb");
-          }
+          LOG.debug("cleanup {} from leveldb", keyStr);
         }
         db.write(batch);
       } catch (DBException e) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/scheduler/DistributedScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/scheduler/DistributedScheduler.java
index a9b5ed4..b4be05a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/scheduler/DistributedScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/scheduler/DistributedScheduler.java
@@ -237,10 +237,8 @@ public final class DistributedScheduler extends AbstractRequestInterceptor {
     request.setAllocatedContainers(allocatedContainers);
     request.getAllocateRequest().setAskList(partitionedAsks.getGuaranteed());
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Forwarding allocate request to the" +
+    LOG.debug("Forwarding allocate request to the" +
           "Distributed Scheduler Service on YARN RM");
-    }
 
     DistributedSchedulingAllocateResponse dsResp =
         getNextInterceptor().allocateForDistributedScheduling(request);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMTokenSecretManagerInNM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMTokenSecretManagerInNM.java
index f895791..23f5fd8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMTokenSecretManagerInNM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMTokenSecretManagerInNM.java
@@ -196,10 +196,8 @@ public class NMTokenSecretManagerInNM extends BaseNMTokenSecretManager {
   public synchronized void appFinished(ApplicationId appId) {
     List<ApplicationAttemptId> appAttemptList = appToAppAttemptMap.get(appId);
     if (appAttemptList != null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Removing application attempts NMToken keys for application "
-            + appId);
-      }
+      LOG.debug("Removing application attempts NMToken keys for"
+          + " application {}", appId);
       for (ApplicationAttemptId appAttemptId : appAttemptList) {
         removeAppAttemptKey(appAttemptId);
       }
@@ -233,10 +231,8 @@ public class NMTokenSecretManagerInNM extends BaseNMTokenSecretManager {
     if (oldKey == null
         || oldKey.getMasterKey().getKeyId() != identifier.getKeyId()) {
       // Update key only if it is modified.
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("NMToken key updated for application attempt : "
-            + identifier.getApplicationAttemptId().toString());
-      }
+      LOG.debug("NMToken key updated for application attempt : {}",
+          identifier.getApplicationAttemptId().toString());
       if (identifier.getKeyId() == currentMasterKey.getMasterKey()
         .getKeyId()) {
         updateAppAttemptKey(appAttemptId, currentMasterKey);
@@ -252,9 +248,7 @@ public class NMTokenSecretManagerInNM extends BaseNMTokenSecretManager {
   }
   
   public synchronized void setNodeId(NodeId nodeId) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("updating nodeId : " + nodeId);
-    }
+    LOG.debug("updating nodeId : {}", nodeId);
     this.nodeId = nodeId;
   }
   
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
index e9bd965..79443f4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
@@ -205,18 +205,14 @@ public class NMTimelinePublisher extends CompositeService {
           LOG.error(
               "Failed to publish Container metrics for container " +
                   container.getContainerId());
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Failed to publish Container metrics for container " +
-                container.getContainerId(), e);
-          }
+          LOG.debug("Failed to publish Container metrics for container {}",
+              container.getContainerId(), e);
         } catch (YarnException e) {
           LOG.error(
               "Failed to publish Container metrics for container " +
                   container.getContainerId(), e.getMessage());
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Failed to publish Container metrics for container " +
-                container.getContainerId(), e);
-          }
+          LOG.debug("Failed to publish Container metrics for container {}",
+              container.getContainerId(), e);
         }
       }
     }
@@ -317,17 +313,13 @@ public class NMTimelinePublisher extends CompositeService {
       } catch (IOException e) {
         LOG.error("Failed to publish Container metrics for container "
             + container.getContainerId());
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Failed to publish Container metrics for container "
-              + container.getContainerId(), e);
-        }
+        LOG.debug("Failed to publish Container metrics for container {}",
+            container.getContainerId(), e);
       } catch (YarnException e) {
         LOG.error("Failed to publish Container metrics for container "
             + container.getContainerId(), e.getMessage());
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Failed to publish Container metrics for container "
-              + container.getContainerId(), e);
-        }
+        LOG.debug("Failed to publish Container metrics for container {}",
+            container.getContainerId(), e);
       }
     }
   }
@@ -347,8 +339,8 @@ public class NMTimelinePublisher extends CompositeService {
   private void putEntity(TimelineEntity entity, ApplicationId appId) {
     try {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Publishing the entity " + entity + ", JSON-style content: "
-            + TimelineUtils.dumpTimelineRecordtoJSON(entity));
+        LOG.debug("Publishing the entity {} JSON-style content: {}",
+            entity, TimelineUtils.dumpTimelineRecordtoJSON(entity));
       }
       TimelineV2Client timelineClient = getTimelineClient(appId);
       if (timelineClient != null) {
@@ -359,14 +351,10 @@ public class NMTimelinePublisher extends CompositeService {
       }
     } catch (IOException e) {
       LOG.error("Error when publishing entity " + entity);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Error when publishing entity " + entity, e);
-      }
+      LOG.debug("Error when publishing entity {}", entity, e);
     } catch (YarnException e) {
       LOG.error("Error when publishing entity " + entity, e.getMessage());
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Error when publishing entity " + entity, e);
-      }
+      LOG.debug("Error when publishing entity {}", entity, e);
     }
   }
 
@@ -388,10 +376,8 @@ public class NMTimelinePublisher extends CompositeService {
       break;
 
     default:
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(event.getType() + " is not a desired ApplicationEvent which"
-            + " needs to be published by NMTimelinePublisher");
-      }
+      LOG.debug("{} is not a desired ApplicationEvent which"
+          + " needs to be published by NMTimelinePublisher", event.getType());
       break;
     }
   }
@@ -404,11 +390,8 @@ public class NMTimelinePublisher extends CompositeService {
       break;
 
     default:
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(event.getType()
-            + " is not a desired ContainerEvent which needs to be published by"
-            + " NMTimelinePublisher");
-      }
+      LOG.debug("{} is not a desired ContainerEvent which needs to be "
+            + " published by NMTimelinePublisher", event.getType());
       break;
     }
   }
@@ -425,11 +408,8 @@ public class NMTimelinePublisher extends CompositeService {
           ContainerMetricsConstants.LOCALIZATION_START_EVENT_TYPE);
       break;
     default:
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(event.getType()
-            + " is not a desired LocalizationEvent which needs to be published"
-            + " by NMTimelinePublisher");
-      }
+      LOG.debug("{} is not a desired LocalizationEvent which needs to be"
+            + " published by NMTimelinePublisher", event.getType());
       break;
     }
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
index 54b6e1c..6025260 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
@@ -206,9 +206,7 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
         throws IOException {
     String path = pathForCgroup(controller, groupName);
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("createCgroup: " + path);
-    }
+    LOG.debug("createCgroup: {}", path);
 
     if (!new File(path).mkdir()) {
       throw new IOException("Failed to create cgroup at " + path);
@@ -220,9 +218,7 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
     String path = pathForCgroup(controller, groupName);
     param = controller + "." + param;
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("updateCgroup: " + path + ": " + param + "=" + value);
-    }
+    LOG.debug("updateCgroup: {}: {}={}", path, param, value);
 
     PrintWriter pw = null;
     try {
@@ -259,7 +255,7 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
               + "/tasks"), "UTF-8"))) {
         str = inl.readLine();
         if (str != null) {
-          LOG.debug("First line in cgroup tasks file: " + cgf + " " + str);
+          LOG.debug("First line in cgroup tasks file: {} {}", cgf, str);
         }
       } catch (IOException e) {
         LOG.warn("Failed to read cgroup tasks file. ", e);
@@ -302,9 +298,7 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
   boolean deleteCgroup(String cgroupPath) {
     boolean deleted = false;
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("deleteCgroup: " + cgroupPath);
-    }
+    LOG.debug("deleteCgroup: {}", cgroupPath);
     long start = clock.getTime();
     do {
       try {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/NodeManagerHardwareUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/NodeManagerHardwareUtils.java
index f53362a..685ed1a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/NodeManagerHardwareUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/NodeManagerHardwareUtils.java
@@ -353,7 +353,7 @@ public class NodeManagerHardwareUtils {
     for (Map.Entry<String, ResourceInformation> entry : resourceInformation
         .entrySet()) {
       ret.setResourceInformation(entry.getKey(), entry.getValue());
-      LOG.debug("Setting key " + entry.getKey() + " to " + entry.getValue());
+      LOG.debug("Setting key {} to {}", entry.getKey(), entry.getValue());
     }
     if (resourceInformation.containsKey(memory)) {
       Long value = resourceInformation.get(memory).getValue();
@@ -364,7 +364,7 @@ public class NodeManagerHardwareUtils {
       ResourceInformation memResInfo = resourceInformation.get(memory);
       if(memResInfo.getValue() == 0) {
         ret.setMemorySize(getContainerMemoryMB(conf));
-        LOG.debug("Set memory to " + ret.getMemorySize());
+        LOG.debug("Set memory to {}", ret.getMemorySize());
       }
     }
     if (resourceInformation.containsKey(vcores)) {
@@ -376,10 +376,10 @@ public class NodeManagerHardwareUtils {
       ResourceInformation vcoresResInfo = resourceInformation.get(vcores);
       if(vcoresResInfo.getValue() == 0) {
         ret.setVirtualCores(getVCores(conf));
-        LOG.debug("Set vcores to " + ret.getVirtualCores());
+        LOG.debug("Set vcores to {}", ret.getVirtualCores());
       }
     }
-    LOG.debug("Node resource information map is " + ret);
+    LOG.debug("Node resource information map is {}", ret);
     return ret;
   }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/ProcessIdFileReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/ProcessIdFileReader.java
index a4c04ab..c492ee4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/ProcessIdFileReader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/ProcessIdFileReader.java
@@ -49,9 +49,7 @@ public class ProcessIdFileReader {
     if (path == null) {
       throw new IOException("Trying to access process id from a null path");
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Accessing pid from pid file " + path);
-    }
+    LOG.debug("Accessing pid from pid file {}", path);
     String processId = null;
     BufferedReader bufReader = null;
 
@@ -99,10 +97,8 @@ public class ProcessIdFileReader {
         bufReader.close();
       }
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Got pid " + (processId != null ? processId : "null")
-          + " from path " + path);
-    }
+    LOG.debug("Got pid {} from path {}",
+        (processId != null ? processId : "null"), path);
     return processId;
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java
index 301810a..2769788 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java
@@ -151,9 +151,7 @@ public class ContainerLogsPage extends NMView {
                 printAggregatedLogFileDirectory(html, containersLogMeta);
               }
             } catch (Exception ex) {
-              if (LOG.isDebugEnabled()) {
-                LOG.debug(ex.getMessage());
-              }
+              LOG.debug("{}", ex);
             }
           }
         } else {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java
index 106144f..d485c55 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java
@@ -328,9 +328,7 @@ public class NMWebServices {
       } catch (IOException ex) {
         // Something wrong with we tries to access the remote fs for the logs.
         // Skip it and do nothing
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(ex.getMessage());
-        }
+        LOG.debug("{}", ex);
       }
       GenericEntity<List<ContainerLogsInfo>> meta = new GenericEntity<List<
           ContainerLogsInfo>>(containersLogsInfo){};
@@ -433,10 +431,8 @@ public class NMWebServices {
     } catch (Exception ex) {
       // This NM does not have this container any more. We
       // assume the container has already finished.
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Can not find the container:" + containerId
-            + " in this node.");
-      }
+      LOG.debug("Can not find the container:{} in this node.",
+          containerId);
     }
     final boolean isRunning = tempIsRunning;
     File logFile = null;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ActiveStandbyElectorBasedElectorService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ActiveStandbyElectorBasedElectorService.java
index d1ef5c8..9ad9a7b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ActiveStandbyElectorBasedElectorService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ActiveStandbyElectorBasedElectorService.java
@@ -213,10 +213,8 @@ public class ActiveStandbyElectorBasedElectorService extends AbstractService
 
   @Override
   public void fenceOldActive(byte[] oldActiveData) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Request to fence old active being ignored, " +
-          "as embedded leader election doesn't support fencing");
-    }
+    LOG.debug("Request to fence old active being ignored, " +
+        "as embedded leader election doesn't support fencing");
   }
 
   private static byte[] createActiveNodeInfo(String clusterId, String rmId)
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index 55ea556..298bea7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -1470,10 +1470,8 @@ public class ClientRMService extends AbstractService implements
       ReservationDefinition contract, String reservationId) {
     if ((contract.getArrival() - clock.getTime()) < reservationSystem
         .getPlanFollowerTimeStep()) {
-      LOG.debug(MessageFormat
-          .format(
-              "Reservation {0} is within threshold so attempting to create synchronously.",
-              reservationId));
+      LOG.debug("Reservation {} is within threshold so attempting to"
+          + " create synchronously.", reservationId);
       reservationSystem.synchronizePlan(planName, true);
       LOG.info(MessageFormat.format("Created reservation {0} synchronously.",
           reservationId));
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DecommissioningNodesWatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DecommissioningNodesWatcher.java
index 83c1916..b0cec5a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DecommissioningNodesWatcher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DecommissioningNodesWatcher.java
@@ -292,7 +292,7 @@ public class DecommissioningNodesWatcher {
         }
         // Remove stale non-DECOMMISSIONING node
         if (d.nodeState != NodeState.DECOMMISSIONING) {
-          LOG.debug("remove " + d.nodeState + " " + d.nodeId);
+          LOG.debug("remove {} {}", d.nodeState, d.nodeId);
           it.remove();
           continue;
         } else if (now - d.lastUpdateTime > 60000L) {
@@ -300,7 +300,7 @@ public class DecommissioningNodesWatcher {
           RMNode rmNode = getRmNode(d.nodeId);
           if (rmNode != null &&
               rmNode.getState() == NodeState.DECOMMISSIONED) {
-            LOG.debug("remove " + rmNode.getState() + " " + d.nodeId);
+            LOG.debug("remove {} {}", rmNode.getState(), d.nodeId);
             it.remove();
             continue;
           }
@@ -308,7 +308,7 @@ public class DecommissioningNodesWatcher {
         if (d.timeoutMs >= 0 &&
             d.decommissioningStartTime + d.timeoutMs < now) {
           staleNodes.add(d.nodeId);
-          LOG.debug("Identified stale and timeout node " + d.nodeId);
+          LOG.debug("Identified stale and timeout node {}", d.nodeId);
         }
       }
 
@@ -342,14 +342,14 @@ public class DecommissioningNodesWatcher {
       ApplicationId appId = it.next();
       RMApp rmApp = rmContext.getRMApps().get(appId);
       if (rmApp == null) {
-        LOG.debug("Consider non-existing app " + appId + " as completed");
+        LOG.debug("Consider non-existing app {} as completed", appId);
         it.remove();
         continue;
       }
       if (rmApp.getState() == RMAppState.FINISHED ||
           rmApp.getState() == RMAppState.FAILED ||
           rmApp.getState() == RMAppState.KILLED) {
-        LOG.debug("Remove " + rmApp.getState() + " app " + appId);
+        LOG.debug("Remove {} app {}", rmApp.getState(), appId);
         it.remove();
       }
     }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
index f725ac9..b87260e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
@@ -493,17 +493,17 @@ public class NodesListManager extends CompositeService implements
     RMNode eventNode = event.getNode();
     switch (event.getType()) {
     case NODE_UNUSABLE:
-      LOG.debug(eventNode + " reported unusable");
+      LOG.debug("{} reported unusable", eventNode);
       sendRMAppNodeUpdateEventToNonFinalizedApps(eventNode,
           RMAppNodeUpdateType.NODE_UNUSABLE);
       break;
     case NODE_USABLE:
-      LOG.debug(eventNode + " reported usable");
+      LOG.debug("{} reported usable", eventNode);
       sendRMAppNodeUpdateEventToNonFinalizedApps(eventNode,
           RMAppNodeUpdateType.NODE_USABLE);
       break;
     case NODE_DECOMMISSIONING:
-      LOG.debug(eventNode + " reported decommissioning");
+      LOG.debug("{} reported decommissioning", eventNode);
       sendRMAppNodeUpdateEventToNonFinalizedApps(
           eventNode, RMAppNodeUpdateType.NODE_DECOMMISSIONING);
       break;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index 10f5774..b92c7f6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -618,8 +618,8 @@ public class RMAppManager implements EventHandler<RMAppManagerEvent>,
   @Override
   public void handle(RMAppManagerEvent event) {
     ApplicationId applicationId = event.getApplicationId();
-    LOG.debug("RMAppManager processing event for " 
-        + applicationId + " of type " + event.getType());
+    LOG.debug("RMAppManager processing event for {} of type {}",
+        applicationId, event.getType());
     switch (event.getType()) {
     case APP_COMPLETED :
       finishApplication(applicationId);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index 36a87dc..c795bfa 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -1095,8 +1095,8 @@ public class ResourceManager extends CompositeService
               rmApp.getAppAttempts().values().iterator().next();
           if (previousFailedAttempt != null) {
             try {
-              LOG.debug("Event " + event.getType() + " handled by "
-                  + previousFailedAttempt);
+              LOG.debug("Event {} handled by {}", event.getType(),
+                  previousFailedAttempt);
               previousFailedAttempt.handle(event);
             } catch (Throwable t) {
               LOG.error("Error in handling event type " + event.getType()
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
index 0cf7697..f021ebb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
@@ -296,10 +296,8 @@ public class ResourceTrackerService extends AbstractService implements
     }
 
     if (rmApp.getApplicationSubmissionContext().getUnmanagedAM()) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Ignoring container completion status for unmanaged AM "
-            + rmApp.getApplicationId());
-      }
+      LOG.debug("Ignoring container completion status for unmanaged AM {}",
+          rmApp.getApplicationId());
       return;
     }
 
@@ -393,11 +391,9 @@ public class ResourceTrackerService extends AbstractService implements
 
     Resource dynamicLoadCapability = loadNodeResourceFromDRConfiguration(nid);
     if (dynamicLoadCapability != null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Resource for node: " + nid + " is adjusted from: " +
-            capability + " to: " + dynamicLoadCapability +
-            " due to settings in dynamic-resources.xml.");
-      }
+      LOG.debug("Resource for node: {} is adjusted from: {} to: {} due to"
+          + " settings in dynamic-resources.xml.", nid, capability,
+          dynamicLoadCapability);
       capability = dynamicLoadCapability;
       // sync back with new resource.
       response.setResource(capability);
@@ -750,9 +746,9 @@ public class ResourceTrackerService extends AbstractService implements
       this.rmContext.getNodeAttributesManager()
           .replaceNodeAttributes(NodeAttribute.PREFIX_DISTRIBUTED,
               ImmutableMap.of(nodeId.getHost(), nodeAttributes));
-    } else if (LOG.isDebugEnabled()) {
-      LOG.debug("Skip updating node attributes since there is no change for "
-          + nodeId + " : " + nodeAttributes);
+    } else {
+      LOG.debug("Skip updating node attributes since there is no change"
+          +" for {} : {}", nodeId, nodeAttributes);
     }
   }
 
@@ -775,10 +771,8 @@ public class ResourceTrackerService extends AbstractService implements
         if (appCollectorData != null) {
           liveAppCollectorsMap.put(appId, appCollectorData);
         } else {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Collector for applicaton: " + appId +
-                " hasn't registered yet!");
-          }
+          LOG.debug("Collector for applicaton: {} hasn't registered yet!",
+              appId);
         }
       }
     }
@@ -960,11 +954,8 @@ public class ResourceTrackerService extends AbstractService implements
     }
     if(request.getTokenSequenceNo() != this.rmContext.getTokenSequenceNo()) {
       if (!rmContext.getSystemCredentialsForApps().isEmpty()) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(
-              "Sending System credentials for apps as part of NodeHeartbeat "
-                  + "response.");
-        }
+        LOG.debug("Sending System credentials for apps as part of"
+            + " NodeHeartbeat response.");
         nodeHeartBeatResponse
             .setSystemCredentialsForApps(
                 rmContext.getSystemCredentialsForApps().values());
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/blacklist/SimpleBlacklistManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/blacklist/SimpleBlacklistManager.java
index 27289cc..2917ff8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/blacklist/SimpleBlacklistManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/blacklist/SimpleBlacklistManager.java
@@ -67,11 +67,9 @@ public class SimpleBlacklistManager implements BlacklistManager {
     final double failureThreshold = this.blacklistDisableFailureThreshold *
         numberOfNodeManagerHosts;
     if (currentBlacklistSize < failureThreshold) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("blacklist size " + currentBlacklistSize + " is less than " +
-            "failure threshold ratio " + blacklistDisableFailureThreshold +
-            " out of total usable nodes " + numberOfNodeManagerHosts);
-      }
+      LOG.debug("blacklist size {} is less than failure threshold ratio {}"
+          + " out of total usable nodes {}", currentBlacklistSize,
+          blacklistDisableFailureThreshold, numberOfNodeManagerHosts);
       ret = ResourceBlacklistRequest.newInstance(blacklist, EMPTY_LIST);
     } else {
       LOG.warn("Ignoring Blacklists, blacklist size " + currentBlacklistSize
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
index 92e8880..71f88a4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
@@ -477,9 +477,7 @@ public class TimelineServiceV2Publisher extends AbstractSystemMetricsPublisher {
           UserGroupInformation.getCurrentUser());
     } catch (IOException e) {
       LOG.error("Error when publishing entity " + entity);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Error when publishing entity " + entity, e);
-      }
+      LOG.debug("Error when publishing entity {}", entity, e);
     }
   }
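
One detail in the TimelineServiceV2Publisher change above: when the last
argument has no matching placeholder and is a Throwable, slf4j logs it as the
exception, stack trace included, so LOG.debug("Error when publishing entity
{}", entity, e) keeps the old behavior of logging both the entity and the
exception. A small illustration (names are hypothetical, not from this patch):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Illustrative snippet, not part of the patch above.
    public class ThrowableLoggingSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(ThrowableLoggingSketch.class);

      void publish(Object entity) {
        try {
          throw new java.io.IOException("simulated publish failure");
        } catch (java.io.IOException e) {
          // One placeholder, two extra arguments: {} is filled with entity,
          // and e, being a trailing Throwable without a placeholder, is
          // logged with its stack trace.
          LOG.debug("Error when publishing entity {}", entity, e);
        }
      }
    }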
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java
index d8150f0..28a7449 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java
@@ -80,10 +80,8 @@ public class FifoCandidatesSelector
       // check if preemption disabled for the queue
       if (preemptionContext.getQueueByPartition(queueName,
           RMNodeLabelsManager.NO_LABEL).preemptionDisabled) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("skipping from queue=" + queueName
-              + " because it's a non-preemptable queue");
-        }
+        LOG.debug("skipping from queue={} because it's a"
+            + " non-preemptable queue", queueName);
         continue;
       }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
index 2e69b9f..7c57f9c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
@@ -108,10 +108,8 @@ public class FifoIntraQueuePreemptionPlugin
       Resources.addTo(actualPreemptNeeded, a1.getActuallyToBePreempted());
     }
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Selected to preempt " + actualPreemptNeeded
-          + " resource from partition:" + partition);
-    }
+    LOG.debug("Selected to preempt {} resource from partition:{}",
+        actualPreemptNeeded, partition);
     return resToObtainByPartition;
   }
 
@@ -445,9 +443,7 @@ public class FifoIntraQueuePreemptionPlugin
             tmpUser.amUsed);
         tmpUser.setUserLimit(userLimitResource);
 
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("TempUser:" + tmpUser);
-        }
+        LOG.debug("TempUser:{}", tmpUser);
 
         tmpUser.idealAssigned = Resources.createResource(0, 0);
         tq.addUserPerPartition(userName, tmpUser);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
index 3780c73..8a1b47b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
@@ -201,10 +201,8 @@ public class IntraQueueCandidatesSelector extends PreemptionCandidatesSelector {
       // Initialize used resource of a given user for rolling computation.
       rollingResourceUsagePerUser.put(user, Resources.clone(
           leafQueue.getUser(user).getResourceUsage().getUsed(partition)));
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Rolling resource usage for user:" + user + " is : "
-            + rollingResourceUsagePerUser.get(user));
-      }
+      LOG.debug("Rolling resource usage for user:{} is : {}", user,
+          rollingResourceUsagePerUser.get(user));
     }
   }
 
@@ -220,12 +218,8 @@ public class IntraQueueCandidatesSelector extends PreemptionCandidatesSelector {
 
     List<RMContainer> liveContainers = new ArrayList<>(app.getLiveContainers());
     sortContainers(liveContainers);
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(
-          "totalPreemptedResourceAllowed for preemption at this round is :"
-              + totalPreemptedResourceAllowed);
-    }
+    LOG.debug("totalPreemptedResourceAllowed for preemption at this"
+        + " round is :{}", totalPreemptedResourceAllowed);
 
     Resource rollingUsedResourcePerUser = rollingResourceUsagePerUser
         .get(app.getUser());
@@ -257,13 +251,11 @@ public class IntraQueueCandidatesSelector extends PreemptionCandidatesSelector {
       // UserLimit (or equals to), we must skip such containers.
       if (fifoPreemptionComputePlugin.skipContainerBasedOnIntraQueuePolicy(app,
           clusterResource, rollingUsedResourcePerUser, c)) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(
-              "Skipping container: " + c.getContainerId() + " with resource:"
-                  + c.getAllocatedResource() + " as UserLimit for user:"
-                  + app.getUser() + " with resource usage: "
-                  + rollingUsedResourcePerUser + " is going under UL");
-        }
+        LOG.debug("Skipping container: {} with resource:{} as UserLimit for"
+            + " user:{} with resource usage: {} is going under UL",
+            c.getContainerId(), c.getAllocatedResource(), app.getUser(),
+            rollingUsedResourcePerUser);
+
         break;
       }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
index 996d5a0..7563c36 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
@@ -166,10 +166,8 @@ public class PreemptableResourceCalculator
       // check if preemption disabled for the queue
       if (context.getQueueByPartition(queueName,
           RMNodeLabelsManager.NO_LABEL).preemptionDisabled) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("skipping from queue=" + queueName
-              + " because it's a non-preemptable queue");
-        }
+        LOG.debug("skipping from queue={} because it's a non-preemptable"
+            + " queue", queueName);
         continue;
       }
 
@@ -208,10 +206,8 @@ public class PreemptableResourceCalculator
           // Only add resToObtain when it >= 0
           if (Resources.greaterThan(rc, clusterResource, resToObtain,
               Resources.none())) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Queue=" + queueName + " partition=" + qT.partition
-                  + " resource-to-obtain=" + resToObtain);
-            }
+            LOG.debug("Queue={} partition={} resource-to-obtain={}",
+                queueName, qT.partition, resToObtain);
           }
           qT.setActuallyToBePreempted(Resources.clone(resToObtain));
         } else {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
index 15513d2..352ea3d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
@@ -332,11 +332,9 @@ public class ProportionalCapacityPreemptionPolicy
         toPreemptPerSelector.values()) {
       toPreemptCount += containers.size();
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(
-          "Starting to preempt containers for selectedCandidates and size:"
-              + toPreemptCount);
-    }
+    LOG.debug(
+        "Starting to preempt containers for selectedCandidates and size:{}",
+        toPreemptCount);
 
     // preempt (or kill) the selected containers
     // We need toPreemptPerSelector here to match list of containers to
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/QueuePriorityContainerCandidateSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/QueuePriorityContainerCandidateSelector.java
index 5e38a9c..fd1e9c5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/QueuePriorityContainerCandidateSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/QueuePriorityContainerCandidateSelector.java
@@ -118,9 +118,7 @@ public class QueuePriorityContainerCandidateSelector
   }
 
   private void initializePriorityDigraph() {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Initializing priority preemption directed graph:");
-    }
+    LOG.debug("Initializing priority preemption directed graph:");
     // Make sure we iterate all leaf queue combinations
     for (String q1 : preemptionContext.getLeafQueueNames()) {
       for (String q2 : preemptionContext.getLeafQueueNames()) {
@@ -148,14 +146,10 @@ public class QueuePriorityContainerCandidateSelector
           int p2 = path2.get(j).relativePriority;
           if (p1 < p2) {
             priorityDigraph.put(q2, q1, true);
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("- Added priority ordering edge: " + q2 + " >> " + q1);
-            }
+            LOG.debug("- Added priority ordering edge: {} >> {}", q2, q1);
           } else if (p2 < p1) {
             priorityDigraph.put(q1, q2, true);
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("- Added priority ordering edge: " + q1 + " >> " + q2);
-            }
+            LOG.debug("- Added priority ordering edge: {} >> {}", q1, q2);
           }
         }
       }
@@ -463,21 +457,17 @@ public class QueuePriorityContainerCandidateSelector
       if (canPreempt) {
         touchedNodes.add(node.getNodeID());
 
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Trying to preempt following containers to make reserved "
-              + "container=" + reservedContainer.getContainerId() + " on node="
-              + node.getNodeID() + " can be allocated:");
-        }
+        LOG.debug("Trying to preempt following containers to make reserved "
+            + "container={} on node={} can be allocated:",
+            reservedContainer.getContainerId(), node.getNodeID());
 
         // Update to-be-preempt
         incToPreempt(demandingQueueName, node.getPartition(),
             reservedContainer.getReservedResource());
 
         for (RMContainer c : newlySelectedToBePreemptContainers) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(" --container=" + c.getContainerId() + " resource=" + c
-                .getReservedResource());
-          }
+          LOG.debug(" --container={} resource={}", c.getContainerId(),
+              c.getReservedResource());
 
           // Add to preemptMap
           CapacitySchedulerPreemptionUtils.addToPreemptMap(selectedCandidates,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ReservedContainerCandidatesSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ReservedContainerCandidatesSelector.java
index e2f5b4b..1aafbdc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ReservedContainerCandidatesSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ReservedContainerCandidatesSelector.java
@@ -105,11 +105,9 @@ public class ReservedContainerCandidatesSelector
           CapacitySchedulerPreemptionUtils.addToPreemptMap(selectedCandidates,
               curCandidates, c.getApplicationAttemptId(), c);
 
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(this.getClass().getName() + " Marked container=" + c
-                .getContainerId() + " from queue=" + c.getQueueName()
-                + " to be preemption candidates");
-          }
+          LOG.debug("{} Marked container={} from queue={} to be preemption"
+              + " candidates", this.getClass().getName(), c.getContainerId(),
+              c.getQueueName());
         }
       }
     }
@@ -215,10 +213,9 @@ public class ReservedContainerCandidatesSelector
       // An alternative approach is add a "penalty cost" if AM container is
       // selected. Here for safety, avoid preempt AM container in any cases
       if (c.isAMContainer()) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Skip selecting AM container on host=" + node.getNodeID()
-              + " AM container=" + c.getContainerId());
-        }
+        LOG.debug("Skip selecting AM container on host={} AM container={}",
+            node.getNodeID(), c.getContainerId());
+
         continue;
       }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMDelegatedNodeLabelsUpdater.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMDelegatedNodeLabelsUpdater.java
index 03a12d5..9ffeacf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMDelegatedNodeLabelsUpdater.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMDelegatedNodeLabelsUpdater.java
@@ -191,9 +191,9 @@ public class RMDelegatedNodeLabelsUpdater extends CompositeService {
           + "delegated-centralized node label configuration is enabled";
       LOG.error(msg);
       throw new IOException(msg);
-    } else if (LOG.isDebugEnabled()) {
-      LOG.debug("RM Node labels mapping provider class is : "
-          + nodeLabelsMappingProvider.getClass().toString());
+    } else {
+      LOG.debug("RM Node labels mapping provider class is : {}",
+          nodeLabelsMappingProvider.getClass());
     }
 
     return nodeLabelsMappingProvider;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
index 8387665..efff7e5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
@@ -380,10 +380,8 @@ public class FileSystemRMStateStore extends RMStateStore {
           DelegationKey key = new DelegationKey();
           key.readFields(fsIn);
           rmState.rmSecretManagerState.masterKeyState.add(key);
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Loaded delegation key: keyId=" + key.getKeyId()
-                + ", expirationDate=" + key.getExpiryDate());
-          }
+          LOG.debug("Loaded delegation key: keyId={}, expirationDate={}",
+              key.getKeyId(), key.getExpiryDate());
         } else if (childNodeName.startsWith(DELEGATION_TOKEN_PREFIX)) {
           RMDelegationTokenIdentifierData identifierData =
               RMStateStoreUtils.readRMDelegationTokenIdentifierData(fsIn);
@@ -392,10 +390,8 @@ public class FileSystemRMStateStore extends RMStateStore {
           long renewDate = identifierData.getRenewDate();
           rmState.rmSecretManagerState.delegationTokenState.put(identifier,
             renewDate);
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Loaded RMDelegationTokenIdentifier: " + identifier
-                + " renewDate=" + renewDate);
-          }
+          LOG.debug("Loaded RMDelegationTokenIdentifier: {} renewDate={}",
+              identifier, renewDate);
         } else {
           LOG.warn("Unknown file for recovering RMDelegationTokenSecretManager");
         }
@@ -992,9 +988,7 @@ public class FileSystemRMStateStore extends RMStateStore {
         throws com.google.protobuf.InvalidProtocolBufferException {
       if (childNodeName.startsWith(ApplicationId.appIdStrPrefix)) {
         // application
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Loading application from node: " + childNodeName);
-        }
+        LOG.debug("Loading application from node: {}", childNodeName);
         ApplicationStateDataPBImpl appState =
             new ApplicationStateDataPBImpl(
                 ApplicationStateDataProto.parseFrom(childData));
@@ -1004,10 +998,7 @@ public class FileSystemRMStateStore extends RMStateStore {
       } else if (childNodeName.startsWith(
           ApplicationAttemptId.appAttemptIdStrPrefix)) {
         // attempt
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Loading application attempt from node: "
-              + childNodeName);
-        }
+        LOG.debug("Loading application attempt from node: {}", childNodeName);
         ApplicationAttemptStateDataPBImpl attemptState =
             new ApplicationAttemptStateDataPBImpl(
                 ApplicationAttemptStateDataProto.parseFrom(childData));
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/LeveldbRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/LeveldbRMStateStore.java
index 6ac19bc..2420735 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/LeveldbRMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/LeveldbRMStateStore.java
@@ -355,11 +355,9 @@ public class LeveldbRMStateStore extends RMStateStore {
         DelegationKey masterKey = loadDelegationKey(entry.getValue());
         state.rmSecretManagerState.masterKeyState.add(masterKey);
         ++numKeys;
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Loaded RM delegation key from " + key
-              + ": keyId=" + masterKey.getKeyId()
-              + ", expirationDate=" + masterKey.getExpiryDate());
-        }
+        LOG.debug("Loaded RM delegation key from {}: keyId={},"
+            + " expirationDate={}", key, masterKey.getKeyId(),
+            masterKey.getExpiryDate());
       }
     } catch (DBException e) {
       throw new IOException(e);
@@ -401,10 +399,8 @@ public class LeveldbRMStateStore extends RMStateStore {
         state.rmSecretManagerState.delegationTokenState.put(tokenId,
             renewDate);
         ++numTokens;
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Loaded RM delegation token from " + key
-              + ": tokenId=" + tokenId + ", renewDate=" + renewDate);
-        }
+        LOG.debug("Loaded RM delegation token from {}: tokenId={},"
+            + " renewDate={}", key, tokenId, renewDate);
       }
     } catch (DBException e) {
       throw new IOException(e);
@@ -505,10 +501,7 @@ public class LeveldbRMStateStore extends RMStateStore {
       iter.next();
     }
     int numAttempts = appState.attempts.size();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Loaded application " + appId + " with " + numAttempts
-          + " attempts");
-    }
+    LOG.debug("Loaded application {} with {} attempts", appId, numAttempts);
     return numAttempts;
   }
 
@@ -621,9 +614,7 @@ public class LeveldbRMStateStore extends RMStateStore {
   protected void storeApplicationStateInternal(ApplicationId appId,
       ApplicationStateData appStateData) throws IOException {
     String key = getApplicationNodeKey(appId);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Storing state for app " + appId + " at " + key);
-    }
+    LOG.debug("Storing state for app {} at {}", appId, key);
     try {
       db.put(bytes(key), appStateData.getProto().toByteArray());
     } catch (DBException e) {
@@ -642,9 +633,7 @@ public class LeveldbRMStateStore extends RMStateStore {
       ApplicationAttemptId attemptId,
       ApplicationAttemptStateData attemptStateData) throws IOException {
     String key = getApplicationAttemptNodeKey(attemptId);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Storing state for attempt " + attemptId + " at " + key);
-    }
+    LOG.debug("Storing state for attempt {} at {}", attemptId, key);
     try {
       db.put(bytes(key), attemptStateData.getProto().toByteArray());
     } catch (DBException e) {
@@ -664,10 +653,7 @@ public class LeveldbRMStateStore extends RMStateStore {
       ApplicationAttemptId attemptId)
       throws IOException {
     String attemptKey = getApplicationAttemptNodeKey(attemptId);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Removing state for attempt " + attemptId + " at "
-          + attemptKey);
-    }
+    LOG.debug("Removing state for attempt {} at {}", attemptId, attemptKey);
     try {
       db.delete(bytes(attemptKey));
     } catch (DBException e) {
@@ -710,10 +696,9 @@ public class LeveldbRMStateStore extends RMStateStore {
       WriteBatch batch = db.createWriteBatch();
       try {
         String key = getReservationNodeKey(planName, reservationIdName);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Storing state for reservation " + reservationIdName
-              + " plan " + planName + " at " + key);
-        }
+        LOG.debug("Storing state for reservation {} plan {} at {}",
+            reservationIdName, planName, key);
+
         batch.put(bytes(key), reservationAllocation.toByteArray());
         db.write(batch);
       } finally {
@@ -733,10 +718,8 @@ public class LeveldbRMStateStore extends RMStateStore {
         String reservationKey =
             getReservationNodeKey(planName, reservationIdName);
         batch.delete(bytes(reservationKey));
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Removing state for reservation " + reservationIdName
-              + " plan " + planName + " at " + reservationKey);
-        }
+        LOG.debug("Removing state for reservation {} plan {} at {}",
+            reservationIdName, planName, reservationKey);
         db.write(batch);
       } finally {
         batch.close();
@@ -751,9 +734,7 @@ public class LeveldbRMStateStore extends RMStateStore {
     String tokenKey = getRMDTTokenNodeKey(tokenId);
     RMDelegationTokenIdentifierData tokenData =
         new RMDelegationTokenIdentifierData(tokenId, renewDate);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Storing token to " + tokenKey);
-    }
+    LOG.debug("Storing token to {}", tokenKey);
     try {
       WriteBatch batch = db.createWriteBatch();
       try {
@@ -763,10 +744,8 @@ public class LeveldbRMStateStore extends RMStateStore {
           try (DataOutputStream ds = new DataOutputStream(bs)) {
             ds.writeInt(tokenId.getSequenceNumber());
           }
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Storing " + tokenId.getSequenceNumber() + " to "
-                + RM_DT_SEQUENCE_NUMBER_KEY);   
-          }
+          LOG.debug("Storing {} to {}", tokenId.getSequenceNumber(),
+              RM_DT_SEQUENCE_NUMBER_KEY);
           batch.put(bytes(RM_DT_SEQUENCE_NUMBER_KEY), bs.toByteArray());
         }
         db.write(batch);
@@ -796,9 +775,7 @@ public class LeveldbRMStateStore extends RMStateStore {
   protected void removeRMDelegationTokenState(
       RMDelegationTokenIdentifier tokenId) throws IOException {
     String tokenKey = getRMDTTokenNodeKey(tokenId);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Removing token at " + tokenKey);
-    }
+    LOG.debug("Removing token at {}", tokenKey);
     try {
       db.delete(bytes(tokenKey));
     } catch (DBException e) {
@@ -810,9 +787,7 @@ public class LeveldbRMStateStore extends RMStateStore {
   protected void storeRMDTMasterKeyState(DelegationKey masterKey)
       throws IOException {
     String dbKey = getRMDTMasterKeyNodeKey(masterKey);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Storing token master key to " + dbKey);
-    }
+    LOG.debug("Storing token master key to {}", dbKey);
     ByteArrayOutputStream os = new ByteArrayOutputStream();
     DataOutputStream out = new DataOutputStream(os);
     try {
@@ -831,9 +806,7 @@ public class LeveldbRMStateStore extends RMStateStore {
   protected void removeRMDTMasterKeyState(DelegationKey masterKey)
       throws IOException {
     String dbKey = getRMDTMasterKeyNodeKey(masterKey);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Removing token master key at " + dbKey);
-    }
+    LOG.debug("Removing token master key at {}", dbKey);
     try {
       db.delete(bytes(dbKey));
     } catch (DBException e) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
index 98fca0f..a5e7748 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
@@ -377,9 +377,7 @@ public abstract class RMStateStore extends AbstractService {
       ApplicationAttemptStateData attemptState =
           ((RMStateStoreAppAttemptEvent) event).getAppAttemptState();
       try {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Storing info for attempt: " + attemptState.getAttemptId());
-        }
+        LOG.debug("Storing info for attempt: {}", attemptState.getAttemptId());
         store.storeApplicationAttemptStateInternal(attemptState.getAttemptId(),
             attemptState);
         store.notifyApplicationAttempt(new RMAppAttemptEvent
@@ -408,9 +406,8 @@ public abstract class RMStateStore extends AbstractService {
       ApplicationAttemptStateData attemptState =
           ((RMStateUpdateAppAttemptEvent) event).getAppAttemptState();
       try {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Updating info for attempt: " + attemptState.getAttemptId());
-        }
+        LOG.debug("Updating info for attempt: {}",
+            attemptState.getAttemptId());
         store.updateApplicationAttemptStateInternal(attemptState.getAttemptId(),
             attemptState);
         store.notifyApplicationAttempt(new RMAppAttemptEvent
@@ -1235,9 +1232,7 @@ public abstract class RMStateStore extends AbstractService {
     this.writeLock.lock();
     try {
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Processing event of type " + event.getType());
-      }
+      LOG.debug("Processing event of type {}", event.getType());
 
       final RMStateStoreState oldState = getRMStateStoreState();
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
index 16b6bdc..dcf891a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
@@ -426,7 +426,7 @@ public class ZKRMStateStore extends RMStateStore {
     }
 
     builder.append(getStat.toString());
-    LOG.debug(builder.toString());
+    LOG.debug("{}", builder);
   }
 
   private void setRootNodeAcls() throws Exception {
@@ -536,9 +536,7 @@ public class ZKRMStateStore extends RMStateStore {
     List<String> planNodes = getChildren(reservationRoot);
 
     for (String planName : planNodes) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Loading plan from znode: " + planName);
-      }
+      LOG.debug("Loading plan from znode: {}", planName);
 
       String planNodePath = getNodePath(reservationRoot, planName);
       List<String> reservationNodes = getChildren(planNodePath);
@@ -547,9 +545,7 @@ public class ZKRMStateStore extends RMStateStore {
         String reservationNodePath =
             getNodePath(planNodePath, reservationNodeName);
 
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Loading reservation from znode: " + reservationNodePath);
-        }
+        LOG.debug("Loading reservation from znode: {}", reservationNodePath);
 
         byte[] reservationData = getData(reservationNodePath);
         ReservationAllocationStateProto allocationState =
@@ -610,10 +606,9 @@ public class ZKRMStateStore extends RMStateStore {
           key.readFields(fsIn);
           rmState.rmSecretManagerState.masterKeyState.add(key);
 
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Loaded delegation key: keyId=" + key.getKeyId()
-                + ", expirationDate=" + key.getExpiryDate());
-          }
+          LOG.debug("Loaded delegation key: keyId={}, expirationDate={}",
+              key.getKeyId(), key.getExpiryDate());
+
         }
       }
     }
@@ -656,8 +651,8 @@ public class ZKRMStateStore extends RMStateStore {
         } else if (splitIndex == 0
             && !(childNodeName.equals("1") || childNodeName.equals("2")
             || childNodeName.equals("3") || childNodeName.equals("4"))) {
-          LOG.debug("Unknown child node with name " + childNodeName + " under" +
-              tokenRoot);
+          LOG.debug("Unknown child node with name {} under {}",
+              childNodeName, tokenRoot);
         }
       }
       if (splitIndex != delegationTokenNodeSplitIndex && !dtNodeFound) {
@@ -685,10 +680,8 @@ public class ZKRMStateStore extends RMStateStore {
         long renewDate = identifierData.getRenewDate();
         rmState.rmSecretManagerState.delegationTokenState.put(identifier,
             renewDate);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Loaded RMDelegationTokenIdentifier: " + identifier
-              + " renewDate=" + renewDate);
-        }
+        LOG.debug("Loaded RMDelegationTokenIdentifier: {} renewDate={}",
+            identifier, renewDate);
       }
     }
   }
@@ -696,9 +689,7 @@ public class ZKRMStateStore extends RMStateStore {
   private void loadRMAppStateFromAppNode(RMState rmState, String appNodePath,
       String appIdStr) throws Exception {
     byte[] appData = getData(appNodePath);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Loading application from znode: " + appNodePath);
-    }
+    LOG.debug("Loading application from znode: {}", appNodePath);
     ApplicationId appId = ApplicationId.fromString(appIdStr);
     ApplicationStateDataPBImpl appState = new ApplicationStateDataPBImpl(
         ApplicationStateDataProto.parseFrom(appData));
@@ -736,7 +727,7 @@ public class ZKRMStateStore extends RMStateStore {
             }
           }
         } else if (!childNodeName.equals(RM_APP_ROOT_HIERARCHIES)){
-          LOG.debug("Unknown child node with name " + childNodeName + " under" +
+          LOG.debug("Unknown child node with name {} under {}", childNodeName,
               appRoot);
         }
       }
@@ -798,27 +789,21 @@ public class ZKRMStateStore extends RMStateStore {
       } catch (KeeperException.NoNodeException ke) {
         // It should be fine to swallow this exception as the parent znode we
         // intend to delete is already deleted.
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Unable to remove parent node " + parentZnode +
-              " as it does not exist.");
-        }
+        LOG.debug("Unable to remove parent node {} as it does not exist.",
+            parentZnode);
         return;
       }
       // No apps stored under parent path.
       if (children != null && children.isEmpty()) {
         try {
           zkManager.safeDelete(parentZnode, zkAcl, fencingNodePath);
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("No leaf znode exists. Removing parent node " +
-                parentZnode);
-          }
+          LOG.debug("No leaf znode exists. Removing parent node {}",
+              parentZnode);
         } catch (KeeperException.NotEmptyException ke) {
           // It should be fine to swallow this exception as the parent znode
           // has to be deleted only if it has no children. And this node has.
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Unable to remove app parent node " + parentZnode +
-                " as it has children.");
-          }
+          LOG.debug("Unable to remove app parent node {} as it has children.",
+              parentZnode);
         }
       }
     }
@@ -851,19 +836,16 @@ public class ZKRMStateStore extends RMStateStore {
       ApplicationStateData appStateDataPB) throws Exception {
     String nodeCreatePath = getLeafAppIdNodePath(appId.toString(), true);
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Storing info for app: " + appId + " at: " + nodeCreatePath);
-    }
+    LOG.debug("Storing info for app: {} at: {}", appId, nodeCreatePath);
 
     byte[] appStateData = appStateDataPB.getProto().toByteArray();
     if (appStateData.length <= zknodeLimit) {
       zkManager.safeCreate(nodeCreatePath, appStateData, zkAcl,
           CreateMode.PERSISTENT, zkAcl, fencingNodePath);
     } else {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Application state data size for " + appId + " is "
-            + appStateData.length);
-      }
+      LOG.debug("Application state data size for {} is {}",
+          appId, appStateData.length);
+
       throw new StoreLimitException("Application " + appId
           + " exceeds the maximum allowed size for application data. "
           + "See yarn.resourcemanager.zk-max-znode-size.bytes.");
@@ -896,10 +878,8 @@ public class ZKRMStateStore extends RMStateStore {
       }
     }
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Storing final state info for app: " + appId + " at: "
-          + nodeUpdatePath);
-    }
+    LOG.debug("Storing final state info for app: {} at: {}", appId,
+        nodeUpdatePath);
 
     byte[] appStateData = appStateDataPB.getProto().toByteArray();
 
@@ -909,10 +889,8 @@ public class ZKRMStateStore extends RMStateStore {
     } else {
       zkManager.safeCreate(nodeUpdatePath, appStateData, zkAcl,
           CreateMode.PERSISTENT, zkAcl, fencingNodePath);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Path " + nodeUpdatePath + " for " + appId + " didn't " +
-            "exist. Creating a new znode to update the application state.");
-      }
+      LOG.debug("Path {} for {} didn't exist. Creating a new znode to update"
+          + " the application state.", nodeUpdatePath, appId);
     }
   }
 
@@ -944,10 +922,8 @@ public class ZKRMStateStore extends RMStateStore {
     String path = getNodePath(appDirPath, appAttemptId.toString());
     byte[] attemptStateData = (attemptStateDataPB == null) ? null :
         attemptStateDataPB.getProto().toByteArray();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(operation + " info for attempt: " + appAttemptId + " at: "
-          + path);
-    }
+    LOG.debug("{} info for attempt: {} at: {}", operation, appAttemptId, path);
+
     switch (operation) {
     case UPDATE:
       if (exists(path)) {
@@ -956,10 +932,9 @@ public class ZKRMStateStore extends RMStateStore {
       } else {
         zkManager.safeCreate(path, attemptStateData, zkAcl,
             CreateMode.PERSISTENT, zkAcl, fencingNodePath);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Path " + path + " for " + appAttemptId + " didn't exist." +
-              " Created a new znode to update the application attempt state.");
-        }
+        LOG.debug("Path {} for {} didn't exist. Created a new znode to update"
+            + " the application attempt state.", path, appAttemptId);
+
       }
       break;
     case STORE:
@@ -1037,10 +1012,9 @@ public class ZKRMStateStore extends RMStateStore {
       }
     }
     if (safeRemove) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Removing info for app: " + removeAppId + " at: " +
-            appIdRemovePath + " and its attempts.");
-      }
+      LOG.debug("Removing info for app: {} at: {} and its attempts.",
+          removeAppId, appIdRemovePath);
+
       if (attempts != null) {
         for (ApplicationAttemptId attemptId : attempts) {
           String attemptRemovePath =
@@ -1064,10 +1038,8 @@ public class ZKRMStateStore extends RMStateStore {
       throws Exception {
     String nodeCreatePath = getLeafDelegationTokenNodePath(
         rmDTIdentifier.getSequenceNumber(), true);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Storing " + DELEGATION_TOKEN_PREFIX
-          + rmDTIdentifier.getSequenceNumber());
-    }
+    LOG.debug("Storing {}{}", DELEGATION_TOKEN_PREFIX,
+        rmDTIdentifier.getSequenceNumber());
 
     RMDelegationTokenIdentifierData identifierData =
         new RMDelegationTokenIdentifierData(rmDTIdentifier, renewDate);
@@ -1080,10 +1052,8 @@ public class ZKRMStateStore extends RMStateStore {
       // Update Sequence number only while storing DT
       seqOut.writeInt(rmDTIdentifier.getSequenceNumber());
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Storing " + dtSequenceNumberPath + ". SequenceNumber: "
-            + rmDTIdentifier.getSequenceNumber());
-      }
+      LOG.debug("Storing {}. SequenceNumber: {}", dtSequenceNumberPath,
+          rmDTIdentifier.getSequenceNumber());
 
       trx.setData(dtSequenceNumberPath, seqOs.toByteArray(), -1);
       trx.commit();
@@ -1110,10 +1080,8 @@ public class ZKRMStateStore extends RMStateStore {
       }
     }
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Removing RMDelegationToken_"
-          + rmDTIdentifier.getSequenceNumber());
-    }
+    LOG.debug("Removing RMDelegationToken_{}",
+        rmDTIdentifier.getSequenceNumber());
 
     zkManager.safeDelete(nodeRemovePath, zkAcl, fencingNodePath);
 
@@ -1141,10 +1109,9 @@ public class ZKRMStateStore extends RMStateStore {
     }
 
     if (pathExists) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Updating " + DELEGATION_TOKEN_PREFIX
-            + rmDTIdentifier.getSequenceNumber());
-      }
+      LOG.debug("Updating {}{}", DELEGATION_TOKEN_PREFIX,
+          rmDTIdentifier.getSequenceNumber());
+
       RMDelegationTokenIdentifierData identifierData =
           new RMDelegationTokenIdentifierData(rmDTIdentifier, renewDate);
       zkManager.safeSetData(nodeUpdatePath, identifierData.toByteArray(), -1,
@@ -1159,9 +1126,7 @@ public class ZKRMStateStore extends RMStateStore {
       DelegationKey delegationKey) throws Exception {
     String nodeCreatePath = getNodePath(dtMasterKeysRootPath,
         DELEGATION_KEY_PREFIX + delegationKey.getKeyId());
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Storing RMDelegationKey_" + delegationKey.getKeyId());
-    }
+    LOG.debug("Storing RMDelegationKey_{}", delegationKey.getKeyId());
     ByteArrayOutputStream os = new ByteArrayOutputStream();
     try(DataOutputStream fsOut = new DataOutputStream(os)) {
       delegationKey.write(fsOut);
@@ -1177,9 +1142,7 @@ public class ZKRMStateStore extends RMStateStore {
         getNodePath(dtMasterKeysRootPath, DELEGATION_KEY_PREFIX
             + delegationKey.getKeyId());
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Removing RMDelegationKey_" + delegationKey.getKeyId());
-    }
+    LOG.debug("Removing RMDelegationKey_{}", delegationKey.getKeyId());
 
     zkManager.safeDelete(nodeRemovePath, zkAcl, fencingNodePath);
   }
@@ -1218,10 +1181,8 @@ public class ZKRMStateStore extends RMStateStore {
     String planNodePath = getNodePath(reservationRoot, planName);
     String reservationPath = getNodePath(planNodePath, reservationIdName);
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Removing reservationallocation " + reservationIdName
-          + " for" + " plan " + planName);
-    }
+    LOG.debug("Removing reservationallocation {} for plan {}",
+        reservationIdName, planName);
 
     zkManager.safeDelete(reservationPath, zkAcl, fencingNodePath);
 
@@ -1253,24 +1214,18 @@ public class ZKRMStateStore extends RMStateStore {
     byte[] reservationData = reservationAllocation.toByteArray();
 
     if (!exists(planCreatePath)) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Creating plan node: " + planName + " at: " + planCreatePath);
-      }
+      LOG.debug("Creating plan node: {} at: {}", planName, planCreatePath);
 
       trx.create(planCreatePath, null, zkAcl, CreateMode.PERSISTENT);
     }
 
     if (isUpdate) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Updating reservation: " + reservationIdName + " in plan:"
-            + planName + " at: " + reservationPath);
-      }
+      LOG.debug("Updating reservation: {} in plan:{} at: {}",
+          reservationIdName, planName, reservationPath);
       trx.setData(reservationPath, reservationData, -1);
     } else {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Storing reservation: " + reservationIdName + " in plan:"
-            + planName + " at: " + reservationPath);
-      }
+      LOG.debug("Storing reservation: {} in plan:{} at: {}",
+          reservationIdName, planName, reservationPath);
       trx.create(reservationPath, reservationData, zkAcl,
           CreateMode.PERSISTENT);
     }
@@ -1350,10 +1305,8 @@ public class ZKRMStateStore extends RMStateStore {
         zkManager.safeCreate(rootNodePath, null, zkAcl, CreateMode.PERSISTENT,
             zkAcl, fencingNodePath);
       } catch (KeeperException.NodeExistsException e) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Unable to create app parent node " + rootNodePath +
-              " as it already exists.");
-        }
+        LOG.debug("Unable to create app parent node {} as it already exists.",
+            rootNodePath);
       }
     }
     return getNodePath(rootNodePath, nodeName.substring(split));
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/AbstractSchedulerPlanFollower.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/AbstractSchedulerPlanFollower.java
index 11811f1..3f35096 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/AbstractSchedulerPlanFollower.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/AbstractSchedulerPlanFollower.java
@@ -73,9 +73,7 @@ public abstract class AbstractSchedulerPlanFollower implements PlanFollower {
   @Override
   public synchronized void synchronizePlan(Plan plan, boolean shouldReplan) {
     String planQueueName = plan.getQueueName();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Running plan follower edit policy for plan: " + planQueueName);
-    }
+    LOG.debug("Running plan follower edit policy for plan: {}", planQueueName);
     // align with plan step
     long step = plan.getStep();
     long now = clock.getTime();
@@ -171,11 +169,9 @@ public abstract class AbstractSchedulerPlanFollower implements PlanFollower {
               calculateReservationToPlanRatio(plan.getResourceCalculator(),
                   clusterResources, planResources, capToAssign);
         }
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(
+        LOG.debug(
               "Assigning capacity of {} to queue {} with target capacity {}",
               capToAssign, currResId, targetCapacity);
-        }
         // set maxCapacity to 100% unless the job requires gang, in which
         // case we stick to capacity (as running early/before is likely a
         // waste of resources)
@@ -195,12 +191,10 @@ public abstract class AbstractSchedulerPlanFollower implements PlanFollower {
     }
     // compute the default queue capacity
     float defQCap = 1.0f - totalAssignedCapacity;
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(
+    LOG.debug(
           "PlanFollowerEditPolicyTask: total Plan Capacity: {} "
               + "currReservation: {} default-queue capacity: {}",
           planResources, numRes, defQCap);
-    }
     // set the default queue to eat-up all remaining capacity
     try {
       setQueueEntitlement(planQueueName, defReservationQueue, defQCap, 1.0f);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/DynamicResourceConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/DynamicResourceConfiguration.java
index b6eeb4c..3869a00 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/DynamicResourceConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/DynamicResourceConfiguration.java
@@ -87,8 +87,9 @@ public class DynamicResourceConfiguration extends Configuration {
 
   public void setVcoresPerNode(String node, int vcores) {
     setInt(getNodePrefix(node) + VCORES, vcores);
-    LOG.debug("DRConf - setVcoresPerNode: nodePrefix=" + getNodePrefix(node) +
-      ", vcores=" + vcores);
+    LOG.debug("DRConf - setVcoresPerNode: nodePrefix={}, vcores={}",
+        getNodePrefix(node), vcores);
+
   }
 
   public int getMemoryPerNode(String node) {
@@ -100,8 +101,9 @@ public class DynamicResourceConfiguration extends Configuration {
 
   public void setMemoryPerNode(String node, int memory) {
     setInt(getNodePrefix(node) + MEMORY, memory);
-    LOG.debug("DRConf - setMemoryPerNode: nodePrefix=" + getNodePrefix(node) +
-      ", memory=" + memory);
+    LOG.debug("DRConf - setMemoryPerNode: nodePrefix={}, memory={}",
+        getNodePrefix(node), memory);
+
   }
 
   public int getOverCommitTimeoutPerNode(String node) {
@@ -113,9 +115,8 @@ public class DynamicResourceConfiguration extends Configuration {
 
   public void setOverCommitTimeoutPerNode(String node, int overCommitTimeout) {
     setInt(getNodePrefix(node) + OVERCOMMIT_TIMEOUT, overCommitTimeout);
-    LOG.debug("DRConf - setOverCommitTimeoutPerNode: nodePrefix=" +
-      getNodePrefix(node) +
-        ", overCommitTimeout=" + overCommitTimeout);
+    LOG.debug("DRConf - setOverCommitTimeoutPerNode: nodePrefix={},"
+        + " overCommitTimeout={}", getNodePrefix(node), overCommitTimeout);
   }
 
   public String[] getNodes() {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index 9a16b77..ce67f2e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -908,8 +908,9 @@ public class RMAppImpl implements RMApp, Recoverable {
 
     try {
       ApplicationId appID = event.getApplicationId();
-      LOG.debug("Processing event for " + appID + " of type "
-          + event.getType());
+      LOG.debug("Processing event for {} of type {}",
+          appID, event.getType());
+
       final RMAppState oldState = getState();
       try {
         /* keep the master in sync with the state machine */
@@ -1025,8 +1026,8 @@ public class RMAppImpl implements RMApp, Recoverable {
   private void processNodeUpdate(RMAppNodeUpdateType type, RMNode node) {
     NodeState nodeState = node.getState();
     updatedNodes.put(node, RMAppNodeUpdateType.convertToNodeUpdateType(type));
-    LOG.debug("Received node update event:" + type + " for node:" + node
-        + " with state:" + nodeState);
+    LOG.debug("Received node update event:{} for node:{} with state:",
+        type, node, nodeState);
   }
 
   private static class RMAppTransition implements
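
A related conversion pitfall shown in the RMAppImpl hunk above: slf4j silently
ignores surplus non-Throwable arguments rather than failing, so the number of
{} placeholders has to match the argument list or values quietly drop out of
the log (the node-update message needs its third {} for nodeState to appear).
A short illustration (names are hypothetical, not from this patch):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Illustrative snippet, not part of the patch above.
    public class PlaceholderCountSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(PlaceholderCountSketch.class);

      void handle(String type, String node, String state) {
        // Two placeholders, three arguments: state is silently ignored
        // because it is not a Throwable and has no matching placeholder.
        LOG.debug("event:{} node:{}", type, node, state);

        // Matching counts log all three values.
        LOG.debug("event:{} node:{} state:{}", type, node, state);
      }
    }
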
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index e951bb0..73c0b6c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -541,10 +541,8 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
 
     final int diagnosticsLimitKC = getDiagnosticsLimitKCOrThrow(conf);
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(YarnConfiguration.APP_ATTEMPT_DIAGNOSTICS_LIMIT_KC + " : " +
-              diagnosticsLimitKC);
-    }
+    LOG.debug("{} : {}", YarnConfiguration.APP_ATTEMPT_DIAGNOSTICS_LIMIT_KC,
+        diagnosticsLimitKC);
 
     this.diagnostics = new BoundedAppender(diagnosticsLimitKC * 1024);
     this.rmApp = rmApp;
@@ -908,8 +906,8 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
 
     try {
       ApplicationAttemptId appAttemptID = event.getApplicationAttemptId();
-      LOG.debug("Processing event for " + appAttemptID + " of type "
-          + event.getType());
+      LOG.debug("Processing event for {} of type {}",
+          appAttemptID, event.getType());
       final RMAppAttemptState oldState = getAppAttemptState();
       try {
         /* keep the master in sync with the state machine */
@@ -1104,18 +1102,15 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
         int numNodes =
             RMServerUtils.getApplicableNodeCountForAM(appAttempt.rmContext,
                 appAttempt.conf, appAttempt.amReqs);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Setting node count for blacklist to " + numNodes);
-        }
+        LOG.debug("Setting node count for blacklist to {}", numNodes);
         appAttempt.getAMBlacklistManager().refreshNodeHostCount(numNodes);
 
         ResourceBlacklistRequest amBlacklist =
             appAttempt.getAMBlacklistManager().getBlacklistUpdates();
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Using blacklist for AM: additions(" +
-              amBlacklist.getBlacklistAdditions() + ") and removals(" +
-              amBlacklist.getBlacklistRemovals() + ")");
-        }
+
+        LOG.debug("Using blacklist for AM: additions({}) and removals({})",
+            amBlacklist.getBlacklistAdditions(),
+            amBlacklist.getBlacklistRemovals());
 
         QueueInfo queueInfo = null;
         for (ResourceRequest amReq : appAttempt.amReqs) {
@@ -1148,10 +1143,8 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
 
             String labelExp = RMNodeLabelsManager.NO_LABEL;
             if (queueInfo != null) {
-              if (LOG.isDebugEnabled()) {
-                LOG.debug("Setting default node label expression : " + queueInfo
-                    .getDefaultNodeLabelExpression());
-              }
+              LOG.debug("Setting default node label expression : {}",
+                  queueInfo.getDefaultNodeLabelExpression());
               labelExp = queueInfo.getDefaultNodeLabelExpression();
             }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
index a251dcd..1185170 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
@@ -467,10 +467,8 @@ public class RMContainerImpl implements RMContainer {
   
   @Override
   public void handle(RMContainerEvent event) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Processing " + event.getContainerId() + " of type " + event
-              .getType());
-    }
+    LOG.debug("Processing {} of type {}", event.getContainerId(),
+        event.getType());
 
     writeLock.lock();
     try {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
index 963b86d..37f3a37 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
@@ -669,7 +669,7 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
   }
 
   public void handle(RMNodeEvent event) {
-    LOG.debug("Processing " + event.getNodeId() + " of type " + event.getType());
+    LOG.debug("Processing {} of type {}", event.getNodeId(), event.getType());
     writeLock.lock();
     try {
       NodeState oldState = getState();
@@ -1405,11 +1405,8 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
             + " no further processing");
         continue;
       } else if (!runningApplications.contains(containerAppId)) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Container " + containerId
-              + " is the first container get launched for application "
-              + containerAppId);
-        }
+        LOG.debug("Container {} is the first container get launched for"
+            + " application {}", containerId, containerAppId);
         handleRunningAppOnNode(this, context, containerAppId, nodeId);
       }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index 7e22fd5..92dde94 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -405,9 +405,7 @@ public abstract class AbstractYarnScheduler
       ApplicationAttemptId appAttemptId) {
     SchedulerApplicationAttempt attempt = getApplicationAttempt(appAttemptId);
     if (attempt == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Request for appInfo of unknown attempt " + appAttemptId);
-      }
+      LOG.debug("Request for appInfo of unknown attempt {}", appAttemptId);
       return null;
     }
     return new SchedulerAppReport(attempt);
@@ -418,9 +416,7 @@ public abstract class AbstractYarnScheduler
       ApplicationAttemptId appAttemptId) {
     SchedulerApplicationAttempt attempt = getApplicationAttempt(appAttemptId);
     if (attempt == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Request for appInfo of unknown attempt " + appAttemptId);
-      }
+      LOG.debug("Request for appInfo of unknown attempt {}", appAttemptId);
       return null;
     }
     return attempt.getResourceUsageReport();
@@ -687,10 +683,9 @@ public abstract class AbstractYarnScheduler
       if (schedulerAttempt != null) {
         schedulerAttempt.removeRMContainer(containerId);
       }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Completed container: " + rmContainer.getContainerId() +
-            " in state: " + rmContainer.getState() + " event:" + event);
-      }
+      LOG.debug("Completed container: {} in state: {} event:{}",
+          rmContainer.getContainerId(), rmContainer.getState(), event);
+
       SchedulerNode node = getSchedulerNode(rmContainer.getNodeId());
       if (node != null) {
         node.releaseContainer(rmContainer.getContainerId(), false);
@@ -1082,7 +1077,7 @@ public abstract class AbstractYarnScheduler
     List<ContainerId> untrackedContainerIdList = new ArrayList<ContainerId>();
     for (ContainerStatus completedContainer : completedContainers) {
       ContainerId containerId = completedContainer.getContainerId();
-      LOG.debug("Container FINISHED: " + containerId);
+      LOG.debug("Container FINISHED: {}", containerId);
       RMContainer container = getRMContainer(containerId);
       completedContainer(container,
           completedContainer, RMContainerEventType.FINISHED);
@@ -1147,10 +1142,8 @@ public abstract class AbstractYarnScheduler
    * @param nm The RMNode corresponding to the NodeManager
    */
   protected void nodeUpdate(RMNode nm) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("nodeUpdate: " + nm +
-          " cluster capacity: " + getClusterResource());
-    }
+    LOG.debug("nodeUpdate: {} cluster capacity: {}",
+        nm, getClusterResource());
 
     // Process new container information
     // NOTICE: it is possible to not find the NodeID as a node can be
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ActiveUsersManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ActiveUsersManager.java
index 15df051..63c9214 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ActiveUsersManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ActiveUsersManager.java
@@ -67,10 +67,8 @@ public class ActiveUsersManager implements AbstractUsersManager {
       usersApplications.put(user, userApps);
       ++activeUsers;
       metrics.incrActiveUsers();
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("User " + user + " added to activeUsers, currently: "
-            + activeUsers);
-      }
+      LOG.debug("User {} added to activeUsers, currently: {}", user,
+          activeUsers);
     }
     if (userApps.add(applicationId)) {
       metrics.activateApp(user);
@@ -96,10 +94,8 @@ public class ActiveUsersManager implements AbstractUsersManager {
         usersApplications.remove(user);
         --activeUsers;
         metrics.decrActiveUsers();
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("User " + user + " removed from activeUsers, currently: "
-              + activeUsers);
-        }
+        LOG.debug("User {} removed from activeUsers, currently: {}", user,
+            activeUsers);
       }
     }
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
index bd6e7ca..943f311 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
@@ -719,13 +719,10 @@ public class AppSchedulingInfo {
   public static void updateMetrics(ApplicationId applicationId, NodeType type,
       SchedulerNode node, Container containerAllocated, String user,
       Queue queue) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("allocate: applicationId=" + applicationId + " container="
-          + containerAllocated.getId() + " host=" + containerAllocated
-          .getNodeId().toString() + " user=" + user + " resource="
-          + containerAllocated.getResource() + " type="
-          + type);
-    }
+    LOG.debug("allocate: applicationId={} container={} host={} user={}"
+        + " resource={} type={}", applicationId, containerAllocated.getId(),
+        containerAllocated.getNodeId(), user, containerAllocated.getResource(),
+        type);
     if(node != null) {
       queue.getMetrics().allocateResources(node.getPartition(), user, 1,
           containerAllocated.getResource(), true);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerAppUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerAppUtils.java
index efa410c..7f88369 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerAppUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerAppUtils.java
@@ -26,20 +26,14 @@ public class SchedulerAppUtils {
       SchedulerApplicationAttempt application, SchedulerNode node,
       Logger log) {
     if (application.isPlaceBlacklisted(node.getNodeName())) {
-      if (log.isDebugEnabled()) {
-        log.debug("Skipping 'host' " + node.getNodeName() +
-            " for " + application.getApplicationId() +
-            " since it has been blacklisted");
-      }
+      log.debug("Skipping 'host' {} for {} since it has been blacklisted",
+          node.getNodeName(), application.getApplicationId());
       return true;
     }
 
     if (application.isPlaceBlacklisted(node.getRackName())) {
-      if (log.isDebugEnabled()) {
-        log.debug("Skipping 'rack' " + node.getRackName() +
-            " for " + application.getApplicationId() +
-            " since it has been blacklisted");
-      }
+      log.debug("Skipping 'rack' {} for {} since it has been blacklisted",
+          node.getRackName(), application.getApplicationId());
       return true;
     }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
index 73aa2c4..74ac3ae 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
@@ -240,10 +240,8 @@ public class SchedulerUtils {
     // default label expression of queue
     if (labelExp == null && queueInfo != null && ResourceRequest.ANY
         .equals(resReq.getResourceName())) {
-      if ( LOG.isDebugEnabled()) {
-        LOG.debug("Setting default node label expression : " + queueInfo
-            .getDefaultNodeLabelExpression());
-      }
+      LOG.debug("Setting default node label expression : {}", queueInfo
+          .getDefaultNodeLabelExpression());
       labelExp = queueInfo.getDefaultNodeLabelExpression();
     }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractAutoCreatedLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractAutoCreatedLeafQueue.java
index f351119..1ce67d6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractAutoCreatedLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractAutoCreatedLeafQueue.java
@@ -93,7 +93,7 @@ public class AbstractAutoCreatedLeafQueue extends LeafQueue {
       // this might be revised later
       setMaxCapacity(nodeLabel, entitlement.getMaxCapacity());
       if (LOG.isDebugEnabled()) {
-        LOG.debug("successfully changed to " + capacity + " for queue " + this
+        LOG.debug("successfully changed to {} for queue {}", capacity, this
             .getQueueName());
       }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index 358eade..21c385a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -502,19 +502,16 @@ public abstract class AbstractCSQueue implements CSQueue {
       Resource maxResource = conf.getMaximumResourceRequirement(label,
           queuePath, resourceTypes);
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("capacityConfigType is '" + capacityConfigType
-            + "' for queue '" + getQueueName());
-      }
+      LOG.debug("capacityConfigType is '{}' for queue {}",
+          capacityConfigType, getQueueName());
+
       if (this.capacityConfigType.equals(CapacityConfigType.NONE)) {
         this.capacityConfigType = (!minResource.equals(Resources.none())
             && queueCapacities.getAbsoluteCapacity(label) == 0f)
                 ? CapacityConfigType.ABSOLUTE_RESOURCE
                 : CapacityConfigType.PERCENTAGE;
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("capacityConfigType is updated as '" + capacityConfigType
-              + "' for queue '" + getQueueName());
-        }
+        LOG.debug("capacityConfigType is updated as '{}' for queue {}",
+            capacityConfigType, getQueueName());
       }
 
       validateAbsoluteVsPercentageCapacityConfig(minResource);
@@ -551,11 +548,9 @@ public abstract class AbstractCSQueue implements CSQueue {
         }
       }
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Updating absolute resource configuration for queue:"
-            + getQueueName() + " as minResource=" + minResource
-            + " and maxResource=" + maxResource);
-      }
+      LOG.debug("Updating absolute resource configuration for queue:{} as"
+          + " minResource={} and maxResource={}", getQueueName(), minResource,
+          maxResource);
 
       queueResourceQuotas.setConfiguredMinResource(label, minResource);
       queueResourceQuotas.setConfiguredMaxResource(label, maxResource);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractManagedParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractManagedParentQueue.java
index eb3221e..3e90863 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractManagedParentQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractManagedParentQueue.java
@@ -105,9 +105,7 @@ public abstract class AbstractManagedParentQueue extends ParentQueue {
         CSQueue cs = qiter.next();
         if (cs.equals(childQueue)) {
           qiter.remove();
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Removed child queue: {}" + cs.getQueueName());
-          }
+          LOG.debug("Removed child queue: {}", cs.getQueueName());
         }
       }
     } finally {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 7b9a13c..49f1954 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -862,10 +862,8 @@ public class CapacityScheduler extends
       applications.put(applicationId, application);
       LOG.info("Accepted application " + applicationId + " from user: " + user
           + ", in queue: " + queueName);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(
-            applicationId + " is recovering. Skip notifying APP_ACCEPTED");
-      }
+      LOG.debug("{} is recovering. Skip notifying APP_ACCEPTED",
+          applicationId);
     } finally {
       writeLock.unlock();
     }
@@ -1254,10 +1252,8 @@ public class CapacityScheduler extends
       updateDemandForQueue.getOrderingPolicy().demandUpdated(application);
     }
 
-    if (LOG.isDebugEnabled()) {
-      LOG.info("Allocation for application " + applicationAttemptId + " : "
-          + allocation + " with cluster resource : " + getClusterResource());
-    }
+    LOG.debug("Allocation for application {} : {} with cluster resource : {}",
+        applicationAttemptId, allocation, getClusterResource());
     return allocation;
   }
 
@@ -1508,10 +1504,8 @@ public class CapacityScheduler extends
         }
 
         if (offswitchCount >= offswitchPerHeartbeatLimit) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Assigned maximum number of off-switch containers: "
-                + offswitchCount + ", assignments so far: " + assignment);
-          }
+          LOG.debug("Assigned maximum number of off-switch containers: {},"
+              + " assignments so far: {}", offswitchCount, assignment);
         }
       }
     }
@@ -1523,11 +1517,8 @@ public class CapacityScheduler extends
   private CSAssignment allocateContainerOnSingleNode(
       CandidateNodeSet<FiCaSchedulerNode> candidates, FiCaSchedulerNode node,
       boolean withNodeHeartbeat) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(
-          "Trying to schedule on node: " + node.getNodeName() + ", available: "
-              + node.getUnallocatedResource());
-    }
+    LOG.debug("Trying to schedule on node: {}, available: {}",
+        node.getNodeName(), node.getUnallocatedResource());
 
     // Backward compatible way to make sure previous behavior which allocation
     // driven by node heartbeat works.
@@ -1555,11 +1546,8 @@ public class CapacityScheduler extends
       }
 
       // Try to fulfill the reservation
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Trying to fulfill reservation for application "
-            + reservedApplication.getApplicationId() + " on node: " + node
-            .getNodeID());
-      }
+      LOG.debug("Trying to fulfill reservation for application {} on node: {}",
+          reservedApplication.getApplicationId(), node.getNodeID());
 
       LeafQueue queue = ((LeafQueue) reservedApplication.getQueue());
       assignment = queue.assignContainers(getClusterResource(), candidates,
@@ -1602,11 +1590,9 @@ public class CapacityScheduler extends
 
     // Do not schedule if there are any reservations to fulfill on the node
     if (node.getReservedContainer() != null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Skipping scheduling since node " + node.getNodeID()
-            + " is reserved by application " + node.getReservedContainer()
-            .getContainerId().getApplicationAttemptId());
-      }
+      LOG.debug("Skipping scheduling since node {} is reserved by"
+          + " application {}", node.getNodeID(), node.getReservedContainer().
+          getContainerId().getApplicationAttemptId());
       return null;
     }
 
@@ -1616,10 +1602,8 @@ public class CapacityScheduler extends
     if (calculator.computeAvailableContainers(Resources
             .add(node.getUnallocatedResource(), node.getTotalKillableResources()),
         minimumAllocation) <= 0) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("This node or node partition doesn't have available or" +
-            " preemptible resource");
-      }
+      LOG.debug("This node or node partition doesn't have available or" +
+          " preemptible resource");
       return null;
     }
 
@@ -1697,10 +1681,8 @@ public class CapacityScheduler extends
         && preemptionManager.getKillableResource(
         CapacitySchedulerConfiguration.ROOT, candidates.getPartition())
         == Resources.none()) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("This node or this node partition doesn't have available or"
-            + "killable resource");
-      }
+      LOG.debug("This node or this node partition doesn't have available or"
+          + "killable resource");
       return null;
     }
 
@@ -2128,10 +2110,8 @@ public class CapacityScheduler extends
 
   @Override
   public void killReservedContainer(RMContainer container) {
-    if(LOG.isDebugEnabled()){
-      LOG.debug(SchedulerEventType.KILL_RESERVED_CONTAINER + ":"
-          + container.toString());
-    }
+    LOG.debug("{}:{}", SchedulerEventType.KILL_RESERVED_CONTAINER, container);
+
     // To think: What happens if this is no longer a reserved container, for
     // e.g if the reservation became an allocation.
     super.completedContainer(container,
@@ -2144,11 +2124,8 @@ public class CapacityScheduler extends
   @Override
   public void markContainerForPreemption(ApplicationAttemptId aid,
       RMContainer cont) {
-    if(LOG.isDebugEnabled()){
-      LOG.debug(SchedulerEventType.MARK_CONTAINER_FOR_PREEMPTION
-            + ": appAttempt:" + aid.toString() + " container: "
-            + cont.toString());
-    }
+    LOG.debug("{}: appAttempt:{} container:{}",
+        SchedulerEventType.MARK_CONTAINER_FOR_PREEMPTION, aid, cont);
     FiCaSchedulerApp app = getApplicationAttempt(aid);
     if (app != null) {
       app.markContainerForPreemption(cont.getContainerId());
@@ -2165,10 +2142,8 @@ public class CapacityScheduler extends
       RMContainer killableContainer) {
     writeLock.lock();
     try {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(SchedulerEventType.MARK_CONTAINER_FOR_KILLABLE + ": container"
-            + killableContainer.toString());
-      }
+      LOG.debug("{}: container {}",
+          SchedulerEventType.MARK_CONTAINER_FOR_KILLABLE, killableContainer);
 
       if (!isLazyPreemptionEnabled) {
         super.completedContainer(killableContainer, SchedulerUtils
@@ -2201,11 +2176,8 @@ public class CapacityScheduler extends
       RMContainer nonKillableContainer) {
     writeLock.lock();
     try {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(
-            SchedulerEventType.MARK_CONTAINER_FOR_NONKILLABLE + ": container"
-                + nonKillableContainer.toString());
-      }
+      LOG.debug("{}: container {}", SchedulerEventType.
+          MARK_CONTAINER_FOR_NONKILLABLE, nonKillableContainer);
 
       FiCaSchedulerNode node = getSchedulerNode(
           nonKillableContainer.getAllocatedNode());
@@ -2233,10 +2205,8 @@ public class CapacityScheduler extends
       QueueACL acl, String queueName) {
     CSQueue queue = getQueue(queueName);
     if (queue == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("ACL not found for queue access-type " + acl + " for queue "
-            + queueName);
-      }
+      LOG.debug("ACL not found for queue access-type {} for queue {}",
+          acl, queueName);
       return false;
     }
     return queue.hasAccess(acl, callerUGI);
@@ -3001,9 +2971,7 @@ public class CapacityScheduler extends
       }
     }
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Try to commit allocation proposal=" + request);
-    }
+    LOG.debug("Try to commit allocation proposal={}", request);
 
     boolean isSuccess = false;
     if (attemptId != null) {
@@ -3026,10 +2994,8 @@ public class CapacityScheduler extends
           LOG.info("Failed to accept allocation proposal");
         }
 
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Allocation proposal accepted=" + isSuccess + ", proposal="
-              + request);
-        }
+        LOG.debug("Allocation proposal accepted={}, proposal={}", isSuccess,
+            request);
 
         // Update unconfirmed allocated resource.
         if (updateUnconfirmedAllocatedResource) {
@@ -3066,46 +3032,35 @@ public class CapacityScheduler extends
       FiCaSchedulerNode targetNode) {
     writeLock.lock();
     try {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Trying to move container=" + toBeMovedContainer + " to node="
-            + targetNode.getNodeID());
-      }
+      LOG.debug("Trying to move container={} to node={}",
+          toBeMovedContainer, targetNode.getNodeID());
 
       FiCaSchedulerNode sourceNode = getNode(toBeMovedContainer.getNodeId());
       if (null == sourceNode) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Failed to move reservation, cannot find source node="
-              + toBeMovedContainer.getNodeId());
-        }
+        LOG.debug("Failed to move reservation, cannot find source node={}",
+            toBeMovedContainer.getNodeId());
         return false;
       }
 
       // Target node updated?
       if (getNode(targetNode.getNodeID()) != targetNode) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(
-              "Failed to move reservation, node updated or removed, moving "
-                  + "cancelled.");
-        }
+        LOG.debug("Failed to move reservation, node updated or removed,"
+            + " moving cancelled.");
         return false;
       }
 
       // Target node's reservation status changed?
       if (targetNode.getReservedContainer() != null) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(
-              "Target node's reservation status changed, moving cancelled.");
-        }
+        LOG.debug("Target node's reservation status changed,"
+            + " moving cancelled.");
         return false;
       }
 
       FiCaSchedulerApp app = getApplicationAttempt(
           toBeMovedContainer.getApplicationAttemptId());
       if (null == app) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Cannot find to-be-moved container's application="
-              + toBeMovedContainer.getApplicationAttemptId());
-        }
+        LOG.debug("Cannot find to-be-moved container's application={}",
+            toBeMovedContainer.getApplicationAttemptId());
         return false;
       }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index 589a9ce..a88beef 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -462,8 +462,9 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur
       throw new IllegalArgumentException(
           "Illegal " + "capacity of " + capacity + " for queue " + queue);
     }
-    LOG.debug("CSConf - getCapacity: queuePrefix=" + getQueuePrefix(queue)
-        + ", capacity=" + capacity);
+    LOG.debug("CSConf - getCapacity: queuePrefix={}, capacity={}",
+        getQueuePrefix(queue), capacity);
+
     return capacity;
   }
   
@@ -473,8 +474,9 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur
           "Cannot set capacity, root queue has a fixed capacity of 100.0f");
     }
     setFloat(getQueuePrefix(queue) + CAPACITY, capacity);
-    LOG.debug("CSConf - setCapacity: queuePrefix=" + getQueuePrefix(queue) + 
-        ", capacity=" + capacity);
+    LOG.debug("CSConf - setCapacity: queuePrefix={}, capacity={}",
+        getQueuePrefix(queue), capacity);
+
   }
 
   @VisibleForTesting
@@ -484,8 +486,9 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur
           "Cannot set capacity, root queue has a fixed capacity");
     }
     set(getQueuePrefix(queue) + CAPACITY, absoluteResourceCapacity);
-    LOG.debug("CSConf - setCapacity: queuePrefix=" + getQueuePrefix(queue)
-        + ", capacity=" + absoluteResourceCapacity);
+    LOG.debug("CSConf - setCapacity: queuePrefix={}, capacity={}",
+        getQueuePrefix(queue), absoluteResourceCapacity);
+
   }
 
   public float getNonLabeledQueueMaximumCapacity(String queue) {
@@ -515,8 +518,8 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur
           "maximum-capacity of " + maxCapacity + " for queue " + queue);
     }
     setFloat(getQueuePrefix(queue) + MAXIMUM_CAPACITY, maxCapacity);
-    LOG.debug("CSConf - setMaxCapacity: queuePrefix=" + getQueuePrefix(queue) + 
-        ", maxCapacity=" + maxCapacity);
+    LOG.debug("CSConf - setMaxCapacity: queuePrefix={}, maxCapacity={}",
+        getQueuePrefix(queue), maxCapacity);
   }
   
   public void setCapacityByLabel(String queue, String label, float capacity) {
@@ -579,8 +582,8 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur
 
   public void setUserLimit(String queue, int userLimit) {
     setInt(getQueuePrefix(queue) + USER_LIMIT, userLimit);
-    LOG.debug("here setUserLimit: queuePrefix=" + getQueuePrefix(queue) + 
-        ", userLimit=" + getUserLimit(queue));
+    LOG.debug("here setUserLimit: queuePrefix={}, userLimit={}",
+        getQueuePrefix(queue), getUserLimit(queue));
   }
   
   public float getUserLimitFactor(String queue) {
@@ -846,7 +849,8 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur
   }
 
   public String[] getQueues(String queue) {
-    LOG.debug("CSConf - getQueues called for: queuePrefix=" + getQueuePrefix(queue));
+    LOG.debug("CSConf - getQueues called for: queuePrefix={}",
+        getQueuePrefix(queue));
     String[] queues = getStrings(getQueuePrefix(queue) + QUEUES);
     List<String> trimmedQueueNames = new ArrayList<String>();
     if (null != queues) {
@@ -855,16 +859,18 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur
       }
       queues = trimmedQueueNames.toArray(new String[0]);
     }
- 
-    LOG.debug("CSConf - getQueues: queuePrefix=" + getQueuePrefix(queue) + 
-        ", queues=" + ((queues == null) ? "" : StringUtils.arrayToString(queues)));
+
+    LOG.debug("CSConf - getQueues: queuePrefix={}, queues={}",
+        getQueuePrefix(queue),
+        ((queues == null) ? "" : StringUtils.arrayToString(queues)));
+
     return queues;
   }
   
   public void setQueues(String queue, String[] subQueues) {
     set(getQueuePrefix(queue) + QUEUES, StringUtils.arrayToString(subQueues));
-    LOG.debug("CSConf - setQueues: qPrefix=" + getQueuePrefix(queue) + 
-        ", queues=" + StringUtils.arrayToString(subQueues));
+    LOG.debug("CSConf - setQueues: qPrefix={}, queues={}",
+        getQueuePrefix(queue), StringUtils.arrayToString(subQueues));
   }
   
   public Resource getMinimumAllocation() {
@@ -1154,8 +1160,8 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur
 
   public void setReservable(String queue, boolean isReservable) {
     setBoolean(getQueuePrefix(queue) + IS_RESERVABLE, isReservable);
-    LOG.debug("here setReservableQueue: queuePrefix=" + getQueuePrefix(queue)
-        + ", isReservableQueue=" + isReservable(queue));
+    LOG.debug("here setReservableQueue: queuePrefix={}, isReservableQueue={}",
+        getQueuePrefix(queue), isReservable(queue));
   }
 
   @Override
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index 19a5647..3403544 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -152,10 +152,7 @@ public class LeafQueue extends AbstractCSQueue {
     // One time initialization is enough since it is static ordering policy
     this.pendingOrderingPolicy = new FifoOrderingPolicyForPendingApps();
 
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("LeafQueue:" + " name=" + queueName
-          + ", fullname=" + getQueuePath());
-    }
+    LOG.debug("LeafQueue: name={}, fullname={}", queueName, getQueuePath());
 
     setupQueueConfigs(cs.getClusterResource(), configuration);
 
@@ -727,11 +724,9 @@ public class LeafQueue extends AbstractCSQueue {
               Resources.clone(getAMResourceLimitPerPartition(nodePartition)));
       queueUsage.setUserAMLimit(nodePartition, preWeighteduserAMLimit);
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Effective user AM limit for \"" + userName + "\":" +
-            preWeighteduserAMLimit + ". " + "Effective weighted user AM limit: "
-            + userAMLimit + ". User weight: " + userWeight);
-      }
+      LOG.debug("Effective user AM limit for \"{}\":{}. Effective weighted"
+          + " user AM limit: {}. User weight: {}", userName,
+          preWeighteduserAMLimit, userAMLimit, userWeight);
       return userAMLimit;
     } finally {
       readLock.unlock();
@@ -776,17 +771,11 @@ public class LeafQueue extends AbstractCSQueue {
 
       metrics.setAMResouceLimit(nodePartition, amResouceLimit);
       queueUsage.setAMLimit(nodePartition, amResouceLimit);
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Queue: " + getQueueName() + ", node label : " +
-            nodePartition
-            + ", queue "
-            + "partition "
-            + "resource : " + queuePartitionResource + ','
-            + " queue current limit : " + queueCurrentLimit + ","
-            + " queue partition usable resource : "
-            + queuePartitionUsableResource + ","
-            + " amResourceLimit : " + amResouceLimit);
-      }
+      LOG.debug("Queue: {}, node label : {}, queue partition resource : {},"
+          + " queue current limit : {}, queue partition usable resource : {},"
+          + " amResourceLimit : {}", getQueueName(), nodePartition,
+          queuePartitionResource, queueCurrentLimit,
+          queuePartitionUsableResource, amResouceLimit);
       return amResouceLimit;
     } finally {
       writeLock.unlock();
@@ -848,11 +837,8 @@ public class LeafQueue extends AbstractCSQueue {
           } else{
             application.updateAMContainerDiagnostics(AMState.INACTIVATED,
                 CSAMContainerLaunchDiagnosticsConstants.QUEUE_AM_RESOURCE_LIMIT_EXCEED);
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Not activating application " + applicationId
-                  + " as  amIfStarted: " + amIfStarted + " exceeds amLimit: "
-                  + amLimit);
-            }
+            LOG.debug("Not activating application {} as  amIfStarted: {}"
+                + " exceeds amLimit: {}", applicationId, amIfStarted, amLimit);
             continue;
           }
         }
@@ -884,11 +870,9 @@ public class LeafQueue extends AbstractCSQueue {
           } else{
             application.updateAMContainerDiagnostics(AMState.INACTIVATED,
                 CSAMContainerLaunchDiagnosticsConstants.USER_AM_RESOURCE_LIMIT_EXCEED);
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Not activating application " + applicationId
-                  + " for user: " + user + " as userAmIfStarted: "
-                  + userAmIfStarted + " exceeds userAmLimit: " + userAMLimit);
-            }
+            LOG.debug("Not activating application {} for user: {} as"
+                + " userAmIfStarted: {} exceeds userAmLimit: {}",
+                applicationId, user, userAmIfStarted, userAMLimit);
             continue;
           }
         }
@@ -1242,9 +1226,7 @@ public class LeafQueue extends AbstractCSQueue {
         // Deduct resources that we can release
         User user = getUser(username);
         if (user == null) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("User " + username + " has been removed!");
-          }
+          LOG.debug("User {} has been removed!", username);
           return false;
         }
         Resource usedResource = Resources.clone(user.getUsed(p));
@@ -1253,10 +1235,8 @@ public class LeafQueue extends AbstractCSQueue {
 
         if (Resources.greaterThan(resourceCalculator, cluster, usedResource,
             userLimit)) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Used resource=" + usedResource + " exceeded user-limit="
-                + userLimit);
-          }
+          LOG.debug("Used resource={} exceeded user-limit={}",
+              usedResource, userLimit);
           return false;
         }
       } finally {
@@ -1452,9 +1432,7 @@ public class LeafQueue extends AbstractCSQueue {
     String user = application.getUser();
     User queueUser = getUser(user);
     if (queueUser == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("User " + user + " has been removed!");
-      }
+      LOG.debug("User {} has been removed!", user);
       return Resources.none();
     }
 
@@ -1553,9 +1531,7 @@ public class LeafQueue extends AbstractCSQueue {
     try {
       User user = getUser(userName);
       if (user == null) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("User " + userName + " has been removed!");
-        }
+        LOG.debug("User {} has been removed!", userName);
         return false;
       }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
index c9d0324..cb6fc28 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -857,10 +857,7 @@ public class ParentQueue extends AbstractCSQueue {
       super.releaseResource(clusterResource, releasedResource,
           node.getPartition());
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(
-            "completedContainer " + this + ", cluster=" + clusterResource);
-      }
+      LOG.debug("completedContainer {}, cluster={}", this, clusterResource);
 
     } finally {
       writeLock.unlock();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
index 470bb11..b0700e8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
@@ -522,12 +522,10 @@ public class UsersManager implements AbstractUsersManager {
       user.setUserResourceLimit(userSpecificUserLimit);
     }
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("userLimit is fetched. userLimit=" + userLimitResource
-          + ", userSpecificUserLimit=" + userSpecificUserLimit
-          + ", schedulingMode=" + schedulingMode
-          + ", partition=" + nodePartition);
-    }
+    LOG.debug("userLimit is fetched. userLimit={}, userSpecificUserLimit={},"
+        + " schedulingMode={}, partition={}", userLimitResource,
+        userSpecificUserLimit, schedulingMode, nodePartition);
+
     return userSpecificUserLimit;
   }
 
@@ -576,12 +574,9 @@ public class UsersManager implements AbstractUsersManager {
         Resources.multiplyAndNormalizeDown(resourceCalculator,
             userLimitResource, weight, lQueue.getMinimumAllocation());
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("userLimit is fetched. userLimit=" + userLimitResource
-          + ", userSpecificUserLimit=" + userSpecificUserLimit
-          + ", schedulingMode=" + schedulingMode
-          + ", partition=" + nodePartition);
-    }
+    LOG.debug("userLimit is fetched. userLimit={}, userSpecificUserLimit={},"
+        + " schedulingMode={}, partition={}", userLimitResource,
+        userSpecificUserLimit, schedulingMode, nodePartition);
 
     return userSpecificUserLimit;
   }
@@ -870,10 +865,8 @@ public class UsersManager implements AbstractUsersManager {
         // A user is added to active list. Invalidate user-limit cache.
         userLimitNeedsRecompute();
         updateActiveUsersResourceUsage(user);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("User " + user + " added to activeUsers, currently: "
-              + activeUsers);
-        }
+        LOG.debug("User {} added to activeUsers, currently: {}",
+            user, activeUsers);
       }
       if (userApps.add(applicationId)) {
         metrics.activateApp(user);
@@ -901,10 +894,8 @@ public class UsersManager implements AbstractUsersManager {
           // A user is removed from active list. Invalidate user-limit cache.
           userLimitNeedsRecompute();
           updateNonActiveUsersResourceUsage(user);
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("User " + user + " removed from activeUsers, currently: "
-                + activeUsers);
-          }
+          LOG.debug("User {} removed from activeUsers, currently: {}",
+              user, activeUsers);
         }
       }
     } finally {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
index 5bf2a0d..9d077b9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
@@ -127,11 +127,9 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
     // avoid painful of preempt an AM container
     if (schedulingMode == SchedulingMode.IGNORE_PARTITION_EXCLUSIVITY) {
       if (application.isWaitingForAMContainer()) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Skip allocating AM container to app_attempt="
-              + application.getApplicationAttemptId()
-              + ", don't allow to allocate AM container in non-exclusive mode");
-        }
+        LOG.debug("Skip allocating AM container to app_attempt={},"
+            + " don't allow to allocate AM container in non-exclusive mode",
+            application.getApplicationAttemptId());
         application.updateAppSkipNodeDiagnostics(
             "Skipping assigning to Node in Ignore Exclusivity mode. ");
         ActivitiesLogger.APP.recordSkippedAppActivityWithoutAllocation(
@@ -153,9 +151,7 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
 
     if (!application.getCSLeafQueue().getReservationContinueLooking()) {
       if (!shouldAllocOrReserveNewContainer(schedulerKey, required)) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("doesn't need containers based on reservation algo!");
-        }
+        LOG.debug("doesn't need containers based on reservation algo!");
         ActivitiesLogger.APP.recordSkippedAppActivityWithoutAllocation(
             activitiesManager, node, application, priority,
             ActivityDiagnosticConstant.DO_NOT_NEED_ALLOCATIONATTEMPTINFOS);
@@ -165,10 +161,8 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
 
     if (!checkHeadroom(clusterResource, resourceLimits, required,
         node.getPartition())) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("cannot allocate required resource=" + required
-            + " because of headroom");
-      }
+      LOG.debug("cannot allocate required resource={} because of headroom",
+          required);
       ActivitiesLogger.APP.recordSkippedAppActivityWithoutAllocation(
           activitiesManager, node, application, priority,
           ActivityDiagnosticConstant.QUEUE_SKIPPED_HEADROOM);
@@ -621,9 +615,8 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
           // reservationsContinueLooking is set. Make sure we didn't need to
           // unreserve one.
           if (needToUnreserve) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("we needed to unreserve to be able to allocate");
-            }
+            LOG.debug("we needed to unreserve to be able to allocate");
+
             // Skip the locality request
             ActivitiesLogger.APP.recordSkippedAppActivityWithoutAllocation(
                 activitiesManager, node, application, priority,
@@ -781,9 +774,8 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
       // otherwise the app will be delayed for each non-local assignment.
       // This helps apps with many off-cluster requests schedule faster.
       if (allocationResult.containerNodeType != NodeType.OFF_SWITCH) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Resetting scheduling opportunities");
-        }
+        LOG.debug("Resetting scheduling opportunities");
+
         // Only reset scheduling opportunities for RACK_LOCAL if configured
         // to do so. Not resetting means we will continue to schedule
         // RACK_LOCAL without delay.
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
index 2010711..71aa865 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
@@ -297,10 +297,8 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
     for (SchedulerContainer<FiCaSchedulerApp, FiCaSchedulerNode> c : request
         .getContainersToRelease()) {
       if (rmContainerInFinalState(c.getRmContainer())) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("To-release container=" + c.getRmContainer()
-              + " is in final state");
-        }
+        LOG.debug("To-release container={} is in final state",
+            c.getRmContainer());
         return true;
       }
     }
@@ -310,10 +308,8 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
       for (SchedulerContainer<FiCaSchedulerApp, FiCaSchedulerNode> r : c
           .getToRelease()) {
         if (rmContainerInFinalState(r.getRmContainer())) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("To-release container=" + r.getRmContainer()
-                + ", for to a new allocated container, is in final state");
-          }
+          LOG.debug("To-release container={}, for to a new allocated"
+              + " container, is in final state", r.getRmContainer());
           return true;
         }
       }
@@ -321,11 +317,8 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
       if (null != c.getAllocateFromReservedContainer()) {
         if (rmContainerInFinalState(
             c.getAllocateFromReservedContainer().getRmContainer())) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Allocate from reserved container" + c
-                .getAllocateFromReservedContainer().getRmContainer()
-                + " is in final state");
-          }
+          LOG.debug("Allocate from reserved container {} is in final state",
+              c.getAllocateFromReservedContainer().getRmContainer());
           return true;
         }
       }
@@ -336,10 +329,8 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
       for (SchedulerContainer<FiCaSchedulerApp, FiCaSchedulerNode> r : c
           .getToRelease()) {
         if (rmContainerInFinalState(r.getRmContainer())) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("To-release container=" + r.getRmContainer()
-                + ", for a reserved container, is in final state");
-          }
+          LOG.debug("To-release container={}, for a reserved container,"
+              + " is in final state", r.getRmContainer());
           return true;
         }
       }
@@ -364,10 +355,7 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
           allocation.getAllocateFromReservedContainer().getRmContainer();
 
       if (fromReservedContainer != reservedContainerOnNode) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(
-              "Try to allocate from a non-existed reserved container");
-        }
+        LOG.debug("Try to allocate from a non-existed reserved container");
         return false;
       }
     }
@@ -858,12 +846,9 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
         // resources, otherwise could affect capacity limits
         if (Resources.fitsIn(resourceCalculator, resourceNeedUnreserve,
             reservedResource)) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(
-                "unreserving node with reservation size: " + reservedResource
-                    + " in order to allocate container with size: "
-                    + resourceNeedUnreserve);
-          }
+          LOG.debug("unreserving node with reservation size: {} in order to"
+              + " allocate container with size: {}", reservedResource,
+              resourceNeedUnreserve);
           return nodeId;
         }
       }
@@ -932,10 +917,8 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
       NodeId idToUnreserve = getNodeIdToUnreserve(schedulerKey,
           minimumUnreservedResource, rc);
       if (idToUnreserve == null) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("checked to see if could unreserve for app but nothing "
-              + "reserved that matches for this app");
-        }
+        LOG.debug("checked to see if could unreserve for app but nothing "
+            + "reserved that matches for this app");
         return null;
       }
       FiCaSchedulerNode nodeToUnreserve =
@@ -1178,10 +1161,8 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
     writeLock.lock();
     try {
       if (!sourceNode.getPartition().equals(targetNode.getPartition())) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(
-              "Failed to move reservation, two nodes are in different partition");
-        }
+        LOG.debug("Failed to move reservation, two nodes are in"
+            + " different partition");
         return false;
       }
 
@@ -1189,17 +1170,13 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
       Map<NodeId, RMContainer> map = reservedContainers.get(
           reservedContainer.getReservedSchedulerKey());
       if (null == map) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Cannot find reserved container map.");
-        }
+        LOG.debug("Cannot find reserved container map.");
         return false;
       }
 
       // Check if reserved container changed
       if (sourceNode.getReservedContainer() != reservedContainer) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("To-be-moved container already updated.");
-        }
+        LOG.debug("To-be-moved container already updated.");
         return false;
       }
 
@@ -1207,18 +1184,14 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
       // reservation happens transactional
       synchronized (targetNode){
         if (targetNode.getReservedContainer() != null) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Target node is already occupied before moving");
-          }
+          LOG.debug("Target node is already occupied before moving");
         }
 
         try {
           targetNode.reserveResource(this,
               reservedContainer.getReservedSchedulerKey(), reservedContainer);
         } catch (IllegalStateException e) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Reserve on target node failed, e=", e);
-          }
+          LOG.debug("Reserve on target node failed, e={}", e);
           return false;
         }
 
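The hunks above all apply the same conversion: a debug statement wrapped in an
LOG.isDebugEnabled() guard and assembled by string concatenation becomes a single
parameterized call, since slf4j only formats the message once the debug level is
actually enabled. A minimal sketch of the pattern, using an illustrative class and
arguments that are not taken from the patch:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class SchedulingDebugLogExample {
  private static final Logger LOG =
      LoggerFactory.getLogger(SchedulingDebugLogExample.class);

  void logAllocation(String containerId, String nodeId, Exception failure) {
    // Before: explicit guard plus concatenation, so the message string is
    // only built when debug logging is on, at the cost of boilerplate.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Allocated container " + containerId + " on node " + nodeId);
    }

    // After: parameterized form; slf4j skips the formatting work when debug
    // is off, so the guard is unnecessary for cheap arguments like these.
    LOG.debug("Allocated container {} on node {}", containerId, nodeId);

    // Conversion caveat: binding a Throwable to a {} placeholder renders it
    // via toString() and drops the stack trace. Passing it as the trailing,
    // un-placeholdered argument keeps the stack trace in the log.
    LOG.debug("Allocation failed for container {}", containerId, failure);
  }
}
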
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/MemoryPlacementConstraintManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/MemoryPlacementConstraintManager.java
index 9436138..6308b41 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/MemoryPlacementConstraintManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/MemoryPlacementConstraintManager.java
@@ -184,10 +184,8 @@ public class MemoryPlacementConstraintManager
     readLock.lock();
     try {
       if (appConstraints.get(appId) == null) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Application {} is not registered in the Placement "
-              + "Constraint Manager.", appId);
-        }
+        LOG.debug("Application {} is not registered in the Placement "
+            + "Constraint Manager.", appId);
         return null;
       }
 
@@ -215,10 +213,8 @@ public class MemoryPlacementConstraintManager
     readLock.lock();
     try {
       if (appConstraints.get(appId) == null) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Application {} is not registered in the Placement "
-              + "Constraint Manager.", appId);
-        }
+        LOG.debug("Application {} is not registered in the Placement "
+            + "Constraint Manager.", appId);
         return null;
       }
       // TODO: Merge this constraint with the global one for this tag, if one
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
index 28a5286..8711cb4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
@@ -296,11 +296,8 @@ public final class PlacementConstraintsUtil {
       AllocationTagsManager atm)
       throws InvalidAllocationTagsQueryException {
     if (constraint == null) {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug(
-            "Constraint is found empty during constraint validation for app:"
-                + appId);
-      }
+      LOG.debug("Constraint is found empty during constraint validation for"
+          + " app:{}", appId);
       return true;
     }
 
@@ -365,9 +362,7 @@ public final class PlacementConstraintsUtil {
   private static NodeAttribute getNodeConstraintFromRequest(String attrKey,
       String attrString) {
     NodeAttribute nodeAttribute = null;
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("Incoming node attribute: " + attrKey + "=" + attrString);
-    }
+    LOG.debug("Incoming node attribute: {}={}", attrKey, attrString);
 
     // Input node attribute could be like 1.8
     String[] name = attrKey.split("/");
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/LocalAllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/LocalAllocationTagsManager.java
index 070a004..aea97c3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/LocalAllocationTagsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/LocalAllocationTagsManager.java
@@ -109,9 +109,7 @@ class LocalAllocationTagsManager extends AllocationTagsManager {
         });
       });
       appTempMappings.remove(applicationId);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Removed TEMP containers of app=" + applicationId);
-      }
+      LOG.debug("Removed TEMP containers of app={}", applicationId);
     }
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/distributed/NodeQueueLoadMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/distributed/NodeQueueLoadMonitor.java
index b61297a..ca35886 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/distributed/NodeQueueLoadMonitor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/distributed/NodeQueueLoadMonitor.java
@@ -183,16 +183,15 @@ public class NodeQueueLoadMonitor implements ClusterMonitor {
   @Override
   public void addNode(List<NMContainerStatus> containerStatuses,
       RMNode rmNode) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Node added event from: " + rmNode.getNode().getName());
-    }
+    LOG.debug("Node added event from: {}", rmNode.getNode().getName());
+
     // Ignoring this currently : at least one NODE_UPDATE heartbeat is
     // required to ensure node eligibility.
   }
 
   @Override
   public void removeNode(RMNode removedRMNode) {
-    LOG.debug("Node delete event for: " + removedRMNode.getNode().getName());
+    LOG.debug("Node delete event for: {}", removedRMNode.getNode().getName());
     ReentrantReadWriteLock.WriteLock writeLock = clusterNodesLock.writeLock();
     writeLock.lock();
     ClusterNode node;
@@ -212,7 +211,7 @@ public class NodeQueueLoadMonitor implements ClusterMonitor {
 
   @Override
   public void updateNode(RMNode rmNode) {
-    LOG.debug("Node update event from: " + rmNode.getNodeID());
+    LOG.debug("Node update event from: {}", rmNode.getNodeID());
     OpportunisticContainersStatus opportunisticContainersStatus =
         rmNode.getOpportunisticContainersStatus();
     if (opportunisticContainersStatus == null) {
@@ -253,11 +252,10 @@ public class NodeQueueLoadMonitor implements ClusterMonitor {
               .setQueueWaitTime(estimatedQueueWaitTime)
               .setQueueLength(waitQueueLength)
               .updateTimestamp();
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Updating ClusterNode [" + rmNode.getNodeID() + "] " +
-                "with queue wait time [" + estimatedQueueWaitTime + "] and " +
-                "wait queue length [" + waitQueueLength + "]");
-          }
+          LOG.debug("Updating ClusterNode [{}] with queue wait time [{}] and"
+              + " wait queue length [{}]", rmNode.getNodeID(),
+              estimatedQueueWaitTime, waitQueueLength);
+
         } else {
           this.clusterNodes.remove(rmNode.getNodeID());
           LOG.info("Deleting ClusterNode [" + rmNode.getNodeID() + "] " +
@@ -272,7 +270,7 @@ public class NodeQueueLoadMonitor implements ClusterMonitor {
 
   @Override
   public void updateNodeResource(RMNode rmNode, ResourceOption resourceOption) {
-    LOG.debug("Node resource update event from: " + rmNode.getNodeID());
+    LOG.debug("Node resource update event from: {}", rmNode.getNodeID());
     // Ignoring this currently.
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index d5a1f4c..966aa28 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -150,10 +150,9 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
       // Inform the container
       rmContainer.handle(
           new RMContainerFinishedEvent(containerId, containerStatus, event));
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Completed container: " + rmContainer.getContainerId()
-            + " in state: " + rmContainer.getState() + " event:" + event);
-      }
+      LOG.debug("Completed container: {} in state: {} event:{}",
+          rmContainer.getContainerId(), rmContainer.getState(), event);
+
 
       untrackContainerForPreemption(rmContainer);
       if (containerStatus.getDiagnostics().
@@ -247,14 +246,11 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
 
     Resource headroom = policy.getHeadroom(queueFairShare,
         queueUsage, maxAvailableResource);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Headroom calculation for " + this.getName() + ":" +
-          "Min(" +
-          "(queueFairShare=" + queueFairShare +
-          " - queueUsage=" + queueUsage + ")," +
-          " maxAvailableResource=" + maxAvailableResource +
-          "Headroom=" + headroom);
-    }
+    LOG.debug("Headroom calculation for {}:Min((queueFairShare={} -"
+        + " queueUsage={}), maxAvailableResource={} Headroom={}",
+        this.getName(), queueFairShare, queueUsage, maxAvailableResource,
+        headroom);
+
     return headroom;
   }
 
@@ -363,11 +359,8 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
         // add the initial time of priority to prevent comparing with FsApp
         // startTime and allowedLocalityLevel degrade
         lastScheduledContainer.put(schedulerKey, currentTimeMs);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(
-              "Init the lastScheduledContainer time, priority: " + schedulerKey
-                  .getPriority() + ", time: " + currentTimeMs);
-        }
+        LOG.debug("Init the lastScheduledContainer time, priority: {},"
+            + " time: {}", schedulerKey.getPriority(), currentTimeMs);
         allowedLocalityLevel.put(schedulerKey, NodeType.NODE_LOCAL);
         return NodeType.NODE_LOCAL;
       }
@@ -867,12 +860,8 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
         if (reserved) {
           unreserve(schedulerKey, node);
         }
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(String.format(
-              "Resource ask %s fits in available node resources %s, " +
-                      "but no container was allocated",
-              capability, available));
-        }
+        LOG.debug("Resource ask {} fits in available node resources {},"
+            + " but no container was allocated", capability, available);
         return Resources.none();
       }
 
@@ -896,10 +885,8 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
       return capability;
     }
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Resource request: " + capability + " exceeds the available"
-          + " resources of the node.");
-    }
+    LOG.debug("Resource request: {} exceeds the available"
+          + " resources of the node.", capability);
 
     // The desired container won't fit here, so reserve
     // Reserve only, if app does not wait for preempted resources on the node,
@@ -910,9 +897,7 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
             type, schedulerKey)) {
       updateAMDiagnosticMsg(capability, " exceeds the available resources of "
           + "the node and the request is reserved)");
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(getName() + "'s resource request is reserved.");
-      }
+      LOG.debug("{}'s resource request is reserved.", getName());
       return FairScheduler.CONTAINER_RESERVED;
     } else {
       updateAMDiagnosticMsg(capability, " exceeds the available resources of "
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
index e7ff725..361355b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
@@ -356,10 +356,8 @@ public class FSLeafQueue extends FSQueue {
       }
       assigned = sched.assignContainer(node);
       if (!assigned.equals(none())) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Assigned container in queue:" + getName() + " " +
-              "container:" + assigned);
-        }
+        LOG.debug("Assigned container in queue:{} container:{}",
+            getName(), assigned);
         break;
       }
     }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
index 984b2c0..1bf3618 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
@@ -297,9 +297,7 @@ public abstract class FSQueue implements Queue, Schedulable {
   public void setFairShare(Resource fairShare) {
     this.fairShare = fairShare;
     metrics.setFairShare(fairShare);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("The updated fairShare for " + getName() + " is " + fairShare);
-    }
+    LOG.debug("The updated fairShare for {} is {}", getName(), fairShare);
   }
 
   /** Get the steady fair share assigned to this Schedulable. */
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index a267639..8324f8d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -530,10 +530,8 @@ public class FairScheduler extends
           + ", in queue: " + queue.getName()
           + ", currently num of applications: " + applications.size());
       if (isAppRecovering) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(applicationId
-              + " is recovering. Skip notifying APP_ACCEPTED");
-        }
+        LOG.debug("{} is recovering. Skip notifying APP_ACCEPTED",
+            applicationId);
       } else {
         // During tests we do not always have an application object, handle
         // it here but we probably should fix the tests
@@ -586,10 +584,8 @@ public class FairScheduler extends
           + " to scheduler from user: " + user);
 
       if (isAttemptRecovering) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(applicationAttemptId
-              + " is recovering. Skipping notifying ATTEMPT_ADDED");
-        }
+        LOG.debug("{} is recovering. Skipping notifying ATTEMPT_ADDED",
+            applicationAttemptId);
       } else{
         rmContext.getDispatcher().getEventHandler().handle(
             new RMAppAttemptEvent(applicationAttemptId,
@@ -758,15 +754,15 @@ public class FairScheduler extends
       if (rmContainer.getState() == RMContainerState.RESERVED) {
         if (node != null) {
           application.unreserve(rmContainer.getReservedSchedulerKey(), node);
-        } else if (LOG.isDebugEnabled()) {
-          LOG.debug("Skipping unreserve on removed node: " + nodeID);
+        } else {
+          LOG.debug("Skipping unreserve on removed node: {}", nodeID);
         }
       } else {
         application.containerCompleted(rmContainer, containerStatus, event);
         if (node != null) {
           node.releaseContainer(rmContainer.getContainerId(), false);
-        } else if (LOG.isDebugEnabled()) {
-          LOG.debug("Skipping container release on removed node: " + nodeID);
+        } else {
+          LOG.debug("Skipping container release on removed node: {}", nodeID);
         }
         updateRootQueueMetrics();
       }
@@ -1170,9 +1166,7 @@ public class FairScheduler extends
           Resource assignment = queueMgr.getRootQueue().assignContainer(node);
 
           if (assignment.equals(Resources.none())) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("No container is allocated on node " + node);
-            }
+            LOG.debug("No container is allocated on node {}", node);
             break;
           }
 
@@ -1611,10 +1605,8 @@ public class FairScheduler extends
     try {
       FSQueue queue = getQueueManager().getQueue(queueName);
       if (queue == null) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("ACL not found for queue access-type " + acl + " for queue "
-              + queueName);
-        }
+        LOG.debug("ACL not found for queue access-type {} for queue {}",
+            acl, queueName);
         return false;
       }
       return queue.hasAccess(acl, callerUGI);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/AMRMTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/AMRMTokenSecretManager.java
index 84abdf4..7db70e6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/AMRMTokenSecretManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/AMRMTokenSecretManager.java
@@ -243,9 +243,7 @@ public class AMRMTokenSecretManager extends
     try {
       ApplicationAttemptId applicationAttemptId =
           identifier.getApplicationAttemptId();
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Trying to retrieve password for " + applicationAttemptId);
-      }
+      LOG.debug("Trying to retrieve password for {}", applicationAttemptId);
       if (!appAttemptSet.contains(applicationAttemptId)) {
         throw new InvalidToken(applicationAttemptId
             + " not found in AMRMTokenSecretManager.");
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
index 5844c8d..d3ed503 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
@@ -438,10 +438,7 @@ public class DelegationTokenRenewer extends AbstractService {
       return; // nothing to add
     }
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Registering tokens for renewal for:" +
-          " appId = " + applicationId);
-    }
+    LOG.debug("Registering tokens for renewal for: appId = {}", applicationId);
 
     Collection<Token<?>> tokens = ts.getAllTokens();
     long now = System.currentTimeMillis();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager.java
index 690340c..0f6614e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/timelineservice/RMTimelineCollectorManager.java
@@ -79,21 +79,15 @@ public class RMTimelineCollectorManager extends TimelineCollectorManager {
       }
       switch (parts[0].toUpperCase()) {
       case TimelineUtils.FLOW_NAME_TAG_PREFIX:
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Setting the flow name: " + parts[1]);
-        }
+        LOG.debug("Setting the flow name: {}", parts[1]);
         context.setFlowName(parts[1]);
         break;
       case TimelineUtils.FLOW_VERSION_TAG_PREFIX:
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Setting the flow version: " + parts[1]);
-        }
+        LOG.debug("Setting the flow version: {}", parts[1]);
         context.setFlowVersion(parts[1]);
         break;
       case TimelineUtils.FLOW_RUN_ID_TAG_PREFIX:
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Setting the flow run id: " + parts[1]);
-        }
+        LOG.debug("Setting the flow run id: {}", parts[1]);
         context.setFlowRunId(Long.parseLong(parts[1]));
         break;
       default:
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java
index 85d78c8..b686a9c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java
@@ -465,10 +465,8 @@ public class Application {
     // clone to ensure the RM doesn't manipulate the same obj
     ask.add(ResourceRequest.clone(request));
 
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("updateResourceRequest:" + " application=" + applicationId
-        + " request=" + request);
-    }
+    LOG.debug("updateResourceRequest: application={} request={}",
+        applicationId, request);
   }
 
   private ContainerLaunchContext createCLC() {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java
index 3e7dfcd..1e4b050 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java
@@ -212,11 +212,9 @@ public class NodeManager implements ContainerManagementProtocol {
       Resources.subtractFrom(available, tokenId.getResource());
       Resources.addTo(used, tokenId.getResource());
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("startContainer:" + " node=" + containerManagerAddress
-            + " application=" + applicationId + " container=" + container
-            + " available=" + available + " used=" + used);
-      }
+      LOG.debug("startContainer: node={} application={} container={}"
+          +" available={} used={}", containerManagerAddress, applicationId,
+          container, available, used);
 
     }
     StartContainersResponse response =
@@ -279,11 +277,9 @@ public class NodeManager implements ContainerManagementProtocol {
       Resources.addTo(available, container.getResource());
       Resources.subtractFrom(used, container.getResource());
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("stopContainer:" + " node=" + containerManagerAddress
-            + " application=" + applicationId + " container=" + containerID
-            + " available=" + available + " used=" + used);
-      }
+      LOG.debug("stopContainer: node={} application={} container={}"
+          + " available={} used={}", containerManagerAddress, applicationId,
+          containerID, available, used);
     }
     return StopContainersResponse.newInstance(null,null);
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java
index 59cd266..03dde04 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java
@@ -269,7 +269,7 @@ public class FederationClientInterceptor
     for (int i = 0; i < numSubmitRetries; ++i) {
       SubClusterId subClusterId = getRandomActiveSubCluster(subClustersActive);
       LOG.debug(
-          "getNewApplication try #" + i + " on SubCluster " + subClusterId);
+          "getNewApplication try #{} on SubCluster {}", i, subClusterId);
       ApplicationClientProtocol clientRMProxy =
           getClientRMProxyForSubCluster(subClusterId);
       GetNewApplicationResponse response = null;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/metrics/ClientSCMMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/metrics/ClientSCMMetrics.java
index 6b45745..a69efaf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/metrics/ClientSCMMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/metrics/ClientSCMMetrics.java
@@ -44,7 +44,7 @@ public class ClientSCMMetrics {
 
   private ClientSCMMetrics() {
     registry = new MetricsRegistry("clientRequests");
-    LOG.debug("Initialized " + registry);
+    LOG.debug("Initialized {}", registry);
   }
   
   public static ClientSCMMetrics getInstance() {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/metrics/SharedCacheUploaderMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/metrics/SharedCacheUploaderMetrics.java
index 3cf6632..b86620d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/metrics/SharedCacheUploaderMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/src/main/java/org/apache/hadoop/yarn/server/sharedcachemanager/metrics/SharedCacheUploaderMetrics.java
@@ -44,7 +44,7 @@ public class SharedCacheUploaderMetrics {
 
   private SharedCacheUploaderMetrics() {
     registry = new MetricsRegistry("SharedCacheUploaderRequests");
-    LOG.debug("Initialized "+ registry);
+    LOG.debug("Initialized {}", registry);
   }
 
   public static SharedCacheUploaderMetrics getInstance() {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/ContainerTokenIdentifierForTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/ContainerTokenIdentifierForTest.java
index 068b140..063eef4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/ContainerTokenIdentifierForTest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/ContainerTokenIdentifierForTest.java
@@ -172,7 +172,7 @@ public class ContainerTokenIdentifierForTest extends ContainerTokenIdentifier {
 
   @Override
   public void write(DataOutput out) throws IOException {
-    LOG.debug("Writing ContainerTokenIdentifierForTest to RPC layer: " + this);
+    LOG.debug("Writing ContainerTokenIdentifierForTest to RPC layer: {}", this);
     out.write(proto.toByteArray());
   }
   
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/NMTokenIdentifierNewForTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/NMTokenIdentifierNewForTest.java
index 7d21b48..f34d205 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/NMTokenIdentifierNewForTest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/NMTokenIdentifierNewForTest.java
@@ -68,7 +68,7 @@ public class NMTokenIdentifierNewForTest extends NMTokenIdentifier {
   
   @Override
   public void write(DataOutput out) throws IOException {
-    LOG.debug("Writing NMTokenIdentifierNewForTest to RPC layer: " + this);
+    LOG.debug("Writing NMTokenIdentifierNewForTest to RPC layer: {}", this);
     out.write(proto.toByteArray());
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java
index fadfd14..f50621e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java
@@ -175,9 +175,7 @@ public class HBaseTimelineReaderImpl
 
         // on success, reset hbase down flag
         if (hbaseDown.getAndSet(false)) {
-          if(LOG.isDebugEnabled()) {
-            LOG.debug("HBase request succeeded, assuming HBase up");
-          }
+          LOG.debug("HBase request succeeded, assuming HBase up");
         }
       } catch (Exception e) {
         LOG.warn("Got failure attempting to read from timeline storage, " +
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnRWHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnRWHelper.java
index a8e5149..f0ea8a7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnRWHelper.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnRWHelper.java
@@ -296,9 +296,7 @@ public final class ColumnRWHelper {
             .entrySet()) {
           K converterColumnKey = null;
           if (columnPrefixBytes == null) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("null prefix was specified; returning all columns");
-            }
+            LOG.debug("null prefix was specified; returning all columns");
             try {
               converterColumnKey = keyConverter.decode(entry.getKey());
             } catch (IllegalArgumentException iae) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReader.java
index 43ba2af..894867c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReader.java
@@ -245,8 +245,8 @@ public abstract class TimelineEntityReader extends
     augmentParams(hbaseConf, conn);
 
     FilterList filterList = constructFilterListBasedOnFields(new HashSet<>(0));
-    if (LOG.isDebugEnabled() && filterList != null) {
-      LOG.debug("FilterList created for get is - " + filterList);
+    if (filterList != null) {
+      LOG.debug("FilterList created for get is - {}", filterList);
     }
     Result result = getResult(hbaseConf, conn, filterList);
     if (result == null || result.isEmpty()) {
@@ -275,8 +275,8 @@ public abstract class TimelineEntityReader extends
 
     Set<TimelineEntity> entities = new LinkedHashSet<>();
     FilterList filterList = createFilterList();
-    if (LOG.isDebugEnabled() && filterList != null) {
-      LOG.debug("FilterList created for scan is - " + filterList);
+    if (filterList != null) {
+      LOG.debug("FilterList created for scan is - {}", filterList);
     }
     ResultScanner results = getResults(hbaseConf, conn, filterList);
     try {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/src/main [...]
index b533624..ff8d7ba 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
@@ -107,9 +107,7 @@ class FlowScanner implements RegionScanner, Closeable {
           YarnConfiguration.APP_FINAL_VALUE_RETENTION_THRESHOLD,
           YarnConfiguration.DEFAULT_APP_FINAL_VALUE_RETENTION_THRESHOLD);
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(" batch size=" + batchSize);
-    }
+    LOG.debug(" batch size={}", batchSize);
   }
 
 
@@ -443,11 +441,9 @@ class FlowScanner implements RegionScanner, Closeable {
       return finalCells;
     }
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("In processSummationMajorCompaction,"
-          + " will drop cells older than " + currentTimestamp
-          + " CurrentColumnCells size=" + currentColumnCells.size());
-    }
+    LOG.debug("In processSummationMajorCompaction, will drop cells older"
+        + " than {} CurrentColumnCells size={}", currentTimestamp,
+        currentColumnCells.size());
 
     for (Cell cell : currentColumnCells) {
       AggregationOperation cellAggOp = getCurrentAggOp(cell);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java
index 0371d49..b758da9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java
@@ -353,30 +353,22 @@ public class NodeTimelineCollectorManager extends TimelineCollectorManager {
         getNMCollectorService().getTimelineCollectorContext(request);
     String userId = response.getUserId();
     if (userId != null && !userId.isEmpty()) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Setting the user in the context: " + userId);
-      }
+      LOG.debug("Setting the user in the context: {}", userId);
       collector.getTimelineEntityContext().setUserId(userId);
     }
     String flowName = response.getFlowName();
     if (flowName != null && !flowName.isEmpty()) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Setting the flow name: " + flowName);
-      }
+      LOG.debug("Setting the flow name: {}", flowName);
       collector.getTimelineEntityContext().setFlowName(flowName);
     }
     String flowVersion = response.getFlowVersion();
     if (flowVersion != null && !flowVersion.isEmpty()) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Setting the flow version: " + flowVersion);
-      }
+      LOG.debug("Setting the flow version: {}", flowVersion);
       collector.getTimelineEntityContext().setFlowVersion(flowVersion);
     }
     long flowRunId = response.getFlowRunId();
     if (flowRunId != 0L) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Setting the flow run id: " + flowRunId);
-      }
+      LOG.debug("Setting the flow run id: {}", flowRunId);
       collector.getTimelineEntityContext().setFlowRunId(flowRunId);
     }
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
index e9eeb43..6c83665 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
@@ -137,10 +137,7 @@ public abstract class TimelineCollector extends CompositeService {
    */
   public TimelineWriteResponse putEntities(TimelineEntities entities,
       UserGroupInformation callerUgi) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("putEntities(entities=" + entities + ", callerUgi="
-          + callerUgi + ")");
-    }
+    LOG.debug("putEntities(entities={}, callerUgi={})", entities, callerUgi);
 
     TimelineWriteResponse response;
     // synchronize on the writer object so that no other threads can
@@ -166,10 +163,7 @@ public abstract class TimelineCollector extends CompositeService {
    */
   public TimelineWriteResponse putDomain(TimelineDomain domain,
       UserGroupInformation callerUgi) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(
-          "putDomain(domain=" + domain + ", callerUgi=" + callerUgi + ")");
-    }
+    LOG.debug("putDomain(domain={}, callerUgi={})", domain, callerUgi);
 
     TimelineWriteResponse response;
     synchronized (writer) {
@@ -216,10 +210,8 @@ public abstract class TimelineCollector extends CompositeService {
    */
   public void putEntitiesAsync(TimelineEntities entities,
       UserGroupInformation callerUgi) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("putEntitiesAsync(entities=" + entities + ", callerUgi=" +
-          callerUgi + ")");
-    }
+    LOG.debug("putEntitiesAsync(entities={}, callerUgi={})", entities,
+        callerUgi);
 
     writeTimelineEntities(entities, callerUgi);
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
index 279cfdc..330e1f4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
@@ -3594,10 +3594,8 @@ public class TimelineReaderWebServices {
       String entityUser) {
     String authUser = TimelineReaderWebServicesUtils.getUserName(ugi);
     String requestedUser = TimelineReaderWebServicesUtils.parseStr(entityUser);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(
-          "Authenticated User: " + authUser + " Requested User:" + entityUser);
-    }
+    LOG.debug("Authenticated User: {} Requested User:{}",
+        authUser, entityUser);
     return (readerManager.checkAccess(ugi) || authUser.equals(requestedUser));
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/NoOpTimelineReaderImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/NoOpTimelineReaderImpl.java
index 53bf058..bfa5309 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/NoOpTimelineReaderImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/NoOpTimelineReaderImpl.java
@@ -50,10 +50,8 @@ public class NoOpTimelineReaderImpl extends AbstractService
   @Override
   public TimelineEntity getEntity(TimelineReaderContext context,
        TimelineDataToRetrieve dataToRetrieve) throws IOException {
-    if(LOG.isDebugEnabled()){
-      LOG.debug("NoOpTimelineReader is configured. Response to all the read " +
+    LOG.debug("NoOpTimelineReader is configured. Response to all the read " +
               "requests would be empty");
-    }
     return new TimelineEntity();
   }
 
@@ -61,20 +59,16 @@ public class NoOpTimelineReaderImpl extends AbstractService
   public Set<TimelineEntity> getEntities(TimelineReaderContext context,
           TimelineEntityFilters filters, TimelineDataToRetrieve dataToRetrieve)
           throws IOException {
-    if(LOG.isDebugEnabled()){
-      LOG.debug("NoOpTimelineReader is configured. Response to all the read " +
+    LOG.debug("NoOpTimelineReader is configured. Response to all the read " +
               "requests would be empty");
-    }
     return new HashSet<>();
   }
 
   @Override
   public Set<String> getEntityTypes(TimelineReaderContext context)
           throws IOException {
-    if(LOG.isDebugEnabled()){
-      LOG.debug("NoOpTimelineReader is configured. Response to all the read " +
+    LOG.debug("NoOpTimelineReader is configured. Response to all the read " +
               "requests would be empty");
-    }
     return new HashSet<>();
   }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/NoOpTimelineWriterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/NoOpTimelineWriterImpl.java
index 44d2cfa..48b3348 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/NoOpTimelineWriterImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/NoOpTimelineWriterImpl.java
@@ -51,20 +51,16 @@ public class NoOpTimelineWriterImpl extends AbstractService implements
                                      TimelineEntities data,
                                      UserGroupInformation callerUgi)
                                      throws IOException {
-    if(LOG.isDebugEnabled()){
-      LOG.debug("NoOpTimelineWriter is configured. Not storing " +
+    LOG.debug("NoOpTimelineWriter is configured. Not storing " +
               "TimelineEntities.");
-    }
     return new TimelineWriteResponse();
   }
 
   @Override
   public TimelineWriteResponse write(TimelineCollectorContext context,
                                      TimelineDomain domain) throws IOException {
-    if(LOG.isDebugEnabled()){
-      LOG.debug("NoOpTimelineWriter is configured. Not storing " +
+    LOG.debug("NoOpTimelineWriter is configured. Not storing " +
               "TimelineEntities.");
-    }
     return new TimelineWriteResponse();
   }
 
@@ -72,17 +68,13 @@ public class NoOpTimelineWriterImpl extends AbstractService implements
   public TimelineWriteResponse aggregate(TimelineEntity data,
                                          TimelineAggregationTrack track)
                                          throws IOException {
-    if(LOG.isDebugEnabled()){
-      LOG.debug("NoOpTimelineWriter is configured. Not aggregating " +
+    LOG.debug("NoOpTimelineWriter is configured. Not aggregating " +
               "TimelineEntities.");
-    }
     return new TimelineWriteResponse();
   }
 
   @Override
   public void flush() throws IOException {
-    if(LOG.isDebugEnabled()){
-      LOG.debug("NoOpTimelineWriter is configured. Ignoring flush call");
-    }
+    LOG.debug("NoOpTimelineWriter is configured. Ignoring flush call");
   }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyCA.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyCA.java
index 303ad97..a44d764 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyCA.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyCA.java
@@ -194,9 +194,7 @@ public class ProxyCA {
     String subject = "OU=YARN-" + UUID.randomUUID();
     caCert = createCert(true, subject, subject, from, to,
         caKeyPair.getPublic(), caKeyPair.getPrivate());
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("CA Certificate: \n{}", caCert);
-    }
+    LOG.debug("CA Certificate: \n{}", caCert);
   }
 
   public byte[] createChildKeyStore(ApplicationId appId, String ksPassword)
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyUtils.java
index 4886c55..9516d37 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyUtils.java
@@ -73,12 +73,10 @@ public class ProxyUtils {
       HttpServletResponse response,
       String target)
       throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Redirecting {} {} to {}",
+    LOG.debug("Redirecting {} {} to {}",
           request.getMethod(), 
           request.getRequestURI(),
           target);
-    }
     String location = response.encodeRedirectURL(target);
     response.setStatus(HttpServletResponse.SC_FOUND);
     response.setHeader(LOCATION, location);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
index c804f72..ae9a01f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
@@ -248,9 +248,7 @@ public class WebAppProxyServlet extends HttpServlet {
     // since that is what the AM filter checks against. IP aliasing or
     // similar could cause issues otherwise.
     InetAddress localAddress = InetAddress.getByName(proxyHost);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("local InetAddress for proxy host: {}", localAddress);
-    }
+    LOG.debug("local InetAddress for proxy host: {}", localAddress);
     httpClientBuilder.setDefaultRequestConfig(
         RequestConfig.custom()
         .setCircularRedirectsAllowed(true)
@@ -284,9 +282,7 @@ public class WebAppProxyServlet extends HttpServlet {
       String name = names.nextElement();
       if (PASS_THROUGH_HEADERS.contains(name)) {
         String value = req.getHeader(name);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("REQ HEADER: {} : {}", name, value);
-        }
+        LOG.debug("REQ HEADER: {} : {}", name, value);
         base.setHeader(name, value);
       }
     }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
index c965283..be8e10c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
@@ -108,17 +108,15 @@ public class AmIpFilter implements Filter {
         proxyAddresses = new HashSet<>();
         for (String proxyHost : proxyHosts) {
           try {
-              for(InetAddress add : InetAddress.getAllByName(proxyHost)) {
-                if (LOG.isDebugEnabled()) {
-                  LOG.debug("proxy address is: {}", add.getHostAddress());
-                }
-                proxyAddresses.add(add.getHostAddress());
-              }
-              lastUpdate = now;
-            } catch (UnknownHostException e) {
-              LOG.warn("Could not locate {} - skipping", proxyHost, e);
+            for (InetAddress add : InetAddress.getAllByName(proxyHost)) {
+              LOG.debug("proxy address is: {}", add.getHostAddress());
+              proxyAddresses.add(add.getHostAddress());
             }
+            lastUpdate = now;
+          } catch (UnknownHostException e) {
+            LOG.warn("Could not locate {} - skipping", proxyHost, e);
           }
+        }
         if (proxyAddresses.isEmpty()) {
           throw new ServletException("Could not locate any of the proxy hosts");
         }
@@ -140,9 +138,7 @@ public class AmIpFilter implements Filter {
     HttpServletRequest httpReq = (HttpServletRequest)req;
     HttpServletResponse httpResp = (HttpServletResponse)resp;
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Remote address for request is: {}", httpReq.getRemoteAddr());
-    }
+    LOG.debug("Remote address for request is: {}", httpReq.getRemoteAddr());
 
     if (!getProxyAddresses().contains(httpReq.getRemoteAddr())) {
       StringBuilder redirect = new StringBuilder(findRedirectUrl());
@@ -177,11 +173,8 @@ public class AmIpFilter implements Filter {
         }
       }
       if (user == null) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Could not find "
-              + WebAppProxyServlet.PROXY_USER_COOKIE_NAME
-              + " cookie, so user will not be set");
-        }
+        LOG.debug("Could not find {} cookie, so user will not be set",
+            WebAppProxyServlet.PROXY_USER_COOKIE_NAME);
 
         chain.doFilter(req, resp);
       } else {

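One case where the isDebugEnabled() guard remains useful even with slf4j is an
argument that is itself expensive to compute: parameterized logging defers the
formatting, but the arguments are still evaluated before the call. A short sketch
of that distinction, with illustrative names that do not appear in the patch:

import java.util.Map;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class GuardedDebugExample {
  private static final Logger LOG =
      LoggerFactory.getLogger(GuardedDebugExample.class);

  void report(Map<String, Long> queueLengths) {
    // Cheap argument: no guard needed, slf4j defers the string formatting.
    LOG.debug("Tracking {} queues", queueLengths.size());

    // Expensive argument: summarize() would run even with debug disabled,
    // so the explicit guard still pays for itself here.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Queue summary: {}", summarize(queueLengths));
    }
  }

  private String summarize(Map<String, Long> queueLengths) {
    StringBuilder sb = new StringBuilder();
    queueLengths.forEach(
        (queue, length) -> sb.append(queue).append('=').append(length).append(' '));
    return sb.toString().trim();
  }
}
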

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org