You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by ey...@apache.org on 2019/03/18 17:58:54 UTC
[hadoop] branch trunk updated: YARN-9363. Replaced debug logging
with SLF4J parameterized log message. Contributed by Prabhu Joseph
This is an automated email from the ASF dual-hosted git repository.
eyang pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/trunk by this push:
new 5f6e225 YARN-9363. Replaced debug logging with SLF4J parameterized log message. Contributed by Prabhu Joseph
5f6e225 is described below
commit 5f6e22516668ff94a76737ad5e2cdcb2ff9f6dfd
Author: Eric Yang <ey...@apache.org>
AuthorDate: Mon Mar 18 13:57:18 2019 -0400
YARN-9363. Replaced debug logging with SLF4J parameterized log message.
Contributed by Prabhu Joseph
---
.../apache/hadoop/fs/DelegationTokenRenewer.java | 4 +-
.../yarn/service/provider/ProviderUtils.java | 7 +---
.../client/api/impl/FileSystemTimelineWriter.java | 4 +-
.../hadoop/yarn/csi/client/CsiGrpcClient.java | 7 ++--
.../hadoop/yarn/csi/client/FakeCsiDriver.java | 5 ++-
.../server/util/timeline/TimelineServerUtils.java | 7 ++--
.../WindowsSecureContainerExecutor.java | 32 +++++----------
.../launcher/RecoverPausedContainerLaunch.java | 6 +--
.../resources/CGroupElasticMemoryController.java | 8 ++--
.../linux/resources/CGroupsHandlerImpl.java | 7 +---
.../linux/resources/CGroupsResourceCalculator.java | 18 ++++-----
.../resources/CombinedResourceCalculator.java | 8 ++--
.../linux/resources/DefaultOOMHandler.java | 8 ++--
.../resources/NetworkTagMappingManagerFactory.java | 6 +--
.../resources/fpga/FpgaResourceAllocator.java | 7 ++--
.../resources/fpga/FpgaResourceHandlerImpl.java | 7 ++--
.../linux/resources/gpu/GpuResourceAllocator.java | 7 ++--
.../resources/gpu/GpuResourceHandlerImpl.java | 8 ++--
.../linux/resources/numa/NumaNodeResource.java | 7 ++--
.../resources/numa/NumaResourceAllocator.java | 7 ++--
.../resources/numa/NumaResourceHandlerImpl.java | 8 ++--
.../deviceframework/DeviceMappingManager.java | 24 +++++------
.../deviceframework/DevicePluginAdapter.java | 7 ++--
.../DeviceResourceDockerRuntimePluginImpl.java | 47 ++++++++--------------
.../deviceframework/DeviceResourceHandlerImpl.java | 20 ++++-----
.../deviceframework/DeviceResourceUpdaterImpl.java | 7 ++--
.../resourceplugin/fpga/FpgaResourcePlugin.java | 7 ++--
.../gpu/NvidiaDockerV1CommandPlugin.java | 21 ++++------
.../gpu/NvidiaDockerV2CommandPlugin.java | 7 ++--
.../server/nodemanager/TestNodeManagerMXBean.java | 8 ++--
.../TestCGroupElasticMemoryController.java | 8 ++--
.../capacity/AbstractAutoCreatedLeafQueue.java | 4 +-
.../scheduler/capacity/CapacityScheduler.java | 6 +--
.../scheduler/capacity/ParentQueue.java | 12 ++----
.../capacity/QueueManagementDynamicEditPolicy.java | 13 ++----
.../GuaranteedOrZeroCapacityOverTimePolicy.java | 12 ++----
.../constraint/PlacementConstraintsUtil.java | 28 +++++--------
.../resourcemanager/scheduler/fair/FSQueue.java | 6 +--
.../scheduler/fifo/FifoScheduler.java | 11 ++---
.../placement/LocalityAppPlacementAllocator.java | 6 +--
.../yarn/server/resourcemanager/Application.java | 29 +++++--------
.../timelineservice/storage/flow/FlowScanner.java | 4 +-
42 files changed, 188 insertions(+), 277 deletions(-)
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java
index 09c3a8a..2feb937 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java
@@ -242,9 +242,7 @@ public class DelegationTokenRenewer
} catch (InterruptedException ie) {
LOG.error("Interrupted while canceling token for " + fs.getUri()
+ "filesystem");
- if (LOG.isDebugEnabled()) {
- LOG.debug("Exception in removeRenewAction: ", ie);
- }
+ LOG.debug("Exception in removeRenewAction: ", ie);
}
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
index ea1fb0c..5fc96a0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
@@ -212,11 +212,8 @@ public class ProviderUtils implements YarnServiceConstants {
log.info("Component instance conf dir already exists: " + compInstanceDir);
}
- if (log.isDebugEnabled()) {
- log.debug("Tokens substitution for component instance: " + instance
- .getCompInstanceName() + System.lineSeparator()
- + tokensForSubstitution);
- }
+ log.debug("Tokens substitution for component instance: {}{}{}", instance
.getCompInstanceName(), System.lineSeparator(), tokensForSubstitution);
for (ConfigFile originalFile : compLaunchContext.getConfiguration()
.getFiles()) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
index e605184..b92f4e4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
@@ -287,9 +287,7 @@ public class FileSystemTimelineWriter extends TimelineWriter{
Path domainLogPath =
new Path(attemptDirCache.getAppAttemptDir(appAttemptId),
DOMAIN_LOG_PREFIX + appAttemptId.toString());
- if (LOG.isDebugEnabled()) {
- LOG.debug("Writing domains for {} to {}", appAttemptId, domainLogPath);
- }
+ LOG.debug("Writing domains for {} to {}", appAttemptId, domainLogPath);
this.logFDsCache.writeDomainLog(
fs, domainLogPath, objMapper, domain, isAppendSupported);
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/client/CsiGrpcClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/client/CsiGrpcClient.java
index 5dc1b3f..af6eec2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/client/CsiGrpcClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/client/CsiGrpcClient.java
@@ -27,8 +27,8 @@ import io.netty.channel.epoll.EpollDomainSocketChannel;
import io.netty.channel.epoll.EpollEventLoopGroup;
import io.netty.channel.unix.DomainSocketAddress;
import io.netty.util.concurrent.DefaultThreadFactory;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.SocketAddress;
@@ -39,7 +39,8 @@ import java.util.concurrent.TimeUnit;
*/
public final class CsiGrpcClient implements AutoCloseable {
- private static final Log LOG = LogFactory.getLog(CsiGrpcClient.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(CsiGrpcClient.class);
private final ManagedChannel channel;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/FakeCsiDriver.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/FakeCsiDriver.java
index e4d4da2..0c6de32 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/FakeCsiDriver.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/FakeCsiDriver.java
@@ -25,7 +25,8 @@ import io.netty.channel.epoll.EpollServerDomainSocketChannel;
import org.apache.hadoop.yarn.csi.utils.GrpcHelper;
import java.io.IOException;
-import java.util.logging.Logger;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A fake implementation of CSI driver.
@@ -33,7 +34,7 @@ import java.util.logging.Logger;
*/
public class FakeCsiDriver {
- private static final Logger LOG = Logger
+ private static final Logger LOG = LoggerFactory
.getLogger(FakeCsiDriver.class.getName());
private Server server;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/util/timeline/TimelineServerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/util/timeline/TimelineServerUtils.java
index 3021def..15c6d3d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/util/timeline/TimelineServerUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/util/timeline/TimelineServerUtils.java
@@ -21,8 +21,8 @@ package org.apache.hadoop.yarn.server.util.timeline;
import java.util.LinkedHashSet;
import java.util.Set;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.AuthenticationFilterInitializer;
import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilter;
@@ -33,7 +33,8 @@ import org.apache.hadoop.yarn.server.timeline.security.TimelineDelgationTokenSec
* Set of utility methods to be used across timeline reader and collector.
*/
public final class TimelineServerUtils {
- private static final Log LOG = LogFactory.getLog(TimelineServerUtils.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TimelineServerUtils.class);
private TimelineServerUtils() {
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java
index 5a39cc3..c4d6918 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java
@@ -36,8 +36,8 @@ import java.util.List;
import java.util.Map;
import org.apache.commons.lang3.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DelegateToFileSystem;
import org.apache.hadoop.fs.FileContext;
@@ -68,8 +68,8 @@ import org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext;
*/
public class WindowsSecureContainerExecutor extends DefaultContainerExecutor {
- private static final Log LOG = LogFactory
- .getLog(WindowsSecureContainerExecutor.class);
+ private static final Logger LOG = LoggerFactory
+ .getLogger(WindowsSecureContainerExecutor.class);
public static final String LOCALIZER_PID_FORMAT = "STAR_LOCALIZER_%s";
@@ -591,10 +591,7 @@ public class WindowsSecureContainerExecutor extends DefaultContainerExecutor {
@Override
protected void copyFile(Path src, Path dst, String owner) throws IOException {
- if (LOG.isDebugEnabled()) {
- LOG.debug(String.format("copyFile: %s -> %s owner:%s", src.toString(),
- dst.toString(), owner));
- }
+ LOG.debug("copyFile: {} -> {} owner:{}", src, dst, owner);
Native.Elevated.copy(src, dst, true);
Native.Elevated.chown(dst, owner, nodeManagerGroup);
}
@@ -607,10 +604,7 @@ public class WindowsSecureContainerExecutor extends DefaultContainerExecutor {
// This is similar to how LCE creates dirs
//
perms = new FsPermission(DIR_PERM);
- if (LOG.isDebugEnabled()) {
- LOG.debug(String.format("createDir: %s perm:%s owner:%s",
- dirPath.toString(), perms.toString(), owner));
- }
+ LOG.debug("createDir: {} perm:{} owner:{}", dirPath, perms, owner);
super.createDir(dirPath, perms, createParent, owner);
lfs.setOwner(dirPath, owner, nodeManagerGroup);
@@ -619,10 +613,7 @@ public class WindowsSecureContainerExecutor extends DefaultContainerExecutor {
@Override
protected void setScriptExecutable(Path script, String owner)
throws IOException {
- if (LOG.isDebugEnabled()) {
- LOG.debug(String.format("setScriptExecutable: %s owner:%s",
- script.toString(), owner));
- }
+ LOG.debug("setScriptExecutable: {} owner:{}", script, owner);
super.setScriptExecutable(script, owner);
Native.Elevated.chown(script, owner, nodeManagerGroup);
}
@@ -630,10 +621,7 @@ public class WindowsSecureContainerExecutor extends DefaultContainerExecutor {
@Override
public Path localizeClasspathJar(Path jarPath, Path target, String owner)
throws IOException {
- if (LOG.isDebugEnabled()) {
- LOG.debug(String.format("localizeClasspathJar: %s %s o:%s",
- jarPath, target, owner));
- }
+ LOG.debug("localizeClasspathJar: {} {} o:{}", jarPath, target, owner);
createDir(target, new FsPermission(DIR_PERM), true, owner);
String fileName = jarPath.getName();
Path dst = new Path(target, fileName);
@@ -669,9 +657,7 @@ public class WindowsSecureContainerExecutor extends DefaultContainerExecutor {
copyFile(nmPrivateContainerTokensPath, tokenDst, user);
File cwdApp = new File(appStorageDir.toString());
- if (LOG.isDebugEnabled()) {
- LOG.debug(String.format("cwdApp: %s", cwdApp));
- }
+ LOG.debug("cwdApp: {}", cwdApp);
List<String> command ;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/RecoverPausedContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/RecoverPausedContainerLaunch.java
index 761fe3b..c678c91 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/RecoverPausedContainerLaunch.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/RecoverPausedContainerLaunch.java
@@ -18,8 +18,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -42,7 +42,7 @@ import java.io.InterruptedIOException;
*/
public class RecoverPausedContainerLaunch extends ContainerLaunch {
- private static final Log LOG = LogFactory.getLog(
+ private static final Logger LOG = LoggerFactory.getLogger(
RecoveredContainerLaunch.class);
public RecoverPausedContainerLaunch(Context context,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java
index 752c3a6..e6a5999 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java
@@ -19,8 +19,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resourc
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.io.IOUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.yarn.api.ApplicationConstants;
@@ -56,8 +56,8 @@ import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.r
* a container to kill. The algorithm that picks the container is a plugin.
*/
public class CGroupElasticMemoryController extends Thread {
- protected static final Log LOG = LogFactory
- .getLog(CGroupElasticMemoryController.class);
+ protected static final Logger LOG = LoggerFactory
+ .getLogger(CGroupElasticMemoryController.class);
private final Clock clock = new MonotonicClock();
private String yarnCGroupPath;
private String oomListenerPath;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
index 20e0fc1..fab1490 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
@@ -561,11 +561,8 @@ class CGroupsHandlerImpl implements CGroupsHandler {
String cGroupParamPath = getPathForCGroupParam(controller, cGroupId, param);
PrintWriter pw = null;
- if (LOG.isDebugEnabled()) {
- LOG.debug(
- String.format("updateCGroupParam for path: %s with value %s",
- cGroupParamPath, value));
- }
+ LOG.debug("updateCGroupParam for path: {} with value {}",
+ cGroupParamPath, value);
try {
File file = new File(cGroupParamPath);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsResourceCalculator.java
index 50ce3ea..0b25db4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsResourceCalculator.java
@@ -19,8 +19,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hadoop.util.CpuTimeTracker;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.SysInfoLinux;
@@ -63,8 +63,8 @@ public class CGroupsResourceCalculator extends ResourceCalculatorProcessTree {
Continue,
Exit
}
- protected static final Log LOG = LogFactory
- .getLog(CGroupsResourceCalculator.class);
+ protected static final Logger LOG = LoggerFactory
+ .getLogger(CGroupsResourceCalculator.class);
private static final String PROCFS = "/proc";
static final String CGROUP = "cgroup";
static final String CPU_STAT = "cpuacct.stat";
@@ -145,9 +145,7 @@ public class CGroupsResourceCalculator extends ResourceCalculatorProcessTree {
@Override
public float getCpuUsagePercent() {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Process " + pid + " jiffies:" + processTotalJiffies);
- }
+ LOG.debug("Process {} jiffies:{}", pid, processTotalJiffies);
return cpuTimeTracker.getCpuTrackerUsagePercent();
}
@@ -187,9 +185,9 @@ public class CGroupsResourceCalculator extends ResourceCalculatorProcessTree {
processPhysicalMemory = getMemorySize(memStat);
if (memswStat.exists()) {
processVirtualMemory = getMemorySize(memswStat);
- } else if(LOG.isDebugEnabled()) {
- LOG.debug("Swap cgroups monitoring is not compiled into the kernel " +
- memswStat.getAbsolutePath().toString());
+ } else {
+ LOG.debug("Swap cgroups monitoring is not compiled into the kernel {}",
+ memswStat.getAbsolutePath());
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CombinedResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CombinedResourceCalculator.java
index 84b3ed0..5d11818 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CombinedResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CombinedResourceCalculator.java
@@ -18,8 +18,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.util.ProcfsBasedProcessTree;
import org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree;
@@ -29,8 +29,8 @@ import org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree;
* it is backward compatible with procfs in terms of virtual memory usage.
*/
public class CombinedResourceCalculator extends ResourceCalculatorProcessTree {
- protected static final Log LOG = LogFactory
- .getLog(CombinedResourceCalculator.class);
+ protected static final Logger LOG = LoggerFactory
+ .getLogger(CombinedResourceCalculator.class);
private ProcfsBasedProcessTree procfs;
private CGroupsResourceCalculator cgroup;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java
index 844bb6c..6d74809 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java
@@ -19,8 +19,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resourc
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.lang3.builder.HashCodeBuilder;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.yarn.api.records.ExecutionType;
@@ -46,8 +46,8 @@ import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.r
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class DefaultOOMHandler implements Runnable {
- protected static final Log LOG = LogFactory
- .getLog(DefaultOOMHandler.class);
+ protected static final Logger LOG = LoggerFactory
+ .getLogger(DefaultOOMHandler.class);
private final Context context;
private final String memoryStatFile;
private final CGroupsHandler cgroups;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/NetworkTagMappingManagerFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/NetworkTagMappingManagerFactory.java
index 17e2e21..cc7fc13 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/NetworkTagMappingManagerFactory.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/NetworkTagMappingManagerFactory.java
@@ -20,8 +20,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -32,7 +32,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
*
*/
public final class NetworkTagMappingManagerFactory {
- private static final Log LOG = LogFactory.getLog(
+ private static final Logger LOG = LoggerFactory.getLogger(
NetworkTagMappingManagerFactory.class);
private NetworkTagMappingManagerFactory() {}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceAllocator.java
index 62dd3c4..334c6bd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceAllocator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceAllocator.java
@@ -22,8 +22,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resourc
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableList;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.server.nodemanager.Context;
@@ -44,7 +44,8 @@ import static org.apache.hadoop.yarn.api.records.ResourceInformation.FPGA_URI;
* */
public class FpgaResourceAllocator {
- static final Log LOG = LogFactory.getLog(FpgaResourceAllocator.class);
+ static final Logger LOG = LoggerFactory.
+ getLogger(FpgaResourceAllocator.class);
private List<FpgaDevice> allowedFpgas = new LinkedList<>();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceHandlerImpl.java
index 61ffd35..d9ca8d1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceHandlerImpl.java
@@ -20,8 +20,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.fpga;
import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -50,7 +50,8 @@ import static org.apache.hadoop.yarn.api.records.ResourceInformation.FPGA_URI;
@InterfaceAudience.Private
public class FpgaResourceHandlerImpl implements ResourceHandler {
- static final Log LOG = LogFactory.getLog(FpgaResourceHandlerImpl.class);
+ static final Logger LOG = LoggerFactory.
+ getLogger(FpgaResourceHandlerImpl.class);
private final String REQUEST_FPGA_IP_ID_KEY = "REQUESTED_FPGA_IP_ID";
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java
index 2496ac8..67936ba 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java
@@ -21,8 +21,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resourc
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Resource;
@@ -52,7 +52,8 @@ import static org.apache.hadoop.yarn.api.records.ResourceInformation.GPU_URI;
* Allocate GPU resources according to requirements
*/
public class GpuResourceAllocator {
- final static Log LOG = LogFactory.getLog(GpuResourceAllocator.class);
+ final static Logger LOG = LoggerFactory.
+ getLogger(GpuResourceAllocator.class);
private static final int WAIT_MS_PER_LOOP = 1000;
private Set<GpuDevice> allowedGpuDevices = new TreeSet<>();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceHandlerImpl.java
index 2c9baf2..9474b0f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceHandlerImpl.java
@@ -18,8 +18,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.gpu;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -41,8 +41,8 @@ import java.util.Arrays;
import java.util.List;
public class GpuResourceHandlerImpl implements ResourceHandler {
- final static Log LOG = LogFactory
- .getLog(GpuResourceHandlerImpl.class);
+ final static Logger LOG = LoggerFactory
+ .getLogger(GpuResourceHandlerImpl.class);
// This will be used by container-executor to add necessary clis
public static final String EXCLUDED_GPUS_CLI_OPTION = "--excluded_gpus";
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaNodeResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaNodeResource.java
index f434412..7cb720d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaNodeResource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaNodeResource.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resourc
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Resource;
@@ -36,7 +36,8 @@ public class NumaNodeResource {
private long usedMemory;
private int usedCpus;
- private static final Log LOG = LogFactory.getLog(NumaNodeResource.class);
+ private static final Logger LOG = LoggerFactory.
+ getLogger(NumaNodeResource.class);
private Map<ContainerId, Long> containerVsMemUsage =
new ConcurrentHashMap<>();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocator.java
index e152bda..08c3282 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocator.java
@@ -29,8 +29,8 @@ import java.util.Map.Entry;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.apache.hadoop.util.StringUtils;
@@ -51,7 +51,8 @@ import com.google.common.annotations.VisibleForTesting;
*/
public class NumaResourceAllocator {
- private static final Log LOG = LogFactory.getLog(NumaResourceAllocator.class);
+ private static final Logger LOG = LoggerFactory.
+ getLogger(NumaResourceAllocator.class);
// Regex to find node ids, Ex: 'available: 2 nodes (0-1)'
private static final String NUMA_NODEIDS_REGEX =
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceHandlerImpl.java
index e6e3159..8a6ebda 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceHandlerImpl.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resourc
import java.util.ArrayList;
import java.util.List;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -39,8 +39,8 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resource
*/
public class NumaResourceHandlerImpl implements ResourceHandler {
- private static final Log LOG = LogFactory
- .getLog(NumaResourceHandlerImpl.class);
+ private static final Logger LOG = LoggerFactory
+ .getLogger(NumaResourceHandlerImpl.class);
private final NumaResourceAllocator numaResourceAllocator;
private final String numaCtlCmd;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceMappingManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceMappingManager.java
index b620620..ed80d3f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceMappingManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceMappingManager.java
@@ -22,8 +22,8 @@ import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Resource;
@@ -52,7 +52,8 @@ import java.util.concurrent.ConcurrentHashMap;
* scheduler.
* */
public class DeviceMappingManager {
- static final Log LOG = LogFactory.getLog(DeviceMappingManager.class);
+ static final Logger LOG = LoggerFactory.
+ getLogger(DeviceMappingManager.class);
private Context nmContext;
private static final int WAIT_MS_PER_LOOP = 1000;
@@ -163,10 +164,7 @@ public class DeviceMappingManager {
ContainerId containerId = container.getContainerId();
int requestedDeviceCount = getRequestedDeviceCount(resourceName,
requestedResource);
- if (LOG.isDebugEnabled()) {
- LOG.debug("Try allocating " + requestedDeviceCount
- + " " + resourceName);
- }
+ LOG.debug("Try allocating {} {}", requestedDeviceCount, resourceName);
// Assign devices to container if requested some.
if (requestedDeviceCount > 0) {
if (requestedDeviceCount > getAvailableDevices(resourceName)) {
@@ -266,10 +264,8 @@ public class DeviceMappingManager {
while (iter.hasNext()) {
entry = iter.next();
if (entry.getValue().equals(containerId)) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Recycle devices: " + entry.getKey()
- + ", type: " + resourceName + " from " + containerId);
- }
+ LOG.debug("Recycle devices: {}, type: {} from {}", entry.getKey(),
+ resourceName, containerId);
iter.remove();
}
}
@@ -317,10 +313,8 @@ public class DeviceMappingManager {
ContainerId containerId = c.getContainerId();
Map<String, String> env = c.getLaunchContext().getEnvironment();
if (null == dps) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Customized device plugin scheduler is preferred "
- + "but not implemented, use default logic");
- }
+ LOG.debug("Customized device plugin scheduler is preferred "
+ + "but not implemented, use default logic");
defaultScheduleAction(allowed, used,
assigned, containerId, count);
} else {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DevicePluginAdapter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DevicePluginAdapter.java
index 462e45a..a99cc96 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DevicePluginAdapter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DevicePluginAdapter.java
@@ -19,8 +19,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.deviceframework;
import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.nodemanager.Context;
@@ -47,7 +47,8 @@ import java.util.Map;
*
* */
public class DevicePluginAdapter implements ResourcePlugin {
- private final static Log LOG = LogFactory.getLog(DevicePluginAdapter.class);
+ private final static Logger LOG = LoggerFactory.
+ getLogger(DevicePluginAdapter.class);
private final String resourceName;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceDockerRuntimePluginImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceDockerRuntimePluginImpl.java
index aaa11bd..285ed05 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceDockerRuntimePluginImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceDockerRuntimePluginImpl.java
@@ -18,8 +18,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.deviceframework;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.server.nodemanager.api.deviceplugin.Device;
import org.apache.hadoop.yarn.server.nodemanager.api.deviceplugin.DevicePlugin;
@@ -47,7 +47,7 @@ import java.util.Set;
public class DeviceResourceDockerRuntimePluginImpl
implements DockerCommandPlugin {
- final static Log LOG = LogFactory.getLog(
+ final static Logger LOG = LoggerFactory.getLogger(
DeviceResourceDockerRuntimePluginImpl.class);
private String resourceName;
@@ -73,9 +73,7 @@ public class DeviceResourceDockerRuntimePluginImpl
public void updateDockerRunCommand(DockerRunCommand dockerRunCommand,
Container container) throws ContainerExecutionException {
String containerId = container.getContainerId().toString();
- if (LOG.isDebugEnabled()) {
- LOG.debug("Try to update docker run command for: " + containerId);
- }
+ LOG.debug("Try to update docker run command for: {}", containerId);
if(!requestedDevice(resourceName, container)) {
return;
}
@@ -89,17 +87,12 @@ public class DeviceResourceDockerRuntimePluginImpl
}
// handle runtime
dockerRunCommand.addRuntime(deviceRuntimeSpec.getContainerRuntime());
- if (LOG.isDebugEnabled()) {
- LOG.debug("Handle docker container runtime type: "
- + deviceRuntimeSpec.getContainerRuntime() + " for container: "
- + containerId);
- }
+ LOG.debug("Handle docker container runtime type: {} for container: {}",
+ deviceRuntimeSpec.getContainerRuntime(), containerId);
// handle device mounts
Set<MountDeviceSpec> deviceMounts = deviceRuntimeSpec.getDeviceMounts();
- if (LOG.isDebugEnabled()) {
- LOG.debug("Handle device mounts: " + deviceMounts + " for container: "
- + containerId);
- }
+ LOG.debug("Handle device mounts: {} for container: {}", deviceMounts,
+ containerId);
for (MountDeviceSpec mountDeviceSpec : deviceMounts) {
dockerRunCommand.addDevice(
mountDeviceSpec.getDevicePathInHost(),
@@ -107,10 +100,8 @@ public class DeviceResourceDockerRuntimePluginImpl
}
// handle volume mounts
Set<MountVolumeSpec> mountVolumeSpecs = deviceRuntimeSpec.getVolumeMounts();
- if (LOG.isDebugEnabled()) {
- LOG.debug("Handle volume mounts: " + mountVolumeSpecs + " for container: "
- + containerId);
- }
+ LOG.debug("Handle volume mounts: {} for container: {}", mountVolumeSpecs,
+ containerId);
for (MountVolumeSpec mountVolumeSpec : mountVolumeSpecs) {
if (mountVolumeSpec.getReadOnly()) {
dockerRunCommand.addReadOnlyMountLocation(
@@ -124,10 +115,8 @@ public class DeviceResourceDockerRuntimePluginImpl
}
// handle envs
dockerRunCommand.addEnv(deviceRuntimeSpec.getEnvs());
- if (LOG.isDebugEnabled()) {
- LOG.debug("Handle envs: " + deviceRuntimeSpec.getEnvs()
- + " for container: " + containerId);
- }
+ LOG.debug("Handle envs: {} for container: {}",
+ deviceRuntimeSpec.getEnvs(), containerId);
}
@Override
@@ -147,10 +136,8 @@ public class DeviceResourceDockerRuntimePluginImpl
DockerVolumeCommand.VOLUME_CREATE_SUB_COMMAND);
command.setDriverName(volumeSec.getVolumeDriver());
command.setVolumeName(volumeSec.getVolumeName());
- if (LOG.isDebugEnabled()) {
- LOG.debug("Get volume create request from plugin:" + volumeClaims
- + " for container: " + container.getContainerId().toString());
- }
+ LOG.debug("Get volume create request from plugin:{} for container: {}",
+ volumeClaims, container.getContainerId());
return command;
}
}
@@ -195,10 +182,8 @@ public class DeviceResourceDockerRuntimePluginImpl
allocated = devicePluginAdapter
.getDeviceMappingManager()
.getAllocatedDevices(resourceName, containerId);
- if (LOG.isDebugEnabled()) {
- LOG.debug("Get allocation from deviceMappingManager: "
- + allocated + ", " + resourceName + " for container: " + containerId);
- }
+ LOG.debug("Get allocation from deviceMappingManager: {}, {} for"
+ + " container: {}", allocated, resourceName, containerId);
cachedAllocation.put(containerId, allocated);
return allocated;
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceHandlerImpl.java
index 0e2a6f8..97ff94f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceHandlerImpl.java
@@ -19,8 +19,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.deviceframework;
import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -53,7 +53,8 @@ import java.util.Set;
* */
public class DeviceResourceHandlerImpl implements ResourceHandler {
- static final Log LOG = LogFactory.getLog(DeviceResourceHandlerImpl.class);
+ static final Logger LOG = LoggerFactory.
+ getLogger(DeviceResourceHandlerImpl.class);
private final String resourceName;
private final DevicePlugin devicePlugin;
@@ -134,10 +135,7 @@ public class DeviceResourceHandlerImpl implements ResourceHandler {
String containerIdStr = container.getContainerId().toString();
DeviceMappingManager.DeviceAllocation allocation =
deviceMappingManager.assignDevices(resourceName, container);
- if (LOG.isDebugEnabled()) {
- LOG.debug("Allocated to "
- + containerIdStr + ": " + allocation);
- }
+ LOG.debug("Allocated to {}: {}", containerIdStr, allocation);
DeviceRuntimeSpec spec;
try {
spec = devicePlugin.onDevicesAllocated(
@@ -291,13 +289,9 @@ public class DeviceResourceHandlerImpl implements ResourceHandler {
}
DeviceType deviceType;
try {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Try to get device type from device path: " + devName);
- }
+ LOG.debug("Try to get device type from device path: {}", devName);
String output = shellWrapper.getDeviceFileType(devName);
- if (LOG.isDebugEnabled()) {
- LOG.debug("stat output:" + output);
- }
+ LOG.debug("stat output:{}", output);
deviceType = output.startsWith("c") ? DeviceType.CHAR : DeviceType.BLOCK;
} catch (IOException e) {
String msg =
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceUpdaterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceUpdaterImpl.java
index e5ef578..da81cbb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceUpdaterImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceUpdaterImpl.java
@@ -18,8 +18,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.deviceframework;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.nodemanager.api.deviceplugin.Device;
@@ -33,7 +33,8 @@ import java.util.Set;
* */
public class DeviceResourceUpdaterImpl extends NodeResourceUpdaterPlugin {
- final static Log LOG = LogFactory.getLog(DeviceResourceUpdaterImpl.class);
+ final static Logger LOG = LoggerFactory.
+ getLogger(DeviceResourceUpdaterImpl.class);
private String resourceName;
private DevicePlugin devicePlugin;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaResourcePlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaResourcePlugin.java
index 9add3d2..4dab1a4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaResourcePlugin.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaResourcePlugin.java
@@ -19,8 +19,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.fpga;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -37,7 +37,8 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin
import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.NMResourceInfo;
public class FpgaResourcePlugin implements ResourcePlugin {
- private static final Log LOG = LogFactory.getLog(FpgaResourcePlugin.class);
+ private static final Logger LOG = LoggerFactory.
+ getLogger(FpgaResourcePlugin.class);
private ResourceHandler fpgaResourceHandler = null;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/NvidiaDockerV1CommandPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/NvidiaDockerV1CommandPlugin.java
index c2e315a..36a0d55 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/NvidiaDockerV1CommandPlugin.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/NvidiaDockerV1CommandPlugin.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugi
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.io.IOUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ResourceInformation;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -50,7 +50,8 @@ import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.r
* Implementation to use nvidia-docker v1 as GPU docker command plugin.
*/
public class NvidiaDockerV1CommandPlugin implements DockerCommandPlugin {
- final static Log LOG = LogFactory.getLog(NvidiaDockerV1CommandPlugin.class);
+ final static Logger LOG = LoggerFactory.
+ getLogger(NvidiaDockerV1CommandPlugin.class);
private Configuration conf;
private Map<String, Set<String>> additionalCommands = null;
@@ -121,9 +122,7 @@ public class NvidiaDockerV1CommandPlugin implements DockerCommandPlugin {
addToCommand(DEVICE_OPTION, getValue(str));
} else if (str.startsWith(VOLUME_DRIVER_OPTION)) {
volumeDriver = getValue(str);
- if (LOG.isDebugEnabled()) {
- LOG.debug("Found volume-driver:" + volumeDriver);
- }
+ LOG.debug("Found volume-driver:{}", volumeDriver);
} else if (str.startsWith(MOUNT_RO_OPTION)) {
String mount = getValue(str);
if (!mount.endsWith(":ro")) {
@@ -286,15 +285,11 @@ public class NvidiaDockerV1CommandPlugin implements DockerCommandPlugin {
if (VOLUME_NAME_PATTERN.matcher(mountSource).matches()) {
// This is a valid named volume
newVolumeName = mountSource;
- if (LOG.isDebugEnabled()) {
- LOG.debug("Found volume name for GPU:" + newVolumeName);
- }
+ LOG.debug("Found volume name for GPU:{}", newVolumeName);
break;
} else{
- if (LOG.isDebugEnabled()) {
- LOG.debug("Failed to match " + mountSource
- + " to named-volume regex pattern");
- }
+ LOG.debug("Failed to match {} to named-volume regex pattern",
+ mountSource);
}
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/NvidiaDockerV2CommandPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/NvidiaDockerV2CommandPlugin.java
index ff25eb6..f584485 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/NvidiaDockerV2CommandPlugin.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/NvidiaDockerV2CommandPlugin.java
@@ -19,8 +19,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.gpu;
import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.api.records.ResourceInformation;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ResourceMappings;
@@ -41,7 +41,8 @@ import java.util.Set;
* Implementation to use nvidia-docker v2 as GPU docker command plugin.
*/
public class NvidiaDockerV2CommandPlugin implements DockerCommandPlugin {
- final static Log LOG = LogFactory.getLog(NvidiaDockerV2CommandPlugin.class);
+ final static Logger LOG = LoggerFactory.
+ getLogger(NvidiaDockerV2CommandPlugin.class);
private String nvidiaRuntime = "nvidia";
private String nvidiaVisibleDevices = "NVIDIA_VISIBLE_DEVICES";
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerMXBean.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerMXBean.java
index 80b915c..7a6cc67 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerMXBean.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerMXBean.java
@@ -18,8 +18,8 @@
package org.apache.hadoop.yarn.server.nodemanager;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -33,8 +33,8 @@ import java.lang.management.ManagementFactory;
* Class for testing {@link NodeManagerMXBean} implementation.
*/
public class TestNodeManagerMXBean {
- public static final Log LOG = LogFactory.getLog(
- TestNodeManagerMXBean.class);
+ public static final Logger LOG = LoggerFactory.getLogger(
+ TestNodeManagerMXBean.class);
@Test
public void testNodeManagerMXBean() throws Exception {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupElasticMemoryController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupElasticMemoryController.java
index 40a296c..f10ec50 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupElasticMemoryController.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupElasticMemoryController.java
@@ -18,8 +18,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
@@ -43,8 +43,8 @@ import static org.mockito.Mockito.when;
* Test for elastic non-strict memory controller based on cgroups.
*/
public class TestCGroupElasticMemoryController {
- protected static final Log LOG = LogFactory
- .getLog(TestCGroupElasticMemoryController.class);
+ protected static final Logger LOG = LoggerFactory
+ .getLogger(TestCGroupElasticMemoryController.class);
private YarnConfiguration conf = new YarnConfiguration();
private File script = new File("target/" +
TestCGroupElasticMemoryController.class.getName());
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractAutoCreatedLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractAutoCreatedLeafQueue.java
index 1ce67d6..9e5bdb0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractAutoCreatedLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractAutoCreatedLeafQueue.java
@@ -92,10 +92,8 @@ public class AbstractAutoCreatedLeafQueue extends LeafQueue {
// note: we currently set maxCapacity to capacity
// this might be revised later
setMaxCapacity(nodeLabel, entitlement.getMaxCapacity());
- if (LOG.isDebugEnabled()) {
- LOG.debug("successfully changed to {} for queue {}", capacity, this
+ LOG.debug("successfully changed to {} for queue {}", capacity, this
.getQueueName());
- }
//update queue used capacity etc
CSQueueUtils.updateQueueStatistics(resourceCalculator,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 49f1954..4baf405 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -1053,10 +1053,8 @@ public class CapacityScheduler extends
+ " to scheduler from user " + application.getUser() + " in queue "
+ queue.getQueueName());
if (isAttemptRecovering) {
- if (LOG.isDebugEnabled()) {
- LOG.debug(applicationAttemptId
- + " is recovering. Skipping notifying ATTEMPT_ADDED");
- }
+ LOG.debug("{} is recovering. Skipping notifying ATTEMPT_ADDED",
+ applicationAttemptId);
} else{
rmContext.getDispatcher().getEventHandler().handle(
new RMAppAttemptEvent(applicationAttemptId,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
index cb6fc28..53e8fd2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -593,10 +593,8 @@ public class ParentQueue extends AbstractCSQueue {
NodeType.NODE_LOCAL);
while (canAssign(clusterResource, node)) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Trying to assign containers to child-queue of "
- + getQueueName());
- }
+ LOG.debug("Trying to assign containers to child-queue of {}",
+ getQueueName());
// Are we over maximum-capacity for this queue?
// This will also consider parent's limits and also continuous reservation
@@ -781,10 +779,8 @@ public class ParentQueue extends AbstractCSQueue {
for (Iterator<CSQueue> iter = sortAndGetChildrenAllocationIterator(
candidates.getPartition()); iter.hasNext(); ) {
CSQueue childQueue = iter.next();
- if(LOG.isDebugEnabled()) {
- LOG.debug("Trying to assign to queue: " + childQueue.getQueuePath()
- + " stats: " + childQueue);
- }
+ LOG.debug("Trying to assign to queue: {} stats: {}",
+ childQueue.getQueuePath(), childQueue);
// Get ResourceLimits of child queue before assign containers
ResourceLimits childLimits =
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementDynamicEditPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementDynamicEditPolicy.java
index ea43ac8..9602558 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementDynamicEditPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementDynamicEditPolicy.java
@@ -221,15 +221,10 @@ public class QueueManagementDynamicEditPolicy implements SchedulingEditPolicy {
+ parentQueue.getQueueName(), e);
}
} else{
- if (LOG.isDebugEnabled()) {
- LOG.debug(
- "Skipping queue management updates for parent queue "
- + parentQueue
- .getQueuePath() + " "
- + "since configuration for auto creating queues beyond "
- + "parent's "
- + "guaranteed capacity is disabled");
- }
+ LOG.debug("Skipping queue management updates for parent queue {} "
+ + "since configuration for auto creating queues beyond "
+ + "parent's guaranteed capacity is disabled",
+ parentQueue.getQueuePath());
}
return queueManagementChanges;
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/queuemanagement/GuaranteedOrZeroCapacityOverTimePolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/queuemanagement/GuaranteedOrZeroCapacityOverTimePolicy.java
index b1d3f74..d91f488 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/queuemanagement/GuaranteedOrZeroCapacityOverTimePolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/queuemanagement/GuaranteedOrZeroCapacityOverTimePolicy.java
@@ -669,19 +669,15 @@ public class GuaranteedOrZeroCapacityOverTimePolicy
if (updatedQueueTemplate.getQueueCapacities().
getCapacity(nodeLabel) > 0) {
if (isActive(leafQueue, nodeLabel)) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Queue is already active." + " Skipping activation : "
- + leafQueue.getQueueName());
- }
+ LOG.debug("Queue is already active. Skipping activation : {}",
+ leafQueue.getQueueName());
} else{
activate(leafQueue, nodeLabel);
}
} else{
if (!isActive(leafQueue, nodeLabel)) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Queue is already de-activated. Skipping "
- + "de-activation : " + leafQueue.getQueueName());
- }
+ LOG.debug("Queue is already de-activated. Skipping "
+ + "de-activation : {}", leafQueue.getQueueName());
} else{
deactivate(leafQueue, nodeLabel);
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
index 8711cb4..d04cf9c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
@@ -154,18 +154,13 @@ public final class PlacementConstraintsUtil {
if (schedulerNode.getNodeAttributes() == null ||
!schedulerNode.getNodeAttributes().contains(requestAttribute)) {
if (opCode == NodeAttributeOpCode.NE) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Incoming requestAttribute:" + requestAttribute
- + "is not present in " + schedulerNode.getNodeID()
- + ", however opcode is NE. Hence accept this node.");
- }
+ LOG.debug("Incoming requestAttribute:{} is not present in {},"
+ + " however opcode is NE. Hence accept this node.",
+ requestAttribute, schedulerNode.getNodeID());
return true;
}
- if (LOG.isDebugEnabled()) {
- LOG.debug("Incoming requestAttribute:" + requestAttribute
- + "is not present in " + schedulerNode.getNodeID()
- + ", skip such node.");
- }
+ LOG.debug("Incoming requestAttribute:{} is not present in {},"
+ + " skip such node.", requestAttribute, schedulerNode.getNodeID());
return false;
}
@@ -183,21 +178,16 @@ public final class PlacementConstraintsUtil {
}
if (requestAttribute.equals(nodeAttribute)) {
if (isOpCodeMatches(requestAttribute, nodeAttribute, opCode)) {
- if (LOG.isDebugEnabled()) {
- LOG.debug(
- "Incoming requestAttribute:" + requestAttribute
- + " matches with node:" + schedulerNode.getNodeID());
- }
+ LOG.debug("Incoming requestAttribute:{} matches with node:{}",
+ requestAttribute, schedulerNode.getNodeID());
found = true;
return found;
}
}
}
if (!found) {
- if (LOG.isDebugEnabled()) {
- LOG.info("skip this node:" + schedulerNode.getNodeID()
- + " for requestAttribute:" + requestAttribute);
- }
+ LOG.debug("skip this node:{} for requestAttribute:{}",
+ schedulerNode.getNodeID(), requestAttribute);
return false;
}
return true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
index 1bf3618..c22fdb0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
@@ -426,10 +426,8 @@ public abstract class FSQueue implements Queue, Schedulable {
*/
boolean assignContainerPreCheck(FSSchedulerNode node) {
if (node.getReservedContainer() != null) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Assigning container failed on node '" + node.getNodeName()
- + " because it has reserved containers.");
- }
+ LOG.debug("Assigning container failed on node '{}' because it has"
+ + " reserved containers.", node.getNodeName());
return false;
} else if (!Resources.fitsIn(getResourceUsage(), getMaxShare())) {
if (LOG.isDebugEnabled()) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
index dd64f6d..9bd2a11 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
@@ -397,9 +397,8 @@ public class FifoScheduler extends
LOG.info("Accepted application " + applicationId + " from user: " + user
+ ", currently num of applications: " + applications.size());
if (isAppRecovering) {
- if (LOG.isDebugEnabled()) {
- LOG.debug(applicationId + " is recovering. Skip notifying APP_ACCEPTED");
- }
+ LOG.debug("{} is recovering. Skip notifying APP_ACCEPTED",
+ applicationId);
} else {
rmContext.getDispatcher().getEventHandler()
.handle(new RMAppEvent(applicationId, RMAppEventType.APP_ACCEPTED));
@@ -429,10 +428,8 @@ public class FifoScheduler extends
LOG.info("Added Application Attempt " + appAttemptId
+ " to scheduler from user " + application.getUser());
if (isAttemptRecovering) {
- if (LOG.isDebugEnabled()) {
- LOG.debug(appAttemptId
- + " is recovering. Skipping notifying ATTEMPT_ADDED");
- }
+ LOG.debug("{} is recovering. Skipping notifying ATTEMPT_ADDED",
+ appAttemptId);
} else {
rmContext.getDispatcher().getEventHandler().handle(
new RMAppAttemptEvent(appAttemptId,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/LocalityAppPlacementAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/LocalityAppPlacementAllocator.java
index d0677c3..5c9ce50 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/LocalityAppPlacementAllocator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/LocalityAppPlacementAllocator.java
@@ -396,10 +396,8 @@ public class LocalityAppPlacementAllocator <N extends SchedulerNode>
SchedulingMode schedulingMode) {
// We will only look at node label = nodeLabelToLookAt according to
// schedulingMode and partition of node.
- if(LOG.isDebugEnabled()) {
- LOG.debug("precheckNode is invoked for " + schedulerNode.getNodeID() + ","
- + schedulingMode);
- }
+ LOG.debug("precheckNode is invoked for {},{}", schedulerNode.getNodeID(),
+ schedulingMode);
String nodePartitionToLookAt;
if (schedulingMode == SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY) {
nodePartitionToLookAt = schedulerNode.getPartition();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java
index b686a9c..94e8933 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java
@@ -223,10 +223,8 @@ public class Application {
if (requests == null) {
requests = new HashMap<String, ResourceRequest>();
this.requests.put(schedulerKey, requests);
- if(LOG.isDebugEnabled()) {
- LOG.debug("Added priority=" + schedulerKey.getPriority()
- + " application="+ applicationId);
- }
+ LOG.debug("Added priority={} application={}", schedulerKey.getPriority(),
+ applicationId);
}
final Resource capability = requestSpec.get(schedulerKey);
@@ -242,10 +240,7 @@ public class Application {
LOG.info("Added task " + task.getTaskId() + " to application " +
applicationId + " at priority " + schedulerKey.getPriority());
- if(LOG.isDebugEnabled()) {
- LOG.debug("addTask: application=" + applicationId
- + " #asks=" + ask.size());
- }
+ LOG.debug("addTask: application={} #asks={}", applicationId, ask.size());
// Create resource requests
for (String host : task.getHosts()) {
@@ -320,12 +315,12 @@ public class Application {
public synchronized List<Container> getResources() throws IOException {
if(LOG.isDebugEnabled()) {
- LOG.debug("getResources begin:" + " application=" + applicationId
- + " #ask=" + ask.size());
+ LOG.debug("getResources begin: application={} #ask={}",
+ applicationId, ask.size());
for (ResourceRequest request : ask) {
- LOG.debug("getResources:" + " application=" + applicationId
- + " ask-request=" + request);
+ LOG.debug("getResources: application={} ask-request={}",
+ applicationId, request);
}
}
@@ -346,8 +341,8 @@ public class Application {
ask.clear();
if(LOG.isDebugEnabled()) {
- LOG.debug("getResources() for " + applicationId + ":"
- + " ask=" + ask.size() + " received=" + containers.size());
+ LOG.debug("getResources() for {}: ask={} received={}",
+ applicationId, ask.size(), containers.size());
}
return containers;
@@ -451,10 +446,8 @@ public class Application {
updateResourceRequest(requests.get(ResourceRequest.ANY));
- if(LOG.isDebugEnabled()) {
- LOG.debug("updateResourceDemands:" + " application=" + applicationId
- + " #asks=" + ask.size());
- }
+ LOG.debug("updateResourceDemands: application={} #asks={}",
+ applicationId, ask.size());
}
private void updateResourceRequest(ResourceRequest request) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/src/main [...]
index 31122ca..9ecb6f6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
@@ -107,9 +107,7 @@ class FlowScanner implements RegionScanner, Closeable {
YarnConfiguration.APP_FINAL_VALUE_RETENTION_THRESHOLD,
YarnConfiguration.DEFAULT_APP_FINAL_VALUE_RETENTION_THRESHOLD);
}
- if (LOG.isDebugEnabled()) {
- LOG.debug(" batch size=" + batchSize);
- }
+ LOG.debug(" batch size={}", batchSize);
}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org