Posted to common-commits@hadoop.apache.org by st...@apache.org on 2018/02/14 16:21:39 UTC
[3/6] hadoop git commit: HADOOP-10571. Use Log.*(Object, Throwable) overload to log exceptions. Contributed by Andras Bokor.
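For readers skimming the diff: the change replaces string-concatenated exception logging with SLF4J's parameterized calls, passing the exception as a trailing Throwable argument so the full stack trace is preserved. A minimal sketch of the before/after pattern (the class and method names below are illustrative, not from the patch):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LogThrowableExample {
      private static final Logger LOG =
          LoggerFactory.getLogger(LogThrowableExample.class);

      void move(String path, Exception e) {
        // Before: e is flattened into the message via toString(),
        // so the stack trace never reaches the log.
        LOG.warn("Error moving bad file " + path + ": " + e);

        // After: with more arguments than {} placeholders, SLF4J
        // treats a trailing Throwable as the exception and logs
        // its stack trace along with the formatted message.
        LOG.warn("Error moving bad file {}", path, e);
      }
    }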
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f20dc0d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f20dc0d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f20dc0d5
Branch: refs/heads/trunk
Commit: f20dc0d5770d3876954faf0a6e8dcce6539ffc23
Parents: 042ef2f
Author: Steve Loughran <st...@apache.org>
Authored: Wed Feb 14 16:20:14 2018 +0000
Committer: Steve Loughran <st...@apache.org>
Committed: Wed Feb 14 16:20:14 2018 +0000
----------------------------------------------------------------------
.../org/apache/hadoop/fs/LocalFileSystem.java | 2 +-
.../apache/hadoop/ha/ActiveStandbyElector.java | 30 +-
.../apache/hadoop/ha/FailoverController.java | 20 +-
.../org/apache/hadoop/ha/HealthMonitor.java | 9 +-
.../org/apache/hadoop/io/retry/RetryUtils.java | 11 +-
.../main/java/org/apache/hadoop/net/DNS.java | 39 +-
.../apache/hadoop/service/AbstractService.java | 27 +-
.../hadoop/service/ServiceOperations.java | 6 +-
.../hadoop/service/TestServiceOperations.java | 3 +-
.../hadoop/hdfs/nfs/nfs3/DFSClientCache.java | 25 +-
.../hadoop/hdfs/nfs/nfs3/OpenFileCtx.java | 314 +++++++---------
.../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java | 370 +++++++++----------
.../hadoop/hdfs/server/datanode/DataNode.java | 211 +++++------
.../hdfs/server/datanode/DataXceiver.java | 172 ++++-----
.../hdfs/server/namenode/FSNamesystem.java | 2 +-
.../server/namenode/ha/StandbyCheckpointer.java | 34 +-
.../org/apache/hadoop/hdfs/MiniDFSCluster.java | 2 +-
.../hadoop/test/MiniDFSClusterManager.java | 26 +-
.../apache/hadoop/mapred/gridmix/Gridmix.java | 22 +-
.../swift/http/HttpInputStreamWithRelease.java | 29 +-
20 files changed, 587 insertions(+), 767 deletions(-)
----------------------------------------------------------------------
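A second pattern that recurs in the hunks below: explicit isDebugEnabled() guards around concatenated debug messages are dropped, because the {} placeholder defers message formatting until SLF4J's own level check has passed. A sketch under the same assumptions (names illustrative):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class GuardExample {
      private static final Logger LOG =
          LoggerFactory.getLogger(GuardExample.class);

      void becomeActive() {
        // Before: a guard avoids building the message string when
        // debug logging is off.
        if (LOG.isDebugEnabled()) {
          LOG.debug("Becoming active for " + this);
        }

        // After: no guard needed; formatting only happens if debug
        // is enabled. A guard still pays off when computing an
        // argument itself is expensive (see the dumpFileHandle()
        // call sites left guarded below).
        LOG.debug("Becoming active for {}", this);
      }
    }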
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java
index 91b2315..538ccdf 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java
@@ -139,7 +139,7 @@ public class LocalFileSystem extends ChecksumFileSystem {
LOG.warn("Ignoring failure of renameTo");
}
} catch (IOException e) {
- LOG.warn("Error moving bad file " + p + ": " + e);
+ LOG.warn("Error moving bad file " + p, e);
}
return false;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
index 93fd2cf..a23fb71 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
@@ -888,9 +888,8 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
Stat oldBreadcrumbStat = fenceOldActive();
writeBreadCrumbNode(oldBreadcrumbStat);
- if (LOG.isDebugEnabled()) {
- LOG.debug("Becoming active for " + this);
- }
+ LOG.debug("Becoming active for {}", this);
+
appClient.becomeActive();
state = State.ACTIVE;
return true;
@@ -910,8 +909,8 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
throws KeeperException, InterruptedException {
Preconditions.checkState(appData != null, "no appdata");
- LOG.info("Writing znode " + zkBreadCrumbPath +
- " to indicate that the local node is the most recent active...");
+ LOG.info("Writing znode {} to indicate that the local " +
+ "node is the most recent active...", zkBreadCrumbPath);
if (oldBreadcrumbStat == null) {
// No previous active, just create the node
createWithRetries(zkBreadCrumbPath, appData, zkAcl,
@@ -948,9 +947,8 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
deleteWithRetries(zkBreadCrumbPath, stat.getVersion());
} catch (Exception e) {
- LOG.warn("Unable to delete our own bread-crumb of being active at " +
- zkBreadCrumbPath + ": " + e.getLocalizedMessage() + ". " +
- "Expecting to be fenced by the next active.");
+ LOG.warn("Unable to delete our own bread-crumb of being active at {}." +
+ ". Expecting to be fenced by the next active.", zkBreadCrumbPath, e);
}
}
@@ -984,7 +982,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
throw ke;
}
- LOG.info("Old node exists: " + StringUtils.byteToHexString(data));
+ LOG.info("Old node exists: {}", StringUtils.byteToHexString(data));
if (Arrays.equals(data, appData)) {
LOG.info("But old node has our own data, so don't need to fence it.");
} else {
@@ -995,9 +993,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
private void becomeStandby() {
if (state != State.STANDBY) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Becoming standby for " + this);
- }
+ LOG.debug("Becoming standby for {}", this);
state = State.STANDBY;
appClient.becomeStandby();
}
@@ -1005,9 +1001,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
private void enterNeutralMode() {
if (state != State.NEUTRAL) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Entering neutral mode for " + this);
- }
+ LOG.debug("Entering neutral mode for {}", this);
state = State.NEUTRAL;
appClient.enterNeutralMode();
}
@@ -1124,7 +1118,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
private synchronized boolean isStaleClient(Object ctx) {
Preconditions.checkNotNull(ctx);
if (zkClient != (ZooKeeper)ctx) {
- LOG.warn("Ignoring stale result from old client with sessionId " +
+ LOG.warn("Ignoring stale result from old client with sessionId {}",
String.format("0x%08x", ((ZooKeeper)ctx).getSessionId()));
return true;
}
@@ -1162,8 +1156,8 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
throws KeeperException, IOException {
try {
if (!hasReceivedEvent.await(connectionTimeoutMs, TimeUnit.MILLISECONDS)) {
- LOG.error("Connection timed out: couldn't connect to ZooKeeper in "
- + connectionTimeoutMs + " milliseconds");
+ LOG.error("Connection timed out: couldn't connect to ZooKeeper in " +
+ "{} milliseconds", connectionTimeoutMs);
zk.close();
throw KeeperException.create(Code.CONNECTIONLOSS);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
index 3c05a25..b86ae29 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
@@ -123,7 +123,7 @@ public class FailoverController {
toSvcStatus = toSvc.getServiceStatus();
} catch (IOException e) {
String msg = "Unable to get service state for " + target;
- LOG.error(msg + ": " + e.getLocalizedMessage());
+ LOG.error(msg, e);
throw new FailoverFailedException(msg, e);
}
@@ -139,7 +139,7 @@ public class FailoverController {
target + " is not ready to become active: " +
notReadyReason);
} else {
- LOG.warn("Service is not ready to become active, but forcing: " +
+ LOG.warn("Service is not ready to become active, but forcing: {}",
notReadyReason);
}
}
@@ -172,11 +172,11 @@ public class FailoverController {
proxy.transitionToStandby(createReqInfo());
return true;
} catch (ServiceFailedException sfe) {
- LOG.warn("Unable to gracefully make " + svc + " standby (" +
- sfe.getMessage() + ")");
+ LOG.warn("Unable to gracefully make {} standby ({})",
+ svc, sfe.getMessage());
} catch (IOException ioe) {
- LOG.warn("Unable to gracefully make " + svc +
- " standby (unable to connect)", ioe);
+ LOG.warn("Unable to gracefully make {} standby (unable to connect)",
+ svc, ioe);
} finally {
if (proxy != null) {
RPC.stopProxy(proxy);
@@ -227,13 +227,13 @@ public class FailoverController {
toSvc.getProxy(conf, rpcTimeoutToNewActive),
createReqInfo());
} catch (ServiceFailedException sfe) {
- LOG.error("Unable to make " + toSvc + " active (" +
- sfe.getMessage() + "). Failing back.");
+ LOG.error("Unable to make {} active ({}). Failing back.",
+ toSvc, sfe.getMessage());
failed = true;
cause = sfe;
} catch (IOException ioe) {
- LOG.error("Unable to make " + toSvc +
- " active (unable to connect). Failing back.", ioe);
+ LOG.error("Unable to make {} active (unable to connect). Failing back.",
+ toSvc, ioe);
failed = true;
cause = ioe;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java
index a93df75..d1a858f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java
@@ -204,12 +204,11 @@ public class HealthMonitor {
healthy = true;
} catch (Throwable t) {
if (isHealthCheckFailedException(t)) {
- LOG.warn("Service health check failed for " + targetToMonitor
- + ": " + t.getMessage());
+ LOG.warn("Service health check failed for {}", targetToMonitor, t);
enterState(State.SERVICE_UNHEALTHY);
} else {
- LOG.warn("Transport-level exception trying to monitor health of " +
- targetToMonitor + ": " + t.getCause() + " " + t.getLocalizedMessage());
+ LOG.warn("Transport-level exception trying to monitor health of {}",
+ targetToMonitor, t);
RPC.stopProxy(proxy);
proxy = null;
enterState(State.SERVICE_NOT_RESPONDING);
@@ -246,7 +245,7 @@ public class HealthMonitor {
private synchronized void enterState(State newState) {
if (newState != state) {
- LOG.info("Entering state " + newState);
+ LOG.info("Entering state {}", newState);
state = newState;
synchronized (callbacks) {
for (Callback cb : callbacks) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java
index 1f5acfe..7e43974 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java
@@ -72,9 +72,7 @@ public class RetryUtils {
retryPolicySpecKey, defaultRetryPolicySpec
);
- if (LOG.isDebugEnabled()) {
- LOG.debug("multipleLinearRandomRetry = " + multipleLinearRandomRetry);
- }
+ LOG.debug("multipleLinearRandomRetry = {}", multipleLinearRandomRetry);
if (multipleLinearRandomRetry == null) {
//no retry
@@ -124,10 +122,9 @@ public class RetryUtils {
p = RetryPolicies.TRY_ONCE_THEN_FAIL;
}
- if (LOG.isDebugEnabled()) {
- LOG.debug("RETRY " + retries + ") policy="
- + p.getClass().getSimpleName() + ", exception=" + e);
- }
+ LOG.debug("RETRY {}) policy={}", retries,
+ p.getClass().getSimpleName(), e);
+
return p.shouldRetry(e, retries, failovers, isMethodIdempotent);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java
index 81041c1..2fb4d3e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java
@@ -179,8 +179,7 @@ public class DNS {
netIf = getSubinterface(strInterface);
}
} catch (SocketException e) {
- LOG.warn("I/O error finding interface " + strInterface +
- ": " + e.getMessage());
+ LOG.warn("I/O error finding interface {}", strInterface, e);
return new String[] { cachedHostAddress };
}
if (netIf == null) {
@@ -265,7 +264,7 @@ public class DNS {
}
if (hosts.isEmpty()) {
- LOG.warn("Unable to determine hostname for interface " +
+ LOG.warn("Unable to determine hostname for interface {}",
strInterface);
hosts.add(cachedHostname);
}
@@ -283,8 +282,8 @@ public class DNS {
try {
localhost = InetAddress.getLocalHost().getCanonicalHostName();
} catch (UnknownHostException e) {
- LOG.warn("Unable to determine local hostname "
- + "-falling back to \"" + LOCALHOST + "\"", e);
+ LOG.warn("Unable to determine local hostname -falling back to '{}'",
+ LOCALHOST, e);
localhost = LOCALHOST;
}
return localhost;
@@ -303,21 +302,21 @@ public class DNS {
*/
private static String resolveLocalHostIPAddress() {
String address;
+ try {
+ address = InetAddress.getLocalHost().getHostAddress();
+ } catch (UnknownHostException e) {
+ LOG.warn("Unable to determine address of the host " +
+ "-falling back to '{}' address", LOCALHOST, e);
try {
- address = InetAddress.getLocalHost().getHostAddress();
- } catch (UnknownHostException e) {
- LOG.warn("Unable to determine address of the host"
- + "-falling back to \"" + LOCALHOST + "\" address", e);
- try {
- address = InetAddress.getByName(LOCALHOST).getHostAddress();
- } catch (UnknownHostException noLocalHostAddressException) {
- //at this point, deep trouble
- LOG.error("Unable to determine local loopback address "
- + "of \"" + LOCALHOST + "\" " +
- "-this system's network configuration is unsupported", e);
- address = null;
- }
+ address = InetAddress.getByName(LOCALHOST).getHostAddress();
+ } catch (UnknownHostException noLocalHostAddressException) {
+ //at this point, deep trouble
+ LOG.error("Unable to determine local loopback address of '{}' " +
+ "-this system's network configuration is unsupported",
+ LOCALHOST, e);
+ address = null;
}
+ }
return address;
}
@@ -431,8 +430,8 @@ public class DNS {
netIf = getSubinterface(strInterface);
}
} catch (SocketException e) {
- LOG.warn("I/O error finding interface " + strInterface +
- ": " + e.getMessage());
+ LOG.warn("I/O error finding interface {}: {}",
+ strInterface, e.getMessage());
return Arrays.asList(InetAddress.getByName(cachedHostAddress));
}
if (netIf == null) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java
index 2a1140f..70de647 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java
@@ -194,9 +194,7 @@ public abstract class AbstractService implements Service {
serviceStart();
if (isInState(STATE.STARTED)) {
//if the service started (and isn't now in a later state), notify
- if (LOG.isDebugEnabled()) {
- LOG.debug("Service " + getName() + " is started");
- }
+ LOG.debug("Service {} is started", getName());
notifyListeners();
}
} catch (Exception e) {
@@ -235,9 +233,7 @@ public abstract class AbstractService implements Service {
}
} else {
//already stopped: note it
- if (LOG.isDebugEnabled()) {
- LOG.debug("Ignoring re-entrant call to stop()");
- }
+ LOG.debug("Ignoring re-entrant call to stop()");
}
}
}
@@ -258,9 +254,7 @@ public abstract class AbstractService implements Service {
* @param exception the exception
*/
protected final void noteFailure(Exception exception) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("noteFailure " + exception, (Throwable) null);
- }
+ LOG.debug("noteFailure {}" + exception);
if (exception == null) {
//make sure failure logic doesn't itself cause problems
return;
@@ -270,10 +264,8 @@ public abstract class AbstractService implements Service {
if (failureCause == null) {
failureCause = exception;
failureState = getServiceState();
- LOG.info("Service " + getName()
- + " failed in state " + failureState
- + "; cause: " + exception,
- exception);
+ LOG.info("Service {} failed in state {}",
+ getName(), failureState, exception);
}
}
}
@@ -418,8 +410,7 @@ public abstract class AbstractService implements Service {
listeners.notifyListeners(this);
globalListeners.notifyListeners(this);
} catch (Throwable e) {
- LOG.warn("Exception while notifying listeners of " + this + ": " + e,
- e);
+ LOG.warn("Exception while notifying listeners of {}", this, e);
}
}
@@ -449,10 +440,8 @@ public abstract class AbstractService implements Service {
assert stateModel != null : "null state in " + name + " " + this.getClass();
STATE oldState = stateModel.enterState(newState);
if (oldState != newState) {
- if (LOG.isDebugEnabled()) {
- LOG.debug(
- "Service: " + getName() + " entered state " + getServiceState());
- }
+ LOG.debug("Service: {} entered state {}", getName(), getServiceState());
+
recordLifecycleEvent();
}
return oldState;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java
index e7683a2..d064ef9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java
@@ -81,9 +81,7 @@ public final class ServiceOperations {
try {
stop(service);
} catch (Exception e) {
- log.warn("When stopping the service " + service.getName()
- + " : " + e,
- e);
+ log.warn("When stopping the service " + service.getName(), e);
return e;
}
return null;
@@ -103,7 +101,7 @@ public final class ServiceOperations {
try {
stop(service);
} catch (Exception e) {
- log.warn("When stopping the service {} : {}", service.getName(), e, e);
+ log.warn("When stopping the service {}", service.getName(), e);
return e;
}
return null;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java
index 5df973d..cc1cfbf 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java
@@ -57,8 +57,7 @@ public class TestServiceOperations {
ServiceOperations.stopQuietly(logger, service);
assertThat(logCapturer.getOutput(),
- containsString("When stopping the service " + service.getName()
- + " : " + e));
+ containsString("When stopping the service " + service.getName()));
verify(e, times(1)).printStackTrace(Mockito.any(PrintWriter.class));
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java
index 9a9366f..e0fb302 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java
@@ -17,8 +17,6 @@
*/
package org.apache.hadoop.hdfs.nfs.nfs3;
-import org.apache.commons.logging.LogFactory;
-
import java.io.IOException;
import java.net.URI;
import java.nio.file.FileSystemException;
@@ -32,7 +30,6 @@ import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import com.google.common.base.Preconditions;
-import org.apache.commons.logging.Log;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSClient;
@@ -50,12 +47,15 @@ import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.cache.RemovalListener;
import com.google.common.cache.RemovalNotification;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A cache saves DFSClient objects for different users.
*/
class DFSClientCache {
- private static final Log LOG = LogFactory.getLog(DFSClientCache.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(DFSClientCache.class);
/**
* Cache that maps User id to the corresponding DFSClient.
*/
@@ -169,8 +169,8 @@ class DFSClientCache {
URI value = namenodeUriMap.get(namenodeId);
// if a unique nnid, add it to the map
if (value == null) {
- LOG.info("Added export:" + exportPath + " FileSystem URI:" + exportURI
- + " with namenodeId:" + namenodeId);
+ LOG.info("Added export: {} FileSystem URI: {} with namenodeId: {}",
+ exportPath, exportURI, namenodeId);
namenodeUriMap.put(namenodeId, exportURI);
} else {
// if the nnid already exists, it better be the for the same namenode
@@ -194,7 +194,7 @@ class DFSClientCache {
try {
closeAll(true);
} catch (IOException e) {
- LOG.info("DFSClientCache.closeAll() threw an exception:\n", e);
+ LOG.info("DFSClientCache.closeAll() threw an exception", e);
}
}
}
@@ -269,10 +269,7 @@ class DFSClientCache {
UserGroupInformation ugi =
UserGroupInformation.createProxyUser(effectiveUser, realUser);
- if (LOG.isDebugEnabled()){
- LOG.debug(String.format("Created ugi:" +
- " %s for username: %s", ugi, effectiveUser));
- }
+ LOG.debug("Created ugi: {} for username: {}", ugi, effectiveUser);
return ugi;
}
@@ -329,8 +326,7 @@ class DFSClientCache {
try {
client = clientCache.get(new DfsClientKey(userName, namenodeId));
} catch (ExecutionException e) {
- LOG.error("Failed to create DFSClient for user:" + userName + " Cause:"
- + e);
+ LOG.error("Failed to create DFSClient for user: {}", userName, e);
}
return client;
}
@@ -343,8 +339,7 @@ class DFSClientCache {
try {
s = inputstreamCache.get(k);
} catch (ExecutionException e) {
- LOG.warn("Failed to create DFSInputStream for user:" + userName
- + " Cause:" + e);
+ LOG.warn("Failed to create DFSInputStream for user: {}", userName, e);
}
return s;
}
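The file above also migrates its logger from Apache Commons Logging to SLF4J, a prerequisite for the parameterized calls. A minimal sketch of the declaration change (class name illustrative):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class MigratedClass {
      // Replaces the commons-logging declaration:
      //   private static final Log LOG =
      //       LogFactory.getLog(MigratedClass.class);
      private static final Logger LOG =
          LoggerFactory.getLogger(MigratedClass.class);
    }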
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
index 5b7dc14..6067a5d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
@@ -31,8 +31,6 @@ import java.util.concurrent.ConcurrentNavigableMap;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.atomic.AtomicLong;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
@@ -61,13 +59,15 @@ import org.jboss.netty.channel.Channel;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* OpenFileCtx saves the context of one HDFS file output stream. Access to it is
* synchronized by its member lock.
*/
class OpenFileCtx {
- public static final Log LOG = LogFactory.getLog(OpenFileCtx.class);
+ public static final Logger LOG = LoggerFactory.getLogger(OpenFileCtx.class);
// Pending writes water mark for dump, 1MB
private static long DUMP_WRITE_WATER_MARK = 1024 * 1024;
@@ -210,10 +210,8 @@ class OpenFileCtx {
/** Increase or decrease the memory occupation of non-sequential writes */
private long updateNonSequentialWriteInMemory(long count) {
long newValue = nonSequentialWriteInMemory.addAndGet(count);
- if (LOG.isDebugEnabled()) {
- LOG.debug("Update nonSequentialWriteInMemory by " + count + " new value: "
- + newValue);
- }
+ LOG.debug("Update nonSequentialWriteInMemory by {} new value: {}",
+ count, newValue);
Preconditions.checkState(newValue >= 0,
"nonSequentialWriteInMemory is negative " + newValue
@@ -273,9 +271,7 @@ class OpenFileCtx {
// Check if need to dump the new writes
private void waitForDump() {
if (!enabledDump) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Do nothing, dump is disabled.");
- }
+ LOG.debug("Do nothing, dump is disabled.");
return;
}
@@ -286,9 +282,7 @@ class OpenFileCtx {
// wake up the dumper thread to dump the data
synchronized (this) {
if (nonSequentialWriteInMemory.get() >= DUMP_WRITE_WATER_MARK) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Asking dumper to dump...");
- }
+ LOG.debug("Asking dumper to dump...");
if (dumpThread == null) {
dumpThread = new Daemon(new Dumper());
dumpThread.start();
@@ -312,7 +306,7 @@ class OpenFileCtx {
private void dump() {
// Create dump outputstream for the first time
if (dumpOut == null) {
- LOG.info("Create dump file: " + dumpFilePath);
+ LOG.info("Create dump file: {}", dumpFilePath);
File dumpFile = new File(dumpFilePath);
try {
synchronized (this) {
@@ -322,13 +316,14 @@ class OpenFileCtx {
dumpOut = new FileOutputStream(dumpFile);
}
} catch (IOException e) {
- LOG.error("Got failure when creating dump stream " + dumpFilePath, e);
+ LOG.error("Got failure when creating dump stream {}",
+ dumpFilePath, e);
enabledDump = false;
if (dumpOut != null) {
try {
dumpOut.close();
} catch (IOException e1) {
- LOG.error("Can't close dump stream " + dumpFilePath, e);
+ LOG.error("Can't close dump stream {}", dumpFilePath, e);
}
}
return;
@@ -340,17 +335,15 @@ class OpenFileCtx {
try {
raf = new RandomAccessFile(dumpFilePath, "r");
} catch (FileNotFoundException e) {
- LOG.error("Can't get random access to file " + dumpFilePath);
+ LOG.error("Can't get random access to file {}", dumpFilePath);
// Disable dump
enabledDump = false;
return;
}
}
- if (LOG.isDebugEnabled()) {
- LOG.debug("Start dump. Before dump, nonSequentialWriteInMemory == "
- + nonSequentialWriteInMemory.get());
- }
+ LOG.debug("Start dump. Before dump, nonSequentialWriteInMemory == {}",
+ nonSequentialWriteInMemory.get());
Iterator<OffsetRange> it = pendingWrites.keySet().iterator();
while (activeState && it.hasNext()
@@ -367,18 +360,16 @@ class OpenFileCtx {
updateNonSequentialWriteInMemory(-dumpedDataSize);
}
} catch (IOException e) {
- LOG.error("Dump data failed: " + writeCtx + " with error: " + e
- + " OpenFileCtx state: " + activeState);
+ LOG.error("Dump data failed: {} OpenFileCtx state: {}",
+ writeCtx, activeState, e);
// Disable dump
enabledDump = false;
return;
}
}
- if (LOG.isDebugEnabled()) {
- LOG.debug("After dump, nonSequentialWriteInMemory == "
- + nonSequentialWriteInMemory.get());
- }
+ LOG.debug("After dump, nonSequentialWriteInMemory == {}",
+ nonSequentialWriteInMemory.get());
}
@Override
@@ -393,26 +384,22 @@ class OpenFileCtx {
OpenFileCtx.this.notifyAll();
try {
OpenFileCtx.this.wait();
- if (LOG.isDebugEnabled()) {
- LOG.debug("Dumper woke up");
- }
+ LOG.debug("Dumper woke up");
} catch (InterruptedException e) {
- LOG.info("Dumper is interrupted, dumpFilePath= "
- + OpenFileCtx.this.dumpFilePath);
+ LOG.info("Dumper is interrupted, dumpFilePath = {}",
+ OpenFileCtx.this.dumpFilePath);
}
}
}
- if (LOG.isDebugEnabled()) {
- LOG.debug("Dumper checking OpenFileCtx activeState: " + activeState
- + " enabledDump: " + enabledDump);
- }
+ LOG.debug("Dumper checking OpenFileCtx activeState: {} " +
+ "enabledDump: {}", activeState, enabledDump);
} catch (Throwable t) {
// unblock threads with new request
synchronized (OpenFileCtx.this) {
OpenFileCtx.this.notifyAll();
}
- LOG.info("Dumper get Throwable: " + t + ". dumpFilePath: "
- + OpenFileCtx.this.dumpFilePath, t);
+ LOG.info("Dumper got Throwable. dumpFilePath: {}",
+ OpenFileCtx.this.dumpFilePath, t);
activeState = false;
}
}
@@ -428,8 +415,8 @@ class OpenFileCtx {
return null;
} else {
if (xid != writeCtx.getXid()) {
- LOG.warn("Got a repeated request, same range, with a different xid: "
- + xid + " xid in old request: " + writeCtx.getXid());
+ LOG.warn("Got a repeated request, same range, with a different xid: " +
+ "{} xid in old request: {}", xid, writeCtx.getXid());
//TODO: better handling.
}
return writeCtx;
@@ -441,8 +428,8 @@ class OpenFileCtx {
IdMappingServiceProvider iug) {
if (!activeState) {
- LOG.info("OpenFileCtx is inactive, fileId: "
- + request.getHandle().dumpFileHandle());
+ LOG.info("OpenFileCtx is inactive, fileId: {}",
+ request.getHandle().dumpFileHandle());
WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO,
fileWcc, 0, request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF);
@@ -460,15 +447,11 @@ class OpenFileCtx {
xid);
if (existantWriteCtx != null) {
if (!existantWriteCtx.getReplied()) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Repeated write request which hasn't been served: xid="
- + xid + ", drop it.");
- }
+ LOG.debug("Repeated write request which hasn't been served: " +
+ "xid={}, drop it.", xid);
} else {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Repeated write request which is already served: xid="
- + xid + ", resend response.");
- }
+ LOG.debug("Repeated write request which is already served: xid={}" +
+ ", resend response.", xid);
WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
fileWcc, request.getCount(), request.getStableHow(),
@@ -489,13 +472,11 @@ class OpenFileCtx {
long offset = request.getOffset();
int count = request.getCount();
long smallerCount = offset + count - cachedOffset;
- if (LOG.isDebugEnabled()) {
- LOG.debug(String.format("Got overwrite with appended data [%d-%d),"
- + " current offset %d," + " drop the overlapped section [%d-%d)"
- + " and append new data [%d-%d).", offset, (offset + count),
- cachedOffset, offset, cachedOffset, cachedOffset, (offset
- + count)));
- }
+ LOG.debug("Got overwrite with appended data [{}-{}),"
+ + " current offset {}," + " drop the overlapped section [{}-{})"
+ + " and append new data [{}-{}).", offset, (offset + count),
+ cachedOffset, offset, cachedOffset, cachedOffset,
+ (offset + count));
ByteBuffer data = request.getData();
Preconditions.checkState(data.position() == 0,
@@ -538,10 +519,8 @@ class OpenFileCtx {
long cachedOffset = nextOffset.get();
int originalCount = WriteCtx.INVALID_ORIGINAL_COUNT;
- if (LOG.isDebugEnabled()) {
- LOG.debug("requested offset=" + offset + " and current offset="
- + cachedOffset);
- }
+ LOG.debug("requested offset={} and current offset={}",
+ offset, cachedOffset);
// Ignore write request with range below the current offset
if (offset + count <= cachedOffset) {
@@ -576,8 +555,8 @@ class OpenFileCtx {
// Fail non-append call
if (offset < cachedOffset) {
- LOG.warn("(offset,count,nextOffset): " + "(" + offset + "," + count + ","
- + nextOffset + ")");
+ LOG.warn("(offset,count,nextOffset): ({},{},{})",
+ offset, count, nextOffset);
return null;
} else {
DataState dataState = offset == cachedOffset ? WriteCtx.DataState.NO_DUMP
@@ -586,10 +565,8 @@ class OpenFileCtx {
request.getOffset(), request.getCount(), originalCount,
request.getStableHow(), request.getData(), channel, xid, false,
dataState);
- if (LOG.isDebugEnabled()) {
- LOG.debug("Add new write to the list with nextOffset " + cachedOffset
- + " and requested offset=" + offset);
- }
+ LOG.debug("Add new write to the list with nextOffset {}" +
+ " and requested offset={}", cachedOffset, offset);
if (writeCtx.getDataState() == WriteCtx.DataState.ALLOW_DUMP) {
// update the memory size
updateNonSequentialWriteInMemory(count);
@@ -598,14 +575,12 @@ class OpenFileCtx {
WriteCtx oldWriteCtx = checkRepeatedWriteRequest(request, channel, xid);
if (oldWriteCtx == null) {
pendingWrites.put(new OffsetRange(offset, offset + count), writeCtx);
- if (LOG.isDebugEnabled()) {
- LOG.debug("New write buffered with xid " + xid + " nextOffset "
- + cachedOffset + " req offset=" + offset + " mapsize="
- + pendingWrites.size());
- }
+ LOG.debug("New write buffered with xid {} nextOffset {}" +
+ "req offset={} mapsize={}",
+ xid, cachedOffset, offset, pendingWrites.size());
} else {
- LOG.warn("Got a repeated request, same range, with xid: " + xid
- + " nextOffset " + +cachedOffset + " req offset=" + offset);
+ LOG.warn("Got a repeated request, same range, with xid: " +
+ "{} nextOffset {} req offset={}", xid, cachedOffset, offset);
}
return writeCtx;
}
@@ -625,9 +600,7 @@ class OpenFileCtx {
response = new WRITE3Response(Nfs3Status.NFS3ERR_INVAL, wccData, 0,
WriteStableHow.UNSTABLE, Nfs3Constant.WRITE_COMMIT_VERF);
} else {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Process perfectOverWrite");
- }
+ LOG.debug("Process perfectOverWrite");
// TODO: let executor handle perfect overwrite
response = processPerfectOverWrite(dfsClient, offset, count, stableHow,
request.getData().array(),
@@ -652,17 +625,13 @@ class OpenFileCtx {
if (writeCtx.getOffset() == nextOffset.get()) {
if (!asyncStatus) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Trigger the write back task. Current nextOffset: "
- + nextOffset.get());
- }
+ LOG.debug("Trigger the write back task. Current nextOffset: {}",
+ nextOffset.get());
asyncStatus = true;
asyncWriteBackStartOffset = writeCtx.getOffset();
asyncDataService.execute(new AsyncDataService.WriteBackTask(this));
} else {
- if (LOG.isDebugEnabled()) {
- LOG.debug("The write back thread is working.");
- }
+ LOG.debug("The write back thread is working.");
}
return true;
} else {
@@ -694,15 +663,13 @@ class OpenFileCtx {
// responses of the previous batch. So here send response immediately
// for unstable non-sequential write
if (stableHow != WriteStableHow.UNSTABLE) {
- LOG.info("Have to change stable write to unstable write: "
- + request.getStableHow());
+ LOG.info("Have to change stable write to unstable write: {}",
+ request.getStableHow());
stableHow = WriteStableHow.UNSTABLE;
}
- if (LOG.isDebugEnabled()) {
- LOG.debug("UNSTABLE write request, send response for offset: "
- + writeCtx.getOffset());
- }
+ LOG.debug("UNSTABLE write request, send response for offset: {}",
+ writeCtx.getOffset());
WccData fileWcc = new WccData(preOpAttr, latestAttr);
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
@@ -738,8 +705,8 @@ class OpenFileCtx {
LOG.info("The FSDataOutputStream has been closed. "
+ "Continue processing the perfect overwrite.");
} catch (IOException e) {
- LOG.info("hsync failed when processing possible perfect overwrite, path="
- + path + " error: " + e);
+ LOG.info("hsync failed when processing possible perfect overwrite, " +
+ "path={} error: {}", path, e.toString());
return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow,
Nfs3Constant.WRITE_COMMIT_VERF);
}
@@ -748,18 +715,18 @@ class OpenFileCtx {
fis = dfsClient.createWrappedInputStream(dfsClient.open(path));
readCount = fis.read(offset, readbuffer, 0, count);
if (readCount < count) {
- LOG.error("Can't read back " + count + " bytes, partial read size: "
- + readCount);
+ LOG.error("Can't read back {} bytes, partial read size: {}",
+ count, readCount);
return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow,
Nfs3Constant.WRITE_COMMIT_VERF);
}
} catch (IOException e) {
- LOG.info("Read failed when processing possible perfect overwrite, path="
- + path, e);
+ LOG.info("Read failed when processing possible perfect overwrite, " +
+ "path={}", path, e);
return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow,
Nfs3Constant.WRITE_COMMIT_VERF);
} finally {
- IOUtils.cleanup(LOG, fis);
+ IOUtils.cleanupWithLogger(LOG, fis);
}
// Compare with the request
@@ -776,8 +743,8 @@ class OpenFileCtx {
dfsClient.setTimes(path, Time.monotonicNow(), -1);
postOpAttr = Nfs3Utils.getFileAttr(dfsClient, path, iug);
} catch (IOException e) {
- LOG.info("Got error when processing perfect overwrite, path=" + path
- + " error: " + e);
+ LOG.info("Got error when processing perfect overwrite, path={} " +
+ "error: {}", path, e.toString());
return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow,
Nfs3Constant.WRITE_COMMIT_VERF);
}
@@ -810,9 +777,7 @@ class OpenFileCtx {
COMMIT_STATUS ret = checkCommitInternal(commitOffset, channel, xid,
preOpAttr, fromRead);
- if (LOG.isDebugEnabled()) {
- LOG.debug("Got commit status: " + ret.name());
- }
+ LOG.debug("Got commit status: {}", ret.name());
// Do the sync outside the lock
if (ret == COMMIT_STATUS.COMMIT_DO_SYNC
|| ret == COMMIT_STATUS.COMMIT_FINISHED) {
@@ -828,7 +793,7 @@ class OpenFileCtx {
ret = COMMIT_STATUS.COMMIT_ERROR;
}
} catch (IOException e) {
- LOG.error("Got stream error during data sync: " + e);
+ LOG.error("Got stream error during data sync", e);
// Do nothing. Stream will be closed eventually by StreamMonitor.
// status = Nfs3Status.NFS3ERR_IO;
ret = COMMIT_STATUS.COMMIT_ERROR;
@@ -867,9 +832,7 @@ class OpenFileCtx {
CommitCtx commitCtx = new CommitCtx(commitOffset, channel, xid, preOpAttr);
pendingCommits.put(commitOffset, commitCtx);
}
- if (LOG.isDebugEnabled()) {
- LOG.debug("return COMMIT_SPECIAL_WAIT");
- }
+ LOG.debug("return COMMIT_SPECIAL_WAIT");
return COMMIT_STATUS.COMMIT_SPECIAL_WAIT;
}
@@ -886,10 +849,8 @@ class OpenFileCtx {
}
long flushed = getFlushedOffset();
- if (LOG.isDebugEnabled()) {
- LOG.debug("getFlushedOffset=" + flushed + " commitOffset=" + commitOffset
- + "nextOffset=" + nextOffset.get());
- }
+ LOG.debug("getFlushedOffset={} commitOffset={} nextOffset={}",
+ flushed, commitOffset, nextOffset.get());
if (pendingWrites.isEmpty()) {
if (aixCompatMode) {
@@ -898,10 +859,8 @@ class OpenFileCtx {
return COMMIT_STATUS.COMMIT_FINISHED;
} else {
if (flushed < nextOffset.get()) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("get commit while still writing to the requested offset,"
- + " with empty queue");
- }
+ LOG.debug("get commit while still writing to the requested offset,"
+ + " with empty queue");
return handleSpecialWait(fromRead, nextOffset.get(), channel, xid,
preOpAttr);
} else {
@@ -920,18 +879,14 @@ class OpenFileCtx {
if (co <= flushed) {
return COMMIT_STATUS.COMMIT_DO_SYNC;
} else if (co < nextOffset.get()) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("get commit while still writing to the requested offset");
- }
+ LOG.debug("get commit while still writing to the requested offset");
return handleSpecialWait(fromRead, co, channel, xid, preOpAttr);
} else {
// co >= nextOffset
if (checkSequential(co, nextOffset.get())) {
return handleSpecialWait(fromRead, co, channel, xid, preOpAttr);
} else {
- if (LOG.isDebugEnabled()) {
- LOG.debug("return COMMIT_SPECIAL_SUCCESS");
- }
+ LOG.debug("return COMMIT_SPECIAL_SUCCESS");
return COMMIT_STATUS.COMMIT_SPECIAL_SUCCESS;
}
}
@@ -993,8 +948,8 @@ class OpenFileCtx {
// Check the stream timeout
if (checkStreamTimeout(streamTimeout)) {
if (LOG.isDebugEnabled()) {
- LOG.debug("stream can be closed for fileId: "
- + handle.dumpFileHandle());
+ LOG.debug("stream can be closed for fileId: {}",
+ handle.dumpFileHandle());
}
flag = true;
}
@@ -1009,10 +964,8 @@ class OpenFileCtx {
*/
private synchronized WriteCtx offerNextToWrite() {
if (pendingWrites.isEmpty()) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("The async write task has no pending writes, fileId: "
- + latestAttr.getFileId());
- }
+ LOG.debug("The async write task has no pending writes, fileId: {}",
+ latestAttr.getFileId());
// process pending commit again to handle this race: a commit is added
// to pendingCommits map just after the last doSingleWrite returns.
// There is no pending write and the commit should be handled by the
@@ -1029,49 +982,35 @@ class OpenFileCtx {
OffsetRange range = lastEntry.getKey();
WriteCtx toWrite = lastEntry.getValue();
- if (LOG.isTraceEnabled()) {
- LOG.trace("range.getMin()=" + range.getMin() + " nextOffset="
- + nextOffset);
- }
+ LOG.trace("range.getMin()={} nextOffset={}",
+ range.getMin(), nextOffset);
long offset = nextOffset.get();
if (range.getMin() > offset) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("The next sequential write has not arrived yet");
- }
+ LOG.debug("The next sequential write has not arrived yet");
processCommits(nextOffset.get()); // handle race
this.asyncStatus = false;
} else if (range.getMax() <= offset) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Remove write " + range.toString()
- + " which is already written from the list");
- }
+ LOG.debug("Remove write {} which is already written from the list",
+ range);
// remove the WriteCtx from cache
pendingWrites.remove(range);
} else if (range.getMin() < offset && range.getMax() > offset) {
- LOG.warn("Got an overlapping write " + range.toString()
- + ", nextOffset=" + offset
- + ". Remove and trim it");
+ LOG.warn("Got an overlapping write {}, nextOffset={}. " +
+ "Remove and trim it", range, offset);
pendingWrites.remove(range);
trimWriteRequest(toWrite, offset);
// update nextOffset
nextOffset.addAndGet(toWrite.getCount());
- if (LOG.isDebugEnabled()) {
- LOG.debug("Change nextOffset (after trim) to " + nextOffset.get());
- }
+ LOG.debug("Change nextOffset (after trim) to {}", nextOffset.get());
return toWrite;
} else {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Remove write " + range.toString()
- + " from the list");
- }
+ LOG.debug("Remove write {} from the list", range);
// after writing, remove the WriteCtx from cache
pendingWrites.remove(range);
// update nextOffset
nextOffset.addAndGet(toWrite.getCount());
- if (LOG.isDebugEnabled()) {
- LOG.debug("Change nextOffset to " + nextOffset.get());
- }
+ LOG.debug("Change nextOffset to {}", nextOffset.get());
return toWrite;
}
return null;
@@ -1095,9 +1034,9 @@ class OpenFileCtx {
}
}
- if (!activeState && LOG.isDebugEnabled()) {
- LOG.debug("The openFileCtx is not active anymore, fileId: "
- + latestAttr.getFileId());
+ if (!activeState) {
+ LOG.debug("The openFileCtx is not active anymore, fileId: {}",
+ latestAttr.getFileId());
}
} finally {
// Make sure to reset asyncStatus to false unless a race happens
@@ -1105,11 +1044,12 @@ class OpenFileCtx {
if (startOffset == asyncWriteBackStartOffset) {
asyncStatus = false;
} else {
- LOG.info("Another async task is already started before this one"
- + " is finalized. fileId: " + latestAttr.getFileId()
- + " asyncStatus: " + asyncStatus + " original startOffset: "
- + startOffset + " new startOffset: " + asyncWriteBackStartOffset
- + ". Won't change asyncStatus here.");
+ LOG.info("Another async task is already started before this one " +
+ "is finalized. fileId: {} asyncStatus: {} " +
+ "original startOffset: {} " +
+ "new startOffset: {}. Won't change asyncStatus here.",
+ latestAttr.getFileId(), asyncStatus,
+ startOffset, asyncWriteBackStartOffset);
}
}
}
@@ -1132,8 +1072,8 @@ class OpenFileCtx {
status = Nfs3Status.NFS3_OK;
} catch (ClosedChannelException cce) {
if (!pendingWrites.isEmpty()) {
- LOG.error("Can't sync for fileId: " + latestAttr.getFileId()
- + ". Channel closed with writes pending.", cce);
+ LOG.error("Can't sync for fileId: {}. " +
+ "Channel closed with writes pending", latestAttr.getFileId(), cce);
}
status = Nfs3Status.NFS3ERR_IO;
} catch (IOException e) {
@@ -1152,8 +1092,8 @@ class OpenFileCtx {
}
if (latestAttr.getSize() != offset) {
- LOG.error("After sync, the expect file size: " + offset
- + ", however actual file size is: " + latestAttr.getSize());
+ LOG.error("After sync, the expect file size: {}, " +
+ "however actual file size is: {}", offset, latestAttr.getSize());
status = Nfs3Status.NFS3ERR_IO;
}
WccData wccData = new WccData(Nfs3Utils.getWccAttr(latestAttr), latestAttr);
@@ -1170,11 +1110,11 @@ class OpenFileCtx {
Nfs3Utils.writeChannelCommit(commit.getChannel(), response
.serialize(new XDR(), commit.getXid(),
new VerifierNone()), commit.getXid());
-
+
if (LOG.isDebugEnabled()) {
- LOG.debug("FileId: " + latestAttr.getFileId() + " Service time: "
- + Nfs3Utils.getElapsedTime(commit.startTime)
- + "ns. Sent response for commit: " + commit);
+ LOG.debug("FileId: {} Service time: {}ns. " +
+ "Sent response for commit: {}", latestAttr.getFileId(),
+ Nfs3Utils.getElapsedTime(commit.startTime), commit);
}
entry = pendingCommits.firstEntry();
}
@@ -1190,8 +1130,8 @@ class OpenFileCtx {
FileHandle handle = writeCtx.getHandle();
if (LOG.isDebugEnabled()) {
- LOG.debug("do write, fileHandle " + handle.dumpFileHandle() + " offset: "
- + offset + " length: " + count + " stableHow: " + stableHow.name());
+ LOG.debug("do write, fileHandle {} offset: {} length: {} stableHow: {}",
+ handle.dumpFileHandle(), offset, count, stableHow.name());
}
try {
@@ -1215,10 +1155,10 @@ class OpenFileCtx {
writeCtx.setDataState(WriteCtx.DataState.NO_DUMP);
updateNonSequentialWriteInMemory(-count);
if (LOG.isDebugEnabled()) {
- LOG.debug("After writing " + handle.dumpFileHandle()
- + " at offset " + offset
- + ", updated the memory count, new value: "
- + nonSequentialWriteInMemory.get());
+ LOG.debug("After writing {} at offset {}, " +
+ "updated the memory count, new value: {}",
+ handle.dumpFileHandle(), offset,
+ nonSequentialWriteInMemory.get());
}
}
}
@@ -1226,7 +1166,7 @@ class OpenFileCtx {
if (!writeCtx.getReplied()) {
if (stableHow != WriteStableHow.UNSTABLE) {
- LOG.info("Do sync for stable write: " + writeCtx);
+ LOG.info("Do sync for stable write: {}", writeCtx);
try {
if (stableHow == WriteStableHow.DATA_SYNC) {
fos.hsync();
@@ -1237,7 +1177,7 @@ class OpenFileCtx {
fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
}
} catch (IOException e) {
- LOG.error("hsync failed with writeCtx: " + writeCtx, e);
+ LOG.error("hsync failed with writeCtx: {}", writeCtx, e);
throw e;
}
}
@@ -1245,8 +1185,8 @@ class OpenFileCtx {
WccAttr preOpAttr = latestAttr.getWccAttr();
WccData fileWcc = new WccData(preOpAttr, latestAttr);
if (writeCtx.getOriginalCount() != WriteCtx.INVALID_ORIGINAL_COUNT) {
- LOG.warn("Return original count: " + writeCtx.getOriginalCount()
- + " instead of real data count: " + count);
+ LOG.warn("Return original count: {} instead of real data count: {}",
+ writeCtx.getOriginalCount(), count);
count = writeCtx.getOriginalCount();
}
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
@@ -1260,8 +1200,8 @@ class OpenFileCtx {
processCommits(writeCtx.getOffset() + writeCtx.getCount());
} catch (IOException e) {
- LOG.error("Error writing to fileHandle " + handle.dumpFileHandle()
- + " at offset " + offset + " and length " + count, e);
+ LOG.error("Error writing to fileHandle {} at offset {} and length {}",
+ handle.dumpFileHandle(), offset, count, e);
if (!writeCtx.getReplied()) {
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO);
Nfs3Utils.writeChannel(channel, response.serialize(
@@ -1269,8 +1209,8 @@ class OpenFileCtx {
// Keep stream open. Either client retries or SteamMonitor closes it.
}
- LOG.info("Clean up open file context for fileId: "
- + latestAttr.getFileId());
+ LOG.info("Clean up open file context for fileId: {}",
+ latestAttr.getFileId());
cleanup();
}
}
@@ -1297,17 +1237,16 @@ class OpenFileCtx {
fos.close();
}
} catch (IOException e) {
- LOG.info("Can't close stream for fileId: " + latestAttr.getFileId()
- + ", error: " + e);
+ LOG.info("Can't close stream for fileId: {}, error: {}",
+ latestAttr.getFileId(), e.toString());
}
// Reply error for pending writes
- LOG.info("There are " + pendingWrites.size() + " pending writes.");
+ LOG.info("There are {} pending writes.", pendingWrites.size());
WccAttr preOpAttr = latestAttr.getWccAttr();
while (!pendingWrites.isEmpty()) {
OffsetRange key = pendingWrites.firstKey();
- LOG.info("Fail pending write: " + key.toString()
- + ", nextOffset=" + nextOffset.get());
+ LOG.info("Fail pending write: {}, nextOffset={}", key, nextOffset.get());
WriteCtx writeCtx = pendingWrites.remove(key);
if (!writeCtx.getReplied()) {
@@ -1325,11 +1264,12 @@ class OpenFileCtx {
try {
dumpOut.close();
} catch (IOException e) {
- LOG.error("Failed to close outputstream of dump file" + dumpFilePath, e);
+ LOG.error("Failed to close outputstream of dump file {}",
+ dumpFilePath, e);
}
File dumpFile = new File(dumpFilePath);
if (dumpFile.exists() && !dumpFile.delete()) {
- LOG.error("Failed to delete dumpfile: " + dumpFile);
+ LOG.error("Failed to delete dumpfile: {}", dumpFile);
}
}
if (raf != null) {