Posted to common-commits@hadoop.apache.org by zh...@apache.org on 2015/09/30 17:41:52 UTC
[52/58] [abbrv] hadoop git commit: Revert "HDFS-9170. Move libhdfs / fuse-dfs / libwebhdfs to hdfs-client. Contributed by Haohui Mai."
Revert "HDFS-9170. Move libhdfs / fuse-dfs / libwebhdfs to hdfs-client. Contributed by Haohui Mai."
This reverts commit d5a9a3daa0224249221ffa7b8bd5751ab2feca56.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6ee0539e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6ee0539e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6ee0539e
Branch: refs/heads/HDFS-7285
Commit: 6ee0539ede78b640f01c5eac18ded161182a7835
Parents: d5a9a3d
Author: Haohui Mai <wh...@apache.org>
Authored: Tue Sep 29 17:51:15 2015 -0700
Committer: Haohui Mai <wh...@apache.org>
Committed: Tue Sep 29 17:51:15 2015 -0700
----------------------------------------------------------------------
.../apache/hadoop/hdfs/BlockReaderFactory.java | 114 +++++++++++++------
.../apache/hadoop/hdfs/BlockReaderLocal.java | 77 +++++++++----
.../hadoop/hdfs/BlockReaderLocalLegacy.java | 20 +++-
.../java/org/apache/hadoop/hdfs/DFSClient.java | 36 ++++--
.../org/apache/hadoop/hdfs/DFSInputStream.java | 85 +++++++++-----
.../org/apache/hadoop/hdfs/DFSOutputStream.java | 36 +++---
.../org/apache/hadoop/hdfs/DFSUtilClient.java | 10 +-
.../org/apache/hadoop/hdfs/DataStreamer.java | 47 +++++---
.../apache/hadoop/hdfs/RemoteBlockReader.java | 4 +-
.../apache/hadoop/hdfs/RemoteBlockReader2.java | 25 ++--
.../hadoop/hdfs/client/impl/LeaseRenewer.java | 18 ++-
.../protocol/datatransfer/PacketReceiver.java | 8 +-
.../hdfs/protocol/datatransfer/Sender.java | 6 +-
.../datatransfer/sasl/DataTransferSaslUtil.java | 8 +-
.../ClientDatanodeProtocolTranslatorPB.java | 10 +-
.../hdfs/shortcircuit/DfsClientShmManager.java | 45 +++++---
.../hdfs/shortcircuit/ShortCircuitCache.java | 101 ++++++++++++----
.../hdfs/shortcircuit/ShortCircuitReplica.java | 33 ++++--
.../hdfs/shortcircuit/ShortCircuitShm.java | 14 ++-
.../hadoop/hdfs/util/ByteArrayManager.java | 63 ++++++++--
.../org/apache/hadoop/hdfs/web/TokenAspect.java | 8 +-
.../hadoop/hdfs/web/URLConnectionFactory.java | 4 +-
.../hadoop/hdfs/web/WebHdfsFileSystem.java | 28 +++--
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 -
24 files changed, 573 insertions(+), 230 deletions(-)
----------------------------------------------------------------------
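A note on the mechanical pattern this revert applies across all 24 files: it swaps SLF4J parameterized logging (the "{}" placeholder style) back to explicit is*Enabled() guards around string concatenation. A minimal standalone sketch of the two styles; the LoggingStyles class and demo() method below are hypothetical, not code from this patch:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class LoggingStyles {
  private static final Logger LOG = LoggerFactory.getLogger(LoggingStyles.class);

  void demo(String path, int nRead) {
    // Style restored by this revert: guard the call so the message string
    // is only concatenated when TRACE is actually enabled.
    if (LOG.isTraceEnabled()) {
      LOG.trace(this + ": read " + nRead + " bytes from " + path);
    }

    // Style being reverted away from: parameterized logging. The {} markers
    // are substituted lazily inside the logger, so no guard is needed to
    // avoid the concatenation cost when TRACE is disabled.
    LOG.trace("{}: read {} bytes from {}", this, nRead, path);
  }
}

Both forms avoid building the message when the level is off; the parameterized form does so without the extra guard, which is why the guards had been removed upstream in the first place.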
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0539e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
index f249692..4f37090 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
@@ -349,13 +349,17 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
if (clientContext.getUseLegacyBlockReaderLocal()) {
reader = getLegacyBlockReaderLocal();
if (reader != null) {
- LOG.trace("{}: returning new legacy block reader local.", this);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + ": returning new legacy block reader local.");
+ }
return reader;
}
} else {
reader = getBlockReaderLocal();
if (reader != null) {
- LOG.trace("{}: returning new block reader local.", this);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + ": returning new block reader local.");
+ }
return reader;
}
}
@@ -363,8 +367,10 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
if (scConf.isDomainSocketDataTraffic()) {
reader = getRemoteBlockReaderFromDomain();
if (reader != null) {
- LOG.trace("{}: returning new remote block reader using UNIX domain "
- + "socket on {}", this, pathInfo.getPath());
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + ": returning new remote block reader using " +
+ "UNIX domain socket on " + pathInfo.getPath());
+ }
return reader;
}
}
@@ -399,8 +405,10 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
setVisibleLength(visibleLength).
build();
if (accessor == null) {
- LOG.trace("{}: No ReplicaAccessor created by {}",
- this, cls.getName());
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + ": No ReplicaAccessor created by " +
+ cls.getName());
+ }
} else {
return new ExternalBlockReader(accessor, visibleLength, startOffset);
}
@@ -419,10 +427,14 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
* first introduced in HDFS-2246.
*/
private BlockReader getLegacyBlockReaderLocal() throws IOException {
- LOG.trace("{}: trying to construct BlockReaderLocalLegacy", this);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + ": trying to construct BlockReaderLocalLegacy");
+ }
if (!DFSUtilClient.isLocalAddress(inetSocketAddress)) {
- LOG.trace("{}: can't construct BlockReaderLocalLegacy because the address"
- + "{} is not local", this, inetSocketAddress);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + ": can't construct BlockReaderLocalLegacy because " +
+ "the address " + inetSocketAddress + " is not local");
+ }
return null;
}
if (clientContext.getDisableLegacyBlockReaderLocal()) {
@@ -458,8 +470,10 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
}
private BlockReader getBlockReaderLocal() throws InvalidToken {
- LOG.trace("{}: trying to construct a BlockReaderLocal for short-circuit "
- + " reads.", this);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + ": trying to construct a BlockReaderLocal " +
+ "for short-circuit reads.");
+ }
if (pathInfo == null) {
pathInfo = clientContext.getDomainSocketFactory()
.getPathInfo(inetSocketAddress, conf.getShortCircuitConf());
@@ -474,8 +488,10 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
ShortCircuitReplicaInfo info = cache.fetchOrCreate(key, this);
InvalidToken exc = info.getInvalidTokenException();
if (exc != null) {
- LOG.trace("{}: got InvalidToken exception while trying to construct "
- + "BlockReaderLocal via {}", this, pathInfo.getPath());
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + ": got InvalidToken exception while trying to " +
+ "construct BlockReaderLocal via " + pathInfo.getPath());
+ }
throw exc;
}
if (info.getReplica() == null) {
@@ -511,7 +527,9 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
createShortCircuitReplicaInfoCallback.createShortCircuitReplicaInfo();
if (info != null) return info;
}
- LOG.trace("{}: trying to create ShortCircuitReplicaInfo.", this);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + ": trying to create ShortCircuitReplicaInfo.");
+ }
BlockReaderPeer curPeer;
while (true) {
curPeer = nextDomainPeer();
@@ -526,8 +544,10 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
new ExtendedBlockId(block.getBlockId(), block.getBlockPoolId()),
clientName);
if (usedPeer.booleanValue()) {
- LOG.trace("{}: allocShmSlot used up our previous socket {}. "
- + "Allocating a new one...", this, peer.getDomainSocket());
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + ": allocShmSlot used up our previous socket " +
+ peer.getDomainSocket() + ". Allocating a new one...");
+ }
curPeer = nextDomainPeer();
if (curPeer == null) break;
peer = (DomainPeer)curPeer.peer;
@@ -542,7 +562,9 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
if (curPeer.fromCache) {
// Handle an I/O error we got when using a cached socket.
// These are considered less serious, because the socket may be stale.
- LOG.debug("{}: closing stale domain peer {}", this, peer, e);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(this + ": closing stale domain peer " + peer, e);
+ }
IOUtilsClient.cleanup(LOG, peer);
} else {
// Handle an I/O error we got when using a newly created socket.
@@ -595,7 +617,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
ExtendedBlockId key =
new ExtendedBlockId(block.getBlockId(), block.getBlockPoolId());
if (buf[0] == USE_RECEIPT_VERIFICATION.getNumber()) {
- LOG.trace("Sending receipt verification byte for slot {}", slot);
+ LOG.trace("Sending receipt verification byte for slot " + slot);
sock.getOutputStream().write(0);
}
replica = new ShortCircuitReplica(key, fis[0], fis[1], cache,
@@ -628,7 +650,9 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
String msg = "access control error while " +
"attempting to set up short-circuit access to " +
fileName + resp.getMessage();
- LOG.debug("{}:{}", this, msg);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(this + ":" + msg);
+ }
return new ShortCircuitReplicaInfo(new InvalidToken(msg));
default:
LOG.warn(this + ": unknown response code " + resp.getStatus() +
@@ -660,8 +684,10 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
" is not usable.", this, pathInfo);
return null;
}
- LOG.trace("{}: trying to create a remote block reader from the UNIX domain "
- + "socket at {}", this, pathInfo.getPath());
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + ": trying to create a remote block reader from the " +
+ "UNIX domain socket at " + pathInfo.getPath());
+ }
while (true) {
BlockReaderPeer curPeer = nextDomainPeer();
@@ -675,15 +701,19 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
} catch (IOException ioe) {
IOUtilsClient.cleanup(LOG, peer);
if (isSecurityException(ioe)) {
- LOG.trace("{}: got security exception while constructing a remote "
- + " block reader from the unix domain socket at {}",
- this, pathInfo.getPath(), ioe);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + ": got security exception while constructing " +
+ "a remote block reader from the unix domain socket at " +
+ pathInfo.getPath(), ioe);
+ }
throw ioe;
}
if (curPeer.fromCache) {
// Handle an I/O error we got when using a cached peer. These are
// considered less serious, because the underlying socket may be stale.
- LOG.debug("Closed potentially stale domain peer {}", peer, ioe);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Closed potentially stale domain peer " + peer, ioe);
+ }
} else {
// Handle an I/O error we got when using a newly created domain peer.
// We temporarily disable the domain socket path for a few minutes in
@@ -717,8 +747,10 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
* If there was another problem.
*/
private BlockReader getRemoteBlockReaderFromTcp() throws IOException {
- LOG.trace("{}: trying to create a remote block reader from a TCP socket",
- this);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + ": trying to create a remote block reader from a " +
+ "TCP socket");
+ }
BlockReader blockReader = null;
while (true) {
BlockReaderPeer curPeer = null;
@@ -731,15 +763,19 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
return blockReader;
} catch (IOException ioe) {
if (isSecurityException(ioe)) {
- LOG.trace("{}: got security exception while constructing a remote "
- + "block reader from {}", this, peer, ioe);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + ": got security exception while constructing " +
+ "a remote block reader from " + peer, ioe);
+ }
throw ioe;
}
if ((curPeer != null) && curPeer.fromCache) {
// Handle an I/O error we got when using a cached peer. These are
// considered less serious, because the underlying socket may be
// stale.
- LOG.debug("Closed potentially stale remote peer {}", peer, ioe);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Closed potentially stale remote peer " + peer, ioe);
+ }
} else {
// Handle an I/O error we got when using a newly created peer.
LOG.warn("I/O error constructing remote block reader.", ioe);
@@ -772,7 +808,9 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
if (remainingCacheTries > 0) {
Peer peer = clientContext.getPeerCache().get(datanode, true);
if (peer != null) {
- LOG.trace("nextDomainPeer: reusing existing peer {}", peer);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("nextDomainPeer: reusing existing peer " + peer);
+ }
return new BlockReaderPeer(peer, true);
}
}
@@ -794,18 +832,24 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
if (remainingCacheTries > 0) {
Peer peer = clientContext.getPeerCache().get(datanode, false);
if (peer != null) {
- LOG.trace("nextTcpPeer: reusing existing peer {}", peer);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("nextTcpPeer: reusing existing peer " + peer);
+ }
return new BlockReaderPeer(peer, true);
}
}
try {
Peer peer = remotePeerFactory.newConnectedPeer(inetSocketAddress, token,
datanode);
- LOG.trace("nextTcpPeer: created newConnectedPeer {}", peer);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("nextTcpPeer: created newConnectedPeer " + peer);
+ }
return new BlockReaderPeer(peer, false);
} catch (IOException e) {
- LOG.trace("nextTcpPeer: failed to create newConnectedPeer connected to"
- + "{}", datanode);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("nextTcpPeer: failed to create newConnectedPeer " +
+ "connected to " + datanode);
+ }
throw e;
}
}
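Several hunks in this file (for example the "closing stale domain peer" ones) pass an exception after the message arguments. SLF4J treats a trailing Throwable that has no matching placeholder as the exception to log with its stack trace, while the restored guarded form passes it through the explicit two-argument overload. A hedged sketch with hypothetical names:

import java.io.IOException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class ThrowableLastArg {
  private static final Logger LOG = LoggerFactory.getLogger(ThrowableLastArg.class);

  void onStalePeer(Object peer, IOException e) {
    // Two placeholders, three arguments: SLF4J binds {} to `this` and
    // `peer`, and logs the trailing IOException with its stack trace.
    LOG.debug("{}: closing stale domain peer {}", this, peer, e);

    // Guarded equivalent restored by the revert: the Throwable goes
    // through the (String, Throwable) overload instead.
    if (LOG.isDebugEnabled()) {
      LOG.debug(this + ": closing stale domain peer " + peer, e);
    }
  }
}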
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0539e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
index 10dc35c..62e7af6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
@@ -412,10 +412,17 @@ class BlockReaderLocal implements BlockReader {
public synchronized int read(ByteBuffer buf) throws IOException {
boolean canSkipChecksum = createNoChecksumContext();
try {
- String traceFormatStr = "read(buf.remaining={}, block={}, filename={}, "
- + "canSkipChecksum={})";
- LOG.trace(traceFormatStr + ": starting",
- buf.remaining(), block, filename, canSkipChecksum);
+ String traceString = null;
+ if (LOG.isTraceEnabled()) {
+ traceString = new StringBuilder().
+ append("read(").
+ append("buf.remaining=").append(buf.remaining()).
+ append(", block=").append(block).
+ append(", filename=").append(filename).
+ append(", canSkipChecksum=").append(canSkipChecksum).
+ append(")").toString();
+ LOG.info(traceString + ": starting");
+ }
int nRead;
try {
if (canSkipChecksum && zeroReadaheadRequested) {
@@ -424,12 +431,14 @@ class BlockReaderLocal implements BlockReader {
nRead = readWithBounceBuffer(buf, canSkipChecksum);
}
} catch (IOException e) {
- LOG.trace(traceFormatStr + ": I/O error",
- buf.remaining(), block, filename, canSkipChecksum, e);
+ if (LOG.isTraceEnabled()) {
+ LOG.info(traceString + ": I/O error", e);
+ }
throw e;
}
- LOG.trace(traceFormatStr + ": returning {}",
- buf.remaining(), block, filename, canSkipChecksum, nRead);
+ if (LOG.isTraceEnabled()) {
+ LOG.info(traceString + ": returning " + nRead);
+ }
return nRead;
} finally {
if (canSkipChecksum) releaseNoChecksumContext();
@@ -481,8 +490,10 @@ class BlockReaderLocal implements BlockReader {
}
dataBuf.limit(dataBuf.position());
dataBuf.position(Math.min(dataBuf.position(), slop));
- LOG.trace("loaded {} bytes into bounce buffer from offset {} of {}",
- dataBuf.remaining(), oldDataPos, block);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("loaded " + dataBuf.remaining() + " bytes into bounce " +
+ "buffer from offset " + oldDataPos + " of " + block);
+ }
return dataBuf.limit() != maxReadaheadLength;
}
@@ -554,10 +565,18 @@ class BlockReaderLocal implements BlockReader {
boolean canSkipChecksum = createNoChecksumContext();
int nRead;
try {
- final String traceFormatStr = "read(arr.length={}, off={}, len={}, "
- + "filename={}, block={}, canSkipChecksum={})";
- LOG.trace(traceFormatStr + ": starting",
- arr.length, off, len, filename, block, canSkipChecksum);
+ String traceString = null;
+ if (LOG.isTraceEnabled()) {
+ traceString = new StringBuilder().
+ append("read(arr.length=").append(arr.length).
+ append(", off=").append(off).
+ append(", len=").append(len).
+ append(", filename=").append(filename).
+ append(", block=").append(block).
+ append(", canSkipChecksum=").append(canSkipChecksum).
+ append(")").toString();
+ LOG.trace(traceString + ": starting");
+ }
try {
if (canSkipChecksum && zeroReadaheadRequested) {
nRead = readWithoutBounceBuffer(arr, off, len);
@@ -565,12 +584,14 @@ class BlockReaderLocal implements BlockReader {
nRead = readWithBounceBuffer(arr, off, len, canSkipChecksum);
}
} catch (IOException e) {
- LOG.trace(traceFormatStr + ": I/O error",
- arr.length, off, len, filename, block, canSkipChecksum, e);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(traceString + ": I/O error", e);
+ }
throw e;
}
- LOG.trace(traceFormatStr + ": returning {}",
- arr.length, off, len, filename, block, canSkipChecksum, nRead);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(traceString + ": returning " + nRead);
+ }
} finally {
if (canSkipChecksum) releaseNoChecksumContext();
}
@@ -613,9 +634,11 @@ class BlockReaderLocal implements BlockReader {
dataBuf.position(dataBuf.position() + discardedFromBuf);
remaining -= discardedFromBuf;
}
- LOG.trace("skip(n={}, block={}, filename={}): discarded {} bytes from "
- + "dataBuf and advanced dataPos by {}",
- n, block, filename, discardedFromBuf, remaining);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("skip(n=" + n + ", block=" + block + ", filename=" +
+ filename + "): discarded " + discardedFromBuf + " bytes from " +
+ "dataBuf and advanced dataPos by " + remaining);
+ }
dataPos += remaining;
return n;
}
@@ -630,7 +653,9 @@ class BlockReaderLocal implements BlockReader {
public synchronized void close() throws IOException {
if (closed) return;
closed = true;
- LOG.trace("close(filename={}, block={})", filename, block);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("close(filename=" + filename + ", block=" + block + ")");
+ }
replica.unref();
freeDataBufIfExists();
freeChecksumBufIfExists();
@@ -680,9 +705,11 @@ class BlockReaderLocal implements BlockReader {
(opts.contains(ReadOption.SKIP_CHECKSUMS) == false);
if (anchor) {
if (!createNoChecksumContext()) {
- LOG.trace("can't get an mmap for {} of {} since SKIP_CHECKSUMS was not "
- + "given, we aren't skipping checksums, and the block is not "
- + "mlocked.", block, filename);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("can't get an mmap for " + block + " of " + filename +
+ " since SKIP_CHECKSUMS was not given, " +
+ "we aren't skipping checksums, and the block is not mlocked.");
+ }
return null;
}
}
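The read() hunks above restore a build-once traceString that is computed under the guard and then reused at the start, error, and return log points. Note that the restored read(ByteBuffer) path issues LOG.info() calls under LOG.isTraceEnabled() guards, a quirk carried over from the pre-existing code. A sketch of the same pattern with the guard and the call level kept consistent; the class and doRead() helper are hypothetical:

import java.io.IOException;
import java.nio.ByteBuffer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class GuardedTraceString {
  private static final Logger LOG =
      LoggerFactory.getLogger(GuardedTraceString.class);

  int read(ByteBuffer buf, String filename) throws IOException {
    // Build the message prefix once, only when TRACE is on, and reuse it
    // at the start, error, and return log points.
    String traceString = null;
    if (LOG.isTraceEnabled()) {
      traceString = "read(buf.remaining=" + buf.remaining()
          + ", filename=" + filename + ")";
      LOG.trace(traceString + ": starting");
    }
    int nRead;
    try {
      nRead = doRead(buf);
    } catch (IOException e) {
      if (LOG.isTraceEnabled()) {
        LOG.trace(traceString + ": I/O error", e);
      }
      throw e;
    }
    if (LOG.isTraceEnabled()) {
      LOG.trace(traceString + ": returning " + nRead);
    }
    return nRead;
  }

  private int doRead(ByteBuffer buf) throws IOException {
    return buf.remaining(); // stand-in for the real short-circuit read
  }
}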
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0539e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
index 4a1828e..5235287 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
@@ -221,9 +221,11 @@ class BlockReaderLocalLegacy implements BlockReader {
File blkfile = new File(pathinfo.getBlockPath());
dataIn = new FileInputStream(blkfile);
- LOG.debug("New BlockReaderLocalLegacy for file {} of size {} startOffset "
- + "{} length {} short circuit checksum {}",
- blkfile, blkfile.length(), startOffset, length, !skipChecksumCheck);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("New BlockReaderLocalLegacy for file " + blkfile + " of size "
+ + blkfile.length() + " startOffset " + startOffset + " length "
+ + length + " short circuit checksum " + !skipChecksumCheck);
+ }
if (!skipChecksumCheck) {
// get the metadata file
@@ -290,7 +292,9 @@ class BlockReaderLocalLegacy implements BlockReader {
// channel for the DataNode to notify the client that the path has been
// invalidated. Therefore, our only option is to skip caching.
if (pathinfo != null && !storageType.isTransient()) {
- LOG.debug("Cached location of block {} as {}", blk, pathinfo);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Cached location of block " + blk + " as " + pathinfo);
+ }
localDatanodeInfo.setBlockLocalPathInfo(blk, pathinfo);
}
} catch (IOException e) {
@@ -599,7 +603,9 @@ class BlockReaderLocalLegacy implements BlockReader {
@Override
public synchronized int read(byte[] buf, int off, int len) throws IOException {
- LOG.trace("read off {} len {}", off, len);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("read off " + off + " len " + len);
+ }
if (!verifyChecksum) {
return dataIn.read(buf, off, len);
}
@@ -618,7 +624,9 @@ class BlockReaderLocalLegacy implements BlockReader {
@Override
public synchronized long skip(long n) throws IOException {
- LOG.debug("skip {}", n);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("skip " + n);
+ }
if (n <= 0) {
return 0;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0539e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 8f3df81..92d117c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -419,7 +419,9 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
}
final int idx = r.nextInt(localInterfaceAddrs.length);
final SocketAddress addr = localInterfaceAddrs[idx];
- LOG.debug("Using local interface {}", addr);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Using local interface " + addr);
+ }
return addr;
}
@@ -1214,7 +1216,9 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
InetSocketAddress[] favoredNodes) throws IOException {
checkOpen();
final FsPermission masked = applyUMask(permission);
- LOG.debug("{}: masked={}", src, masked);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug(src + ": masked=" + masked);
+ }
final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
src, masked, flag, createParent, replication, blockSize, progress,
buffersize, dfsClientConf.createChecksum(checksumOpt),
@@ -1811,8 +1815,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
smallBufferSize));
in = new DataInputStream(pair.in);
- LOG.debug("write to {}: {}, block={}",
- datanodes[j], Op.BLOCK_CHECKSUM, block);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("write to " + datanodes[j] + ": "
+ + Op.BLOCK_CHECKSUM + ", block=" + block);
+ }
// get block MD5
new Sender(out).blockChecksum(block, lb.getBlockToken());
@@ -1876,10 +1882,12 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
}
} catch (InvalidBlockTokenException ibte) {
if (i > lastRetriedIndex) {
- LOG.debug("Got access token error in response to OP_BLOCK_CHECKSUM "
- + "for file {} for block {} from datanode {}. Will retry the "
- + "block once.",
- src, block, datanodes[j]);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Got access token error in response to OP_BLOCK_CHECKSUM "
+ + "for file " + src + " for block " + block
+ + " from datanode " + datanodes[j]
+ + ". Will retry the block once.");
+ }
lastRetriedIndex = i;
done = true; // actually it's not done; but we'll retry
i--; // repeat at i-th block
@@ -1933,7 +1941,9 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
try {
sock = socketFactory.createSocket();
String dnAddr = dn.getXferAddr(getConf().isConnectToDnViaHostname());
- LOG.debug("Connecting to datanode {}", dnAddr);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Connecting to datanode " + dnAddr);
+ }
NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
sock.setSoTimeout(timeout);
@@ -2553,7 +2563,9 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
absPermission = applyUMask(null);
}
- LOG.debug("{}: masked={}", src, absPermission);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug(src + ": masked=" + absPermission);
+ }
TraceScope scope = tracer.newScope("mkdir");
try {
return namenode.mkdirs(src, absPermission, createParent);
@@ -3049,7 +3061,9 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
}
});
HEDGED_READ_THREAD_POOL.allowCoreThreadTimeOut(true);
- LOG.debug("Using hedged reads; pool threads={}", num);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Using hedged reads; pool threads=" + num);
+ }
}
ThreadPoolExecutor getHedgedReadsThreadPool() {
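One reason guards like the ones restored above can still matter even with parameterized logging: the {} substitution defers only the formatting, not the evaluation of the arguments themselves. A hedged sketch, with a hypothetical summarize() helper standing in for an expensive argument:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class ExpensiveArgs {
  private static final Logger LOG = LoggerFactory.getLogger(ExpensiveArgs.class);

  void report(long[] blockIds) {
    // Parameterized form: formatting is deferred, but summarize() still
    // runs on every call, even when DEBUG is off.
    LOG.debug("block summary: {}", summarize(blockIds));

    // Guarding the call skips the argument computation entirely when
    // DEBUG is disabled.
    if (LOG.isDebugEnabled()) {
      LOG.debug("block summary: {}", summarize(blockIds));
    }
  }

  private String summarize(long[] blockIds) {
    StringBuilder sb = new StringBuilder();
    for (long id : blockIds) {
      sb.append(id).append(',');
    }
    return sb.toString();
  }
}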
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0539e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 81e8c27..7101753 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -315,7 +315,9 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
if (locatedBlocks == null || refresh) {
newInfo = dfsClient.getLocatedBlocks(src, 0);
}
- DFSClient.LOG.debug("newInfo = {}", newInfo);
+ if (DFSClient.LOG.isDebugEnabled()) {
+ DFSClient.LOG.debug("newInfo = " + newInfo);
+ }
if (newInfo == null) {
throw new IOException("Cannot open filename " + src);
}
@@ -381,8 +383,10 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
replicaNotFoundCount--;
}
- DFSClient.LOG.debug("Failed to getReplicaVisibleLength from datanode {}"
- + " for block {}", datanode, locatedblock.getBlock(), ioe);
+ if (DFSClient.LOG.isDebugEnabled()) {
+ DFSClient.LOG.debug("Failed to getReplicaVisibleLength from datanode "
+ + datanode + " for block " + locatedblock.getBlock(), ioe);
+ }
} finally {
if (cdp != null) {
RPC.stopProxy(cdp);
@@ -1063,7 +1067,9 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
}
final String dnAddr =
chosenNode.getXferAddr(dfsClient.getConf().isConnectToDnViaHostname());
- DFSClient.LOG.debug("Connecting to datanode {}", dnAddr);
+ if (DFSClient.LOG.isDebugEnabled()) {
+ DFSClient.LOG.debug("Connecting to datanode " + dnAddr);
+ }
InetSocketAddress targetAddr = NetUtils.createSocketAddr(dnAddr);
return new DNAddrPair(chosenNode, targetAddr, storageType);
}
@@ -1303,8 +1309,11 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
future.get();
return;
}
- DFSClient.LOG.debug("Waited {}ms to read from {}; spawning hedged "
- + "read", conf.getHedgedReadThresholdMillis(), chosenNode.info);
+ if (DFSClient.LOG.isDebugEnabled()) {
+ DFSClient.LOG.debug("Waited " + conf.getHedgedReadThresholdMillis()
+ + "ms to read from " + chosenNode.info
+ + "; spawning hedged read");
+ }
// Ignore this node on next go around.
ignored.add(chosenNode.info);
dfsClient.getHedgedReadMetrics().incHedgedReadOps();
@@ -1331,8 +1340,10 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
.submit(getFromDataNodeCallable);
futures.add(oneMoreRequest);
} catch (IOException ioe) {
- DFSClient.LOG.debug("Failed getting node for hedged read: {}",
- ioe.getMessage());
+ if (DFSClient.LOG.isDebugEnabled()) {
+ DFSClient.LOG.debug("Failed getting node for hedged read: "
+ + ioe.getMessage());
+ }
}
// if not succeeded. Submit callables for each datanode in a loop, wait
// for a fixed interval and get the result from the fastest one.
@@ -1588,8 +1599,11 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
throw new IOException(errMsg);
}
} catch (IOException e) {//make following read to retry
- DFSClient.LOG.debug("Exception while seek to {} from {} of {} from "
- + "{}", targetPos, getCurrentBlock(), src, currentNode, e);
+ if(DFSClient.LOG.isDebugEnabled()) {
+ DFSClient.LOG.debug("Exception while seek to " + targetPos
+ + " from " + getCurrentBlock() + " of " + src + " from "
+ + currentNode, e);
+ }
}
}
}
@@ -1805,16 +1819,20 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
} else {
length63 = 1 + curEnd - curPos;
if (length63 <= 0) {
- DFSClient.LOG.debug("Unable to perform a zero-copy read from offset {}"
- + " of {}; {} bytes left in block. blockPos={}; curPos={};"
- + "curEnd={}",
- curPos, src, length63, blockPos, curPos, curEnd);
+ if (DFSClient.LOG.isDebugEnabled()) {
+ DFSClient.LOG.debug("Unable to perform a zero-copy read from offset " +
+ curPos + " of " + src + "; " + length63 + " bytes left in block. " +
+ "blockPos=" + blockPos + "; curPos=" + curPos +
+ "; curEnd=" + curEnd);
+ }
return null;
}
- DFSClient.LOG.debug("Reducing read length from {} to {} to avoid going "
- + "more than one byte past the end of the block. blockPos={}; "
- +" curPos={}; curEnd={}",
- maxLength, length63, blockPos, curPos, curEnd);
+ if (DFSClient.LOG.isDebugEnabled()) {
+ DFSClient.LOG.debug("Reducing read length from " + maxLength +
+ " to " + length63 + " to avoid going more than one byte " +
+ "past the end of the block. blockPos=" + blockPos +
+ "; curPos=" + curPos + "; curEnd=" + curEnd);
+ }
}
// Make sure that don't go beyond 31-bit offsets in the MappedByteBuffer.
int length;
@@ -1828,20 +1846,28 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
// So we can't mmap the parts of the block higher than the 2 GB offset.
// FIXME: we could work around this with multiple memory maps.
// See HDFS-5101.
- DFSClient.LOG.debug("Unable to perform a zero-copy read from offset {} "
- + " of {}; 31-bit MappedByteBuffer limit exceeded. blockPos={}, "
- + "curEnd={}", curPos, src, blockPos, curEnd);
+ if (DFSClient.LOG.isDebugEnabled()) {
+ DFSClient.LOG.debug("Unable to perform a zero-copy read from offset " +
+ curPos + " of " + src + "; 31-bit MappedByteBuffer limit " +
+ "exceeded. blockPos=" + blockPos + ", curEnd=" + curEnd);
+ }
return null;
}
length = (int)length31;
- DFSClient.LOG.debug("Reducing read length from {} to {} to avoid 31-bit "
- + "limit. blockPos={}; curPos={}; curEnd={}",
- maxLength, length, blockPos, curPos, curEnd);
+ if (DFSClient.LOG.isDebugEnabled()) {
+ DFSClient.LOG.debug("Reducing read length from " + maxLength +
+ " to " + length + " to avoid 31-bit limit. " +
+ "blockPos=" + blockPos + "; curPos=" + curPos +
+ "; curEnd=" + curEnd);
+ }
}
final ClientMmap clientMmap = blockReader.getClientMmap(opts);
if (clientMmap == null) {
- DFSClient.LOG.debug("unable to perform a zero-copy read from offset {} of"
- + " {}; BlockReader#getClientMmap returned null.", curPos, src);
+ if (DFSClient.LOG.isDebugEnabled()) {
+ DFSClient.LOG.debug("unable to perform a zero-copy read from offset " +
+ curPos + " of " + src + "; BlockReader#getClientMmap returned " +
+ "null.");
+ }
return null;
}
boolean success = false;
@@ -1855,8 +1881,11 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
synchronized (infoLock) {
readStatistics.addZeroCopyBytes(length);
}
- DFSClient.LOG.debug("readZeroCopy read {} bytes from offset {} via the "
- + "zero-copy read path. blockEnd = {}", length, curPos, blockEnd);
+ if (DFSClient.LOG.isDebugEnabled()) {
+ DFSClient.LOG.debug("readZeroCopy read " + length +
+ " bytes from offset " + curPos + " via the zero-copy read " +
+ "path. blockEnd = " + blockEnd);
+ }
success = true;
} finally {
if (!success) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0539e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 47c4b7e..7a40d73 100755
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -190,9 +190,9 @@ public class DFSOutputStream extends FSOutputSummer
this.fileEncryptionInfo = stat.getFileEncryptionInfo();
this.cachingStrategy = new AtomicReference<CachingStrategy>(
dfsClient.getDefaultWriteCachingStrategy());
- if (progress != null) {
- DFSClient.LOG.debug("Set non-null progress callback on DFSOutputStream "
- +"{}", src);
+ if ((progress != null) && DFSClient.LOG.isDebugEnabled()) {
+ DFSClient.LOG.debug(
+ "Set non-null progress callback on DFSOutputStream " + src);
}
this.bytesPerChecksum = checksum.getBytesPerChecksum();
@@ -365,9 +365,12 @@ public class DFSOutputStream extends FSOutputSummer
final int chunkSize = csize + getChecksumSize();
chunksPerPacket = Math.max(bodySize/chunkSize, 1);
packetSize = chunkSize*chunksPerPacket;
- DFSClient.LOG.debug("computePacketChunkSize: src={}, chunkSize={}, "
- + "chunksPerPacket={}, packetSize={}",
- src, chunkSize, chunksPerPacket, packetSize);
+ if (DFSClient.LOG.isDebugEnabled()) {
+ DFSClient.LOG.debug("computePacketChunkSize: src=" + src +
+ ", chunkSize=" + chunkSize +
+ ", chunksPerPacket=" + chunksPerPacket +
+ ", packetSize=" + packetSize);
+ }
}
protected TraceScope createWriteTraceScope() {
@@ -394,10 +397,14 @@ public class DFSOutputStream extends FSOutputSummer
if (currentPacket == null) {
currentPacket = createPacket(packetSize, chunksPerPacket, getStreamer()
.getBytesCurBlock(), getStreamer().getAndIncCurrentSeqno(), false);
- DFSClient.LOG.debug("DFSClient writeChunk allocating new packet seqno={},"
- + " src={}, packetSize={}, chunksPerPacket={}, bytesCurBlock={}",
- currentPacket.getSeqno(), src, packetSize, chunksPerPacket,
- getStreamer().getBytesCurBlock());
+ if (DFSClient.LOG.isDebugEnabled()) {
+ DFSClient.LOG.debug("DFSClient writeChunk allocating new packet seqno=" +
+ currentPacket.getSeqno() +
+ ", src=" + src +
+ ", packetSize=" + packetSize +
+ ", chunksPerPacket=" + chunksPerPacket +
+ ", bytesCurBlock=" + getStreamer().getBytesCurBlock());
+ }
}
currentPacket.writeChecksum(checksum, ckoff, cklen);
@@ -551,9 +558,12 @@ public class DFSOutputStream extends FSOutputSummer
int numKept = flushBuffer(!endBlock, true);
// bytesCurBlock potentially incremented if there was buffered data
- DFSClient.LOG.debug("DFSClient flush(): bytesCurBlock={}, "
- + "lastFlushOffset={}, createNewBlock={}",
- getStreamer().getBytesCurBlock(), lastFlushOffset, endBlock);
+ if (DFSClient.LOG.isDebugEnabled()) {
+ DFSClient.LOG.debug("DFSClient flush(): "
+ + " bytesCurBlock=" + getStreamer().getBytesCurBlock()
+ + " lastFlushOffset=" + lastFlushOffset
+ + " createNewBlock=" + endBlock);
+ }
// Flush only if we haven't already flushed till this offset.
if (lastFlushOffset != getStreamer().getBytesCurBlock()) {
assert getStreamer().getBytesCurBlock() > lastFlushOffset;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0539e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index 71ce7cc..e275afb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -462,13 +462,19 @@ public class DFSUtilClient {
InetAddress addr = targetAddr.getAddress();
Boolean cached = localAddrMap.get(addr.getHostAddress());
if (cached != null) {
- LOG.trace("Address {} is {} local", targetAddr, (cached ? "" : "not"));
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("Address " + targetAddr +
+ (cached ? " is local" : " is not local"));
+ }
return cached;
}
boolean local = NetUtils.isLocalAddress(addr);
- LOG.trace("Address {} is {} local", targetAddr, (local ? "" : "not"));
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("Address " + targetAddr +
+ (local ? " is local" : " is not local"));
+ }
localAddrMap.put(addr.getHostAddress(), local);
return local;
}
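The hunk above is one of the few places where the revert changes the rendered message, not just the plumbing: substituting an empty string into "Address {} is {} local" leaves a doubled space ("is  local"), while the restored concatenation selects the whole phrase. A small self-contained demonstration using SLF4J's own formatter; the class name is hypothetical:

import org.slf4j.helpers.MessageFormatter;

class PlaceholderSpacing {
  public static void main(String[] args) {
    boolean local = true;
    // Parameterized form: "" replaces the {} but the literal spaces
    // around it remain -> "Address 127.0.0.1 is  local" (double space).
    String parameterized = MessageFormatter
        .arrayFormat("Address {} is {} local",
            new Object[] {"127.0.0.1", (local ? "" : "not")})
        .getMessage();

    // Restored form: pick the whole phrase, so the spacing stays clean.
    String concatenated = "Address " + "127.0.0.1"
        + (local ? " is local" : " is not local");

    System.out.println(parameterized);
    System.out.println(concatenated);
  }
}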
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0539e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index 4ea1f41..d1d8d37 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -132,14 +132,18 @@ class DataStreamer extends Daemon {
final int length, final DFSClient client) throws IOException {
final DfsClientConf conf = client.getConf();
final String dnAddr = first.getXferAddr(conf.isConnectToDnViaHostname());
- LOG.debug("Connecting to datanode {}", dnAddr);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Connecting to datanode " + dnAddr);
+ }
final InetSocketAddress isa = NetUtils.createSocketAddr(dnAddr);
final Socket sock = client.socketFactory.createSocket();
final int timeout = client.getDatanodeReadTimeout(length);
NetUtils.connect(sock, isa, client.getRandomLocalInterfaceAddr(), conf.getSocketTimeout());
sock.setSoTimeout(timeout);
sock.setSendBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
- LOG.debug("Send buf size {}", sock.getSendBufferSize());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Send buf size " + sock.getSendBufferSize());
+ }
return sock;
}
@@ -480,7 +484,9 @@ class DataStreamer extends Daemon {
}
private void endBlock() {
- LOG.debug("Closing old block {}", block);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Closing old block " + block);
+ }
this.setName("DataStreamer for file " + src);
closeResponder();
closeStream();
@@ -561,11 +567,15 @@ class DataStreamer extends Daemon {
// get new block from namenode.
if (stage == BlockConstructionStage.PIPELINE_SETUP_CREATE) {
- LOG.debug("Allocating new block");
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Allocating new block");
+ }
setPipeline(nextBlockOutputStream());
initDataStreaming();
} else if (stage == BlockConstructionStage.PIPELINE_SETUP_APPEND) {
- LOG.debug("Append to block {}", block);
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Append to block " + block);
+ }
setupPipelineForAppendOrRecovery();
if (streamerClosed) {
continue;
@@ -617,7 +627,10 @@ class DataStreamer extends Daemon {
}
}
- LOG.debug("DataStreamer block {} sending packet {}", block, one);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("DataStreamer block " + block +
+ " sending packet " + one);
+ }
// write out data to remote datanode
TraceScope writeScope = dfsClient.getTracer().
@@ -728,7 +741,9 @@ class DataStreamer extends Daemon {
TraceScope scope = dfsClient.getTracer().
newScope("waitForAckedSeqno");
try {
- LOG.debug("Waiting for ack for: {}", seqno);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Waiting for ack for: " + seqno);
+ }
long begin = Time.monotonicNow();
try {
synchronized (dataQueue) {
@@ -940,8 +955,8 @@ class DataStreamer extends Daemon {
LOG.warn("Slow ReadProcessor read fields took " + duration
+ "ms (threshold=" + dfsclientSlowLogThresholdMs + "ms); ack: "
+ ack + ", targets: " + Arrays.asList(targets));
- } else {
- LOG.debug("DFSClient {}", ack);
+ } else if (LOG.isDebugEnabled()) {
+ LOG.debug("DFSClient " + ack);
}
long seqno = ack.getSeqno();
@@ -1161,7 +1176,9 @@ class DataStreamer extends Daemon {
}
private void addDatanode2ExistingPipeline() throws IOException {
- DataTransferProtocol.LOG.debug("lastAckedSeqno = {}", lastAckedSeqno);
+ if (DataTransferProtocol.LOG.isDebugEnabled()) {
+ DataTransferProtocol.LOG.debug("lastAckedSeqno = " + lastAckedSeqno);
+ }
/*
* Is data transfer necessary? We have the following cases.
*
@@ -1628,8 +1645,10 @@ class DataStreamer extends Daemon {
new HashSet<String>(Arrays.asList(favoredNodes));
for (int i = 0; i < nodes.length; i++) {
pinnings[i] = favoredSet.remove(nodes[i].getXferAddrWithHostname());
- LOG.debug("{} was chosen by name node (favored={}).",
- nodes[i].getXferAddrWithHostname(), pinnings[i]);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(nodes[i].getXferAddrWithHostname() +
+ " was chosen by name node (favored=" + pinnings[i] + ").");
+ }
}
if (shouldLog && !favoredSet.isEmpty()) {
// There is one or more favored nodes that were not allocated.
@@ -1768,7 +1787,9 @@ class DataStreamer extends Daemon {
packet.addTraceParent(Tracer.getCurrentSpanId());
dataQueue.addLast(packet);
lastQueuedSeqno = packet.getSeqno();
- LOG.debug("Queued packet {}", packet.getSeqno());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Queued packet " + packet.getSeqno());
+ }
dataQueue.notifyAll();
}
}
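The Slow ReadProcessor hunk above shows a further variant: when the guarded call sits in an else branch, the restored code folds the level check into the branch condition itself rather than nesting another if. A hedged sketch with hypothetical names:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class GuardInElseIf {
  private static final Logger LOG = LoggerFactory.getLogger(GuardInElseIf.class);

  void onAck(long durationMs, long thresholdMs, Object ack) {
    if (durationMs > thresholdMs) {
      LOG.warn("Slow ReadProcessor read fields took " + durationMs
          + "ms (threshold=" + thresholdMs + "ms); ack: " + ack);
    } else if (LOG.isDebugEnabled()) {
      // Level check merged into the branch: the debug string is only
      // built when the slow-path warning did not fire and DEBUG is on.
      LOG.debug("DFSClient " + ack);
    }
  }
}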
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0539e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
index 017be9f..15a5bee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java
@@ -250,7 +250,9 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
PacketHeader header = new PacketHeader();
header.readFields(in);
- LOG.debug("DFSClient readChunk got header {}", header);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("DFSClient readChunk got header " + header);
+ }
// Sanity check the lengths
if (!header.sanityCheck(lastSeqNo)) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0539e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
index ca31e67..7a7932d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
@@ -135,9 +135,14 @@ public class RemoteBlockReader2 implements BlockReader {
@Override
public synchronized int read(byte[] buf, int off, int len)
throws IOException {
- UUID randomId = (LOG.isTraceEnabled() ? UUID.randomUUID() : null);
- LOG.trace("Starting read #{} file {} from datanode {}",
- randomId, filename, datanodeID.getHostName());
+
+ UUID randomId = null;
+ if (LOG.isTraceEnabled()) {
+ randomId = UUID.randomUUID();
+ LOG.trace(String.format("Starting read #%s file %s from datanode %s",
+ randomId.toString(), this.filename,
+ this.datanodeID.getHostName()));
+ }
if (curDataSlice == null || curDataSlice.remaining() == 0 && bytesNeededToFinish > 0) {
TraceScope scope = tracer.newScope(
@@ -149,7 +154,9 @@ public class RemoteBlockReader2 implements BlockReader {
}
}
- LOG.trace("Finishing read #{}", randomId);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(String.format("Finishing read #" + randomId));
+ }
if (curDataSlice.remaining() == 0) {
// we're at EOF now
@@ -196,7 +203,9 @@ public class RemoteBlockReader2 implements BlockReader {
curDataSlice = packetReceiver.getDataSlice();
assert curDataSlice.capacity() == curHeader.getDataLen();
- LOG.trace("DFSClient readNextPacket got header {}", curHeader);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("DFSClient readNextPacket got header " + curHeader);
+ }
// Sanity check the lengths
if (!curHeader.sanityCheck(lastSeqNo)) {
@@ -267,8 +276,10 @@ public class RemoteBlockReader2 implements BlockReader {
}
private void readTrailingEmptyPacket() throws IOException {
- LOG.trace("Reading empty packet at end of read");
-
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("Reading empty packet at end of read");
+ }
+
packetReceiver.receiveNextPacket(in);
PacketHeader trailer = packetReceiver.getHeader();
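The RemoteBlockReader2 hunks restore a per-read correlation UUID that is allocated only under the TRACE guard, so untraced reads pay nothing for it; note the restored "Finishing read" line wraps a plain concatenation in String.format() with no format specifiers, which is redundant but harmless. A sketch of the guarded-ID pattern with hypothetical names:

import java.util.UUID;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class GuardedReadId {
  private static final Logger LOG = LoggerFactory.getLogger(GuardedReadId.class);

  int read(String filename) {
    // Allocate the correlation ID only when TRACE is enabled, so the
    // common non-tracing path never pays for UUID generation.
    UUID randomId = null;
    if (LOG.isTraceEnabled()) {
      randomId = UUID.randomUUID();
      LOG.trace(String.format("Starting read #%s file %s", randomId, filename));
    }
    int nRead = 0; // stand-in for the actual packet read
    if (LOG.isTraceEnabled()) {
      LOG.trace("Finishing read #" + randomId);
    }
    return nRead;
  }
}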
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0539e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java
index 8457d65..c3d2cfc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java
@@ -308,7 +308,10 @@ public class LeaseRenewer {
}
LeaseRenewer.this.run(id);
} catch(InterruptedException e) {
- LOG.debug("LeaseRenewer is interrupted.", e);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(LeaseRenewer.this.getClass().getSimpleName()
+ + " is interrupted.", e);
+ }
} finally {
synchronized(LeaseRenewer.this) {
Factory.INSTANCE.remove(LeaseRenewer.this);
@@ -396,7 +399,9 @@ public class LeaseRenewer {
}
if (daemonCopy != null) {
- LOG.debug("Wait for lease checker to terminate");
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Wait for lease checker to terminate");
+ }
daemonCopy.join();
}
}
@@ -419,11 +424,16 @@ public class LeaseRenewer {
//skip if current client name is the same as the previous name.
if (!c.getClientName().equals(previousName)) {
if (!c.renewLease()) {
- LOG.debug("Did not renew lease for client {}", c);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Did not renew lease for client " +
+ c);
+ }
continue;
}
previousName = c.getClientName();
- LOG.debug("Lease renewed for client {}", previousName);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Lease renewed for client " + previousName);
+ }
}
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0539e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
index e6709d9..c4093b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
@@ -147,9 +147,11 @@ public class PacketReceiver implements Closeable {
throw new IOException("Invalid header length " + headerLen);
}
- LOG.trace("readNextPacket: dataPlusChecksumLen={}, headerLen={}",
- dataPlusChecksumLen, headerLen);
-
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("readNextPacket: dataPlusChecksumLen = " + dataPlusChecksumLen +
+ " headerLen = " + headerLen);
+ }
+
// Sanity check the buffer size so we don't allocate too much memory
// and OOME.
int totalLen = payloadLen + headerLen;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0539e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java
index d2bc348..e856211 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java
@@ -73,8 +73,10 @@ public class Sender implements DataTransferProtocol {
private static void send(final DataOutputStream out, final Op opcode,
final Message proto) throws IOException {
- LOG.trace("Sending DataTransferOp {}: {}",
- proto.getClass().getSimpleName(), proto);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("Sending DataTransferOp " + proto.getClass().getSimpleName()
+ + ": " + proto);
+ }
op(out, opcode);
proto.writeDelimitedTo(out);
out.flush();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0539e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java
index 006d304..256caff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java
@@ -332,9 +332,11 @@ public final class DataTransferSaslUtil {
public static IOStreamPair createStreamPair(Configuration conf,
CipherOption cipherOption, OutputStream out, InputStream in,
boolean isServer) throws IOException {
- LOG.debug("Creating IOStreamPair of CryptoInputStream and "
- + "CryptoOutputStream.");
- CryptoCodec codec = CryptoCodec.getInstance(conf,
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Creating IOStreamPair of CryptoInputStream and " +
+ "CryptoOutputStream.");
+ }
+ CryptoCodec codec = CryptoCodec.getInstance(conf,
cipherOption.getCipherSuite());
byte[] inKey = cipherOption.getInKey();
byte[] inIv = cipherOption.getInIv();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0539e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
index 24e1dd2..f764275 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
@@ -130,7 +130,9 @@ public class ClientDatanodeProtocolTranslatorPB implements
throws IOException {
final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
- LOG.debug("Connecting to datanode {} addr={}", dnAddr, addr);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
+ }
rpcProxy = createClientDatanodeProtocolProxy(addr,
UserGroupInformation.getCurrentUser(), conf,
NetUtils.getDefaultSocketFactory(conf), socketTimeout);
@@ -141,8 +143,10 @@ public class ClientDatanodeProtocolTranslatorPB implements
boolean connectToDnViaHostname, LocatedBlock locatedBlock) throws IOException {
final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
- LOG.debug("Connecting to datanode {} addr={}", dnAddr, addr);
-
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
+ }
+
// Since we're creating a new UserGroupInformation here, we know that no
// future RPC proxies will be able to re-use the same connection. And
// usages of this proxy tend to be one-off calls.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0539e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java
index 4ffc108..f70398a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java
@@ -129,13 +129,18 @@ public class DfsClientShmManager implements Closeable {
ShmId shmId = shm.getShmId();
Slot slot = shm.allocAndRegisterSlot(blockId);
if (shm.isFull()) {
- LOG.trace("{}: pulled the last slot {} out of {}",
- this, slot.getSlotIdx(), shm);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + ": pulled the last slot " + slot.getSlotIdx() +
+ " out of " + shm);
+ }
DfsClientShm removedShm = notFull.remove(shmId);
Preconditions.checkState(removedShm == shm);
full.put(shmId, shm);
} else {
- LOG.trace("{}: pulled slot {} out of {}", this, slot.getSlotIdx(), shm);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + ": pulled slot " + slot.getSlotIdx() +
+ " out of " + shm);
+ }
}
return slot;
}
@@ -182,7 +187,9 @@ public class DfsClientShmManager implements Closeable {
DfsClientShm shm =
new DfsClientShm(PBHelperClient.convert(resp.getId()),
fis[0], this, peer);
- LOG.trace("{}: createNewShm: created {}", this, shm);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + ": createNewShm: created " + shm);
+ }
return shm;
} finally {
try {
@@ -227,11 +234,15 @@ public class DfsClientShmManager implements Closeable {
String clientName, ExtendedBlockId blockId) throws IOException {
while (true) {
if (closed) {
- LOG.trace("{}: the DfsClientShmManager has been closed.", this);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + ": the DfsClientShmManager has been closed.");
+ }
return null;
}
if (disabled) {
- LOG.trace("{}: shared memory segment access is disabled.", this);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + ": shared memory segment access is disabled.");
+ }
return null;
}
// Try to use an existing slot.
@@ -242,7 +253,9 @@ public class DfsClientShmManager implements Closeable {
// There are no free slots. If someone is loading more slots, wait
// for that to finish.
if (loading) {
- LOG.trace("{}: waiting for loading to finish...", this);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + ": waiting for loading to finish...");
+ }
finishedLoading.awaitUninterruptibly();
} else {
// Otherwise, load the slot ourselves.
@@ -269,9 +282,11 @@ public class DfsClientShmManager implements Closeable {
// fired and marked the shm as disconnected. In this case, we
// obviously don't want to add the SharedMemorySegment to our list
// of valid not-full segments.
- LOG.debug("{}: the UNIX domain socket associated with this "
- + "short-circuit memory closed before we could make use of "
- + "the shm.", this);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(this + ": the UNIX domain socket associated with " +
+ "this short-circuit memory closed before we could make " +
+ "use of the shm.");
+ }
} else {
notFull.put(shm.getShmId(), shm);
}
@@ -294,7 +309,9 @@ public class DfsClientShmManager implements Closeable {
Preconditions.checkState(!full.containsKey(shm.getShmId()));
Preconditions.checkState(!notFull.containsKey(shm.getShmId()));
if (shm.isEmpty()) {
- LOG.trace("{}: freeing empty stale {}", this, shm);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + ": freeing empty stale " + shm);
+ }
shm.free();
}
} else {
@@ -319,8 +336,10 @@ public class DfsClientShmManager implements Closeable {
// lowest ID, but it could still occur. In most workloads,
// fragmentation should not be a major concern, since it doesn't impact
// peak file descriptor usage or the speed of allocation.
- LOG.trace("{}: shutting down UNIX domain socket for empty {}",
- this, shm);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + ": shutting down UNIX domain socket for " +
+ "empty " + shm);
+ }
shutdown(shm);
} else {
notFull.put(shmId, shm);
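
Every hunk in this file restores the same pattern: an explicit LOG.isTraceEnabled()/LOG.isDebugEnabled() guard around string-concatenation logging, replacing SLF4J's parameterized form. A minimal, self-contained sketch of the two styles side by side (class and argument names here are illustrative, not taken from this patch):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogGuardExample {
  private static final Logger LOG =
      LoggerFactory.getLogger(LogGuardExample.class);

  void logPull(int slotIdx, Object shm) {
    // Parameterized style: SLF4J formats the message only if trace is
    // enabled, though the varargs array is still allocated at the call site.
    LOG.trace("{}: pulled slot {} out of {}", this, slotIdx, shm);

    // Guarded concatenation style, which this revert restores: the explicit
    // check skips the concatenation entirely when trace is off, at the cost
    // of boilerplate around every log statement.
    if (LOG.isTraceEnabled()) {
      LOG.trace(this + ": pulled slot " + slotIdx + " out of " + shm);
    }
  }
}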
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0539e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
index 07f5064..52c1a6e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
@@ -103,7 +103,9 @@ public class ShortCircuitCache implements Closeable {
if (ShortCircuitCache.this.closed) return;
long curMs = Time.monotonicNow();
- LOG.debug("{}: cache cleaner running at {}", this, curMs);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(this + ": cache cleaner running at " + curMs);
+ }
int numDemoted = demoteOldEvictableMmaped(curMs);
int numPurged = 0;
@@ -125,9 +127,11 @@ public class ShortCircuitCache implements Closeable {
numPurged++;
}
- LOG.debug("{}: finishing cache cleaner run started at {}. Demoted {} "
- + "mmapped replicas; purged {} replicas.",
- this, curMs, numDemoted, numPurged);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(this + ": finishing cache cleaner run started at " +
+ curMs + ". Demoted " + numDemoted + " mmapped replicas; " +
+ "purged " + numPurged + " replicas.");
+ }
} finally {
ShortCircuitCache.this.lock.unlock();
}
@@ -182,7 +186,9 @@ public class ShortCircuitCache implements Closeable {
@Override
public void run() {
- LOG.trace("{}: about to release {}", ShortCircuitCache.this, slot);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(ShortCircuitCache.this + ": about to release " + slot);
+ }
final DfsClientShm shm = (DfsClientShm)slot.getShm();
final DomainSocket shmSock = shm.getPeer().getDomainSocket();
final String path = shmSock.getPath();
@@ -199,7 +205,9 @@ public class ShortCircuitCache implements Closeable {
String error = resp.hasError() ? resp.getError() : "(unknown)";
throw new IOException(resp.getStatus().toString() + ": " + error);
}
- LOG.trace("{}: released {}", this, slot);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(ShortCircuitCache.this + ": released " + slot);
+ }
success = true;
} catch (IOException e) {
LOG.error(ShortCircuitCache.this + ": failed to release " +
@@ -425,7 +433,9 @@ public class ShortCircuitCache implements Closeable {
purgeReason = "purging replica because it is stale.";
}
if (purgeReason != null) {
- LOG.debug("{}: {}", this, purgeReason);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(this + ": " + purgeReason);
+ }
purge(replica);
}
}
@@ -667,8 +677,10 @@ public class ShortCircuitCache implements Closeable {
ShortCircuitReplicaInfo info = null;
do {
if (closed) {
- LOG.trace("{}: can't fethchOrCreate {} because the cache is closed.",
- this, key);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + ": can't fetchOrCreate " + key +
+ " because the cache is closed.");
+ }
return null;
}
Waitable<ShortCircuitReplicaInfo> waitable = replicaInfoMap.get(key);
@@ -676,7 +688,9 @@ public class ShortCircuitCache implements Closeable {
try {
info = fetch(key, waitable);
} catch (RetriableException e) {
- LOG.debug("{}: retrying {}", this, e.getMessage());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(this + ": retrying " + e.getMessage());
+ }
continue;
}
}
@@ -707,7 +721,9 @@ public class ShortCircuitCache implements Closeable {
// ShortCircuitReplica. So we simply wait for it to complete.
ShortCircuitReplicaInfo info;
try {
- LOG.trace("{}: found waitable for {}", this, key);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + ": found waitable for " + key);
+ }
info = waitable.await();
} catch (InterruptedException e) {
LOG.info(this + ": interrupted while waiting for " + key);
@@ -749,7 +765,9 @@ public class ShortCircuitCache implements Closeable {
// Handle loading a new replica.
ShortCircuitReplicaInfo info = null;
try {
- LOG.trace("{}: loading {}", this, key);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + ": loading " + key);
+ }
info = creator.createShortCircuitReplicaInfo();
} catch (RuntimeException e) {
LOG.warn(this + ": failed to load " + key, e);
@@ -759,7 +777,9 @@ public class ShortCircuitCache implements Closeable {
try {
if (info.getReplica() != null) {
// On success, make sure the cache cleaner thread is running.
- LOG.trace("{}: successfully loaded {}", this, info.getReplica());
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + ": successfully loaded " + info.getReplica());
+ }
startCacheCleanerThreadIfNeeded();
// Note: new ShortCircuitReplicas start with a refCount of 2,
// indicating that both this cache and whoever requested the
@@ -791,8 +811,10 @@ public class ShortCircuitCache implements Closeable {
cleanerExecutor.scheduleAtFixedRate(cacheCleaner, rateMs, rateMs,
TimeUnit.MILLISECONDS);
cacheCleaner.setFuture(future);
- LOG.debug("{}: starting cache cleaner thread which will run every {} ms",
- this, rateMs);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(this + ": starting cache cleaner thread which will run " +
+ "every " + rateMs + " ms");
+ }
}
}
@@ -810,12 +832,17 @@ public class ShortCircuitCache implements Closeable {
long lastAttemptTimeMs = (Long)replica.mmapData;
long delta = Time.monotonicNow() - lastAttemptTimeMs;
if (delta < mmapRetryTimeoutMs) {
- LOG.trace("{}: can't create client mmap for {} because we failed to"
- + " create one just {}ms ago.", this, replica, delta);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + ": can't create client mmap for " +
+ replica + " because we failed to " +
+ "create one just " + delta + "ms ago.");
+ }
return null;
}
- LOG.trace("{}: retrying client mmap for {}, {} ms after the previous "
- + "failure.", this, replica, delta);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + ": retrying client mmap for " + replica +
+ ", " + delta + " ms after the previous failure.");
+ }
} else if (replica.mmapData instanceof Condition) {
Condition cond = (Condition)replica.mmapData;
cond.awaitUninterruptibly();
@@ -938,10 +965,38 @@ public class ShortCircuitCache implements Closeable {
}
}
}
- LOG.debug("visiting {} with outstandingMmapCount={}, replicas={}, "
- + "failedLoads={}, evictable={}, evictableMmapped={}",
- visitor.getClass().getName(), outstandingMmapCount, replicas,
- failedLoads, evictable, evictableMmapped);
+ if (LOG.isDebugEnabled()) {
+ StringBuilder builder = new StringBuilder();
+ builder.append("visiting ").append(visitor.getClass().getName()).
+ append("with outstandingMmapCount=").append(outstandingMmapCount).
+ append(", replicas=");
+ String prefix = "";
+ for (Entry<ExtendedBlockId, ShortCircuitReplica> entry : replicas.entrySet()) {
+ builder.append(prefix).append(entry.getValue());
+ prefix = ",";
+ }
+ prefix = "";
+ builder.append(", failedLoads=");
+ for (Entry<ExtendedBlockId, InvalidToken> entry : failedLoads.entrySet()) {
+ builder.append(prefix).append(entry.getValue());
+ prefix = ",";
+ }
+ prefix = "";
+ builder.append(", evictable=");
+ for (Entry<Long, ShortCircuitReplica> entry : evictable.entrySet()) {
+ builder.append(prefix).append(entry.getKey()).
+ append(":").append(entry.getValue());
+ prefix = ",";
+ }
+ prefix = "";
+ builder.append(", evictableMmapped=");
+ for (Entry<Long, ShortCircuitReplica> entry : evictableMmapped.entrySet()) {
+ builder.append(prefix).append(entry.getKey()).
+ append(":").append(entry.getValue());
+ prefix = ",";
+ }
+ LOG.debug(builder.toString());
+ }
visitor.visit(outstandingMmapCount, replicas, failedLoads,
evictable, evictableMmapped);
} finally {
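
The visitor hunk above rebuilds the debug dump by hand: a StringBuilder plus a prefix variable that becomes "," after the first entry, repeated for each map. A self-contained sketch of that joining idiom, using placeholder map contents:

import java.util.Map;
import java.util.TreeMap;

public class PrefixJoinExample {
  public static void main(String[] args) {
    Map<Long, String> evictable = new TreeMap<>();
    evictable.put(781L, "replicaA");
    evictable.put(782L, "replicaB");

    StringBuilder builder = new StringBuilder("evictable=");
    String prefix = "";
    for (Map.Entry<Long, String> entry : evictable.entrySet()) {
      builder.append(prefix).append(entry.getKey())
          .append(":").append(entry.getValue());
      prefix = ","; // separates every entry after the first
    }
    // Prints: evictable=781:replicaA,782:replicaB
    System.out.println(builder);
  }
}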
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0539e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplica.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplica.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplica.java
index 38cf22b..37566e2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplica.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplica.java
@@ -154,19 +154,25 @@ public class ShortCircuitReplica {
// Check staleness by looking at the shared memory area we use to
// communicate with the DataNode.
boolean stale = !slot.isValid();
- LOG.trace("{}: checked shared memory segment. isStale={}", this, stale);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + ": checked shared memory segment. isStale=" + stale);
+ }
return stale;
} else {
// Fall back to old, time-based staleness method.
long deltaMs = Time.monotonicNow() - creationTimeMs;
long staleThresholdMs = cache.getStaleThresholdMs();
if (deltaMs > staleThresholdMs) {
- LOG.trace("{} is stale because it's {} ms old and staleThreadholdMS={}",
- this, deltaMs, staleThresholdMs);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + " is stale because it's " + deltaMs +
+ " ms old, and staleThresholdMs = " + staleThresholdMs);
+ }
return true;
} else {
- LOG.trace("{} is not stale because it's only {} ms old "
- + "and staleThresholdMs={}", this, deltaMs, staleThresholdMs);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + " is not stale because it's only " + deltaMs +
+ " ms old, and staleThresholdMs = " + staleThresholdMs);
+ }
return false;
}
}
@@ -188,8 +194,13 @@ public class ShortCircuitReplica {
return false;
}
boolean result = slot.addAnchor();
- LOG.trace("{}: {} no-checksum anchor to slot {}",
- this, result ? "added" : "could not add", slot);
+ if (LOG.isTraceEnabled()) {
+ if (result) {
+ LOG.trace(this + ": added no-checksum anchor to slot " + slot);
+ } else {
+ LOG.trace(this + ": could not add no-checksum anchor to slot " + slot);
+ }
+ }
return result;
}
@@ -252,7 +263,9 @@ public class ShortCircuitReplica {
suffix += " scheduling " + slot + " for later release.";
}
}
- LOG.trace("closed {}{}", this, suffix);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("closed " + this + suffix);
+ }
}
public FileInputStream getDataStream() {
@@ -280,7 +293,9 @@ public class ShortCircuitReplica {
FileChannel channel = dataStream.getChannel();
MappedByteBuffer mmap = channel.map(MapMode.READ_ONLY, 0,
Math.min(Integer.MAX_VALUE, channel.size()));
- LOG.trace("{}: created mmap of size {}", this, channel.size());
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + ": created mmap of size " + channel.size());
+ }
return mmap;
} catch (IOException e) {
LOG.warn(this + ": mmap error", e);
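
The staleness hunk above prefers the shared-memory slot's validity bit and only falls back to the time-based check when no slot exists. A hedged sketch of that decision; Hadoop's Time.monotonicNow() is approximated with System.nanoTime(), and all names are illustrative:

public class StalenessSketch {
  interface Slot { boolean isValid(); }

  // Stand-in for org.apache.hadoop.util.Time.monotonicNow().
  static long monotonicNowMs() {
    return System.nanoTime() / 1_000_000;
  }

  static boolean isStale(Slot slot, long creationTimeMs,
      long staleThresholdMs) {
    if (slot != null) {
      // Preferred check: the DataNode invalidates the slot when the replica
      // is no longer usable, so slot validity is authoritative.
      return !slot.isValid();
    }
    // Fall back to the old, time-based staleness method.
    long deltaMs = monotonicNowMs() - creationTimeMs;
    return deltaMs > staleThresholdMs;
  }
}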
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0539e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java
index fa40c15..78325a3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java
@@ -484,9 +484,13 @@ public class ShortCircuitShm {
POSIX.MMAP_PROT_READ | POSIX.MMAP_PROT_WRITE, true, mmappedLength);
this.slots = new Slot[mmappedLength / BYTES_PER_SLOT];
this.allocatedSlots = new BitSet(slots.length);
- LOG.trace("creating {}(shmId={}, mmappedLength={}, baseAddress={}, "
- + "slots.length={})", this.getClass().getSimpleName(), shmId,
- mmappedLength, String.format("%x", baseAddress), slots.length);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("creating " + this.getClass().getSimpleName() +
+ "(shmId=" + shmId +
+ ", mmappedLength=" + mmappedLength +
+ ", baseAddress=" + String.format("%x", baseAddress) +
+ ", slots.length=" + slots.length + ")");
+ }
}
public final ShmId getShmId() {
@@ -611,7 +615,9 @@ public class ShortCircuitShm {
"tried to unregister slot " + slotIdx + ", which was not registered.");
allocatedSlots.set(slotIdx, false);
slots[slotIdx] = null;
- LOG.trace("{}: unregisterSlot {}", this, slotIdx);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + ": unregisterSlot " + slotIdx);
+ }
}
/**
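
The constructor hunk above sizes a Slot[] from the mmapped length and tracks which indices are live in a BitSet; unregisterSlot clears both. A minimal sketch of that bookkeeping with the mmap details elided (the capacity and Object slots are invented):

import java.util.BitSet;

public class SlotTableSketch {
  private final Object[] slots = new Object[64]; // invented capacity
  private final BitSet allocatedSlots = new BitSet(slots.length);

  synchronized int registerSlot(Object slot) {
    int slotIdx = allocatedSlots.nextClearBit(0);
    if (slotIdx >= slots.length) {
      throw new IllegalStateException("shared memory segment is full");
    }
    allocatedSlots.set(slotIdx);
    slots[slotIdx] = slot;
    return slotIdx;
  }

  synchronized void unregisterSlot(int slotIdx) {
    if (!allocatedSlots.get(slotIdx)) {
      throw new IllegalStateException("tried to unregister slot " + slotIdx +
          ", which was not registered.");
    }
    allocatedSlots.set(slotIdx, false);
    slots[slotIdx] = null;
  }
}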
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0539e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java
index e361252..a9adb7e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java
@@ -36,6 +36,18 @@ import org.slf4j.LoggerFactory;
@InterfaceAudience.Private
public abstract class ByteArrayManager {
static final Logger LOG = LoggerFactory.getLogger(ByteArrayManager.class);
+ private static final ThreadLocal<StringBuilder> DEBUG_MESSAGE =
+ new ThreadLocal<StringBuilder>() {
+ protected StringBuilder initialValue() {
+ return new StringBuilder();
+ }
+ };
+
+ private static void logDebugMessage() {
+ final StringBuilder b = DEBUG_MESSAGE.get();
+ LOG.debug(b.toString());
+ b.setLength(0);
+ }
static final int MIN_ARRAY_LENGTH = 32;
static final byte[] EMPTY_BYTE_ARRAY = {};
@@ -148,18 +160,27 @@ public abstract class ByteArrayManager {
* via the {@link FixedLengthManager#recycle(byte[])} method.
*/
synchronized byte[] allocate() throws InterruptedException {
- LOG.debug(", {}", this);
+ if (LOG.isDebugEnabled()) {
+ DEBUG_MESSAGE.get().append(", ").append(this);
+ }
for(; numAllocated >= maxAllocated;) {
- LOG.debug(": wait ...");
+ if (LOG.isDebugEnabled()) {
+ DEBUG_MESSAGE.get().append(": wait ...");
+ logDebugMessage();
+ }
wait();
- LOG.debug("wake up: {}", this);
+ if (LOG.isDebugEnabled()) {
+ DEBUG_MESSAGE.get().append("wake up: ").append(this);
+ }
}
numAllocated++;
final byte[] array = freeQueue.poll();
- LOG.debug(", recycled? {}", array != null);
+ if (LOG.isDebugEnabled()) {
+ DEBUG_MESSAGE.get().append(", recycled? ").append(array != null);
+ }
return array != null? array : new byte[byteArrayLength];
}
@@ -173,7 +194,9 @@ public abstract class ByteArrayManager {
synchronized int recycle(byte[] array) {
Preconditions.checkNotNull(array);
Preconditions.checkArgument(array.length == byteArrayLength);
- LOG.debug(", {}", this);
+ if (LOG.isDebugEnabled()) {
+ DEBUG_MESSAGE.get().append(", ").append(this);
+ }
notify();
numAllocated--;
@@ -184,7 +207,9 @@ public abstract class ByteArrayManager {
}
if (freeQueue.size() < maxAllocated - numAllocated) {
- LOG.debug(", freeQueue.offer");
+ if (LOG.isDebugEnabled()) {
+ DEBUG_MESSAGE.get().append(", freeQueue.offer");
+ }
freeQueue.offer(array);
}
return freeQueue.size();
@@ -324,7 +349,9 @@ public abstract class ByteArrayManager {
public byte[] newByteArray(final int arrayLength)
throws InterruptedException {
Preconditions.checkArgument(arrayLength >= 0);
- LOG.debug("allocate({})", arrayLength);
+ if (LOG.isDebugEnabled()) {
+ DEBUG_MESSAGE.get().append("allocate(").append(arrayLength).append(")");
+ }
final byte[] array;
if (arrayLength == 0) {
@@ -338,12 +365,18 @@ public abstract class ByteArrayManager {
final FixedLengthManager manager =
managers.get(powerOfTwo, aboveThreshold);
- LOG.debug(": count={}, {}Threshold", count,
- aboveThreshold ? "above" : "below");
+ if (LOG.isDebugEnabled()) {
+ DEBUG_MESSAGE.get().append(": count=").append(count)
+ .append(aboveThreshold? ", aboveThreshold": ", belowThreshold");
+ }
array = manager != null? manager.allocate(): new byte[powerOfTwo];
}
- LOG.debug(", return byte[{}]", array.length);
+ if (LOG.isDebugEnabled()) {
+ DEBUG_MESSAGE.get().append(", return byte[")
+ .append(array.length).append("]");
+ logDebugMessage();
+ }
return array;
}
@@ -358,7 +391,10 @@ public abstract class ByteArrayManager {
@Override
public int release(final byte[] array) {
Preconditions.checkNotNull(array);
- LOG.debug("recycle: array.length={}", array.length);
+ if (LOG.isDebugEnabled()) {
+ DEBUG_MESSAGE.get()
+ .append("recycle: array.length=").append(array.length);
+ }
final int freeQueueSize;
if (array.length == 0) {
@@ -368,7 +404,10 @@ public abstract class ByteArrayManager {
freeQueueSize = manager == null? -1: manager.recycle(array);
}
- LOG.debug(", freeQueueSize={}", freeQueueSize);
+ if (LOG.isDebugEnabled()) {
+ DEBUG_MESSAGE.get().append(", freeQueueSize=").append(freeQueueSize);
+ logDebugMessage();
+ }
return freeQueueSize;
}
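
ByteArrayManager takes a different tack from the other files: under the debug guard it accumulates fragments of one message in a thread-local StringBuilder across several methods, then flushes the whole line at once via logDebugMessage(). A runnable sketch of that pattern; the newByteArray body here is a stand-in for the real pooled allocation:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class DebugBufferSketch {
  static final Logger LOG = LoggerFactory.getLogger(DebugBufferSketch.class);

  private static final ThreadLocal<StringBuilder> DEBUG_MESSAGE =
      ThreadLocal.withInitial(StringBuilder::new);

  private static void logDebugMessage() {
    final StringBuilder b = DEBUG_MESSAGE.get();
    LOG.debug(b.toString());
    b.setLength(0); // reset so the next call on this thread starts fresh
  }

  byte[] newByteArray(int arrayLength) {
    if (LOG.isDebugEnabled()) {
      DEBUG_MESSAGE.get().append("allocate(").append(arrayLength).append(")");
    }
    final byte[] array = new byte[arrayLength]; // stand-in for pooling logic
    if (LOG.isDebugEnabled()) {
      DEBUG_MESSAGE.get().append(", return byte[").append(array.length)
          .append("]");
      logDebugMessage();
    }
    return array;
  }
}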
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0539e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java
index 870103e..a864d37 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java
@@ -134,7 +134,9 @@ final class TokenAspect<T extends FileSystem & Renewable> {
if (token != null) {
fs.setDelegationToken(token);
addRenewAction(fs);
- LOG.debug("Created new DT for {}", token.getService());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Created new DT for {}", token.getService());
+ }
}
hasInitedToken = true;
}
@@ -147,7 +149,9 @@ final class TokenAspect<T extends FileSystem & Renewable> {
synchronized void initDelegationToken(UserGroupInformation ugi) {
Token<?> token = selectDelegationToken(ugi);
if (token != null) {
- LOG.debug("Found existing DT for {}", token.getService());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Found existing DT for {}", token.getService());
+ }
fs.setDelegationToken(token);
hasInitedToken = true;
}
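
Unlike the earlier files, the TokenAspect hunks above (and the URLConnectionFactory hunk that follows) keep the {} placeholder and merely wrap it in a guard. The guard still buys something: it skips evaluating the log arguments when debug is off. A small illustrative sketch, with expensiveService() as a hypothetical stand-in for calls like token.getService():

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class GuardedPlaceholderExample {
  private static final Logger LOG =
      LoggerFactory.getLogger(GuardedPlaceholderExample.class);

  // Hypothetical stand-in for an argument that is costly to compute.
  private String expensiveService() {
    return "ha-hdfs:mycluster";
  }

  void initToken() {
    // The {} placeholder already defers message formatting; the added guard
    // additionally avoids calling expensiveService() when debug is off.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Created new DT for {}", expensiveService());
    }
  }
}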
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0539e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
index be5f17d..4c23241 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
@@ -182,7 +182,9 @@ public class URLConnectionFactory {
public URLConnection openConnection(URL url, boolean isSpnego)
throws IOException, AuthenticationException {
if (isSpnego) {
- LOG.debug("open AuthenticatedURL connection {}", url);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("open AuthenticatedURL connection {}", url);
+ }
UserGroupInformation.getCurrentUser().checkTGTAndReloginFromKeytab();
final AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
return new AuthenticatedURL(new KerberosUgiAuthenticator(),