Posted to common-commits@hadoop.apache.org by st...@apache.org on 2018/02/14 16:21:38 UTC

[2/6] hadoop git commit: HADOOP-10571. Use Log.*(Object, Throwable) overload to log exceptions. Contributed by Andras Bokor.

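The pattern applied throughout: SLF4J substitutes each {} placeholder with the corresponding argument, and an extra trailing Throwable with no matching placeholder is logged with its stack trace, which is what the Log.*(Object, Throwable) overloads provide. Explicit isDebugEnabled() guards are dropped where the message is cheap to build, and kept where computing an argument (such as handle.dumpFileHandle()) is itself costly. A minimal sketch of the idiom, with an illustrative class name and made-up values rather than code from the patch:

    import java.io.IOException;

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Illustrative only: the class name and values are made up,
    // not taken from the patch.
    public class ParameterizedLoggingSketch {
      static final Logger LOG =
          LoggerFactory.getLogger(ParameterizedLoggingSketch.class);

      public static void main(String[] args) {
        long fileId = 42L;
        // Each {} consumes one argument, in order.
        LOG.info("Configured HDFS superuser is {}", "hdfs");
        // A trailing Throwable beyond the last {} is treated as the
        // exception to log, so the stack trace is preserved.
        IOException e = new IOException("simulated failure");
        LOG.info("Can't get file attribute, fileId={}", fileId, e);
        // No isDebugEnabled() guard is needed for cheap arguments: the
        // message is only assembled if DEBUG is enabled.
        LOG.debug("requested offset={} and current filesize={}", 0L, 1024L);
      }
    }
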
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index 0db633f..6e63543 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -28,8 +28,6 @@ import java.nio.ByteBuffer;
 import java.nio.charset.Charset;
 import java.util.EnumSet;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException;
@@ -137,6 +135,8 @@ import org.jboss.netty.channel.Channel;
 import org.jboss.netty.channel.ChannelHandlerContext;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * RPC program corresponding to nfs daemon. See {@link Nfs3}.
@@ -146,7 +146,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
   public static final FsPermission umask = new FsPermission(
       (short) DEFAULT_UMASK);
 
-  static final Log LOG = LogFactory.getLog(RpcProgramNfs3.class);
+  static final Logger LOG = LoggerFactory.getLogger(RpcProgramNfs3.class);
 
   private final NfsConfiguration config;
   private final WriteManager writeManager;
@@ -204,7 +204,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY);
     superuser = config.get(NfsConfigKeys.NFS_SUPERUSER_KEY,
         NfsConfigKeys.NFS_SUPERUSER_DEFAULT);
-    LOG.info("Configured HDFS superuser is " + superuser);
+    LOG.info("Configured HDFS superuser is {}", superuser);
 
     if (!enableDump) {
       writeDumpDir = null;
@@ -230,13 +230,13 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
   private void clearDirectory(String writeDumpDir) throws IOException {
     File dumpDir = new File(writeDumpDir);
     if (dumpDir.exists()) {
-      LOG.info("Delete current dump directory " + writeDumpDir);
+      LOG.info("Delete current dump directory {}", writeDumpDir);
       if (!(FileUtil.fullyDelete(dumpDir))) {
         throw new IOException("Cannot remove current dump directory: "
             + dumpDir);
       }
     }
-    LOG.info("Create new dump directory " + writeDumpDir);
+    LOG.info("Create new dump directory {}", writeDumpDir);
     if (!dumpDir.mkdirs()) {
       throw new IOException("Cannot create dump directory " + dumpDir);
     }
@@ -298,9 +298,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
   @Override
   public NFS3Response nullProcedure() {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS NULL");
-    }
+    LOG.debug("NFS NULL");
     return new NFS3Response(Nfs3Status.NFS3_OK);
   }
 
@@ -331,10 +329,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     FileHandle handle = request.getHandle();
     int namenodeId = handle.getNamenodeId();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("GETATTR for fileHandle: " + handle.dumpFileHandle()
-          + " client: " + remoteAddress);
+      LOG.debug("GETATTR for fileHandle: {} client: {}",
+          handle.dumpFileHandle(), remoteAddress);
     }
-
     DFSClient dfsClient =
         clientCache.getDfsClient(securityHandler.getUser(), namenodeId);
     if (dfsClient == null) {
@@ -346,7 +343,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     try {
       attrs = writeManager.getFileAttr(dfsClient, handle, iug);
     } catch (RemoteException r) {
-      LOG.warn("Exception ", r);
+      LOG.warn("Exception", r);
       IOException io = r.unwrapRemoteException();
       /**
        * AuthorizationException can be thrown if the user can't be proxy'ed.
@@ -357,13 +354,13 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         return new GETATTR3Response(Nfs3Status.NFS3ERR_IO);
       }
     } catch (IOException e) {
-      LOG.info("Can't get file attribute, fileId=" + handle.getFileId(), e);
+      LOG.info("Can't get file attribute, fileId={}", handle.getFileId(), e);
       int status = mapErrorStatus(e);
       response.setStatus(status);
       return response;
     }
     if (attrs == null) {
-      LOG.error("Can't get path for fileId: " + handle.getFileId());
+      LOG.error("Can't get path for fileId: {}", handle.getFileId());
       response.setStatus(Nfs3Status.NFS3ERR_STALE);
       return response;
     }
@@ -378,9 +375,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     EnumSet<SetAttrField> updateFields = newAttr.getUpdateFields();
 
     if (setMode && updateFields.contains(SetAttrField.MODE)) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("set new mode: " + newAttr.getMode());
-      }
+      LOG.debug("set new mode: {}", newAttr.getMode());
       dfsClient.setPermission(fileIdPath,
           new FsPermission((short) (newAttr.getMode())));
     }
@@ -398,9 +393,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     long mtime = updateFields.contains(SetAttrField.MTIME) ? newAttr.getMtime()
         .getMilliSeconds() : -1;
     if (atime != -1 || mtime != -1) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("set atime: " + +atime + " mtime: " + mtime);
-      }
+      LOG.debug("set atime: {} mtime: {}", atime, mtime);
       dfsClient.setTimes(fileIdPath, mtime, atime);
     }
   }
@@ -427,10 +420,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     FileHandle handle = request.getHandle();
     int namenodeId = handle.getNamenodeId();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS SETATTR fileHandle: " + handle.dumpFileHandle()
-          + " client: " + remoteAddress);
+      LOG.debug("NFS SETATTR fileHandle: {} client: {}",
+          handle.dumpFileHandle(), remoteAddress);
     }
-
     DFSClient dfsClient =
         clientCache.getDfsClient(securityHandler.getUser(), namenodeId);
     if (dfsClient == null) {
@@ -439,8 +431,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     }
 
     if (request.getAttr().getUpdateFields().contains(SetAttrField.SIZE)) {
-      LOG.error("Setting file size is not supported when setattr, fileId: "
-          + handle.getFileId());
+      LOG.error("Setting file size is not supported when setattr, fileId: {}",
+          handle.getFileId());
       response.setStatus(Nfs3Status.NFS3ERR_INVAL);
       return response;
     }
@@ -450,7 +442,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     try {
       preOpAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
       if (preOpAttr == null) {
-        LOG.info("Can't get path for fileId: " + handle.getFileId());
+        LOG.info("Can't get path for fileId: {}", handle.getFileId());
         response.setStatus(Nfs3Status.NFS3ERR_STALE);
         return response;
       }
@@ -474,13 +466,13 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       WccData wccData = new WccData(preOpWcc, postOpAttr);
       return new SETATTR3Response(Nfs3Status.NFS3_OK, wccData);
     } catch (IOException e) {
-      LOG.warn("Exception ", e);
+      LOG.warn("Exception", e);
       WccData wccData = null;
       try {
         wccData = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpAttr),
             dfsClient, fileIdPath, iug);
       } catch (IOException e1) {
-        LOG.info("Can't get postOpAttr for fileIdPath: " + fileIdPath, e1);
+        LOG.info("Can't get postOpAttr for fileIdPath: {}", fileIdPath, e1);
       }
 
       int status = mapErrorStatus(e);
@@ -515,10 +507,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     String fileName = request.getName();
     int namenodeId = dirHandle.getNamenodeId();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS LOOKUP dir fileHandle: " + dirHandle.dumpFileHandle()
-          + " name: " + fileName + " client: " + remoteAddress);
+      LOG.debug("NFS LOOKUP dir fileHandle: {} name: {} client: {}",
+          dirHandle.dumpFileHandle(), fileName, remoteAddress);
     }
-
     DFSClient dfsClient =
         clientCache.getDfsClient(securityHandler.getUser(), namenodeId);
     if (dfsClient == null) {
@@ -530,10 +521,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       Nfs3FileAttributes postOpObjAttr = writeManager.getFileAttr(dfsClient,
           dirHandle, fileName, namenodeId);
       if (postOpObjAttr == null) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("NFS LOOKUP fileId: " + dirHandle.getFileId() + " name: "
-              + fileName + " does not exist");
-        }
+        LOG.debug("NFS LOOKUP fileId: {} name: {} does not exist",
+            dirHandle.getFileId(), fileName);
         Nfs3FileAttributes postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient,
             dirFileIdPath, iug);
         return new LOOKUP3Response(Nfs3Status.NFS3ERR_NOENT, null, null,
@@ -543,7 +532,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       Nfs3FileAttributes postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient,
           dirFileIdPath, iug);
       if (postOpDirAttr == null) {
-        LOG.info("Can't get path for dir fileId: " + dirHandle.getFileId());
+        LOG.info("Can't get path for dir fileId: {}", dirHandle.getFileId());
         return new LOOKUP3Response(Nfs3Status.NFS3ERR_STALE);
       }
       FileHandle fileHandle =
@@ -552,7 +541,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
           postOpDirAttr);
 
     } catch (IOException e) {
-      LOG.warn("Exception ", e);
+      LOG.warn("Exception", e);
       int status = mapErrorStatus(e);
       return new LOOKUP3Response(status);
     }
@@ -592,16 +581,15 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     }
 
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS ACCESS fileHandle: " + handle.dumpFileHandle()
-          + " client: " + remoteAddress);
+      LOG.debug("NFS ACCESS fileHandle: {} client: {}",
+          handle.dumpFileHandle(), remoteAddress);
     }
-
     Nfs3FileAttributes attrs;
     try {
       attrs = writeManager.getFileAttr(dfsClient, handle, iug);
 
       if (attrs == null) {
-        LOG.error("Can't get path for fileId: " + handle.getFileId());
+        LOG.error("Can't get path for fileId: {}", handle.getFileId());
         return new ACCESS3Response(Nfs3Status.NFS3ERR_STALE);
       }
       if(iug.getUserName(securityHandler.getUid(), "unknown").equals(superuser)) {
@@ -616,7 +604,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
       return new ACCESS3Response(Nfs3Status.NFS3_OK, attrs, access);
     } catch (RemoteException r) {
-      LOG.warn("Exception ", r);
+      LOG.warn("Exception", r);
       IOException io = r.unwrapRemoteException();
       /**
        * AuthorizationException can be thrown if the user can't be proxy'ed.
@@ -627,7 +615,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         return new ACCESS3Response(Nfs3Status.NFS3ERR_IO);
       }
     } catch (IOException e) {
-      LOG.warn("Exception ", e);
+      LOG.warn("Exception", e);
       int status = mapErrorStatus(e);
       return new ACCESS3Response(status);
     }
@@ -660,10 +648,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     FileHandle handle = request.getHandle();
     int namenodeId = handle.getNamenodeId();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS READLINK fileHandle: " + handle.dumpFileHandle()
-          + " client: " + remoteAddress);
+      LOG.debug("NFS READLINK fileHandle: {} client: {}",
+          handle.dumpFileHandle(), remoteAddress);
     }
-
     DFSClient dfsClient =
         clientCache.getDfsClient(securityHandler.getUser(), namenodeId);
     if (dfsClient == null) {
@@ -678,24 +665,23 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       Nfs3FileAttributes postOpAttr = Nfs3Utils.getFileAttr(dfsClient,
           fileIdPath, iug);
       if (postOpAttr == null) {
-        LOG.info("Can't get path for fileId: " + handle.getFileId());
+        LOG.info("Can't get path for fileId: {}", handle.getFileId());
         return new READLINK3Response(Nfs3Status.NFS3ERR_STALE);
       }
       if (postOpAttr.getType() != NfsFileType.NFSLNK.toValue()) {
-        LOG.error("Not a symlink, fileId: " + handle.getFileId());
+        LOG.error("Not a symlink, fileId: {}", handle.getFileId());
         return new READLINK3Response(Nfs3Status.NFS3ERR_INVAL);
       }
       if (target == null) {
-        LOG.error("Symlink target should not be null, fileId: "
-            + handle.getFileId());
+        LOG.error("Symlink target should not be null, fileId: {}",
+            handle.getFileId());
         return new READLINK3Response(Nfs3Status.NFS3ERR_SERVERFAULT);
       }
       int rtmax = config.getInt(NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_KEY,
           NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_DEFAULT);
       if (rtmax < target.getBytes(Charset.forName("UTF-8")).length) {
-        LOG.error("Link size: "
-            + target.getBytes(Charset.forName("UTF-8")).length
-            + " is larger than max transfer size: " + rtmax);
+        LOG.error("Link size: {} is larger than max transfer size: {}",
+            target.getBytes(Charset.forName("UTF-8")).length, rtmax);
         return new READLINK3Response(Nfs3Status.NFS3ERR_IO, postOpAttr,
             new byte[0]);
       }
@@ -704,7 +690,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
           target.getBytes(Charset.forName("UTF-8")));
 
     } catch (IOException e) {
-      LOG.warn("Readlink error: " + e.getClass(), e);
+      LOG.warn("Readlink error", e);
       int status = mapErrorStatus(e);
       return new READLINK3Response(status);
     }
@@ -741,10 +727,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     FileHandle handle = request.getHandle();
     int namenodeId = handle.getNamenodeId();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS READ fileHandle: " + handle.dumpFileHandle()+ " offset: "
-          + offset + " count: " + count + " client: " + remoteAddress);
+      LOG.debug("NFS READ fileHandle: {} offset: {} count: {} client: {}",
+          handle.dumpFileHandle(), offset, count, remoteAddress);
     }
-
     DFSClient dfsClient = clientCache.getDfsClient(userName, namenodeId);
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
@@ -760,15 +745,12 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         attrs = Nfs3Utils.getFileAttr(dfsClient,
             Nfs3Utils.getFileIdPath(handle), iug);
       } catch (IOException e) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Get error accessing file, fileId: " + handle.getFileId(), e);
-        }
+        LOG.debug("Get error accessing file, fileId: {}",
+            handle.getFileId(), e);
         return new READ3Response(Nfs3Status.NFS3ERR_IO);
       }
       if (attrs == null) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Can't get path for fileId: " + handle.getFileId());
-        }
+        LOG.debug("Can't get path for fileId: {}", handle.getFileId());
         return new READ3Response(Nfs3Status.NFS3ERR_NOENT);
       }
       int access = Nfs3Utils.getAccessRightsForUserGroup(
@@ -787,8 +769,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     // optimized later by reading from the cache.
     int ret = writeManager.commitBeforeRead(dfsClient, handle, offset + count);
     if (ret != Nfs3Status.NFS3_OK) {
-      LOG.warn("commitBeforeRead didn't succeed with ret=" + ret
-          + ". Read may not get most recent data.");
+      LOG.warn("commitBeforeRead didn't succeed with ret={}. " +
+          "Read may not get most recent data.", ret);
     }
 
     try {
@@ -828,9 +810,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       attrs = Nfs3Utils.getFileAttr(dfsClient, Nfs3Utils.getFileIdPath(handle),
           iug);
       if (readCount < count) {
-        LOG.info("Partial read. Asked offset: " + offset + " count: " + count
-            + " and read back: " + readCount + " file size: "
-            + attrs.getSize());
+        LOG.info("Partial read. Asked offset: {} count: {} and read back: {} " +
+                "file size: {}", offset, count, readCount, attrs.getSize());
       }
       // HDFS returns -1 for read beyond file size.
       if (readCount < 0) {
@@ -841,8 +822,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
           ByteBuffer.wrap(readbuffer));
 
     } catch (IOException e) {
-      LOG.warn("Read error: " + e.getClass() + " offset: " + offset
-          + " count: " + count, e);
+      LOG.warn("Read error. Offset: {} count: {}", offset, count, e);
       int status = mapErrorStatus(e);
       return new READ3Response(status);
     }
@@ -884,11 +864,11 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     FileHandle handle = request.getHandle();
     int namenodeId = handle.getNamenodeId();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS WRITE fileHandle: " + handle.dumpFileHandle() + " offset: "
-          + offset + " length: " + count + " stableHow: " + stableHow.getValue()
-          + " xid: " + xid + " client: " + remoteAddress);
+      LOG.debug("NFS WRITE fileHandle: {} offset: {} length: {} " +
+              "stableHow: {} xid: {} client: {}",
+          handle.dumpFileHandle(), offset, count, stableHow.getValue(), xid,
+          remoteAddress);
     }
-
     DFSClient dfsClient =
         clientCache.getDfsClient(securityHandler.getUser(), namenodeId);
     if (dfsClient == null) {
@@ -900,7 +880,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     try {
       preOpAttr = writeManager.getFileAttr(dfsClient, handle, iug);
       if (preOpAttr == null) {
-        LOG.error("Can't get path for fileId: " + handle.getFileId());
+        LOG.error("Can't get path for fileId: {}", handle.getFileId());
         return new WRITE3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
@@ -910,22 +890,20 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
             Nfs3Constant.WRITE_COMMIT_VERF);
       }
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("requested offset=" + offset + " and current filesize="
-            + preOpAttr.getSize());
-      }
+      LOG.debug("requested offset={} and current filesize={}",
+          offset, preOpAttr.getSize());
 
       writeManager.handleWrite(dfsClient, request, channel, xid, preOpAttr);
 
     } catch (IOException e) {
-      LOG.info("Error writing to fileId " + handle.getFileId() + " at offset "
-          + offset + " and length " + data.length, e);
+      LOG.info("Error writing to fileId {} at offset {} and length {}",
+          handle.getFileId(), offset, data.length, e);
       // Try to return WccData
       Nfs3FileAttributes postOpAttr = null;
       try {
         postOpAttr = writeManager.getFileAttr(dfsClient, handle, iug);
       } catch (IOException e1) {
-        LOG.info("Can't get postOpAttr for fileId: " + handle.getFileId(), e1);
+        LOG.info("Can't get postOpAttr for fileId: {}", e1);
       }
       WccAttr attr = preOpAttr == null ? null : Nfs3Utils.getWccAttr(preOpAttr);
       WccData fileWcc = new WccData(attr, postOpAttr);
@@ -961,10 +939,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     String fileName = request.getName();
     int namenodeId = dirHandle.getNamenodeId();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS CREATE dir fileHandle: " + dirHandle.dumpFileHandle()
-          + " filename: " + fileName + " client: " + remoteAddress);
+      LOG.debug("NFS CREATE dir fileHandle: {} filename: {} client: {}",
+          dirHandle.dumpFileHandle(), fileName, remoteAddress);
     }
-
     DFSClient dfsClient =
         clientCache.getDfsClient(securityHandler.getUser(), namenodeId);
     if (dfsClient == null) {
@@ -976,8 +953,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     if ((createMode != Nfs3Constant.CREATE_EXCLUSIVE)
         && request.getObjAttr().getUpdateFields().contains(SetAttrField.SIZE)
         && request.getObjAttr().getSize() != 0) {
-      LOG.error("Setting file size is not supported when creating file: "
-          + fileName + " dir fileId: " + dirHandle.getFileId());
+      LOG.error("Setting file size is not supported when creating file: {} " +
+          "dir fileId: {}", fileName, dirHandle.getFileId());
       return new CREATE3Response(Nfs3Status.NFS3ERR_INVAL);
     }
 
@@ -990,7 +967,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     try {
       preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
       if (preOpDirAttr == null) {
-        LOG.error("Can't get path for dirHandle: " + dirHandle);
+        LOG.error("Can't get path for dirHandle: {}", dirHandle);
         return new CREATE3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
@@ -1041,10 +1018,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         fos.close();
         fos = null;
       } else {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Opened stream for file: " + fileName + ", fileId: "
-              + fileHandle.getFileId());
-        }
+        LOG.debug("Opened stream for file: {}, fileId: {}",
+            fileName, fileHandle.getFileId());
       }
 
     } catch (IOException e) {
@@ -1053,8 +1028,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         try {
           fos.close();
         } catch (IOException e1) {
-          LOG.error("Can't close stream for dirFileId: " + dirHandle.getFileId()
-              + " filename: " + fileName, e1);
+          LOG.error("Can't close stream for dirFileId: {} filename: {}",
+              dirHandle.getFileId(), fileName, e1);
         }
       }
       if (dirWcc == null) {
@@ -1062,8 +1037,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
           dirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpDirAttr),
               dfsClient, dirFileIdPath, iug);
         } catch (IOException e1) {
-          LOG.error("Can't get postOpDirAttr for dirFileId: "
-              + dirHandle.getFileId(), e1);
+          LOG.error("Can't get postOpDirAttr for dirFileId: {}",
+              dirHandle.getFileId(), e1);
         }
       }
 
@@ -1105,13 +1080,12 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     }
 
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS MKDIR dirHandle: " + dirHandle.dumpFileHandle()
-          + " filename: " + fileName + " client: " + remoteAddress);
+      LOG.debug("NFS MKDIR dirHandle: {} filename: {} client: {}",
+          dirHandle.dumpFileHandle(), fileName, remoteAddress);
     }
-
     if (request.getObjAttr().getUpdateFields().contains(SetAttrField.SIZE)) {
-      LOG.error("Setting file size is not supported when mkdir: " + fileName
-          + " in dirHandle" + dirHandle);
+      LOG.error("Setting file size is not supported when mkdir: " +
+          "{} in dirHandle {}", fileName, dirHandle);
       return new MKDIR3Response(Nfs3Status.NFS3ERR_INVAL);
     }
 
@@ -1123,7 +1097,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     try {
       preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
       if (preOpDirAttr == null) {
-        LOG.info("Can't get path for dir fileId: " + dirHandle.getFileId());
+        LOG.info("Can't get path for dir fileId: {}", dirHandle.getFileId());
         return new MKDIR3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
@@ -1158,13 +1132,13 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       return new MKDIR3Response(Nfs3Status.NFS3_OK, new FileHandle(
           postOpObjAttr.getFileId(), namenodeId), postOpObjAttr, dirWcc);
     } catch (IOException e) {
-      LOG.warn("Exception ", e);
+      LOG.warn("Exception", e);
       // Try to return correct WccData
       if (postOpDirAttr == null) {
         try {
           postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
         } catch (IOException e1) {
-          LOG.info("Can't get postOpDirAttr for " + dirFileIdPath, e);
+          LOG.info("Can't get postOpDirAttr for {}", dirFileIdPath, e);
         }
       }
 
@@ -1202,10 +1176,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     String fileName = request.getName();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS REMOVE dir fileHandle: " + dirHandle.dumpFileHandle()
-          + " fileName: " + fileName + " client: " + remoteAddress);
+      LOG.debug("NFS REMOVE dir fileHandle: {} fileName: {} client: {}",
+          dirHandle.dumpFileHandle(), fileName, remoteAddress);
     }
-
     DFSClient dfsClient =
         clientCache.getDfsClient(securityHandler.getUser(), namenodeId);
     if (dfsClient == null) {
@@ -1219,7 +1192,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     try {
       preOpDirAttr =  Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
       if (preOpDirAttr == null) {
-        LOG.info("Can't get path for dir fileId: " + dirHandle.getFileId());
+        LOG.info("Can't get path for dir fileId: {}", dirHandle.getFileId());
         return new REMOVE3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
@@ -1247,13 +1220,13 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       }
       return new REMOVE3Response(Nfs3Status.NFS3_OK, dirWcc);
     } catch (IOException e) {
-      LOG.warn("Exception ", e);
+      LOG.warn("Exception", e);
       // Try to return correct WccData
       if (postOpDirAttr == null) {
         try {
           postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
         } catch (IOException e1) {
-          LOG.info("Can't get postOpDirAttr for " + dirFileIdPath, e1);
+          LOG.info("Can't get postOpDirAttr for {}", dirFileIdPath, e1);
         }
       }
 
@@ -1285,10 +1258,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     String fileName = request.getName();
     int namenodeId = dirHandle.getNamenodeId();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS RMDIR dir fileHandle: " + dirHandle.dumpFileHandle()
-          + " fileName: " + fileName + " client: " + remoteAddress);
+      LOG.debug("NFS RMDIR dir fileHandle: {} fileName: {} client: {}",
+          dirHandle.dumpFileHandle(), fileName, remoteAddress);
     }
-
     DFSClient dfsClient =
         clientCache.getDfsClient(securityHandler.getUser(), namenodeId);
     if (dfsClient == null) {
@@ -1302,7 +1274,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     try {
       preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
       if (preOpDirAttr == null) {
-        LOG.info("Can't get path for dir fileId: " + dirHandle.getFileId());
+        LOG.info("Can't get path for dir fileId: {}", dirHandle.getFileId());
         return new RMDIR3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
@@ -1334,13 +1306,13 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
       return new RMDIR3Response(Nfs3Status.NFS3_OK, dirWcc);
     } catch (IOException e) {
-      LOG.warn("Exception ", e);
+      LOG.warn("Exception", e);
       // Try to return correct WccData
       if (postOpDirAttr == null) {
         try {
           postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
         } catch (IOException e1) {
-          LOG.info("Can't get postOpDirAttr for " + dirFileIdPath, e1);
+          LOG.info("Can't get postOpDirAttr for {}", dirFileIdPath, e1);
         }
       }
 
@@ -1376,11 +1348,10 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     int toNamenodeId = toHandle.getNamenodeId();
     String toName = request.getToName();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS RENAME from: " + fromHandle.dumpFileHandle()
-          + "/" + fromName + " to: " + toHandle.dumpFileHandle()
-          + "/" + toName + " client: " + remoteAddress);
+      LOG.debug("NFS RENAME from: {}/{} to: {}/{} client: {}",
+          fromHandle.dumpFileHandle(), fromName, toHandle.dumpFileHandle(),
+          toName, remoteAddress);
     }
-
     DFSClient dfsClient =
         clientCache.getDfsClient(securityHandler.getUser(), fromNamenodeId);
     if (dfsClient == null) {
@@ -1403,14 +1374,15 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     try {
       fromPreOpAttr = Nfs3Utils.getFileAttr(dfsClient, fromDirFileIdPath, iug);
       if (fromPreOpAttr == null) {
-        LOG.info("Can't get path for fromHandle fileId: "
-            + fromHandle.getFileId());
+        LOG.info("Can't get path for fromHandle fileId: {}",
+            fromHandle.getFileId());
         return new RENAME3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
       toPreOpAttr = Nfs3Utils.getFileAttr(dfsClient, toDirFileIdPath, iug);
       if (toPreOpAttr == null) {
-        LOG.info("Can't get path for toHandle fileId: " + toHandle.getFileId());
+        LOG.info("Can't get path for toHandle fileId: {}",
+            toHandle.getFileId());
         return new RENAME3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
@@ -1434,7 +1406,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
           dfsClient, toDirFileIdPath, iug);
       return new RENAME3Response(Nfs3Status.NFS3_OK, fromDirWcc, toDirWcc);
     } catch (IOException e) {
-      LOG.warn("Exception ", e);
+      LOG.warn("Exception", e);
       // Try to return correct WccData
       try {
         fromDirWcc = Nfs3Utils.createWccData(
@@ -1443,8 +1415,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         toDirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(toPreOpAttr),
             dfsClient, toDirFileIdPath, iug);
       } catch (IOException e1) {
-        LOG.info("Can't get postOpDirAttr for " + fromDirFileIdPath + " or"
-            + toDirFileIdPath, e1);
+        LOG.info("Can't get postOpDirAttr for {} or {}",
+            fromDirFileIdPath, toDirFileIdPath, e1);
       }
 
       int status = mapErrorStatus(e);
@@ -1484,10 +1456,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     // Don't do any name check to source path, just leave it to HDFS
     String linkIdPath = linkDirIdPath + "/" + name;
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS SYMLINK, target: " + symData + " link: " + linkIdPath
-          + " namenodeId: " + namenodeId + " client: " + remoteAddress);
-    }
+    LOG.debug("NFS SYMLINK, target: {} link: {} namenodeId: {} client: {}",
+        symData, linkIdPath, namenodeId, remoteAddress);
 
     DFSClient dfsClient =
         clientCache.getDfsClient(securityHandler.getUser(), namenodeId);
@@ -1515,7 +1485,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
           objAttr.getFileId(), namenodeId), objAttr, dirWcc);
 
     } catch (IOException e) {
-      LOG.warn("Exception: " + e);
+      LOG.warn("Exception", e);
       int status = mapErrorStatus(e);
       response.setStatus(status);
       return response;
@@ -1542,9 +1512,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         throw io;
       }
       // This happens when startAfter was just deleted
-      LOG.info("Cookie couldn't be found: "
-          + new String(startAfter, Charset.forName("UTF-8"))
-          + ", do listing from beginning");
+      LOG.info("Cookie couldn't be found: {}, do listing from beginning",
+          new String(startAfter, Charset.forName("UTF-8")));
       dlisting = dfsClient
           .listPaths(dirFileIdPath, HdfsFileStatus.EMPTY_NAME);
     }
@@ -1577,21 +1546,19 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     long cookie = request.getCookie();
     if (cookie < 0) {
-      LOG.error("Invalid READDIR request, with negative cookie: " + cookie);
+      LOG.error("Invalid READDIR request, with negative cookie: {}", cookie);
       return new READDIR3Response(Nfs3Status.NFS3ERR_INVAL);
     }
     long count = request.getCount();
     if (count <= 0) {
-      LOG.info("Nonpositive count in invalid READDIR request: " + count);
+      LOG.info("Nonpositive count in invalid READDIR request: {}", count);
       return new READDIR3Response(Nfs3Status.NFS3_OK);
     }
 
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS READDIR fileHandle: " + handle.dumpFileHandle()
-          + " cookie: " + cookie + " count: " + count + " client: "
-          + remoteAddress);
+      LOG.debug("NFS READDIR fileHandle: {} cookie: {} count: {} client: {}",
+          handle.dumpFileHandle(), cookie, count, remoteAddress);
     }
-
     DFSClient dfsClient =
         clientCache.getDfsClient(securityHandler.getUser(), namenodeId);
     if (dfsClient == null) {
@@ -1607,12 +1574,12 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       String dirFileIdPath = Nfs3Utils.getFileIdPath(handle);
       dirStatus = dfsClient.getFileInfo(dirFileIdPath);
       if (dirStatus == null) {
-        LOG.info("Can't get path for fileId: " + handle.getFileId());
+        LOG.info("Can't get path for fileId: {}", handle.getFileId());
         return new READDIR3Response(Nfs3Status.NFS3ERR_STALE);
       }
       if (!dirStatus.isDirectory()) {
-        LOG.error("Can't readdir for regular file, fileId: "
-            + handle.getFileId());
+        LOG.error("Can't readdir for regular file, fileId: {}",
+            handle.getFileId());
         return new READDIR3Response(Nfs3Status.NFS3ERR_NOTDIR);
       }
       long cookieVerf = request.getCookieVerf();
@@ -1631,8 +1598,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
           LOG.warn("AIX compatibility mode enabled, ignoring cookieverf " +
               "mismatches.");
         } else {
-          LOG.error("CookieVerf mismatch. request cookieVerf: " + cookieVerf
-              + " dir cookieVerf: " + dirStatus.getModificationTime());
+          LOG.error("CookieVerf mismatch. request cookieVerf: {} " +
+              "dir cookieVerf: {}",
+              cookieVerf, dirStatus.getModificationTime());
           return new READDIR3Response(
               Nfs3Status.NFS3ERR_BAD_COOKIE,
               Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug));
@@ -1664,11 +1632,11 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       dlisting = listPaths(dfsClient, dirFileIdPath, startAfter);
       postOpAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
       if (postOpAttr == null) {
-        LOG.error("Can't get path for fileId: " + handle.getFileId());
+        LOG.error("Can't get path for fileId: {}", handle.getFileId());
         return new READDIR3Response(Nfs3Status.NFS3ERR_STALE);
       }
     } catch (IOException e) {
-      LOG.warn("Exception ", e);
+      LOG.warn("Exception", e);
       int status = mapErrorStatus(e);
       return new READDIR3Response(status);
     }
@@ -1742,26 +1710,28 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     int namenodeId = handle.getNamenodeId();
     long cookie = request.getCookie();
     if (cookie < 0) {
-      LOG.error("Invalid READDIRPLUS request, with negative cookie: " + cookie);
+      LOG.error("Invalid READDIRPLUS request, with negative cookie: {}",
+          cookie);
       return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
     }
     long dirCount = request.getDirCount();
     if (dirCount <= 0) {
-      LOG.info("Nonpositive dircount in invalid READDIRPLUS request: " + dirCount);
+      LOG.info("Nonpositive dircount in invalid READDIRPLUS request: {}",
+          dirCount);
       return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
     }
     int maxCount = request.getMaxCount();
     if (maxCount <= 0) {
-      LOG.info("Nonpositive maxcount in invalid READDIRPLUS request: " + maxCount);
+      LOG.info("Nonpositive maxcount in invalid READDIRPLUS request: {}",
+          maxCount);
       return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
     }
 
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS READDIRPLUS fileHandle: " + handle.dumpFileHandle()
-          + " cookie: " + cookie + " dirCount: " + dirCount + " maxCount: "
-          + maxCount + " client: " + remoteAddress);
+      LOG.debug("NFS READDIRPLUS fileHandle: {} cookie: {} dirCount: {} " +
+              "maxCount: {} client: {}",
+          handle.dumpFileHandle(), cookie, dirCount, maxCount, remoteAddress);
     }
-
     DFSClient dfsClient =
         clientCache.getDfsClient(securityHandler.getUser(), namenodeId);
     if (dfsClient == null) {
@@ -1777,12 +1747,12 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       String dirFileIdPath = Nfs3Utils.getFileIdPath(handle);
       dirStatus = dfsClient.getFileInfo(dirFileIdPath);
       if (dirStatus == null) {
-        LOG.info("Can't get path for fileId: " + handle.getFileId());
+        LOG.info("Can't get path for fileId: {}", handle.getFileId());
         return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_STALE);
       }
       if (!dirStatus.isDirectory()) {
-        LOG.error("Can't readdirplus for regular file, fileId: "
-            + handle.getFileId());
+        LOG.error("Can't readdirplus for regular file, fileId: {}",
+            handle.getFileId());
         return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_NOTDIR);
       }
       long cookieVerf = request.getCookieVerf();
@@ -1799,8 +1769,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
           LOG.warn("AIX compatibility mode enabled, ignoring cookieverf " +
               "mismatches.");
         } else {
-          LOG.error("cookieverf mismatch. request cookieverf: " + cookieVerf
-              + " dir cookieverf: " + dirStatus.getModificationTime());
+          LOG.error("cookieverf mismatch. request cookieverf: {} " +
+                  "dir cookieverf: {}",
+              cookieVerf, dirStatus.getModificationTime());
           return new READDIRPLUS3Response(
               Nfs3Status.NFS3ERR_BAD_COOKIE,
               Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug),
@@ -1833,11 +1804,11 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       dlisting = listPaths(dfsClient, dirFileIdPath, startAfter);
       postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
       if (postOpDirAttr == null) {
-        LOG.info("Can't get path for fileId: " + handle.getFileId());
+        LOG.info("Can't get path for fileId: {}", handle.getFileId());
         return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_STALE);
       }
     } catch (IOException e) {
-      LOG.warn("Exception ", e);
+      LOG.warn("Exception", e);
       int status = mapErrorStatus(e);
       return new READDIRPLUS3Response(status);
     }
@@ -1865,7 +1836,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         try {
           attr = writeManager.getFileAttr(dfsClient, childHandle, iug);
         } catch (IOException e) {
-          LOG.error("Can't get file attributes for fileId: " + fileId, e);
+          LOG.error("Can't get file attributes for fileId: {}", fileId, e);
           continue;
         }
         entries[i] = new READDIRPLUS3Response.EntryPlus3(fileId,
@@ -1882,7 +1853,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         try {
           attr = writeManager.getFileAttr(dfsClient, childHandle, iug);
         } catch (IOException e) {
-          LOG.error("Can't get file attributes for fileId: " + fileId, e);
+          LOG.error("Can't get file attributes for fileId: {}", fileId, e);
           continue;
         }
         entries[i] = new READDIRPLUS3Response.EntryPlus3(fileId,
@@ -1923,10 +1894,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     FileHandle handle = request.getHandle();
     int namenodeId = handle.getNamenodeId();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS FSSTAT fileHandle: " + handle.dumpFileHandle()
-          + " client: " + remoteAddress);
+      LOG.debug("NFS FSSTAT fileHandle: {} client: {}",
+          handle.dumpFileHandle(), remoteAddress);
     }
-
     DFSClient dfsClient =
         clientCache.getDfsClient(securityHandler.getUser(), namenodeId);
     if (dfsClient == null) {
@@ -1942,7 +1912,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       Nfs3FileAttributes attrs = writeManager.getFileAttr(dfsClient, handle,
           iug);
       if (attrs == null) {
-        LOG.info("Can't get path for fileId: " + handle.getFileId());
+        LOG.info("Can't get path for fileId: {}", handle.getFileId());
         return new FSSTAT3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
@@ -1957,7 +1927,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       return new FSSTAT3Response(Nfs3Status.NFS3_OK, attrs, totalBytes,
           freeBytes, freeBytes, maxFsObjects, maxFsObjects, maxFsObjects, 0);
     } catch (RemoteException r) {
-      LOG.warn("Exception ", r);
+      LOG.warn("Exception", r);
       IOException io = r.unwrapRemoteException();
       /**
        * AuthorizationException can be thrown if the user can't be proxy'ed.
@@ -1968,7 +1938,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         return new FSSTAT3Response(Nfs3Status.NFS3ERR_IO);
       }
     } catch (IOException e) {
-      LOG.warn("Exception ", e);
+      LOG.warn("Exception", e);
       int status = mapErrorStatus(e);
       return new FSSTAT3Response(status);
     }
@@ -2000,10 +1970,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     FileHandle handle = request.getHandle();
     int namenodeId = handle.getNamenodeId();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS FSINFO fileHandle: " + handle.dumpFileHandle()
-          +" client: " + remoteAddress);
+      LOG.debug("NFS FSINFO fileHandle: {} client: {}", remoteAddress,
+          handle.dumpFileHandle(), remoteAddress);
     }
-
     DFSClient dfsClient =
         clientCache.getDfsClient(securityHandler.getUser(), namenodeId);
     if (dfsClient == null) {
@@ -2025,7 +1994,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       Nfs3FileAttributes attrs = Nfs3Utils.getFileAttr(dfsClient,
           Nfs3Utils.getFileIdPath(handle), iug);
       if (attrs == null) {
-        LOG.info("Can't get path for fileId: " + handle.getFileId());
+        LOG.info("Can't get path for fileId: {}", handle.getFileId());
         return new FSINFO3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
@@ -2035,7 +2004,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       return new FSINFO3Response(Nfs3Status.NFS3_OK, attrs, rtmax, rtmax, 1,
           wtmax, wtmax, 1, dtperf, Long.MAX_VALUE, new NfsTime(1), fsProperty);
     } catch (IOException e) {
-      LOG.warn("Exception ", e);
+      LOG.warn("Exception", e);
       int status = mapErrorStatus(e);
       return new FSINFO3Response(status);
     }
@@ -2069,10 +2038,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     int namenodeId = handle.getNamenodeId();
 
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS PATHCONF fileHandle: " + handle.dumpFileHandle()
-          + " client: " + remoteAddress);
+      LOG.debug("NFS PATHCONF fileHandle: {} client: {}",
+          handle.dumpFileHandle(), remoteAddress);
     }
-
     DFSClient dfsClient =
         clientCache.getDfsClient(securityHandler.getUser(), namenodeId);
     if (dfsClient == null) {
@@ -2084,14 +2052,14 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       attrs = Nfs3Utils.getFileAttr(dfsClient, Nfs3Utils.getFileIdPath(handle),
           iug);
       if (attrs == null) {
-        LOG.info("Can't get path for fileId: " + handle.getFileId());
+        LOG.info("Can't get path for fileId: {}", handle.getFileId());
         return new PATHCONF3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
       return new PATHCONF3Response(Nfs3Status.NFS3_OK, attrs, 0,
           HdfsServerConstants.MAX_PATH_LENGTH, true, false, false, true);
     } catch (IOException e) {
-      LOG.warn("Exception ", e);
+      LOG.warn("Exception", e);
       int status = mapErrorStatus(e);
       return new PATHCONF3Response(status);
     }
@@ -2123,11 +2091,10 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     FileHandle handle = request.getHandle();
     int namenodeId = handle.getNamenodeId();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS COMMIT fileHandle: " + handle.dumpFileHandle() + " offset="
-          + request.getOffset() + " count=" + request.getCount() + " client: "
-          + remoteAddress);
+      LOG.debug("NFS COMMIT fileHandle: {} offset={} count={} client: {}",
+          handle.dumpFileHandle(), request.getOffset(), request.getCount(),
+          remoteAddress);
     }
-
     DFSClient dfsClient =
         clientCache.getDfsClient(securityHandler.getUser(), namenodeId);
     if (dfsClient == null) {
@@ -2140,7 +2107,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     try {
       preOpAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
       if (preOpAttr == null) {
-        LOG.info("Can't get path for fileId: " + handle.getFileId());
+        LOG.info("Can't get path for fileId: {}", handle.getFileId());
         return new COMMIT3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
@@ -2158,12 +2125,12 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
           preOpAttr, namenodeId);
       return null;
     } catch (IOException e) {
-      LOG.warn("Exception ", e);
+      LOG.warn("Exception", e);
       Nfs3FileAttributes postOpAttr = null;
       try {
         postOpAttr = writeManager.getFileAttr(dfsClient, handle, iug);
       } catch (IOException e1) {
-        LOG.info("Can't get postOpAttr for fileId: " + handle.getFileId(), e1);
+        LOG.info("Can't get postOpAttr for fileId: {}", handle.getFileId(), e1);
       }
 
       WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr), postOpAttr);
@@ -2205,8 +2172,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     if (nfsproc3 != NFSPROC3.NULL) {
       if (credentials.getFlavor() != AuthFlavor.AUTH_SYS
           && credentials.getFlavor() != AuthFlavor.RPCSEC_GSS) {
-        LOG.info("Wrong RPC AUTH flavor, " + credentials.getFlavor()
-            + " is not AUTH_SYS or RPCSEC_GSS.");
+        LOG.info("Wrong RPC AUTH flavor, {} is not AUTH_SYS or RPCSEC_GSS.",
+            credentials.getFlavor());
         XDR reply = new XDR();
         RpcDeniedReply rdr = new RpcDeniedReply(xid,
             RpcReply.ReplyState.MSG_ACCEPTED,
@@ -2226,12 +2193,13 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
           xid);
       if (entry != null) { // in cache
         if (entry.isCompleted()) {
-          LOG.info("Sending the cached reply to retransmitted request " + xid);
+          LOG.info("Sending the cached reply to retransmitted request {}",
+              xid);
           RpcUtil.sendRpcResponse(ctx, entry.getResponse());
           return;
         } else { // else request is in progress
-          LOG.info("Retransmitted request, transaction still in progress "
-              + xid);
+          LOG.info("Retransmitted request, transaction still in progress {}",
+              xid);
           // Ignore the request and do nothing
           return;
         }
@@ -2261,18 +2229,12 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       response = readlink(xdr, info);
       metrics.addReadlink(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.READ) {
-      if (LOG.isDebugEnabled()) {
-          LOG.debug(Nfs3Utils.READ_RPC_START + xid);
-      }
+      LOG.debug("{}{}", Nfs3Utils.READ_RPC_START, xid);
       response = read(xdr, info);
-      if (LOG.isDebugEnabled() && (nfsproc3 == NFSPROC3.READ)) {
-        LOG.debug(Nfs3Utils.READ_RPC_END + xid);
-      }
+      LOG.debug("{}{}", Nfs3Utils.READ_RPC_END, xid);
       metrics.addRead(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.WRITE) {
-      if (LOG.isDebugEnabled()) {
-          LOG.debug(Nfs3Utils.WRITE_RPC_START + xid);
-      }
+      LOG.debug("{}{}", Nfs3Utils.WRITE_RPC_START, xid);
       response = write(xdr, info);
       // Write end debug trace is in Nfs3Utils.writeChannel
     } else if (nfsproc3 == NFSPROC3.CREATE) {
@@ -2323,10 +2285,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
           out);
     }
     if (response == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("No sync response, expect an async response for request XID="
-            + rpcCall.getXid());
-      }
+      LOG.debug("No sync response, expect an async response for request XID={}",
+          rpcCall.getXid());
       return;
     }
     // TODO: currently we just return VerifierNone

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 6163d93..30f75ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -477,7 +477,7 @@ public class DataNode extends ReconfigurableBase
               HdfsClientConfigKeys.Read.ShortCircuit.DEFAULT)) {
       String reason = DomainSocket.getLoadingFailureReason();
       if (reason != null) {
-        LOG.warn("File descriptor passing is disabled because " + reason);
+        LOG.warn("File descriptor passing is disabled because {}", reason);
         this.fileDescriptorPassingDisabledReason = reason;
       } else {
         LOG.info("File descriptor passing is enabled.");
@@ -493,7 +493,7 @@ public class DataNode extends ReconfigurableBase
 
     try {
       hostName = getHostName(conf);
-      LOG.info("Configured hostname is " + hostName);
+      LOG.info("Configured hostname is {}", hostName);
       startDataNode(dataDirs, resources);
     } catch (IOException ie) {
       shutdown();
@@ -533,7 +533,7 @@ public class DataNode extends ReconfigurableBase
       case DFS_DATANODE_DATA_DIR_KEY: {
         IOException rootException = null;
         try {
-          LOG.info("Reconfiguring " + property + " to " + newVal);
+          LOG.info("Reconfiguring {} to {}", property, newVal);
           this.refreshVolumes(newVal);
           return getConf().get(DFS_DATANODE_DATA_DIR_KEY);
         } catch (IOException e) {
@@ -545,7 +545,7 @@ public class DataNode extends ReconfigurableBase
                 new BlockReportOptions.Factory().setIncremental(false).build());
           } catch (IOException e) {
             LOG.warn("Exception while sending the block report after refreshing"
-                + " volumes " + property + " to " + newVal, e);
+                + " volumes {} to {}", property, newVal, e);
             if (rootException == null) {
               rootException = e;
             }
@@ -561,7 +561,7 @@ public class DataNode extends ReconfigurableBase
       case DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY: {
         ReconfigurationException rootException = null;
         try {
-          LOG.info("Reconfiguring " + property + " to " + newVal);
+          LOG.info("Reconfiguring {} to {}", property, newVal);
           int movers;
           if (newVal == null) {
             // set to default
@@ -696,8 +696,8 @@ public class DataNode extends ReconfigurableBase
       // New conf doesn't have the storage location which available in
       // the current storage locations. Add to the deactivateLocations list.
       if (!found) {
-        LOG.info("Deactivation request received for active volume: "
-            + dir.getRoot().toString());
+        LOG.info("Deactivation request received for active volume: {}",
+            dir.getRoot());
         results.deactivateLocations.add(
             StorageLocation.parse(dir.getRoot().toString()));
       }
@@ -724,8 +724,8 @@ public class DataNode extends ReconfigurableBase
         // New conf doesn't have this failed storage location.
         // Add to the deactivate locations list.
         if (!found) {
-          LOG.info("Deactivation request received for failed volume: "
-              + failedStorageLocation);
+          LOG.info("Deactivation request received for failed volume: {}",
+              failedStorageLocation);
           results.deactivateLocations.add(StorageLocation.parse(
               failedStorageLocation));
         }
@@ -760,7 +760,7 @@ public class DataNode extends ReconfigurableBase
         throw new IOException("Attempt to remove all volumes.");
       }
       if (!changedVolumes.newLocations.isEmpty()) {
-        LOG.info("Adding new volumes: " +
+        LOG.info("Adding new volumes: {}",
             Joiner.on(",").join(changedVolumes.newLocations));
 
         // Add volumes for each Namespace
@@ -794,16 +794,16 @@ public class DataNode extends ReconfigurableBase
               errorMessageBuilder.append(
                   String.format("FAILED TO ADD: %s: %s%n",
                   volume, ioe.getMessage()));
-              LOG.error("Failed to add volume: " + volume, ioe);
+              LOG.error("Failed to add volume: {}", volume, ioe);
             } else {
               effectiveVolumes.add(volume.toString());
-              LOG.info("Successfully added volume: " + volume);
+              LOG.info("Successfully added volume: {}", volume);
             }
           } catch (Exception e) {
             errorMessageBuilder.append(
                 String.format("FAILED to ADD: %s: %s%n", volume,
                               e.toString()));
-            LOG.error("Failed to add volume: " + volume, e);
+            LOG.error("Failed to add volume: {}", volume, e);
           }
         }
       }
@@ -812,7 +812,7 @@ public class DataNode extends ReconfigurableBase
         removeVolumes(changedVolumes.deactivateLocations);
       } catch (IOException e) {
         errorMessageBuilder.append(e.getMessage());
-        LOG.error("Failed to remove volume: " + e.getMessage(), e);
+        LOG.error("Failed to remove volume", e);
       }
 
       if (errorMessageBuilder.length() > 0) {
@@ -967,16 +967,17 @@ public class DataNode extends ReconfigurableBase
           ServicePlugin.class);
     } catch (RuntimeException e) {
       String pluginsValue = conf.get(DFS_DATANODE_PLUGINS_KEY);
-      LOG.error("Unable to load DataNode plugins. Specified list of plugins: " +
+      LOG.error("Unable to load DataNode plugins. " +
+              "Specified list of plugins: {}",
           pluginsValue, e);
       throw e;
     }
     for (ServicePlugin p: plugins) {
       try {
         p.start(this);
-        LOG.info("Started plug-in " + p);
+        LOG.info("Started plug-in {}", p);
       } catch (Throwable t) {
-        LOG.warn("ServicePlugin " + p + " could not be started", t);
+        LOG.warn("ServicePlugin {} could not be started", p, t);
       }
     }
   }
@@ -1026,7 +1027,7 @@ public class DataNode extends ReconfigurableBase
         traceAdminService,
         ipcServer);
 
-    LOG.info("Opened IPC server at " + ipcServer.getListenerAddress());
+    LOG.info("Opened IPC server at {}", ipcServer.getListenerAddress());
 
     // set service-level authorization security policy
     if (getConf().getBoolean(
@@ -1085,8 +1086,9 @@ public class DataNode extends ReconfigurableBase
       directoryScanner = new DirectoryScanner(this, data, conf);
       directoryScanner.start();
     } else {
-      LOG.info("Periodic Directory Tree Verification scan is disabled because " +
-                   reason);
+      LOG.info("Periodic Directory Tree Verification scan " +
+              "is disabled because {}",
+          reason);
     }
   }
   
@@ -1139,7 +1141,7 @@ public class DataNode extends ReconfigurableBase
           dnConf.getTransferSocketRecvBufferSize());
     }
     streamingAddr = tcpPeerServer.getStreamingAddr();
-    LOG.info("Opened streaming server at " + streamingAddr);
+    LOG.info("Opened streaming server at {}", streamingAddr);
     this.threadGroup = new ThreadGroup("dataXceiverServer");
     xserver = new DataXceiverServer(tcpPeerServer, getConf(), this);
     this.dataXceiverServer = new Daemon(threadGroup, xserver);
@@ -1157,7 +1159,7 @@ public class DataNode extends ReconfigurableBase
       if (domainPeerServer != null) {
         this.localDataXceiverServer = new Daemon(threadGroup,
             new DataXceiverServer(domainPeerServer, getConf(), this));
-        LOG.info("Listening on UNIX domain socket: " +
+        LOG.info("Listening on UNIX domain socket: {}",
             domainPeerServer.getBindPath());
       }
     }
@@ -1175,7 +1177,7 @@ public class DataNode extends ReconfigurableBase
          (!conf.getBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL,
           HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT))) {
         LOG.warn("Although short-circuit local reads are configured, " +
-            "they are disabled because you didn't configure " +
+            "they are disabled because you didn't configure {}",
             DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY);
       }
       return null;
@@ -1205,8 +1207,8 @@ public class DataNode extends ReconfigurableBase
       bpos.notifyNamenodeReceivedBlock(block, delHint, storageUuid,
           isOnTransientStorage);
     } else {
-      LOG.error("Cannot find BPOfferService for reporting block received for bpid="
-          + block.getBlockPoolId());
+      LOG.error("Cannot find BPOfferService for reporting block received " +
+              "for bpid={}", block.getBlockPoolId());
     }
   }
   
@@ -1217,8 +1219,8 @@ public class DataNode extends ReconfigurableBase
     if(bpos != null) {
       bpos.notifyNamenodeReceivingBlock(block, storageUuid);
     } else {
-      LOG.error("Cannot find BPOfferService for reporting block receiving for bpid="
-          + block.getBlockPoolId());
+      LOG.error("Cannot find BPOfferService for reporting block receiving " +
+          "for bpid={}", block.getBlockPoolId());
     }
   }
   
@@ -1239,7 +1241,7 @@ public class DataNode extends ReconfigurableBase
   public void reportBadBlocks(ExtendedBlock block) throws IOException{
     FsVolumeSpi volume = getFSDataset().getVolume(block);
     if (volume == null) {
-      LOG.warn("Cannot find FsVolumeSpi to report bad block: " + block);
+      LOG.warn("Cannot find FsVolumeSpi to report bad block: {}", block);
       return;
     }
     reportBadBlocks(block, volume);
@@ -1382,7 +1384,7 @@ public class DataNode extends ReconfigurableBase
         }
       }
     }
-    LOG.info("Starting DataNode with maxLockedMemory = " +
+    LOG.info("Starting DataNode with maxLockedMemory = {}",
         dnConf.maxLockedMemory);
 
     int volFailuresTolerated = dnConf.getVolFailuresTolerated();
@@ -1409,8 +1411,8 @@ public class DataNode extends ReconfigurableBase
 
     // Login is done by now. Set the DN user name.
     dnUserName = UserGroupInformation.getCurrentUser().getUserName();
-    LOG.info("dnUserName = " + dnUserName);
-    LOG.info("supergroup = " + supergroup);
+    LOG.info("dnUserName = {}", dnUserName);
+    LOG.info("supergroup = {}", supergroup);
     initIpcServer();
 
     metrics = DataNodeMetrics.create(getConf(), getDisplayName());
@@ -1514,8 +1516,8 @@ public class DataNode extends ReconfigurableBase
     if (storage.getDatanodeUuid() == null) {
       storage.setDatanodeUuid(generateUuid());
       storage.writeAll();
-      LOG.info("Generated and persisted new Datanode UUID " +
-               storage.getDatanodeUuid());
+      LOG.info("Generated and persisted new Datanode UUID {}",
+          storage.getDatanodeUuid());
     }
   }
 
@@ -1583,11 +1585,11 @@ public class DataNode extends ReconfigurableBase
     if (!blockPoolTokenSecretManager.isBlockPoolRegistered(blockPoolId)) {
       long blockKeyUpdateInterval = keys.getKeyUpdateInterval();
       long blockTokenLifetime = keys.getTokenLifetime();
-      LOG.info("Block token params received from NN: for block pool " +
-          blockPoolId + " keyUpdateInterval="
-          + blockKeyUpdateInterval / (60 * 1000)
-          + " min(s), tokenLifetime=" + blockTokenLifetime / (60 * 1000)
-          + " min(s)");
+      LOG.info("Block token params received from NN: " +
+          "for block pool {} keyUpdateInterval={} min(s), " +
+          "tokenLifetime={} min(s)",
+          blockPoolId, blockKeyUpdateInterval / (60 * 1000),
+          blockTokenLifetime / (60 * 1000));
       final boolean enableProtobuf = getConf().getBoolean(
           DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_PROTOBUF_ENABLE,
           DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_PROTOBUF_ENABLE_DEFAULT);
@@ -1690,9 +1692,10 @@ public class DataNode extends ReconfigurableBase
         storage.recoverTransitionRead(this, nsInfo, dataDirs, startOpt);
       }
       final StorageInfo bpStorage = storage.getBPStorage(bpid);
-      LOG.info("Setting up storage: nsid=" + bpStorage.getNamespaceID()
-          + ";bpid=" + bpid + ";lv=" + storage.getLayoutVersion()
-          + ";nsInfo=" + nsInfo + ";dnuuid=" + storage.getDatanodeUuid());
+      LOG.info("Setting up storage: nsid={};bpid={};lv={};" +
+              "nsInfo={};dnuuid={}",
+          bpStorage.getNamespaceID(), bpid, storage.getLayoutVersion(),
+          nsInfo, storage.getDatanodeUuid());
     }
 
     // If this is a newly formatted DataNode then assign a new DatanodeUuid.
@@ -1802,9 +1805,8 @@ public class DataNode extends ReconfigurableBase
       final boolean connectToDnViaHostname) throws IOException {
     final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
     final InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
-    }
+    LOG.debug("Connecting to datanode {} addr={}",
+        dnAddr, addr);
     final UserGroupInformation loginUgi = UserGroupInformation.getLoginUser();
     try {
       return loginUgi
@@ -1868,20 +1870,15 @@ public class DataNode extends ReconfigurableBase
     checkBlockToken(block, token, BlockTokenIdentifier.AccessMode.READ);
     Preconditions.checkNotNull(data, "Storage not yet initialized");
     BlockLocalPathInfo info = data.getBlockLocalPathInfo(block);
-    if (LOG.isDebugEnabled()) {
-      if (info != null) {
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("getBlockLocalPathInfo successful block=" + block
-              + " blockfile " + info.getBlockPath() + " metafile "
-              + info.getMetaPath());
-        }
-      } else {
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("getBlockLocalPathInfo for block=" + block
-              + " returning null");
-        }
-      }
+    if (info != null) {
+      LOG.trace("getBlockLocalPathInfo successful " +
+          "block={} blockfile {} metafile {}",
+          block, info.getBlockPath(), info.getMetaPath());
+    } else {
+      LOG.trace("getBlockLocalPathInfo for block={} " +
+          "returning null", block);
     }
+
     metrics.incrBlocksGetLocalPathInfo();
     return info;
   }
@@ -1939,9 +1936,7 @@ public class DataNode extends ReconfigurableBase
       ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
       DataInputStream in = new DataInputStream(buf);
       id.readFields(in);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Got: " + id.toString());
-      }
+      LOG.debug("Got: {}", id);
       blockPoolTokenSecretManager.checkAccess(id, null, block, accessMode,
           null, null);
     }
@@ -1959,9 +1954,9 @@ public class DataNode extends ReconfigurableBase
       for (ServicePlugin p : plugins) {
         try {
           p.stop();
-          LOG.info("Stopped plug-in " + p);
+          LOG.info("Stopped plug-in {}", p);
         } catch (Throwable t) {
-          LOG.warn("ServicePlugin " + p + " could not be stopped", t);
+          LOG.warn("ServicePlugin {} could not be stopped", p, t);
         }
       }
     }
@@ -1984,7 +1979,7 @@ public class DataNode extends ReconfigurableBase
         this.dataXceiverServer.interrupt();
       } catch (Exception e) {
         // Ignore, since the out of band messaging is advisory.
-        LOG.trace("Exception interrupting DataXceiverServer: ", e);
+        LOG.trace("Exception interrupting DataXceiverServer", e);
       }
     }
 
@@ -2038,7 +2033,7 @@ public class DataNode extends ReconfigurableBase
           this.threadGroup.interrupt();
           break;
         }
-        LOG.info("Waiting for threadgroup to exit, active threads is " +
+        LOG.info("Waiting for threadgroup to exit, active threads is {}",
                  this.threadGroup.activeCount());
         if (this.threadGroup.activeCount() == 0) {
           break;
@@ -2085,7 +2080,7 @@ public class DataNode extends ReconfigurableBase
       try {
         this.blockPoolManager.shutDownAll(bposArray);
       } catch (InterruptedException ie) {
-        LOG.warn("Received exception in BlockPoolManager#shutDownAll: ", ie);
+        LOG.warn("Received exception in BlockPoolManager#shutDownAll", ie);
       }
     }
     
@@ -2093,7 +2088,7 @@ public class DataNode extends ReconfigurableBase
       try {
         this.storage.unlockAll();
       } catch (IOException ie) {
-        LOG.warn("Exception when unlocking storage: " + ie, ie);
+        LOG.warn("Exception when unlocking storage", ie);
       }
     }
     if (data != null) {
@@ -2140,8 +2135,8 @@ public class DataNode extends ReconfigurableBase
 
   private void handleDiskError(String failedVolumes) {
     final boolean hasEnoughResources = data.hasEnoughResource();
-    LOG.warn("DataNode.handleDiskError on : [" + failedVolumes +
-        "] Keep Running: " + hasEnoughResources);
+    LOG.warn("DataNode.handleDiskError on: " +
+        "[{}] Keep Running: {}", failedVolumes, hasEnoughResources);
     
     // If we have enough active valid volumes then we do not want to 
     // shutdown the DN completely.
@@ -2438,15 +2433,13 @@ public class DataNode extends ReconfigurableBase
         String[] targetStorageIds, ExtendedBlock b,
         BlockConstructionStage stage, final String clientname) {
       if (DataTransferProtocol.LOG.isDebugEnabled()) {
-        DataTransferProtocol.LOG.debug(getClass().getSimpleName() + ": "
-            + b + " (numBytes=" + b.getNumBytes() + ")"
-            + ", stage=" + stage
-            + ", clientname=" + clientname
-            + ", targets=" + Arrays.asList(targets)
-            + ", target storage types=" + (targetStorageTypes == null ? "[]" :
-            Arrays.asList(targetStorageTypes))
-            + ", target storage IDs=" + (targetStorageIds == null ? "[]" :
-            Arrays.asList(targetStorageIds)));
+        DataTransferProtocol.LOG.debug("{}: {} (numBytes={}), stage={}, " +
+                "clientname={}, targets={}, target storage types={}, " +
+                "target storage IDs={}", getClass().getSimpleName(), b,
+            b.getNumBytes(), stage, clientname, Arrays.asList(targets),
+            targetStorageTypes == null ? "[]" :
+                Arrays.asList(targetStorageTypes),
+            targetStorageIds == null ? "[]" : Arrays.asList(targetStorageIds));
       }
       this.targets = targets;
       this.targetStorageTypes = targetStorageTypes;
@@ -2475,9 +2468,7 @@ public class DataNode extends ReconfigurableBase
       try {
         final String dnAddr = targets[0].getXferAddr(connectToDnViaHostname);
         InetSocketAddress curTarget = NetUtils.createSocketAddr(dnAddr);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Connecting to datanode " + dnAddr);
-        }
+        LOG.debug("Connecting to datanode {}", dnAddr);
         sock = newSocket();
         NetUtils.connect(sock, curTarget, dnConf.socketTimeout);
         sock.setTcpNoDelay(dnConf.getDataTransferServerTcpNoDelay());
@@ -2521,17 +2512,15 @@ public class DataNode extends ReconfigurableBase
         blockSender.sendBlock(out, unbufOut, null);
 
         // no response necessary
-        LOG.info(getClass().getSimpleName() + ", at "
-            + DataNode.this.getDisplayName() + ": Transmitted " + b
-            + " (numBytes=" + b.getNumBytes() + ") to " + curTarget);
+        LOG.info("{}, at {}: Transmitted {} (numBytes={}) to {}",
+            getClass().getSimpleName(), DataNode.this.getDisplayName(),
+            b, b.getNumBytes(), curTarget);
 
         // read ack
         if (isClient) {
           DNTransferAckProto closeAck = DNTransferAckProto.parseFrom(
               PBHelperClient.vintPrefixed(in));
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(getClass().getSimpleName() + ": close-ack=" + closeAck);
-          }
+          LOG.debug("{}: close-ack={}", getClass().getSimpleName(), closeAck);
           if (closeAck.getStatus() != Status.SUCCESS) {
             if (closeAck.getStatus() == Status.ERROR_ACCESS_TOKEN) {
               throw new InvalidBlockTokenException(
@@ -2550,17 +2539,11 @@ public class DataNode extends ReconfigurableBase
           // Add the block to the front of the scanning queue if metadata file
           // is corrupt. We already add the block to front of scanner if the
           // peer disconnects.
-          LOG.info("Adding block: " + b + " for scanning");
+          LOG.info("Adding block: {} for scanning", b);
           blockScanner.markSuspectBlock(data.getVolume(b).getStorageID(), b);
         }
-        LOG.warn(bpReg + ":Failed to transfer " + b + " to " +
-            targets[0] + " got ", ie);
-        // disk check moved to FileIoProvider
-        IOException cause = DatanodeUtil.getCauseIfDiskError(ie);
-        if (cause != null) { // possible disk error
-          LOG.warn("IOException in DataTransfer#run() "+ ie.getMessage() +". "
-                  + "Cause is ", cause);
-        }
+        LOG.warn("{}:Failed to transfer {} to {} got",
+            bpReg, b, targets[0], ie);
       } finally {
         decrementXmitsInProgress();
         IOUtils.closeStream(blockSender);
@@ -2691,14 +2674,9 @@ public class DataNode extends ReconfigurableBase
       final StorageLocation location;
       try {
         location = StorageLocation.parse(locationString);
-      } catch (IOException ioe) {
-        LOG.error("Failed to initialize storage directory " + locationString
-            + ". Exception details: " + ioe);
-        // Ignore the exception.
-        continue;
-      } catch (SecurityException se) {
-        LOG.error("Failed to initialize storage directory " + locationString
-                     + ". Exception details: " + se);
+      } catch (IOException | SecurityException ioe) {
+        LOG.error("Failed to initialize storage directory {}." +
+            "Exception details: {}", locationString, ioe.toString());
         // Ignore the exception.
         continue;
       }
@@ -2745,7 +2723,7 @@ public class DataNode extends ReconfigurableBase
           wait(2000);
         }
       } catch (InterruptedException ex) {
-        LOG.warn("Received exception in Datanode#join: " + ex);
+        LOG.warn("Received exception in Datanode#join: {}", ex.toString());
       }
     }
   }
@@ -2950,9 +2928,7 @@ public class DataNode extends ReconfigurableBase
       }
       for (TokenIdentifier tokenId : tokenIds) {
         BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Got: " + id.toString());
-        }
+        LOG.debug("Got: {}", id);
         blockPoolTokenSecretManager.checkAccess(id, null, block,
             BlockTokenIdentifier.AccessMode.READ, null, null);
       }
@@ -3165,11 +3141,11 @@ public class DataNode extends ReconfigurableBase
   public void deleteBlockPool(String blockPoolId, boolean force)
       throws IOException {
     checkSuperuserPrivilege();
-    LOG.info("deleteBlockPool command received for block pool " + blockPoolId
-        + ", force=" + force);
+    LOG.info("deleteBlockPool command received for block pool {}, " +
+        "force={}", blockPoolId, force);
     if (blockPoolManager.get(blockPoolId) != null) {
-      LOG.warn("The block pool "+blockPoolId+
-          " is still running, cannot be deleted.");
+      LOG.warn("The block pool {} is still running, cannot be deleted.",
+          blockPoolId);
       throw new IOException(
           "The block pool is still running. First do a refreshNamenodes to " +
           "shutdown the block pool service");
@@ -3181,8 +3157,8 @@ public class DataNode extends ReconfigurableBase
   @Override // ClientDatanodeProtocol
   public synchronized void shutdownDatanode(boolean forUpgrade) throws IOException {
     checkSuperuserPrivilege();
-    LOG.info("shutdownDatanode command received (upgrade=" + forUpgrade +
-        "). Shutting down Datanode...");
+    LOG.info("shutdownDatanode command received (upgrade={}). " +
+        "Shutting down Datanode...", forUpgrade);
 
     // Shutdown can be called only once.
     if (shutdownInProgress) {
@@ -3381,12 +3357,9 @@ public class DataNode extends ReconfigurableBase
       // Remove all unhealthy volumes from DataNode.
       removeVolumes(unhealthyLocations, false);
     } catch (IOException e) {
-      LOG.warn("Error occurred when removing unhealthy storage dirs: "
-          + e.getMessage(), e);
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(sb.toString());
+      LOG.warn("Error occurred when removing unhealthy storage dirs", e);
     }
+    LOG.debug("{}", sb);
       // send blockreport regarding volume failure
     handleDiskError(sb.toString());
   }
@@ -3568,7 +3541,7 @@ public class DataNode extends ReconfigurableBase
     case DiskBalancerConstants.DISKBALANCER_BANDWIDTH :
       return Long.toString(this.diskBalancer.getBandwidth());
     default:
-      LOG.error("Disk Balancer - Unknown key in get balancer setting. Key: " +
+      LOG.error("Disk Balancer - Unknown key in get balancer setting. Key: {}",
           key);
       throw new DiskBalancerException("Unknown key",
           DiskBalancerException.Result.UNKNOWN_KEY);
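
A note on the two SLF4J behaviors this conversion leans on, for anyone
reviewing the patch: {} placeholders defer message construction until the
log level is enabled (which is why many "if (LOG.isDebugEnabled())" guards
are dropped above), and a trailing Throwable left over after placeholder
substitution is logged with its full stack trace, which plain string
concatenation of the exception would lose. A minimal, self-contained sketch
of both behaviors follows; the class name and sample values are illustrative
only and do not come from the patch:

import java.io.IOException;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class Slf4jLoggingSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(Slf4jLoggingSketch.class);

  public static void main(String[] args) {
    String volume = "/data/dn1";

    // Parameterized form: the message string is only assembled when INFO
    // is enabled, so no isInfoEnabled() guard is required.
    LOG.info("Successfully added volume: {}", volume);

    try {
      throw new IOException("simulated disk failure");
    } catch (IOException ioe) {
      // One placeholder, two arguments: 'volume' fills the {}, and because
      // the final argument is a Throwable not consumed by substitution,
      // SLF4J logs it as the exception, stack trace included.
      LOG.error("Failed to add volume: {}", volume, ioe);
    }
  }
}

Note that a guard such as DataTransferProtocol.LOG.isDebugEnabled() is still
kept above where preparing the arguments is itself costly (for example the
Arrays.asList calls), since parameterization only defers string
concatenation, not the evaluation of the arguments.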

