Posted to common-commits@hadoop.apache.org by ar...@apache.org on 2016/10/20 00:25:02 UTC

[2/3] hadoop git commit: HDFS-10752. Several log refactoring/improvement suggestions in HDFS. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a3cbaf0c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a3cbaf0c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a3cbaf0c

Branch: refs/heads/branch-2
Commit: a3cbaf0c046285104d805fa9430f86eccc290e3b
Parents: ad7d3c4
Author: Arpit Agarwal <ar...@apache.org>
Authored: Wed Oct 19 17:20:07 2016 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Wed Oct 19 17:20:15 2016 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java      | 16 ++++++++--------
 .../server/blockmanagement/CorruptReplicasMap.java  |  4 ++--
 .../hadoop/hdfs/server/namenode/NameNode.java       | 10 +++++-----
 3 files changed, 15 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3cbaf0c/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
index 7bf93ad..e26fac5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
@@ -69,7 +69,7 @@ class OpenFileCtxCache {
     Iterator<Entry<FileHandle, OpenFileCtx>> it = openFileMap.entrySet()
         .iterator();
     if (LOG.isTraceEnabled()) {
-      LOG.trace("openFileMap size:" + openFileMap.size());
+      LOG.trace("openFileMap size:" + size());
     }
 
     Entry<FileHandle, OpenFileCtx> idlest = null;
@@ -117,10 +117,10 @@ class OpenFileCtxCache {
   boolean put(FileHandle h, OpenFileCtx context) {
     OpenFileCtx toEvict = null;
     synchronized (this) {
-      Preconditions.checkState(openFileMap.size() <= this.maxStreams,
-          "stream cache size " + openFileMap.size()
-              + "  is larger than maximum" + this.maxStreams);
-      if (openFileMap.size() == this.maxStreams) {
+      Preconditions.checkState(size() <= this.maxStreams,
+          "stream cache size " + size() + "  is larger than maximum" + this
+              .maxStreams);
+      if (size() == this.maxStreams) {
         Entry<FileHandle, OpenFileCtx> pairs = getEntryToEvict();
        if (pairs == null) {
           return false;
@@ -149,7 +149,7 @@ class OpenFileCtxCache {
     Iterator<Entry<FileHandle, OpenFileCtx>> it = openFileMap.entrySet()
         .iterator();
     if (LOG.isTraceEnabled()) {
-      LOG.trace("openFileMap size:" + openFileMap.size());
+      LOG.trace("openFileMap size:" + size());
     }
 
     while (it.hasNext()) {
@@ -168,7 +168,7 @@ class OpenFileCtxCache {
             openFileMap.remove(handle);
             if (LOG.isDebugEnabled()) {
               LOG.debug("After remove stream " + handle.getFileId()
-                  + ", the stream number:" + openFileMap.size());
+                  + ", the stream number:" + size());
             }
             ctxToRemove.add(ctx2);
           }
@@ -201,7 +201,7 @@ class OpenFileCtxCache {
       Iterator<Entry<FileHandle, OpenFileCtx>> it = openFileMap.entrySet()
           .iterator();
       if (LOG.isTraceEnabled()) {
-        LOG.trace("openFileMap size:" + openFileMap.size());
+        LOG.trace("openFileMap size:" + size());
       }
 
       while (it.hasNext()) {

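The hunks above all replace direct calls to openFileMap.size() with the class's own size() accessor, so the cache size is always read through one method. The accessor's body is not part of this diff; a minimal sketch of what it presumably looks like, assuming it simply delegates to the backing map:

    // Sketch only: the calls above show that OpenFileCtxCache defines
    // size(), but its body is not in this diff. A plausible form is a
    // plain delegation to the backing openFileMap.
    int size() {
      return openFileMap.size();
    }
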
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3cbaf0c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
index 35468da..8a097a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
@@ -84,12 +84,12 @@ public class CorruptReplicasMap{
     if (!nodes.keySet().contains(dn)) {
       NameNode.blockStateChangeLog.debug(
           "BLOCK NameSystem.addToCorruptReplicasMap: {} added as corrupt on "
-              + "{} by {} {}", blk.getBlockName(), dn, Server.getRemoteIp(),
+              + "{} by {} {}", blk, dn, Server.getRemoteIp(),
           reasonText);
     } else {
       NameNode.blockStateChangeLog.debug(
           "BLOCK NameSystem.addToCorruptReplicasMap: duplicate requested for" +
-              " {} to add as corrupt on {} by {} {}", blk.getBlockName(), dn,
+              " {} to add as corrupt on {} by {} {}", blk, dn,
               Server.getRemoteIp(), reasonText);
     }
     // Add the node or update the reason.

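The CorruptReplicasMap hunk passes the Block object itself to the parameterized logger instead of calling blk.getBlockName() up front, so the string form is only built if the debug message is actually emitted. A self-contained sketch of that pattern follows; the class, logger, and field names here are hypothetical stand-ins, not the HDFS types:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Illustrates deferred toString() with parameterized logging.
    // The Block class below is a stand-in, not the HDFS Block type.
    public class DeferredLoggingDemo {
      private static final Logger LOG =
          LoggerFactory.getLogger(DeferredLoggingDemo.class);

      static final class Block {
        private final long id;
        Block(long id) { this.id = id; }
        @Override public String toString() {
          // Runs only when the logger actually formats the message.
          return "blk_" + id;
        }
      }

      public static void main(String[] args) {
        Block blk = new Block(1073741825L);
        // Eager: the string argument is built even if debug is disabled.
        LOG.debug("added as corrupt: " + blk);
        // Deferred: toString() is invoked only when the message is emitted.
        LOG.debug("added as corrupt: {}", blk);
      }
    }

One caveat worth noting: if the object's toString() renders more detail than getBlockName() did, the emitted message may differ slightly from the old output; the refactoring trades that for skipping string construction when debug logging is off.
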
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3cbaf0c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 14646c1..6521712 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -667,7 +667,7 @@ public class NameNode extends ReconfigurableBase implements
 
   NamenodeRegistration setRegistration() {
     nodeRegistration = new NamenodeRegistration(
-        NetUtils.getHostPortString(rpcServer.getRpcAddress()),
+        NetUtils.getHostPortString(getNameNodeAddress()),
         NetUtils.getHostPortString(getHttpAddress()),
         getFSImage().getStorage(), getRole());
     return nodeRegistration;
@@ -730,7 +730,7 @@ public class NameNode extends ReconfigurableBase implements
       // This is expected for MiniDFSCluster. Set it now using 
       // the RPC server's bind address.
       clientNamenodeAddress = 
-          NetUtils.getHostPortString(rpcServer.getRpcAddress());
+          NetUtils.getHostPortString(getNameNodeAddress());
       LOG.info("Clients are to use " + clientNamenodeAddress + " to access"
           + " this namenode/service.");
     }
@@ -817,7 +817,7 @@ public class NameNode extends ReconfigurableBase implements
         LOG.warn("ServicePlugin " + p + " could not be started", t);
       }
     }
-    LOG.info(getRole() + " RPC up at: " + rpcServer.getRpcAddress());
+    LOG.info(getRole() + " RPC up at: " + getNameNodeAddress());
     if (rpcServer.getServiceRpcAddress() != null) {
       LOG.info(getRole() + " service RPC up at: "
           + rpcServer.getServiceRpcAddress());
@@ -1050,7 +1050,7 @@ public class NameNode extends ReconfigurableBase implements
    * @return NameNode RPC address in "host:port" string form
    */
   public String getNameNodeAddressHostPortString() {
-    return NetUtils.getHostPortString(rpcServer.getRpcAddress());
+    return NetUtils.getHostPortString(getNameNodeAddress());
   }
 
   /**
@@ -1059,7 +1059,7 @@ public class NameNode extends ReconfigurableBase implements
    */
   public InetSocketAddress getServiceRpcAddress() {
     final InetSocketAddress serviceAddr = rpcServer.getServiceRpcAddress();
-    return serviceAddr == null ? rpcServer.getRpcAddress() : serviceAddr;
+    return serviceAddr == null ? getNameNodeAddress() : serviceAddr;
   }
 
   /**

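Every NameNode hunk makes the same substitution: rpcServer.getRpcAddress() becomes the existing getNameNodeAddress() accessor, so all call sites resolve the client RPC address through one method instead of reaching into the RPC server directly. The accessor itself is not shown in this diff; presumably it is a one-line delegation, roughly:

    // Sketch only: getNameNodeAddress() exists in NameNode per the calls
    // above, but its body is not part of this diff. A plausible form is a
    // plain delegation to the RPC server's client-facing address.
    public InetSocketAddress getNameNodeAddress() {
      return rpcServer.getRpcAddress();
    }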
