Posted to common-commits@hadoop.apache.org by zh...@apache.org on 2015/09/30 17:41:51 UTC

[51/58] [abbrv] hadoop git commit: Revert "HDFS-9170. Move libhdfs / fuse-dfs / libwebhdfs to hdfs-client. Contributed by Haohui Mai."

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0539e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index e122748..e245d2a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -233,12 +233,16 @@ public class WebHdfsFileSystem extends FileSystem
       // refetch tokens.  even if ugi has credentials, don't attempt
       // to get another token to match hdfs/rpc behavior
       if (token != null) {
-        LOG.debug("Using UGI token: {}", token);
+        if(LOG.isDebugEnabled()) {
+          LOG.debug("Using UGI token: {}", token);
+        }
         canRefreshDelegationToken = false;
       } else {
         token = getDelegationToken(null);
         if (token != null) {
-          LOG.debug("Fetched new token: {}", token);
+          if(LOG.isDebugEnabled()) {
+            LOG.debug("Fetched new token: {}", token);
+          }
         } else { // security is disabled
           canRefreshDelegationToken = false;
         }
@@ -253,7 +257,9 @@ public class WebHdfsFileSystem extends FileSystem
     boolean replaced = false;
     if (canRefreshDelegationToken) {
       Token<?> token = getDelegationToken(null);
-      LOG.debug("Replaced expired token: {}", token);
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Replaced expired token: {}", token);
+      }
       setDelegationToken(token);
       replaced = (token != null);
     }
@@ -436,7 +442,9 @@ public class WebHdfsFileSystem extends FileSystem
     InetSocketAddress nnAddr = getCurrentNNAddr();
     final URL url = new URL(getTransportScheme(), nnAddr.getHostName(),
           nnAddr.getPort(), path + '?' + query);
-    LOG.trace("url={}", url);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("url={}", url);
+    }
     return url;
   }
 
@@ -471,7 +479,9 @@ public class WebHdfsFileSystem extends FileSystem
         + Param.toSortedString("&", getAuthParameters(op))
         + Param.toSortedString("&", parameters);
     final URL url = getNamenodeURL(path, query);
-    LOG.trace("url={}", url);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("url={}", url);
+    }
     return url;
   }
 
@@ -759,7 +769,9 @@ public class WebHdfsFileSystem extends FileSystem
       } catch (Exception e) { // catch json parser errors
         final IOException ioe =
             new IOException("Response decoding failure: "+e.toString(), e);
-        LOG.debug("Response decoding failure.", e);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Response decoding failure: {}", e.toString(), e);
+        }
         throw ioe;
       } finally {
         conn.disconnect();
@@ -1230,7 +1242,9 @@ public class WebHdfsFileSystem extends FileSystem
         cancelDelegationToken(delegationToken);
       }
     } catch (IOException ioe) {
-      LOG.debug("Token cancel failed: ", ioe);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Token cancel failed: ", ioe);
+      }
     } finally {
       super.close();
     }

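[Editor's note: the hunks above restore the explicit log-level guards that HDFS-8971 had removed from the client package. As a minimal, self-contained sketch of that guarded-logging idiom with SLF4J -- the class and method names below are illustrative, not taken from the Hadoop sources:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class GuardedLoggingExample {
      private static final Logger LOG =
          LoggerFactory.getLogger(GuardedLoggingExample.class);

      void refreshToken(String token) {
        // Guarded form restored by this revert: the isDebugEnabled() check
        // skips evaluating the log arguments when DEBUG is disabled.
        if (LOG.isDebugEnabled()) {
          LOG.debug("Fetched new token: {}", token);
        }

        // Unguarded form that HDFS-8971 had introduced: SLF4J's {} placeholder
        // already defers string formatting, so the guard mainly matters when
        // computing the arguments themselves is expensive.
        LOG.debug("Fetched new token: {}", token);
      }
    }
]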
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ee0539e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index dfd0b57..7b62b97 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1000,9 +1000,6 @@ Release 2.8.0 - UNRELEASED
     HDFS-8696. Make the lower and higher watermark in the DN Netty server
     configurable. (Xiaobing Zhou via wheat9)
 
-    HDFS-8971. Remove guards when calling LOG.debug() and LOG.trace() in client
-    package. (Mingliang Liu via wheat9)
-
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than