Posted to common-commits@hadoop.apache.org by wa...@apache.org on 2014/10/30 19:03:36 UTC

git commit: HDFS-3342. SocketTimeoutException in BlockSender.sendChunks could have a better error message. Contributed by Yongjun Zhang.

Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2b5baa671 -> ec19c85e6


HDFS-3342. SocketTimeoutException in BlockSender.sendChunks could have a better error message. Contributed by Yongjun Zhang.

(cherry picked from commit 16c0f04c50822a39610e34e0715d3d4a44be5a5f)
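
For context: before this patch, BlockSender#sendChunks itself logged a
generic "Failed to send data" line when the write to the client timed
out. The patch moves that reporting up to DataXceiver#run, which now
logs "Likely the client has stopped reading, disconnecting it" for
READ_BLOCK ops. A minimal sketch, not part of this commit, of the
client behavior that triggers the path (host, port, and buffer size
are hypothetical):

  import java.io.InputStream;
  import java.net.Socket;

  class PartialReaderSketch {
    // Read a little of a block, then stall with the socket still open.
    // On the DataNode side, sendChunks keeps writing until the send
    // buffer fills and its write timeout fires as a
    // SocketTimeoutException.
    static void readPartThenStall(String host, int port) throws Exception {
      Socket s = new Socket(host, port);
      InputStream in = s.getInputStream();
      byte[] buf = new byte[4096];
      in.read(buf);                  // consume only the first few KB
      Thread.sleep(Long.MAX_VALUE);  // never read again, never close
    }
  }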


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec19c85e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec19c85e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec19c85e

Branch: refs/heads/branch-2
Commit: ec19c85e65cfcab4104aacf419f38f0beabe0af2
Parents: 2b5baa6
Author: Andrew Wang <wa...@apache.org>
Authored: Thu Oct 30 11:03:13 2014 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Thu Oct 30 11:03:30 2014 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt         |  3 +++
 .../hadoop/hdfs/server/datanode/BlockSender.java    |  7 ++-----
 .../hadoop/hdfs/server/datanode/DataXceiver.java    | 16 ++++++++++++++--
 3 files changed, 19 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec19c85e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ef95121..dccfde7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -67,6 +67,9 @@ Release 2.7.0 - UNRELEASED
 
     HDFS-7280. Use netty 4 in WebImageViewer. (wheat9)
 
+    HDFS-3342. SocketTimeoutException in BlockSender.sendChunks could
+    have a better error message. (Yongjun Zhang via wang)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec19c85e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
index ce0e1d5..27d3e5c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
@@ -574,12 +574,9 @@ class BlockSender implements java.io.Closeable {
          * writing to client timed out.  This happens if the client reads
          * part of a block and then decides not to read the rest (but leaves
          * the socket open).
+         * 
+         * Reporting of this case is done in DataXceiver#run
          */
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("Failed to send data:", e);
-        } else {
-          LOG.info("Failed to send data: " + e);
-        }
       } else {
         /* Exception while writing to the client. Connection closure from
          * the other end is mostly the case and we do not care much about
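
The deleted lines above are the old, less specific report. Reporting
now happens once, in DataXceiver#run (next file), using the same
two-level idiom as the surrounding code: full stack trace only at
TRACE, a one-line summary at INFO, so routine client disconnects do
not flood the DataNode log. A standalone sketch of that idiom (class
and method names here are illustrative, not from the patch):

  import org.apache.commons.logging.Log;
  import org.apache.commons.logging.LogFactory;

  class TimeoutReportSketch {
    private static final Log LOG =
        LogFactory.getLog(TimeoutReportSketch.class);

    // Full stack trace only when TRACE is enabled; otherwise a single
    // INFO line with the exception's toString() appended.
    static void report(String summary, Throwable t) {
      if (LOG.isTraceEnabled()) {
        LOG.trace(summary, t);
      } else {
        LOG.info(summary + "; " + t);
      }
    }
  }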

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec19c85e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
index 6fc819d..9dfd33b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -39,6 +39,7 @@ import java.io.OutputStream;
 import java.net.InetSocketAddress;
 import java.net.Socket;
 import java.net.SocketException;
+import java.net.SocketTimeoutException;
 import java.nio.channels.ClosedChannelException;
 import java.security.MessageDigest;
 import java.util.Arrays;
@@ -240,6 +241,15 @@ class DataXceiver extends Receiver implements Runnable {
         } else {
           LOG.info(s + "; " + t);
         }
+      } else if (op == Op.READ_BLOCK && t instanceof SocketTimeoutException) {
+        String s1 =
+            "Likely the client has stopped reading, disconnecting it";
+        s1 += " (" + s + ")";
+        if (LOG.isTraceEnabled()) {
+          LOG.trace(s1, t);
+        } else {
+          LOG.info(s1 + "; " + t);          
+        }
       } else {
         LOG.error(s, t);
       }
@@ -520,9 +530,11 @@ class DataXceiver extends Receiver implements Runnable {
       /* What exactly should we do here?
        * Earlier version shutdown() datanode if there is disk error.
        */
-      LOG.warn(dnR + ":Got exception while serving " + block + " to "
+      if (!(ioe instanceof SocketTimeoutException)) {
+        LOG.warn(dnR + ":Got exception while serving " + block + " to "
           + remoteAddress, ioe);
-      datanode.metrics.incrDatanodeNetworkErrors();
+        datanode.metrics.incrDatanodeNetworkErrors();
+      }
       throw ioe;
     } finally {
       IOUtils.closeStream(blockSender);
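
Net effect of the DataXceiver changes: a SocketTimeoutException during
a READ_BLOCK op is no longer logged at WARN in readBlock and no longer
bumps the datanodeNetworkErrors metric; it is reported once from run()
with the clearer "client has stopped reading" message. How long
sendChunks waits before timing out is bounded by the DataNode's write
timeout, normally set in hdfs-site.xml; a sketch of setting it
programmatically (the five-minute value is only an example):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.HdfsConfiguration;

  class WriteTimeoutSketch {
    static Configuration withWriteTimeout(long millis) {
      Configuration conf = new HdfsConfiguration();
      // dfs.datanode.socket.write.timeout bounds how long the DataNode
      // blocks while writing block data to a client before failing
      // with a SocketTimeoutException.
      conf.setLong("dfs.datanode.socket.write.timeout", millis);
      return conf;
    }
  }

  // Example: wait up to five minutes for a slow reader.
  // Configuration conf = WriteTimeoutSketch.withWriteTimeout(5 * 60 * 1000L);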