Posted to common-commits@hadoop.apache.org by cm...@apache.org on 2015/07/14 19:58:47 UTC

hadoop git commit: HDFS-7608: hdfs dfsclient newConnectedPeer has no write timeout (Xiaoyu Yao via Colin P. McCabe)

Repository: hadoop
Updated Branches:
  refs/heads/trunk 4084eaf94 -> 1d74ccece


HDFS-7608: hdfs dfsclient newConnectedPeer has no write timeout (Xiaoyu Yao via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1d74ccec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1d74ccec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1d74ccec

Branch: refs/heads/trunk
Commit: 1d74ccececaefffaa90c0c18b40a3645dbc819d9
Parents: 4084eaf
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Tue Jul 14 10:57:59 2015 -0700
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Tue Jul 14 10:57:59 2015 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  1 +
 .../hadoop/hdfs/TestDistributedFileSystem.java  | 43 +++++++++++++++++---
 2 files changed, 38 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
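
The patch itself is a one-line change to DFSClient#newConnectedPeer, giving the peer a write timeout alongside the existing read timeout (both taken from the client's configured socket timeout), plus a new test that exercises the write path. As a minimal sketch (not part of this commit), assuming the standard dfs.client.socket-timeout key and a placeholder NameNode address, a client that wants a tighter bound on both directions of DataNode traffic could set the timeout like this:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClientSocketTimeoutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // dfs.client.socket-timeout (DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY) is in
    // milliseconds; with this patch it bounds writes to DataNodes as well as reads.
    conf.setInt("dfs.client.socket-timeout", 30000);
    // "namenode" and port 8020 are placeholders for a real cluster address.
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020/"), conf)) {
      fs.getFileStatus(new Path("/"));
    }
  }
}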


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d74ccec/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 4923a50..6629a83 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3127,6 +3127,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
       peer = TcpPeerServer.peerFromSocketAndKey(saslClient, sock, this,
           blockToken, datanodeId);
       peer.setReadTimeout(socketTimeout);
+      peer.setWriteTimeout(socketTimeout);
       success = true;
       return peer;
     } finally {
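
The read timeout set just above only bounds reads from the peer; as the JIRA title notes, nothing bounded the write path, so a write to a DataNode that had stopped draining its connection could block the client indefinitely once the socket buffers filled. The added setWriteTimeout closes that gap. Below is a minimal, self-contained sketch of the underlying behaviour using plain java.net sockets rather than the Hadoop Peer API; as written it blocks forever on the write, which is the kind of hang the patch prevents:

import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.Socket;

public class BlockingWriteDemo {
  public static void main(String[] args) throws Exception {
    // A listener that never accepts or reads; the kernel still completes the
    // TCP handshake, so connect() succeeds but nothing ever drains the data.
    try (ServerSocket server = new ServerSocket(0);
         Socket client = new Socket()) {
      client.connect(new InetSocketAddress("127.0.0.1", server.getLocalPort()), 1000);
      client.setSoTimeout(1000); // SO_TIMEOUT bounds reads only
      byte[] oneMb = new byte[1024 * 1024];
      // Fills the send/receive buffers and then blocks with no timeout,
      // which is the situation DFSClient#newConnectedPeer now guards against.
      client.getOutputStream().write(oneMb);
    }
  }
}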

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d74ccec/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index f9da472..0b77210 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -1132,10 +1132,9 @@ public class TestDistributedFileSystem {
       cluster.shutdown();
     }
   }
-  
-  
+
   @Test(timeout=10000)
-  public void testDFSClientPeerTimeout() throws IOException {
+  public void testDFSClientPeerReadTimeout() throws IOException {
     final int timeout = 1000;
     final Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, timeout);
@@ -1152,11 +1151,11 @@ public class TestDistributedFileSystem {
       long start = Time.now();
       try {
         peer.getInputStream().read();
-        Assert.fail("should timeout");
+        Assert.fail("read should timeout");
       } catch (SocketTimeoutException ste) {
         long delta = Time.now() - start;
-        Assert.assertTrue("timedout too soon", delta >= timeout*0.9);
-        Assert.assertTrue("timedout too late", delta <= timeout*1.1);
+        Assert.assertTrue("read timedout too soon", delta >= timeout*0.9);
+        Assert.assertTrue("read timedout too late", delta <= timeout*1.1);
       } catch (Throwable t) {
         Assert.fail("wrong exception:"+t);
       }
@@ -1178,4 +1177,36 @@ public class TestDistributedFileSystem {
       cluster.shutdown();
     }
   }
+
+  @Test(timeout=10000)
+  public void testDFSClientPeerWriteTimeout() throws IOException {
+    final int timeout = 1000;
+    final Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, timeout);
+
+    // only need cluster to create a dfs client to get a peer
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    try {
+      cluster.waitActive();
+      DistributedFileSystem dfs = cluster.getFileSystem();
+      // Write 1 MB to a dummy socket to ensure the write times out
+      ServerSocket socket = new ServerSocket(0);
+      Peer peer = dfs.getClient().newConnectedPeer(
+        (InetSocketAddress) socket.getLocalSocketAddress(), null, null);
+      long start = Time.now();
+      try {
+        byte[] buf = new byte[1024 * 1024];
+        peer.getOutputStream().write(buf);
+        Assert.fail("write should timeout");
+      } catch (SocketTimeoutException ste) {
+        long delta = Time.now() - start;
+        Assert.assertTrue("write timedout too soon", delta >= timeout * 0.9);
+        Assert.assertTrue("write timedout too late", delta <= timeout * 1.1);
+      } catch (Throwable t) {
+        Assert.fail("wrong exception:" + t);
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }
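
The new testDFSClientPeerWriteTimeout mirrors the existing read-timeout test: it points a peer at a ServerSocket that never accepts or reads, writes a 1 MB buffer so the socket buffers fill and the write blocks, and then asserts that the SocketTimeoutException arrives within roughly 10% of the configured 1000 ms timeout (delta between 0.9 and 1.1 times the timeout). The 1 MB size is presumably chosen to be large enough to overrun the default kernel send and receive buffers on the loopback interface, so the write cannot complete before the timeout fires.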