You are viewing a plain text version of this content. The canonical (HTML) version of this message is available in the Apache mailing-list archive; the hyperlink was lost in this plain-text extraction.
Posted to common-commits@hadoop.apache.org by el...@apache.org on 2011/12/07 19:06:52 UTC

svn commit: r1211577 - in /hadoop/common/branches/branch-1: CHANGES.txt src/hdfs/org/apache/hadoop/hdfs/DFSClient.java

Author: eli
Date: Wed Dec  7 18:06:52 2011
New Revision: 1211577

URL: http://svn.apache.org/viewvc?rev=1211577&view=rev
Log:
HDFS-2637. The rpc timeout for block recovery is too low. (eli)

Modified:
    hadoop/common/branches/branch-1/CHANGES.txt
    hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java

Modified: hadoop/common/branches/branch-1/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/CHANGES.txt?rev=1211577&r1=1211576&r2=1211577&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1/CHANGES.txt Wed Dec  7 18:06:52 2011
@@ -58,6 +58,8 @@ Release 1.1.0 - unreleased
     HADOOP-7879. DistributedFileSystem#createNonRecursive should also
     incrementWriteOps statistics. (Jon Hsieh via todd)
 
+    HDFS-2637. The rpc timeout for block recovery is too low. (eli)
+
   IMPROVEMENTS
 
     MAPREDUCE-3008. [Gridmix] Improve cumulative CPU usage emulation for 

Modified: hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java?rev=1211577&r1=1211576&r2=1211577&view=diff
==============================================================================
--- hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/branches/branch-1/src/hdfs/org/apache/hadoop/hdfs/DFSClient.java Wed Dec  7 18:06:52 2011
@@ -3114,7 +3114,11 @@ public class DFSClient implements FSCons
         try {
           // Pick the "least" datanode as the primary datanode to avoid deadlock.
           primaryNode = Collections.min(Arrays.asList(newnodes));
-          primary = createClientDatanodeProtocolProxy(primaryNode, conf, block, accessToken, socketTimeout);
+          // Set the timeout to reflect that recovery requires at most two rpcs
+          // to each DN and two rpcs to the NN.
+          int recoveryTimeout = (newnodes.length * 2 + 2) * socketTimeout;
+          primary = createClientDatanodeProtocolProxy(primaryNode, conf, block,
+              accessToken, recoveryTimeout);
           newBlock = primary.recoverBlock(block, isAppend, newnodes);
         } catch (IOException e) {
           LOG.warn("Failed recovery attempt #" + recoveryErrorCount +