Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2011/06/02 01:02:18 UTC

svn commit: r1130339 - in /hadoop/hdfs/trunk: CHANGES.txt src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java src/test/hdfs/org/apache/hadoop/hdfs/TestWriteRead.java

Author: szetszwo
Date: Wed Jun  1 23:02:17 2011
New Revision: 1130339

URL: http://svn.apache.org/viewvc?rev=1130339&view=rev
Log:
HDFS-2021. Update numBytesAcked before sending the ack in PacketResponder.  Contributed by John George
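
For context: a client treats the pipeline ack (for example, the one that completes an hflush()) as confirmation that the bytes have reached the datanodes in the pipeline. Before this change, PacketResponder updated the replica's acked byte count only after the ack had already been written upstream, so there was a short window in which a reader consulting getBytesAcked() could see fewer bytes than the writer had been told were acknowledged. The snippet below is a simplified sketch of the corrected ordering, condensed from the diff further down; error handling and the rest of PacketResponder.run() are omitted.

    // Simplified sketch of the corrected ordering in PacketResponder.run():
    PipelineAck replyAck = new PipelineAck(expected, replies);

    // Record the acknowledged bytes BEFORE the ack leaves this datanode,
    // so getBytesAcked() is never behind what the client has been told.
    if (replyAck.isSuccess()
        && pkt.offsetInBlock > replicaInfo.getBytesAcked()) {
      replicaInfo.setBytesAcked(pkt.offsetInBlock);
    }

    // Only now forward the ack to the upstream datanode (or the client).
    replyAck.write(upstreamOut);
    upstreamOut.flush();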

Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestWriteRead.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=1130339&r1=1130338&r2=1130339&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Wed Jun  1 23:02:17 2011
@@ -653,6 +653,9 @@ Trunk (unreleased changes)
     HDFS-1936. Layout version change from HDFS-1822 causes upgrade failure.
     (suresh)
 
+    HDFS-2021. Update numBytesAcked before sending the ack in PacketResponder.
+    (John George via szetszwo)
+
 Release 0.22.0 - Unreleased
 
   INCOMPATIBLE CHANGES

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java?rev=1130339&r1=1130338&r2=1130339&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java Wed Jun  1 23:02:17 2011
@@ -1009,6 +1009,10 @@ class BlockReceiver implements Closeable
             }
             PipelineAck replyAck = new PipelineAck(expected, replies);
             
+            if (replyAck.isSuccess() && 
+                 pkt.offsetInBlock > replicaInfo.getBytesAcked())
+                replicaInfo.setBytesAcked(pkt.offsetInBlock);
+
             // send my ack back to upstream datanode
             replyAck.write(upstreamOut);
             upstreamOut.flush();
@@ -1019,10 +1023,6 @@ class BlockReceiver implements Closeable
               // remove the packet from the ack queue
               removeAckHead();
               // update bytes acked
-              if (replyAck.isSuccess() && 
-                  pkt.offsetInBlock > replicaInfo.getBytesAcked()) {
-                replicaInfo.setBytesAcked(pkt.offsetInBlock);
-              }
             }
         } catch (IOException e) {
           LOG.warn("IOException in BlockReceiver.run(): ", e);

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestWriteRead.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestWriteRead.java?rev=1130339&r1=1130338&r2=1130339&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestWriteRead.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestWriteRead.java Wed Jun  1 23:02:17 2011
@@ -42,8 +42,8 @@ import org.junit.Test;
 public class TestWriteRead {
   
   // junit test settings
-  private static final int WR_NTIMES = 4;
-  private static final int WR_CHUNK_SIZE = 1000;
+  private static final int WR_NTIMES = 350;
+  private static final int WR_CHUNK_SIZE = 10000;
 
   
   private static final int BUFFER_SIZE = 8192  * 100;
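
Presumably the larger constants are what make the test exercise this path meaningfully: 350 iterations of 10,000-byte chunks write about 3,500,000 bytes (roughly 3.3 MiB) instead of the previous 4 x 1,000 = 4,000 bytes, pushing many more packets, and therefore many more acks, through the pipeline during the test's write/read cycle.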