You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by ra...@apache.org on 2008/03/17 18:52:52 UTC
svn commit: r637995 - in /hadoop/core/trunk: CHANGES.txt
src/java/org/apache/hadoop/dfs/DataNode.java
Author: rangadi
Date: Mon Mar 17 10:52:48 2008
New Revision: 637995
URL: http://svn.apache.org/viewvc?rev=637995&view=rev
Log:
HADOOP-3006. Fix wrong packet size reported by DataNode when a block is being replicated. (rangadi)
Modified:
hadoop/core/trunk/CHANGES.txt
hadoop/core/trunk/src/java/org/apache/hadoop/dfs/DataNode.java
Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=637995&r1=637994&r2=637995&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Mon Mar 17 10:52:48 2008
@@ -260,6 +260,9 @@
HADOOP-3008. SocketIOWithTimeout throws InterruptedIOException if the
thread is interrupted while it is waiting. (rangadi)
+ HADOOP-3006. Fix wrong packet size reported by DataNode when a block
+ is being replicated. (rangadi)
+
Release 0.16.1 - 2008-03-13
INCOMPATIBLE CHANGES
Modified: hadoop/core/trunk/src/java/org/apache/hadoop/dfs/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/dfs/DataNode.java?rev=637995&r1=637994&r2=637995&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/dfs/DataNode.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/dfs/DataNode.java Mon Mar 17 10:52:48 2008
@@ -2327,11 +2327,32 @@
" offsetInBlock " + offsetInBlock +
" lastPacketInBlock " + lastPacketInBlock);
setBlockPosition(offsetInBlock);
+
+ int len = in.readInt();
+ curPacketSize += 4; // read an integer in previous line
// send packet header to next datanode in pipeline
if (mirrorOut != null) {
try {
- mirrorOut.writeInt(packetSize);
+ int mirrorPacketSize = packetSize;
+ if (len > bytesPerChecksum) {
+ /*
+ * This is a packet with non-interleaved checksum.
+ * But we are sending interleaving checksums to mirror,
+ * which changes packet len. Adjust the packet size for mirror.
+ *
+ * As mentioned above, this mismatch is
+ * temporary till HADOOP-1702.
+ */
+
+ // find out how many chunks are in this packet:
+ int chunksInPkt = (len + bytesPerChecksum - 1)/bytesPerChecksum;
+
+ // we send 4 more bytes for each of the extra
+ // checksum chunks. so :
+ mirrorPacketSize += (chunksInPkt - 1) * 4;
+ }
+ mirrorOut.writeInt(mirrorPacketSize);
mirrorOut.writeLong(offsetInBlock);
mirrorOut.writeLong(seqno);
mirrorOut.writeBoolean(lastPacketInBlock);
@@ -2357,9 +2378,6 @@
}
}
- int len = in.readInt();
- curPacketSize += 4; // read an integer in previous line
-
if (len == 0) {
LOG.info("Receiving empty packet for block " + block);
if (mirrorOut != null) {
@@ -2388,16 +2406,17 @@
int toRecv = Math.min(len, bytesPerChecksum);
- receiveChunk(toRecv, checksumBuf, checksumOff);
-
- len -= toRecv;
- checksumOff += checksumSize;
curPacketSize += (toRecv + checksumSize);
if (curPacketSize > packetSize) {
throw new IOException("Packet size for block " + block +
" too long " + curPacketSize +
" was expecting " + packetSize);
}
+
+ receiveChunk(toRecv, checksumBuf, checksumOff);
+
+ len -= toRecv;
+ checksumOff += checksumSize;
}
if (curPacketSize == packetSize) {