Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2010/09/21 01:49:35 UTC

svn commit: r999162 - in /hadoop/hdfs/trunk: ./ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/protocol/ src/java/org/apache/hadoop/hdfs/server/balancer/ src/java/org/apache/hadoop/hdfs/server/common/ src/java/org/apache/hadoop/hdfs/s...

Author: suresh
Date: Mon Sep 20 23:49:35 2010
New Revision: 999162

URL: http://svn.apache.org/viewvc?rev=999162&view=rev
Log:
HDFS-1407. Change DataTransferProtocol methods to use Block instead of individual elements of Block (block ID and generation stamp).
Contributed by Suresh Srinivas.
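
In short: every DataTransferProtocol operation that previously took the
block's fields one by one (a long block ID plus a long generation stamp)
now takes the Block object itself. A minimal before/after sketch of one
sender-side call, with argument names matching the hunks below:

    // Before HDFS-1407: callers unpacked the Block themselves.
    DataTransferProtocol.Sender.opReadBlock(out, blk.getBlockId(),
        blk.getGenerationStamp(), startOffset, len, clientName, blockToken);

    // After HDFS-1407: callers pass the Block; the sender serializes the
    // identifier via Block.writeId(out), which writes the same two longs
    // as before, so the wire format is unchanged.
    DataTransferProtocol.Sender.opReadBlock(out, blk, startOffset, len,
        clientName, blockToken);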

Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/BlockReader.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSInputStream.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/Block.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestClientBlockVerification.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=999162&r1=999161&r2=999162&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Mon Sep 20 23:49:35 2010
@@ -133,6 +133,9 @@ Trunk (unreleased changes)
 
     HDFS-1093. Change the FSNamesystem lock to a read/write lock. (dhruba)
 
+    HDFS-1407. Change DataTransferProtocol methods to use Block instead 
+    of individual elements of Block. (suresh)
+
   OPTIMIZATIONS
 
     HDFS-1140. Speedup INode.getPathComponents. (Dmytro Molkov via shv)

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/BlockReader.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/BlockReader.java?rev=999162&r1=999161&r2=999162&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/BlockReader.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/BlockReader.java Mon Sep 20 23:49:35 2010
@@ -27,6 +27,7 @@ import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
+import java.net.InetSocketAddress;
 import java.net.Socket;
 import java.nio.ByteBuffer;
 
@@ -34,6 +35,7 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.fs.FSInputChecker;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PacketHeader;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
@@ -320,6 +322,7 @@ public class BlockReader extends FSInput
                        long startOffset, long firstChunkOffset,
                        long bytesToRead,
                        Socket dnSock ) {
+    // Path is used only for printing block and file information in debug
     super(new Path("/blk_" + blockId + ":of:" + file)/*too non path-like?*/,
           1, verifyChecksum,
           checksum.getChecksumSize() > 0? checksum : null, 
@@ -346,27 +349,26 @@ public class BlockReader extends FSInput
   }
 
   public static BlockReader newBlockReader(Socket sock, String file,
-      long blockId, Token<BlockTokenIdentifier> blockToken, long genStamp,
+      Block block, Token<BlockTokenIdentifier> blockToken, 
       long startOffset, long len, int bufferSize) throws IOException {
-    return newBlockReader(sock, file, blockId, blockToken, genStamp, startOffset, len, bufferSize,
+    return newBlockReader(sock, file, block, blockToken, startOffset, len, bufferSize,
         true);
   }
 
   /** Java Doc required */
-  public static BlockReader newBlockReader( Socket sock, String file, long blockId, 
+  public static BlockReader newBlockReader( Socket sock, String file, 
+                                     Block block, 
                                      Token<BlockTokenIdentifier> blockToken,
-                                     long genStamp,
                                      long startOffset, long len,
                                      int bufferSize, boolean verifyChecksum)
                                      throws IOException {
-    return newBlockReader(sock, file, blockId, blockToken, genStamp, startOffset,
+    return newBlockReader(sock, file, block, blockToken, startOffset,
                           len, bufferSize, verifyChecksum, "");
   }
 
   public static BlockReader newBlockReader( Socket sock, String file,
-                                     long blockId, 
+                                     Block block, 
                                      Token<BlockTokenIdentifier> blockToken,
-                                     long genStamp,
                                      long startOffset, long len,
                                      int bufferSize, boolean verifyChecksum,
                                      String clientName)
@@ -375,7 +377,7 @@ public class BlockReader extends FSInput
     DataTransferProtocol.Sender.opReadBlock(
         new DataOutputStream(new BufferedOutputStream(
             NetUtils.getOutputStream(sock,HdfsConstants.WRITE_TIMEOUT))),
-        blockId, genStamp, startOffset, len, clientName, blockToken);
+        block, startOffset, len, clientName, blockToken);
     
     //
     // Get bytes in block, set streams
@@ -392,12 +394,14 @@ public class BlockReader extends FSInput
             "Got access token error for OP_READ_BLOCK, self="
                 + sock.getLocalSocketAddress() + ", remote="
                 + sock.getRemoteSocketAddress() + ", for file " + file
-                + ", for block " + blockId + "_" + genStamp);
+                + ", for block " + block.getBlockId() 
+                + "_" + block.getGenerationStamp());
       } else {
         throw new IOException("Got error for OP_READ_BLOCK, self="
             + sock.getLocalSocketAddress() + ", remote="
             + sock.getRemoteSocketAddress() + ", for file " + file
-            + ", for block " + blockId + "_" + genStamp);
+            + ", for block " + block.getBlockId() + "_" 
+            + block.getGenerationStamp());
       }
     }
     DataChecksum checksum = DataChecksum.newDataChecksum( in );
@@ -413,9 +417,8 @@ public class BlockReader extends FSInput
                             startOffset + " for file " + file);
     }
 
-    return new BlockReader( file, blockId, in, checksum, verifyChecksum,
-                            startOffset, firstChunkOffset, len,
-                            sock );
+    return new BlockReader(file, block.getBlockId(), in, checksum,
+        verifyChecksum, startOffset, firstChunkOffset, len, sock);
   }
 
   @Override
@@ -449,4 +452,10 @@ public class BlockReader extends FSInput
       }
     }
   }
+  
+  // File name to print when accessing a block directory from servlets
+  public static String getFileName(final InetSocketAddress s,
+      final long blockId) {
+    return s.toString() + ":" + blockId;
+  }
 }
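
Besides the Block-based newBlockReader overloads, BlockReader gains a small
getFileName(addr, blockId) helper so servlets and tests no longer hand-build
the synthetic "addr:blockId" file name. A hedged sketch of the new call site,
where addr, blockId, genStamp, sock, blockToken, offsetIntoBlock, amtToRead
and conf stand in for caller-supplied values (the shape mirrors the JspHelper
hunk further down):

    String file = BlockReader.getFileName(addr, blockId);
    BlockReader reader = BlockReader.newBlockReader(sock, file,
        new Block(blockId, 0, genStamp), blockToken,
        offsetIntoBlock, amtToRead,
        conf.getInt("io.file.buffer.size", 4096));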

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java?rev=999162&r1=999161&r2=999162&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSClient.java Mon Sep 20 23:49:35 2010
@@ -968,8 +968,8 @@ public class DFSClient implements FSCons
                 + BLOCK_CHECKSUM + ", block=" + block);
           }
           // get block MD5
-          DataTransferProtocol.Sender.opBlockChecksum(out, block.getBlockId(),
-              block.getGenerationStamp(), lb.getBlockToken());
+          DataTransferProtocol.Sender.opBlockChecksum(out, block,
+              lb.getBlockToken());
 
           final DataTransferProtocol.Status reply = DataTransferProtocol.Status.read(in);
           if (reply != SUCCESS) {

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSInputStream.java?rev=999162&r1=999161&r2=999162&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSInputStream.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSInputStream.java Mon Sep 20 23:49:35 2010
@@ -387,9 +387,8 @@ public class DFSInputStream extends FSIn
         Block blk = targetBlock.getBlock();
         Token<BlockTokenIdentifier> accessToken = targetBlock.getBlockToken();
         
-        blockReader = BlockReader.newBlockReader(s, src, blk.getBlockId(), 
+        blockReader = BlockReader.newBlockReader(s, src, blk, 
             accessToken, 
-            blk.getGenerationStamp(),
             offsetIntoBlock, blk.getNumBytes() - offsetIntoBlock,
             buffersize, verifyChecksum, dfsClient.clientName);
         return chosenNode;
@@ -630,9 +629,8 @@ public class DFSInputStream extends FSIn
         int len = (int) (end - start + 1);
             
         reader = BlockReader.newBlockReader(dn, src, 
-                                            block.getBlock().getBlockId(),
+                                            block.getBlock(),
                                             blockToken,
-                                            block.getBlock().getGenerationStamp(),
                                             start, len, buffersize, 
                                             verifyChecksum, dfsClient.clientName);
         int nread = reader.readAll(buf, offset, len);

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=999162&r1=999161&r2=999162&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java Mon Sep 20 23:49:35 2010
@@ -893,10 +893,10 @@ class DFSOutputStream extends FSOutputSu
         blockReplyStream = new DataInputStream(NetUtils.getInputStream(s));
 
         // send the request
-        DataTransferProtocol.Sender.opWriteBlock(out,
-            block.getBlockId(), block.getGenerationStamp(),
-            nodes.length, recoveryFlag?stage.getRecoveryStage():stage, newGS,
-            block.getNumBytes(), bytesSent, dfsClient.clientName, null, nodes, accessToken);
+        DataTransferProtocol.Sender.opWriteBlock(out, block, nodes.length,
+            recoveryFlag ? stage.getRecoveryStage() : stage, newGS, 
+            block.getNumBytes(), bytesSent, dfsClient.clientName, null, nodes,
+            accessToken);
         checksum.writeHeader(out);
         out.flush();
 

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/Block.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/Block.java?rev=999162&r1=999161&r2=999162&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/Block.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/Block.java Mon Sep 20 23:49:35 2010
@@ -167,6 +167,18 @@ public class Block implements Writable, 
       throw new IOException("Unexpected block size: " + numBytes);
     }
   }
+  
+  // write only the identifier part of the block
+  public void writeId(DataOutput out) throws IOException {
+    out.writeLong(blockId);
+    out.writeLong(generationStamp);
+  }
+
+  // Read only the identifier part of the block
+  public void readId(DataInput in) throws IOException {
+    this.blockId = in.readLong();
+    this.generationStamp = in.readLong();
+  }
 
   @Override // Comparable
   public int compareTo(Block b) {
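
The new writeId/readId pair serializes only the identifier half of a Block
(blockId and generationStamp, two longs), exactly the bytes the protocol
wrote before this change; numBytes is deliberately not transmitted. A
self-contained round-trip check (hypothetical test code, not part of this
commit):

    import java.io.*;
    import org.apache.hadoop.hdfs.protocol.Block;

    public class BlockIdRoundTrip {
      public static void main(String[] args) throws IOException {
        Block sent = new Block(4567L, 0L, 1001L); // id, numBytes, genStamp
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        sent.writeId(new DataOutputStream(buf));  // writes exactly two longs

        Block received = new Block();
        received.readId(new DataInputStream(
            new ByteArrayInputStream(buf.toByteArray())));
        // blockId and generationStamp now match; numBytes is left for the
        // receiver to set (see the DataXceiver hunks below).
        assert received.getBlockId() == sent.getBlockId();
        assert received.getGenerationStamp() == sent.getGenerationStamp();
      }
    }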

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java?rev=999162&r1=999161&r2=999162&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java Mon Sep 20 23:49:35 2010
@@ -229,13 +229,13 @@ public interface DataTransferProtocol {
     }
 
     /** Send OP_READ_BLOCK */
-    public static void opReadBlock(DataOutputStream out, long blockId,
-        long blockGs, long blockOffset, long blockLen, String clientName,
-        Token<BlockTokenIdentifier> blockToken) throws IOException {
+    public static void opReadBlock(DataOutputStream out, Block blk,
+        long blockOffset, long blockLen, String clientName,
+        Token<BlockTokenIdentifier> blockToken)
+        throws IOException {
       op(out, Op.READ_BLOCK);
 
-      out.writeLong(blockId);
-      out.writeLong(blockGs);
+      blk.writeId(out);
       out.writeLong(blockOffset);
       out.writeLong(blockLen);
       Text.writeString(out, clientName);
@@ -244,15 +244,14 @@ public interface DataTransferProtocol {
     }
     
     /** Send OP_WRITE_BLOCK */
-    public static void opWriteBlock(DataOutputStream out, long blockId,
-        long blockGs, int pipelineSize, BlockConstructionStage stage,
-        long newGs, long minBytesRcvd, long maxBytesRcvd, String client,
-        DatanodeInfo src, DatanodeInfo[] targets,
-        Token<BlockTokenIdentifier> blockToken) throws IOException {
+    public static void opWriteBlock(DataOutputStream out, Block blk,
+        int pipelineSize, BlockConstructionStage stage, long newGs,
+        long minBytesRcvd, long maxBytesRcvd, String client, DatanodeInfo src,
+        DatanodeInfo[] targets, Token<BlockTokenIdentifier> blockToken)
+        throws IOException {
       op(out, Op.WRITE_BLOCK);
 
-      out.writeLong(blockId);
-      out.writeLong(blockGs);
+      blk.writeId(out);
       out.writeInt(pipelineSize);
       stage.write(out);
       WritableUtils.writeVLong(out, newGs);
@@ -274,12 +273,11 @@ public interface DataTransferProtocol {
     
     /** Send OP_REPLACE_BLOCK */
     public static void opReplaceBlock(DataOutputStream out,
-        long blockId, long blockGs, String storageId, DatanodeInfo src,
+        Block blk, String storageId, DatanodeInfo src,
         Token<BlockTokenIdentifier> blockToken) throws IOException {
       op(out, Op.REPLACE_BLOCK);
 
-      out.writeLong(blockId);
-      out.writeLong(blockGs);
+      blk.writeId(out);
       Text.writeString(out, storageId);
       src.write(out);
       blockToken.write(out);
@@ -287,25 +285,23 @@ public interface DataTransferProtocol {
     }
 
     /** Send OP_COPY_BLOCK */
-    public static void opCopyBlock(DataOutputStream out, long blockId,
-        long blockGs, Token<BlockTokenIdentifier> blockToken)
+    public static void opCopyBlock(DataOutputStream out, Block blk,
+        Token<BlockTokenIdentifier> blockToken)
         throws IOException {
       op(out, Op.COPY_BLOCK);
 
-      out.writeLong(blockId);
-      out.writeLong(blockGs);
+      blk.writeId(out);
       blockToken.write(out);
       out.flush();
     }
 
     /** Send OP_BLOCK_CHECKSUM */
-    public static void opBlockChecksum(DataOutputStream out, long blockId,
-        long blockGs, Token<BlockTokenIdentifier> blockToken)
+    public static void opBlockChecksum(DataOutputStream out, Block blk,
+        Token<BlockTokenIdentifier> blockToken)
         throws IOException {
       op(out, Op.BLOCK_CHECKSUM);
-
-      out.writeLong(blockId);
-      out.writeLong(blockGs);
+      
+      blk.writeId(out);
       blockToken.write(out);
       out.flush();
     }
@@ -350,27 +346,27 @@ public interface DataTransferProtocol {
 
     /** Receive OP_READ_BLOCK */
     private void opReadBlock(DataInputStream in) throws IOException {
-      final long blockId = in.readLong();          
-      final long blockGs = in.readLong();
+      final Block blk = new Block();
+      blk.readId(in);
       final long offset = in.readLong();
       final long length = in.readLong();
       final String client = Text.readString(in);
       final Token<BlockTokenIdentifier> blockToken = readBlockToken(in);
 
-      opReadBlock(in, blockId, blockGs, offset, length, client, blockToken);
+      opReadBlock(in, blk, offset, length, client, blockToken);
     }
 
     /**
      * Abstract OP_READ_BLOCK method. Read a block.
      */
-    protected abstract void opReadBlock(DataInputStream in, long blockId,
-        long blockGs, long offset, long length, String client,
+    protected abstract void opReadBlock(DataInputStream in, Block blk,
+        long offset, long length, String client,
         Token<BlockTokenIdentifier> blockToken) throws IOException;
     
     /** Receive OP_WRITE_BLOCK */
     private void opWriteBlock(DataInputStream in) throws IOException {
-      final long blockId = in.readLong();          
-      final long blockGs = in.readLong();
+      final Block blk = new Block();
+      blk.readId(in);
       final int pipelineSize = in.readInt(); // num of datanodes in entire pipeline
       final BlockConstructionStage stage = 
         BlockConstructionStage.readFields(in);
@@ -390,7 +386,7 @@ public interface DataTransferProtocol {
       }
       final Token<BlockTokenIdentifier> blockToken = readBlockToken(in);
 
-      opWriteBlock(in, blockId, blockGs, pipelineSize, stage,
+      opWriteBlock(in, blk, pipelineSize, stage,
           newGs, minBytesRcvd, maxBytesRcvd, client, src, targets, blockToken);
     }
 
@@ -398,22 +394,21 @@ public interface DataTransferProtocol {
      * Abstract OP_WRITE_BLOCK method. 
      * Write a block.
      */
-    protected abstract void opWriteBlock(DataInputStream in,
-        long blockId, long blockGs,
-        int pipelineSize, BlockConstructionStage stage,
-        long newGs, long minBytesRcvd, long maxBytesRcvd,
-        String client, DatanodeInfo src, DatanodeInfo[] targets,
-        Token<BlockTokenIdentifier> blockToken) throws IOException;
+    protected abstract void opWriteBlock(DataInputStream in, Block blk,
+        int pipelineSize, BlockConstructionStage stage, long newGs,
+        long minBytesRcvd, long maxBytesRcvd, String client, DatanodeInfo src,
+        DatanodeInfo[] targets, Token<BlockTokenIdentifier> blockToken)
+        throws IOException;
 
     /** Receive OP_REPLACE_BLOCK */
     private void opReplaceBlock(DataInputStream in) throws IOException {
-      final long blockId = in.readLong();          
-      final long blockGs = in.readLong();
+      final Block blk = new Block();
+      blk.readId(in);
       final String sourceId = Text.readString(in); // read del hint
       final DatanodeInfo src = DatanodeInfo.read(in); // read proxy source
       final Token<BlockTokenIdentifier> blockToken = readBlockToken(in);
 
-      opReplaceBlock(in, blockId, blockGs, sourceId, src, blockToken);
+      opReplaceBlock(in, blk, sourceId, src, blockToken);
     }
 
     /**
@@ -421,41 +416,41 @@ public interface DataTransferProtocol {
      * It is used for balancing purpose; send to a destination
      */
     protected abstract void opReplaceBlock(DataInputStream in,
-        long blockId, long blockGs, String sourceId, DatanodeInfo src,
+        Block blk, String sourceId, DatanodeInfo src,
         Token<BlockTokenIdentifier> blockToken) throws IOException;
 
     /** Receive OP_COPY_BLOCK */
     private void opCopyBlock(DataInputStream in) throws IOException {
-      final long blockId = in.readLong();          
-      final long blockGs = in.readLong();
+      final Block blk = new Block();
+      blk.readId(in);
       final Token<BlockTokenIdentifier> blockToken = readBlockToken(in);
 
-      opCopyBlock(in, blockId, blockGs, blockToken);
+      opCopyBlock(in, blk, blockToken);
     }
 
     /**
      * Abstract OP_COPY_BLOCK method. It is used for balancing purpose; send to
      * a proxy source.
      */
-    protected abstract void opCopyBlock(DataInputStream in, long blockId,
-        long blockGs, Token<BlockTokenIdentifier> blockToken)
+    protected abstract void opCopyBlock(DataInputStream in, Block blk,
+        Token<BlockTokenIdentifier> blockToken)
         throws IOException;
 
     /** Receive OP_BLOCK_CHECKSUM */
     private void opBlockChecksum(DataInputStream in) throws IOException {
-      final long blockId = in.readLong();          
-      final long blockGs = in.readLong();
+      final Block blk = new Block();
+      blk.readId(in);
       final Token<BlockTokenIdentifier> blockToken = readBlockToken(in);
 
-      opBlockChecksum(in, blockId, blockGs, blockToken);
+      opBlockChecksum(in, blk, blockToken);
     }
 
     /**
      * Abstract OP_BLOCK_CHECKSUM method.
      * Get the checksum of a block 
      */
-    protected abstract void opBlockChecksum(DataInputStream in, long blockId,
-        long blockGs, Token<BlockTokenIdentifier> blockToken)
+    protected abstract void opBlockChecksum(DataInputStream in,
+        Block blk, Token<BlockTokenIdentifier> blockToken)
         throws IOException;
 
     /** Read an AccessToken */
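
On the receive side each op now materializes a Block up front
(new Block(); blk.readId(in)) and hands it to the abstract handler, so
concrete receivers such as DataXceiver below accept a Block instead of
reconstructing one from raw longs. On the send side the new argument order
is easiest to see on OP_WRITE_BLOCK; a hedged sketch with placeholder
values, mirroring the updated test code near the end of this commit:

    DataTransferProtocol.Sender.opWriteBlock(sendOut,
        new Block(blockId), 0 /* pipelineSize */,
        BlockConstructionStage.PIPELINE_SETUP_CREATE,
        0L /* newGs */, 0L /* minBytesRcvd */, 0L /* maxBytesRcvd */,
        "cl", null /* src */, new DatanodeInfo[1],
        BlockTokenSecretManager.DUMMY_TOKEN);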

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java?rev=999162&r1=999161&r2=999162&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java Mon Sep 20 23:49:35 2010
@@ -377,8 +377,8 @@ public class Balancer implements Tool {
             BlockTokenSecretManager.AccessMode.COPY));
       }
       DataTransferProtocol.Sender.opReplaceBlock(out,
-          block.getBlock().getBlockId(), block.getBlock().getGenerationStamp(),
-          source.getStorageID(), proxySource.getDatanode(), accessToken);
+          block.getBlock(), source.getStorageID(), 
+          proxySource.getDatanode(), accessToken);
     }
     
     /* Receive a block copy response from the input stream */ 
@@ -1469,7 +1469,7 @@ public class Balancer implements Tool {
   final public static int ILLEGAL_ARGS = -5;
   /** main method of Balancer
    * @param args arguments to a Balancer
-   * @exception any exception occurs during datanode balancing
+   * @throws Exception exception that occurred during datanode balancing
    */
   public int run(String[] args) throws Exception {
     long startTime = Util.now();

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/common/JspHelper.java?rev=999162&r1=999161&r2=999162&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/common/JspHelper.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/common/JspHelper.java Mon Sep 20 23:49:35 2010
@@ -43,6 +43,7 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.BlockReader;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -179,10 +180,10 @@ public class JspHelper {
     return chosenNode;
   }
 
-  public static void streamBlockInAscii(InetSocketAddress addr, long blockId,
-      Token<BlockTokenIdentifier> blockToken, long genStamp, long blockSize,
-      long offsetIntoBlock, long chunkSizeToView, JspWriter out,
-      Configuration conf) throws IOException {
+  public static void streamBlockInAscii(InetSocketAddress addr, 
+      long blockId, Token<BlockTokenIdentifier> blockToken, long genStamp,
+      long blockSize, long offsetIntoBlock, long chunkSizeToView,
+      JspWriter out, Configuration conf) throws IOException {
     if (chunkSizeToView == 0) return;
     Socket s = new Socket();
     s.connect(addr, HdfsConstants.READ_TIMEOUT);
@@ -191,12 +192,10 @@ public class JspHelper {
       long amtToRead = Math.min(chunkSizeToView, blockSize - offsetIntoBlock);     
       
       // Use the block name for file name. 
-      BlockReader blockReader = 
-        BlockReader.newBlockReader(s, addr.toString() + ":" + blockId,
-                                             blockId, blockToken, genStamp ,offsetIntoBlock, 
-                                             amtToRead, 
-                                             conf.getInt("io.file.buffer.size",
-                                                         4096));
+      String file = BlockReader.getFileName(addr, blockId);
+      BlockReader blockReader = BlockReader.newBlockReader(s, file,
+        new Block(blockId, 0, genStamp), blockToken,
+        offsetIntoBlock, amtToRead, conf.getInt("io.file.buffer.size", 4096));
         
     byte[] buf = new byte[(int)amtToRead];
     int readOffset = 0;

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=999162&r1=999161&r2=999162&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Mon Sep 20 23:49:35 2010
@@ -1335,8 +1335,8 @@ public class DataNode extends Configured
         out = new DataOutputStream(new BufferedOutputStream(baseStream, 
                                                             SMALL_BUFFER_SIZE));
 
-        blockSender = new BlockSender(b, 0, b.getNumBytes(), false, false, false, 
-            datanode);
+        blockSender = new BlockSender(b, 0, b.getNumBytes(), 
+            false, false, false, datanode);
         DatanodeInfo srcNode = new DatanodeInfo(dnRegistration);
 
         //
@@ -1348,8 +1348,7 @@ public class DataNode extends Configured
           EnumSet.of(BlockTokenSecretManager.AccessMode.WRITE));
         }
         DataTransferProtocol.Sender.opWriteBlock(out,
-            b.getBlockId(), b.getGenerationStamp(), 0, 
-            BlockConstructionStage.PIPELINE_SETUP_CREATE, 0, 0, 0, "",
+            b, 0, BlockConstructionStage.PIPELINE_SETUP_CREATE, 0, 0, 0, "",
             srcNode, targets, accessToken);
 
         // send data & checksum

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java?rev=999162&r1=999161&r2=999162&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java Mon Sep 20 23:49:35 2010
@@ -129,10 +129,9 @@ class DataXceiver extends DataTransferPr
    * Read a block from the disk.
    */
   @Override
-  protected void opReadBlock(DataInputStream in, long blockId, long blockGs,
+  protected void opReadBlock(DataInputStream in, Block block,
       long startOffset, long length, String clientName,
       Token<BlockTokenIdentifier> blockToken) throws IOException {
-    final Block block = new Block(blockId, 0 , blockGs);
     OutputStream baseStream = NetUtils.getOutputStream(s, 
         datanode.socketWriteTimeout);
     DataOutputStream out = new DataOutputStream(
@@ -217,7 +216,7 @@ class DataXceiver extends DataTransferPr
    * Write a block to disk.
    */
   @Override
-  protected void opWriteBlock(DataInputStream in, long blockId, long blockGs,
+  protected void opWriteBlock(DataInputStream in, Block block, 
       int pipelineSize, BlockConstructionStage stage,
       long newGs, long minBytesRcvd, long maxBytesRcvd,
       String client, DatanodeInfo srcDataNode, DatanodeInfo[] targets,
@@ -228,8 +227,7 @@ class DataXceiver extends DataTransferPr
                 " tcp no delay " + s.getTcpNoDelay());
     }
 
-    final Block block = new Block(blockId, dataXceiverServer.estimateBlockSize,
-        blockGs);
+    block.setNumBytes(dataXceiverServer.estimateBlockSize);
     LOG.info("Receiving block " + block + 
              " src: " + remoteAddress +
              " dest: " + localAddress);
@@ -303,9 +301,8 @@ class DataXceiver extends DataTransferPr
           mirrorIn = new DataInputStream(NetUtils.getInputStream(mirrorSock));
 
           // Write header: Copied from DFSClient.java!
-          DataTransferProtocol.Sender.opWriteBlock(mirrorOut,
-              blockId, blockGs, 
-              pipelineSize, stage, newGs, minBytesRcvd, maxBytesRcvd, client, 
+          DataTransferProtocol.Sender.opWriteBlock(mirrorOut, block,
+              pipelineSize, stage, newGs, minBytesRcvd, maxBytesRcvd, client,
               srcDataNode, targets, blockToken);
 
           if (blockReceiver != null) { // send checksum header
@@ -409,9 +406,8 @@ class DataXceiver extends DataTransferPr
    * Get block checksum (MD5 of CRC32).
    */
   @Override
-  protected void opBlockChecksum(DataInputStream in, long blockId,
-      long blockGs, Token<BlockTokenIdentifier> blockToken) throws IOException {
-    final Block block = new Block(blockId, 0 , blockGs);
+  protected void opBlockChecksum(DataInputStream in, Block block,
+      Token<BlockTokenIdentifier> blockToken) throws IOException {
     DataOutputStream out = new DataOutputStream(NetUtils.getOutputStream(s,
         datanode.socketWriteTimeout));
     if (datanode.isBlockTokenEnabled) {
@@ -433,7 +429,8 @@ class DataXceiver extends DataTransferPr
       }
     }
 
-    final MetaDataInputStream metadataIn = datanode.data.getMetaDataInputStream(block);
+    final MetaDataInputStream metadataIn = 
+      datanode.data.getMetaDataInputStream(block);
     final DataInputStream checksumIn = new DataInputStream(new BufferedInputStream(
         metadataIn, BUFFER_SIZE));
 
@@ -473,10 +470,9 @@ class DataXceiver extends DataTransferPr
    * Read a block from the disk and then sends it to a destination.
    */
   @Override
-  protected void opCopyBlock(DataInputStream in, long blockId, long blockGs,
+  protected void opCopyBlock(DataInputStream in, Block block,
       Token<BlockTokenIdentifier> blockToken) throws IOException {
     // Read in the header
-    Block block = new Block(blockId, 0, blockGs);
     if (datanode.isBlockTokenEnabled) {
       try {
         datanode.blockTokenSecretManager.checkAccess(blockToken, null, block,
@@ -492,7 +488,7 @@ class DataXceiver extends DataTransferPr
     }
 
     if (!dataXceiverServer.balanceThrottler.acquire()) { // not able to start
-      LOG.info("Not able to copy block " + blockId + " to " 
+      LOG.info("Not able to copy block " + block.getBlockId() + " to " 
           + s.getRemoteSocketAddress() + " because threads quota is exceeded.");
       sendResponse(s, ERROR, datanode.socketWriteTimeout);
       return;
@@ -549,11 +545,10 @@ class DataXceiver extends DataTransferPr
    */
   @Override
   protected void opReplaceBlock(DataInputStream in,
-      long blockId, long blockGs, String sourceID, DatanodeInfo proxySource,
+      Block block, String sourceID, DatanodeInfo proxySource,
       Token<BlockTokenIdentifier> blockToken) throws IOException {
     /* read header */
-    final Block block = new Block(blockId, dataXceiverServer.estimateBlockSize,
-        blockGs);
+    block.setNumBytes(dataXceiverServer.estimateBlockSize);
     if (datanode.isBlockTokenEnabled) {
       try {
         datanode.blockTokenSecretManager.checkAccess(blockToken, null, block,
@@ -568,7 +563,7 @@ class DataXceiver extends DataTransferPr
     }
 
     if (!dataXceiverServer.balanceThrottler.acquire()) { // not able to start
-      LOG.warn("Not able to receive block " + blockId + " from " 
+      LOG.warn("Not able to receive block " + block.getBlockId() + " from " 
           + s.getRemoteSocketAddress() + " because threads quota is exceeded.");
       sendResponse(s, ERROR, datanode.socketWriteTimeout);
       return;
@@ -594,8 +589,7 @@ class DataXceiver extends DataTransferPr
                      new BufferedOutputStream(baseStream, SMALL_BUFFER_SIZE));
 
       /* send request to the proxy */
-      DataTransferProtocol.Sender.opCopyBlock(proxyOut, block.getBlockId(),
-          block.getGenerationStamp(), blockToken);
+      DataTransferProtocol.Sender.opCopyBlock(proxyOut, block, blockToken);
 
       // receive the response from the proxy
       proxyReply = new DataInputStream(new BufferedInputStream(

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java?rev=999162&r1=999161&r2=999162&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java Mon Sep 20 23:49:35 2010
@@ -372,7 +372,7 @@ public class DatanodeJspHelper {
       out.print("Invalid input (filename absent)");
       return;
     }
-
+    
     final Long blockId = JspHelper.validateLong(req.getParameter("blockId"));
     if (blockId == null) {
       out.print("Invalid input (blockId absent)");
@@ -406,9 +406,8 @@ public class DatanodeJspHelper {
       return;
     }
 
-    String blockSizeStr;
     long blockSize = 0;
-    blockSizeStr = req.getParameter("blockSize");
+    final String blockSizeStr = req.getParameter("blockSize");
     if (blockSizeStr == null) {
       out.print("Invalid input (blockSize absent)");
       return;

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java?rev=999162&r1=999161&r2=999162&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java Mon Sep 20 23:49:35 2010
@@ -502,14 +502,9 @@ public class NamenodeFsck {
         s.connect(targetAddr, HdfsConstants.READ_TIMEOUT);
         s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
         
-        blockReader = 
-          BlockReader.newBlockReader(s, targetAddr.toString() + ":" + 
-                                               block.getBlockId(), 
-                                               block.getBlockId(), 
-                                               lblock.getBlockToken(),
-                                               block.getGenerationStamp(), 
-                                               0, -1,
-                                               conf.getInt("io.file.buffer.size", 4096));
+        String file = BlockReader.getFileName(targetAddr, block.getBlockId());
+        blockReader = BlockReader.newBlockReader(s, file, block, lblock
+            .getBlockToken(), 0, -1, conf.getInt("io.file.buffer.size", 4096));
         
       }  catch (IOException ex) {
         // Put chosen node into dead list, continue

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestClientBlockVerification.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestClientBlockVerification.java?rev=999162&r1=999161&r2=999162&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestClientBlockVerification.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestClientBlockVerification.java Mon Sep 20 23:49:35 2010
@@ -82,7 +82,6 @@ public class TestClientBlockVerification
     int offset, int lenToRead) throws IOException {
     InetSocketAddress targetAddr = null;
     Socket s = null;
-    BlockReader blockReader = null;
     Block block = testBlock.getBlock();
     DatanodeInfo[] nodes = testBlock.getLocations();
     targetAddr = NetUtils.createSocketAddr(nodes[0].getName());
@@ -90,11 +89,11 @@ public class TestClientBlockVerification
     s.connect(targetAddr, HdfsConstants.READ_TIMEOUT);
     s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
 
-    return BlockReader.newBlockReader(
-      s, targetAddr.toString()+ ":" + block.getBlockId(), block.getBlockId(),
-      testBlock.getBlockToken(), block.getGenerationStamp(),
-      offset, lenToRead,
-      conf.getInt("io.file.buffer.size", 4096));
+    String file = BlockReader.getFileName(targetAddr,
+        block.getBlockId());
+    return BlockReader.newBlockReader(s, file, block,
+        testBlock.getBlockToken(), offset, lenToRead, conf.getInt(
+            "io.file.buffer.size", 4096));
   }
 
   /**

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java?rev=999162&r1=999161&r2=999162&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java Mon Sep 20 23:49:35 2010
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op.READ_BLOCK;
 import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op.WRITE_BLOCK;
 import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PacketHeader;
 import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck;
@@ -172,9 +171,8 @@ public class TestDataTransferProtocol ex
       String description, Boolean eofExcepted) throws IOException {
     sendBuf.reset();
     recvBuf.reset();
-    DataTransferProtocol.Sender.opWriteBlock(sendOut, 
-        block.getBlockId(), block.getGenerationStamp(), 0,
-        stage, newGS, block.getNumBytes(), block.getNumBytes(), "cl", null,
+    DataTransferProtocol.Sender.opWriteBlock(sendOut, block, 0, stage, newGS,
+        block.getNumBytes(), block.getNumBytes(), "cl", null,
         new DatanodeInfo[1], BlockTokenSecretManager.DUMMY_TOKEN);
     if (eofExcepted) {
       ERROR.write(recvOut);
@@ -359,7 +357,7 @@ public class TestDataTransferProtocol ex
     /* Test OP_WRITE_BLOCK */
     sendBuf.reset();
     DataTransferProtocol.Sender.opWriteBlock(sendOut, 
-        newBlockId, 0L, 0,
+        new Block(newBlockId), 0,
         BlockConstructionStage.PIPELINE_SETUP_CREATE, 0L, 0L, 0L, "cl", null,
         new DatanodeInfo[1], BlockTokenSecretManager.DUMMY_TOKEN);
     sendOut.writeByte((byte)DataChecksum.CHECKSUM_CRC32);
@@ -373,7 +371,7 @@ public class TestDataTransferProtocol ex
     sendBuf.reset();
     recvBuf.reset();
     DataTransferProtocol.Sender.opWriteBlock(sendOut,
-        ++newBlockId, 0L, 0,
+        new Block(++newBlockId), 0,
         BlockConstructionStage.PIPELINE_SETUP_CREATE, 0L, 0L, 0L, "cl", null,
         new DatanodeInfo[1], BlockTokenSecretManager.DUMMY_TOKEN);
     sendOut.writeByte((byte)DataChecksum.CHECKSUM_CRC32);
@@ -397,7 +395,7 @@ public class TestDataTransferProtocol ex
     sendBuf.reset();
     recvBuf.reset();
     DataTransferProtocol.Sender.opWriteBlock(sendOut, 
-        ++newBlockId, 0L, 0,
+        new Block(++newBlockId), 0,
         BlockConstructionStage.PIPELINE_SETUP_CREATE, 0L, 0L, 0L, "cl", null,
         new DatanodeInfo[1], BlockTokenSecretManager.DUMMY_TOKEN);
     sendOut.writeByte((byte)DataChecksum.CHECKSUM_CRC32);
@@ -420,44 +418,28 @@ public class TestDataTransferProtocol ex
     
     /* Test OP_READ_BLOCK */
 
+    Block blk = new Block(firstBlock);
+    long blkid = blk.getBlockId();
     // bad block id
     sendBuf.reset();
     recvBuf.reset();
-    sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
-    READ_BLOCK.write(sendOut);
-    newBlockId = firstBlock.getBlockId()-1;
-    sendOut.writeLong(newBlockId);
-    sendOut.writeLong(firstBlock.getGenerationStamp());
-    sendOut.writeLong(0L);
-    sendOut.writeLong(fileLen);
-    ERROR.write(recvOut);
-    Text.writeString(sendOut, "cl");
-    BlockTokenSecretManager.DUMMY_TOKEN.write(sendOut);
+    blk.setBlockId(blkid-1);
+    DataTransferProtocol.Sender.opReadBlock(sendOut, blk, 0L, fileLen, "cl",
+          BlockTokenSecretManager.DUMMY_TOKEN);
     sendRecvData("Wrong block ID " + newBlockId + " for read", false); 
 
-    // negative block start offset
+    // negative block start offset -1L
     sendBuf.reset();
-    sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
-    READ_BLOCK.write(sendOut);
-    sendOut.writeLong(firstBlock.getBlockId());
-    sendOut.writeLong(firstBlock.getGenerationStamp());
-    sendOut.writeLong(-1L);
-    sendOut.writeLong(fileLen);
-    Text.writeString(sendOut, "cl");
-    BlockTokenSecretManager.DUMMY_TOKEN.write(sendOut);
+    blk.setBlockId(blkid);
+    DataTransferProtocol.Sender.opReadBlock(sendOut, blk, -1L, fileLen, "cl",
+          BlockTokenSecretManager.DUMMY_TOKEN);
     sendRecvData("Negative start-offset for read for block " + 
                  firstBlock.getBlockId(), false);
 
     // bad block start offset
     sendBuf.reset();
-    sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
-    READ_BLOCK.write(sendOut);
-    sendOut.writeLong(firstBlock.getBlockId());
-    sendOut.writeLong(firstBlock.getGenerationStamp());
-    sendOut.writeLong(fileLen);
-    sendOut.writeLong(fileLen);
-    Text.writeString(sendOut, "cl");
-    BlockTokenSecretManager.DUMMY_TOKEN.write(sendOut);
+    DataTransferProtocol.Sender.opReadBlock(sendOut, blk, fileLen, fileLen, "cl",
+          BlockTokenSecretManager.DUMMY_TOKEN);
     sendRecvData("Wrong start-offset for reading block " +
                  firstBlock.getBlockId(), false);
     
@@ -465,14 +447,8 @@ public class TestDataTransferProtocol ex
     recvBuf.reset();
     SUCCESS.write(recvOut);    
     sendBuf.reset();
-    sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
-    READ_BLOCK.write(sendOut);
-    sendOut.writeLong(firstBlock.getBlockId());
-    sendOut.writeLong(firstBlock.getGenerationStamp());
-    sendOut.writeLong(0);
-    sendOut.writeLong(-1-random.nextInt(oneMil));
-    Text.writeString(sendOut, "cl");
-    BlockTokenSecretManager.DUMMY_TOKEN.write(sendOut);
+    DataTransferProtocol.Sender.opReadBlock(sendOut, blk, 0L, 
+        -1 - random.nextInt(oneMil), "cl", BlockTokenSecretManager.DUMMY_TOKEN);
     sendRecvData("Negative length for reading block " +
                  firstBlock.getBlockId(), false);
     
@@ -480,27 +456,15 @@ public class TestDataTransferProtocol ex
     recvBuf.reset();
     ERROR.write(recvOut);    
     sendBuf.reset();
-    sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
-    READ_BLOCK.write(sendOut);
-    sendOut.writeLong(firstBlock.getBlockId());
-    sendOut.writeLong(firstBlock.getGenerationStamp());
-    sendOut.writeLong(0);
-    sendOut.writeLong(fileLen + 1);
-    Text.writeString(sendOut, "cl");
-    BlockTokenSecretManager.DUMMY_TOKEN.write(sendOut);
+    DataTransferProtocol.Sender.opReadBlock(sendOut, blk, 0L, 
+        fileLen + 1, "cl", BlockTokenSecretManager.DUMMY_TOKEN);
     sendRecvData("Wrong length for reading block " +
                  firstBlock.getBlockId(), false);
     
     //At the end of all this, read the file to make sure that succeeds finally.
     sendBuf.reset();
-    sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
-    READ_BLOCK.write(sendOut);
-    sendOut.writeLong(firstBlock.getBlockId());
-    sendOut.writeLong(firstBlock.getGenerationStamp());
-    sendOut.writeLong(0);
-    sendOut.writeLong(fileLen);
-    Text.writeString(sendOut, "cl");
-    BlockTokenSecretManager.DUMMY_TOKEN.write(sendOut);
+    DataTransferProtocol.Sender.opReadBlock(sendOut, blk, 0L, 
+        fileLen, "cl", BlockTokenSecretManager.DUMMY_TOKEN);
     readFile(fileSys, file, fileLen);
     } finally {
       cluster.shutdown();

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java?rev=999162&r1=999161&r2=999162&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java Mon Sep 20 23:49:35 2010
@@ -247,13 +247,10 @@ public class TestDataNodeVolumeFailure e
     s.connect(targetAddr, HdfsConstants.READ_TIMEOUT);
     s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
 
+    String file = BlockReader.getFileName(targetAddr, block.getBlockId());
     blockReader = 
-      BlockReader.newBlockReader(s, targetAddr.toString() + ":" + 
-          block.getBlockId(), 
-          block.getBlockId(), 
-          lblock.getBlockToken(),
-          block.getGenerationStamp(), 
-          0, -1, 4096);
+      BlockReader.newBlockReader(s, file, block, lblock
+        .getBlockToken(), 0, -1, 4096);
 
    // nothing - if it fails - it will throw an exception
   }

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java?rev=999162&r1=999161&r2=999162&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java Mon Sep 20 23:49:35 2010
@@ -117,8 +117,7 @@ public class TestDiskError extends TestC
       DataOutputStream out = new DataOutputStream(
           s.getOutputStream());
 
-      Sender.opWriteBlock(out, block.getBlock().getBlockId(), 
-          block.getBlock().getGenerationStamp(), 1, 
+      Sender.opWriteBlock(out, block.getBlock(), 1, 
           BlockConstructionStage.PIPELINE_SETUP_CREATE, 
           0L, 0L, 0L, "", null, new DatanodeInfo[0], 
           BlockTokenSecretManager.DUMMY_TOKEN);

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java?rev=999162&r1=999161&r2=999162&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java Mon Sep 20 23:49:35 2010
@@ -130,11 +130,10 @@ public class TestBlockTokenWithDFS exten
       s.connect(targetAddr, HdfsConstants.READ_TIMEOUT);
       s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
 
-      blockReader = BlockReader.newBlockReader(s, targetAddr
-          .toString()
-          + ":" + block.getBlockId(), block.getBlockId(), lblock
-          .getBlockToken(), block.getGenerationStamp(), 0, -1, conf.getInt(
-          "io.file.buffer.size", 4096));
+      String file = BlockReader.getFileName(targetAddr, block.getBlockId());
+      blockReader = BlockReader.newBlockReader(s, file, block, 
+          lblock.getBlockToken(), 0, -1, 
+          conf.getInt("io.file.buffer.size", 4096));
 
     } catch (IOException ex) {
       if (ex instanceof InvalidBlockTokenException) {