Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2010/09/04 01:38:23 UTC

svn commit: r992508 [1/2] - in /hadoop/hdfs/branches/HDFS-1052: ./ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/protocol/ src/java/org/apache/hadoop/hdfs/security/token/block/ src/java/org/apache/hadoop/hdfs/server/datanode/ src/jav...

Author: suresh
Date: Fri Sep  3 23:38:21 2010
New Revision: 992508

URL: http://svn.apache.org/viewvc?rev=992508&view=rev
Log:
HDFS-1359 Add BlockPoolID to Block. Contributed by Suresh Srinivas.
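
For orientation: a Block identifies a block by (blockId, numBytes, generationStamp) within a single namespace. With federation (HDFS-1052), several block pools can share one datanode, so the new ExtendedBlock pairs a pool ID with the block to make the identity globally unique. A minimal sketch of the idea, using only the constructors and accessors added in this commit (the pool name and numeric values are hypothetical):

    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

    public class ExtendedBlockIdea {
      public static void main(String[] args) {
        // Pool name and numbers are hypothetical placeholders.
        ExtendedBlock eb = new ExtendedBlock("example-pool", 42L, 1024L, 7L);
        // getBlockName() prefixes the pool ID, along the lines of
        // "example-pool:blk_42_7".
        System.out.println(eb.getBlockName());
      }
    }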

Added:
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
Modified:
    hadoop/hdfs/branches/HDFS-1052/CHANGES.txt
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSInputStream.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/Block.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
    hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestBlockMissingException.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestClientBlockVerification.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend4.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlocksWithNotEnoughRacks.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java
    hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java
    hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
    hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java

Modified: hadoop/hdfs/branches/HDFS-1052/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/CHANGES.txt?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/CHANGES.txt (original)
+++ hadoop/hdfs/branches/HDFS-1052/CHANGES.txt Fri Sep  3 23:38:21 2010
@@ -33,6 +33,8 @@ Trunk (unreleased changes)
     HDFS piggyback block locations to each file status when listing a
     directory.  (hairong)
 
+    HDFS-1359. Add BlockPoolID to Block. (suresh)
+
   IMPROVEMENTS
 
     HDFS-1096. fix for prev. commit. (boryas)

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSClient.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSClient.java Fri Sep  3 23:38:21 2010
@@ -63,7 +63,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -95,7 +95,6 @@ import org.apache.hadoop.ipc.Client;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
@@ -196,7 +195,7 @@ public class DFSClient implements FSCons
       ClientDatanodeProtocol.LOG.info("ClientDatanodeProtocol addr=" + addr);
     }
     UserGroupInformation ticket = UserGroupInformation
-        .createRemoteUser(locatedBlock.getBlock().toString());
+        .createRemoteUser(locatedBlock.getBlock().getLocalBlock().toString());
     ticket.addToken(locatedBlock.getBlockToken());
     return (ClientDatanodeProtocol)RPC.getProxy(ClientDatanodeProtocol.class,
         ClientDatanodeProtocol.versionID, addr, ticket, conf, NetUtils
@@ -940,7 +939,7 @@ public class DFSClient implements FSCons
         refetchBlocks = false;
       }
       LocatedBlock lb = locatedblocks.get(i);
-      final Block block = lb.getBlock();
+      final ExtendedBlock block = lb.getBlock();
       final DatanodeInfo[] datanodes = lb.getLocations();
       
       //try each datanode location of the block
@@ -1479,7 +1478,7 @@ public class DFSClient implements FSCons
     /**
      * Returns the block containing the target position. 
      */
-    public Block getCurrentBlock() {
+    public ExtendedBlock getCurrentBlock() {
       return ((DFSInputStream)in).getCurrentBlock();
     }
 
@@ -1498,7 +1497,7 @@ public class DFSClient implements FSCons
     }
   }
 
-  void reportChecksumFailure(String file, Block blk, DatanodeInfo dn) {
+  void reportChecksumFailure(String file, ExtendedBlock blk, DatanodeInfo dn) {
     DatanodeInfo [] dnArr = { dn };
     LocatedBlock [] lblocks = { new LocatedBlock(blk, dnArr) };
     reportChecksumFailure(file, lblocks);

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSInputStream.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSInputStream.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSInputStream.java Fri Sep  3 23:38:21 2010
@@ -30,7 +30,7 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.fs.UnresolvedLinkException;
-import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -61,7 +61,7 @@ public class DFSInputStream extends FSIn
   private LocatedBlocks locatedBlocks = null;
   private long lastBlockBeingWrittenLength = 0;
   private DatanodeInfo currentNode = null;
-  private Block currentBlock = null;
+  private ExtendedBlock currentBlock = null;
   private long pos = 0;
   private long blockEnd = -1;
 
@@ -195,7 +195,7 @@ public class DFSInputStream extends FSIn
   /**
    * Returns the block containing the target position. 
    */
-  public Block getCurrentBlock() {
+  public ExtendedBlock getCurrentBlock() {
     return currentBlock;
   }
 
@@ -375,7 +375,7 @@ public class DFSInputStream extends FSIn
         s = dfsClient.socketFactory.createSocket();
         NetUtils.connect(s, targetAddr, dfsClient.socketTimeout);
         s.setSoTimeout(dfsClient.socketTimeout);
-        Block blk = targetBlock.getBlock();
+        ExtendedBlock blk = targetBlock.getBlock();
         Token<BlockTokenIdentifier> accessToken = targetBlock.getBlockToken();
         
         blockReader = BlockReader.newBlockReader(s, src, blk.getBlockId(), 

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DFSOutputStream.java Fri Sep  3 23:38:21 2010
@@ -42,18 +42,17 @@ import org.apache.hadoop.fs.ParentNotDir
 import org.apache.hadoop.fs.Syncable;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage;
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
@@ -272,7 +271,7 @@ class DFSOutputStream extends FSOutputSu
   //
   class DataStreamer extends Daemon {
     private volatile boolean streamerClosed = false;
-    private Block block; // its length is number of bytes acked
+    private ExtendedBlock block; // its length is number of bytes acked
     private Token<BlockTokenIdentifier> accessToken;
     private DataOutputStream blockStream;
     private DataInputStream blockReplyStream;
@@ -788,8 +787,8 @@ class DFSOutputStream extends FSOutputSu
 
       if (success) {
         // update pipeline at the namenode
-        Block newBlock = new Block(
-            block.getBlockId(), block.getNumBytes(), newGS);
+        ExtendedBlock newBlock = new ExtendedBlock(
+            block.getPoolId(), block.getBlockId(), block.getNumBytes(), newGS);
         dfsClient.namenode.updatePipeline(dfsClient.clientName, block, newBlock, nodes);
         // update client side generation stamp
         block = newBlock;
@@ -987,7 +986,7 @@ class DFSOutputStream extends FSOutputSu
       } 
     }
 
-    Block getBlock() {
+    ExtendedBlock getBlock() {
       return block;
     }
 
@@ -1413,7 +1412,7 @@ class DFSOutputStream extends FSOutputSu
 
       flushInternal();             // flush all data to Datanodes
       // get last block before destroying the streamer
-      Block lastBlock = streamer.getBlock();
+      ExtendedBlock lastBlock = streamer.getBlock();
       closeThreads(false);
       completeFile(lastBlock);
       dfsClient.leasechecker.remove(src);
@@ -1424,7 +1423,7 @@ class DFSOutputStream extends FSOutputSu
 
   // should be called holding (this) lock since setTestFilename() may 
   // be called during unit tests
-  private void completeFile(Block last) throws IOException {
+  private void completeFile(ExtendedBlock last) throws IOException {
     long localstart = System.currentTimeMillis();
     boolean fileComplete = false;
     while (!fileComplete) {

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Fri Sep  3 23:38:21 2010
@@ -45,9 +45,9 @@ import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
@@ -688,7 +688,7 @@ public class DistributedFileSystem exten
 
     // Find block in data stream.
     DFSClient.DFSDataInputStream dfsIn = (DFSClient.DFSDataInputStream) in;
-    Block dataBlock = dfsIn.getCurrentBlock();
+    ExtendedBlock dataBlock = dfsIn.getCurrentBlock();
     if (dataBlock == null) {
       LOG.error("Error: Current block in data stream is null! ");
       return false;
@@ -701,7 +701,7 @@ public class DistributedFileSystem exten
 
     // Find block in checksum stream
     DFSClient.DFSDataInputStream dfsSums = (DFSClient.DFSDataInputStream) sums;
-    Block sumsBlock = dfsSums.getCurrentBlock();
+    ExtendedBlock sumsBlock = dfsSums.getCurrentBlock();
     if (sumsBlock == null) {
       LOG.error("Error: Current block in checksum stream is null! ");
       return false;

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/Block.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/Block.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/Block.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/Block.java Fri Sep  3 23:38:21 2010
@@ -153,13 +153,23 @@ public class Block implements Writable, 
   /////////////////////////////////////
   // Writable
   /////////////////////////////////////
+  @Override // Writable
   public void write(DataOutput out) throws IOException {
+    writeHelper(out);
+  }
+
+  @Override // Writable
+  public void readFields(DataInput in) throws IOException {
+    readHelper(in);
+  }
+  
+  final void writeHelper(DataOutput out) throws IOException {
     out.writeLong(blockId);
     out.writeLong(numBytes);
     out.writeLong(generationStamp);
   }
-
-  public void readFields(DataInput in) throws IOException {
+  
+  final void readHelper(DataInput in) throws IOException {
     this.blockId = in.readLong();
     this.numBytes = in.readLong();
     this.generationStamp = in.readLong();
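
Splitting the field I/O into writeHelper/readHelper lets the new ExtendedBlock (added below) prefix its pool ID and then reuse Block's serialization, instead of re-implementing the Writable methods. A round-trip sketch through the public Writable interface, assuming the standard org.apache.hadoop.io buffer utilities (field values hypothetical):

    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.io.DataInputBuffer;
    import org.apache.hadoop.io.DataOutputBuffer;

    public class BlockRoundTrip {
      public static void main(String[] args) throws Exception {
        Block b = new Block(42L, 1024L, 7L);  // hypothetical id, length, genstamp
        DataOutputBuffer out = new DataOutputBuffer();
        b.write(out);                         // delegates to writeHelper(out)

        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());
        Block copy = new Block();
        copy.readFields(in);                  // delegates to readHelper(in)
        assert copy.getBlockId() == b.getBlockId();
      }
    }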

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java Fri Sep  3 23:38:21 2010
@@ -36,10 +36,10 @@ public interface ClientDatanodeProtocol 
   public static final Log LOG = LogFactory.getLog(ClientDatanodeProtocol.class);
 
   /**
-   * 6: recoverBlock() removed.
+   * 7: Add block pool ID to Block
    */
-  public static final long versionID = 6L;
+  public static final long versionID = 7L;
 
   /** Return the visible length of a replica. */
-  long getReplicaVisibleLength(Block b) throws IOException;
+  long getReplicaVisibleLength(ExtendedBlock b) throws IOException;
 }
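
The lockstep versionID bump makes an old client fail fast against a new datanode (and vice versa) now that Block arguments change wire format. A sketch of the kind of guard the RPC layer applies at connection time; serverVersion stands in for whatever the handshake reports:

    import java.io.IOException;

    import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;

    public class VersionGuard {
      // Hedged sketch; the real check lives in the Hadoop RPC machinery.
      static void check(long serverVersion) throws IOException {
        if (serverVersion != ClientDatanodeProtocol.versionID) {
          throw new IOException("ClientDatanodeProtocol version mismatch: client="
              + ClientDatanodeProtocol.versionID + ", server=" + serverVersion);
        }
      }
    }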

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Fri Sep  3 23:38:21 2010
@@ -68,9 +68,9 @@ public interface ClientProtocol extends 
    * Compared to the previous version the following changes have been introduced:
    * (Only the latest change is reflected.
    * The log of historical changes can be retrieved from the svn).
-   * 62: Allow iterative getListinng piggyback block locations.
+   * 63: Add block pool ID to Block
    */
-  public static final long versionID = 62L;
+  public static final long versionID = 63L;
   
   ///////////////////////////////////////
   // File contents
@@ -127,7 +127,7 @@ public interface ClientProtocol extends 
    * <p>
    * Blocks have a maximum size.  Clients that intend to create
    * multi-block files must also use 
-   * {@link #addBlock(String, String, Block, DatanodeInfo[])}
+   * {@link #addBlock(String, String, ExtendedBlock, DatanodeInfo[])}
    *
    * @param src path of the file being created.
    * @param masked masked permission.
@@ -260,7 +260,7 @@ public interface ClientProtocol extends 
    * @throws UnresolvedLinkException If <code>src</code> contains a symlink
    * @throws IOException If an I/O error occurred
    */
-  public void abandonBlock(Block b, String src, String holder)
+  public void abandonBlock(ExtendedBlock b, String src, String holder)
       throws AccessControlException, FileNotFoundException,
       UnresolvedLinkException, IOException;
 
@@ -294,7 +294,7 @@ public interface ClientProtocol extends 
    * @throws IOException If an I/O error occurred
    */
   public LocatedBlock addBlock(String src, String clientName,
-      @Nullable Block previous, @Nullable DatanodeInfo[] excludeNodes)
+      @Nullable ExtendedBlock previous, @Nullable DatanodeInfo[] excludeNodes)
       throws AccessControlException, FileNotFoundException,
       NotReplicatedYetException, SafeModeException, UnresolvedLinkException,
       IOException;
@@ -306,7 +306,7 @@ public interface ClientProtocol extends 
    * The function returns whether the file has been closed successfully.
    * If the function returns false, the caller should try again.
    * 
-   * close() also commits the last block of the file by reporting
+   * close() also commits the last block of the file by reporting
    * to the name-node the actual generation stamp and the length
    * of the block that the client has transmitted to data-nodes.
    *
@@ -321,7 +321,7 @@ public interface ClientProtocol extends 
    * @throws UnresolvedLinkException If <code>src</code> contains a symlink 
    * @throws IOException If an I/O error occurred
    */
-  public boolean complete(String src, String clientName, Block last)
+  public boolean complete(String src, String clientName, ExtendedBlock last)
       throws AccessControlException, FileNotFoundException, SafeModeException,
       UnresolvedLinkException, IOException;
 
@@ -825,8 +825,8 @@ public interface ClientProtocol extends 
    * @return a located block with a new generation stamp and an access token
    * @throws IOException if any error occurs
    */
-  public LocatedBlock updateBlockForPipeline(Block block, String clientName) 
-      throws IOException;
+  public LocatedBlock updateBlockForPipeline(ExtendedBlock block,
+      String clientName) throws IOException;
 
   /**
    * Update a pipeline for a block under construction
@@ -837,8 +837,8 @@ public interface ClientProtocol extends 
    * @param newNodes datanodes in the pipeline
    * @throws IOException if any error occurs
    */
-  public void updatePipeline(String clientName, Block oldBlock, 
-      Block newBlock, DatanodeID[] newNodes)
+  public void updatePipeline(String clientName, ExtendedBlock oldBlock, 
+      ExtendedBlock newBlock, DatanodeID[] newNodes)
       throws IOException;
 
   /**
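
Taken together, the write path now threads the pool-qualified block from addBlock() through complete(). A minimal sketch against the signatures above; the namenode proxy, path, and client name are assumed to come from the surrounding client code:

    import java.io.IOException;

    import org.apache.hadoop.hdfs.protocol.ClientProtocol;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
    import org.apache.hadoop.hdfs.protocol.LocatedBlock;

    public class WritePathSketch {
      static void lastBlockAndClose(ClientProtocol namenode, String src,
          String clientName, ExtendedBlock previous) throws IOException {
        // Ask for one more block, naming the last (pool-qualified) block written.
        LocatedBlock lb = namenode.addBlock(src, clientName, previous, null);
        ExtendedBlock last = lb.getBlock();
        // ... stream data to the pipeline, then commit the last block and close.
        while (!namenode.complete(src, clientName, last)) {
          // false means not all blocks reached minimum replication yet; retry
          // (a real client would back off between attempts).
        }
      }
    }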

Added: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java?rev=992508&view=auto
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java (added)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java Fri Sep  3 23:38:21 2010
@@ -0,0 +1,143 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.DeprecatedUTF8;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.io.WritableFactory;
+
+/**
+ * Identifies a Block uniquely across the block pools
+ */
+public class ExtendedBlock implements Writable {
+  private String poolId;
+  private Block block;
+
+  static { // register a ctor
+    WritableFactories.setFactory(ExtendedBlock.class, new WritableFactory() {
+      public Writable newInstance() {
+        return new ExtendedBlock();
+      }
+    });
+  }
+
+  public ExtendedBlock() {
+    this(null, 0, 0, 0);
+  }
+
+  // TODO:FEDERATION To remove when block pool ID related coding is complete
+  public ExtendedBlock(final Block b) {
+    this("TODO", b);
+  }
+  
+  public ExtendedBlock(final ExtendedBlock b) {
+    this(b.poolId, b.block);
+  }
+  
+  public ExtendedBlock(final String poolId, final long blockId) {
+    this(poolId, blockId, 0, 0);
+  }
+  
+  public ExtendedBlock(String poolId, Block b) {
+    this.poolId = poolId;
+    this.block = b;
+  }
+
+  public ExtendedBlock(final String poolId, final long blkid, final long len,
+      final long genstamp) {
+    this.poolId = poolId;
+    block = new Block(blkid, len, genstamp);
+  }
+
+  public void write(DataOutput out) throws IOException {
+    DeprecatedUTF8.writeString(out, poolId);
+    block.writeHelper(out);
+  }
+
+  public void readFields(DataInput in) throws IOException {
+    this.poolId = DeprecatedUTF8.readString(in);
+    block.readHelper(in);
+  }
+
+  public String getPoolId() {
+    return poolId;
+  }
+
+  public String getBlockName() {
+    return poolId + ":" + block;
+  }
+
+  public long getNumBytes() {
+    return block.getNumBytes();
+  }
+
+  public long getBlockId() {
+    return block.getBlockId();
+  }
+
+  public long getGenerationStamp() {
+    return block.getGenerationStamp();
+  }
+
+  public void setBlockId(final long bid) {
+    block.setBlockId(bid);
+  }
+  
+  public void setGenerationStamp(final long genStamp) {
+    block.setGenerationStamp(genStamp);
+  }
+
+  public void setNumBytes(final long len) {
+    block.setNumBytes(len);
+  }
+
+  public static Block getLocalBlock(final ExtendedBlock b) {
+    return b == null ? null : b.getLocalBlock();
+  }
+  
+  public Block getLocalBlock() {
+    return block;
+  }
+  
+  @Override // Object
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (!(o instanceof ExtendedBlock)) {
+      return false;
+    }
+    ExtendedBlock b = (ExtendedBlock)o;
+    return b.block.equals(block) && b.poolId.equals(poolId);
+  }
+  
+  @Override // Object
+  public int hashCode() {
+    return block.hashCode();
+  }
+  
+  @Override // Object
+  public String toString() {
+    return getBlockName();
+  }
+}
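
A short usage sketch of the class above (values hypothetical). The one-argument constructor with its "TODO" pool ID is the transitional bridge flagged in the code; getLocalBlock() is the escape hatch for call sites that still expect a plain Block:

    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

    public class ExtendedBlockUsage {
      public static void main(String[] args) {
        Block legacy = new Block(42L, 1024L, 7L);          // hypothetical values
        ExtendedBlock bridged = new ExtendedBlock(legacy); // pool ID is the "TODO" placeholder
        ExtendedBlock qualified = new ExtendedBlock("example-pool", legacy);
        // equals() requires both the pool ID and the local block to match,
        // so bridged and qualified compare unequal.
        System.out.println(bridged.equals(qualified));
        Block local = qualified.getLocalBlock();           // unwrap for legacy call sites
        System.out.println(qualified + " wraps " + local);
      }
    }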

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java Fri Sep  3 23:38:21 2010
@@ -42,7 +42,7 @@ public class LocatedBlock implements Wri
        });
   }
 
-  private Block b;
+  private ExtendedBlock b;
   private long offset;  // offset of the first byte of the block in the file
   private DatanodeInfo[] locs;
   // corrupt flag is true if all of the replicas of a block are corrupt.
@@ -51,27 +51,24 @@ public class LocatedBlock implements Wri
   private boolean corrupt;
   private Token<BlockTokenIdentifier> blockToken = new Token<BlockTokenIdentifier>();
 
-  /**
-   */
   public LocatedBlock() {
-    this(new Block(), new DatanodeInfo[0], 0L, false);
+    this(new ExtendedBlock(), new DatanodeInfo[0], 0L, false);
   }
 
-  /**
-   */
+  // TODO:FEDERATION To remove when block pool ID related coding is complete
   public LocatedBlock(Block b, DatanodeInfo[] locs) {
+    this(new ExtendedBlock(b), locs, -1, false); // startOffset is unknown
+  }
+
+  public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs) {
     this(b, locs, -1, false); // startOffset is unknown
   }
 
-  /**
-   */
-  public LocatedBlock(Block b, DatanodeInfo[] locs, long startOffset) {
+  public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs, long startOffset) {
     this(b, locs, startOffset, false);
   }
 
-  /**
-   */
-  public LocatedBlock(Block b, DatanodeInfo[] locs, long startOffset, 
+  public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs, long startOffset, 
                       boolean corrupt) {
     this.b = b;
     this.offset = startOffset;
@@ -93,7 +90,7 @@ public class LocatedBlock implements Wri
 
   /**
    */
-  public Block getBlock() {
+  public ExtendedBlock getBlock() {
     return b;
   }
 
@@ -141,7 +138,7 @@ public class LocatedBlock implements Wri
     blockToken.readFields(in);
     this.corrupt = in.readBoolean();
     offset = in.readLong();
-    this.b = new Block();
+    this.b = new ExtendedBlock();
     b.readFields(in);
     int count = in.readInt();
     this.locs = new DatanodeInfo[count];
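
With LocatedBlock now carrying an ExtendedBlock end to end, a minimal client-side construction looks like this sketch (empty datanode list for brevity; values hypothetical):

    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
    import org.apache.hadoop.hdfs.protocol.LocatedBlock;

    public class LocatedBlockSketch {
      public static void main(String[] args) {
        ExtendedBlock eb = new ExtendedBlock("example-pool", 42L, 1024L, 7L);
        LocatedBlock lb = new LocatedBlock(eb, new DatanodeInfo[0]); // startOffset unknown (-1)
        ExtendedBlock got = lb.getBlock(); // now pool-qualified
        System.out.println(got.getPoolId());
      }
    }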

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java Fri Sep  3 23:38:21 2010
@@ -32,6 +32,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager;
@@ -174,7 +175,7 @@ public class BlockTokenSecretManager ext
   }
 
   /** Generate a block token for the current user */
-  public Token<BlockTokenIdentifier> generateToken(Block block,
+  public Token<BlockTokenIdentifier> generateToken(ExtendedBlock block,
       EnumSet<AccessMode> modes) throws IOException {
     return generateToken(new long [] { block.getBlockId() }, modes);
   }
@@ -203,12 +204,20 @@ public class BlockTokenSecretManager ext
   }
 
   /**
+   * TODO:FEDERATION To remove when block pool ID related coding is complete
+   */
+  public void checkAccess(Token<BlockTokenIdentifier> blockToken, String userId,
+      Block block, AccessMode mode) throws InvalidToken {
+    checkAccess(blockToken, userId, new ExtendedBlock(block), mode);
+  }
+  
+  /**
    * Check if access should be allowed. userID is not checked if null. This
    * method doesn't check if token password is correct. It should be used only
    * when token password has already been verified (e.g., in the RPC layer).
    */
-  public void checkAccess(BlockTokenIdentifier id, String userId, Block block,
-      AccessMode mode) throws InvalidToken {
+  public void checkAccess(BlockTokenIdentifier id, String userId,
+      ExtendedBlock block, AccessMode mode) throws InvalidToken {
     if (LOG.isDebugEnabled()) {
       LOG.debug("Checking access for user=" + userId + ", block=" + block
           + ", access mode=" + mode + " using " + id.toString());
@@ -233,7 +242,7 @@ public class BlockTokenSecretManager ext
 
   /** Check if access should be allowed. userID is not checked if null */
   public void checkAccess(Token<BlockTokenIdentifier> token, String userId,
-      Block block, AccessMode mode) throws InvalidToken {
+      ExtendedBlock block, AccessMode mode) throws InvalidToken {
     BlockTokenIdentifier id = new BlockTokenIdentifier();
     try {
       id.readFields(new DataInputStream(new ByteArrayInputStream(token
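
Token generation and verification now take the pool-qualified block. A sketch against the signatures in this hunk; the manager instance and user name are assumed to be supplied by the caller:

    import java.io.IOException;
    import java.util.EnumSet;

    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
    import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
    import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
    import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode;
    import org.apache.hadoop.security.token.Token;

    public class BlockTokenSketch {
      static void demo(BlockTokenSecretManager mgr, ExtendedBlock eb, String user)
          throws IOException {
        Token<BlockTokenIdentifier> t =
            mgr.generateToken(eb, EnumSet.of(AccessMode.READ));
        mgr.checkAccess(t, user, eb, AccessMode.READ); // throws InvalidToken on failure
      }
    }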

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Fri Sep  3 23:38:21 2010
@@ -67,6 +67,7 @@ import org.apache.hadoop.hdfs.protocol.C
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
@@ -1057,7 +1058,8 @@ public class DataNode extends Configured
       if(delHintArray == null || delHintArray.length != blockArray.length ) {
         LOG.warn("Panic: block array & delHintArray are not the same" );
       }
-      namenode.blockReceived(dnRegistration, blockArray, delHintArray);
+      // TODO:FEDERATION add support for pool ID
+      namenode.blockReceived(dnRegistration, "TODO", blockArray, delHintArray);
       synchronized(receivedBlockList) {
         synchronized(delHints){
           for(int i=0; i<blockArray.length; i++) {
@@ -1086,7 +1088,9 @@ public class DataNode extends Configured
       long brStartTime = now();
       BlockListAsLongs bReport = data.getBlockReport();
 
-      cmd = namenode.blockReport(dnRegistration, bReport.getBlockListAsLongs());
+      // TODO:FEDERATION add support for pool ID
+      cmd = namenode.blockReport(dnRegistration, "TODO", bReport
+          .getBlockListAsLongs());
       long brTime = now() - brStartTime;
       myMetrics.blockReports.inc(brTime);
       LOG.info("BlockReport of " + bReport.getNumberOfBlocks() +
@@ -1658,7 +1662,7 @@ public class DataNode extends Configured
       public void run() {
         for(RecoveringBlock b : blocks) {
           try {
-            logRecoverBlock("NameNode", b.getBlock(), b.getLocations());
+            logRecoverBlock("NameNode", b.getBlock().getLocalBlock(), b.getLocations());
             recoverBlock(b);
           } catch (IOException e) {
             LOG.warn("recoverBlocks FAILED: " + b, e);
@@ -1695,12 +1699,12 @@ public class DataNode extends Configured
    * Update replica with the new generation stamp and length.  
    */
   @Override // InterDatanodeProtocol
-  public Block updateReplicaUnderRecovery(Block oldBlock,
+  public ExtendedBlock updateReplicaUnderRecovery(ExtendedBlock oldBlock,
                                           long recoveryId,
                                           long newLength) throws IOException {
-    ReplicaInfo r =
-      data.updateReplicaUnderRecovery(oldBlock, recoveryId, newLength);
-    return new Block(r);
+    ReplicaInfo r = data.updateReplicaUnderRecovery(oldBlock.getLocalBlock(),
+        recoveryId, newLength);
+    return new ExtendedBlock(oldBlock.getPoolId(), r);
   }
 
   /** {@inheritDoc} */
@@ -1737,7 +1741,7 @@ public class DataNode extends Configured
 
   /** Recover a block */
   private void recoverBlock(RecoveringBlock rBlock) throws IOException {
-    Block block = rBlock.getBlock();
+    ExtendedBlock block = rBlock.getBlock();
     DatanodeInfo[] targets = rBlock.getLocations();
     DatanodeID[] datanodeids = (DatanodeID[])targets;
     List<BlockRecord> syncList = new ArrayList<BlockRecord>(datanodeids.length);
@@ -1780,7 +1784,7 @@ public class DataNode extends Configured
   /** Block synchronization */
   void syncBlock(RecoveringBlock rBlock,
                          List<BlockRecord> syncList) throws IOException {
-    Block block = rBlock.getBlock();
+    ExtendedBlock block = rBlock.getBlock();
     long recoveryId = rBlock.getNewGenerationStamp();
     if (LOG.isDebugEnabled()) {
       LOG.debug("block=" + block + ", (length=" + block.getNumBytes()
@@ -1815,7 +1819,8 @@ public class DataNode extends Configured
     // Calculate list of nodes that will participate in the recovery
     // and the new block size
     List<BlockRecord> participatingList = new ArrayList<BlockRecord>();
-    Block newBlock = new Block(block.getBlockId(), -1, recoveryId);
+    final ExtendedBlock newBlock = new ExtendedBlock(block.getPoolId(), block
+        .getBlockId(), -1, recoveryId);
     switch(bestState) {
     case FINALIZED:
       assert finalizedLength > 0 : "finalizedLength is not positive";
@@ -1849,8 +1854,9 @@ public class DataNode extends Configured
     List<DatanodeID> successList = new ArrayList<DatanodeID>();
     for(BlockRecord r : participatingList) {
       try {
-        Block reply = r.datanode.updateReplicaUnderRecovery(
-            r.rInfo, recoveryId, newBlock.getNumBytes());
+        ExtendedBlock reply = r.datanode.updateReplicaUnderRecovery(
+            new ExtendedBlock(newBlock.getPoolId(), r.rInfo), recoveryId,
+            newBlock.getNumBytes());
         assert reply.equals(newBlock) &&
                reply.getNumBytes() == newBlock.getNumBytes() :
           "Updated replica must be the same as the new block.";
@@ -1894,7 +1900,7 @@ public class DataNode extends Configured
   // ClientDataNodeProtocol implementation
   /** {@inheritDoc} */
   @Override // ClientDataNodeProtocol
-  public long getReplicaVisibleLength(final Block block) throws IOException {
+  public long getReplicaVisibleLength(final ExtendedBlock block) throws IOException {
     if (isBlockTokenEnabled) {
       Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
           .getTokenIdentifiers();
@@ -1913,7 +1919,7 @@ public class DataNode extends Configured
       }
     }
 
-    return data.getReplicaVisibleLength(block);
+    return data.getReplicaVisibleLength(block.getLocalBlock());
   }
   
   // Determine a Datanode's streaming address

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java Fri Sep  3 23:38:21 2010
@@ -1953,7 +1953,7 @@ public class FSDataset implements FSCons
   public synchronized ReplicaRecoveryInfo initReplicaRecovery(
       RecoveringBlock rBlock) throws IOException {
     return initReplicaRecovery(
-        volumeMap, rBlock.getBlock(), rBlock.getNewGenerationStamp());
+        volumeMap, rBlock.getBlock().getLocalBlock(), rBlock.getNewGenerationStamp());
   }
 
   /** static version of {@link #initReplicaRecovery(Block, long)}. */

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java Fri Sep  3 23:38:21 2010
@@ -32,8 +32,8 @@ import org.apache.hadoop.classification.
  * be published as an interface.
  * 
  * <p>
- * Data Node runtime statistic  info is report in another MBean
- * @see org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeStatisticsMBean
+ * Data Node runtime statistic info is reported in another MBean
+ * @see org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics
  *
  */
 @InterfaceAudience.Private

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java Fri Sep  3 23:38:21 2010
@@ -27,6 +27,7 @@ import org.apache.hadoop.hdfs.protocol.B
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportIterator;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
@@ -355,7 +356,7 @@ public class DatanodeDescriptor extends 
     BlockRecoveryCommand brCommand = new BlockRecoveryCommand(blocks.size());
     for(BlockInfoUnderConstruction b : blocks) {
       brCommand.add(new RecoveringBlock(
-          b, b.getExpectedLocations(), b.getBlockRecoveryId()));
+          new ExtendedBlock(b), b.getExpectedLocations(), b.getBlockRecoveryId()));
     }
     return brCommand;
   }

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Fri Sep  3 23:38:21 2010
@@ -186,6 +186,8 @@ public class FSNamesystem implements FSC
   //
   public FSDirectory dir;
 
+  // TODO:FEDERATION initialize from the persisted information
+  String poolId = "TODO";
   BlockManager blockManager;
     
   /**
@@ -836,7 +838,7 @@ public class FSNamesystem implements FSC
   /** Create a LocatedBlock. */
   LocatedBlock createLocatedBlock(final Block b, final DatanodeInfo[] locations,
       final long offset, final boolean corrupt) throws IOException {
-    return new LocatedBlock(b, locations, offset, corrupt);
+    return new LocatedBlock(getExtendedBlock(b), locations, offset, corrupt);
   }
   
   /**
@@ -1397,7 +1399,7 @@ public class FSNamesystem implements FSC
           // convert last block to under-construction and set its locations
           blockManager.convertLastBlockToUnderConstruction(file, targets);
 
-          lb = new LocatedBlock(lastBlock, targets, 
+          lb = new LocatedBlock(getExtendedBlock(lastBlock), targets, 
                                 fileLength-lastBlock.getNumBytes());
           if (isBlockTokenEnabled) {
             lb.setBlockToken(blockTokenSecretManager.generateToken(lb.getBlock(), 
@@ -1434,6 +1436,10 @@ public class FSNamesystem implements FSC
     return lb;
   }
 
+  ExtendedBlock getExtendedBlock(Block blk) {
+    return new ExtendedBlock(poolId, blk);
+  }
+
   /**
    * The client would like to obtain an additional block for the indicated
    * filename (which is being written-to).  Return an array that consists
@@ -1447,12 +1453,13 @@ public class FSNamesystem implements FSC
    */
   public LocatedBlock getAdditionalBlock(String src,
                                          String clientName,
-                                         Block previous,
+                                         ExtendedBlock previous,
                                          HashMap<Node, Node> excludedNodes
                                          ) 
       throws LeaseExpiredException, NotReplicatedYetException,
       QuotaExceededException, SafeModeException, UnresolvedLinkException,
       IOException {
+    checkBlock(previous);
     long fileLength, blockSize;
     int replication;
     DatanodeDescriptor clientNode = null;
@@ -1475,7 +1482,8 @@ public class FSNamesystem implements FSC
       INodeFileUnderConstruction pendingFile  = checkLease(src, clientName);
 
       // commit the last block and complete it if it has minimum replicas
-      blockManager.commitOrCompleteLastBlock(pendingFile, previous);
+      blockManager.commitOrCompleteLastBlock(pendingFile, ExtendedBlock
+          .getLocalBlock(previous));
 
       //
       // If we fail this, bad things happen!
@@ -1519,7 +1527,7 @@ public class FSNamesystem implements FSC
     }
         
     // Create next block
-    LocatedBlock b = new LocatedBlock(newBlock, targets, fileLength);
+    LocatedBlock b = new LocatedBlock(getExtendedBlock(newBlock), targets, fileLength);
     if (isBlockTokenEnabled) {
       b.setBlockToken(blockTokenSecretManager.generateToken(b.getBlock(), 
           EnumSet.of(BlockTokenSecretManager.AccessMode.WRITE)));
@@ -1530,9 +1538,10 @@ public class FSNamesystem implements FSC
   /**
    * The client would like to let go of the given block
    */
-  public synchronized boolean abandonBlock(Block b, String src, String holder)
+  public synchronized boolean abandonBlock(ExtendedBlock b, String src, String holder)
       throws LeaseExpiredException, FileNotFoundException,
       UnresolvedLinkException, IOException {
+    checkBlock(b);
     //
     // Remove the block from the pending creates list
     //
@@ -1541,7 +1550,7 @@ public class FSNamesystem implements FSC
                                     +b+"of file "+src);
     }
     INodeFileUnderConstruction file = checkLease(src, holder);
-    dir.removeBlock(src, file, b);
+    dir.removeBlock(src, file, ExtendedBlock.getLocalBlock(b));
     if(NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("BLOCK* NameSystem.abandonBlock: "
                                     + b
@@ -1590,9 +1599,11 @@ public class FSNamesystem implements FSC
    *         (e.g if not all blocks have reached minimum replication yet)
    * @throws IOException on error (eg lease mismatch, file not open, file deleted)
    */
-  public boolean completeFile(String src, String holder, Block last) 
+  public boolean completeFile(String src, String holder, ExtendedBlock last) 
     throws SafeModeException, UnresolvedLinkException, IOException {
-    boolean success = completeFileInternal(src, holder, last);
+    checkBlock(last);
+    boolean success = completeFileInternal(src, holder, 
+        ExtendedBlock.getLocalBlock(last));
     getEditLog().logSync();
     return success ;
   }
@@ -1698,9 +1709,10 @@ public class FSNamesystem implements FSC
    * @param blk Block to be marked as corrupt
    * @param dn Datanode which holds the corrupt replica
    */
-  public synchronized void markBlockAsCorrupt(Block blk, DatanodeInfo dn)
+  public synchronized void markBlockAsCorrupt(ExtendedBlock blk, DatanodeInfo dn)
     throws IOException {
-    blockManager.findAndMarkBlockAsCorrupt(blk, dn);
+    checkBlock(blk);
+    blockManager.findAndMarkBlockAsCorrupt(ExtendedBlock.getLocalBlock(blk), dn);
   }
 
 
@@ -2183,7 +2195,7 @@ public class FSNamesystem implements FSC
     checkReplicationFactor(newFile);
   }
 
-  synchronized void commitBlockSynchronization(Block lastblock,
+  synchronized void commitBlockSynchronization(ExtendedBlock lastblock,
       long newgenerationstamp, long newlength,
       boolean closeFile, boolean deleteblock, DatanodeID[] newtargets)
       throws IOException, UnresolvedLinkException {
@@ -2194,7 +2206,8 @@ public class FSNamesystem implements FSC
           + ", closeFile=" + closeFile
           + ", deleteBlock=" + deleteblock
           + ")");
-    final BlockInfo storedBlock = blockManager.getStoredBlock(lastblock);
+    final BlockInfo storedBlock = blockManager.getStoredBlock(ExtendedBlock
+        .getLocalBlock(lastblock));
     if (storedBlock == null) {
       throw new IOException("Block (=" + lastblock + ") not found");
     }
@@ -2216,7 +2229,7 @@ public class FSNamesystem implements FSC
     INodeFileUnderConstruction pendingFile = (INodeFileUnderConstruction)iFile;
 
     if (deleteblock) {
-      pendingFile.removeLastBlock(lastblock);
+      pendingFile.removeLastBlock(ExtendedBlock.getLocalBlock(lastblock));
       blockManager.removeBlockFromMap(storedBlock);
     }
     else {
@@ -2867,9 +2880,9 @@ public class FSNamesystem implements FSC
    * The given node is reporting all its blocks.  Use this info to 
    * update the (machine-->blocklist) and (block-->machinelist) tables.
    */
-  public synchronized void processReport(DatanodeID nodeID, 
-                                         BlockListAsLongs newReport
-                                        ) throws IOException {
+  public synchronized void processReport(DatanodeID nodeID, String poolId,
+      BlockListAsLongs newReport) throws IOException {
+    checkPoolId(poolId);
     long startTime = now();
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("BLOCK* NameSystem.processReport: "
@@ -2998,9 +3011,11 @@ public class FSNamesystem implements FSC
    * The given node is reporting that it received a certain block.
    */
   public synchronized void blockReceived(DatanodeID nodeID,  
+                                         String poolId,
                                          Block block,
                                          String delHint
                                          ) throws IOException {
+    checkPoolId(poolId);
     DatanodeDescriptor node = getDatanode(nodeID);
     if (node == null || !node.isAlive) {
       NameNode.stateChangeLog.warn("BLOCK* NameSystem.blockReceived: " + block
@@ -3023,6 +3038,20 @@ public class FSNamesystem implements FSC
     blockManager.addBlock(node, block, delHint);
   }
 
+  private void checkPoolId(String thatPoolId) throws IOException {
+    if (!this.poolId.equals(thatPoolId)) {
+      throw new IOException("PoolId " + thatPoolId
+          + " does not belong to expected pool " + poolId);
+    }
+  }
+
+  private void checkBlock(ExtendedBlock block) throws IOException {
+    if (block != null && !this.poolId.equals(block.getPoolId())) {
+      throw new IOException("Block " + block
+          + " does not belong to expected pool " + poolId);
+    }
+  }
+
   public long getMissingBlocksCount() {
     // not locking
     return blockManager.getMissingBlocksCount();
@@ -4233,15 +4262,15 @@ public class FSNamesystem implements FSC
     return gs;
   }
 
-  private INodeFileUnderConstruction checkUCBlock(Block block, String clientName) 
-  throws IOException {
+  private INodeFileUnderConstruction checkUCBlock(ExtendedBlock block,
+      String clientName) throws IOException {
     // check safe mode
     if (isInSafeMode())
       throw new SafeModeException("Cannot get a new generation stamp and an " +
                                 "access token for block " + block, safeMode);
     
     // check stored block state
-    BlockInfo storedBlock = blockManager.getStoredBlock(block);
+    BlockInfo storedBlock = blockManager.getStoredBlock(ExtendedBlock.getLocalBlock(block));
     if (storedBlock == null || 
         storedBlock.getBlockUCState() != BlockUCState.UNDER_CONSTRUCTION) {
         throw new IOException(block + 
@@ -4252,7 +4281,7 @@ public class FSNamesystem implements FSC
     INodeFile file = storedBlock.getINode();
     if (file==null || !file.isUnderConstruction()) {
       throw new IOException("The file " + storedBlock + 
-          " is belonged to does not exist or it is not under construction.");
+          " belonged to does not exist or it is not under construction.");
     }
     
     // check lease
@@ -4277,8 +4306,9 @@ public class FSNamesystem implements FSC
    * @return a located block with a new generation stamp and an access token
    * @throws IOException if any error occurs
    */
-  synchronized LocatedBlock updateBlockForPipeline(Block block, 
+  synchronized LocatedBlock updateBlockForPipeline(ExtendedBlock block, 
       String clientName) throws IOException {
+    checkBlock(block);
     // check validity of parameters
     checkUCBlock(block, clientName);
     
@@ -4302,9 +4332,12 @@ public class FSNamesystem implements FSC
    * @param newNodes datanodes in the pipeline
    * @throws IOException if any error occurs
    */
-  synchronized void updatePipeline(String clientName, Block oldBlock, 
-      Block newBlock, DatanodeID[] newNodes)
+  synchronized void updatePipeline(String clientName, ExtendedBlock oldBlock,
+      ExtendedBlock newBlock, DatanodeID[] newNodes)
       throws IOException {
+    checkBlock(oldBlock);
+    checkBlock(newBlock);
+    
     assert newBlock.getBlockId()==oldBlock.getBlockId() : newBlock + " and "
     + oldBlock + " has different block identifier";
     LOG.info("updatePipeline(block=" + oldBlock
@@ -4888,4 +4921,8 @@ public class FSNamesystem implements FSC
   private long getDfsUsed(DatanodeDescriptor alivenode) {
     return alivenode.getDfsUsed();
   }
+
+  public String getPoolId() {
+    return poolId;
+  }
 }
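
The new checkPoolId/checkBlock guards reject requests that name a foreign pool before any namespace state is touched. A standalone restatement of the block-side guard, for illustration (expectedPoolId plays the role of FSNamesystem.poolId):

    import java.io.IOException;

    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

    public class PoolGuardSketch {
      // Mirrors the private FSNamesystem.checkBlock above: a null block passes,
      // a block from another pool fails fast.
      static void checkBlock(String expectedPoolId, ExtendedBlock block)
          throws IOException {
        if (block != null && !expectedPoolId.equals(block.getPoolId())) {
          throw new IOException("Block " + block
              + " does not belong to expected pool " + expectedPoolId);
        }
      }
    }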

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Fri Sep  3 23:38:21 2010
@@ -46,6 +46,7 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -801,7 +802,7 @@ public class NameNode implements Namenod
   @Override
   public LocatedBlock addBlock(String src,
                                String clientName,
-                               Block previous,
+                               ExtendedBlock previous,
                                DatanodeInfo[] excludedNodes)
       throws IOException {
     if(stateChangeLog.isDebugEnabled()) {
@@ -825,7 +826,7 @@ public class NameNode implements Namenod
   /**
    * The client needs to give up on the block.
    */
-  public void abandonBlock(Block b, String src, String holder)
+  public void abandonBlock(ExtendedBlock b, String src, String holder)
       throws IOException {
     if(stateChangeLog.isDebugEnabled()) {
       stateChangeLog.debug("*BLOCK* NameNode.abandonBlock: "
@@ -837,7 +838,7 @@ public class NameNode implements Namenod
   }
 
   /** {@inheritDoc} */
-  public boolean complete(String src, String clientName, Block last)
+  public boolean complete(String src, String clientName, ExtendedBlock last)
       throws IOException {
     if(stateChangeLog.isDebugEnabled()) {
       stateChangeLog.debug("*DIR* NameNode.complete: "
@@ -855,7 +856,7 @@ public class NameNode implements Namenod
   public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
     stateChangeLog.info("*DIR* NameNode.reportBadBlocks");
     for (int i = 0; i < blocks.length; i++) {
-      Block blk = blocks[i].getBlock();
+      ExtendedBlock blk = blocks[i].getBlock();
       DatanodeInfo[] nodes = blocks[i].getLocations();
       for (int j = 0; j < nodes.length; j++) {
         DatanodeInfo dn = nodes[j];
@@ -866,21 +867,21 @@ public class NameNode implements Namenod
 
   /** {@inheritDoc} */
   @Override
-  public LocatedBlock updateBlockForPipeline(Block block, String clientName)
+  public LocatedBlock updateBlockForPipeline(ExtendedBlock block, String clientName)
       throws IOException {
     return namesystem.updateBlockForPipeline(block, clientName);
   }
 
 
   @Override
-  public void updatePipeline(String clientName, Block oldBlock,
-      Block newBlock, DatanodeID[] newNodes)
+  public void updatePipeline(String clientName, ExtendedBlock oldBlock,
+      ExtendedBlock newBlock, DatanodeID[] newNodes)
       throws IOException {
     namesystem.updatePipeline(clientName, oldBlock, newBlock, newNodes);
   }
   
   /** {@inheritDoc} */
-  public void commitBlockSynchronization(Block block,
+  public void commitBlockSynchronization(ExtendedBlock block,
       long newgenerationstamp, long newlength,
       boolean closeFile, boolean deleteblock, DatanodeID[] newtargets)
       throws IOException {
@@ -1222,6 +1223,7 @@ public class NameNode implements Namenod
   }
 
   public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
+                                     String poolId,
                                      long[] blocks) throws IOException {
     verifyRequest(nodeReg);
     BlockListAsLongs blist = new BlockListAsLongs(blocks);
@@ -1231,13 +1233,14 @@ public class NameNode implements Namenod
            + " blocks");
     }
 
-    namesystem.processReport(nodeReg, blist);
+    namesystem.processReport(nodeReg, poolId, blist);
     if (getFSImage().isUpgradeFinalized())
       return DatanodeCommand.FINALIZE;
     return null;
   }
 
   public void blockReceived(DatanodeRegistration nodeReg, 
+                            String poolId,
                             Block blocks[],
                             String delHints[]) throws IOException {
     verifyRequest(nodeReg);
@@ -1246,7 +1249,7 @@ public class NameNode implements Namenod
           +"from "+nodeReg.getName()+" "+blocks.length+" blocks.");
     }
     for (int i = 0; i < blocks.length; i++) {
-      namesystem.blockReceived(nodeReg, blocks[i], delHints[i]);
+      namesystem.blockReceived(nodeReg, poolId, blocks[i], delHints[i]);
     }
   }
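
On the datanode side, callers of these widened methods now thread the pool
ID through on every report. A hedged sketch of the call pattern (the helper
and variable names are illustrative, not the actual DataNode code; only the
blockReport/blockReceived signatures come from this patch):

    // Illustrative helpers; the pool ID would ultimately come from the
    // namesystem's new getPoolId() accessor added above.
    DatanodeCommand reportForPool(DatanodeProtocol namenode,
        DatanodeRegistration reg, String poolId, long[] blocksAsLongs)
        throws IOException {
      return namenode.blockReport(reg, poolId, blocksAsLongs);
    }

    void notifyReceived(DatanodeProtocol namenode, DatanodeRegistration reg,
        String poolId, Block received, String delHint) throws IOException {
      namenode.blockReceived(reg, poolId, new Block[] { received },
          new String[] { delHint });
    }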
 

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java Fri Sep  3 23:38:21 2010
@@ -39,7 +39,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -316,7 +316,7 @@ public class NamenodeFsck {
     StringBuilder report = new StringBuilder();
     int i = 0;
     for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
-      Block block = lBlk.getBlock();
+      ExtendedBlock block = lBlk.getBlock();
       boolean isCorrupt = lBlk.isCorrupt();
       String blkName = block.toString();
       DatanodeInfo[] locs = lBlk.getLocations();
@@ -495,7 +495,7 @@ public class NamenodeFsck {
     TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
     Socket s = null;
     BlockReader blockReader = null; 
-    Block block = lblock.getBlock(); 
+    ExtendedBlock block = lblock.getBlock(); 
 
     while (s == null) {
       DatanodeInfo chosenNode;

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java Fri Sep  3 23:38:21 2010
@@ -25,8 +25,8 @@ import java.util.ArrayList;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
@@ -70,7 +70,7 @@ public class BlockRecoveryCommand extend
     /**
      * Create RecoveringBlock.
      */
-    public RecoveringBlock(Block b, DatanodeInfo[] locs, long newGS) {
+    public RecoveringBlock(ExtendedBlock b, DatanodeInfo[] locs, long newGS) {
       super(b, locs, -1, false); // startOffset is unknown
       this.newGenerationStamp = newGS;
     }
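
With RecoveringBlock now carrying an ExtendedBlock, building a recovery
command looks roughly like this (a sketch: it assumes the pre-existing
BlockRecoveryCommand(int) constructor and add() method, and all values are
illustrative):

    BlockRecoveryCommand buildRecoveryCommand(String poolId, long blockId,
        long numBytes, long genStamp, DatanodeInfo[] locs, long newGS) {
      ExtendedBlock blk =
          new ExtendedBlock(poolId, blockId, numBytes, genStamp);
      BlockRecoveryCommand cmd = new BlockRecoveryCommand(1);
      cmd.add(new BlockRecoveryCommand.RecoveringBlock(blk, locs, newGS));
      return cmd;
    }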

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java Fri Sep  3 23:38:21 2010
@@ -23,6 +23,7 @@ import java.io.*;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.ipc.VersionedProtocol;
@@ -44,9 +45,9 @@ import org.apache.avro.reflect.Nullable;
 @InterfaceAudience.Private
 public interface DatanodeProtocol extends VersionedProtocol {
   /**
-   * 25: HDFS-1081. Performance optimization on getBlocksLocation()
+   * 26: Add block pool ID to Block
    */
-  public static final long versionID = 25L;
+  public static final long versionID = 26L;
   
   // error code
   final static int NOTIFY = 0;
@@ -101,6 +102,7 @@ public interface DatanodeProtocol extend
    * the locally-stored blocks.  It's invoked upon startup and then
    * infrequently afterwards.
    * @param registration
+   * @param poolId - the block pool ID for the blocks
    * @param blocks - the block list as an array of longs.
    *     Each block is represented as 2 longs.
    *     This is done instead of Block[] to reduce memory used by block reports.
@@ -109,6 +111,7 @@ public interface DatanodeProtocol extend
    * @throws IOException
    */
   public DatanodeCommand blockReport(DatanodeRegistration registration,
+                                     String poolId,
                                      long[] blocks) throws IOException;
     
   /**
@@ -120,6 +123,7 @@ public interface DatanodeProtocol extend
    * this DataNode, it will call blockReceived().
    */
   public void blockReceived(DatanodeRegistration registration,
+                            String poolId,
                             Block blocks[],
                             String[] delHints) throws IOException;
 
@@ -154,7 +158,7 @@ public interface DatanodeProtocol extend
   /**
    * Commit block synchronization in lease recovery
    */
-  public void commitBlockSynchronization(Block block,
+  public void commitBlockSynchronization(ExtendedBlock block,
       long newgenerationstamp, long newlength,
       boolean closeFile, boolean deleteblock, DatanodeID[] newtargets
       ) throws IOException;
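
For the primary datanode finishing lease recovery, the commit call now
identifies the block by pool as well. A sketch with illustrative names and
values (only the commitBlockSynchronization signature is from this patch):

    // Sketch: commit the outcome of lease recovery for one block.
    void commitRecovery(DatanodeProtocol namenode, String poolId,
        long blockId, long oldGenStamp, long newGenStamp, long newLength,
        DatanodeID[] newTargets) throws IOException {
      ExtendedBlock b = new ExtendedBlock(poolId, blockId, newLength,
          oldGenStamp);
      namenode.commitBlockSynchronization(b, newGenStamp, newLength,
          true /* closeFile */, false /* deleteblock */, newTargets);
    }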

Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java Fri Sep  3 23:38:21 2010
@@ -24,7 +24,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.ipc.VersionedProtocol;
 import org.apache.hadoop.security.KerberosInfo;
@@ -39,9 +39,9 @@ public interface InterDatanodeProtocol e
   public static final Log LOG = LogFactory.getLog(InterDatanodeProtocol.class);
 
   /**
-   * 5: getBlockMetaDataInfo(), updateBlock() removed.
+   * 6: Add block pool ID to Block
    */
-  public static final long versionID = 5L;
+  public static final long versionID = 6L;
 
   /**
    * Initialize a replica recovery.
@@ -55,7 +55,7 @@ public interface InterDatanodeProtocol e
   /**
    * Update replica with the new generation stamp and length.  
    */
-  Block updateReplicaUnderRecovery(Block oldBlock,
+  ExtendedBlock updateReplicaUnderRecovery(ExtendedBlock oldBlock,
                                    long recoveryId,
                                    long newLength) throws IOException;
 }
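
Correspondingly, a recovery coordinator talking to a peer datanode now
sends and receives pool-qualified blocks. A hedged sketch (the helper and
its parameters are illustrative; `proxy` stands for an
InterDatanodeProtocol proxy):

    ExtendedBlock syncReplica(InterDatanodeProtocol proxy, String poolId,
        long blockId, long numBytes, long genStamp, long recoveryId,
        long newLength) throws IOException {
      ExtendedBlock oldBlock = new ExtendedBlock(poolId, blockId, numBytes,
          genStamp);
      // The peer brings its replica to newLength under the new recovery ID
      // and returns the updated, still pool-qualified block.
      return proxy.updateReplicaUnderRecovery(oldBlock, recoveryId,
          newLength);
    }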

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java Fri Sep  3 23:38:21 2010
@@ -41,7 +41,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FileSystem.Statistics;
 import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
-import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.io.IOUtils;
@@ -252,7 +252,7 @@ public class DFSTestUtil {
     files = null;
   }
   
-  public static Block getFirstBlock(FileSystem fs, Path path) throws IOException {
+  public static ExtendedBlock getFirstBlock(FileSystem fs, Path path) throws IOException {
     DFSDataInputStream in = 
       (DFSDataInputStream) ((DistributedFileSystem)fs).open(path);
     in.readByte();
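
The test-side pattern that follows from this change: helpers hand back an
ExtendedBlock, and tests unwrap it with getLocalBlock() only at
dataset-level APIs that remain pool-local, as the TestFileAppend and
TestDatanodeBlockScanner hunks below show. A two-line sketch (fs, path and
dataset are illustrative):

    ExtendedBlock eb = DFSTestUtil.getFirstBlock(fs, path);
    File blockFile = dataset.getFile(eb.getLocalBlock()); // pool-local API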

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestBlockMissingException.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestBlockMissingException.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestBlockMissingException.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestBlockMissingException.java Fri Sep  3 23:38:21 2010
@@ -19,21 +19,18 @@ package org.apache.hadoop.hdfs;
 
 import java.io.File;
 import java.io.IOException;
-import java.util.Properties;
 
 import junit.framework.TestCase;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.BlockMissingException;
@@ -64,7 +61,7 @@ public class TestBlockMissingException e
       // extract block locations from File system. Wait till file is closed.
       LocatedBlocks locations = null;
       locations = fileSys.dfs.getNamenode().getBlockLocations(file1.toString(),
-                                                             0, numBlocks * blockSize);
+          0, numBlocks * blockSize);
       // remove block of file
       LOG.info("Remove first block of file");
       corruptBlock(file1, locations.get(0).getBlock());
@@ -139,7 +136,7 @@ public class TestBlockMissingException e
   //
   // Corrupt specified block of file
   //
-  void corruptBlock(Path file, Block blockNum) throws IOException {
+  void corruptBlock(Path file, ExtendedBlock blockNum) throws IOException {
     long id = blockNum.getBlockId();
 
     // Now deliberately remove/truncate data blocks from the block.

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestClientBlockVerification.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestClientBlockVerification.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestClientBlockVerification.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestClientBlockVerification.java Fri Sep  3 23:38:21 2010
@@ -26,8 +26,8 @@ import java.util.List;
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.fs.Path;
@@ -82,8 +82,7 @@ public class TestClientBlockVerification
     int offset, int lenToRead) throws IOException {
     InetSocketAddress targetAddr = null;
     Socket s = null;
-    BlockReader blockReader = null;
-    Block block = testBlock.getBlock();
+    ExtendedBlock block = testBlock.getBlock();
     DatanodeInfo[] nodes = testBlock.getLocations();
     targetAddr = NetUtils.createSocketAddr(nodes[0].getName());
     s = new Socket();

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java Fri Sep  3 23:38:21 2010
@@ -23,7 +23,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -51,7 +51,7 @@ public class TestClientProtocolForPipeli
       Path file = new Path("dataprotocol.dat");    
       DFSTestUtil.createFile(fileSys, file, 1L, (short)numDataNodes, 0L);
       // get the first blockid for the file
-      Block firstBlock = DFSTestUtil.getFirstBlock(fileSys, file);
+      ExtendedBlock firstBlock = DFSTestUtil.getFirstBlock(fileSys, file);
 
       // test getNewStampAndToken on a finalized block
       try {
@@ -64,8 +64,8 @@ public class TestClientProtocolForPipeli
       // test getNewStampAndToken on a non-existent block
       try {
         long newBlockId = firstBlock.getBlockId() + 1;
-        Block newBlock = new Block(newBlockId, 0, 
-            firstBlock.getGenerationStamp());
+        ExtendedBlock newBlock = new ExtendedBlock(firstBlock.getPoolId(),
+            newBlockId, 0, firstBlock.getGenerationStamp());
         namenode.updateBlockForPipeline(newBlock, "");
         Assert.fail("Cannot get a new GS from a non-existent block");
       } catch (IOException e) {

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java Fri Sep  3 23:38:21 2010
@@ -42,9 +42,9 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -157,7 +157,7 @@ public class TestDFSClientRetries extend
     };
     when(mockNN.addBlock(anyString(), 
                          anyString(),
-                         any(Block.class),
+                         any(ExtendedBlock.class),
                          any(DatanodeInfo[].class))).thenAnswer(answer);
 
     final DFSClient client = new DFSClient(null, mockNN, conf, null);

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java Fri Sep  3 23:38:21 2010
@@ -43,10 +43,10 @@ import org.apache.hadoop.fs.FSDataInputS
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
@@ -144,7 +144,7 @@ public class TestDataTransferProtocol ex
     in.readFully(arr);
   }
   
-  private void writeZeroLengthPacket(Block block, String description)
+  private void writeZeroLengthPacket(ExtendedBlock block, String description)
   throws IOException {
     sendOut.writeByte((byte)DataChecksum.CHECKSUM_CRC32);
     sendOut.writeInt(512);         // checksum size
@@ -163,7 +163,7 @@ public class TestDataTransferProtocol ex
     sendRecvData(description, false);
   }
   
-  private void testWrite(Block block, BlockConstructionStage stage, long newGS,
+  private void testWrite(ExtendedBlock block, BlockConstructionStage stage, long newGS,
       String description, Boolean eofExcepted) throws IOException {
     sendBuf.reset();
     recvBuf.reset();
@@ -199,7 +199,7 @@ public class TestDataTransferProtocol ex
       Path file = new Path("dataprotocol.dat");    
       DFSTestUtil.createFile(fileSys, file, 1L, (short)numDataNodes, 0L);
       // get the first blockid for the file
-      Block firstBlock = DFSTestUtil.getFirstBlock(fileSys, file);
+      ExtendedBlock firstBlock = DFSTestUtil.getFirstBlock(fileSys, file);
       // test PIPELINE_SETUP_CREATE on a finalized block
       testWrite(firstBlock, BlockConstructionStage.PIPELINE_SETUP_CREATE, 0L,
           "Cannot create an existing block", true);
@@ -237,8 +237,8 @@ public class TestDataTransferProtocol ex
 
       /* Test writing to a new block */
       long newBlockId = firstBlock.getBlockId() + 1;
-      Block newBlock = new Block(newBlockId, 0, 
-          firstBlock.getGenerationStamp());
+      ExtendedBlock newBlock = new ExtendedBlock(firstBlock.getPoolId(),
+          newBlockId, 0, firstBlock.getGenerationStamp());
 
       // test PIPELINE_SETUP_CREATE on a new block
       testWrite(newBlock, BlockConstructionStage.PIPELINE_SETUP_CREATE, 0L,
@@ -334,7 +334,7 @@ public class TestDataTransferProtocol ex
     createFile(fileSys, file, fileLen);
 
     // get the first blockid for the file
-    Block firstBlock = DFSTestUtil.getFirstBlock(fileSys, file);
+    ExtendedBlock firstBlock = DFSTestUtil.getFirstBlock(fileSys, file);
     long newBlockId = firstBlock.getBlockId() + 1;
 
     recvBuf.reset();

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java Fri Sep  3 23:38:21 2010
@@ -30,8 +30,8 @@ import java.util.Random;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
 import org.apache.hadoop.fs.FileSystem;
@@ -288,7 +288,7 @@ public class TestDatanodeBlockScanner ex
     fs = cluster.getFileSystem();
     Path file1 = new Path("/tmp/testBlockCorruptRecovery/file");
     DFSTestUtil.createFile(fs, file1, 1024, numReplicas, 0);
-    Block blk = DFSTestUtil.getFirstBlock(fs, file1);
+    ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, file1);
     String block = blk.getBlockName();
     
     dfsClient = new DFSClient(new InetSocketAddress("localhost", 
@@ -325,7 +325,7 @@ public class TestDatanodeBlockScanner ex
 
     // Loop until all corrupt replicas are reported
     int corruptReplicaSize = cluster.getNamesystem().
-                              numCorruptReplicas(blk);
+                              numCorruptReplicas(blk.getLocalBlock());
     while (corruptReplicaSize != numCorruptReplicas) {
       try {
         IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(), 
@@ -339,7 +339,7 @@ public class TestDatanodeBlockScanner ex
       } catch (InterruptedException ignore) {
       }
       corruptReplicaSize = cluster.getNamesystem().
-                              numCorruptReplicas(blk);
+                              numCorruptReplicas(blk.getLocalBlock());
     }
     
     // Loop until the block recovers after replication
@@ -360,7 +360,7 @@ public class TestDatanodeBlockScanner ex
     // Make sure the corrupt replica is invalidated and removed from
     // corruptReplicasMap
     corruptReplicaSize = cluster.getNamesystem().
-                          numCorruptReplicas(blk);
+                          numCorruptReplicas(blk.getLocalBlock());
     while (corruptReplicaSize != 0 || replicaCount != numReplicas) {
       try {
         LOG.info("Looping until corrupt replica is invalidated");
@@ -368,7 +368,7 @@ public class TestDatanodeBlockScanner ex
       } catch (InterruptedException ignore) {
       }
       corruptReplicaSize = cluster.getNamesystem().
-                            numCorruptReplicas(blk);
+                            numCorruptReplicas(blk.getLocalBlock());
       blocks = dfsClient.getNamenode().
                  getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
       replicaCount = blocks.get(0).getLocations().length;

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java Fri Sep  3 23:38:21 2010
@@ -30,7 +30,7 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FileUtil.HardLink;
-import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -134,8 +134,8 @@ public class TestFileAppend extends Test
       // Create hard links for a few of the blocks
       //
       for (int i = 0; i < blocks.size(); i = i + 2) {
-        Block b = blocks.get(i).getBlock();
-        File f = dataset.getFile(b);
+        ExtendedBlock b = blocks.get(i).getBlock();
+        File f = dataset.getFile(b.getLocalBlock());
         File link = new File(f.toString() + ".link");
         System.out.println("Creating hardlink for File " + f + " to " + link);
         HardLink.createHardLink(f, link);
@@ -145,20 +145,20 @@ public class TestFileAppend extends Test
       // Detach all blocks. This should remove hardlinks (if any)
       //
       for (int i = 0; i < blocks.size(); i++) {
-        Block b = blocks.get(i).getBlock();
+        ExtendedBlock b = blocks.get(i).getBlock();
         System.out.println("testCopyOnWrite detaching block " + b);
         assertTrue("Detaching block " + b + " should have returned true",
-            dataset.unlinkBlock(b, 1));
+            dataset.unlinkBlock(b.getLocalBlock(), 1));
       }
 
       // Since the blocks were already detached earlier, these calls should
       // return false
       //
       for (int i = 0; i < blocks.size(); i++) {
-        Block b = blocks.get(i).getBlock();
+        ExtendedBlock b = blocks.get(i).getBlock();
         System.out.println("testCopyOnWrite detaching block " + b);
         assertTrue("Detaching block " + b + " should have returned false",
-            !dataset.unlinkBlock(b, 1));
+            !dataset.unlinkBlock(b.getLocalBlock(), 1));
       }
 
     } finally {

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java Fri Sep  3 23:38:21 2010
@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -191,14 +192,14 @@ public class TestFileAppend3 extends jun
     final LocatedBlocks locatedblocks = fs.dfs.getNamenode().getBlockLocations(p.toString(), 0L, len1);
     assertEquals(1, locatedblocks.locatedBlockCount());
     final LocatedBlock lb = locatedblocks.get(0);
-    final Block blk = lb.getBlock();
+    final ExtendedBlock blk = lb.getBlock();
     assertEquals(len1, lb.getBlockSize());
 
     DatanodeInfo[] datanodeinfos = lb.getLocations();
     assertEquals(repl, datanodeinfos.length);
     final DataNode dn = cluster.getDataNode(datanodeinfos[0].getIpcPort());
     final FSDataset data = (FSDataset)dn.getFSDataset();
-    final RandomAccessFile raf = new RandomAccessFile(data.getBlockFile(blk), "rw");
+    final RandomAccessFile raf = new RandomAccessFile(data.getBlockFile(blk.getLocalBlock()), "rw");
     AppendTestUtil.LOG.info("dn=" + dn + ", blk=" + blk + " (length=" + blk.getNumBytes() + ")");
     assertEquals(len1, raf.length());
     raf.setLength(0);
@@ -260,7 +261,7 @@ public class TestFileAppend3 extends jun
     final int numblock = locatedblocks.locatedBlockCount();
     for(int i = 0; i < numblock; i++) {
       final LocatedBlock lb = locatedblocks.get(i);
-      final Block blk = lb.getBlock();
+      final ExtendedBlock blk = lb.getBlock();
       final long size = lb.getBlockSize();
       if (i < numblock - 1) {
         assertEquals(BLOCK_SIZE, size);