Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2010/09/04 01:38:23 UTC

svn commit: r992508 [2/2] - in /hadoop/hdfs/branches/HDFS-1052: ./ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/protocol/ src/java/org/apache/hadoop/hdfs/security/token/block/ src/java/org/apache/hadoop/hdfs/server/datanode/ src/jav...

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend4.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend4.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend4.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend4.java Fri Sep  3 23:38:21 2010
@@ -38,6 +38,7 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
@@ -158,7 +159,7 @@ public class TestFileAppend4 {
       // Delay completeFile
       DelayAnswer delayer = new DelayAnswer();
       doAnswer(delayer).when(spyNN).complete(
-          anyString(), anyString(), (Block)anyObject());
+          anyString(), anyString(), (ExtendedBlock)anyObject());
  
       DFSClient client = new DFSClient(null, spyNN, conf, null);
       file1 = new Path("/testRecoverFinalized");
@@ -228,7 +229,8 @@ public class TestFileAppend4 {
  
       // Delay completeFile
       DelayAnswer delayer = new DelayAnswer();
-      doAnswer(delayer).when(spyNN).complete(anyString(), anyString(), (Block)anyObject());
+      doAnswer(delayer).when(spyNN).complete(anyString(), anyString(),
+          (ExtendedBlock) anyObject());
  
       DFSClient client = new DFSClient(null, spyNN, conf, null);
       file1 = new Path("/testCompleteOtherLease");

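The pattern running through this whole commit is visible in the two hunks above: call sites that used to traffic in bare Blocks now take ExtendedBlock, which qualifies a block with the ID of the block pool it belongs to. Below is a minimal sketch of that relationship, assuming only the constructors and accessors this diff itself exercises (the real class lives in org.apache.hadoop.hdfs.protocol; the class name and values here are illustrative):

    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

    public class ExtendedBlockSketch {
      public static void main(String[] args) {
        // Pre-federation identity: block ID, length, generation stamp.
        Block local = new Block(1000L, 4096L, 2000L);

        // Federated identity: the same three fields plus a pool ID, so
        // one datanode can hold blocks for several namespaces.
        ExtendedBlock eb = new ExtendedBlock("pool-1", 1000L, 4096L, 2000L);

        // getLocalBlock() strips the pool ID for code paths (block maps,
        // on-disk helpers) still keyed by the pool-local Block.
        assert eb.getLocalBlock().getBlockId() == local.getBlockId();
        assert "pool-1".equals(eb.getPoolId());
      }
    }
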
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java Fri Sep  3 23:38:21 2010
@@ -38,9 +38,9 @@ import org.apache.hadoop.fs.ChecksumExce
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -131,7 +131,7 @@ public class TestFileCorruption extends 
       // get the block
       File dataDir = new File(cluster.getDataDirectory(),
           "data1" + MiniDFSCluster.FINALIZED_DIR_NAME);
-      Block blk = getBlock(dataDir);
+      ExtendedBlock blk = getBlock(dataDir);
       if (blk == null) {
         blk = getBlock(new File(cluster.getDataDirectory(),
           "dfs/data/data2" + MiniDFSCluster.FINALIZED_DIR_NAME));
@@ -159,7 +159,7 @@ public class TestFileCorruption extends 
     
   }
   
-  private Block getBlock(File dataDir) {
+  private ExtendedBlock getBlock(File dataDir) {
     assertTrue("data directory does not exist", dataDir.exists());
     File[] blocks = dataDir.listFiles();
     assertTrue("Blocks do not exist in dataDir", (blocks != null) && (blocks.length > 0));
@@ -186,7 +186,8 @@ public class TestFileCorruption extends 
         break;
       }
     }
-    return new Block(blockId, blocks[idx].length(), blockTimeStamp);
+    // TODO:FEDERATION cleanup when BlockPoolID support in Datanode is complete
+    return new ExtendedBlock("TODO", blockId, blocks[idx].length(), blockTimeStamp);
   }
 
   /** check if ClientProtocol.getCorruptFiles() returns a file that has missing blocks */

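The "TODO" literal above is deliberate: per the TODO:FEDERATION comment, datanodes in this branch do not yet know their block pool ID, so tests that rebuild a block from on-disk file names fill the pool slot with a placeholder. A hedged sketch of that stopgap (the helper name is illustrative, the constructor is the one the hunk uses):

    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

    public class PlaceholderPoolSketch {
      // Illustrative helper: wraps block metadata recovered from a block
      // file's name into an ExtendedBlock, using a placeholder pool ID
      // until BlockPoolID support lands in the datanode.
      static ExtendedBlock fromDiskMetadata(long blockId, long length,
          long genStamp) {
        return new ExtendedBlock("TODO", blockId, length, genStamp);
      }
    }
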
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java Fri Sep  3 23:38:21 2010
@@ -23,6 +23,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -34,7 +35,7 @@ public class TestLeaseRecovery extends j
   static final short REPLICATION_NUM = (short)3;
   private static final long LEASE_PERIOD = 300L;
 
-  static void checkMetaInfo(Block b, DataNode dn
+  static void checkMetaInfo(ExtendedBlock b, DataNode dn
       ) throws IOException {
     TestInterDatanodeProtocol.checkMetaInfo(b, dn);
   }
@@ -96,7 +97,7 @@ public class TestLeaseRecovery extends j
       }
       
       //verify Block Info
-      Block lastblock = locatedblock.getBlock();
+      ExtendedBlock lastblock = locatedblock.getBlock();
       DataNode.LOG.info("newblocks=" + lastblock);
       for(int i = 0; i < REPLICATION_NUM; i++) {
         checkMetaInfo(lastblock, datanodes[i]);

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java Fri Sep  3 23:38:21 2010
@@ -66,8 +66,8 @@ public class TestMissingBlocksAlert exte
 
 
       // Corrupt the block
-      String block = DFSTestUtil.getFirstBlock(dfs, corruptFile).getBlockName();
-      TestDatanodeBlockScanner.corruptReplica(block, 0);
+      String block = DFSTestUtil.getFirstBlock(dfs, corruptFile).getLocalBlock().getBlockName();
+      assertTrue(TestDatanodeBlockScanner.corruptReplica(block, 0));
 
       // read the file so that the corrupt block is reported to NN
       FSDataInputStream in = dfs.open(corruptFile); 

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java Fri Sep  3 23:38:21 2010
@@ -32,6 +32,7 @@ import org.apache.commons.logging.impl.L
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.io.TestWritable;
 import org.apache.hadoop.ipc.Client;
 import org.apache.hadoop.ipc.RPC;
@@ -81,9 +82,9 @@ public class TestBlockToken {
 
   long blockKeyUpdateInterval = 10 * 60 * 1000; // 10 mins
   long blockTokenLifetime = 2 * 60 * 1000; // 2 mins
-  Block block1 = new Block(0L);
-  Block block2 = new Block(10L);
-  Block block3 = new Block(-108L);
+  ExtendedBlock block1 = new ExtendedBlock("0", 0L);
+  ExtendedBlock block2 = new ExtendedBlock("10", 10L);
+  ExtendedBlock block3 = new ExtendedBlock("-10", -108L);
 
   private static class getLengthAnswer implements Answer<Long> {
     BlockTokenSecretManager sm;
@@ -99,7 +100,7 @@ public class TestBlockToken {
     public Long answer(InvocationOnMock invocation) throws IOException {
       Object args[] = invocation.getArguments();
       assertEquals(1, args.length);
-      Block block = (Block) args[0];
+      ExtendedBlock block = (ExtendedBlock) args[0];
       Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
           .getTokenIdentifiers();
       assertEquals("Only one BlockTokenIdentifier expected", 1, tokenIds.size());
@@ -117,7 +118,7 @@ public class TestBlockToken {
   }
 
   private BlockTokenIdentifier generateTokenId(BlockTokenSecretManager sm,
-      Block block, EnumSet<BlockTokenSecretManager.AccessMode> accessModes)
+      ExtendedBlock block, EnumSet<BlockTokenSecretManager.AccessMode> accessModes)
       throws IOException {
     Token<BlockTokenIdentifier> token = sm.generateToken(block, accessModes);
     BlockTokenIdentifier id = sm.createIdentifier();
@@ -197,7 +198,7 @@ public class TestBlockToken {
     id.readFields(new DataInputStream(new ByteArrayInputStream(token
         .getIdentifier())));
     doAnswer(new getLengthAnswer(sm, id)).when(mockDN).getReplicaVisibleLength(
-        any(Block.class));
+        any(ExtendedBlock.class));
 
     final Server server = RPC.getServer(ClientDatanodeProtocol.class, mockDN,
         ADDRESS, 0, 5, true, conf, sm);

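One detail worth calling out in the token test: when a mocked method's parameter type changes, the Mockito matcher must change with it, or the stub quietly stops matching and the mock falls back to its default return value. A sketch of the updated stubbing, assuming the getReplicaVisibleLength(ExtendedBlock) signature the hunk above exercises (class and method names here are illustrative):

    import static org.mockito.Mockito.any;
    import static org.mockito.Mockito.doReturn;
    import static org.mockito.Mockito.mock;

    import java.io.IOException;
    import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

    public class MatcherSketch {
      // The stub now matches on ExtendedBlock; a stale any(Block.class)
      // matcher would never fire against the new signature.
      static ClientDatanodeProtocol stubbedDatanode() throws IOException {
        ClientDatanodeProtocol dn = mock(ClientDatanodeProtocol.class);
        doReturn(1024L).when(dn)
            .getReplicaVisibleLength(any(ExtendedBlock.class));
        return dn;
      }
    }
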
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java Fri Sep  3 23:38:21 2010
@@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
@@ -82,7 +83,8 @@ public class TestBalancer extends TestCa
   /* fill up a cluster with <code>numNodes</code> datanodes 
    * whose used space to be <code>size</code>
    */
-  private Block[] generateBlocks(Configuration conf, long size, short numNodes) throws IOException {
+  private ExtendedBlock[] generateBlocks(Configuration conf, long size,
+      short numNodes) throws IOException {
     cluster = new MiniDFSCluster( conf, numNodes, true, null);
     try {
       cluster.waitActive();
@@ -96,10 +98,11 @@ public class TestBalancer extends TestCa
       getBlockLocations(fileName, 0, fileLen).getLocatedBlocks();
 
       int numOfBlocks = locatedBlocks.size();
-      Block[] blocks = new Block[numOfBlocks];
+      ExtendedBlock[] blocks = new ExtendedBlock[numOfBlocks];
       for(int i=0; i<numOfBlocks; i++) {
-        Block b = locatedBlocks.get(i).getBlock();
-        blocks[i] = new Block(b.getBlockId(), b.getNumBytes(), b.getGenerationStamp());
+        ExtendedBlock b = locatedBlocks.get(i).getBlock();
+        blocks[i] = new ExtendedBlock(b.getPoolId(), b.getBlockId(), b
+            .getNumBytes(), b.getGenerationStamp());
       }
 
       return blocks;
@@ -109,7 +112,7 @@ public class TestBalancer extends TestCa
   }
 
   /* Distribute all blocks according to the given distribution */
-  Block[][] distributeBlocks(Block[] blocks, short replicationFactor, 
+  Block[][] distributeBlocks(ExtendedBlock[] blocks, short replicationFactor, 
       final long[] distribution ) {
     // make a copy
     long[] usedSpace = new long[distribution.length];
@@ -128,7 +131,7 @@ public class TestBalancer extends TestCa
           int chosenIndex = r.nextInt(usedSpace.length);
           if( usedSpace[chosenIndex]>0 ) {
             notChosen = false;
-            blockReports.get(chosenIndex).add(blocks[i]);
+            blockReports.get(chosenIndex).add(blocks[i].getLocalBlock());
             usedSpace[chosenIndex] -= blocks[i].getNumBytes();
           }
         }
@@ -159,7 +162,8 @@ public class TestBalancer extends TestCa
     }
 
     // fill the cluster
-    Block[] blocks = generateBlocks(conf, totalUsedSpace, (short)numDatanodes);
+    ExtendedBlock[] blocks = generateBlocks(conf, totalUsedSpace,
+        (short) numDatanodes);
 
     // redistribute blocks
     Block[][] blocksDN = distributeBlocks(

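The balancer hunks show where the boundary sits: generateBlocks() now returns ExtendedBlock[] because that is what LocatedBlock hands back, while the simulated datanodes are still injected with pool-local Blocks, hence the getLocalBlock() unwrap inside distributeBlocks(). A sketch of that conversion (class and method names illustrative, Java 5/6 idiom to match the branch):

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

    public class UnwrapSketch {
      // Datanode-side block reports stay pool-local, so each ExtendedBlock
      // chosen for a node is unwrapped before joining that node's report.
      static Block[] toLocalReport(List<ExtendedBlock> assigned) {
        List<Block> report = new ArrayList<Block>(assigned.size());
        for (ExtendedBlock eb : assigned) {
          report.add(eb.getLocalBlock());
        }
        return report.toArray(new Block[report.size()]);
      }
    }
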
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java Fri Sep  3 23:38:21 2010
@@ -34,6 +34,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
@@ -793,8 +794,8 @@ public class SimulatedFSDataset  impleme
   @Override
   public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
   throws IOException {
-    Block b = rBlock.getBlock();
-    BInfo binfo = blockMap.get(b);
+    ExtendedBlock b = rBlock.getBlock();
+    BInfo binfo = blockMap.get(b.getLocalBlock());
     if (binfo == null) {
       throw new IOException("No such Block " + b );  
     }

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java Fri Sep  3 23:38:21 2010
@@ -41,9 +41,9 @@ import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
@@ -118,7 +118,7 @@ public class TestBlockReplacement extend
       LocatedBlock block = locatedBlocks.get(0);
       DatanodeInfo[]  oldNodes = block.getLocations();
       assertEquals(oldNodes.length, 3);
-      Block b = block.getBlock();
+      ExtendedBlock b = block.getBlock();
       
       // add a new datanode to the cluster
       cluster.startDataNodes(CONF, 1, true, null, NEW_RACKS);
@@ -222,7 +222,7 @@ public class TestBlockReplacement extend
    * 
    * Return true if a block is successfully copied; otherwise false.
    */
-  private boolean replaceBlock( Block block, DatanodeInfo source,
+  private boolean replaceBlock( ExtendedBlock block, DatanodeInfo source,
       DatanodeInfo sourceProxy, DatanodeInfo destination) throws IOException {
     Socket sock = new Socket();
     sock.connect(NetUtils.createSocketAddr(

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java Fri Sep  3 23:38:21 2010
@@ -28,6 +28,7 @@ import org.apache.hadoop.hdfs.Distribute
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -132,6 +133,7 @@ public class TestBlockReport {
     }
     cluster.getNameNode().blockReport(
       cluster.getDataNodes().get(DN_N0).dnRegistration,
+      cluster.getNamesystem().getPoolId(),
       new BlockListAsLongs(blocks, null).getBlockListAsLongs());
 
     List<LocatedBlock> blocksAfterReport =
@@ -143,7 +145,7 @@ public class TestBlockReport {
     }
 
     for (int i = 0; i < blocksAfterReport.size(); i++) {
-      Block b = blocksAfterReport.get(i).getBlock();
+      ExtendedBlock b = blocksAfterReport.get(i).getBlock();
       assertEquals("Length of " + i + "th block is incorrect",
         oldLengths[i], b.getNumBytes());
     }
@@ -184,7 +186,7 @@ public class TestBlockReport {
     }
 
     for (Integer aRemovedIndex : removedIndex) {
-      blocks2Remove.add(lBlocks.get(aRemovedIndex).getBlock());
+      blocks2Remove.add(lBlocks.get(aRemovedIndex).getBlock().getLocalBlock());
     }
     ArrayList<Block> blocks = locatedToBlocks(lBlocks, removedIndex);
 
@@ -208,6 +210,7 @@ public class TestBlockReport {
 
     cluster.getNameNode().blockReport(
       cluster.getDataNodes().get(DN_N0).dnRegistration,
+      cluster.getNamesystem().getPoolId(),
       new BlockListAsLongs(blocks, null).getBlockListAsLongs());
 
     cluster.getNamesystem().computeDatanodeWork();
@@ -244,6 +247,7 @@ public class TestBlockReport {
     DatanodeCommand dnCmd =
       cluster.getNameNode().blockReport(
         cluster.getDataNodes().get(DN_N0).dnRegistration,
+        cluster.getNamesystem().getPoolId(),
         new BlockListAsLongs(blocks, null).getBlockListAsLongs());
     if(LOG.isDebugEnabled()) {
       LOG.debug("Got the command: " + dnCmd);
@@ -293,6 +297,7 @@ public class TestBlockReport {
 
     cluster.getNameNode().blockReport(
       cluster.getDataNodes().get(DN_N1).dnRegistration,
+      cluster.getNamesystem().getPoolId(),
       new BlockListAsLongs(blocks, null).getBlockListAsLongs());
     printStats();
     assertEquals("Wrong number of PendingReplication Blocks",
@@ -340,6 +345,7 @@ public class TestBlockReport {
     }
     cluster.getNameNode().blockReport(
       cluster.getDataNodes().get(DN_N1).dnRegistration,
+      cluster.getNamesystem().getPoolId(),
       new BlockListAsLongs(blocks, null).getBlockListAsLongs());
     printStats();
     assertEquals("Wrong number of Corrupted blocks",
@@ -362,6 +368,7 @@ public class TestBlockReport {
     }
     cluster.getNameNode().blockReport(
       cluster.getDataNodes().get(DN_N1).dnRegistration,
+      cluster.getNamesystem().getPoolId(),
       new BlockListAsLongs(blocks, null).getBlockListAsLongs());
     printStats();
 
@@ -409,6 +416,7 @@ public class TestBlockReport {
 
       cluster.getNameNode().blockReport(
         cluster.getDataNodes().get(DN_N1).dnRegistration,
+        cluster.getNamesystem().getPoolId(),
         new BlockListAsLongs(blocks, null).getBlockListAsLongs());
       printStats();
       assertEquals("Wrong number of PendingReplication blocks",
@@ -452,6 +460,7 @@ public class TestBlockReport {
                                                 
       cluster.getNameNode().blockReport(
         cluster.getDataNodes().get(DN_N1).dnRegistration,
+        cluster.getNamesystem().getPoolId(),
         new BlockListAsLongs(blocks, null).getBlockListAsLongs());
       printStats();
       assertEquals("Wrong number of PendingReplication blocks",
@@ -593,7 +602,7 @@ public class TestBlockReport {
         }
         continue;
       }
-      newList.add(new Block(locatedBlks.get(i).getBlock()));
+      newList.add(new Block(locatedBlks.get(i).getBlock().getLocalBlock()));
     }
     return newList;
   }

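Every blockReport() call in this test gains the same middle argument. A sketch of the new call shape, assuming only the MiniDFSCluster accessors the hunks above use (getNamesystem().getPoolId(), the public dnRegistration field); the wrapper class and method are illustrative:

    import java.io.IOException;
    import java.util.ArrayList;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
    import org.apache.hadoop.hdfs.server.datanode.DataNode;

    public class ReportSketch {
      // The report is now scoped to a block pool: the same long-encoded
      // block list as before, with the pool ID saying which namespace's
      // blocks it describes.
      static void sendReport(MiniDFSCluster cluster, DataNode dn,
          ArrayList<Block> blocks) throws IOException {
        cluster.getNameNode().blockReport(
            dn.dnRegistration,
            cluster.getNamesystem().getPoolId(),
            new BlockListAsLongs(blocks, null).getBlockListAsLongs());
      }
    }
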
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java Fri Sep  3 23:38:21 2010
@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -119,7 +120,8 @@ public class TestDataNodeVolumeFailure e
     // make sure a block report is sent 
     DataNode dn = cluster.getDataNodes().get(1); //corresponds to dir data3
     long[] bReport = dn.getFSDataset().getBlockReport().getBlockListAsLongs();
-    cluster.getNameNode().blockReport(dn.dnRegistration, bReport);
+    String poolId = cluster.getNamesystem().getPoolId();
+    cluster.getNameNode().blockReport(dn.dnRegistration, poolId, bReport);
 
     // verify number of blocks and files...
     verify(filename, filesize);
@@ -197,7 +199,7 @@ public class TestDataNodeVolumeFailure e
     
     for(LocatedBlock lb : locatedBlocks) {
       DatanodeInfo dinfo = lb.getLocations()[1];
-      Block b = lb.getBlock();
+      ExtendedBlock b = lb.getBlock();
     //  System.out.println(i++ + ". " + b.getBlockName());
       try {
         accessBlock(dinfo, lb);
@@ -238,8 +240,7 @@ public class TestDataNodeVolumeFailure e
     throws IOException {
     InetSocketAddress targetAddr = null;
     Socket s = null;
-    BlockReader blockReader = null; 
-    Block block = lblock.getBlock(); 
+    ExtendedBlock block = lblock.getBlock(); 
    
     targetAddr = NetUtils.createSocketAddr(datanode.getName());
       
@@ -247,7 +248,7 @@ public class TestDataNodeVolumeFailure e
     s.connect(targetAddr, HdfsConstants.READ_TIMEOUT);
     s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
 
-    blockReader = 
+    BlockReader blockReader = 
       BlockReader.newBlockReader(s, targetAddr.toString() + ":" + 
           block.getBlockId(), 
           block.getBlockId(), 

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java Fri Sep  3 23:38:21 2010
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
@@ -46,7 +47,7 @@ import org.junit.Test;
  * This tests InterDataNodeProtocol for block handling. 
  */
 public class TestInterDatanodeProtocol {
-  public static void checkMetaInfo(Block b, DataNode dn) throws IOException {
+  public static void checkMetaInfo(ExtendedBlock b, DataNode dn) throws IOException {
     Block metainfo = dn.data.getStoredBlock(b.getBlockId());
     Assert.assertEquals(b.getBlockId(), metainfo.getBlockId());
     Assert.assertEquals(b.getNumBytes(), metainfo.getNumBytes());
@@ -100,7 +101,7 @@ public class TestInterDatanodeProtocol {
       datanode.blockScannerThread.interrupt();
 
       //verify BlockMetaDataInfo
-      Block b = locatedblock.getBlock();
+      ExtendedBlock b = locatedblock.getBlock();
       InterDatanodeProtocol.LOG.info("b=" + b + ", " + b.getClass());
       checkMetaInfo(b, datanode);
       long recoveryId = b.getGenerationStamp() + 1;
@@ -108,7 +109,7 @@ public class TestInterDatanodeProtocol {
           new RecoveringBlock(b, locatedblock.getLocations(), recoveryId));
 
       //verify updateBlock
-      Block newblock = new Block(
+      ExtendedBlock newblock = new ExtendedBlock(b.getPoolId(),
           b.getBlockId(), b.getNumBytes()/2, b.getGenerationStamp()+1);
       idp.updateReplicaUnderRecovery(b, recoveryId, newblock.getNumBytes());
       checkMetaInfo(newblock, datanode);
@@ -208,7 +209,7 @@ public class TestInterDatanodeProtocol {
     }
   }
 
-  /** Test {@link FSDataset#updateReplicaUnderRecovery(ReplicaUnderRecovery, long, long)} */
+  /** Test {@link FSDataset#updateReplicaUnderRecovery(Block, long, long)} */
   @Test
   public void testUpdateReplicaUnderRecovery() throws IOException {
     final Configuration conf = new HdfsConfiguration();
@@ -237,7 +238,7 @@ public class TestInterDatanodeProtocol {
       final FSDataset fsdataset = (FSDataset)datanode.data;
 
       //initReplicaRecovery
-      final Block b = locatedblock.getBlock();
+      final ExtendedBlock b = locatedblock.getBlock();
       final long recoveryid = b.getGenerationStamp() + 1;
       final long newlength = b.getNumBytes() - 1;
       final ReplicaRecoveryInfo rri = fsdataset.initReplicaRecovery(

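The updateReplicaUnderRecovery hunk above captures the recovery bookkeeping in miniature: recovery advances the generation stamp and may shorten the replica, and the expected outcome is spelled as a fresh ExtendedBlock in the same pool. A sketch using the four-argument constructor from the hunk (helper name illustrative):

    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

    public class RecoverySketch {
      // Expected post-recovery identity: same pool and block ID, a new
      // length, and a generation stamp past the replica being replaced.
      static ExtendedBlock afterRecovery(ExtendedBlock b, long newLength) {
        return new ExtendedBlock(b.getPoolId(), b.getBlockId(),
            newLength, b.getGenerationStamp() + 1);
      }
    }
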
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Fri Sep  3 23:38:21 2010
@@ -36,6 +36,7 @@ import org.apache.hadoop.fs.permission.F
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -73,7 +74,7 @@ import org.apache.log4j.LogManager;
  * <li>-logLevel L specifies the logging level when the benchmark runs.
  * The default logging level is {@link Level#ERROR}.</li>
  * <li>-UGCacheRefreshCount G will cause the benchmark to call
- * {@link NameNode#refreshUserToGroupsMappings(Configuration)} after
+ * {@link NameNode#refreshUserToGroupsMappings()} after
  * every G operations, which purges the name-node's user group cache.
  * By default the refresh is never called.</li>
  * </ol>
@@ -842,6 +843,7 @@ public class NNThroughputBenchmark {
                           new DataStorage(nsInfo, dnInfo.getStorageID()));
           receivedDNReg.setInfoPort(dnInfo.getInfoPort());
           nameNode.blockReceived( receivedDNReg, 
+                                  nameNode.getNamesystem().getPoolId(),
                                   new Block[] {blocks[i]},
                                   new String[] {DataNode.EMPTY_DEL_HINT});
         }
@@ -937,7 +939,7 @@ public class NNThroughputBenchmark {
         nameNode.create(fileName, FsPermission.getDefault(), clientName,
             new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.OVERWRITE)), true, replication,
             BLOCK_SIZE);
-        Block lastBlock = addBlocks(fileName, clientName);
+        ExtendedBlock lastBlock = addBlocks(fileName, clientName);
         nameNode.complete(fileName, clientName, lastBlock);
       }
       // prepare block reports
@@ -946,18 +948,19 @@ public class NNThroughputBenchmark {
       }
     }
 
-    private Block addBlocks(String fileName, String clientName)
+    private ExtendedBlock addBlocks(String fileName, String clientName)
     throws IOException {
-      Block prevBlock = null;
+      ExtendedBlock prevBlock = null;
       for(int jdx = 0; jdx < blocksPerFile; jdx++) {
         LocatedBlock loc = nameNode.addBlock(fileName, clientName, prevBlock, null);
         prevBlock = loc.getBlock();
         for(DatanodeInfo dnInfo : loc.getLocations()) {
           int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getName());
-          datanodes[dnIdx].addBlock(loc.getBlock());
+          datanodes[dnIdx].addBlock(loc.getBlock().getLocalBlock());
           nameNode.blockReceived(
               datanodes[dnIdx].dnRegistration, 
-              new Block[] {loc.getBlock()},
+              loc.getBlock().getPoolId(),
+              new Block[] {loc.getBlock().getLocalBlock()},
               new String[] {""});
         }
       }
@@ -975,7 +978,8 @@ public class NNThroughputBenchmark {
       assert daemonId < numThreads : "Wrong daemonId.";
       TinyDatanode dn = datanodes[daemonId];
       long start = System.currentTimeMillis();
-      nameNode.blockReport(dn.dnRegistration, dn.getBlockReportList());
+      nameNode.blockReport(dn.dnRegistration, nameNode.getNamesystem()
+          .getPoolId(), dn.getBlockReportList());
       long end = System.currentTimeMillis();
       return end-start;
     }

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java Fri Sep  3 23:38:21 2010
@@ -31,8 +31,8 @@ import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.security.token.block.*;
 import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
@@ -122,7 +122,7 @@ public class TestBlockTokenWithDFS exten
     InetSocketAddress targetAddr = null;
     Socket s = null;
     BlockReader blockReader = null;
-    Block block = lblock.getBlock();
+    ExtendedBlock block = lblock.getBlock();
     try {
       DatanodeInfo[] nodes = lblock.getLocations();
       targetAddr = NetUtils.createSocketAddr(nodes[0].getName());
@@ -352,7 +352,8 @@ public class TestBlockTokenWithDFS exten
       // read should succeed
       tryRead(conf, lblock, true);
       // use a token with wrong blockID
-      Block wrongBlock = new Block(lblock.getBlock().getBlockId() + 1);
+      ExtendedBlock wrongBlock = new ExtendedBlock(lblock.getBlock()
+          .getPoolId(), lblock.getBlock().getBlockId() + 1);
       lblock.setBlockToken(cluster.getNameNode().getNamesystem()
           .blockTokenSecretManager.generateToken(wrongBlock,
               EnumSet.of(BlockTokenSecretManager.AccessMode.READ)));

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java Fri Sep  3 23:38:21 2010
@@ -163,7 +163,7 @@ public class TestBlockUnderConstruction 
       final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
       final List<LocatedBlock> blocks = lb.getLocatedBlocks();
       assertEquals(i, blocks.size());
-      final Block b = blocks.get(blocks.size() - 1).getBlock();
+      final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
       assertTrue(b instanceof BlockInfoUnderConstruction);
 
       if (++i < NUM_BLOCKS) {

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlocksWithNotEnoughRacks.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlocksWithNotEnoughRacks.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlocksWithNotEnoughRacks.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlocksWithNotEnoughRacks.java Fri Sep  3 23:38:21 2010
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -64,10 +65,10 @@ public class TestBlocksWithNotEnoughRack
       DFSTestUtil.createFile(fs, FILE_PATH, 1L, REPLICATION_FACTOR, 1L);
       DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);
       
-      Block b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
+      ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
       final FSNamesystem namesystem = cluster.getNamesystem();
-      int numRacks = namesystem.blockManager.getNumberOfRacks(b);
-      NumberReplicas number = namesystem.blockManager.countNodes(b);
+      int numRacks = namesystem.blockManager.getNumberOfRacks(b.getLocalBlock());
+      NumberReplicas number = namesystem.blockManager.countNodes(b.getLocalBlock());
       int curReplicas = number.liveReplicas();
       int neededReplicationSize = 
                            namesystem.blockManager.neededReplications.size();
@@ -80,8 +81,8 @@ public class TestBlocksWithNotEnoughRack
               (neededReplicationSize > 0) ) {
         LOG.info("Waiting for replication");
         Thread.sleep(600);
-        numRacks = namesystem.blockManager.getNumberOfRacks(b);
-        number = namesystem.blockManager.countNodes(b);
+        numRacks = namesystem.blockManager.getNumberOfRacks(b.getLocalBlock());
+        number = namesystem.blockManager.countNodes(b.getLocalBlock());
         curReplicas = number.liveReplicas();
         neededReplicationSize = 
                            namesystem.blockManager.neededReplications.size();
@@ -118,10 +119,10 @@ public class TestBlocksWithNotEnoughRack
       DFSTestUtil.createFile(fs, FILE_PATH, 1L, REPLICATION_FACTOR, 1L);
       DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);
       
-      Block b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
+      ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
       final FSNamesystem namesystem = cluster.getNamesystem();
-      int numRacks = namesystem.blockManager.getNumberOfRacks(b);
-      NumberReplicas number = namesystem.blockManager.countNodes(b);
+      int numRacks = namesystem.blockManager.getNumberOfRacks(b.getLocalBlock());
+      NumberReplicas number = namesystem.blockManager.countNodes(b.getLocalBlock());
       int curReplicas = number.liveReplicas();
       int neededReplicationSize = 
                            namesystem.blockManager.neededReplications.size();
@@ -136,8 +137,8 @@ public class TestBlocksWithNotEnoughRack
               (neededReplicationSize > 0) ) {
         LOG.info("Waiting for replication");
         Thread.sleep(600);
-        numRacks = namesystem.blockManager.getNumberOfRacks(b);
-        number = namesystem.blockManager.countNodes(b);
+        numRacks = namesystem.blockManager.getNumberOfRacks(b.getLocalBlock());
+        number = namesystem.blockManager.countNodes(b.getLocalBlock());
         curReplicas = number.liveReplicas();
         neededReplicationSize = 
                            namesystem.blockManager.neededReplications.size();

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java Fri Sep  3 23:38:21 2010
@@ -97,13 +97,13 @@ public class TestDeadDatanode {
     waitForDatanodeState(reg.getStorageID(), false, 20000);
 
     DatanodeProtocol dnp = cluster.getNameNode();
-    Block block = new Block(0);
-    Block[] blocks = new Block[] { block };
+    String poolId = cluster.getNamesystem().getPoolId();
+    Block[] blocks = new Block[] { new Block(0) };
     String[] delHints = new String[] { "" };
     
     // Ensure blockReceived call from dead datanode is rejected with IOException
     try {
-      dnp.blockReceived(reg, blocks, delHints);
+      dnp.blockReceived(reg, poolId, blocks, delHints);
       Assert.fail("Expected IOException is not thrown");
     } catch (IOException ex) {
       // Expected
@@ -112,7 +112,7 @@ public class TestDeadDatanode {
     // Ensure blockReport from dead datanode is rejected with IOException
     long[] blockReport = new long[] { 0L, 0L, 0L };
     try {
-      dnp.blockReport(reg, blockReport);
+      dnp.blockReport(reg, poolId, blockReport);
       Assert.fail("Expected IOException is not thrown");
     } catch (IOException ex) {
       // Expected

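TestDeadDatanode exercises both pool-scoped RPCs end to end. Read together, the call sites in this commit imply the protocol shape sketched below; this is a hedged reconstruction from those call sites only, with illustrative parameter names (types, positions, and the DatanodeCommand return are taken from the diff):

    import java.io.IOException;
    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
    import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;

    public interface PoolScopedDatanodeProtocolSketch {
      // Full report of a datanode's blocks for one block pool, encoded as
      // longs; the namenode may answer with a command.
      DatanodeCommand blockReport(DatanodeRegistration reg, String poolId,
          long[] blocksAsLongs) throws IOException;

      // Incremental notification of newly received blocks in one pool,
      // with per-block deletion hints.
      void blockReceived(DatanodeRegistration reg, String poolId,
          Block[] blocks, String[] delHints) throws IOException;
    }
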
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java Fri Sep  3 23:38:21 2010
@@ -28,6 +28,7 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas;
 
 import junit.framework.TestCase;
@@ -51,7 +52,7 @@ public class TestNodeCount extends TestC
       final Path FILE_PATH = new Path("/testfile");
       DFSTestUtil.createFile(fs, FILE_PATH, 1L, REPLICATION_FACTOR, 1L);
       DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);
-      Block block = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
+      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
 
       // keep a copy of all datanode descriptor
       DatanodeDescriptor[] datanodes = 
@@ -80,12 +81,13 @@ public class TestNodeCount extends TestC
       NumberReplicas num = null;
       do {
        synchronized (namesystem) {
-         num = namesystem.blockManager.countNodes(block);
+         num = namesystem.blockManager.countNodes(block.getLocalBlock());
        }
       } while (num.excessReplicas() == 0);
       
       // find out a non-excess node
-      Iterator<DatanodeDescriptor> iter = namesystem.blockManager.blocksMap.nodeIterator(block);
+      Iterator<DatanodeDescriptor> iter = namesystem.blockManager.blocksMap
+          .nodeIterator(block.getLocalBlock());
       DatanodeDescriptor nonExcessDN = null;
       while (iter.hasNext()) {
         DatanodeDescriptor dn = iter.next();
@@ -107,7 +109,7 @@ public class TestNodeCount extends TestC
       
       // The block should be replicated
       do {
-        num = namesystem.blockManager.countNodes(block);
+        num = namesystem.blockManager.countNodes(block.getLocalBlock());
       } while (num.liveReplicas() != REPLICATION_FACTOR);
       
       // restart the first datanode
@@ -116,7 +118,7 @@ public class TestNodeCount extends TestC
       
       // check if excessive replica is detected
       do {
-       num = namesystem.blockManager.countNodes(block);
+       num = namesystem.blockManager.countNodes(block.getLocalBlock());
       } while (num.excessReplicas() != 2);
     } finally {
       cluster.shutdown();

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java Fri Sep  3 23:38:21 2010
@@ -28,8 +28,8 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.TestDatanodeBlockScanner;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 
 import junit.framework.TestCase;
@@ -53,8 +53,9 @@ public class TestOverReplicatedBlocks ex
       DFSTestUtil.waitReplication(fs, fileName, (short)3);
       
       // corrupt the block on datanode 0
-      Block block = DFSTestUtil.getFirstBlock(fs, fileName);
-      TestDatanodeBlockScanner.corruptReplica(block.getBlockName(), 0);
+      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
+      TestDatanodeBlockScanner.corruptReplica(block.getLocalBlock()
+          .getBlockName(), 0);
       DataNodeProperties dnProps = cluster.stopDataNode(0);
       // remove block scanner log to trigger block scanning
       File scanLog = new File(System.getProperty("test.build.data"),
@@ -89,7 +90,8 @@ public class TestOverReplicatedBlocks ex
 
         // corrupt one won't be chosen to be excess one
         // without 4910 the number of live replicas would be 0: block gets lost
-        assertEquals(1, namesystem.blockManager.countNodes(block).liveReplicas());
+        assertEquals(1, namesystem.blockManager.countNodes(block.getLocalBlock())
+            .liveReplicas());
       }
     } finally {
       cluster.shutdown();

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java Fri Sep  3 23:38:21 2010
@@ -24,7 +24,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 
 import junit.framework.TestCase;
 
@@ -44,10 +44,10 @@ public class TestUnderReplicatedBlocks e
       // remove one replica from the blocksMap so block becomes under-replicated
       // but the block does not get put into the under-replicated blocks queue
       final FSNamesystem namesystem = cluster.getNamesystem();
-      Block b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
-      DatanodeDescriptor dn = namesystem.blockManager.blocksMap.nodeIterator(b).next();
-      namesystem.blockManager.addToInvalidates(b, dn);
-      namesystem.blockManager.blocksMap.removeNode(b, dn);
+      ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
+      DatanodeDescriptor dn = namesystem.blockManager.blocksMap.nodeIterator(b.getLocalBlock()).next();
+      namesystem.blockManager.addToInvalidates(b.getLocalBlock(), dn);
+      namesystem.blockManager.blocksMap.removeNode(b.getLocalBlock(), dn);
       
       // increment this file's replication factor
       FsShell shell = new FsShell(conf);

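The namenode tests above all repeat one idiom: blocksMap, countNodes(), and addToInvalidates() are still keyed on the pool-local Block, so a test holding an ExtendedBlock unwraps it at each call. A sketch of unwrapping once and reusing the result; it assumes the FSNamesystem/BlockManager fields the tests touch and sits in the same package, which is what makes those fields reachable:

    package org.apache.hadoop.hdfs.server.namenode;

    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

    class UnwrapOnceSketch {
      // Sketch only: drops one replica's bookkeeping for a block the
      // caller knows as an ExtendedBlock.
      static void dropReplica(FSNamesystem ns, ExtendedBlock b,
          DatanodeDescriptor dn) {
        Block local = b.getLocalBlock();
        ns.blockManager.addToInvalidates(local, dn);
        ns.blockManager.blocksMap.removeNode(local, dn);
      }
    }
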
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java Fri Sep  3 23:38:21 2010
@@ -27,9 +27,9 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -70,12 +70,15 @@ public class TestBlockRecovery {
   private DataNode dn;
   private Configuration conf;
   private final static long RECOVERY_ID = 3000L;
+  // TODO:FEDERATION fix pool ID
+  private final static String POOL_ID = "TODO";
   private final static long BLOCK_ID = 1000L;
   private final static long GEN_STAMP = 2000L;
   private final static long BLOCK_LEN = 3000L;
   private final static long REPLICA_LEN1 = 6000L;
   private final static long REPLICA_LEN2 = 5000L;
-  private final static Block block = new Block(BLOCK_ID, BLOCK_LEN, GEN_STAMP);
+  private final static ExtendedBlock block = new ExtendedBlock(POOL_ID,
+      BLOCK_ID, BLOCK_LEN, GEN_STAMP);
 
   static {
     ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
@@ -447,7 +450,7 @@ public class TestBlockRecovery {
     if(LOG.isDebugEnabled()) {
       LOG.debug("Running " + GenericTestUtils.getMethodName());
     }
-    dn.data.createRbw(block);
+    dn.data.createRbw(block.getLocalBlock());
     try {
       dn.syncBlock(rBlock, initBlockRecords(dn));
       fail("Sync should fail");
@@ -455,7 +458,7 @@ public class TestBlockRecovery {
       e.getMessage().startsWith("Cannot recover ");
     }
     verify(dn.namenode, never()).commitBlockSynchronization(
-        any(Block.class), anyLong(), anyLong(), anyBoolean(),
+        any(ExtendedBlock.class), anyLong(), anyLong(), anyBoolean(),
         anyBoolean(), any(DatanodeID[].class));
   }
 
@@ -469,7 +472,7 @@ public class TestBlockRecovery {
     if(LOG.isDebugEnabled()) {
       LOG.debug("Running " + GenericTestUtils.getMethodName());
     }
-    ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(block);
+    ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(block.getLocalBlock());
     BlockWriteStreams streams = null;
     try {
       streams = replicaInfo.createStreams(true, 0, 0);
@@ -482,7 +485,7 @@ public class TestBlockRecovery {
         e.getMessage().startsWith("Cannot recover ");
       }
       verify(dn.namenode, never()).commitBlockSynchronization(
-          any(Block.class), anyLong(), anyLong(), anyBoolean(),
+          any(ExtendedBlock.class), anyLong(), anyLong(), anyBoolean(),
           anyBoolean(), any(DatanodeID[].class));
     } finally {
       streams.close();

Modified: hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java?rev=992508&r1=992507&r2=992508&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java Fri Sep  3 23:38:21 2010
@@ -265,7 +265,7 @@ public class TestNNLeaseRecovery {
     
     BlockInfo lastBlock = fsn.dir.getFileINode(anyString()).getLastBlock(); 
     try {
-      fsn.commitBlockSynchronization(lastBlock,
+      fsn.commitBlockSynchronization(fsn.getExtendedBlock(lastBlock),
         recoveryId, newSize, true, false, new DatanodeID[1]);
     } catch (IOException ioe) {
       assertTrue(ioe.getMessage().startsWith("Block (="));
@@ -293,7 +293,7 @@ public class TestNNLeaseRecovery {
     when(lastBlock.isComplete()).thenReturn(true);
     
     try {
-      fsn.commitBlockSynchronization(lastBlock,
+      fsn.commitBlockSynchronization(fsn.getExtendedBlock(lastBlock),
         recoveryId, newSize, true, false, new DatanodeID[1]);
     } catch (IOException ioe) {
       assertTrue(ioe.getMessage().startsWith("Unexpected block (="));
@@ -321,7 +321,7 @@ public class TestNNLeaseRecovery {
     when(((BlockInfoUnderConstruction)lastBlock).getBlockRecoveryId()).thenReturn(recoveryId-100);
     
     try {
-      fsn.commitBlockSynchronization(lastBlock,
+      fsn.commitBlockSynchronization(fsn.getExtendedBlock(lastBlock),
         recoveryId, newSize, true, false, new DatanodeID[1]);
     } catch (IOException ioe) {
       assertTrue(ioe.getMessage().startsWith("The recovery id " + recoveryId + " does not match current recovery id " + (recoveryId-100)));
@@ -349,7 +349,7 @@ public class TestNNLeaseRecovery {
     when(((BlockInfoUnderConstruction)lastBlock).getBlockRecoveryId()).thenReturn(recoveryId+100);
     
     try {           
-      fsn.commitBlockSynchronization(lastBlock,
+      fsn.commitBlockSynchronization(fsn.getExtendedBlock(lastBlock),
         recoveryId, newSize, true, false, new DatanodeID[1]);
     } catch (IOException ioe) {
       assertTrue(ioe.getMessage().startsWith("The recovery id " + recoveryId + " does not match current recovery id " + (recoveryId+100)));
@@ -378,7 +378,7 @@ public class TestNNLeaseRecovery {
     
     boolean recoveryChecked = false;
     try {
-      fsn.commitBlockSynchronization(lastBlock,
+      fsn.commitBlockSynchronization(fsn.getExtendedBlock(lastBlock),
         recoveryId, newSize, true, false, new DatanodeID[1]);
     } catch (NullPointerException ioe) {
       // It is fine to get NPE here because the datanodes array is empty