Posted to hdfs-commits@hadoop.apache.org by to...@apache.org on 2012/02/10 02:49:24 UTC

svn commit: r1242635 [4/5] - in /hadoop/common/branches/HDFS-1623/hadoop-hdfs-project: hadoop-hdfs-httpfs/ hadoop-hdfs/ hadoop-hdfs/src/main/bin/ hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/ hadoop-hdfs/src/main/java/ hadoop-hdfs/src/main...

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto Fri Feb 10 01:49:08 2012
@@ -104,7 +104,7 @@ service ClientDatanodeProtocolService {
    * Refresh the list of federated namenodes from updated configuration.
    * Adds new namenodes and stops the deleted namenodes.
    */
-  rpc refreshNamenode(RefreshNamenodesRequestProto)
+  rpc refreshNamenodes(RefreshNamenodesRequestProto)
       returns(RefreshNamenodesResponseProto);
 
   /**

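[editor's note] The rename above makes the RPC name match its plural Javadoc ("Refresh the list of federated namenodes"). On the admin side this RPC is normally reached through DFSAdmin; a minimal sketch follows, assuming the dfsadmin -refreshNamenodes option available in this branch and an illustrative datanode IPC address (neither appears in this diff).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.tools.DFSAdmin;

    class RefreshNamenodesSketch {
      public static void main(String[] args) throws Exception {
        // Asks the datanode at the given IPC address to re-read its list of
        // federated namenodes; this ends up in the refreshNamenodes RPC renamed
        // above. Option name and address are assumptions for illustration only.
        new DFSAdmin(new Configuration()).run(
            new String[] { "-refreshNamenodes", "datanode-host:50020" });
      }
    }
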
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto Fri Feb 10 01:49:08 2012
@@ -36,6 +36,19 @@ message DatanodeRegistrationProto {
 }
 
 /**
+ * Represents a storage available on the datanode
+ */
+message DatanodeStorageProto {
+  enum StorageState {
+    NORMAL = 0;
+    READ_ONLY = 1;
+  }
+
+  required string storageID = 1;    // Unique identifier for the storage
+  optional StorageState state = 2 [default = NORMAL];
+}
+
+/**
  * Commands sent from namenode to the datanodes
  */
 message DatanodeCommandProto {
@@ -136,6 +149,7 @@ message UpgradeCommandProto {
  */
 message RegisterDatanodeRequestProto {
   required DatanodeRegistrationProto registration = 1; // Datanode info
+  repeated DatanodeStorageProto storages = 2; // Storages on the datanode
 }
 
 /**
@@ -159,13 +173,19 @@ message RegisterDatanodeResponseProto {
  */
 message HeartbeatRequestProto {
   required DatanodeRegistrationProto registration = 1; // Datanode info
-  required uint64 capacity = 2;
-  required uint64 dfsUsed = 3;
-  required uint64 remaining = 4;
-  required uint64 blockPoolUsed = 5;
-  required uint32 xmitsInProgress = 6;
-  required uint32 xceiverCount = 7;
-  required uint32 failedVolumes = 8;
+  repeated StorageReportProto reports = 2;
+  optional uint32 xmitsInProgress = 3 [ default = 0 ];
+  optional uint32 xceiverCount = 4 [ default = 0 ];
+  optional uint32 failedVolumes = 5 [ default = 0 ];
+}
+
+message StorageReportProto {
+  required string storageID = 1;
+  optional bool failed = 2 [ default = false ];
+  optional uint64 capacity = 3 [ default = 0 ];
+  optional uint64 dfsUsed = 4 [ default = 0 ];
+  optional uint64 remaining = 5 [ default = 0 ];
+  optional uint64 blockPoolUsed = 6 [ default = 0 ];
 }
 
 /**
@@ -200,7 +220,15 @@ message HeartbeatResponseProto {
 message BlockReportRequestProto {
   required DatanodeRegistrationProto registration = 1;
   required string blockPoolId = 2;
-  repeated uint64 blocks = 3 [packed=true];
+  repeated StorageBlockReportProto reports = 3;
+}
+
+/**
+ * Report of blocks in a storage
+ */
+message StorageBlockReportProto {
+  required string storageID = 1;    // Storage ID
+  repeated uint64 blocks = 2 [packed=true];
 }
 
 /**
@@ -227,6 +255,14 @@ message ReceivedDeletedBlockInfoProto {
 }
 
 /**
+ * List of blocks received and deleted for a storage.
+ */
+message StorageReceivedDeletedBlocksProto {
+  required string storageID = 1;
+  repeated ReceivedDeletedBlockInfoProto blocks = 2;
+}
+
+/**
  * registration - datanode registration information
  * blockPoolID  - block pool ID of the reported blocks
  * blocks       - Received/deleted block list
@@ -234,7 +270,7 @@ message ReceivedDeletedBlockInfoProto {
 message BlockReceivedAndDeletedRequestProto {
   required DatanodeRegistrationProto registration = 1;
   required string blockPoolId = 2;
-  repeated ReceivedDeletedBlockInfoProto blocks = 3;
+  repeated StorageReceivedDeletedBlocksProto blocks = 3;
 }
 
 /**

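[editor's note] The new messages move registration, heartbeat and block-report payloads from node-wide totals to one record per storage directory. Below is a minimal sketch of building such records with the generated protobuf-java builders; the package and outer class name (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos) and the sample storage ID are assumptions, while the field names come straight from the messages above.

    import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto;
    import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto;

    class StorageReportSketch {
      static StorageReportProto sampleReport() {
        long gb = 1024L * 1024 * 1024;
        // One report per storage directory; "failed" defaults to false.
        return StorageReportProto.newBuilder()
            .setStorageID("DS-1-127.0.0.1-50010")   // assumed sample ID
            .setCapacity(100 * gb)
            .setDfsUsed(10 * gb)
            .setRemaining(90 * gb)
            .setBlockPoolUsed(10 * gb)
            .build();
      }

      static DatanodeStorageProto readOnlyStorage() {
        // Registration now carries the storages and their state.
        return DatanodeStorageProto.newBuilder()
            .setStorageID("DS-1-127.0.0.1-50010")
            .setState(DatanodeStorageProto.StorageState.READ_ONLY)
            .build();
      }
    }
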
Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Feb 10 01:49:08 2012
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:1159757-1237154
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:1159757-1242632
 /hadoop/core/branches/branch-0.19/hdfs/src/main/webapps/datanode:713112
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/datanode:713112
 /hadoop/core/trunk/src/webapps/datanode:776175-784663

Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Feb 10 01:49:08 2012
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:1152502-1237154
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:1152502-1242632
 /hadoop/core/branches/branch-0.19/hdfs/src/main/webapps/hdfs:713112
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/hdfs:713112
 /hadoop/core/trunk/src/webapps/hdfs:776175-784663

Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Feb 10 01:49:08 2012
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:1152502-1237154
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:1152502-1242632
 /hadoop/core/branches/branch-0.19/hdfs/src/main/webapps/secondary:713112
 /hadoop/core/branches/branch-0.19/hdfs/src/webapps/secondary:713112
 /hadoop/core/trunk/src/webapps/secondary:776175-784663

Propchange: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Feb 10 01:49:08 2012
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:1159757-1237154
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:1159757-1242632
 /hadoop/core/branches/branch-0.19/hdfs/src/test/hdfs:713112
 /hadoop/core/trunk/src/test/hdfs:776175-785643
 /hadoop/hdfs/branches/HDFS-1052/src/test/hdfs:987665-1095512

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Fri Feb 10 01:49:08 2012
@@ -56,10 +56,6 @@ import org.apache.hadoop.hdfs.protocol.C
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolPB;
-import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolPB;
-import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
-import org.apache.hadoop.hdfs.protocolR23Compatible.ClientNamenodeWireProtocol;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -71,7 +67,6 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
@@ -80,11 +75,8 @@ import org.apache.hadoop.metrics2.lib.De
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.StaticMapping;
-import org.apache.hadoop.security.RefreshUserMappingsProtocol;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.ProxyUsers;
-import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
-import org.apache.hadoop.tools.GetUserMappingsProtocol;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -764,10 +756,6 @@ public class MiniDFSCluster {
     nameNodes[nnIndex] = new NameNodeInfo(nn, new Configuration(conf));
   }
 
-  private void setRpcEngine(Configuration conf, Class<?> protocol, Class<?> engine) {
-    conf.setClass("rpc.engine."+protocol.getName(), engine, Object.class);
-  }
-
   /**
    * @return URI of the namenode from a single namenode MiniDFSCluster
    */
@@ -1011,6 +999,8 @@ public class MiniDFSCluster {
       if(dn == null)
         throw new IOException("Cannot start DataNode in "
             + dnConf.get(DFS_DATANODE_DATA_DIR_KEY));
+      //NOTE: the following is true if and only if:
+      //      hadoop.security.token.service.use_ip=true
       //since the HDFS does things based on IP:port, we need to add the mapping
       //for IP:port to rackId
       String ipAddr = dn.getSelfAddr().getAddress().getHostAddress();

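[editor's note] The NOTE added above ties MiniDFSCluster's IP:port rack mapping to the Hadoop Common setting hadoop.security.token.service.use_ip (default true). A minimal sketch of pinning that setting in a test configuration; the literal key string is used here only for brevity.

    import org.apache.hadoop.conf.Configuration;

    class UseIpSetting {
      static Configuration withIpBasedServices() {
        Configuration conf = new Configuration();
        // Default is true; with false, services (and the rack mapping noted
        // above) would be keyed by hostname:port instead of IP:port.
        conf.setBoolean("hadoop.security.token.service.use_ip", true);
        return conf;
      }
    }
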
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java Fri Feb 10 01:49:08 2012
@@ -72,12 +72,20 @@ public class TestAbandonBlock {
 
     // Now abandon the last block
     DFSClient dfsclient = DFSClientAdapter.getDFSClient((DistributedFileSystem)fs);
-    LocatedBlocks blocks = dfsclient.getNamenode().getBlockLocations(src, 0, 1);
+    LocatedBlocks blocks =
+      dfsclient.getNamenode().getBlockLocations(src, 0, Integer.MAX_VALUE);
+    int orginalNumBlocks = blocks.locatedBlockCount();
     LocatedBlock b = blocks.getLastLocatedBlock();
     dfsclient.getNamenode().abandonBlock(b.getBlock(), src, dfsclient.clientName);
 
     // And close the file
     fout.close();
+
+    // Close cluster and check the block has been abandoned after restart
+    cluster.restartNameNode();
+    blocks = dfsclient.getNamenode().getBlockLocations(src, 0, Integer.MAX_VALUE);
+    assert orginalNumBlocks == blocks.locatedBlockCount() + 1 :
+      "Blocks " + b + " has not been abandoned.";
   }
 
   @Test

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java Fri Feb 10 01:49:08 2012
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs;
 
 import java.io.ByteArrayOutputStream;
+import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.PrintStream;
@@ -36,16 +37,15 @@ import org.apache.hadoop.hdfs.protocol.E
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
 import org.apache.hadoop.hdfs.tools.DFSck;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.ToolRunner;
-
-import org.junit.Before;
 import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
-import junit.framework.Assert;
 
 /**
  * Class is used to test client reporting corrupted block replica to name node.
@@ -306,9 +306,9 @@ public class TestClientReportBadBlock {
    */
   private static void corruptBlock(final ExtendedBlock block, final DataNode dn)
       throws FileNotFoundException, IOException {
-    final FSDataset data = (FSDataset) dn.getFSDataset();
-    final RandomAccessFile raFile = new RandomAccessFile(
-        data.getBlockFile(block), "rw");
+    final File f = DataNodeTestUtils.getBlockFile(
+        dn, block.getBlockPoolId(), block.getLocalBlock());
+    final RandomAccessFile raFile = new RandomAccessFile(f, "rw");
     final byte[] bytes = new byte[(int) BLOCK_SIZE];
     for (int i = 0; i < BLOCK_SIZE; i++) {
       bytes[i] = (byte) (i);

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java Fri Feb 10 01:49:08 2012
@@ -45,7 +45,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -1163,9 +1163,9 @@ public class TestDFSShell extends TestCa
     String poolId = cluster.getNamesystem().getBlockPoolId();
     Iterable<Block>[] blocks = cluster.getAllBlockReports(poolId);
     for(int i = 0; i < blocks.length; i++) {
-      FSDataset ds = (FSDataset)datanodes.get(i).getFSDataset();
+      DataNode dn = datanodes.get(i);
       for(Block b : blocks[i]) {
-        files.add(ds.getBlockFile(poolId, b));
+        files.add(DataNodeTestUtils.getFile(dn, poolId, b.getBlockId()));
       }        
     }
     return files;

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java Fri Feb 10 01:49:08 2012
@@ -17,28 +17,27 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertTrue;
 
 import java.io.File;
-import java.io.IOException;
 import java.io.FileNotFoundException;
+import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.List;
 
-import org.junit.Test;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.HardLink;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.junit.Test;
 
 /**
  * This class tests the building blocks that are needed to
@@ -132,14 +131,14 @@ public class TestFileAppend{
       LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                   file1.toString(), 0, Long.MAX_VALUE);
       List<LocatedBlock> blocks = locations.getLocatedBlocks();
-      FSDataset dataset = (FSDataset) dn[0].data;
 
       //
       // Create hard links for a few of the blocks
       //
       for (int i = 0; i < blocks.size(); i = i + 2) {
         ExtendedBlock b = blocks.get(i).getBlock();
-        File f = dataset.getFile(b.getBlockPoolId(), b.getLocalBlock());
+        final File f = DataNodeTestUtils.getFile(dn[0],
+            b.getBlockPoolId(), b.getLocalBlock().getBlockId());
         File link = new File(f.toString() + ".link");
         System.out.println("Creating hardlink for File " + f + " to " + link);
         HardLink.createHardLink(f, link);
@@ -152,7 +151,7 @@ public class TestFileAppend{
         ExtendedBlock b = blocks.get(i).getBlock();
         System.out.println("testCopyOnWrite detaching block " + b);
         assertTrue("Detaching block " + b + " should have returned true",
-            dataset.unlinkBlock(b, 1));
+            DataNodeTestUtils.unlinkBlock(dn[0], b, 1));
       }
 
       // Since the blocks were already detached earlier, these calls should
@@ -162,7 +161,7 @@ public class TestFileAppend{
         ExtendedBlock b = blocks.get(i).getBlock();
         System.out.println("testCopyOnWrite detaching block " + b);
         assertTrue("Detaching block " + b + " should have returned false",
-            !dataset.unlinkBlock(b, 1));
+            !DataNodeTestUtils.unlinkBlock(dn[0], b, 1));
       }
 
     } finally {

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java Fri Feb 10 01:49:08 2012
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
+import java.io.File;
 import java.io.IOException;
 import java.io.RandomAccessFile;
 
@@ -36,7 +37,7 @@ import org.apache.hadoop.hdfs.protocol.E
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -200,8 +201,9 @@ public class TestFileAppend3 extends jun
     DatanodeInfo[] datanodeinfos = lb.getLocations();
     assertEquals(repl, datanodeinfos.length);
     final DataNode dn = cluster.getDataNode(datanodeinfos[0].getIpcPort());
-    final FSDataset data = (FSDataset)dn.getFSDataset();
-    final RandomAccessFile raf = new RandomAccessFile(data.getBlockFile(blk), "rw");
+    final File f = DataNodeTestUtils.getBlockFile(
+        dn, blk.getBlockPoolId(), blk.getLocalBlock());
+    final RandomAccessFile raf = new RandomAccessFile(f, "rw");
     AppendTestUtil.LOG.info("dn=" + dn + ", blk=" + blk + " (length=" + blk.getNumBytes() + ")");
     assertEquals(len1, raf.length());
     raf.setLength(0);

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java Fri Feb 10 01:49:08 2012
@@ -17,6 +17,21 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SYNCONCLOSE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
+
 import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -37,7 +52,6 @@ import org.apache.hadoop.fs.FsServerDefa
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -45,7 +59,7 @@ import org.apache.hadoop.hdfs.protocol.H
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
@@ -828,10 +842,10 @@ public class TestFileCreation extends ju
       int successcount = 0;
       for(DatanodeInfo datanodeinfo: locatedblock.getLocations()) {
         DataNode datanode = cluster.getDataNode(datanodeinfo.ipcPort);
-        FSDataset dataset = (FSDataset)datanode.data;
         ExtendedBlock blk = locatedblock.getBlock();
-        Block b = dataset.getStoredBlock(blk.getBlockPoolId(), blk.getBlockId());
-        File blockfile = dataset.findBlockFile(blk.getBlockPoolId(), b.getBlockId());
+        Block b = datanode.data.getStoredBlock(blk.getBlockPoolId(), blk.getBlockId());
+        final File blockfile = DataNodeTestUtils.getFile(datanode,
+            blk.getBlockPoolId(), b.getBlockId());
         System.out.println("blockfile=" + blockfile);
         if (blockfile != null) {
           BufferedReader in = new BufferedReader(new FileReader(blockfile));

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java Fri Feb 10 01:49:08 2012
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
 import java.net.URISyntaxException;
+import java.net.URI;
 import java.net.URL;
 import java.net.HttpURLConnection;
 import java.util.Random;
@@ -232,4 +233,164 @@ public class TestHftpFileSystem {
     in.seek(7);
     assertEquals('7', in.read());
   }
+
+  public void resetFileSystem() throws IOException {
+    // filesystem caching has a quirk/bug that it caches based on the user's
+    // given uri.  the result is if a filesystem is instantiated with no port,
+    // it gets the default port.  then if the default port is changed,
+    // and another filesystem is instantiated with no port, the prior fs
+    // is returned, not a new one using the changed port.  so let's flush
+    // the cache between tests...
+    FileSystem.closeAll();
+  }
+  
+  @Test
+  public void testHftpDefaultPorts() throws IOException {
+    resetFileSystem();
+    Configuration conf = new Configuration();
+    URI uri = URI.create("hftp://localhost");
+    HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
+
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT, fs.getDefaultPort());
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultSecurePort());
+
+    assertEquals(uri, fs.getUri());
+    assertEquals(
+        "127.0.0.1:"+DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT,
+        fs.getCanonicalServiceName()
+    );
+  }
+  
+  @Test
+  public void testHftpCustomDefaultPorts() throws IOException {
+    resetFileSystem();
+    Configuration conf = new Configuration();
+    conf.setInt("dfs.http.port", 123);
+    conf.setInt("dfs.https.port", 456);
+
+    URI uri = URI.create("hftp://localhost");
+    HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
+
+    assertEquals(123, fs.getDefaultPort());
+    assertEquals(456, fs.getDefaultSecurePort());
+    
+    assertEquals(uri, fs.getUri());
+    assertEquals(
+        "127.0.0.1:456",
+        fs.getCanonicalServiceName()
+    );
+  }
+
+  @Test
+  public void testHftpCustomUriPortWithDefaultPorts() throws IOException {
+    resetFileSystem();
+    Configuration conf = new Configuration();
+    URI uri = URI.create("hftp://localhost:123");
+    HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
+
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT, fs.getDefaultPort());
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultSecurePort());
+
+    assertEquals(uri, fs.getUri());
+    assertEquals(
+        "127.0.0.1:"+DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT,
+        fs.getCanonicalServiceName()
+    );
+  }
+
+  @Test
+  public void testHftpCustomUriPortWithCustomDefaultPorts() throws IOException {
+    resetFileSystem();
+    Configuration conf = new Configuration();
+    conf.setInt("dfs.http.port", 123);
+    conf.setInt("dfs.https.port", 456);
+
+    URI uri = URI.create("hftp://localhost:789");
+    HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
+
+    assertEquals(123, fs.getDefaultPort());
+    assertEquals(456, fs.getDefaultSecurePort());
+   
+    assertEquals(uri, fs.getUri()); 
+    assertEquals(
+        "127.0.0.1:456",
+        fs.getCanonicalServiceName()
+    );
+  }
+
+  ///
+
+  @Test
+  public void testHsftpDefaultPorts() throws IOException {
+    resetFileSystem();
+    Configuration conf = new Configuration();
+    URI uri = URI.create("hsftp://localhost");
+    HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
+
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultPort());
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultSecurePort());
+
+    assertEquals(uri, fs.getUri());
+    assertEquals(
+        "127.0.0.1:"+DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT,
+        fs.getCanonicalServiceName()
+    );
+  }
+
+  @Test
+  public void testHsftpCustomDefaultPorts() throws IOException {
+    resetFileSystem();
+    Configuration conf = new Configuration();
+    conf.setInt("dfs.http.port", 123);
+    conf.setInt("dfs.https.port", 456);
+
+    URI uri = URI.create("hsftp://localhost");
+    HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
+
+    assertEquals(456, fs.getDefaultPort());
+    assertEquals(456, fs.getDefaultSecurePort());
+    
+    assertEquals(uri, fs.getUri());
+    assertEquals(
+        "127.0.0.1:456",
+        fs.getCanonicalServiceName()
+    );
+  }
+
+  @Test
+  public void testHsftpCustomUriPortWithDefaultPorts() throws IOException {
+    resetFileSystem();
+    Configuration conf = new Configuration();
+    URI uri = URI.create("hsftp://localhost:123");
+    HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
+
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultPort());
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultSecurePort());
+
+    assertEquals(uri, fs.getUri());
+    assertEquals(
+        "127.0.0.1:123",
+        fs.getCanonicalServiceName()
+    );
+  }
+
+  @Test
+  public void testHsftpCustomUriPortWithCustomDefaultPorts() throws IOException {
+    resetFileSystem();
+    Configuration conf = new Configuration();
+    conf.setInt("dfs.http.port", 123);
+    conf.setInt("dfs.https.port", 456);
+
+    URI uri = URI.create("hsftp://localhost:789");
+    HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
+
+    assertEquals(456, fs.getDefaultPort());
+    assertEquals(456, fs.getDefaultSecurePort());
+
+    assertEquals(uri, fs.getUri());
+    assertEquals(
+        "127.0.0.1:789",
+        fs.getCanonicalServiceName()
+    );
+  }
 }
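
[editor's note] The new resetFileSystem() comment describes the FileSystem cache quirk these tests work around: FileSystem.get() caches by the caller's URI, so an instance created without an explicit port keeps its original default port even after the defaults change. A small illustration, assuming the HDFS jars and default configuration files are on the classpath:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    class FsCacheQuirk {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem a = FileSystem.get(URI.create("hftp://localhost"), conf);
        conf.setInt("dfs.http.port", 123);   // change the default port...
        FileSystem b = FileSystem.get(URI.create("hftp://localhost"), conf);
        System.out.println(a == b);          // true: the cached instance, built with the old port, is reused
        FileSystem.closeAll();               // what resetFileSystem() does between tests
        FileSystem c = FileSystem.get(URI.create("hftp://localhost"), conf);
        System.out.println(a == c);          // false: a fresh instance sees the new default
      }
    }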

Copied: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java (from r1242632, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java)
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java?p2=hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java&p1=hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java&r1=1242632&r2=1242635&rev=1242635&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java Fri Feb 10 01:49:08 2012
@@ -49,7 +49,7 @@ public class TestIsMethodSupported {
   
   @BeforeClass
   public static void setUp() throws Exception {
-    cluster = (new MiniDFSCluster.Builder(conf)).numNameNodes(1)
+    cluster = (new MiniDFSCluster.Builder(conf))
         .numDataNodes(1).build();
     nnAddress = cluster.getNameNode().getNameNodeAddress();
     dnAddress = new InetSocketAddress(cluster.getDataNodes().get(0)

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java Fri Feb 10 01:49:08 2012
@@ -19,8 +19,11 @@
 
 package org.apache.hadoop.hdfs.server.datanode;
 
+import java.io.File;
 import java.io.IOException;
 
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 
 /**
@@ -37,5 +40,19 @@ public class DataNodeTestUtils {
   getDNRegistrationForBP(DataNode dn, String bpid) throws IOException {
     return dn.getDNRegistrationForBP(bpid);
   }
-  
+
+  public static File getFile(DataNode dn, String bpid, long bid) {
+    return ((FSDataset)dn.getFSDataset()).getFile(bpid, bid);
+  }
+
+  public static File getBlockFile(DataNode dn, String bpid, Block b
+      ) throws IOException {
+    return ((FSDataset)dn.getFSDataset()).getBlockFile(bpid, b);
+  }
+
+  public static boolean unlinkBlock(DataNode dn, ExtendedBlock block, int numLinks
+      ) throws IOException {
+    ReplicaInfo info = ((FSDataset)dn.getFSDataset()).getReplicaInfo(block);
+    return info.unlinkBlock(numLinks);
+  }
 }
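
[editor's note] The new helpers hide the FSDataset cast from tests. A sketch of the usage pattern from the callers updated elsewhere in this commit (TestClientReportBadBlock, TestFileAppend3, TestFileAppend); dn and blk are assumed to come from a running MiniDFSCluster test.

    import java.io.File;
    import java.io.RandomAccessFile;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
    import org.apache.hadoop.hdfs.server.datanode.DataNode;
    import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;

    class BlockFileAccess {
      static void corruptFirstByte(DataNode dn, ExtendedBlock blk) throws Exception {
        File blockFile = DataNodeTestUtils.getBlockFile(
            dn, blk.getBlockPoolId(), blk.getLocalBlock());
        RandomAccessFile raf = new RandomAccessFile(blockFile, "rw");
        try {
          int first = raf.read();
          raf.seek(0);
          raf.write(first ^ 0xff);   // flip the first byte of the replica on disk
        } finally {
          raf.close();
        }
        // DataNodeTestUtils.unlinkBlock(dn, blk, 1) similarly replaces the old
        // FSDataset.unlinkBlock(...) calls in TestFileAppend.
      }
    }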

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java Fri Feb 10 01:49:08 2012
@@ -17,12 +17,14 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.Random;
 
@@ -38,11 +40,10 @@ import org.apache.hadoop.hdfs.protocol.B
 import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.BlockPoolSlice;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolumeSet;
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
-import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
+import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -81,8 +82,8 @@ public class SimulatedFSDataset  impleme
                               CHECKSUM_NULL, 16*1024 );
     byte[] nullCrcHeader = checksum.getHeader();
     nullCrcFileData =  new byte[2 + nullCrcHeader.length];
-    nullCrcFileData[0] = (byte) ((FSDataset.METADATA_VERSION >>> 8) & 0xff);
-    nullCrcFileData[1] = (byte) (FSDataset.METADATA_VERSION & 0xff);
+    nullCrcFileData[0] = (byte) ((BlockMetadataHeader.VERSION >>> 8) & 0xff);
+    nullCrcFileData[1] = (byte) (BlockMetadataHeader.VERSION & 0xff);
     for (int i = 0; i < nullCrcHeader.length; i++) {
       nullCrcFileData[i+2] = nullCrcHeader[i];
     }
@@ -390,9 +391,7 @@ public class SimulatedFSDataset  impleme
       Iterable<Block> injectBlocks) throws IOException {
     ExtendedBlock blk = new ExtendedBlock();
     if (injectBlocks != null) {
-      int numInjectedBlocks = 0;
       for (Block b: injectBlocks) { // if any blocks in list is bad, reject list
-        numInjectedBlocks++;
         if (b == null) {
           throw new NullPointerException("Null blocks in block list");
         }
@@ -555,31 +554,27 @@ public class SimulatedFSDataset  impleme
     }
   }
 
+  private BInfo getBInfo(final ExtendedBlock b) {
+    final Map<Block, BInfo> map = blockMap.get(b.getBlockPoolId());
+    return map == null? null: map.get(b.getLocalBlock());
+  }
+
+  @Override // {@link FSDatasetInterface}
+  public boolean contains(ExtendedBlock block) {
+    return getBInfo(block) != null;
+  }
+
   @Override // FSDatasetInterface
   public synchronized boolean isValidBlock(ExtendedBlock b) {
-    final Map<Block, BInfo> map = blockMap.get(b.getBlockPoolId());
-    if (map == null) {
-      return false;
-    }
-    BInfo binfo = map.get(b.getLocalBlock());
-    if (binfo == null) {
-      return false;
-    }
-    return binfo.isFinalized();
+    final BInfo binfo = getBInfo(b);
+    return binfo != null && binfo.isFinalized();
   }
 
   /* check if a block is created but not finalized */
   @Override
   public synchronized boolean isValidRbw(ExtendedBlock b) {
-    final Map<Block, BInfo> map = blockMap.get(b.getBlockPoolId());
-    if (map == null) {
-      return false;
-    }
-    BInfo binfo = map.get(b.getLocalBlock());
-    if (binfo == null) {
-      return false;
-    }
-    return !binfo.isFinalized();  
+    final BInfo binfo = getBInfo(b);
+    return binfo != null && !binfo.isFinalized();  
   }
 
   @Override
@@ -994,8 +989,33 @@ public class SimulatedFSDataset  impleme
   }
 
   @Override
-  public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock b)
-      throws IOException {
-    throw new IOException("getBlockLocalPathInfo not supported.");
+  public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock b) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public String[] getBlockPoolList() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void checkAndUpdate(String bpid, long blockId, File diskFile,
+      File diskMetaFile, FSVolumeInterface vol) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<FSVolumeInterface> getVolumes() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<Block> getFinalizedBlocks(String bpid) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Map<String, Object> getVolumeInfoMap() {
+    throw new UnsupportedOperationException();
   }
 }

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java Fri Feb 10 01:49:08 2012
@@ -37,11 +37,15 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
 import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat.State;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
+import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
+import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.Before;
@@ -111,15 +115,13 @@ public class TestBPOfferService {
       .when(mock).versionRequest();
     
     Mockito.doReturn(new DatanodeRegistration("fake-node"))
-      .when(mock).registerDatanode(Mockito.any(DatanodeRegistration.class));
+      .when(mock).registerDatanode(Mockito.any(DatanodeRegistration.class),
+          Mockito.any(DatanodeStorage[].class));
     
     Mockito.doAnswer(new HeartbeatAnswer(nnIdx))
       .when(mock).sendHeartbeat(
           Mockito.any(DatanodeRegistration.class),
-          Mockito.anyLong(),
-          Mockito.anyLong(),
-          Mockito.anyLong(),
-          Mockito.anyLong(),
+          Mockito.any(StorageReport[].class),
           Mockito.anyInt(),
           Mockito.anyInt(),
           Mockito.anyInt());
@@ -160,10 +162,10 @@ public class TestBPOfferService {
       waitForInitialization(bpos);
       
       // The DN should have register to both NNs.
-      Mockito.verify(mockNN1).registerDatanode(
-          (DatanodeRegistration) Mockito.anyObject());
-      Mockito.verify(mockNN2).registerDatanode(
-          (DatanodeRegistration) Mockito.anyObject());
+      Mockito.verify(mockNN1).registerDatanode(Mockito.any(DatanodeRegistration.class),
+          Mockito.any(DatanodeStorage[].class));
+      Mockito.verify(mockNN2).registerDatanode(Mockito.any(DatanodeRegistration.class),
+          Mockito.any(DatanodeStorage[].class));
       
       // Should get block reports from both NNs
       waitForBlockReport(mockNN1);
@@ -199,7 +201,7 @@ public class TestBPOfferService {
         .when(mockNN2).blockReport(
             Mockito.<DatanodeRegistration>anyObject(),  
             Mockito.eq(FAKE_BPID),
-            Mockito.<long[]>anyObject());
+            Mockito.<StorageBlockReport[]>anyObject());
 
     bpos.start();
     try {
@@ -343,7 +345,7 @@ public class TestBPOfferService {
           Mockito.verify(mockNN).blockReport(
               Mockito.<DatanodeRegistration>anyObject(),  
               Mockito.eq(FAKE_BPID),
-              Mockito.<long[]>anyObject());
+              Mockito.<StorageBlockReport[]>anyObject());
           return true;
         } catch (Throwable t) {
           LOG.info("waiting on block report: " + t.getMessage());
@@ -380,8 +382,8 @@ public class TestBPOfferService {
   private ReceivedDeletedBlockInfo[] waitForBlockReceived(
       ExtendedBlock fakeBlock,
       DatanodeProtocolClientSideTranslatorPB mockNN) throws Exception {
-    final ArgumentCaptor<ReceivedDeletedBlockInfo[]> captor =
-      ArgumentCaptor.forClass(ReceivedDeletedBlockInfo[].class);
+    final ArgumentCaptor<StorageReceivedDeletedBlocks[]> captor =
+      ArgumentCaptor.forClass(StorageReceivedDeletedBlocks[].class);
     GenericTestUtils.waitFor(new Supplier<Boolean>() {
 
       @Override
@@ -397,7 +399,7 @@ public class TestBPOfferService {
         }
       }
     }, 100, 10000);
-    return captor.getValue();
+    return captor.getValue()[0].getBlocks();
   }
 
 }
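
[editor's note] The mock changes above reflect the Java-side signature changes that mirror the new proto messages: registerDatanode now takes a DatanodeStorage[], sendHeartbeat a StorageReport[], blockReport a StorageBlockReport[], and blockReceivedAndDeleted a StorageReceivedDeletedBlocks[]. A sketch of the heartbeat shape implied by those mocks; the StorageReport constructor and its argument order are assumptions inferred from StorageReportProto, as they are not shown in this diff.

    import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
    import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
    import org.apache.hadoop.hdfs.server.protocol.StorageReport;

    class HeartbeatSketch {
      // All counters are per-storage now: one StorageReport per data directory.
      static void heartbeat(DatanodeProtocol nn, DatanodeRegistration reg,
          String storageID, long capacity, long dfsUsed, long remaining,
          long blockPoolUsed) throws Exception {
        StorageReport[] reports = {
            // Constructor and argument order assumed from StorageReportProto.
            new StorageReport(storageID, false /*failed*/, capacity, dfsUsed,
                remaining, blockPoolUsed)
        };
        nn.sendHeartbeat(reg, reports,
            0 /*xmitsInProgress*/, 0 /*xceiverCount*/, 0 /*failedVolumes*/);
      }
    }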

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java Fri Feb 10 01:49:08 2012
@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.apache.log4j.Level;
@@ -146,8 +147,9 @@ public class TestBlockReport {
     DataNode dn = cluster.getDataNodes().get(DN_N0);
     String poolId = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
-    cluster.getNameNodeRpc().blockReport(dnR, poolId,
-      new BlockListAsLongs(blocks, null).getBlockListAsLongs());
+    StorageBlockReport[] report = { new StorageBlockReport(dnR.getStorageID(),
+        new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
+    cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
 
     List<LocatedBlock> blocksAfterReport =
       DFSTestUtil.getAllBlocks(fs.open(filePath));
@@ -180,7 +182,7 @@ public class TestBlockReport {
 
     Path filePath = new Path("/" + METHOD_NAME + ".dat");
     DFSTestUtil.createFile(fs, filePath,
-      (long) FILE_SIZE, REPL_FACTOR, rand.nextLong());
+      FILE_SIZE, REPL_FACTOR, rand.nextLong());
 
     // mock around with newly created blocks and delete some
     File dataDir = new File(cluster.getDataDirectory());
@@ -226,8 +228,9 @@ public class TestBlockReport {
     DataNode dn = cluster.getDataNodes().get(DN_N0);
     String poolId = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
-    cluster.getNameNodeRpc().blockReport(dnR, poolId,
-      new BlockListAsLongs(blocks, null).getBlockListAsLongs());
+    StorageBlockReport[] report = { new StorageBlockReport(dnR.getStorageID(),
+        new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
+    cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
 
     BlockManagerTestUtil.getComputedDatanodeWork(cluster.getNamesystem()
         .getBlockManager());
@@ -266,9 +269,10 @@ public class TestBlockReport {
     DataNode dn = cluster.getDataNodes().get(DN_N0);
     String poolId = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
+    StorageBlockReport[] report = { new StorageBlockReport(dnR.getStorageID(),
+        new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
     DatanodeCommand dnCmd =
-      cluster.getNameNodeRpc().blockReport(dnR, poolId,
-        new BlockListAsLongs(blocks, null).getBlockListAsLongs());
+      cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
     if(LOG.isDebugEnabled()) {
       LOG.debug("Got the command: " + dnCmd);
     }
@@ -284,9 +288,8 @@ public class TestBlockReport {
    * This test isn't a representative case for BlockReport
    * The empty method is going to be left here to keep the naming
    * of the test plan in synch with the actual implementation
-   * @throws IOException in case of errors
    */
-  public void blockReport_04() throws IOException {
+  public void blockReport_04() {
   }
 
   // Client requests new block from NN. The test corrupts this very block
@@ -295,7 +298,7 @@ public class TestBlockReport {
   // BlockScanner which is out of scope of this test
   // Keeping the name to be in synch with the test plan
   //
-  public void blockReport_05() throws IOException {
+  public void blockReport_05() {
   }
 
   /**
@@ -319,8 +322,9 @@ public class TestBlockReport {
     DataNode dn = cluster.getDataNodes().get(DN_N1);
     String poolId = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
-    cluster.getNameNodeRpc().blockReport(dnR, poolId,
-        new BlockListAsLongs(blocks, null).getBlockListAsLongs());
+    StorageBlockReport[] report = { new StorageBlockReport(dnR.getStorageID(),
+        new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
+    cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
     printStats();
     assertEquals("Wrong number of PendingReplication Blocks",
       0, cluster.getNamesystem().getUnderReplicatedBlocks());
@@ -368,8 +372,9 @@ public class TestBlockReport {
     DataNode dn = cluster.getDataNodes().get(DN_N1);
     String poolId = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
-    cluster.getNameNodeRpc().blockReport(dnR, poolId,
-        new BlockListAsLongs(blocks, null).getBlockListAsLongs());
+    StorageBlockReport[] report = { new StorageBlockReport(dnR.getStorageID(),
+        new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
+    cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
     printStats();
     assertEquals("Wrong number of Corrupted blocks",
       1, cluster.getNamesystem().getCorruptReplicaBlocks() +
@@ -390,8 +395,9 @@ public class TestBlockReport {
       LOG.debug("Done corrupting length of " + corruptedBlock.getBlockName());
     }
     
-    cluster.getNameNodeRpc().blockReport(dnR, poolId,
+    report[0] = new StorageBlockReport(dnR.getStorageID(),
         new BlockListAsLongs(blocks, null).getBlockListAsLongs());
+    cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
     printStats();
 
     assertEquals("Wrong number of Corrupted blocks",
@@ -440,8 +446,9 @@ public class TestBlockReport {
       DataNode dn = cluster.getDataNodes().get(DN_N1);
       String poolId = cluster.getNamesystem().getBlockPoolId();
       DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
-      cluster.getNameNodeRpc().blockReport(dnR, poolId,
-          new BlockListAsLongs(blocks, null).getBlockListAsLongs());
+      StorageBlockReport[] report = { new StorageBlockReport(dnR.getStorageID(),
+          new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
+      cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
       printStats();
       assertEquals("Wrong number of PendingReplication blocks",
         blocks.size(), cluster.getNamesystem().getPendingReplicationBlocks());
@@ -486,8 +493,9 @@ public class TestBlockReport {
       DataNode dn = cluster.getDataNodes().get(DN_N1);
       String poolId = cluster.getNamesystem().getBlockPoolId();
       DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
-      cluster.getNameNodeRpc().blockReport(dnR, poolId,
-          new BlockListAsLongs(blocks, null).getBlockListAsLongs());
+      StorageBlockReport[] report = { new StorageBlockReport(dnR.getStorageID(),
+          new BlockListAsLongs(blocks, null).getBlockListAsLongs()) };
+      cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
       printStats();
       assertEquals("Wrong number of PendingReplication blocks",
         2, cluster.getNamesystem().getPendingReplicationBlocks());
@@ -550,7 +558,7 @@ public class TestBlockReport {
         .when(spy).blockReport(
           Mockito.<DatanodeRegistration>anyObject(),
           Mockito.anyString(),
-          Mockito.<long[]>anyObject());
+          Mockito.<StorageBlockReport[]>anyObject());
       
       // Force a block report to be generated. The block report will have
       // an RBW replica in it. Wait for the RPC to be sent, but block
@@ -638,8 +646,7 @@ public class TestBlockReport {
   // Write file and start second data node.
   private ArrayList<Block> writeFile(final String METHOD_NAME,
                                                final long fileSize,
-                                               Path filePath)
-    throws IOException {
+                                               Path filePath) {
     ArrayList<Block> blocks = null;
     try {
       REPL_FACTOR = 2;

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java Fri Feb 10 01:49:08 2012
@@ -65,6 +65,11 @@ public class TestDataNodeMXBean {
       String volumeInfo = (String)mbs.getAttribute(mxbeanName, "VolumeInfo");
       Assert.assertEquals(replaceDigits(datanode.getVolumeInfo()),
           replaceDigits(volumeInfo));
+      // Ensure mxbean's XceiverCount is same as the DataNode's
+      // live value.
+      int xceiverCount = (Integer)mbs.getAttribute(mxbeanName,
+          "XceiverCount");
+      Assert.assertEquals(datanode.getXceiverCount(), xceiverCount);
     } finally {
       if (cluster != null) {cluster.shutdown();}
     }

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java Fri Feb 10 01:49:08 2012
@@ -23,7 +23,7 @@ import static org.junit.Assert.assertNot
 import static org.junit.Assert.assertNotSame;
 
 import java.io.IOException;
-import java.util.Collection;
+import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -32,7 +32,6 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.VolumeInfo;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.junit.Assert;
@@ -83,11 +82,11 @@ public class TestDataNodeMultipleRegistr
 
       // check number of volumes in fsdataset
       DataNode dn = cluster.getDataNodes().get(0);
-      Collection<VolumeInfo> volInfos = ((FSDataset) dn.data).getVolumeInfo();
-      assertNotNull("No volumes in the fsdataset", volInfos);
+      final Map<String, Object> volInfos = dn.data.getVolumeInfoMap();
+      Assert.assertTrue("No volumes in the fsdataset", volInfos.size() > 0);
       int i = 0;
-      for (VolumeInfo vi : volInfos) {
-        LOG.info("vol " + i++ + ";dir=" + vi.directory + ";fs= " + vi.freeSpace);
+      for (Map.Entry<String, Object> e : volInfos.entrySet()) {
+        LOG.info("vol " + i++ + ") " + e.getKey() + ": " + e.getValue());
       }
       // number of volumes should be 2 - [data1, data2]
       assertEquals("number of volumes is wrong", 2, volInfos.size());
@@ -144,11 +143,11 @@ public class TestDataNodeMultipleRegistr
 
       // check number of volumes in fsdataset
       DataNode dn = cluster.getDataNodes().get(0);
-      Collection<VolumeInfo> volInfos = ((FSDataset) dn.data).getVolumeInfo();
-      assertNotNull("No volumes in the fsdataset", volInfos);
+      final Map<String, Object> volInfos = dn.data.getVolumeInfoMap();
+      Assert.assertTrue("No volumes in the fsdataset", volInfos.size() > 0);
       int i = 0;
-      for (VolumeInfo vi : volInfos) {
-        LOG.info("vol " + i++ + ";dir=" + vi.directory + ";fs= " + vi.freeSpace);
+      for (Map.Entry<String, Object> e : volInfos.entrySet()) {
+        LOG.info("vol " + i++ + ") " + e.getKey() + ": " + e.getValue());
       }
       // number of volumes should be 2 - [data1, data2]
       assertEquals("number of volumes is wrong", 2, volInfos.size());

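The two hunks above replace the FSDataset-specific VolumeInfo collection with the interface-level getVolumeInfoMap(), so the test no longer casts dn.data to FSDataset. A condensed sketch of the new access path, assuming dn, LOG, and the JUnit imports from the surrounding test:

    // Illustrative sketch of the new volume-info lookup.
    Map<String, Object> volInfos = dn.data.getVolumeInfoMap();
    Assert.assertTrue("No volumes in the fsdataset", volInfos.size() > 0);
    for (Map.Entry<String, Object> e : volInfos.entrySet()) {
      LOG.info("volume " + e.getKey() + ": " + e.getValue());
    }
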
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java Fri Feb 10 01:49:08 2012
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
 import java.io.File;
 import java.io.FilenameFilter;
 import java.io.IOException;
@@ -29,8 +32,8 @@ import java.util.Map;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.BlockReaderFactory;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -42,13 +45,11 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.net.NetUtils;
-
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
-import static org.junit.Assert.*;
 
 /**
  * Fine-grain testing of block files and locations after volume failure.
@@ -144,8 +145,9 @@ public class TestDataNodeVolumeFailure {
     DataNode dn = cluster.getDataNodes().get(1); //corresponds to dir data3
     String bpid = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(bpid);
-    long[] bReport = dn.getFSDataset().getBlockReport(bpid).getBlockListAsLongs();
-    cluster.getNameNodeRpc().blockReport(dnR, bpid, bReport);
+    StorageBlockReport[] report = { new StorageBlockReport(dnR.getStorageID(),
+        dn.getFSDataset().getBlockReport(bpid).getBlockListAsLongs()) };
+    cluster.getNameNodeRpc().blockReport(dnR, bpid, report);
 
     // verify number of blocks and files...
     verify(filename, filesize);
@@ -272,8 +274,7 @@ public class TestDataNodeVolumeFailure {
     String file = BlockReaderFactory.getFileName(targetAddr, 
         "test-blockpoolid",
         block.getBlockId());
-    BlockReader blockReader = 
-      BlockReaderFactory.newBlockReader(conf, s, file, block, lblock
+    BlockReaderFactory.newBlockReader(conf, s, file, block, lblock
         .getBlockToken(), 0, -1);
 
     // nothing - if it fails - it will throw an exception
@@ -370,7 +371,7 @@ public class TestDataNodeVolumeFailure {
         new FilenameFilter() {
           public boolean accept(File dir, String name) {
             return name.startsWith("blk_") &&
-            name.endsWith(FSDataset.METADATA_EXTENSION);
+            name.endsWith(DatanodeUtil.METADATA_EXTENSION);
           }
         }
     );

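Besides moving the block report to StorageBlockReport[], the hunks above pick up the metadata-file extension from DatanodeUtil instead of FSDataset. A small sketch of listing a replica directory for metadata files with that constant; the dir variable is an assumed java.io.File pointing at a block-pool data directory:

    // Illustrative sketch: filter block metadata files by the relocated constant.
    File[] metaFiles = dir.listFiles(new FilenameFilter() {
      public boolean accept(File d, String name) {
        return name.startsWith("blk_")
            && name.endsWith(DatanodeUtil.METADATA_EXTENSION);
      }
    });
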
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java Fri Feb 10 01:49:08 2012
@@ -30,17 +30,17 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-
-import org.junit.Test;
 import org.junit.Assert;
+import org.junit.Test;
 
 /** Test if a datanode can correctly upgrade itself */
 public class TestDatanodeRestart {
@@ -98,8 +98,9 @@ public class TestDatanodeRestart {
       out.write(writeBuf);
       out.hflush();
       DataNode dn = cluster.getDataNodes().get(0);
-      for (FSVolume volume : ((FSDataset)dn.data).volumes.getVolumes()) {
-        File currentDir = volume.getDir().getParentFile();
+      for (FSVolumeInterface v : dn.data.getVolumes()) {
+        FSVolume volume = (FSVolume)v;
+        File currentDir = volume.getCurrentDir().getParentFile().getParentFile();
         File rbwDir = new File(currentDir, "rbw");
         for (File file : rbwDir.listFiles()) {
           if (isCorrupt && Block.isBlockFilename(file)) {
@@ -188,7 +189,7 @@ public class TestDatanodeRestart {
     } else {
       src = replicaInfo.getMetaFile();
     }
-    File dst = FSDataset.getUnlinkTmpFile(src);
+    File dst = DatanodeUtil.getUnlinkTmpFile(src);
     if (isRename) {
       src.renameTo(dst);
     } else {

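With volumes handed out as FSVolumeInterface, the test casts back to FSDataset.FSVolume and derives the directory holding replicas being written from getCurrentDir(), while the unlink-tmp helper moves to DatanodeUtil. A condensed sketch, assuming dn is the DataNode under test:

    // Illustrative sketch of the updated volume traversal.
    for (FSVolumeInterface v : dn.data.getVolumes()) {
      FSVolume volume = (FSVolume) v;
      File currentDir = volume.getCurrentDir().getParentFile().getParentFile();
      File rbwDir = new File(currentDir, "rbw");
      for (File file : rbwDir.listFiles()) {
        // inspect, or deliberately corrupt, replica files here
      }
    }
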
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java Fri Feb 10 01:49:08 2012
@@ -25,20 +25,20 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Random;
 
+import junit.framework.TestCase;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
-
-import junit.framework.TestCase;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
 
 /**
  * Tests {@link DirectoryScanner} handling of differences
@@ -142,10 +142,10 @@ public class TestDirectoryScanner extend
 
   /** Create a block file in a random volume*/
   private long createBlockFile() throws IOException {
-    List<FSVolume> volumes = fds.volumes.getVolumes();
+    List<FSVolumeInterface> volumes = fds.getVolumes();
     int index = rand.nextInt(volumes.size() - 1);
     long id = getFreeBlockId();
-    File finalizedDir = volumes.get(index).getBlockPoolSlice(bpid).getFinalizedDir();
+    File finalizedDir = volumes.get(index).getFinalizedDir(bpid);
     File file = new File(finalizedDir, getBlockFile(id));
     if (file.createNewFile()) {
       LOG.info("Created block file " + file.getName());
@@ -155,10 +155,10 @@ public class TestDirectoryScanner extend
 
   /** Create a metafile in a random volume*/
   private long createMetaFile() throws IOException {
-    List<FSVolume> volumes = fds.volumes.getVolumes();
+    List<FSVolumeInterface> volumes = fds.getVolumes();
     int index = rand.nextInt(volumes.size() - 1);
     long id = getFreeBlockId();
-    File finalizedDir = volumes.get(index).getBlockPoolSlice(bpid).getFinalizedDir();
+    File finalizedDir = volumes.get(index).getFinalizedDir(bpid);
     File file = new File(finalizedDir, getMetaFile(id));
     if (file.createNewFile()) {
       LOG.info("Created metafile " + file.getName());
@@ -168,10 +168,10 @@ public class TestDirectoryScanner extend
 
   /** Create block file and corresponding metafile in a random volume */
   private long createBlockMetaFile() throws IOException {
-    List<FSVolume> volumes = fds.volumes.getVolumes();
+    List<FSVolumeInterface> volumes = fds.getVolumes();
     int index = rand.nextInt(volumes.size() - 1);
     long id = getFreeBlockId();
-    File finalizedDir = volumes.get(index).getBlockPoolSlice(bpid).getFinalizedDir();
+    File finalizedDir = volumes.get(index).getFinalizedDir(bpid);
     File file = new File(finalizedDir, getBlockFile(id));
     if (file.createNewFile()) {
       LOG.info("Created block file " + file.getName());
@@ -352,7 +352,7 @@ public class TestDirectoryScanner extend
 
     // Added block has the same file as the one created by the test
     File file = new File(getBlockFile(blockId));
-    assertEquals(file.getName(), fds.findBlockFile(bpid, blockId).getName());
+    assertEquals(file.getName(), fds.getFile(bpid, blockId).getName());
 
     // Generation stamp is same as that of created file
     assertEquals(genStamp, replicainfo.getGenerationStamp());

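The scanner test now reaches a volume's per-block-pool finalized directory through FSVolumeInterface.getFinalizedDir(bpid) rather than through the FSVolume block-pool slice, and looks up block files with fds.getFile(). A condensed sketch of creating a block file and its metafile, assuming the fds, bpid, and rand fields plus the getBlockFile/getMetaFile/getFreeBlockId helpers of the surrounding test:

    // Illustrative sketch: create a block file plus metafile in a random volume.
    List<FSVolumeInterface> volumes = fds.getVolumes();
    File finalizedDir =
        volumes.get(rand.nextInt(volumes.size())).getFinalizedDir(bpid);
    long id = getFreeBlockId();
    new File(finalizedDir, getBlockFile(id)).createNewFile();  // block data
    new File(finalizedDir, getMetaFile(id)).createNewFile();   // block metadata
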
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRoundRobinVolumesPolicy.java Fri Feb 10 01:49:08 2012
@@ -21,10 +21,10 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
-import junit.framework.Assert;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
-import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.FSVolumeInterface;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -33,14 +33,14 @@ public class TestRoundRobinVolumesPolicy
   // Test the Round-Robin block-volume choosing algorithm.
   @Test
   public void testRR() throws Exception {
-    final List<FSVolume> volumes = new ArrayList<FSVolume>();
+    final List<FSVolumeInterface> volumes = new ArrayList<FSVolumeInterface>();
 
     // First volume, with 100 bytes of space.
-    volumes.add(Mockito.mock(FSVolume.class));
+    volumes.add(Mockito.mock(FSVolumeInterface.class));
     Mockito.when(volumes.get(0).getAvailable()).thenReturn(100L);
 
     // Second volume, with 200 bytes of space.
-    volumes.add(Mockito.mock(FSVolume.class));
+    volumes.add(Mockito.mock(FSVolumeInterface.class));
     Mockito.when(volumes.get(1).getAvailable()).thenReturn(200L);
 
     RoundRobinVolumesPolicy policy = ReflectionUtils.newInstance(
@@ -69,14 +69,14 @@ public class TestRoundRobinVolumesPolicy
   @Test
   public void testRRPolicyExceptionMessage()
       throws Exception {
-    final List<FSVolume> volumes = new ArrayList<FSVolume>();
+    final List<FSVolumeInterface> volumes = new ArrayList<FSVolumeInterface>();
 
     // First volume, with 500 bytes of space.
-    volumes.add(Mockito.mock(FSVolume.class));
+    volumes.add(Mockito.mock(FSVolumeInterface.class));
     Mockito.when(volumes.get(0).getAvailable()).thenReturn(500L);
 
     // Second volume, with 600 bytes of space.
-    volumes.add(Mockito.mock(FSVolume.class));
+    volumes.add(Mockito.mock(FSVolumeInterface.class));
     Mockito.when(volumes.get(1).getAvailable()).thenReturn(600L);
 
     RoundRobinVolumesPolicy policy = new RoundRobinVolumesPolicy();

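Typing the policy against FSVolumeInterface lets the test build its candidate volumes as plain Mockito mocks that only report available space. A minimal sketch, with the chooseVolume assertions left to the surrounding test:

    // Illustrative sketch: mock volumes for the round-robin choosing policy.
    List<FSVolumeInterface> volumes = new ArrayList<FSVolumeInterface>();
    volumes.add(Mockito.mock(FSVolumeInterface.class));
    Mockito.when(volumes.get(0).getAvailable()).thenReturn(100L);
    volumes.add(Mockito.mock(FSVolumeInterface.class));
    Mockito.when(volumes.get(1).getAvailable()).thenReturn(200L);
    RoundRobinVolumesPolicy policy = new RoundRobinVolumesPolicy();
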
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java Fri Feb 10 01:49:08 2012
@@ -101,7 +101,7 @@ public class TestSimulatedFSDataset exte
     InputStream metaInput = fsdataset.getMetaDataInputStream(b);
     DataInputStream metaDataInput = new DataInputStream(metaInput);
     short version = metaDataInput.readShort();
-    assertEquals(FSDataset.METADATA_VERSION, version);
+    assertEquals(BlockMetadataHeader.VERSION, version);
     DataChecksum checksum = DataChecksum.newDataChecksum(metaDataInput);
     assertEquals(DataChecksum.CHECKSUM_NULL, checksum.getChecksumType());
     assertEquals(0, checksum.getChecksumSize());  

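The metadata version constant moves from FSDataset to BlockMetadataHeader; the assertion otherwise reads the replica's meta stream as before. A condensed sketch, assuming fsdataset and b (an ExtendedBlock) from the surrounding test:

    // Illustrative sketch: check the version short at the head of the meta stream.
    DataInputStream metaDataInput =
        new DataInputStream(fsdataset.getMetaDataInputStream(b));
    assertEquals(BlockMetadataHeader.VERSION, metaDataInput.readShort());
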
Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestWriteToReplica.java Fri Feb 10 01:49:08 2012
@@ -140,7 +140,7 @@ public class TestWriteToReplica {
     ReplicasMap replicasMap = dataSet.volumeMap;
     FSVolume vol = dataSet.volumes.getNextVolume(0);
     ReplicaInfo replicaInfo = new FinalizedReplica(
-        blocks[FINALIZED].getLocalBlock(), vol, vol.getDir());
+        blocks[FINALIZED].getLocalBlock(), vol, vol.getCurrentDir().getParentFile());
     replicasMap.add(bpid, replicaInfo);
     replicaInfo.getBlockFile().createNewFile();
     replicaInfo.getMetaFile().createNewFile();
@@ -160,15 +160,15 @@ public class TestWriteToReplica {
         blocks[RWR].getLocalBlock(), vol, vol.createRbwFile(bpid,
             blocks[RWR].getLocalBlock()).getParentFile()));
     replicasMap.add(bpid, new ReplicaUnderRecovery(new FinalizedReplica(blocks[RUR]
-        .getLocalBlock(), vol, vol.getDir()), 2007));    
+        .getLocalBlock(), vol, vol.getCurrentDir().getParentFile()), 2007));    
     
     return blocks;
   }
   
   private void testAppend(String bpid, FSDataset dataSet, ExtendedBlock[] blocks) throws IOException {
     long newGS = blocks[FINALIZED].getGenerationStamp()+1;
-    FSVolume v = dataSet.volumeMap.get(bpid, blocks[FINALIZED].getLocalBlock())
-        .getVolume();
+    final FSVolume v = (FSVolume)dataSet.volumeMap.get(
+        bpid, blocks[FINALIZED].getLocalBlock()).getVolume();
     long available = v.getCapacity()-v.getDfsUsed();
     long expectedLen = blocks[FINALIZED].getNumBytes();
     try {

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java Fri Feb 10 01:49:08 2012
@@ -93,7 +93,9 @@ public class CreateEditsLog {
         dirInode = new INodeDirectory(p, 0L);
         editLog.logMkDir(currentDir, dirInode);
       }
-      editLog.logOpenFile(filePath, inode);
+      editLog.logOpenFile(filePath, 
+          new INodeFileUnderConstruction(
+              p, replication, 0, blockSize, "", "", null));
       editLog.logCloseFile(filePath, inode);
 
       if (currentBlockId - bidAtSync >= 2000) { // sync every 2K blocks

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java Fri Feb 10 01:49:08 2012
@@ -66,7 +66,7 @@ import static org.mockito.Mockito.mock;
  */
 public abstract class FSImageTestUtil {
   
-  public static final Log LOG = LogFactory.getLog(FSImageTestUtil.class.getName());
+  public static final Log LOG = LogFactory.getLog(FSImageTestUtil.class);
   
   /**
    * The position in the fsimage header where the txid is
@@ -413,6 +413,8 @@ public abstract class FSImageTestUtil {
       int nnIdx, List<Integer> txids) {
 
     for (File nameDir : getNameNodeCurrentDirs(cluster, nnIdx)) {
+      LOG.info("examining name dir with files: " +
+          Joiner.on(",").join(nameDir.listFiles()));
       // Should have fsimage_N for the three checkpoints
       LOG.info("Examining storage dir " + nameDir + " with contents: "
           + StringUtils.join(nameDir.listFiles(), ", "));

Modified: hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=1242635&r1=1242634&r2=1242635&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/common/branches/HDFS-1623/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Fri Feb 10 01:49:08 2012
@@ -44,9 +44,13 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
+import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
+import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.io.EnumSetWritable;
@@ -793,7 +797,10 @@ public class NNThroughputBenchmark {
       dnRegistration.setStorageInfo(new DataStorage(nsInfo, ""));
       DataNode.setNewStorageID(dnRegistration);
       // register datanode
-      dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
+      
+      DatanodeStorage[] storages = { new DatanodeStorage(
+          dnRegistration.getStorageID(), DatanodeStorage.State.NORMAL) };
+      dnRegistration = nameNodeProto.registerDatanode(dnRegistration, storages);
     }
 
     /**
@@ -803,9 +810,10 @@ public class NNThroughputBenchmark {
     void sendHeartbeat() throws IOException {
       // register datanode
       // TODO:FEDERATION currently a single block pool is supported
+      StorageReport[] rep = { new StorageReport(dnRegistration.getStorageID(),
+          false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
       DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
-          DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0, 0)
-          .getCommands();
+          rep, 0, 0, 0).getCommands();
       if(cmds != null) {
         for (DatanodeCommand cmd : cmds ) {
           if(LOG.isDebugEnabled()) {
@@ -848,10 +856,10 @@ public class NNThroughputBenchmark {
     @SuppressWarnings("unused") // keep it for future blockReceived benchmark
     int replicateBlocks() throws IOException {
       // register datanode
-      // TODO:FEDERATION currently a single block pool is supported
+      StorageReport[] rep = { new StorageReport(dnRegistration.getStorageID(),
+          false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
       DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
-          DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0, 0)
-          .getCommands();
+          rep, 0, 0, 0).getCommands();
       if (cmds != null) {
         for (DatanodeCommand cmd : cmds) {
           if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
@@ -881,11 +889,14 @@ public class NNThroughputBenchmark {
           receivedDNReg.setStorageInfo(
                           new DataStorage(nsInfo, dnInfo.getStorageID()));
           receivedDNReg.setInfoPort(dnInfo.getInfoPort());
-          nameNodeProto.blockReceivedAndDeleted(receivedDNReg, nameNode
-              .getNamesystem().getBlockPoolId(),
-              new ReceivedDeletedBlockInfo[] { new ReceivedDeletedBlockInfo(
+          ReceivedDeletedBlockInfo[] rdBlocks = {
+            new ReceivedDeletedBlockInfo(
                   blocks[i], ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
-                  null) });
+                  null) };
+          StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
+              receivedDNReg.getStorageID(), rdBlocks) };
+          nameNodeProto.blockReceivedAndDeleted(receivedDNReg, nameNode
+              .getNamesystem().getBlockPoolId(), report);
         }
       }
       return blocks.length;
@@ -997,11 +1008,13 @@ public class NNThroughputBenchmark {
         for(DatanodeInfo dnInfo : loc.getLocations()) {
           int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getName());
           datanodes[dnIdx].addBlock(loc.getBlock().getLocalBlock());
+          ReceivedDeletedBlockInfo[] rdBlocks = { new ReceivedDeletedBlockInfo(
+              loc.getBlock().getLocalBlock(),
+              ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null) };
+          StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
+              datanodes[dnIdx].dnRegistration.getStorageID(), rdBlocks) };
           nameNodeProto.blockReceivedAndDeleted(datanodes[dnIdx].dnRegistration, loc
-              .getBlock().getBlockPoolId(),
-              new ReceivedDeletedBlockInfo[] { new ReceivedDeletedBlockInfo(loc
-                  .getBlock().getLocalBlock(),
-                  ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null) });
+              .getBlock().getBlockPoolId(), report);
         }
       }
       return prevBlock;
@@ -1018,8 +1031,10 @@ public class NNThroughputBenchmark {
       assert daemonId < numThreads : "Wrong daemonId.";
       TinyDatanode dn = datanodes[daemonId];
       long start = System.currentTimeMillis();
+      StorageBlockReport[] report = { new StorageBlockReport(
+          dn.dnRegistration.getStorageID(), dn.getBlockReportList()) };
       nameNodeProto.blockReport(dn.dnRegistration, nameNode.getNamesystem()
-          .getBlockPoolId(), dn.getBlockReportList());
+          .getBlockPoolId(), report);
       long end = System.currentTimeMillis();
       return end-start;
     }
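
Taken together, the NNThroughputBenchmark hunks above show the new shapes of the DatanodeProtocol calls, which now group everything by storage. A condensed sketch of the four calls, assuming the benchmark's nameNodeProto, dnRegistration, bpid, and DF_CAPACITY/DF_USED constants, plus a long[] blocksAsLongs block list and a ReceivedDeletedBlockInfo[] rdBlocks array:

    // Illustrative sketch, call shapes only.
    // Registration carries the datanode's storages.
    DatanodeStorage[] storages = { new DatanodeStorage(
        dnRegistration.getStorageID(), DatanodeStorage.State.NORMAL) };
    dnRegistration = nameNodeProto.registerDatanode(dnRegistration, storages);

    // Heartbeats send one StorageReport per storage instead of flat counters.
    StorageReport[] rep = { new StorageReport(dnRegistration.getStorageID(),
        false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
    nameNodeProto.sendHeartbeat(dnRegistration, rep, 0, 0, 0);

    // Block reports are grouped by storage.
    StorageBlockReport[] blockReport = { new StorageBlockReport(
        dnRegistration.getStorageID(), blocksAsLongs) };
    nameNodeProto.blockReport(dnRegistration, bpid, blockReport);

    // Received/deleted block notifications are grouped by storage as well.
    StorageReceivedDeletedBlocks[] received = { new StorageReceivedDeletedBlocks(
        dnRegistration.getStorageID(), rdBlocks) };
    nameNodeProto.blockReceivedAndDeleted(dnRegistration, bpid, received);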