Posted to hdfs-commits@hadoop.apache.org by el...@apache.org on 2012/04/02 00:12:15 UTC

svn commit: r1308205 [2/2] - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/protocol/ src/main/java/org/apache/hadoop/hdfs/protocolPB/ src/main/java/org/apache/hado...

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java Sun Apr  1 22:12:12 2012
@@ -854,7 +854,7 @@ class NameNodeRpcServer implements Namen
     BlockListAsLongs blist = new BlockListAsLongs(reports[0].getBlocks());
     if(stateChangeLog.isDebugEnabled()) {
       stateChangeLog.debug("*BLOCK* NameNode.blockReport: "
-           + "from " + nodeReg.getName() + " " + blist.getNumberOfBlocks()
+           + "from " + nodeReg + " " + blist.getNumberOfBlocks()
            + " blocks");
     }
 
@@ -870,7 +870,7 @@ class NameNodeRpcServer implements Namen
     verifyRequest(nodeReg);
     if(stateChangeLog.isDebugEnabled()) {
       stateChangeLog.debug("*BLOCK* NameNode.blockReceivedAndDeleted: "
-          +"from "+nodeReg.getName()+" "+receivedAndDeletedBlocks.length
+          +"from "+nodeReg+" "+receivedAndDeletedBlocks.length
           +" blocks.");
     }
     namesystem.getBlockManager().processIncrementalBlockReport(
@@ -880,7 +880,8 @@ class NameNodeRpcServer implements Namen
   @Override // DatanodeProtocol
   public void errorReport(DatanodeRegistration nodeReg,
                           int errorCode, String msg) throws IOException { 
-    String dnName = (nodeReg == null ? "unknown DataNode" : nodeReg.getName());
+    String dnName = 
+       (nodeReg == null) ? "Unknown DataNode" : nodeReg.toString();
 
     if (errorCode == DatanodeProtocol.NOTIFY) {
       LOG.info("Error report from " + dnName + ": " + msg);
@@ -909,13 +910,10 @@ class NameNodeRpcServer implements Namen
   }
 
   /** 
-   * Verify request.
+   * Verifies the given registration.
    * 
-   * Verifies correctness of the datanode version, registration ID, and 
-   * if the datanode does not need to be shutdown.
-   * 
-   * @param nodeReg data node registration
-   * @throws IOException
+   * @param nodeReg node registration
+   * @throws UnregisteredNodeException if the registration is invalid
    */
   void verifyRequest(NodeRegistration nodeReg) throws IOException {
     verifyVersion(nodeReg.getVersion());

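The hunks above drop the explicit getName() call and log the registration
object itself, relying on its toString(). Given the DatanodeRegistration
toString() updated later in this commit, the debug line would render
roughly as (illustrative values):

    *BLOCK* NameNode.blockReport: from DatanodeRegistration(10.0.0.1,
    storageID=DS-1234, infoPort=50075, ipcPort=50020) 42 blocks
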
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java Sun Apr  1 22:12:12 2012
@@ -496,7 +496,7 @@ public class NamenodeFsck {
       
       try {
         chosenNode = bestNode(dfs, lblock.getLocations(), deadNodes);
-        targetAddr = NetUtils.createSocketAddr(chosenNode.getName());
+        targetAddr = NetUtils.createSocketAddr(chosenNode.getXferAddr());
       }  catch (IOException ie) {
         if (failures >= DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT) {
           throw new IOException("Could not obtain block " + lblock);

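This getName() -> getXferAddr() substitution recurs throughout the rest of
this commit: callers that previously parsed the composite "name" string now
ask explicitly for the data-transfer address. A minimal sketch, assuming
getXferAddr() returns "IP:xferPort" as introduced in part 1 of this commit:

    // Hypothetical node with ipAddr "10.0.0.1" and xferPort 50010:
    // getXferAddr() yields "10.0.0.1:50010", which parses directly
    // into a socket address for the data-transfer connection.
    InetSocketAddress targetAddr =
        NetUtils.createSocketAddr(chosenNode.getXferAddr());
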
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java Sun Apr  1 22:12:12 2012
@@ -260,14 +260,14 @@ class NamenodeJspHelper {
       // Find out common suffix. Should this be before or after the sort?
       String port_suffix = null;
       if (live.size() > 0) {
-        String name = live.get(0).getName();
+        String name = live.get(0).getXferAddr();
         int idx = name.indexOf(':');
         if (idx > 0) {
           port_suffix = name.substring(idx);
         }
 
         for (int i = 1; port_suffix != null && i < live.size(); i++) {
-          if (live.get(i).getName().endsWith(port_suffix) == false) {
+          if (live.get(i).getXferAddr().endsWith(port_suffix) == false) {
             port_suffix = null;
             break;
           }
@@ -404,7 +404,7 @@ class NamenodeJspHelper {
     final String nodeToRedirect;
     int redirectPort;
     if (datanode != null) {
-      nodeToRedirect = datanode.getHost();
+      nodeToRedirect = datanode.getIpAddr();
       redirectPort = datanode.getInfoPort();
     } else {
       nodeToRedirect = nn.getHttpAddress().getHostName();
@@ -466,14 +466,14 @@ class NamenodeJspHelper {
           + URLEncoder.encode("/", "UTF-8")
           + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnaddr);
 
-      String name = d.getHostName() + ":" + d.getPort();
+      String name = d.getXferAddrWithHostname();
       if (!name.matches("\\d+\\.\\d+.\\d+\\.\\d+.*"))
         name = name.replaceAll("\\.[^.:]*", "");
       int idx = (suffix != null && name.endsWith(suffix)) ? name
           .indexOf(suffix) : -1;
 
-      out.print(rowTxt() + "<td class=\"name\"><a title=\"" + d.getHost() + ":"
-          + d.getPort() + "\" href=\"" + url + "\">"
+      out.print(rowTxt() + "<td class=\"name\"><a title=\"" + d.getXferAddr()
+          + "\" href=\"" + url + "\">"
           + ((idx > 0) ? name.substring(0, idx) : name) + "</a>"
           + ((alive) ? "" : "\n"));
     }
@@ -599,14 +599,14 @@ class NamenodeJspHelper {
       // Find out common suffix. Should this be before or after the sort?
       String port_suffix = null;
       if (live.size() > 0) {
-        String name = live.get(0).getName();
+        String name = live.get(0).getXferAddr();
         int idx = name.indexOf(':');
         if (idx > 0) {
           port_suffix = name.substring(idx);
         }
 
         for (int i = 1; port_suffix != null && i < live.size(); i++) {
-          if (live.get(i).getName().endsWith(port_suffix) == false) {
+          if (live.get(i).getXferAddr().endsWith(port_suffix) == false) {
             port_suffix = null;
             break;
           }

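Note the two distinct replacements in this file: getXferAddr() supplies the
IP:xferPort link title, while getXferAddrWithHostname() is assumed to return
"hostname:xferPort", replacing the manual d.getHostName() + ":" + d.getPort()
concatenation for the display name. For example (illustrative values):

    // Assumed accessor semantics from part 1 of this commit:
    String name  = d.getXferAddrWithHostname(); // "dn1.example.com:50010"
    String title = d.getXferAddr();             // "10.0.0.1:50010"
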
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java Sun Apr  1 22:12:12 2012
@@ -62,8 +62,8 @@ implements Writable, NodeRegistration {
   /**
    * Create DatanodeRegistration
    */
-  public DatanodeRegistration(String nodeName) {
-    this(nodeName, new StorageInfo(), new ExportedBlockKeys());
+  public DatanodeRegistration(String ipAddr) {
+    this(ipAddr, new StorageInfo(), new ExportedBlockKeys());
   }
   
   public DatanodeRegistration(DatanodeID dn, StorageInfo info,
@@ -73,9 +73,9 @@ implements Writable, NodeRegistration {
     this.exportedKeys = keys;
   }
   
-  public DatanodeRegistration(String nodeName, StorageInfo info,
+  public DatanodeRegistration(String ipAddr, StorageInfo info,
       ExportedBlockKeys keys) {
-    super(nodeName);
+    super(ipAddr);
     this.storageInfo = info;
     this.exportedKeys = keys;
   }
@@ -108,13 +108,13 @@ implements Writable, NodeRegistration {
 
   @Override // NodeRegistration
   public String getAddress() {
-    return getName();
+    return getXferAddr();
   }
 
   @Override
   public String toString() {
     return getClass().getSimpleName()
-      + "(" + name
+      + "(" + ipAddr
       + ", storageID=" + storageID
       + ", infoPort=" + infoPort
       + ", ipcPort=" + ipcPort

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DisallowedDatanodeException.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DisallowedDatanodeException.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DisallowedDatanodeException.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DisallowedDatanodeException.java Sun Apr  1 22:12:12 2012
@@ -38,6 +38,6 @@ public class DisallowedDatanodeException
   private static final long serialVersionUID = 1L;
 
   public DisallowedDatanodeException(DatanodeID nodeID) {
-    super("Datanode denied communication with namenode: " + nodeID.getName());
+    super("Datanode denied communication with namenode: " + nodeID);
   }
 }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NodeRegistration.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NodeRegistration.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NodeRegistration.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NodeRegistration.java Sun Apr  1 22:12:12 2012
@@ -28,7 +28,7 @@ import org.apache.hadoop.classification.
 public interface NodeRegistration {
   /**
    * Get address of the server node.
-   * @return hostname:portNumber
+   * @return ipAddr:portNumber
    */
   public String getAddress();
 

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java Sun Apr  1 22:12:12 2012
@@ -280,10 +280,11 @@ public class JsonUtil {
     }
 
     final Map<String, Object> m = new TreeMap<String, Object>();
-    m.put("name", datanodeinfo.getName());
+    m.put("ipAddr", datanodeinfo.getIpAddr());
+    m.put("hostName", datanodeinfo.getHostName());
     m.put("storageID", datanodeinfo.getStorageID());
+    m.put("xferPort", datanodeinfo.getXferPort());
     m.put("infoPort", datanodeinfo.getInfoPort());
-
     m.put("ipcPort", datanodeinfo.getIpcPort());
 
     m.put("capacity", datanodeinfo.getCapacity());
@@ -293,7 +294,6 @@ public class JsonUtil {
     m.put("lastUpdate", datanodeinfo.getLastUpdate());
     m.put("xceiverCount", datanodeinfo.getXceiverCount());
     m.put("networkLocation", datanodeinfo.getNetworkLocation());
-    m.put("hostName", datanodeinfo.getHostName());
     m.put("adminState", datanodeinfo.getAdminState().name());
     return m;
   }
@@ -308,6 +308,7 @@ public class JsonUtil {
         (String)m.get("name"),
         (String)m.get("hostName"),
         (String)m.get("storageID"),
+        (int)(long)(Long)m.get("xferPort"),
         (int)(long)(Long)m.get("infoPort"),
         (int)(long)(Long)m.get("ipcPort"),
 

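With these changes the datanode entry in the WebHDFS JSON output gains
separate ipAddr and xferPort fields; since the map is a TreeMap, keys
serialize in alphabetical order. Roughly (illustrative values, elided
fields omitted):

    {"adminState":"NORMAL", ..., "hostName":"dn1", "infoPort":50075,
     "ipAddr":"10.0.0.1", "ipcPort":50020, ..., "xferPort":50010}
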
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto Sun Apr  1 22:12:12 2012
@@ -48,11 +48,12 @@ message BlockTokenIdentifierProto {
  * Identifies a Datanode
  */
 message DatanodeIDProto {
-  required string name = 1;      // IP:port (data transfer port)
+  required string ipAddr = 1;    // IP address
   required string hostName = 2;  // hostname
   required string storageID = 3; // unique storage id
-  required uint32 infoPort = 4;  // info server port
-  required uint32 ipcPort = 5;   // ipc server port
+  required uint32 xferPort = 4;  // data streaming port
+  required uint32 infoPort = 5;  // info server port
+  required uint32 ipcPort = 6;   // ipc server port
 }
 
 /**

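Because infoPort and ipcPort move from field numbers 4/5 to 5/6 and all
fields are required, this is a wire-incompatible change to DatanodeIDProto:
old and new peers cannot exchange this message. A minimal sketch of
populating the regenerated Java API, assuming protoc's standard field-name
mapping:

    DatanodeIDProto proto = DatanodeIDProto.newBuilder()
        .setIpAddr("10.0.0.1")          // was "name" (IP:port); now IP only
        .setHostName("dn1.example.com")
        .setStorageID("DS-1234")
        .setXferPort(50010)             // new field 4 (data streaming port)
        .setInfoPort(50075)             // shifted from field 4 to 5
        .setIpcPort(50020)              // shifted from field 5 to 6
        .build();
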
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java Sun Apr  1 22:12:12 2012
@@ -143,7 +143,7 @@ public class BlockReaderTestUtil {
     Socket sock = null;
     ExtendedBlock block = testBlock.getBlock();
     DatanodeInfo[] nodes = testBlock.getLocations();
-    targetAddr = NetUtils.createSocketAddr(nodes[0].getName());
+    targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
     sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
     sock.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
     sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java Sun Apr  1 22:12:12 2012
@@ -339,7 +339,7 @@ public class DFSTestUtil {
   }
 
   /*
-   * Wait up to 20s for the given DN (host:port) to be decommissioned.
+   * Wait up to 20s for the given DN (IP:port) to be decommissioned
    */
   public static void waitForDecommission(FileSystem fs, String name) 
       throws IOException, InterruptedException, TimeoutException {
@@ -351,7 +351,7 @@ public class DFSTestUtil {
       Thread.sleep(1000);
       DistributedFileSystem dfs = (DistributedFileSystem)fs;
       for (DatanodeInfo info : dfs.getDataNodeStats()) {
-        if (name.equals(info.getName())) {
+        if (name.equals(info.getXferAddr())) {
           dn = info;
         }
       }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Sun Apr  1 22:12:12 2012
@@ -1439,7 +1439,7 @@ public class MiniDFSCluster {
       DataNode dn = dataNodes.get(i).datanode;
       LOG.info("DN name=" + dnName + " found DN=" + dn +
           " with name=" + dn.getDisplayName());
-      if (dnName.equals(dn.getDatanodeId().getName())) {
+      if (dnName.equals(dn.getDatanodeId().getXferAddr())) {
         break;
       }
     }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java Sun Apr  1 22:12:12 2012
@@ -220,7 +220,7 @@ public class TestClientReportBadBlock {
       final DataNode dn = cluster.getDataNode(dninfo.getIpcPort());
       corruptBlock(block, dn);
       LOG.debug("Corrupted block " + block.getBlockName() + " on data node "
-          + dninfo.getName());
+          + dninfo);
 
     }
   }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java Sun Apr  1 22:12:12 2012
@@ -334,7 +334,7 @@ public class TestDFSClientRetries extend
       LocatedBlock badLocatedBlock = new LocatedBlock(
         goodLocatedBlock.getBlock(),
         new DatanodeInfo[] {
-          new DatanodeInfo(new DatanodeID("255.255.255.255:234"))
+          new DatanodeInfo(new DatanodeID("255.255.255.255", 234))
         },
         goodLocatedBlock.getStartOffset(),
         false);
@@ -608,7 +608,7 @@ public class TestDFSClientRetries extend
           cluster.getNameNodeRpc(), f, 0, Long.MAX_VALUE)
             .getLocatedBlocks();
       final DatanodeInfo first = locatedblocks.get(0).getLocations()[0];
-      cluster.stopDataNode(first.getName());
+      cluster.stopDataNode(first.getXferAddr());
 
       //get checksum again
       final FileChecksum cs2 = fs.getFileChecksum(p);
@@ -629,7 +629,7 @@ public class TestDFSClientRetries extend
 
     final InetSocketAddress addr = NetUtils.getConnectAddress(server);
     DatanodeID fakeDnId = new DatanodeID(
-        "localhost:" + addr.getPort(), "localhost", "fake-storage", 0, addr.getPort());
+        "localhost", "localhost", "fake-storage", addr.getPort(), 0, addr.getPort());
     
     ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
     LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);

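Taken together, the test updates imply a fixed argument order for the
reworked DatanodeID constructors; a hedged reading of the hunks above and
in the test files below:

    // Inferred 6-arg form: ipAddr, hostName, storageID,
    //                      xferPort, infoPort, ipcPort
    DatanodeID dnId = new DatanodeID(
        "localhost", "localhost", "fake-storage",
        addr.getPort(),  // xferPort
        0,               // infoPort
        addr.getPort()); // ipcPort

    // Inferred 2-arg convenience form: ipAddr, xferPort
    DatanodeID dn = new DatanodeID("255.255.255.255", 234);
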
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java Sun Apr  1 22:12:12 2012
@@ -128,8 +128,7 @@ public class TestDataTransferProtocol ex
       
       if (eofExpected) {
         throw new IOException("Did not recieve IOException when an exception " +
-                              "is expected while reading from " + 
-                              datanode.getName());
+                              "is expected while reading from " + datanode); 
       }
       
       byte[] needed = recvBuf.toByteArray();
@@ -215,7 +214,7 @@ public class TestDataTransferProtocol ex
       String poolId = cluster.getNamesystem().getBlockPoolId(); 
       datanode = DataNodeTestUtils.getDNRegistrationForBP(
           cluster.getDataNodes().get(0), poolId);
-      dnAddr = NetUtils.createSocketAddr(datanode.getName());
+      dnAddr = NetUtils.createSocketAddr(datanode.getXferAddr());
       FileSystem fileSys = cluster.getFileSystem();
 
       /* Test writing to finalized replicas */
@@ -349,7 +348,7 @@ public class TestDataTransferProtocol ex
                  new InetSocketAddress("localhost", cluster.getNameNodePort()),
                  conf);                
     datanode = dfsClient.datanodeReport(DatanodeReportType.LIVE)[0];
-    dnAddr = NetUtils.createSocketAddr(datanode.getName());
+    dnAddr = NetUtils.createSocketAddr(datanode.getXferAddr());
     FileSystem fileSys = cluster.getFileSystem();
     
     int fileLen = Math.min(conf.getInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096), 4096);

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java Sun Apr  1 22:12:12 2012
@@ -389,9 +389,8 @@ public class TestDatanodeDeath extends T
         cluster.stopDataNode(victim);
       } else {
         int victim = datanodeToKill;
-        System.out.println("SimpleTest stopping datanode " +
-                            targets[victim].getName());
-        cluster.stopDataNode(targets[victim].getName());
+        System.out.println("SimpleTest stopping datanode " + targets[victim]);
+        cluster.stopDataNode(targets[victim].getXferAddr());
       }
       System.out.println("SimpleTest stopping datanode complete");
 

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java Sun Apr  1 22:12:12 2012
@@ -151,27 +151,27 @@ public class TestDecommission {
       int hasdown = 0;
       DatanodeInfo[] nodes = blk.getLocations();
       for (int j = 0; j < nodes.length; j++) { // for each replica
-        if (isNodeDown && nodes[j].getName().equals(downnode)) {
+        if (isNodeDown && nodes[j].getXferAddr().equals(downnode)) {
           hasdown++;
           //Downnode must actually be decommissioned
           if (!nodes[j].isDecommissioned()) {
             return "For block " + blk.getBlock() + " replica on " +
-              nodes[j].getName() + " is given as downnode, " +
+              nodes[j] + " is given as downnode, " +
               "but is not decommissioned";
           }
           //Decommissioned node (if any) should only be last node in list.
           if (j != nodes.length - 1) {
             return "For block " + blk.getBlock() + " decommissioned node "
-              + nodes[j].getName() + " was not last node in list: "
+              + nodes[j] + " was not last node in list: "
               + (j + 1) + " of " + nodes.length;
           }
           LOG.info("Block " + blk.getBlock() + " replica on " +
-            nodes[j].getName() + " is decommissioned.");
+            nodes[j] + " is decommissioned.");
         } else {
           //Non-downnodes must not be decommissioned
           if (nodes[j].isDecommissioned()) {
             return "For block " + blk.getBlock() + " replica on " +
-              nodes[j].getName() + " is unexpectedly decommissioned";
+              nodes[j] + " is unexpectedly decommissioned";
           }
         }
       }
@@ -215,7 +215,7 @@ public class TestDecommission {
         found = true;
       }
     }
-    String nodename = info[index].getName();
+    String nodename = info[index].getXferAddr();
     LOG.info("Decommissioning node: " + nodename);
 
     // write nodename into the exclude file.
@@ -236,7 +236,7 @@ public class TestDecommission {
 
   /* stop decommission of the datanode and wait for each to reach the NORMAL state */
   private void recomissionNode(DatanodeInfo decommissionedNode) throws IOException {
-    LOG.info("Recommissioning node: " + decommissionedNode.getName());
+    LOG.info("Recommissioning node: " + decommissionedNode);
     writeConfigFile(excludeFile, null);
     refreshNodes(cluster.getNamesystem(), conf);
     waitNodeState(decommissionedNode, AdminStates.NORMAL);
@@ -373,7 +373,7 @@ public class TestDecommission {
         DFSClient client = getDfsClient(cluster.getNameNode(i), conf);
         assertEquals("All datanodes must be alive", numDatanodes, 
             client.datanodeReport(DatanodeReportType.LIVE).length);
-        assertNull(checkFile(fileSys, file1, replicas, decomNode.getName(), numDatanodes));
+        assertNull(checkFile(fileSys, file1, replicas, decomNode.getXferAddr(), numDatanodes));
         cleanupFile(fileSys, file1);
       }
     }
@@ -414,7 +414,7 @@ public class TestDecommission {
       DFSClient client = getDfsClient(cluster.getNameNode(i), conf);
       assertEquals("All datanodes must be alive", numDatanodes, 
           client.datanodeReport(DatanodeReportType.LIVE).length);
-      assertNull(checkFile(fileSys, file1, replicas, decomNode.getName(), numDatanodes));
+      assertNull(checkFile(fileSys, file1, replicas, decomNode.getXferAddr(), numDatanodes));
 
       // stop decommission and check if the new replicas are removed
       recomissionNode(decomNode);

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java Sun Apr  1 22:12:12 2012
@@ -147,7 +147,7 @@ public class TestHftpFileSystem {
     // if we were redirected to the right DN.
     BlockLocation[] locations = 
         hdfs.getFileBlockLocations(path, 0, 10);
-    String locationName = locations[0].getNames()[0];
+    String xferAddr = locations[0].getNames()[0];
 
     // Connect to the NN to get redirected
     URL u = hftpFs.getNamenodeURL(
@@ -164,7 +164,7 @@ public class TestHftpFileSystem {
     for (DataNode node : cluster.getDataNodes()) {
       DatanodeRegistration dnR = 
         DataNodeTestUtils.getDNRegistrationForBP(node, blockPoolId);
-      if (dnR.getName().equals(locationName)) {
+      if (dnR.getXferAddr().equals(xferAddr)) {
         checked = true;
         assertEquals(dnR.getInfoPort(), conn.getURL().getPort());
       }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java Sun Apr  1 22:12:12 2012
@@ -60,7 +60,7 @@ public class TestIsMethodSupported {
         .numDataNodes(1).build();
     nnAddress = cluster.getNameNode().getNameNodeAddress();
     DataNode dn = cluster.getDataNodes().get(0);
-    dnAddress = new InetSocketAddress(dn.getDatanodeId().getHost(),
+    dnAddress = new InetSocketAddress(dn.getDatanodeId().getIpAddr(),
                                       dn.getIpcPort());
   }
 

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java Sun Apr  1 22:12:12 2012
@@ -117,7 +117,7 @@ public class TestReplication extends Tes
       isOnSameRack = false;
       isNotOnSameRack = false;
       for (int i = 0; i < datanodes.length-1; i++) {
-        LOG.info("datanode "+ i + ": "+ datanodes[i].getName());
+        LOG.info("datanode "+ i + ": "+ datanodes[i]);
         boolean onRack = false;
         for( int j=i+1; j<datanodes.length; j++) {
            if( datanodes[i].getNetworkLocation().equals(

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java Sun Apr  1 22:12:12 2012
@@ -130,20 +130,19 @@ public class TestPBHelper {
 
   @Test
   public void testConvertDatanodeID() {
-    DatanodeID dn = new DatanodeID("node", "node", "sid", 1, 2);
+    DatanodeID dn = new DatanodeID("node", "node", "sid", 1, 2, 3);
     DatanodeIDProto dnProto = PBHelper.convert(dn);
     DatanodeID dn2 = PBHelper.convert(dnProto);
     compare(dn, dn2);
   }
   
   void compare(DatanodeID dn, DatanodeID dn2) {
-    assertEquals(dn.getHost(), dn2.getHost());
-    assertEquals(dn.getInfoPort(), dn2.getInfoPort());
-    assertEquals(dn.getIpcPort(), dn2.getIpcPort());
-    assertEquals(dn.getName(), dn2.getName());
+    assertEquals(dn.getIpAddr(), dn2.getIpAddr());
     assertEquals(dn.getHostName(), dn2.getHostName());
-    assertEquals(dn.getPort(), dn2.getPort());
     assertEquals(dn.getStorageID(), dn2.getStorageID());
+    assertEquals(dn.getXferPort(), dn2.getXferPort());
+    assertEquals(dn.getInfoPort(), dn2.getInfoPort());
+    assertEquals(dn.getIpcPort(), dn2.getIpcPort());
   }
 
   @Test
@@ -281,7 +280,7 @@ public class TestPBHelper {
   }
   
   private DatanodeInfo getDNInfo() {
-    return new DatanodeInfo(new DatanodeID("node", "node", "sid", 1, 2));
+    return new DatanodeInfo(new DatanodeID("node", "node", "sid", 0, 1, 2));
   }
   
   private void compare(DatanodeInfo dn1, DatanodeInfo dn2) {
@@ -292,7 +291,7 @@ public class TestPBHelper {
       assertEquals(dn1.getDatanodeReport(), dn2.getDatanodeReport());
       assertEquals(dn1.getDfsUsed(), dn1.getDfsUsed());
       assertEquals(dn1.getDfsUsedPercent(), dn1.getDfsUsedPercent());
-      assertEquals(dn1.getHost(), dn2.getHost());
+      assertEquals(dn1.getIpAddr(), dn2.getIpAddr());
       assertEquals(dn1.getHostName(), dn2.getHostName());
       assertEquals(dn1.getInfoPort(), dn2.getInfoPort());
       assertEquals(dn1.getIpcPort(), dn2.getIpcPort());
@@ -401,11 +400,11 @@ public class TestPBHelper {
   @Test
   public void testConvertLocatedBlock() {
     DatanodeInfo [] dnInfos = new DatanodeInfo[3];
-    dnInfos[0] = new DatanodeInfo("host0", "host0", "0", 5000, 5001, 20000, 10001, 9999,
+    dnInfos[0] = new DatanodeInfo("host0", "host0", "0", 5000, 5001, 5002, 20000, 10001, 9999,
         59, 69, 32, "local", AdminStates.DECOMMISSION_INPROGRESS);
-    dnInfos[1] = new DatanodeInfo("host1", "host1", "1", 5000, 5001, 20000, 10001, 9999,
+    dnInfos[1] = new DatanodeInfo("host1", "host1", "1", 5000, 5001, 5002, 20000, 10001, 9999,
         59, 69, 32, "local", AdminStates.DECOMMISSIONED);
-    dnInfos[2] = new DatanodeInfo("host2", "host2", "2", 5000, 5001, 20000, 10001, 9999,
+    dnInfos[2] = new DatanodeInfo("host2", "host2", "2", 5000, 5001, 5002, 20000, 10001, 9999,
         59, 69, 32, "local", AdminStates.NORMAL);
     LocatedBlock lb = new LocatedBlock(
         new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);
@@ -424,7 +423,7 @@ public class TestPBHelper {
   
   @Test
   public void testConvertDatanodeRegistration() {
-    DatanodeID dnId = new DatanodeID("host", "host", "xyz", 1, 0);
+    DatanodeID dnId = new DatanodeID("host", "host", "xyz", 0, 1, 0);
     BlockKey[] keys = new BlockKey[] { getBlockKey(2), getBlockKey(3) };
     ExportedBlockKeys expKeys = new ExportedBlockKeys(true, 9, 10,
         getBlockKey(1), keys);

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java Sun Apr  1 22:12:12 2012
@@ -279,8 +279,8 @@ public class TestBlockToken {
     server.start();
 
     final InetSocketAddress addr = NetUtils.getConnectAddress(server);
-    DatanodeID fakeDnId = new DatanodeID("localhost:" + addr.getPort(),
-        "localhost", "fake-storage", 0, addr.getPort());
+    DatanodeID fakeDnId = new DatanodeID("localhost",
+        "localhost", "fake-storage", addr.getPort(), 0, addr.getPort());
 
     ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
     LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java Sun Apr  1 22:12:12 2012
@@ -165,7 +165,7 @@ public class BlockManagerTestUtil {
       DatanodeDescriptor[] dnds = hbm.getDatanodes();
       DatanodeDescriptor theDND = null;
       for (DatanodeDescriptor dnd : dnds) {
-        if (dnd.getName().equals(dnName)) {
+        if (dnd.getXferAddr().equals(dnName)) {
           theDND = dnd;
         }
       }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java Sun Apr  1 22:12:12 2012
@@ -48,12 +48,12 @@ import com.google.common.collect.Lists;
 
 public class TestBlockManager {
   private final List<DatanodeDescriptor> nodes = ImmutableList.of( 
-      new DatanodeDescriptor(new DatanodeID("h1:5020"), "/rackA"),
-      new DatanodeDescriptor(new DatanodeID("h2:5020"), "/rackA"),
-      new DatanodeDescriptor(new DatanodeID("h3:5020"), "/rackA"),
-      new DatanodeDescriptor(new DatanodeID("h4:5020"), "/rackB"),
-      new DatanodeDescriptor(new DatanodeID("h5:5020"), "/rackB"),
-      new DatanodeDescriptor(new DatanodeID("h6:5020"), "/rackB")
+      new DatanodeDescriptor(new DatanodeID("h1", 5020), "/rackA"),
+      new DatanodeDescriptor(new DatanodeID("h2", 5020), "/rackA"),
+      new DatanodeDescriptor(new DatanodeID("h3", 5020), "/rackA"),
+      new DatanodeDescriptor(new DatanodeID("h4", 5020), "/rackB"),
+      new DatanodeDescriptor(new DatanodeID("h5", 5020), "/rackB"),
+      new DatanodeDescriptor(new DatanodeID("h6", 5020), "/rackB")
     );
   private final List<DatanodeDescriptor> rackA = nodes.subList(0, 3);
   private final List<DatanodeDescriptor> rackB = nodes.subList(3, 6);
@@ -272,7 +272,7 @@ public class TestBlockManager {
 
     // the block is still under-replicated. Add a new node. This should allow
     // the third off-rack replica.
-    DatanodeDescriptor rackCNode = new DatanodeDescriptor(new DatanodeID("h7:5020"), "/rackC");
+    DatanodeDescriptor rackCNode = new DatanodeDescriptor(new DatanodeID("h7", 100), "/rackC");
     addNodes(ImmutableList.of(rackCNode));
     try {
       DatanodeDescriptor[] pipeline2 = scheduleSingleReplication(blockInfo);

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java Sun Apr  1 22:12:12 2012
@@ -137,7 +137,7 @@ public class TestBlockTokenWithDFS {
     ExtendedBlock block = lblock.getBlock();
     try {
       DatanodeInfo[] nodes = lblock.getLocations();
-      targetAddr = NetUtils.createSocketAddr(nodes[0].getName());
+      targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
       s = NetUtils.getDefaultSocketFactory(conf).createSocket();
       s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
       s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java Sun Apr  1 22:12:12 2012
@@ -28,13 +28,13 @@ import org.junit.Test;
 public class TestHost2NodesMap {
   private Host2NodesMap map = new Host2NodesMap();
   private final DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
-    new DatanodeDescriptor(new DatanodeID("h1:5020"), "/d1/r1"),
-    new DatanodeDescriptor(new DatanodeID("h2:5020"), "/d1/r1"),
-    new DatanodeDescriptor(new DatanodeID("h3:5020"), "/d1/r2"),
-    new DatanodeDescriptor(new DatanodeID("h3:5030"), "/d1/r2"),
+    new DatanodeDescriptor(new DatanodeID("ip1", "h1", "", 5020, -1, -1), "/d1/r1"),
+    new DatanodeDescriptor(new DatanodeID("ip2", "h1", "", 5020, -1, -1), "/d1/r1"),
+    new DatanodeDescriptor(new DatanodeID("ip3", "h1", "", 5020, -1, -1), "/d1/r2"),
+    new DatanodeDescriptor(new DatanodeID("ip3", "h1", "", 5030, -1, -1), "/d1/r2"),
   };
   private final DatanodeDescriptor NULL_NODE = null; 
-  private final DatanodeDescriptor NODE = new DatanodeDescriptor(new DatanodeID("h3:5040"),
+  private final DatanodeDescriptor NODE = new DatanodeDescriptor(new DatanodeID("h3", 5040),
       "/d1/r4");
 
   @Before
@@ -56,24 +56,11 @@ public class TestHost2NodesMap {
 
   @Test
   public void testGetDatanodeByHost() throws Exception {
-    assertTrue(map.getDatanodeByHost("h1")==dataNodes[0]);
-    assertTrue(map.getDatanodeByHost("h2")==dataNodes[1]);
-    DatanodeDescriptor node = map.getDatanodeByHost("h3");
+    assertTrue(map.getDatanodeByHost("ip1")==dataNodes[0]);
+    assertTrue(map.getDatanodeByHost("ip2")==dataNodes[1]);
+    DatanodeDescriptor node = map.getDatanodeByHost("ip3");
     assertTrue(node==dataNodes[2] || node==dataNodes[3]);
-    assertTrue(null==map.getDatanodeByHost("h4"));
-  }
-
-  @Test
-  public void testGetDatanodeByName() throws Exception {
-    assertTrue(map.getDatanodeByName("h1:5020")==dataNodes[0]);
-    assertTrue(map.getDatanodeByName("h1:5030")==null);
-    assertTrue(map.getDatanodeByName("h2:5020")==dataNodes[1]);
-    assertTrue(map.getDatanodeByName("h2:5030")==null);
-    assertTrue(map.getDatanodeByName("h3:5020")==dataNodes[2]);
-    assertTrue(map.getDatanodeByName("h3:5030")==dataNodes[3]);
-    assertTrue(map.getDatanodeByName("h3:5040")==null);
-    assertTrue(map.getDatanodeByName("h4")==null);
-    assertTrue(map.getDatanodeByName(null)==null);
+    assertTrue(null==map.getDatanodeByHost("ip4"));
   }
 
   @Test
@@ -81,21 +68,21 @@ public class TestHost2NodesMap {
     assertFalse(map.remove(NODE));
     
     assertTrue(map.remove(dataNodes[0]));
-    assertTrue(map.getDatanodeByHost("h1")==null);
-    assertTrue(map.getDatanodeByHost("h2")==dataNodes[1]);
-    DatanodeDescriptor node = map.getDatanodeByHost("h3");
+    assertTrue(map.getDatanodeByHost("ip1")==null);
+    assertTrue(map.getDatanodeByHost("ip2")==dataNodes[1]);
+    DatanodeDescriptor node = map.getDatanodeByHost("ip3");
     assertTrue(node==dataNodes[2] || node==dataNodes[3]);
-    assertTrue(null==map.getDatanodeByHost("h4"));
+    assertTrue(null==map.getDatanodeByHost("ip4"));
     
     assertTrue(map.remove(dataNodes[2]));
-    assertTrue(map.getDatanodeByHost("h1")==null);
-    assertTrue(map.getDatanodeByHost("h2")==dataNodes[1]);
-    assertTrue(map.getDatanodeByHost("h3")==dataNodes[3]);
+    assertTrue(map.getDatanodeByHost("ip1")==null);
+    assertTrue(map.getDatanodeByHost("ip2")==dataNodes[1]);
+    assertTrue(map.getDatanodeByHost("ip3")==dataNodes[3]);
     
     assertTrue(map.remove(dataNodes[3]));
-    assertTrue(map.getDatanodeByHost("h1")==null);
-    assertTrue(map.getDatanodeByHost("h2")==dataNodes[1]);
-    assertTrue(map.getDatanodeByHost("h3")==null);
+    assertTrue(map.getDatanodeByHost("ip1")==null);
+    assertTrue(map.getDatanodeByHost("ip2")==dataNodes[1]);
+    assertTrue(map.getDatanodeByHost("ip3")==null);
     
     assertFalse(map.remove(NULL_NODE));
     assertTrue(map.remove(dataNodes[1]));

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java Sun Apr  1 22:12:12 2012
@@ -78,11 +78,11 @@ public class TestNodeCount extends TestC
       
       // bring down first datanode
       DatanodeDescriptor datanode = datanodes[0];
-      DataNodeProperties dnprop = cluster.stopDataNode(datanode.getName());
+      DataNodeProperties dnprop = cluster.stopDataNode(datanode.getXferAddr());
       
       // make sure that NN detects that the datanode is down
       BlockManagerTestUtil.noticeDeadDatanode(
-          cluster.getNameNode(), datanode.getName());
+          cluster.getNameNode(), datanode.getXferAddr());
       
       // the block will be replicated
       DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);
@@ -112,10 +112,10 @@ public class TestNodeCount extends TestC
       assertTrue(nonExcessDN!=null);
       
       // bring down non excessive datanode
-      dnprop = cluster.stopDataNode(nonExcessDN.getName());
+      dnprop = cluster.stopDataNode(nonExcessDN.getXferAddr());
       // make sure that NN detects that the datanode is down
       BlockManagerTestUtil.noticeDeadDatanode(
-          cluster.getNameNode(), nonExcessDN.getName());
+          cluster.getNameNode(), nonExcessDN.getXferAddr());
 
       // The block should be replicated
       initializeTimeout(TIMEOUT);

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java Sun Apr  1 22:12:12 2012
@@ -91,9 +91,9 @@ public class TestOverReplicatedBlocks ex
         synchronized(hm) {
           // set live datanode's remaining space to be 0 
           // so they will be chosen to be deleted when over-replication occurs
-          String corruptMachineName = corruptDataNode.getName();
+          String corruptMachineName = corruptDataNode.getXferAddr();
           for (DatanodeDescriptor datanode : hm.getDatanodes()) {
-            if (!corruptMachineName.equals(datanode.getName())) {
+            if (!corruptMachineName.equals(datanode.getXferAddr())) {
               datanode.updateHeartbeat(100L, 100L, 0L, 100L, 0, 0);
             }
           }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java Sun Apr  1 22:12:12 2012
@@ -40,7 +40,7 @@ public class TestPendingDataNodeMessages
   private final Block block2Gs1 = new Block(2, 0, 1);
   
   private final DatanodeDescriptor fakeDN = new DatanodeDescriptor(
-      new DatanodeID("fake"));
+      new DatanodeID("fake", 100));
   
   @Test
   public void testQueues() {

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java Sun Apr  1 22:12:12 2012
@@ -52,16 +52,16 @@ public class TestReplicationPolicy {
   private static final String filename = "/dummyfile.txt";
   private static final DatanodeDescriptor dataNodes[] = 
     new DatanodeDescriptor[] {
-      new DatanodeDescriptor(new DatanodeID("h1:5020"), "/d1/r1"),
-      new DatanodeDescriptor(new DatanodeID("h2:5020"), "/d1/r1"),
-      new DatanodeDescriptor(new DatanodeID("h3:5020"), "/d1/r2"),
-      new DatanodeDescriptor(new DatanodeID("h4:5020"), "/d1/r2"),
-      new DatanodeDescriptor(new DatanodeID("h5:5020"), "/d2/r3"),
-      new DatanodeDescriptor(new DatanodeID("h6:5020"), "/d2/r3")
+      new DatanodeDescriptor(new DatanodeID("h1", 5020), "/d1/r1"),
+      new DatanodeDescriptor(new DatanodeID("h2", 5020), "/d1/r1"),
+      new DatanodeDescriptor(new DatanodeID("h3", 5020), "/d1/r2"),
+      new DatanodeDescriptor(new DatanodeID("h4", 5020), "/d1/r2"),
+      new DatanodeDescriptor(new DatanodeID("h5", 5020), "/d2/r3"),
+      new DatanodeDescriptor(new DatanodeID("h6", 5020), "/d2/r3")
     };
    
   private final static DatanodeDescriptor NODE = 
-    new DatanodeDescriptor(new DatanodeID("h7:5020"), "/d2/r4");
+    new DatanodeDescriptor(new DatanodeID("h7", 5020), "/d2/r4");
   
   static {
     try {

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java Sun Apr  1 22:12:12 2012
@@ -197,9 +197,9 @@ public class TestBlockRecovery {
         locs, RECOVERY_ID);
     ArrayList<BlockRecord> syncList = new ArrayList<BlockRecord>(2);
     BlockRecord record1 = new BlockRecord(
-        new DatanodeID("xx", "yy", "zz", 44, 55), dn1, replica1);
+        new DatanodeID("xx", "yy", "zz", 1, 2, 3), dn1, replica1);
     BlockRecord record2 = new BlockRecord(
-        new DatanodeID("aa", "bb", "cc", 11, 22), dn2, replica2);
+        new DatanodeID("aa", "bb", "cc", 1, 2, 3), dn2, replica2);
     syncList.add(record1);
     syncList.add(record2);
     
@@ -402,7 +402,7 @@ public class TestBlockRecovery {
   private Collection<RecoveringBlock> initRecoveringBlocks() throws IOException {
     Collection<RecoveringBlock> blocks = new ArrayList<RecoveringBlock>(1);
     DatanodeInfo mockOtherDN = new DatanodeInfo(
-        new DatanodeID("127.0.0.1", "localhost", "storage-1234", 0, 0));
+        new DatanodeID("127.0.0.1", "localhost", "storage-1234", 0, 0, 0));
     DatanodeInfo[] locs = new DatanodeInfo[] {
         new DatanodeInfo(dn.getDNRegistrationForBP(block.getBlockPoolId())),
         mockOtherDN };

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java Sun Apr  1 22:12:12 2012
@@ -162,16 +162,16 @@ public class TestBlockReplacement extend
       
       // start to replace the block
       // case 1: proxySource does not contain the block
-      LOG.info("Testcase 1: Proxy " + newNode.getName() 
+      LOG.info("Testcase 1: Proxy " + newNode
            + " does not contain the block " + b);
       assertFalse(replaceBlock(b, source, newNode, proxies.get(0)));
       // case 2: destination already contains the block
-      LOG.info("Testcase 2: Destination " + proxies.get(1).getName() 
+      LOG.info("Testcase 2: Destination " + proxies.get(1)
           + " contains the block " + b);
       assertFalse(replaceBlock(b, source, proxies.get(0), proxies.get(1)));
       // case 3: correct case
-      LOG.info("Testcase 3: Source=" + source.getName() + " Proxy=" + 
-          proxies.get(0).getName() + " Destination=" + newNode.getName() );
+      LOG.info("Testcase 3: Source=" + source + " Proxy=" + 
+          proxies.get(0) + " Destination=" + newNode );
       assertTrue(replaceBlock(b, source, proxies.get(0), newNode));
       // after cluster has time to resolve the over-replication,
       // block locations should contain two proxies and newNode
@@ -181,7 +181,7 @@ public class TestBlockReplacement extend
           DEFAULT_BLOCK_SIZE, REPLICATION_FACTOR, client);
       // case 4: proxies.get(0) is not a valid del hint
       // expect either source or newNode replica to be deleted instead
-      LOG.info("Testcase 4: invalid del hint " + proxies.get(0).getName() );
+      LOG.info("Testcase 4: invalid del hint " + proxies.get(0) );
       assertTrue(replaceBlock(b, proxies.get(0), proxies.get(1), source));
       // after cluster has time to resolve the over-replication,
       // block locations should contain two proxies,
@@ -222,7 +222,7 @@ public class TestBlockReplacement extend
         for (DatanodeInfo node : includeNodes) {
           if (!nodeLocations.contains(node) ) {
             notDone=true; 
-            LOG.info("Block is not located at " + node.getName() );
+            LOG.info("Block is not located at " + node );
             break;
           }
         }
@@ -231,9 +231,9 @@ public class TestBlockReplacement extend
         String expectedNodesList = "";
         String currentNodesList = "";
         for (DatanodeInfo dn : includeNodes) 
-          expectedNodesList += dn.getName() + ", ";
+          expectedNodesList += dn + ", ";
         for (DatanodeInfo dn : nodes) 
-          currentNodesList += dn.getName() + ", ";
+          currentNodesList += dn + ", ";
         LOG.info("Expected replica nodes are: " + expectedNodesList);
         LOG.info("Current actual replica nodes are: " + currentNodesList);
         throw new TimeoutException(
@@ -254,7 +254,7 @@ public class TestBlockReplacement extend
       DatanodeInfo sourceProxy, DatanodeInfo destination) throws IOException {
     Socket sock = new Socket();
     sock.connect(NetUtils.createSocketAddr(
-        destination.getName()), HdfsServerConstants.READ_TIMEOUT);
+        destination.getXferAddr()), HdfsServerConstants.READ_TIMEOUT); 
     sock.setKeepAlive(true);
     // sendRequest
     DataOutputStream out = new DataOutputStream(sock.getOutputStream());
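
As the last hunk shows, connecting to a datanode's data-transfer service now
goes through getXferAddr() rather than the removed getName(). A hedged sketch
of that idiom in isolation (the wrapper class and method name are
hypothetical; the Hadoop calls are the ones used in the test above):

    import java.io.IOException;
    import java.net.Socket;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
    import org.apache.hadoop.net.NetUtils;

    public class XferConnectSketch {
      // Connects to the datanode's data-transfer address (ip:xferPort),
      // the pair that getName() used to encode as a single string.
      static Socket connectToDatanode(DatanodeInfo dn) throws IOException {
        Socket sock = new Socket();
        sock.connect(NetUtils.createSocketAddr(dn.getXferAddr()),
            HdfsServerConstants.READ_TIMEOUT);
        sock.setKeepAlive(true);
        return sock;
      }
    }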

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java Sun Apr  1 22:12:12 2012
@@ -161,7 +161,7 @@ public class TestDataNodeMultipleRegistr
       assertEquals("number of volumes is wrong", 2, volInfos.size());
 
       for (BPOfferService bpos : dn.getAllBpOs()) {
-        LOG.info("reg: bpid=" + "; name=" + bpos.bpRegistration.getName() + "; sid="
+        LOG.info("reg: bpid=" + "; name=" + bpos.bpRegistration + "; sid="
             + bpos.bpRegistration.getStorageID() + "; nna=" +
             getNNSocketAddress(bpos));
       }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java Sun Apr  1 22:12:12 2012
@@ -270,7 +270,7 @@ public class TestDataNodeVolumeFailure {
     Socket s = null;
     ExtendedBlock block = lblock.getBlock(); 
    
-    targetAddr = NetUtils.createSocketAddr(datanode.getName());
+    targetAddr = NetUtils.createSocketAddr(datanode.getXferAddr());
       
     s = NetUtils.getDefaultSocketFactory(conf).createSocket();
     s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java Sun Apr  1 22:12:12 2012
@@ -183,7 +183,7 @@ public class TestDeleteBlockPool {
       Assert.assertEquals(1, dn1.getAllBpOs().length);
       
       DFSAdmin admin = new DFSAdmin(nn1Conf);
-      String dn1Address = dn1.getDatanodeId().getHost() + ":" + dn1.getIpcPort();
+      String dn1Address = dn1.getDatanodeId().getIpAddr() + ":" + dn1.getIpcPort();
       String[] args = { "-deleteBlockPool", dn1Address, bpid2 };
       
       int ret = admin.run(args);

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java Sun Apr  1 22:12:12 2012
@@ -348,7 +348,7 @@ public class TestInterDatanodeProtocol {
 
     final InetSocketAddress addr = NetUtils.getConnectAddress(server);
     DatanodeID fakeDnId = new DatanodeID(
-        "localhost:" + addr.getPort(), "localhost", "fake-storage", 0, addr.getPort());
+        "localhost", "localhost", "fake-storage", addr.getPort(), 0, addr.getPort());
     DatanodeInfo dInfo = new DatanodeInfo(fakeDnId);
     InterDatanodeProtocol proxy = null;
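
This hunk makes the shape of the migration explicit: the old first argument
packed "host:port" into one string, while the new form splits the address
from the transfer port. A sketch of the mapping (the class name and stand-in
port value are hypothetical):

    import org.apache.hadoop.hdfs.protocol.DatanodeID;

    public class FakeDnIdSketch {
      public static void main(String[] args) {
        int port = 12345; // stand-in for addr.getPort() in the test above
        // Before r1308205 the name argument packed "host:port" together:
        //   new DatanodeID("localhost:" + port, "localhost", "fake-storage", 0, port);
        // Now the address and the transfer port are separate fields:
        DatanodeID fakeDnId = new DatanodeID(
            "localhost", "localhost", "fake-storage", port, 0, port);
        System.out.println(fakeDnId);
      }
    }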
 

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Sun Apr  1 22:12:12 2012
@@ -766,28 +766,33 @@ public class NNThroughputBenchmark {
     long[] blockReportList;
 
     /**
-     * Get data-node in the form 
-     * <host name> : <port>
-     * where port is a 6 digit integer.
+     * Return a 6-digit integer port.
      * This is necessary in order to provide lexicographic ordering.
      * Host names are all the same, the ordering goes by port numbers.
      */
-    private static String getNodeName(int port) throws IOException {
-      String machineName = DNS.getDefaultHost("default", "default");
-      String sPort = String.valueOf(100000 + port);
-      if(sPort.length() > 6)
-        throw new IOException("Too many data-nodes.");
-      return machineName + ":" + sPort;
+    private static int getNodePort(int num) throws IOException {
+      int port = 100000 + num;
+      if (String.valueOf(port).length() > 6) {
+        throw new IOException("Too many data-nodes");
+      }
+      return port;
     }
 
     TinyDatanode(int dnIdx, int blockCapacity) throws IOException {
-      dnRegistration = new DatanodeRegistration(getNodeName(dnIdx));
+      String hostName = DNS.getDefaultHost("default", "default");
+      dnRegistration = new DatanodeRegistration(hostName);
+      dnRegistration.setXferPort(getNodePort(dnIdx));
+      dnRegistration.setHostName(hostName);
       this.blocks = new ArrayList<Block>(blockCapacity);
       this.nrBlocks = 0;
     }
 
-    String getName() {
-      return dnRegistration.getName();
+    public String toString() {
+      return dnRegistration.toString();
+    }
+
+    String getXferAddr() {
+      return dnRegistration.getXferAddr();
     }
 
     void register() throws IOException {
@@ -850,8 +855,8 @@ public class NNThroughputBenchmark {
       return blockReportList;
     }
 
-    public int compareTo(String name) {
-      return getName().compareTo(name);
+    public int compareTo(String xferAddr) {
+      return getXferAddr().compareTo(xferAddr);
     }
 
     /**
@@ -889,10 +894,12 @@ public class NNThroughputBenchmark {
         for(int t = 0; t < blockTargets.length; t++) {
           DatanodeInfo dnInfo = blockTargets[t];
           DatanodeRegistration receivedDNReg;
-          receivedDNReg = new DatanodeRegistration(dnInfo.getName());
+          receivedDNReg = new DatanodeRegistration(dnInfo.getIpAddr());
           receivedDNReg.setStorageInfo(
                           new DataStorage(nsInfo, dnInfo.getStorageID()));
+          receivedDNReg.setXferPort(dnInfo.getXferPort());
           receivedDNReg.setInfoPort(dnInfo.getInfoPort());
+          receivedDNReg.setIpcPort(dnInfo.getIpcPort());
           ReceivedDeletedBlockInfo[] rdBlocks = {
             new ReceivedDeletedBlockInfo(
                   blocks[i], ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
@@ -977,10 +984,10 @@ public class NNThroughputBenchmark {
       for(int idx=0; idx < nrDatanodes; idx++) {
         datanodes[idx] = new TinyDatanode(idx, blocksPerReport);
         datanodes[idx].register();
-        assert datanodes[idx].getName().compareTo(prevDNName) > 0
+        assert datanodes[idx].getXferAddr().compareTo(prevDNName) > 0
           : "Data-nodes must be sorted lexicographically.";
         datanodes[idx].sendHeartbeat();
-        prevDNName = datanodes[idx].getName();
+        prevDNName = datanodes[idx].getXferAddr();
       }
 
       // create files 
@@ -1010,7 +1017,7 @@ public class NNThroughputBenchmark {
         LocatedBlock loc = nameNodeProto.addBlock(fileName, clientName, prevBlock, null);
         prevBlock = loc.getBlock();
         for(DatanodeInfo dnInfo : loc.getLocations()) {
-          int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getName());
+          int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getXferAddr());
           datanodes[dnIdx].addBlock(loc.getBlock().getLocalBlock());
           ReceivedDeletedBlockInfo[] rdBlocks = { new ReceivedDeletedBlockInfo(
               loc.getBlock().getLocalBlock(),
@@ -1165,9 +1172,9 @@ public class NNThroughputBenchmark {
       for(int i=0; i < nodesToDecommission; i++) {
         TinyDatanode dn = blockReportObject.datanodes[nrDatanodes-1-i];
         numDecommissionedBlocks += dn.nrBlocks;
-        excludeFile.write(dn.getName().getBytes());
+        excludeFile.write(dn.getXferAddr().getBytes());
         excludeFile.write('\n');
-        LOG.info("Datanode " + dn.getName() + " is decommissioned.");
+        LOG.info("Datanode " + dn + " is decommissioned.");
       }
       excludeFile.close();
       nameNodeProto.refreshNodes();
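
The renamed helper preserves the benchmark's ordering trick documented in
the javadoc above: offsetting each node index by 100000 yields a fixed
six-digit port, so the transfer addresses sort lexicographically in the same
order as the ports sort numerically. A standalone illustration of why the
fixed width matters (plain Java, no Hadoop types; the class name is
hypothetical):

    public class PortOrderingSketch {
      public static void main(String[] args) {
        // Variable-width ports break lexicographic ordering: "9" sorts after "10".
        System.out.println("host:9".compareTo("host:10") > 0);          // true
        // Fixed-width six-digit ports keep string order equal to numeric order.
        System.out.println("host:100009".compareTo("host:100010") < 0); // true
      }
    }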

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java Sun Apr  1 22:12:12 2012
@@ -156,7 +156,7 @@ public class TestDecommissioningStatus {
       throws IOException {
     DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
 
-    String nodename = info[nodeIndex].getName();
+    String nodename = info[nodeIndex].getXferAddr();
     System.out.println("Decommissioning node: " + nodename);
 
     // write nodename into the exclude file.

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java Sun Apr  1 22:12:12 2012
@@ -167,7 +167,7 @@ public class TestStandbyIsHot {
       
       // Stop the DN.
       DataNode dn = cluster.getDataNodes().get(0);
-      String dnName = dn.getDatanodeId().getName(); 
+      String dnName = dn.getDatanodeId().getXferAddr(); 
       DataNodeProperties dnProps = cluster.stopDataNode(0);
       
       // Make sure both NNs register it as dead.

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java Sun Apr  1 22:12:12 2012
@@ -30,16 +30,16 @@ import org.apache.hadoop.hdfs.server.blo
 public class TestNetworkTopology extends TestCase {
   private final static NetworkTopology cluster = new NetworkTopology();
   private final static DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
-    new DatanodeDescriptor(new DatanodeID("h1:5020"), "/d1/r1"),
-    new DatanodeDescriptor(new DatanodeID("h2:5020"), "/d1/r1"),
-    new DatanodeDescriptor(new DatanodeID("h3:5020"), "/d1/r2"),
-    new DatanodeDescriptor(new DatanodeID("h4:5020"), "/d1/r2"),
-    new DatanodeDescriptor(new DatanodeID("h5:5020"), "/d1/r2"),
-    new DatanodeDescriptor(new DatanodeID("h6:5020"), "/d2/r3"),
-    new DatanodeDescriptor(new DatanodeID("h7:5020"), "/d2/r3")
+    new DatanodeDescriptor(new DatanodeID("h1", 5020), "/d1/r1"),
+    new DatanodeDescriptor(new DatanodeID("h2", 5020), "/d1/r1"),
+    new DatanodeDescriptor(new DatanodeID("h3", 5020), "/d1/r2"),
+    new DatanodeDescriptor(new DatanodeID("h4", 5020), "/d1/r2"),
+    new DatanodeDescriptor(new DatanodeID("h5", 5020), "/d1/r2"),
+    new DatanodeDescriptor(new DatanodeID("h6", 5020), "/d2/r3"),
+    new DatanodeDescriptor(new DatanodeID("h7", 5020), "/d2/r3")
   };
   private final static DatanodeDescriptor NODE = 
-    new DatanodeDescriptor(new DatanodeID("h8:5020"), "/d2/r4");
+    new DatanodeDescriptor(new DatanodeID("h8", 5020), "/d2/r4");
   
   static {
     for(int i=0; i<dataNodes.length; i++) {
@@ -61,9 +61,9 @@ public class TestNetworkTopology extends
   public void testCreateInvalidTopology() throws Exception {
     NetworkTopology invalCluster = new NetworkTopology();
     DatanodeDescriptor invalDataNodes[] = new DatanodeDescriptor[] {
-      new DatanodeDescriptor(new DatanodeID("h1:5020"), "/d1/r1"),
-      new DatanodeDescriptor(new DatanodeID("h2:5020"), "/d1/r1"),
-      new DatanodeDescriptor(new DatanodeID("h3:5020"), "/d1")
+      new DatanodeDescriptor(new DatanodeID("h1", 5020), "/d1/r1"),
+      new DatanodeDescriptor(new DatanodeID("h2", 5020), "/d1/r1"),
+      new DatanodeDescriptor(new DatanodeID("h3", 5020), "/d1")
     };
     invalCluster.add(invalDataNodes[0]);
     invalCluster.add(invalDataNodes[1]);
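
For the topology tests, the packed "h1:5020" strings give way to a
two-argument DatanodeID taking the host and the transfer port separately. A
minimal sketch, with the argument roles assumed from the replacements above
and a hypothetical class name:

    import org.apache.hadoop.hdfs.protocol.DatanodeID;
    import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;

    public class TopologySketch {
      public static void main(String[] args) {
        // Two-argument form inferred from the diff: host (or IP) plus xfer port.
        DatanodeDescriptor d = new DatanodeDescriptor(
            new DatanodeID("h1", 5020), "/d1/r1");
        System.out.println(d);
      }
    }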

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml Sun Apr  1 22:12:12 2012
@@ -15741,6 +15741,10 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
+          <expected-output>Hostname: [-.a-zA-Z0-9]+</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
           <expected-output>Decommission Status : [a-zA-Z]+</expected-output>
         </comparator>
         <comparator>
@@ -15838,6 +15842,10 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
+          <expected-output>Hostname: [-.a-zA-Z0-9]+</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
           <expected-output>Decommission Status : [a-zA-Z]+</expected-output>
         </comparator>
         <comparator>