Posted to hdfs-commits@hadoop.apache.org by el...@apache.org on 2012/04/02 00:12:15 UTC

svn commit: r1308205 [1/2] - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/protocol/ src/main/java/org/apache/hadoop/hdfs/protocolPB/ src/main/java/org/apache/hado...

Author: eli
Date: Sun Apr  1 22:12:12 2012
New Revision: 1308205

URL: http://svn.apache.org/viewvc?rev=1308205&view=rev
Log:
HDFS-3144. Refactor DatanodeID#getName by use. Contributed by Eli Collins
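
In short: call sites that used to funnel every purpose through
DatanodeID#getName() now pick an accessor named for the context
(getXferAddr(), getIpcAddr(), getInfoAddr()), and log statements lean on
DatanodeID#toString(). A minimal before/after sketch of the pattern, with
dn, sock, timeout, and LOG standing in for the surrounding code in each hunk:

    // Before: one catch-all string, whatever the purpose.
    NetUtils.connect(sock, NetUtils.createSocketAddr(dn.getName()), timeout);
    LOG.debug("write to " + dn.getName());

    // After: the accessor names the context; toString() covers logging.
    NetUtils.connect(sock, NetUtils.createSocketAddr(dn.getXferAddr()), timeout);
    LOG.debug("write to " + dn);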

Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/UnregisteredNodeException.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DisallowedDatanodeException.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NodeRegistration.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Sun Apr  1 22:12:12 2012
@@ -307,6 +307,8 @@ Release 2.0.0 - UNRELEASED 
     HDFS-2476. More CPU efficient data structure for under-replicated,
     over-replicated, and invalidated blocks. (Tomasz Nykiel via todd)
 
+    HDFS-3144. Refactor DatanodeID#getName by use. (eli)
+
   BUG FIXES
 
     HDFS-2481. Unknown protocol: org.apache.hadoop.hdfs.protocol.ClientProtocol.

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java Sun Apr  1 22:12:12 2012
@@ -1340,7 +1340,8 @@ public class DFSClient implements java.i
           //connect to a datanode
           sock = socketFactory.createSocket();
           NetUtils.connect(sock,
-              NetUtils.createSocketAddr(datanodes[j].getName()), timeout);
+              NetUtils.createSocketAddr(datanodes[j].getXferAddr()),
+              timeout);
           sock.setSoTimeout(timeout);
 
           out = new DataOutputStream(
@@ -1349,7 +1350,7 @@ public class DFSClient implements java.i
           in = new DataInputStream(NetUtils.getInputStream(sock));
 
           if (LOG.isDebugEnabled()) {
-            LOG.debug("write to " + datanodes[j].getName() + ": "
+            LOG.debug("write to " + datanodes[j] + ": "
                 + Op.BLOCK_CHECKSUM + ", block=" + block);
           }
           // get block MD5
@@ -1364,7 +1365,7 @@ public class DFSClient implements java.i
               if (LOG.isDebugEnabled()) {
                 LOG.debug("Got access token error in response to OP_BLOCK_CHECKSUM "
                     + "for file " + src + " for block " + block
-                    + " from datanode " + datanodes[j].getName()
+                    + " from datanode " + datanodes[j]
                     + ". Will retry the block once.");
               }
               lastRetriedIndex = i;
@@ -1374,7 +1375,7 @@ public class DFSClient implements java.i
               break;
             } else {
               throw new IOException("Bad response " + reply + " for block "
-                  + block + " from datanode " + datanodes[j].getName());
+                  + block + " from datanode " + datanodes[j]);
             }
           }
           
@@ -1409,12 +1410,10 @@ public class DFSClient implements java.i
               LOG.debug("set bytesPerCRC=" + bytesPerCRC
                   + ", crcPerBlock=" + crcPerBlock);
             }
-            LOG.debug("got reply from " + datanodes[j].getName()
-                + ": md5=" + md5);
+            LOG.debug("got reply from " + datanodes[j] + ": md5=" + md5);
           }
         } catch (IOException ie) {
-          LOG.warn("src=" + src + ", datanodes[" + j + "].getName()="
-              + datanodes[j].getName(), ie);
+          LOG.warn("src=" + src + ", datanodes["+j+"]=" + datanodes[j], ie);
         } finally {
           IOUtils.closeStream(in);
           IOUtils.closeStream(out);

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java Sun Apr  1 22:12:12 2012
@@ -543,7 +543,7 @@ public class DFSInputStream extends FSIn
         return reader.doRead(blockReader, off, len);
       } catch ( ChecksumException ce ) {
         DFSClient.LOG.warn("Found Checksum error for "
-            + getCurrentBlock() + " from " + currentNode.getName()
+            + getCurrentBlock() + " from " + currentNode
             + " at " + ce.getPos());        
         ioe = ce;
         retryCurrentNode = false;
@@ -671,7 +671,7 @@ public class DFSInputStream extends FSIn
       try {
         DatanodeInfo chosenNode = bestNode(nodes, deadNodes);
         InetSocketAddress targetAddr = 
-                          NetUtils.createSocketAddr(chosenNode.getName());
+          NetUtils.createSocketAddr(chosenNode.getXferAddr());
         return new DNAddrPair(chosenNode, targetAddr);
       } catch (IOException ie) {
         String blockInfo = block.getBlock() + " file=" + src;
@@ -746,7 +746,7 @@ public class DFSInputStream extends FSIn
       } catch (ChecksumException e) {
         DFSClient.LOG.warn("fetchBlockByteRange(). Got a checksum exception for " +
                  src + " at " + block.getBlock() + ":" + 
-                 e.getPos() + " from " + chosenNode.getName());
+                 e.getPos() + " from " + chosenNode);
         // we want to remember what we have tried
         addIntoCorruptedBlockMap(block.getBlock(), chosenNode, corruptedBlockMap);
       } catch (AccessControlException ex) {

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java Sun Apr  1 22:12:12 2012
@@ -667,7 +667,7 @@ class DFSOutputStream extends FSOutputSu
                 throw new IOException("Bad response " + reply +
                     " for block " + block +
                     " from datanode " + 
-                    targets[i].getName());
+                    targets[i]);
               }
             }
             
@@ -898,7 +898,7 @@ class DFSOutputStream extends FSOutputSu
         if (errorIndex >= 0) {
           StringBuilder pipelineMsg = new StringBuilder();
           for (int j = 0; j < nodes.length; j++) {
-            pipelineMsg.append(nodes[j].getName());
+            pipelineMsg.append(nodes[j]);
             if (j < nodes.length - 1) {
               pipelineMsg.append(", ");
             }
@@ -911,7 +911,7 @@ class DFSOutputStream extends FSOutputSu
           }
           DFSClient.LOG.warn("Error Recovery for block " + block +
               " in pipeline " + pipelineMsg + 
-              ": bad datanode " + nodes[errorIndex].getName());
+              ": bad datanode " + nodes[errorIndex]);
           failed.add(nodes[errorIndex]);
 
           DatanodeInfo[] newnodes = new DatanodeInfo[nodes.length-1];
@@ -1005,7 +1005,7 @@ class DFSOutputStream extends FSOutputSu
       String firstBadLink = "";
       if (DFSClient.LOG.isDebugEnabled()) {
         for (int i = 0; i < nodes.length; i++) {
-          DFSClient.LOG.debug("pipeline = " + nodes[i].getName());
+          DFSClient.LOG.debug("pipeline = " + nodes[i]);
         }
       }
 
@@ -1061,7 +1061,7 @@ class DFSOutputStream extends FSOutputSu
         // find the datanode that matches
         if (firstBadLink.length() != 0) {
           for (int i = 0; i < nodes.length; i++) {
-            if (nodes[i].getName().equals(firstBadLink)) {
+            if (nodes[i].getXferAddr().equals(firstBadLink)) {
               errorIndex = i;
               break;
             }
@@ -1165,9 +1165,10 @@ class DFSOutputStream extends FSOutputSu
   static Socket createSocketForPipeline(final DatanodeInfo first,
       final int length, final DFSClient client) throws IOException {
     if(DFSClient.LOG.isDebugEnabled()) {
-      DFSClient.LOG.debug("Connecting to datanode " + first.getName());
+      DFSClient.LOG.debug("Connecting to datanode " + first);
     }
-    final InetSocketAddress isa = NetUtils.createSocketAddr(first.getName());
+    final InetSocketAddress isa =
+      NetUtils.createSocketAddr(first.getXferAddr());
     final Socket sock = client.socketFactory.createSocket();
     final int timeout = client.getDatanodeReadTimeout(length);
     NetUtils.connect(sock, isa, timeout);

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java Sun Apr  1 22:12:12 2012
@@ -295,16 +295,16 @@ public class DFSUtil {
       assert idx < nrBlocks : "Incorrect index";
       DatanodeInfo[] locations = blk.getLocations();
       String[] hosts = new String[locations.length];
-      String[] names = new String[locations.length];
+      String[] xferAddrs = new String[locations.length];
       String[] racks = new String[locations.length];
       for (int hCnt = 0; hCnt < locations.length; hCnt++) {
         hosts[hCnt] = locations[hCnt].getHostName();
-        names[hCnt] = locations[hCnt].getName();
-        NodeBase node = new NodeBase(names[hCnt], 
+        xferAddrs[hCnt] = locations[hCnt].getXferAddr();
+        NodeBase node = new NodeBase(xferAddrs[hCnt], 
                                      locations[hCnt].getNetworkLocation());
         racks[hCnt] = node.toString();
       }
-      blkLocations[idx] = new BlockLocation(names, hosts, racks,
+      blkLocations[idx] = new BlockLocation(xferAddrs, hosts, racks,
                                             blk.getStartOffset(),
                                             blk.getBlockSize(),
                                             blk.isCorrupt());

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Sun Apr  1 22:12:12 2012
@@ -688,7 +688,7 @@ public class DistributedFileSystem exten
     lblocks[0] = new LocatedBlock(dataBlock, dataNode);
     LOG.info("Found checksum error in data stream at block="
         + dataBlock + " on datanode="
-        + dataNode[0].getName());
+        + dataNode[0]);
 
     // Find block in checksum stream
     DFSClient.DFSDataInputStream dfsSums = (DFSClient.DFSDataInputStream) sums;
@@ -700,8 +700,7 @@ public class DistributedFileSystem exten
     DatanodeInfo[] sumsNode = {dfsSums.getCurrentDatanode()}; 
     lblocks[1] = new LocatedBlock(sumsBlock, sumsNode);
     LOG.info("Found checksum error in checksum stream at block="
-        + sumsBlock + " on datanode="
-        + sumsNode[0].getName());
+        + sumsBlock + " on datanode=" + sumsNode[0]);
 
     // Ask client to delete blocks.
     dfs.reportChecksumFailure(f.toString(), lblocks);

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java Sun Apr  1 22:12:12 2012
@@ -32,23 +32,32 @@ import org.apache.hadoop.io.WritableComp
  * Datanodes are identified by how they can be contacted (hostname
  * and ports) and their storage ID, a unique number that associates
  * the Datanodes blocks with a particular Datanode.
+ *
+ * {@link DatanodeInfo#getName()} should be used to get the network
+ * location (for topology) of a datanode, instead of using
+ * {@link DatanodeID#getXferAddr()} here. Helpers are defined below
+ * for each context in which a DatanodeID is used.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class DatanodeID implements WritableComparable<DatanodeID> {
   public static final DatanodeID[] EMPTY_ARRAY = {}; 
 
-  protected String name;       // IP:port (data transfer port)
+  protected String ipAddr;     // IP address
   protected String hostName;   // hostname
   protected String storageID;  // unique per cluster storageID
+  protected int xferPort;      // data streaming port
   protected int infoPort;      // info server port
   protected int ipcPort;       // IPC server port
 
   /** Equivalent to DatanodeID(""). */
   public DatanodeID() {this("");}
 
-  /** Equivalent to DatanodeID(nodeName, "", -1, -1). */
-  public DatanodeID(String nodeName) {this(nodeName, "", "", -1, -1);}
+  /** Equivalent to DatanodeID(ipAddr, "", -1, -1, -1). */
+  public DatanodeID(String ipAddr) {this(ipAddr, "", "", -1, -1, -1);}
+
+  /** Equivalent to DatanodeID(ipAddr, "", xferPort, -1, -1). */
+  public DatanodeID(String ipAddr, int xferPort) {this(ipAddr, "", "", xferPort, -1, -1);}
 
   /**
    * DatanodeID copy constructor
@@ -56,38 +65,45 @@ public class DatanodeID implements Writa
    * @param from
    */
   public DatanodeID(DatanodeID from) {
-    this(from.getName(),
+    this(from.getIpAddr(),
         from.getHostName(),
         from.getStorageID(),
+        from.getXferPort(),
         from.getInfoPort(),
         from.getIpcPort());
   }
   
   /**
    * Create DatanodeID
-   * @param node IP:port
+   * @param ipAddr IP
    * @param hostName hostname
    * @param storageID data storage ID
+   * @param xferPort data transfer port
    * @param infoPort info server port 
    * @param ipcPort ipc server port
    */
-  public DatanodeID(String name, String hostName,
-      String storageID, int infoPort, int ipcPort) {
-    this.name = name;
+  public DatanodeID(String ipAddr, String hostName, String storageID,
+      int xferPort, int infoPort, int ipcPort) {
+    this.ipAddr = ipAddr;
     this.hostName = hostName;
     this.storageID = storageID;
+    this.xferPort = xferPort;
     this.infoPort = infoPort;
     this.ipcPort = ipcPort;
   }
   
-  public void setName(String name) {
-    this.name = name;
+  public void setIpAddr(String ipAddr) {
+    this.ipAddr = ipAddr;
   }
 
   public void setHostName(String hostName) {
     this.hostName = hostName;
   }
 
+  public void setXferPort(int xferPort) {
+    this.xferPort = xferPort;
+  }
+
   public void setInfoPort(int infoPort) {
     this.infoPort = infoPort;
   }
@@ -95,67 +111,79 @@ public class DatanodeID implements Writa
   public void setIpcPort(int ipcPort) {
     this.ipcPort = ipcPort;
   }
-  
+
+  public void setStorageID(String storageID) {
+    this.storageID = storageID;
+  }
+
   /**
-   * @return hostname:portNumber.
+   * @return ipAddr;
    */
-  public String getName() {
-    return name;
+  public String getIpAddr() {
+    return ipAddr;
   }
 
   /**
    * @return hostname
    */
   public String getHostName() {
-    return (hostName == null || hostName.length() == 0) ? getHost() : hostName;
+    return hostName;
   }
 
   /**
-   * @return data storage ID.
+   * @return IP:xferPort string
    */
-  public String getStorageID() {
-    return this.storageID;
+  public String getXferAddr() {
+    return ipAddr + ":" + xferPort;
   }
 
   /**
-   * @return infoPort (the port at which the HTTP server bound to)
+   * @return IP:ipcPort string
    */
-  public int getInfoPort() {
-    return infoPort;
+  public String getIpcAddr() {
+    return ipAddr + ":" + ipcPort;
   }
 
   /**
-   * @return ipcPort (the port at which the IPC server bound to)
+   * @return IP:infoPort string
    */
-  public int getIpcPort() {
-    return ipcPort;
+  public String getInfoAddr() {
+    return ipAddr + ":" + infoPort;
   }
 
   /**
-   * sets the data storage ID.
+   * @return hostname:xferPort
    */
-  public void setStorageID(String storageID) {
-    this.storageID = storageID;
+  public String getXferAddrWithHostname() {
+    return hostName + ":" + xferPort;
   }
 
   /**
-   * @return hostname and no :portNumber.
+   * @return data storage ID.
    */
-  public String getHost() {
-    int colon = name.indexOf(":");
-    if (colon < 0) {
-      return name;
-    } else {
-      return name.substring(0, colon);
-    }
+  public String getStorageID() {
+    return storageID;
   }
-  
-  public int getPort() {
-    int colon = name.indexOf(":");
-    if (colon < 0) {
-      return 50010; // default port.
-    }
-    return Integer.parseInt(name.substring(colon+1));
+
+  /**
+   * @return xferPort (the port for data streaming)
+   */
+  public int getXferPort() {
+    return xferPort;
+  }
+
+  /**
+   * @return infoPort (the port at which the HTTP server bound to)
+   */
+  public int getInfoPort() {
+    return infoPort;
+  }
+
+  /**
+   * @return ipcPort (the port at which the IPC server bound to)
+   */
+  public int getIpcPort() {
+    return ipcPort;
   }
 
   public boolean equals(Object to) {
@@ -165,16 +193,16 @@ public class DatanodeID implements Writa
     if (!(to instanceof DatanodeID)) {
       return false;
     }
-    return (name.equals(((DatanodeID)to).getName()) &&
+    return (getXferAddr().equals(((DatanodeID)to).getXferAddr()) &&
             storageID.equals(((DatanodeID)to).getStorageID()));
   }
   
   public int hashCode() {
-    return name.hashCode()^ storageID.hashCode();
+    return getXferAddr().hashCode()^ storageID.hashCode();
   }
   
   public String toString() {
-    return name;
+    return getXferAddr();
   }
   
   /**
@@ -182,43 +210,44 @@ public class DatanodeID implements Writa
    * Note that this does not update storageID.
    */
   public void updateRegInfo(DatanodeID nodeReg) {
-    name = nodeReg.getName();
+    ipAddr = nodeReg.getIpAddr();
     hostName = nodeReg.getHostName();
+    xferPort = nodeReg.getXferPort();
     infoPort = nodeReg.getInfoPort();
     ipcPort = nodeReg.getIpcPort();
   }
     
-  /** Comparable.
-   * Basis of compare is the String name (host:portNumber) only.
+  /**
+   * Compare based on data transfer address.
+   *
    * @param that
-   * @return as specified by Comparable.
+   * @return as specified by Comparable
    */
   public int compareTo(DatanodeID that) {
-    return name.compareTo(that.getName());
+    return getXferAddr().compareTo(that.getXferAddr());
   }
 
-  /////////////////////////////////////////////////
-  // Writable
-  /////////////////////////////////////////////////
   @Override
   public void write(DataOutput out) throws IOException {
-    Text.writeString(out, name);
+    Text.writeString(out, ipAddr);
     Text.writeString(out, hostName);
     Text.writeString(out, storageID);
+    out.writeShort(xferPort);
     out.writeShort(infoPort);
     out.writeShort(ipcPort);
   }
 
   @Override
   public void readFields(DataInput in) throws IOException {
-    name = Text.readString(in);
+    ipAddr = Text.readString(in);
     hostName = Text.readString(in);
     storageID = Text.readString(in);
     // The port read could be negative, if the port is a large number (more
     // than 15 bits in storage size (but less than 16 bits).
     // So chop off the first two bytes (and hence the signed bits) before 
     // setting the field.
-    this.infoPort = in.readShort() & 0x0000ffff;
-    this.ipcPort = in.readShort() & 0x0000ffff;
+    xferPort = in.readShort() & 0x0000ffff;
+    infoPort = in.readShort() & 0x0000ffff;
+    ipcPort = in.readShort() & 0x0000ffff;
   }
 }
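
Taken together, the new accessors replace the old catch-all
getName()/getHost()/getPort() trio with one getter per contact point. A
minimal sketch exercising them, assuming the six-argument constructor added
above (the addresses and storage ID are illustrative):

    import org.apache.hadoop.hdfs.protocol.DatanodeID;

    public class DatanodeIDExample {
      public static void main(String[] args) {
        DatanodeID dn = new DatanodeID("10.0.0.1", "dn1.example.com", "DS-1234",
            50010, 50075, 50020);
        System.out.println(dn.getXferAddr());             // 10.0.0.1:50010 (the old getName())
        System.out.println(dn.getIpcAddr());              // 10.0.0.1:50020
        System.out.println(dn.getInfoAddr());             // 10.0.0.1:50075
        System.out.println(dn.getXferAddrWithHostname()); // dn1.example.com:50010
        System.out.println(dn);                           // toString() == getXferAddr()
      }
    }

Note also that readFields() now masks each port with 0x0000ffff: readShort()
sign-extends, so a port above 32767 (e.g. 50010) would otherwise come back
negative.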

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java Sun Apr  1 22:12:12 2012
@@ -116,18 +116,18 @@ public class DatanodeInfo extends Datano
       final long capacity, final long dfsUsed, final long remaining,
       final long blockPoolUsed, final long lastUpdate, final int xceiverCount,
       final AdminStates adminState) {
-    this(nodeID.getName(), nodeID.getHostName(), nodeID.getStorageID(), nodeID.getInfoPort(), nodeID
-        .getIpcPort(), capacity, dfsUsed, remaining, blockPoolUsed, lastUpdate,
-        xceiverCount, location, adminState);
+    this(nodeID.getIpAddr(), nodeID.getHostName(), nodeID.getStorageID(), nodeID.getXferPort(),
+        nodeID.getInfoPort(), nodeID.getIpcPort(), capacity, dfsUsed, remaining,
+        blockPoolUsed, lastUpdate, xceiverCount, location, adminState);
   }
 
   /** Constructor */
   public DatanodeInfo(final String name, final String hostName,
-      final String storageID, final int infoPort, final int ipcPort,
+      final String storageID, final int xferPort, final int infoPort, final int ipcPort,
       final long capacity, final long dfsUsed, final long remaining,
       final long blockPoolUsed, final long lastUpdate, final int xceiverCount,
       final String networkLocation, final AdminStates adminState) {
-    super(name, hostName, storageID, infoPort, ipcPort);
+    super(name, hostName, storageID, xferPort, infoPort, ipcPort);
     this.capacity = capacity;
     this.dfsUsed = dfsUsed;
     this.remaining = remaining;
@@ -138,6 +138,11 @@ public class DatanodeInfo extends Datano
     this.adminState = adminState;
   }
   
+  /** Network location name */
+  public String getName() {
+    return getXferAddr();
+  }
+  
   /** The raw capacity. */
   public long getCapacity() { return capacity; }
   
@@ -224,9 +229,9 @@ public class DatanodeInfo extends Datano
     long nonDFSUsed = getNonDfsUsed();
     float usedPercent = getDfsUsedPercent();
     float remainingPercent = getRemainingPercent();
-    String lookupName = NetUtils.getHostNameOfIP(name);
+    String lookupName = NetUtils.getHostNameOfIP(getName());
 
-    buffer.append("Name: "+ name);
+    buffer.append("Name: "+ getName());
     if (lookupName != null) {
       buffer.append(" (" + lookupName + ")");
     }
@@ -260,7 +265,7 @@ public class DatanodeInfo extends Datano
     long c = getCapacity();
     long r = getRemaining();
     long u = getDfsUsed();
-    buffer.append(name);
+    buffer.append(ipAddr);
     if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
       buffer.append(" "+location);
     }
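
The getName() override above keeps the legacy topology naming: a DatanodeInfo
still identifies itself to the network topology by its transfer address, as
the DFSUtil hunk earlier in this commit shows. A minimal sketch of that
pattern, assuming a DatanodeInfo dn obtained from a block location:

    import org.apache.hadoop.net.NodeBase;

    // DatanodeInfo#getName() now simply returns getXferAddr(), so topology
    // paths keep the historical IP:xferPort leaf name.
    NodeBase node = new NodeBase(dn.getName(), dn.getNetworkLocation());
    String rackPath = node.toString();  // e.g. "/rack1/10.0.0.1:50010"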

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java Sun Apr  1 22:12:12 2012
@@ -84,9 +84,10 @@ public abstract class HdfsProtoUtil {
   private static HdfsProtos.DatanodeIDProto toProto(
       DatanodeID dni) {
     return HdfsProtos.DatanodeIDProto.newBuilder()
-      .setName(dni.getName())
+      .setIpAddr(dni.getIpAddr())
       .setHostName(dni.getHostName())
       .setStorageID(dni.getStorageID())
+      .setXferPort(dni.getXferPort())
       .setInfoPort(dni.getInfoPort())
       .setIpcPort(dni.getIpcPort())
       .build();
@@ -94,9 +95,10 @@ public abstract class HdfsProtoUtil {
   
   private static DatanodeID fromProto(HdfsProtos.DatanodeIDProto idProto) {
     return new DatanodeID(
-        idProto.getName(),
+        idProto.getIpAddr(),
         idProto.getHostName(),
         idProto.getStorageID(),
+        idProto.getXferPort(),
         idProto.getInfoPort(),
         idProto.getIpcPort());
   }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/UnregisteredNodeException.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/UnregisteredNodeException.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/UnregisteredNodeException.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/UnregisteredNodeException.java Sun Apr  1 22:12:12 2012
@@ -45,9 +45,8 @@ public class UnregisteredNodeException e
    * @param storedNode data-node stored in the system with this storage id
    */
   public UnregisteredNodeException(DatanodeID nodeID, DatanodeInfo storedNode) {
-    super("Data node " + nodeID.getName() 
-          + " is attempting to report storage ID "
+    super("Data node " + nodeID + " is attempting to report storage ID " 
           + nodeID.getStorageID() + ". Node " 
-          + storedNode.getName() + " is expected to serve this storage.");
+          + storedNode + " is expected to serve this storage.");
   }
 }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java Sun Apr  1 22:12:12 2012
@@ -97,8 +97,7 @@ public class ClientDatanodeProtocolTrans
    */
   public ClientDatanodeProtocolTranslatorPB(DatanodeID datanodeid,
       Configuration conf, int socketTimeout) throws IOException {
-    InetSocketAddress addr = NetUtils.createSocketAddr(datanodeid.getHost()
-        + ":" + datanodeid.getIpcPort());
+    InetSocketAddress addr = NetUtils.createSocketAddr(datanodeid.getIpcAddr());
     rpcProxy = createClientDatanodeProtocolProxy(addr,
         UserGroupInformation.getCurrentUser(), conf,
         NetUtils.getDefaultSocketFactory(conf), socketTimeout);
@@ -107,8 +106,7 @@ public class ClientDatanodeProtocolTrans
   static ClientDatanodeProtocolPB createClientDatanodeProtocolProxy(
       DatanodeID datanodeid, Configuration conf, int socketTimeout,
       LocatedBlock locatedBlock) throws IOException {
-    InetSocketAddress addr = NetUtils.createSocketAddr(
-      datanodeid.getHost() + ":" + datanodeid.getIpcPort());
+    InetSocketAddress addr = NetUtils.createSocketAddr(datanodeid.getIpcAddr());
     if (LOG.isDebugEnabled()) {
       LOG.debug("ClientDatanodeProtocol addr=" + addr);
     }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java Sun Apr  1 22:12:12 2012
@@ -204,15 +204,18 @@ public class PBHelper {
 
   // DatanodeId
   public static DatanodeID convert(DatanodeIDProto dn) {
-    return new DatanodeID(dn.getName(), dn.getHostName(), dn.getStorageID(), dn.getInfoPort(),
-        dn.getIpcPort());
+    return new DatanodeID(dn.getIpAddr(), dn.getHostName(), dn.getStorageID(),
+        dn.getXferPort(), dn.getInfoPort(), dn.getIpcPort());
   }
 
   public static DatanodeIDProto convert(DatanodeID dn) {
     return DatanodeIDProto.newBuilder()
-        .setName(dn.getName()).setHostName(dn.getHostName())
-        .setInfoPort(dn.getInfoPort()).setIpcPort(dn.getIpcPort())
-        .setStorageID(dn.getStorageID()).build();
+        .setIpAddr(dn.getIpAddr())
+        .setHostName(dn.getHostName())
+        .setStorageID(dn.getStorageID())
+        .setXferPort(dn.getXferPort())
+        .setInfoPort(dn.getInfoPort())
+        .setIpcPort(dn.getIpcPort()).build();
   }
 
   // Arrays of DatanodeId
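
With both converters updated, a DatanodeID survives the protobuf round trip
with its xferPort intact. A minimal sketch, assuming the ipAddr/xferPort
fields added to hdfs.proto in this commit (values are illustrative):

    DatanodeID dn = new DatanodeID("10.0.0.1", "dn1.example.com", "DS-1234",
        50010, 50075, 50020);
    DatanodeIDProto proto = PBHelper.convert(dn);
    DatanodeID back = PBHelper.convert(proto);
    // equals() is now based on getXferAddr() plus storageID, so this holds:
    assert dn.equals(back);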

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java Sun Apr  1 22:12:12 2012
@@ -305,8 +305,9 @@ public class Balancer {
       DataOutputStream out = null;
       DataInputStream in = null;
       try {
-        sock.connect(NetUtils.createSocketAddr(
-            target.datanode.getName()), HdfsServerConstants.READ_TIMEOUT);
+        sock.connect(
+            NetUtils.createSocketAddr(target.datanode.getXferAddr()),
+            HdfsServerConstants.READ_TIMEOUT);
         sock.setKeepAlive(true);
         out = new DataOutputStream( new BufferedOutputStream(
             sock.getOutputStream(), HdfsConstants.IO_FILE_BUFFER_SIZE));
@@ -587,7 +588,7 @@ public class Balancer {
     /** Add a node task */
     private void addNodeTask(NodeTask task) {
       assert (task.datanode != this) :
-        "Source and target are the same " + datanode.getName();
+        "Source and target are the same " + datanode;
       incScheduledSize(task.getSize());
       nodeTasks.add(task);
     }
@@ -1007,7 +1008,7 @@ public class Balancer {
         targetCandidates.remove();
       }
       LOG.info("Decided to move "+StringUtils.byteDesc(size)+" bytes from "
-          +source.datanode.getName() + " to " + target.datanode.getName());
+          +source.datanode + " to " + target.datanode);
       return true;
     }
     return false;
@@ -1055,7 +1056,7 @@ public class Balancer {
         sourceCandidates.remove();
       }
       LOG.info("Decided to move "+StringUtils.byteDesc(size)+" bytes from "
-          +source.datanode.getName() + " to " + target.datanode.getName());
+          +source.datanode + " to " + target.datanode);
       return true;
     }
     return false;

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Sun Apr  1 22:12:12 2012
@@ -808,9 +808,9 @@ public class BlockManager {
     final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode);
     if (node == null) {
       NameNode.stateChangeLog.warn("BLOCK* getBlocks: "
-          + "Asking for blocks from an unrecorded node " + datanode.getName());
+          + "Asking for blocks from an unrecorded node " + datanode);
       throw new HadoopIllegalArgumentException(
-          "Datanode " + datanode.getName() + " not found.");
+          "Datanode " + datanode + " not found.");
     }
 
     int numBlocks = node.numBlocks();
@@ -882,7 +882,7 @@ public class BlockManager {
         .hasNext();) {
       DatanodeDescriptor node = it.next();
       invalidateBlocks.add(b, node, false);
-      datanodes.append(node.getName()).append(" ");
+      datanodes.append(node).append(" ");
     }
     if (datanodes.length() != 0) {
       NameNode.stateChangeLog.info("BLOCK* addToInvalidates: "
@@ -921,7 +921,7 @@ public class BlockManager {
     if (node == null) {
       throw new IOException("Cannot mark block " + 
                             storedBlock.getBlockName() +
-                            " as corrupt because datanode " + dn.getName() +
+                            " as corrupt because datanode " + dn +
                             " does not exist. ");
     }
 
@@ -955,11 +955,11 @@ public class BlockManager {
   private void invalidateBlock(Block blk, DatanodeInfo dn)
       throws IOException {
     NameNode.stateChangeLog.info("BLOCK* invalidateBlock: "
-                                 + blk + " on " + dn.getName());
+                                 + blk + " on " + dn);
     DatanodeDescriptor node = getDatanodeManager().getDatanode(dn);
     if (node == null) {
       throw new IOException("Cannot invalidate block " + blk
-          + " because datanode " + dn.getName() + " does not exist.");
+          + " because datanode " + dn + " does not exist.");
     }
 
     // Check how many copies we have of the block
@@ -977,11 +977,11 @@ public class BlockManager {
       removeStoredBlock(blk, node);
       if(NameNode.stateChangeLog.isDebugEnabled()) {
         NameNode.stateChangeLog.debug("BLOCK* invalidateBlocks: "
-            + blk + " on " + dn.getName() + " listed for deletion.");
+            + blk + " on " + dn + " listed for deletion.");
       }
     } else {
       NameNode.stateChangeLog.info("BLOCK* invalidateBlocks: " + blk + " on "
-          + dn.getName() + " is the only copy and was not deleted.");
+          + dn + " is the only copy and was not deleted.");
     }
   }
 
@@ -1224,11 +1224,11 @@ public class BlockManager {
           StringBuilder targetList = new StringBuilder("datanode(s)");
           for (int k = 0; k < targets.length; k++) {
             targetList.append(' ');
-            targetList.append(targets[k].getName());
+            targetList.append(targets[k]);
           }
           NameNode.stateChangeLog.info(
                   "BLOCK* ask "
-                  + rw.srcNode.getName() + " to replicate "
+                  + rw.srcNode + " to replicate "
                   + rw.block + " to " + targetList);
         }
       }
@@ -1410,15 +1410,15 @@ public class BlockManager {
     try {
       final DatanodeDescriptor node = datanodeManager.getDatanode(nodeID);
       if (node == null || !node.isAlive) {
-        throw new IOException("ProcessReport from dead or unregistered node: "
-                              + nodeID.getName());
+        throw new IOException(
+            "ProcessReport from dead or unregistered node: " + nodeID);
       }
 
       // To minimize startup time, we discard any second (or later) block reports
       // that we receive while still in startup phase.
       if (namesystem.isInStartupSafeMode() && !node.isFirstBlockReport()) {
         NameNode.stateChangeLog.info("BLOCK* processReport: "
-            + "discarded non-initial block report from " + nodeID.getName()
+            + "discarded non-initial block report from " + nodeID
             + " because namenode still in startup phase");
         return;
       }
@@ -1451,7 +1451,7 @@ public class BlockManager {
     // Log the block report processing stats from Namenode perspective
     NameNode.getNameNodeMetrics().addBlockReport((int) (endTime - startTime));
     NameNode.stateChangeLog.info("BLOCK* processReport: from "
-        + nodeID.getName() + ", blocks: " + newReport.getNumberOfBlocks()
+        + nodeID + ", blocks: " + newReport.getNumberOfBlocks()
         + ", processing time: " + (endTime - startTime) + " msecs");
   }
 
@@ -1511,7 +1511,7 @@ public class BlockManager {
     }
     for (Block b : toInvalidate) {
       NameNode.stateChangeLog.info("BLOCK* processReport: block "
-          + b + " on " + node.getName() + " size " + b.getNumBytes()
+          + b + " on " + node + " size " + b.getNumBytes()
           + " does not belong to any file.");
       addToInvalidates(b, node);
     }
@@ -1662,7 +1662,7 @@ public class BlockManager {
     
     if(LOG.isDebugEnabled()) {
       LOG.debug("Reported block " + block
-          + " on " + dn.getName() + " size " + block.getNumBytes()
+          + " on " + dn + " size " + block.getNumBytes()
           + " replicaState = " + reportedState);
     }
   
@@ -1837,7 +1837,7 @@ assert storedBlock.findDatanode(dn) < 0 
           // closed. So, ignore this report, assuming we will get a
           // FINALIZED replica later. See HDFS-2791
           LOG.info("Received an RBW replica for block " + storedBlock +
-              " on " + dn.getName() + ": ignoring it, since the block is " +
+              " on " + dn + ": ignoring it, since the block is " +
               "complete with the same generation stamp.");
           return null;
         } else {
@@ -1850,7 +1850,7 @@ assert storedBlock.findDatanode(dn) < 0 
     default:
       String msg = "Unexpected replica state " + reportedState
       + " for block: " + storedBlock + 
-      " on " + dn.getName() + " size " + storedBlock.getNumBytes();
+      " on " + dn + " size " + storedBlock.getNumBytes();
       // log here at WARN level since this is really a broken HDFS
       // invariant
       LOG.warn(msg);
@@ -1949,7 +1949,7 @@ assert storedBlock.findDatanode(dn) < 0 
     if (storedBlock == null || storedBlock.getINode() == null) {
       // If this block does not belong to anyfile, then we are done.
       NameNode.stateChangeLog.info("BLOCK* addStoredBlock: " + block + " on "
-          + node.getName() + " size " + block.getNumBytes()
+          + node + " size " + block.getNumBytes()
           + " but it does not belong to any file.");
       // we could add this block to invalidate set of this datanode.
       // it will happen in next block report otherwise.
@@ -1972,7 +1972,7 @@ assert storedBlock.findDatanode(dn) < 0 
       curReplicaDelta = 0;
       NameNode.stateChangeLog.warn("BLOCK* addStoredBlock: "
           + "Redundant addStoredBlock request received for " + storedBlock
-          + " on " + node.getName() + " size " + storedBlock.getNumBytes());
+          + " on " + node + " size " + storedBlock.getNumBytes());
     }
 
     // Now check for completion of blocks and safe block count
@@ -2035,7 +2035,7 @@ assert storedBlock.findDatanode(dn) < 0 
     
     StringBuilder sb = new StringBuilder(500);
     sb.append("BLOCK* addStoredBlock: blockMap updated: ")
-      .append(node.getName())
+      .append(node)
       .append(" is added to ");
     storedBlock.appendStringTo(sb);
     sb.append(" size " )
@@ -2069,7 +2069,7 @@ assert storedBlock.findDatanode(dn) < 0 
       } catch (IOException e) {
         NameNode.stateChangeLog.info("NameNode.invalidateCorruptReplicas " +
                                       "error in deleting bad block " + blk +
-                                      " on " + node + e);
+                                      " on " + node, e);
         gotException = true;
       }
     }
@@ -2335,7 +2335,7 @@ assert storedBlock.findDatanode(dn) < 0 
       //
       addToInvalidates(b, cur);
       NameNode.stateChangeLog.info("BLOCK* chooseExcessReplicates: "
-                +"("+cur.getName()+", "+b+") is added to invalidated blocks set.");
+                +"("+cur+", "+b+") is added to invalidated blocks set.");
     }
   }
 
@@ -2350,7 +2350,7 @@ assert storedBlock.findDatanode(dn) < 0 
       excessBlocksCount++;
       if(NameNode.stateChangeLog.isDebugEnabled()) {
         NameNode.stateChangeLog.debug("BLOCK* addToExcessReplicate:"
-            + " (" + dn.getName() + ", " + block
+            + " (" + dn + ", " + block
             + ") is added to excessReplicateMap");
       }
     }
@@ -2363,7 +2363,7 @@ assert storedBlock.findDatanode(dn) < 0 
   public void removeStoredBlock(Block block, DatanodeDescriptor node) {
     if(NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("BLOCK* removeStoredBlock: "
-          + block + " from " + node.getName());
+          + block + " from " + node);
     }
     assert (namesystem.hasWriteLock());
     {
@@ -2476,7 +2476,7 @@ assert storedBlock.findDatanode(dn) < 0 
     }
     for (Block b : toInvalidate) {
       NameNode.stateChangeLog.info("BLOCK* addBlock: block "
-          + b + " on " + node.getName() + " size " + b.getNumBytes()
+          + b + " on " + node + " size " + b.getNumBytes()
           + " does not belong to any file.");
       addToInvalidates(b, node);
     }
@@ -2504,7 +2504,7 @@ assert storedBlock.findDatanode(dn) < 0 
         NameNode.stateChangeLog
             .warn("BLOCK* processIncrementalBlockReport"
                 + " is received from dead or unregistered node "
-                + nodeID.getName());
+                + nodeID);
         throw new IOException(
             "Got incremental block report from unregistered or dead node");
       }
@@ -2526,7 +2526,7 @@ assert storedBlock.findDatanode(dn) < 0 
           break;
         default:
           String msg = 
-            "Unknown block status code reported by " + nodeID.getName() +
+            "Unknown block status code reported by " + nodeID +
             ": " + rdbi;
           NameNode.stateChangeLog.warn(msg);
           assert false : msg; // if assertions are enabled, throw.
@@ -2535,14 +2535,14 @@ assert storedBlock.findDatanode(dn) < 0 
         if (NameNode.stateChangeLog.isDebugEnabled()) {
           NameNode.stateChangeLog.debug("BLOCK* block "
               + (rdbi.getStatus()) + ": " + rdbi.getBlock()
-              + " is received from " + nodeID.getName());
+              + " is received from " + nodeID);
         }
       }
     } finally {
       namesystem.writeUnlock();
       NameNode.stateChangeLog
           .debug("*BLOCK* NameNode.processIncrementalBlockReport: " + "from "
-              + nodeID.getName()
+              + nodeID
               +  " receiving: " + receiving + ", "
               + " received: " + received + ", "
               + " deleted: " + deleted);
@@ -2618,7 +2618,7 @@ assert storedBlock.findDatanode(dn) < 0 
     StringBuilder nodeList = new StringBuilder();
     while (nodeIter.hasNext()) {
       DatanodeDescriptor node = nodeIter.next();
-      nodeList.append(node.getName());
+      nodeList.append(node);
       nodeList.append(" ");
     }
     LOG.info("Block: " + block + ", Expected Replicas: "
@@ -2628,7 +2628,7 @@ assert storedBlock.findDatanode(dn) < 0 
         + ", excess replicas: " + num.excessReplicas()
         + ", Is Open File: " + fileINode.isUnderConstruction()
         + ", Datanodes having this block: " + nodeList + ", Current Datanode: "
-        + srcNode.getName() + ", Is current datanode decommissioning: "
+        + srcNode + ", Is current datanode decommissioning: "
         + srcNode.isDecommissionInProgress());
   }
   

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java Sun Apr  1 22:12:12 2012
@@ -65,14 +65,14 @@ public class CorruptReplicasMap{
       nodes.add(dn);
       NameNode.stateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+
                                    blk.getBlockName() +
-                                   " added as corrupt on " + dn.getName() +
+                                   " added as corrupt on " + dn +
                                    " by " + Server.getRemoteIp() +
                                    reasonText);
     } else {
       NameNode.stateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+
                                    "duplicate requested for " + 
                                    blk.getBlockName() + " to add as corrupt " +
-                                   "on " + dn.getName() +
+                                   "on " + dn +
                                    " by " + Server.getRemoteIp() +
                                    reasonText);
     }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java Sun Apr  1 22:12:12 2012
@@ -238,7 +238,7 @@ public class DatanodeManager {
     final DatanodeDescriptor node = getDatanode(nodeID.getStorageID());
     if (node == null) 
       return null;
-    if (!node.getName().equals(nodeID.getName())) {
+    if (!node.getXferAddr().equals(nodeID.getXferAddr())) {
       final UnregisteredNodeException e = new UnregisteredNodeException(
           nodeID, node);
       NameNode.stateChangeLog.fatal("BLOCK* NameSystem.getDatanode: "
@@ -270,7 +270,7 @@ public class DatanodeManager {
     networktopology.remove(nodeInfo);
 
     if (LOG.isDebugEnabled()) {
-      LOG.debug("remove datanode " + nodeInfo.getName());
+      LOG.debug("remove datanode " + nodeInfo);
     }
     namesystem.checkSafeMode();
   }
@@ -288,7 +288,7 @@ public class DatanodeManager {
         removeDatanode(descriptor);
       } else {
         NameNode.stateChangeLog.warn("BLOCK* removeDatanode: "
-                                     + node.getName() + " does not exist");
+                                     + node + " does not exist");
       }
     } finally {
       namesystem.writeUnlock();
@@ -306,7 +306,7 @@ public class DatanodeManager {
         }
         if (d != null && isDatanodeDead(d)) {
           NameNode.stateChangeLog.info(
-              "BLOCK* removeDeadDatanode: lost heartbeat from " + d.getName());
+              "BLOCK* removeDeadDatanode: lost heartbeat from " + d);
           removeDatanode(d);
         }
       }
@@ -332,7 +332,7 @@ public class DatanodeManager {
 
     if (LOG.isDebugEnabled()) {
       LOG.debug(getClass().getSimpleName() + ".addDatanode: "
-          + "node " + node.getName() + " is added to datanodeMap.");
+          + "node " + node + " is added to datanodeMap.");
     }
   }
 
@@ -344,7 +344,7 @@ public class DatanodeManager {
     }
     if (LOG.isDebugEnabled()) {
       LOG.debug(getClass().getSimpleName() + ".wipeDatanode("
-          + node.getName() + "): storage " + key 
+          + node + "): storage " + key 
           + " is removed from datanodeMap.");
     }
   }
@@ -354,7 +354,7 @@ public class DatanodeManager {
     List<String> names = new ArrayList<String>(1);
     if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
       // get the node's IP address
-      names.add(node.getHost());
+      names.add(node.getIpAddr());
     } else {
       // get the node's host name
       String hostName = node.getHostName();
@@ -376,12 +376,12 @@ public class DatanodeManager {
     node.setNetworkLocation(networkLocation);
   }
 
-  private boolean inHostsList(DatanodeID node, String ipAddr) {
-     return checkInList(node, ipAddr, hostsReader.getHosts(), false);
+  private boolean inHostsList(DatanodeID node) {
+     return checkInList(node, hostsReader.getHosts(), false);
   }
   
-  private boolean inExcludedHostsList(DatanodeID node, String ipAddr) {
-    return checkInList(node, ipAddr, hostsReader.getExcludedHosts(), true);
+  private boolean inExcludedHostsList(DatanodeID node) {
+    return checkInList(node, hostsReader.getExcludedHosts(), true);
   }
 
   /**
@@ -419,7 +419,7 @@ public class DatanodeManager {
     
     for (Iterator<DatanodeDescriptor> it = nodeList.iterator(); it.hasNext();) {
       DatanodeDescriptor node = it.next();
-      if ((!inHostsList(node, null)) && (!inExcludedHostsList(node, null))
+      if ((!inHostsList(node)) && (!inExcludedHostsList(node))
           && node.isDecommissioned()) {
        // The include list is not empty, this existing datanode appears in
        // neither the include nor the exclude list, and it has been
        // decommissioned.
@@ -430,37 +430,23 @@ public class DatanodeManager {
   }
 
   /**
-   * Check if the given node (of DatanodeID or ipAddress) is in the (include or
-   * exclude) list.  If ipAddress in null, check only based upon the given 
-   * DatanodeID.  If ipAddress is not null, the ipAddress should refers to the
-   * same host that given DatanodeID refers to.
+   * Check if the given DatanodeID is in the given (include or exclude) list.
    * 
-   * @param node, the host DatanodeID
-   * @param ipAddress, if not null, should refers to the same host
-   *                   that DatanodeID refers to
-   * @param hostsList, the list of hosts in the include/exclude file
-   * @param isExcludeList, boolean, true if this is the exclude list
-   * @return boolean, if in the list
+   * @param node the DatanodeID to check
+   * @param hostsList the list of hosts in the include/exclude file
+   * @param isExcludeList true if this is the exclude list
+   * @return true if the node is in the list, false otherwise
    */
   private static boolean checkInList(final DatanodeID node,
-      final String ipAddress,
       final Set<String> hostsList,
       final boolean isExcludeList) {
     final InetAddress iaddr;
-    if (ipAddress != null) {
-      try {
-        iaddr = InetAddress.getByName(ipAddress);
-      } catch (UnknownHostException e) {
-        LOG.warn("Unknown ip address: " + ipAddress, e);
-        return isExcludeList;
-      }
-    } else {
-      try {
-        iaddr = InetAddress.getByName(node.getHost());
-      } catch (UnknownHostException e) {
-        LOG.warn("Unknown host: " + node.getHost(), e);
-        return isExcludeList;
-      }
+
+    try {
+      iaddr = InetAddress.getByName(node.getIpAddr());
+    } catch (UnknownHostException e) {
+      LOG.warn("Unknown IP: " + node.getIpAddr(), e);
+      return isExcludeList;
     }
 
     // if include list is empty, host is in include list
@@ -470,10 +456,10 @@ public class DatanodeManager {
     return // compare ipaddress(:port)
     (hostsList.contains(iaddr.getHostAddress().toString()))
         || (hostsList.contains(iaddr.getHostAddress().toString() + ":"
-            + node.getPort()))
+            + node.getXferPort()))
         // compare hostname(:port)
         || (hostsList.contains(iaddr.getHostName()))
-        || (hostsList.contains(iaddr.getHostName() + ":" + node.getPort()))
+        || (hostsList.contains(iaddr.getHostName() + ":" + node.getXferPort()))
         || ((node instanceof DatanodeInfo) && hostsList
             .contains(((DatanodeInfo) node).getHostName()));
   }
@@ -483,7 +469,7 @@ public class DatanodeManager {
    */
   private void checkDecommissioning(DatanodeDescriptor nodeReg, String ipAddr) { 
     // If the registered node is in exclude list, then decommission it
-    if (inExcludedHostsList(nodeReg, ipAddr)) {
+    if (inExcludedHostsList(nodeReg)) {
       startDecommission(nodeReg);
     }
   }
@@ -498,7 +484,7 @@ public class DatanodeManager {
     if (node.isDecommissionInProgress()) {
       if (!blockManager.isReplicationInProgress(node)) {
         node.setDecommissioned();
-        LOG.info("Decommission complete for node " + node.getName());
+        LOG.info("Decommission complete for node " + node);
       }
     }
     return node.isDecommissioned();
@@ -507,7 +493,7 @@ public class DatanodeManager {
   /** Start decommissioning the specified datanode. */
   private void startDecommission(DatanodeDescriptor node) {
     if (!node.isDecommissionInProgress() && !node.isDecommissioned()) {
-      LOG.info("Start Decommissioning node " + node.getName() + " with " + 
+      LOG.info("Start Decommissioning node " + node + " with " + 
           node.numBlocks() +  " blocks.");
       heartbeatManager.startDecommission(node);
       node.decommissioningStatus.setStartTime(now());
@@ -520,7 +506,7 @@ public class DatanodeManager {
   /** Stop decommissioning the specified datanodes. */
   void stopDecommission(DatanodeDescriptor node) {
     if (node.isDecommissionInProgress() || node.isDecommissioned()) {
-      LOG.info("Stop Decommissioning node " + node.getName());
+      LOG.info("Stop Decommissioning node " + node);
       heartbeatManager.stopDecommission(node);
       blockManager.processOverReplicatedBlocksOnReCommission(node);
     }
@@ -558,30 +544,30 @@ public class DatanodeManager {
     if (dnAddress == null) {
       // Mostly called inside an RPC.
       // But if not, use address passed by the data-node.
-      dnAddress = nodeReg.getHost();
-    }      
+      dnAddress = nodeReg.getIpAddr();
+    }
+
+    // Update the IP to the address of the RPC request that is
+    // registering this datanode.
+    nodeReg.setIpAddr(dnAddress);
+    nodeReg.setExportedKeys(blockManager.getBlockKeys());
 
     // Checks if the node is not on the hosts list.  If it is not, then
     // it will be disallowed from registering. 
-    if (!inHostsList(nodeReg, dnAddress)) {
+    if (!inHostsList(nodeReg)) {
       throw new DisallowedDatanodeException(nodeReg);
     }
-
-    // Update "name" with the IP address of the RPC request that
-    // is registering this datanode.
-    nodeReg.setName(dnAddress + ":" + nodeReg.getPort());
-    nodeReg.setExportedKeys(blockManager.getBlockKeys());
       
     NameNode.stateChangeLog.info("BLOCK* NameSystem.registerDatanode: "
-        + "node registration from " + nodeReg.getName()
+        + "node registration from " + nodeReg
         + " storage " + nodeReg.getStorageID());
 
     DatanodeDescriptor nodeS = datanodeMap.get(nodeReg.getStorageID());
-    DatanodeDescriptor nodeN = getDatanodeByHost(nodeReg.getName());
+    DatanodeDescriptor nodeN = getDatanodeByHost(nodeReg.getXferAddr());
       
     if (nodeN != null && nodeN != nodeS) {
       NameNode.LOG.info("BLOCK* NameSystem.registerDatanode: "
-                        + "node from name: " + nodeN.getName());
+                        + "node from name: " + nodeN);
       // nodeN previously served a different data storage, 
       // which is not served by anybody anymore.
       removeDatanode(nodeN);
@@ -610,8 +596,8 @@ public class DatanodeManager {
          but this might not work if the VERSION file format has changed 
        */        
         NameNode.stateChangeLog.info( "BLOCK* NameSystem.registerDatanode: "
-                                      + "node " + nodeS.getName()
-                                      + " is replaced by " + nodeReg.getName() + 
+                                      + "node " + nodeS
+                                      + " is replaced by " + nodeReg + 
                                       " with the same storageID " +
                                       nodeReg.getStorageID());
       }
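
Read together with the previous hunk, the ordering change is the interesting part: the registration's IP is rewritten from the RPC source address before the include-list check runs. A condensed sketch of the resulting flow (names from the hunks; error handling and locking elided):

    // The host check now sees the address the NameNode actually heard
    // from, not whatever address the datanode self-reported.
    nodeReg.setIpAddr(dnAddress);
    nodeReg.setExportedKeys(blockManager.getBlockKeys());
    if (!inHostsList(nodeReg)) {
      throw new DisallowedDatanodeException(nodeReg);
    }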
@@ -691,10 +677,10 @@ public class DatanodeManager {
   private void refreshDatanodes() throws IOException {
     for(DatanodeDescriptor node : datanodeMap.values()) {
       // Check if not include.
-      if (!inHostsList(node, null)) {
+      if (!inHostsList(node)) {
         node.setDisallowed(true); // case 2.
       } else {
-        if (inExcludedHostsList(node, null)) {
+        if (inExcludedHostsList(node)) {
           startDecommission(node); // case 3.
         } else {
           stopDecommission(node); // case 4.
@@ -821,16 +807,16 @@ public class DatanodeManager {
         }
        // Remove any form of this datanode from the include/exclude lists.
         try {
-          InetAddress inet = InetAddress.getByName(dn.getHost());
+          InetAddress inet = InetAddress.getByName(dn.getIpAddr());
           // compare hostname(:port)
           mustList.remove(inet.getHostName());
-          mustList.remove(inet.getHostName()+":"+dn.getPort());
+          mustList.remove(inet.getHostName()+":"+dn.getXferPort());
           // compare ipaddress(:port)
           mustList.remove(inet.getHostAddress().toString());
-          mustList.remove(inet.getHostAddress().toString()+ ":" +dn.getPort());
+          mustList.remove(inet.getHostAddress().toString()+ ":" +dn.getXferPort());
         } catch ( UnknownHostException e ) {
           mustList.remove(dn.getName());
-          mustList.remove(dn.getHost());
+          mustList.remove(dn.getIpAddr());
           LOG.warn(e);
         }
       }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java Sun Apr  1 22:12:12 2012
@@ -39,10 +39,10 @@ class Host2NodesMap {
       return false;
     }
       
-    String host = node.getHost();
+    String ipAddr = node.getIpAddr();
     hostmapLock.readLock().lock();
     try {
-      DatanodeDescriptor[] nodes = map.get(host);
+      DatanodeDescriptor[] nodes = map.get(ipAddr);
       if (nodes != null) {
         for(DatanodeDescriptor containedNode:nodes) {
           if (node==containedNode) {
@@ -66,8 +66,8 @@ class Host2NodesMap {
         return false;
       }
       
-      String host = node.getHost();
-      DatanodeDescriptor[] nodes = map.get(host);
+      String ipAddr = node.getIpAddr();
+      DatanodeDescriptor[] nodes = map.get(ipAddr);
       DatanodeDescriptor[] newNodes;
       if (nodes==null) {
         newNodes = new DatanodeDescriptor[1];
@@ -77,7 +77,7 @@ class Host2NodesMap {
         System.arraycopy(nodes, 0, newNodes, 0, nodes.length);
         newNodes[nodes.length] = node;
       }
-      map.put(host, newNodes);
+      map.put(ipAddr, newNodes);
       return true;
     } finally {
       hostmapLock.writeLock().unlock();
@@ -92,17 +92,17 @@ class Host2NodesMap {
       return false;
     }
       
-    String host = node.getHost();
+    String ipAddr = node.getIpAddr();
     hostmapLock.writeLock().lock();
     try {
 
-      DatanodeDescriptor[] nodes = map.get(host);
+      DatanodeDescriptor[] nodes = map.get(ipAddr);
       if (nodes==null) {
         return false;
       }
       if (nodes.length==1) {
         if (nodes[0]==node) {
-          map.remove(host);
+          map.remove(ipAddr);
           return true;
         } else {
           return false;
@@ -122,7 +122,7 @@ class Host2NodesMap {
         newNodes = new DatanodeDescriptor[nodes.length-1];
         System.arraycopy(nodes, 0, newNodes, 0, i);
         System.arraycopy(nodes, i+1, newNodes, i, nodes.length-i-1);
-        map.put(host, newNodes);
+        map.put(ipAddr, newNodes);
         return true;
       }
     } finally {
@@ -130,17 +130,18 @@ class Host2NodesMap {
     }
   }
     
-  /** get a data node by its host.
-   * @return DatanodeDescriptor if found; otherwise null.
+  /**
+   * Get a data node by its IP address.
+   * @return DatanodeDescriptor if found, null otherwise 
    */
-  DatanodeDescriptor getDatanodeByHost(String host) {
-    if (host==null) {
+  DatanodeDescriptor getDatanodeByHost(String ipAddr) {
+    if (ipAddr == null) {
       return null;
     }
       
     hostmapLock.readLock().lock();
     try {
-      DatanodeDescriptor[] nodes = map.get(host);
+      DatanodeDescriptor[] nodes = map.get(ipAddr);
       // no entry
       if (nodes== null) {
         return null;
@@ -155,40 +156,4 @@ class Host2NodesMap {
       hostmapLock.readLock().unlock();
     }
   }
-    
-  /**
-   * Find data node by its name.
-   * 
-   * @return DatanodeDescriptor if found or null otherwise 
-   */
-  public DatanodeDescriptor getDatanodeByName(String name) {
-    if (name==null) {
-      return null;
-    }
-      
-    int colon = name.indexOf(":");
-    String host;
-    if (colon < 0) {
-      host = name;
-    } else {
-      host = name.substring(0, colon);
-    }
-
-    hostmapLock.readLock().lock();
-    try {
-      DatanodeDescriptor[] nodes = map.get(host);
-      // no entry
-      if (nodes== null) {
-        return null;
-      }
-      for(DatanodeDescriptor containedNode:nodes) {
-        if (name.equals(containedNode.getName())) {
-          return containedNode;
-        }
-      }
-      return null;
-    } finally {
-      hostmapLock.readLock().unlock();
-    }
-  }
 }
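
With getDatanodeByName removed, this class is keyed purely by IP. A toy sketch of the resulting map shape when several datanodes run on one machine (placeholder descriptors; the real code holds hostmapLock while mutating):

    // Two datanodes on the same IP, distinguished only by transfer
    // port, share a single bucket in the IP-keyed map.
    Map<String, DatanodeDescriptor[]> map =
        new HashMap<String, DatanodeDescriptor[]>();
    map.put("10.0.0.1", new DatanodeDescriptor[] { dnOnPort50010, dnOnPort50011 });
    // getDatanodeByHost("10.0.0.1") returns one descriptor from that bucket.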

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java Sun Apr  1 22:12:12 2012
@@ -75,7 +75,7 @@ class InvalidateBlocks {
       numBlocks++;
       if (log) {
         NameNode.stateChangeLog.info("BLOCK* " + getClass().getSimpleName()
-            + ": add " + block + " to " + datanode.getName());
+            + ": add " + block + " to " + datanode);
       }
     }
   }
@@ -111,7 +111,8 @@ class InvalidateBlocks {
     for(Map.Entry<String,LightWeightHashSet<Block>> entry : node2blocks.entrySet()) {
       final LightWeightHashSet<Block> blocks = entry.getValue();
       if (blocks.size() > 0) {
-        out.println(datanodeManager.getDatanode(entry.getKey()).getName() + blocks);
+        out.println(datanodeManager.getDatanode(entry.getKey()));
+        out.println(blocks);
       }
     }
   }
@@ -135,7 +136,7 @@ class InvalidateBlocks {
 
     if (NameNode.stateChangeLog.isInfoEnabled()) {
       NameNode.stateChangeLog.info("BLOCK* " + getClass().getSimpleName()
-          + ": ask " + dn.getName() + " to delete " + toInvalidate);
+          + ": ask " + dn + " to delete " + toInvalidate);
     }
     return toInvalidate.size();
   }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java Sun Apr  1 22:12:12 2012
@@ -88,9 +88,6 @@ public class JspHelper {
   private static class NodeRecord extends DatanodeInfo {
     int frequency;
 
-    public NodeRecord() {
-      frequency = -1;
-    }
     public NodeRecord(DatanodeInfo info, int count) {
       super(info);
       this.frequency = count;
@@ -172,7 +169,7 @@ public class JspHelper {
 
       //just ping to check whether the node is alive
       InetSocketAddress targetAddr = NetUtils.createSocketAddr(
-          chosenNode.getHost() + ":" + chosenNode.getInfoPort());
+          chosenNode.getInfoAddr());
         
       try {
         s = NetUtils.getDefaultSocketFactory(conf).createSocket();

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Sun Apr  1 22:12:12 2012
@@ -667,7 +667,9 @@ public class DataNode extends Configured
    * @param nsInfo the namespace info from the first part of the NN handshake
    */
   DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) {
-    DatanodeRegistration bpRegistration = new DatanodeRegistration(getXferAddr());
+    final String xferIp = streamingAddr.getAddress().getHostAddress();
+    DatanodeRegistration bpRegistration = new DatanodeRegistration(xferIp);
+    bpRegistration.setXferPort(getXferPort());
     bpRegistration.setInfoPort(getInfoPort());
     bpRegistration.setIpcPort(getIpcPort());
     bpRegistration.setHostName(hostName);
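
The net effect of this hunk is that the registration carries a bare IP plus independent port fields instead of one "ip:port" name string. A small sketch with example values (the classic Hadoop default ports; constructor and setter names from the hunk):

    // Ports are separate fields, so the NN can derive the xfer, info,
    // and ipc addresses from one IP without any string parsing.
    DatanodeRegistration reg = new DatanodeRegistration("10.0.0.1"); // IP only
    reg.setXferPort(50010);  // data transfer
    reg.setInfoPort(50075);  // HTTP
    reg.setIpcPort(50020);   // RPC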
@@ -702,7 +704,7 @@ public class DataNode extends Configured
       storage.setStorageID(bpRegistration.getStorageID());
       storage.writeAll();
       LOG.info("New storage id " + bpRegistration.getStorageID()
-          + " is assigned to data-node " + bpRegistration.getName());
+          + " is assigned to data-node " + bpRegistration);
     } else if(!storage.getStorageID().equals(bpRegistration.getStorageID())) {
       throw new IOException("Inconsistent storage IDs. Name-node returned "
           + bpRegistration.getStorageID() 
@@ -873,13 +875,6 @@ public class DataNode extends Configured
   }
 
   /**
-   * @return the IP:port to report to the NN for data transfer
-   */
-  private String getXferAddr() {
-    return streamingAddr.getAddress().getHostAddress() + ":" + getXferPort();
-  }
-
-  /**
    * @return the datanode's IPC port
    */
   @VisibleForTesting
@@ -921,8 +916,8 @@ public class DataNode extends Configured
   public static InterDatanodeProtocol createInterDataNodeProtocolProxy(
       DatanodeID datanodeid, final Configuration conf, final int socketTimeout)
     throws IOException {
-    final InetSocketAddress addr = NetUtils.createSocketAddr(
-        datanodeid.getHost() + ":" + datanodeid.getIpcPort());
+    final InetSocketAddress addr =
+      NetUtils.createSocketAddr(datanodeid.getIpcAddr());
     if (InterDatanodeProtocol.LOG.isDebugEnabled()) {
       InterDatanodeProtocol.LOG.debug("InterDatanodeProtocol addr=" + addr);
     }
@@ -946,7 +941,7 @@ public class DataNode extends Configured
   
   public static void setNewStorageID(DatanodeID dnId) {
     LOG.info("Datanode is " + dnId);
-    dnId.setStorageID(createNewStorageId(dnId.getPort()));
+    dnId.setStorageID(createNewStorageId(dnId.getXferPort()));
   }
   
   static String createNewStorageId(int port) {
@@ -1222,7 +1217,7 @@ public class DataNode extends Configured
       if (LOG.isInfoEnabled()) {
         StringBuilder xfersBuilder = new StringBuilder();
         for (int i = 0; i < numTargets; i++) {
-          xfersBuilder.append(xferTargets[i].getName());
+          xfersBuilder.append(xferTargets[i]);
           xfersBuilder.append(" ");
         }
         LOG.info(bpReg + " Starting thread to transfer block " + 
@@ -1380,7 +1375,7 @@ public class DataNode extends Configured
       
       try {
         InetSocketAddress curTarget = 
-          NetUtils.createSocketAddr(targets[0].getName());
+          NetUtils.createSocketAddr(targets[0].getXferAddr());
         sock = newSocket();
         NetUtils.connect(sock, curTarget, dnConf.socketTimeout);
         sock.setSoTimeout(targets.length * dnConf.socketTimeout);
@@ -1433,9 +1428,8 @@ public class DataNode extends Configured
           }
         }
       } catch (IOException ie) {
-        LOG.warn(
-            bpReg + ":Failed to transfer " + b + " to " + targets[0].getName()
-                + " got ", ie);
+        LOG.warn(bpReg + ":Failed to transfer " + b + " to " +
+            targets[0] + " got ", ie);
         // check if there are any disk problem
         checkDiskError();
         
@@ -1989,9 +1983,9 @@ public class DataNode extends Configured
   
   private static void logRecoverBlock(String who,
       ExtendedBlock block, DatanodeID[] targets) {
-    StringBuilder msg = new StringBuilder(targets[0].getName());
+    StringBuilder msg = new StringBuilder(targets[0].toString());
     for (int i = 1; i < targets.length; i++) {
-      msg.append(", " + targets[i].getName());
+      msg.append(", " + targets[i]);
     }
     LOG.info(who + " calls recoverBlock(block=" + block
         + ", targets=[" + msg + "])");

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java Sun Apr  1 22:12:12 2012
@@ -352,7 +352,7 @@ class DataXceiver extends Receiver imple
       if (targets.length > 0) {
         InetSocketAddress mirrorTarget = null;
         // Connect to backup machine
-        mirrorNode = targets[0].getName();
+        mirrorNode = targets[0].getXferAddr();
         mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
         mirrorSock = datanode.newSocket();
         try {
@@ -667,8 +667,8 @@ class DataXceiver extends Receiver imple
     
     try {
       // get the output stream to the proxy
-      InetSocketAddress proxyAddr = NetUtils.createSocketAddr(
-          proxySource.getName());
+      InetSocketAddress proxyAddr =
+        NetUtils.createSocketAddr(proxySource.getXferAddr());
       proxySock = datanode.newSocket();
       NetUtils.connect(proxySock, proxyAddr, dnConf.socketTimeout);
       proxySock.setSoTimeout(dnConf.socketTimeout);
@@ -820,7 +820,7 @@ class DataXceiver extends Receiver imple
             if (mode == BlockTokenSecretManager.AccessMode.WRITE) {
               DatanodeRegistration dnR = 
                 datanode.getDNRegistrationForBP(blk.getBlockPoolId());
-              resp.setFirstBadLink(dnR.getName());
+              resp.setFirstBadLink(dnR.getXferAddr());
             }
             resp.build().writeDelimitedTo(out);
             out.flush();

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java Sun Apr  1 22:12:12 2012
@@ -136,10 +136,8 @@ public class DatanodeJspHelper {
           out.print("Empty file");
         } else {
           DatanodeInfo chosenNode = JspHelper.bestNode(firstBlock, conf);
-          String fqdn = canonicalize(chosenNode.getHost());
-          String datanodeAddr = chosenNode.getName();
-          int datanodePort = Integer.parseInt(datanodeAddr.substring(
-              datanodeAddr.indexOf(':') + 1, datanodeAddr.length()));
+          String fqdn = canonicalize(chosenNode.getIpAddr());
+          int datanodePort = chosenNode.getXferPort();
           String redirectLocation = "http://" + fqdn + ":"
               + chosenNode.getInfoPort() + "/browseBlock.jsp?blockId="
               + firstBlock.getBlock().getBlockId() + "&blockSize="
@@ -313,7 +311,7 @@ public class DatanodeJspHelper {
       dfs.close();
       return;
     }
-    String fqdn = canonicalize(chosenNode.getHost());
+    String fqdn = canonicalize(chosenNode.getIpAddr());
     String tailUrl = "http://" + fqdn + ":" + chosenNode.getInfoPort()
         + "/tail.jsp?filename=" + URLEncoder.encode(filename, "UTF-8")
         + "&namenodeInfoPort=" + namenodeInfoPort
@@ -360,10 +358,9 @@ public class DatanodeJspHelper {
       out.print("<td>" + blockidstring + ":</td>");
       DatanodeInfo[] locs = cur.getLocations();
       for (int j = 0; j < locs.length; j++) {
-        String datanodeAddr = locs[j].getName();
-        datanodePort = Integer.parseInt(datanodeAddr.substring(datanodeAddr
-            .indexOf(':') + 1, datanodeAddr.length()));
-        fqdn = canonicalize(locs[j].getHost());
+        String datanodeAddr = locs[j].getXferAddr();
+        datanodePort = locs[j].getXferPort();
+        fqdn = canonicalize(locs[j].getIpAddr());
         String blockUrl = "http://" + fqdn + ":" + locs[j].getInfoPort()
             + "/browseBlock.jsp?blockId=" + blockidstring
             + "&blockSize=" + blockSize
@@ -519,10 +516,8 @@ public class DatanodeJspHelper {
             nextStartOffset = 0;
             nextBlockSize = nextBlock.getBlock().getNumBytes();
             DatanodeInfo d = JspHelper.bestNode(nextBlock, conf);
-            String datanodeAddr = d.getName();
-            nextDatanodePort = Integer.parseInt(datanodeAddr.substring(
-                datanodeAddr.indexOf(':') + 1, datanodeAddr.length()));
-            nextHost = d.getHost();
+            nextDatanodePort = d.getXferPort();
+            nextHost = d.getIpAddr();
             nextPort = d.getInfoPort();
           }
         }
@@ -573,10 +568,8 @@ public class DatanodeJspHelper {
               prevStartOffset = 0;
             prevBlockSize = prevBlock.getBlock().getNumBytes();
             DatanodeInfo d = JspHelper.bestNode(prevBlock, conf);
-            String datanodeAddr = d.getName();
-            prevDatanodePort = Integer.parseInt(datanodeAddr.substring(
-                datanodeAddr.indexOf(':') + 1, datanodeAddr.length()));
-            prevHost = d.getHost();
+            prevDatanodePort = d.getXferPort();
+            prevHost = d.getIpAddr();
             prevPort = d.getInfoPort();
           }
         }
@@ -693,7 +686,8 @@ public class DatanodeJspHelper {
       dfs.close();
       return;
     }
-    InetSocketAddress addr = NetUtils.createSocketAddr(chosenNode.getName());
+    InetSocketAddress addr = 
+      NetUtils.createSocketAddr(chosenNode.getXferAddr());
     // view the last chunkSizeToView bytes while Tailing
     final long startOffset = blockSize >= chunkSizeToView ? blockSize
         - chunkSizeToView : 0;
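
The JSP changes in this file all follow one pattern: stop re-parsing the port out of the "ip:port" string and ask the ID directly. A hedged before/after sketch (variable names assumed):

    // Old pattern: substring parsing of the combined address.
    String datanodeAddr = chosenNode.getXferAddr();   // e.g. "10.0.0.1:50010"
    int port = Integer.parseInt(
        datanodeAddr.substring(datanodeAddr.indexOf(':') + 1));
    // New pattern: the port is a first-class accessor.
    int samePort = chosenNode.getXferPort();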

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java Sun Apr  1 22:12:12 2012
@@ -59,7 +59,7 @@ public class FileChecksumServlets {
         HttpServletRequest request, NameNode nn) 
         throws IOException {
       final String hostname = host instanceof DatanodeInfo 
-          ? ((DatanodeInfo)host).getHostName() : host.getHost();
+          ? ((DatanodeInfo)host).getHostName() : host.getIpAddr();
       final String scheme = request.getScheme();
       final int port = "https".equals(scheme)
           ? (Integer)getServletContext().getAttribute("datanode.https.port")

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java?rev=1308205&r1=1308204&r2=1308205&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java Sun Apr  1 22:12:12 2012
@@ -59,7 +59,7 @@ public class FileDataServlet extends Dfs
     if (host instanceof DatanodeInfo) {
       hostname = ((DatanodeInfo)host).getHostName();
     } else {
-      hostname = host.getHost();
+      hostname = host.getIpAddr();
     }
     final int port = "https".equals(scheme)
       ? (Integer)getServletContext().getAttribute("datanode.https.port")