Posted to hdfs-commits@hadoop.apache.org by el...@apache.org on 2012/04/01 05:41:42 UTC

svn commit: r1308014 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/protocol/ src/main/java/org/apache/hadoop/hdfs/protocolPB/ src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ src/main/jav...

Author: eli
Date: Sun Apr  1 03:41:41 2012
New Revision: 1308014

URL: http://svn.apache.org/viewvc?rev=1308014&view=rev
Log:
HDFS-3171. The DatanodeID "name" field is overloaded. Contributed by Eli Collins
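
For context: before this patch the DatanodeID "name" field did double
duty as both a hostname and an IP:port data transfer address. The change
below makes "name" consistently hold the IP:port of the streaming (data
transfer) address, with the hostname carried in a separate field. A
minimal sketch of the resulting split (illustrative only; the real class
is org.apache.hadoop.hdfs.protocol.DatanodeID and has more fields):

  public class DatanodeID {
    private String name;     // IP:port of the data transfer address,
                             // e.g. "10.0.0.5:50010"
    private String hostName; // DNS hostname, e.g. "dn1.example.com"

    // The namenode overwrites "name" at registration time with the IP
    // it observed on the registration RPC (see DatanodeManager below),
    // so "name" can no longer be assumed to contain a hostname.
    public void setName(String ipColonPort) { this.name = ipColonPort; }
    public String getName()     { return name; }
    public String getHostName() { return hostName; }
  }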

Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1308014&r1=1308013&r2=1308014&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Sun Apr  1 03:41:41 2012
@@ -290,6 +290,8 @@ Release 2.0.0 - UNRELEASED 
 
     HDFS-3172. dfs.upgrade.permission is dead code. (eli)
 
+    HDFS-3171. The DatanodeID "name" field is overloaded. (eli)
+
   OPTIMIZATIONS
 
     HDFS-3024. Improve performance of stringification in addStoredBlock (todd)

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java?rev=1308014&r1=1308013&r2=1308014&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java Sun Apr  1 03:41:41 2012
@@ -183,6 +183,7 @@ public class DatanodeID implements Writa
    */
   public void updateRegInfo(DatanodeID nodeReg) {
     name = nodeReg.getName();
+    hostName = nodeReg.getHostName();
     infoPort = nodeReg.getInfoPort();
     ipcPort = nodeReg.getIpcPort();
   }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java?rev=1308014&r1=1308013&r2=1308014&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java Sun Apr  1 03:41:41 2012
@@ -606,8 +606,8 @@ public class PBHelper {
     DatanodeRegistrationProto.Builder builder = DatanodeRegistrationProto
         .newBuilder();
     return builder.setDatanodeID(PBHelper.convert((DatanodeID) registration))
-        .setStorageInfo(PBHelper.convert(registration.storageInfo))
-        .setKeys(PBHelper.convert(registration.exportedKeys)).build();
+        .setStorageInfo(PBHelper.convert(registration.getStorageInfo()))
+        .setKeys(PBHelper.convert(registration.getExportedKeys())).build();
   }
 
   public static DatanodeRegistration convert(DatanodeRegistrationProto proto) {

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java?rev=1308014&r1=1308013&r2=1308014&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java Sun Apr  1 03:41:41 2012
@@ -337,7 +337,7 @@ public class DatanodeManager {
   }
 
   /** Physically remove node from datanodeMap. */
-  private void wipeDatanode(final DatanodeID node) throws IOException {
+  private void wipeDatanode(final DatanodeID node) {
     final String key = node.getStorageID();
     synchronized (datanodeMap) {
       host2DatanodeMap.remove(datanodeMap.remove(key));
@@ -481,8 +481,7 @@ public class DatanodeManager {
   /**
    * Decommission the node if it is in exclude list.
    */
-  private void checkDecommissioning(DatanodeDescriptor nodeReg, String ipAddr) 
-    throws IOException {
+  private void checkDecommissioning(DatanodeDescriptor nodeReg, String ipAddr) { 
     // If the registered node is in exclude list, then decommission it
     if (inExcludedHostsList(nodeReg, ipAddr)) {
       startDecommission(nodeReg);
@@ -506,7 +505,7 @@ public class DatanodeManager {
   }
 
   /** Start decommissioning the specified datanode. */
-  private void startDecommission(DatanodeDescriptor node) throws IOException {
+  private void startDecommission(DatanodeDescriptor node) {
     if (!node.isDecommissionInProgress() && !node.isDecommissioned()) {
       LOG.info("Start Decommissioning node " + node.getName() + " with " + 
           node.numBlocks() +  " blocks.");
@@ -519,7 +518,7 @@ public class DatanodeManager {
   }
 
   /** Stop decommissioning the specified datanodes. */
-  void stopDecommission(DatanodeDescriptor node) throws IOException {
+  void stopDecommission(DatanodeDescriptor node) {
     if (node.isDecommissionInProgress() || node.isDecommissioned()) {
       LOG.info("Stop Decommissioning node " + node.getName());
       heartbeatManager.stopDecommission(node);
@@ -545,8 +544,16 @@ public class DatanodeManager {
     return newID;
   }
 
-  public void registerDatanode(DatanodeRegistration nodeReg
-      ) throws IOException {
+  /**
+   * Register the given datanode with the namenode. NB: the given
+   * registration is mutated and given back to the datanode.
+   *
+   * @param nodeReg the datanode registration
+   * @throws DisallowedDatanodeException if the registration request is
+   *    denied because the datanode does not match includes/excludes
+   */
+  public void registerDatanode(DatanodeRegistration nodeReg)
+      throws DisallowedDatanodeException {
     String dnAddress = Server.getRemoteAddress();
     if (dnAddress == null) {
       // Mostly called inside an RPC.
@@ -560,16 +567,10 @@ public class DatanodeManager {
       throw new DisallowedDatanodeException(nodeReg);
     }
 
-    String hostName = nodeReg.getHost();
-      
-    // update the datanode's name with ip:port
-    DatanodeID dnReg = new DatanodeID(dnAddress + ":" + nodeReg.getPort(),
-                                      hostName,
-                                      nodeReg.getStorageID(),
-                                      nodeReg.getInfoPort(),
-                                      nodeReg.getIpcPort());
-    nodeReg.updateRegInfo(dnReg);
-    nodeReg.exportedKeys = blockManager.getBlockKeys();
+    // Update "name" with the IP address of the RPC request that
+    // is registering this datanode.
+    nodeReg.setName(dnAddress + ":" + nodeReg.getPort());
+    nodeReg.setExportedKeys(blockManager.getBlockKeys());
       
     NameNode.stateChangeLog.info("BLOCK* NameSystem.registerDatanode: "
         + "node registration from " + nodeReg.getName()
@@ -617,7 +618,6 @@ public class DatanodeManager {
       // update cluster map
       getNetworkTopology().remove(nodeS);
       nodeS.updateRegInfo(nodeReg);
-      nodeS.setHostName(hostName);
       nodeS.setDisallowed(false); // Node is in the include list
       
       // resolve network location
@@ -635,7 +635,7 @@ public class DatanodeManager {
       // this data storage has never been registered
       // it is either empty or was created by pre-storageID version of DFS
       nodeReg.setStorageID(newStorageID());
-      if(NameNode.stateChangeLog.isDebugEnabled()) {
+      if (NameNode.stateChangeLog.isDebugEnabled()) {
         NameNode.stateChangeLog.debug(
             "BLOCK* NameSystem.registerDatanode: "
             + "new storageID " + nodeReg.getStorageID() + " assigned.");

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java?rev=1308014&r1=1308013&r2=1308014&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java Sun Apr  1 03:41:41 2012
@@ -325,10 +325,10 @@ class BPOfferService {
   void registrationSucceeded(BPServiceActor bpServiceActor,
       DatanodeRegistration reg) throws IOException {
     if (bpRegistration != null) {
-      checkNSEquality(bpRegistration.storageInfo.getNamespaceID(),
-          reg.storageInfo.getNamespaceID(), "namespace ID");
-      checkNSEquality(bpRegistration.storageInfo.getClusterID(),
-          reg.storageInfo.getClusterID(), "cluster ID");
+      checkNSEquality(bpRegistration.getStorageInfo().getNamespaceID(),
+          reg.getStorageInfo().getNamespaceID(), "namespace ID");
+      checkNSEquality(bpRegistration.getStorageInfo().getClusterID(),
+          reg.getStorageInfo().getClusterID(), "cluster ID");
     } else {
       bpRegistration = reg;
     }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java?rev=1308014&r1=1308013&r2=1308014&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java Sun Apr  1 03:41:41 2012
@@ -602,7 +602,7 @@ class BPServiceActor implements Runnable
 
     while (shouldRun()) {
       try {
-        // Use returned registration from namenode with updated machine name.
+        // Use returned registration from namenode with updated fields
         bpRegistration = bpNamenode.registerDatanode(bpRegistration);
         break;
       } catch(SocketTimeoutException e) {  // namenode is busy

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1308014&r1=1308013&r2=1308014&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Sun Apr  1 03:41:41 2012
@@ -244,9 +244,10 @@ public class DataNode extends Configured
   private DataStorage storage = null;
   private HttpServer infoServer = null;
   DataNodeMetrics metrics;
-  private InetSocketAddress selfAddr;
+  private InetSocketAddress streamingAddr;
   
-  private volatile String hostName; // Host name of this datanode
+  private String hostName;
+  private DatanodeID id;
   
   boolean isBlockTokenEnabled;
   BlockPoolTokenSecretManager blockPoolTokenSecretManager;
@@ -288,6 +289,7 @@ public class DataNode extends Configured
         .get(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY);
     try {
       hostName = getHostName(conf);
+      LOG.info("Configured hostname is " + hostName);
       startDataNode(conf, dataDirs, resources);
     } catch (IOException ie) {
       shutdown();
@@ -305,16 +307,25 @@ public class DataNode extends Configured
     clusterId = nsCid;
   }
 
+  /**
+   * Returns the hostname for this datanode. If the hostname is not
+   * explicitly configured in the given config, then it is determined
+   * via the DNS class.
+   *
+   * @param config the configuration to query
+   * @return the hostname (NB: may not be a FQDN)
+   * @throws UnknownHostException if the dfs.datanode.dns.interface
+   *    option is used and the hostname cannot be determined
+   */
   private static String getHostName(Configuration config)
       throws UnknownHostException {
-    // use configured nameserver & interface to get local hostname
     String name = config.get(DFS_DATANODE_HOST_NAME_KEY);
     if (name == null) {
-      name = DNS
-          .getDefaultHost(config.get(DFS_DATANODE_DNS_INTERFACE_KEY,
-              DFS_DATANODE_DNS_INTERFACE_DEFAULT), config.get(
-              DFS_DATANODE_DNS_NAMESERVER_KEY,
-              DFS_DATANODE_DNS_NAMESERVER_DEFAULT));
+      name = DNS.getDefaultHost(
+          config.get(DFS_DATANODE_DNS_INTERFACE_KEY,
+                     DFS_DATANODE_DNS_INTERFACE_DEFAULT),
+          config.get(DFS_DATANODE_DNS_NAMESERVER_KEY,
+                     DFS_DATANODE_DNS_NAMESERVER_DEFAULT));
     }
     return name;
   }
@@ -485,23 +496,22 @@ public class DataNode extends Configured
   }
   
   private void initDataXceiver(Configuration conf) throws IOException {
-    InetSocketAddress streamingAddr = DataNode.getStreamingAddr(conf);
-
     // find free port or use privileged port provided
     ServerSocket ss;
-    if(secureResources == null) {
+    if (secureResources == null) {
+      InetSocketAddress addr = DataNode.getStreamingAddr(conf);
       ss = (dnConf.socketWriteTimeout > 0) ? 
           ServerSocketChannel.open().socket() : new ServerSocket();
-          Server.bind(ss, streamingAddr, 0);
+          Server.bind(ss, addr, 0);
     } else {
       ss = secureResources.getStreamingSocket();
     }
     ss.setReceiveBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE); 
-    // adjust machine name with the actual port
-    int tmpPort = ss.getLocalPort();
-    selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(),
-                                     tmpPort);
-    LOG.info("Opened streaming server at " + selfAddr);
+
+    streamingAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(),
+                                          ss.getLocalPort());
+
+    LOG.info("Opened streaming server at " + streamingAddr);
     this.threadGroup = new ThreadGroup("dataXceiverServer");
     this.dataXceiverServer = new Daemon(threadGroup, 
         new DataXceiverServer(ss, conf, this));
@@ -646,7 +656,7 @@ public class DataNode extends Configured
     this.blockPoolTokenSecretManager = new BlockPoolTokenSecretManager();
     initIpcServer(conf);
 
-    metrics = DataNodeMetrics.create(conf, getMachineName());
+    metrics = DataNodeMetrics.create(conf, getDisplayName());
 
     blockPoolManager = new BlockPoolManager(this);
     blockPoolManager.refreshNamenodes(conf);
@@ -657,14 +667,16 @@ public class DataNode extends Configured
    * @param nsInfo the namespace info from the first part of the NN handshake
    */
   DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) {
-    DatanodeRegistration bpRegistration = createUnknownBPRegistration();
-    String blockPoolId = nsInfo.getBlockPoolID();
-    
+    DatanodeRegistration bpRegistration = new DatanodeRegistration(getXferAddr());
+    bpRegistration.setInfoPort(getInfoPort());
+    bpRegistration.setIpcPort(getIpcPort());
+    bpRegistration.setHostName(hostName);
     bpRegistration.setStorageID(getStorageId());
-    StorageInfo storageInfo = storage.getBPStorage(blockPoolId);
+
+    StorageInfo storageInfo = storage.getBPStorage(nsInfo.getBlockPoolID());
     if (storageInfo == null) {
       // it's null in the case of SimulatedDataSet
-      bpRegistration.storageInfo.layoutVersion = HdfsConstants.LAYOUT_VERSION;
+      bpRegistration.getStorageInfo().layoutVersion = HdfsConstants.LAYOUT_VERSION;
       bpRegistration.setStorageInfo(nsInfo);
     } else {
       bpRegistration.setStorageInfo(storageInfo);
@@ -679,13 +691,14 @@ public class DataNode extends Configured
    * Also updates the block pool's state in the secret manager.
    */
   synchronized void bpRegistrationSucceeded(DatanodeRegistration bpRegistration,
-      String blockPoolId)
-      throws IOException {
-    hostName = bpRegistration.getHost();
+      String blockPoolId) throws IOException {
+    // Set the ID if we haven't already
+    if (null == id) {
+      id = bpRegistration;
+    }
 
     if (storage.getStorageID().equals("")) {
-      // This is a fresh datanode -- take the storage ID provided by the
-      // NN and persist it.
+      // This is a fresh datanode, persist the NN-provided storage ID
       storage.setStorageID(bpRegistration.getStorageID());
       storage.writeAll();
       LOG.info("New storage id " + bpRegistration.getStorageID()
@@ -708,7 +721,7 @@ public class DataNode extends Configured
    */
   private void registerBlockPoolWithSecretManager(DatanodeRegistration bpRegistration,
       String blockPoolId) throws IOException {
-    ExportedBlockKeys keys = bpRegistration.exportedKeys;
+    ExportedBlockKeys keys = bpRegistration.getExportedKeys();
     isBlockTokenEnabled = keys.isBlockTokenEnabled();
     // TODO should we check that all federated nns are either enabled or
     // disabled?
@@ -728,8 +741,8 @@ public class DataNode extends Configured
     }
     
     blockPoolTokenSecretManager.setKeys(blockPoolId,
-        bpRegistration.exportedKeys);
-    bpRegistration.exportedKeys = ExportedBlockKeys.DUMMY_KEYS;
+        bpRegistration.getExportedKeys());
+    bpRegistration.setExportedKeys(ExportedBlockKeys.DUMMY_KEYS);
   }
 
   /**
@@ -783,18 +796,6 @@ public class DataNode extends Configured
     data.addBlockPool(nsInfo.getBlockPoolID(), conf);
   }
 
-  /**
-   * Create a DatanodeRegistration object with no valid StorageInfo.
-   * This is used when reporting an error during handshake - ie
-   * before we can load any specific block pool.
-   */
-  private DatanodeRegistration createUnknownBPRegistration() {
-    DatanodeRegistration reg = new DatanodeRegistration(getMachineName());
-    reg.setInfoPort(infoServer.getPort());
-    reg.setIpcPort(getIpcPort());
-    return reg;
-  }
-
   BPOfferService[] getAllBpOs() {
     return blockPoolManager.getAllNamenodeThreads();
   }
@@ -844,23 +845,44 @@ public class DataNode extends Configured
     MBeans.register("DataNode", "DataNodeInfo", this);
   }
   
-  int getPort() {
-    return selfAddr.getPort();
+  int getXferPort() {
+    return streamingAddr.getPort();
   }
   
   String getStorageId() {
     return storage.getStorageID();
   }
-  
-  /** 
-   * Get host:port with host set to Datanode host and port set to the
-   * port {@link DataXceiver} is serving.
-   * @return host:port string
+
+  /**
+   * @return name useful for logging
    */
-  public String getMachineName() {
-    return hostName + ":" + getPort();
+  public String getDisplayName() {
+    // NB: our DatanodeID may not be set yet
+    return hostName + ":" + getIpcPort();
   }
-  
+
+  /**
+   * NB: The datanode can perform data transfer on the streaming
+   * address; however, clients are given the IPC IP address for data
+   * transfer, and that may be a different address.
+   * 
+   * @return socket address for data transfer
+   */
+  public InetSocketAddress getXferAddress() {
+    return streamingAddr;
+  }
+
+  /**
+   * @return the IP:port to report to the NN for data transfer
+   */
+  private String getXferAddr() {
+    return streamingAddr.getAddress().getHostAddress() + ":" + getXferPort();
+  }
+
+  /**
+   * @return the datanode's IPC port
+   */
+  @VisibleForTesting
   public int getIpcPort() {
     return ipcServer.getListenerAddress().getPort();
   }
@@ -881,25 +903,6 @@ public class DataNode extends Configured
   }
   
   /**
-   * get BP registration by machine and port name (host:port)
-   * @param mName - the name that the NN used
-   * @return BP registration 
-   * @throws IOException 
-   */
-  DatanodeRegistration getDNRegistrationByMachineName(String mName) {
-    // TODO: all the BPs should have the same name as each other, they all come
-    // from getName() here! and the use cases only are in tests where they just
-    // call with getName(). So we could probably just make this method return
-    // the first BPOS's registration. See HDFS-2609.
-    BPOfferService [] bposArray = blockPoolManager.getAllNamenodeThreads();
-    for (BPOfferService bpos : bposArray) {
-      if(bpos.bpRegistration.getName().equals(mName))
-        return bpos.bpRegistration;
-    }
-    return null;
-  }
-  
-  /**
    * Creates either NIO or regular depending on socketWriteTimeout.
    */
   protected Socket newSocket() throws IOException {
@@ -936,10 +939,6 @@ public class DataNode extends Configured
       throw new IOException(ie.getMessage());
     }
   }
-  
-  public InetSocketAddress getSelfAddr() {
-    return selfAddr;
-  }
     
   DataNodeMetrics getMetrics() {
     return metrics;
@@ -1632,7 +1631,7 @@ public class DataNode extends Configured
 
   @Override
   public String toString() {
-    return "DataNode{data=" + data + ", localName='" + getMachineName()
+    return "DataNode{data=" + data + ", localName='" + getDisplayName()
         + "', storageID='" + getStorageId() + "', xmitsInProgress="
         + xmitsInProgress.get() + "}";
   }
@@ -1998,7 +1997,6 @@ public class DataNode extends Configured
         + ", targets=[" + msg + "])");
   }
 
-  // ClientDataNodeProtocol implementation
   @Override // ClientDataNodeProtocol
   public long getReplicaVisibleLength(final ExtendedBlock block) throws IOException {
     checkWriteAccess(block);
@@ -2076,8 +2074,7 @@ public class DataNode extends Configured
     storage.finalizeUpgrade(blockPoolId);
   }
 
-  // Determine a Datanode's streaming address
-  public static InetSocketAddress getStreamingAddr(Configuration conf) {
+  static InetSocketAddress getStreamingAddr(Configuration conf) {
     return NetUtils.createSocketAddr(
         conf.get(DFS_DATANODE_ADDRESS_KEY, DFS_DATANODE_ADDRESS_DEFAULT));
   }
@@ -2099,8 +2096,11 @@ public class DataNode extends Configured
     return this.getConf().get("dfs.datanode.info.port");
   }
   
-  public int getInfoPort(){
-    return this.infoServer.getPort();
+  /**
+   * @return the datanode's http port
+   */
+  public int getInfoPort() {
+    return infoServer.getPort();
   }
 
   /**
@@ -2142,7 +2142,7 @@ public class DataNode extends Configured
     blockPoolManager.refreshNamenodes(conf);
   }
 
-  @Override //ClientDatanodeProtocol
+  @Override // ClientDatanodeProtocol
   public void refreshNamenodes() throws IOException {
     conf = new Configuration();
     refreshNamenodes(conf);
@@ -2206,8 +2206,7 @@ public class DataNode extends Configured
   
   @VisibleForTesting
   public DatanodeID getDatanodeId() {
-    return new DatanodeID(getMachineName(), hostName, getStorageId(),
-        infoServer.getPort(), getIpcPort());
+    return id;
   }
 
   /**

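The renames above leave DataNode with three distinct ports, which is the
heart of the de-overloading: getXferPort() for streaming data transfer,
getIpcPort() for RPC, and getInfoPort() for HTTP. A short usage sketch
(accessor names are from the diff above; the MiniDFSCluster setup is
assumed, as in the tests further down):

  DataNode dn = cluster.getDataNodes().get(0);
  InetSocketAddress xfer = dn.getXferAddress(); // replaces getSelfAddr()
  System.out.println("xfer=" + xfer.getPort()   // data transfer port
      + " ipc=" + dn.getIpcPort()               // RPC port
      + " http=" + dn.getInfoPort()             // web UI port
      + " name=" + dn.getDisplayName());        // replaces getMachineName()
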
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java?rev=1308014&r1=1308013&r2=1308014&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java Sun Apr  1 03:41:41 2012
@@ -194,7 +194,7 @@ public class DataStorage extends Storage
     }
     
     // make sure we have storage id set - if not - generate new one
-    createStorageID(datanode.getPort());
+    createStorageID(datanode.getXferPort());
     
     // 3. Update all storages. Some of them might have just been formatted.
     this.writeAll();

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java?rev=1308014&r1=1308013&r2=1308014&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java Sun Apr  1 03:41:41 2012
@@ -168,13 +168,13 @@ class DataXceiver extends Receiver imple
         ++opsProcessed;
       } while (!s.isClosed() && dnConf.socketKeepaliveTimeout > 0);
     } catch (Throwable t) {
-      LOG.error(datanode.getMachineName() + ":DataXceiver error processing " +
+      LOG.error(datanode.getDisplayName() + ":DataXceiver error processing " +
                 ((op == null) ? "unknown" : op.name()) + " operation " +
                 " src: " + remoteAddress +
                 " dest: " + localAddress, t);
     } finally {
       if (LOG.isDebugEnabled()) {
-        LOG.debug(datanode.getMachineName() + ":Number of active connections is: "
+        LOG.debug(datanode.getDisplayName() + ":Number of active connections is: "
             + datanode.getXceiverCount());
       }
       updateCurrentThreadName("Cleaning up");

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java?rev=1308014&r1=1308013&r2=1308014&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java Sun Apr  1 03:41:41 2012
@@ -152,11 +152,11 @@ class DataXceiverServer implements Runna
         // another thread closed our listener socket - that's expected during shutdown,
         // but not in other circumstances
         if (datanode.shouldRun) {
-          LOG.warn(datanode.getMachineName() + ":DataXceiverServer: ", ace);
+          LOG.warn(datanode.getDisplayName() + ":DataXceiverServer: ", ace);
         }
       } catch (IOException ie) {
         IOUtils.closeSocket(s);
-        LOG.warn(datanode.getMachineName() + ":DataXceiverServer: ", ie);
+        LOG.warn(datanode.getDisplayName() + ":DataXceiverServer: ", ie);
       } catch (OutOfMemoryError ie) {
         IOUtils.closeSocket(s);
         // DataNode can run out of memory if there are too many transfers.
@@ -169,7 +169,7 @@ class DataXceiverServer implements Runna
           // ignore
         }
       } catch (Throwable te) {
-        LOG.error(datanode.getMachineName()
+        LOG.error(datanode.getDisplayName()
             + ":DataXceiverServer: Exiting due to: ", te);
         datanode.shouldRun = false;
       }
@@ -177,7 +177,7 @@ class DataXceiverServer implements Runna
     try {
       ss.close();
     } catch (IOException ie) {
-      LOG.warn(datanode.getMachineName()
+      LOG.warn(datanode.getDisplayName()
           + " :DataXceiverServer: close exception", ie);
     }
   }
@@ -188,7 +188,7 @@ class DataXceiverServer implements Runna
     try {
       this.ss.close();
     } catch (IOException ie) {
-      LOG.warn(datanode.getMachineName() + ":DataXceiverServer.kill(): ", ie);
+      LOG.warn(datanode.getDisplayName() + ":DataXceiverServer.kill(): ", ie);
     }
 
     // close all the sockets that were accepted earlier

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java?rev=1308014&r1=1308013&r2=1308014&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java Sun Apr  1 03:41:41 2012
@@ -55,7 +55,7 @@ class UpgradeManagerDatanode extends Upg
     if( ! super.initializeUpgrade())
       return; // distr upgrade is not needed
     DataNode.LOG.info("\n   Distributed upgrade for DataNode " 
-        + dataNode.getMachineName() 
+        + dataNode.getDisplayName() 
         + " version " + getUpgradeVersion() + " to current LV " 
         + HdfsConstants.LAYOUT_VERSION + " is initialized.");
     UpgradeObjectDatanode curUO = (UpgradeObjectDatanode)currentUpgrades.first();
@@ -113,7 +113,7 @@ class UpgradeManagerDatanode extends Upg
     upgradeDaemon = new Daemon(curUO);
     upgradeDaemon.start();
     DataNode.LOG.info("\n   Distributed upgrade for DataNode " 
-        + dataNode.getMachineName() 
+        + dataNode.getDisplayName() 
         + " version " + getUpgradeVersion() + " to current LV " 
         + HdfsConstants.LAYOUT_VERSION + " is started.");
     return true;
@@ -128,7 +128,7 @@ class UpgradeManagerDatanode extends Upg
     if(startUpgrade()) // upgrade started
       return;
     throw new IOException(
-        "Distributed upgrade for DataNode " + dataNode.getMachineName() 
+        "Distributed upgrade for DataNode " + dataNode.getDisplayName() 
         + " version " + getUpgradeVersion() + " to current LV " 
         + HdfsConstants.LAYOUT_VERSION + " cannot be started. "
         + "The upgrade object is not defined.");
@@ -143,7 +143,7 @@ class UpgradeManagerDatanode extends Upg
     currentUpgrades = null;
     upgradeDaemon = null;
     DataNode.LOG.info("\n   Distributed upgrade for DataNode " 
-        + dataNode.getMachineName()
+        + dataNode.getDisplayName()
         + " version " + getUpgradeVersion() + " to current LV " 
         + HdfsConstants.LAYOUT_VERSION + " is complete.");
   }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java?rev=1308014&r1=1308013&r2=1308014&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java Sun Apr  1 03:41:41 2012
@@ -80,9 +80,8 @@ public interface DatanodeProtocol {
    *
    * @see org.apache.hadoop.hdfs.server.namenode.FSNamesystem#registerDatanode(DatanodeRegistration)
    * @param registration datanode registration information
-   * @return updated {@link org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration}, which contains 
-   * new storageID if the datanode did not have one and
-   * registration ID for further communication.
+   * @return the given {@link org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration} with
+   *  updated registration information
    */
   public DatanodeRegistration registerDatanode(DatanodeRegistration registration
       ) throws IOException;

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java?rev=1308014&r1=1308013&r2=1308014&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java Sun Apr  1 03:41:41 2012
@@ -49,8 +49,8 @@ implements Writable, NodeRegistration {
        });
   }
 
-  public StorageInfo storageInfo;
-  public ExportedBlockKeys exportedKeys;
+  private StorageInfo storageInfo;
+  private ExportedBlockKeys exportedKeys;
 
   /**
    * Default constructor.
@@ -83,7 +83,19 @@ implements Writable, NodeRegistration {
   public void setStorageInfo(StorageInfo storage) {
     this.storageInfo = new StorageInfo(storage);
   }
-  
+
+  public StorageInfo getStorageInfo() {
+    return storageInfo;
+  }
+
+  public void setExportedKeys(ExportedBlockKeys keys) {
+    this.exportedKeys = keys;
+  }
+
+  public ExportedBlockKeys getExportedKeys() {
+    return exportedKeys;
+  }
+
   @Override // NodeRegistration
   public int getVersion() {
     return storageInfo.getLayoutVersion();
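
With storageInfo and exportedKeys now private, callers go through the
new accessors; the PBHelper, BPOfferService, and DataNode hunks above
are mechanical conversions to that pattern. A before/after sketch:

  // Before: public field access
  //   PBHelper.convert(registration.storageInfo);
  //   registration.exportedKeys = ExportedBlockKeys.DUMMY_KEYS;
  // After this patch:
  PBHelper.convert(registration.getStorageInfo());
  registration.setExportedKeys(ExportedBlockKeys.DUMMY_KEYS);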

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1308014&r1=1308013&r2=1308014&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Sun Apr  1 03:41:41 2012
@@ -1041,9 +1041,9 @@ public class MiniDFSCluster {
       //      hadoop.security.token.service.use_ip=true
       //since the HDFS does things based on IP:port, we need to add the mapping
       //for IP:port to rackId
-      String ipAddr = dn.getSelfAddr().getAddress().getHostAddress();
+      String ipAddr = dn.getXferAddress().getAddress().getHostAddress();
       if (racks != null) {
-        int port = dn.getSelfAddr().getPort();
+        int port = dn.getXferAddress().getPort();
         LOG.info("Adding node with IP:port : " + ipAddr + ":" + port +
                             " to rack " + racks[i-curDatanodesNum]);
         StaticMapping.addNodeToRack(ipAddr + ":" + port,
@@ -1422,7 +1422,7 @@ public class MiniDFSCluster {
     DataNodeProperties dnprop = dataNodes.remove(i);
     DataNode dn = dnprop.datanode;
     LOG.info("MiniDFSCluster Stopping DataNode " +
-                       dn.getMachineName() +
+                       dn.getDisplayName() +
                        " from a total of " + (dataNodes.size() + 1) + 
                        " datanodes.");
     dn.shutdown();
@@ -1433,16 +1433,13 @@ public class MiniDFSCluster {
   /*
    * Shutdown a datanode by name.
    */
-  public synchronized DataNodeProperties stopDataNode(String name) {
+  public synchronized DataNodeProperties stopDataNode(String dnName) {
     int i;
     for (i = 0; i < dataNodes.size(); i++) {
       DataNode dn = dataNodes.get(i).datanode;
-      // get BP registration
-      DatanodeRegistration dnR = 
-        DataNodeTestUtils.getDNRegistrationByMachineName(dn, name);
-      LOG.info("for name=" + name + " found bp=" + dnR + 
-          "; with dnMn=" + dn.getMachineName());
-      if(dnR != null) {
+      LOG.info("DN name=" + dnName + " found DN=" + dn +
+          " with name=" + dn.getDisplayName());
+      if (dnName.equals(dn.getDatanodeId().getName())) {
         break;
       }
     }
@@ -1472,9 +1469,9 @@ public class MiniDFSCluster {
     String[] args = dnprop.dnArgs;
     Configuration newconf = new HdfsConfiguration(conf); // save cloned config
     if (keepPort) {
-      InetSocketAddress addr = dnprop.datanode.getSelfAddr();
-      conf.set(DFS_DATANODE_ADDRESS_KEY, addr.getAddress().getHostAddress() + ":"
-          + addr.getPort());
+      InetSocketAddress addr = dnprop.datanode.getXferAddress();
+      conf.set(DFS_DATANODE_ADDRESS_KEY, 
+          addr.getAddress().getHostAddress() + ":" + addr.getPort());
     }
     dataNodes.add(new DataNodeProperties(DataNode.createDataNode(args, conf),
         newconf, args));

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java?rev=1308014&r1=1308013&r2=1308014&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java Sun Apr  1 03:41:41 2012
@@ -158,7 +158,7 @@ public class TestConnCache {
             testFile.toString(), 0, FILE_SIZE)
         .getLocatedBlocks().get(0);
     DataNode dn = util.getDataNode(block);
-    InetSocketAddress dnAddr = dn.getSelfAddr();
+    InetSocketAddress dnAddr = dn.getXferAddress();
 
     // Make some sockets to the DN
     Socket[] dnSockets = new Socket[CACHE_SIZE];

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java?rev=1308014&r1=1308013&r2=1308014&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java Sun Apr  1 03:41:41 2012
@@ -50,7 +50,7 @@ public class TestDFSAddressConfig extend
     ArrayList<DataNode> dns = cluster.getDataNodes();
     DataNode dn = dns.get(0);
 
-    String selfSocketAddr = dn.getSelfAddr().toString();
+    String selfSocketAddr = dn.getXferAddress().toString();
     System.out.println("DN Self Socket Addr == " + selfSocketAddr);
     assertTrue(selfSocketAddr.contains("/127.0.0.1:"));
 
@@ -75,7 +75,7 @@ public class TestDFSAddressConfig extend
     dns = cluster.getDataNodes();
     dn = dns.get(0);
 
-    selfSocketAddr = dn.getSelfAddr().toString();
+    selfSocketAddr = dn.getXferAddress().toString();
     System.out.println("DN Self Socket Addr == " + selfSocketAddr);
     // assert that default self socket address is 127.0.0.1
     assertTrue(selfSocketAddr.contains("/127.0.0.1:"));
@@ -100,7 +100,7 @@ public class TestDFSAddressConfig extend
     dns = cluster.getDataNodes();
     dn = dns.get(0);
 
-    selfSocketAddr = dn.getSelfAddr().toString();
+    selfSocketAddr = dn.getXferAddress().toString();
     System.out.println("DN Self Socket Addr == " + selfSocketAddr);
     // assert that default self socket address is 0.0.0.0
     assertTrue(selfSocketAddr.contains("/0.0.0.0:"));

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java?rev=1308014&r1=1308013&r2=1308014&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java Sun Apr  1 03:41:41 2012
@@ -269,7 +269,7 @@ public class TestDatanodeBlockScanner ex
       if (corruptReplica(block, i)) {
         corruptReplicasDNIDs[j++] = i;
         LOG.info("successfully corrupted block " + block + " on node " 
-                 + i + " " + cluster.getDataNodes().get(i).getSelfAddr());
+                 + i + " " + cluster.getDataNodes().get(i).getDisplayName());
       }
     }
     
@@ -281,7 +281,7 @@ public class TestDatanodeBlockScanner ex
     for (int i = numCorruptReplicas - 1; i >= 0 ; i--) {
       LOG.info("restarting node with corrupt replica: position " 
           + i + " node " + corruptReplicasDNIDs[i] + " " 
-          + cluster.getDataNodes().get(corruptReplicasDNIDs[i]).getSelfAddr());
+          + cluster.getDataNodes().get(corruptReplicasDNIDs[i]).getDisplayName());
       cluster.restartDataNode(corruptReplicasDNIDs[i]);
     }
 
@@ -343,7 +343,7 @@ public class TestDatanodeBlockScanner ex
       if (!changeReplicaLength(block, 0, -1)) {
         throw new IOException(
             "failed to find or change length of replica on node 0 "
-            + cluster.getDataNodes().get(0).getSelfAddr());
+            + cluster.getDataNodes().get(0).getDisplayName());
       }      
     } finally {
       cluster.shutdown();

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java?rev=1308014&r1=1308013&r2=1308014&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java Sun Apr  1 03:41:41 2012
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.protocolPB
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.RefreshAuthorizationPolicyProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.RefreshUserMappingsProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.net.NetUtils;
@@ -58,8 +59,9 @@ public class TestIsMethodSupported {
     cluster = (new MiniDFSCluster.Builder(conf))
         .numDataNodes(1).build();
     nnAddress = cluster.getNameNode().getNameNodeAddress();
-    dnAddress = new InetSocketAddress(cluster.getDataNodes().get(0)
-        .getDatanodeId().getHost(), cluster.getDataNodes().get(0).getIpcPort());
+    DataNode dn = cluster.getDataNodes().get(0);
+    dnAddress = new InetSocketAddress(dn.getDatanodeId().getHost(),
+                                      dn.getIpcPort());
   }
 
   @AfterClass

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java?rev=1308014&r1=1308013&r2=1308014&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java Sun Apr  1 03:41:41 2012
@@ -432,8 +432,8 @@ public class TestPBHelper {
         new StorageInfo(), expKeys);
     DatanodeRegistrationProto proto = PBHelper.convert(reg);
     DatanodeRegistration reg2 = PBHelper.convert(proto);
-    compare(reg.storageInfo, reg2.storageInfo);
-    compare(reg.exportedKeys, reg2.exportedKeys);
+    compare(reg.getStorageInfo(), reg2.getStorageInfo());
+    compare(reg.getExportedKeys(), reg2.getExportedKeys());
     compare((DatanodeID)reg, (DatanodeID)reg2);
   }
   

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java?rev=1308014&r1=1308013&r2=1308014&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java Sun Apr  1 03:41:41 2012
@@ -36,12 +36,7 @@ import com.google.common.base.Preconditi
  * Utility class for accessing package-private DataNode information during tests.
  *
  */
-public class DataNodeTestUtils {
-  public static DatanodeRegistration 
-  getDNRegistrationByMachineName(DataNode dn, String mName) {
-    return dn.getDNRegistrationByMachineName(mName);
-  }
-  
+public class DataNodeTestUtils {  
   public static DatanodeRegistration 
   getDNRegistrationForBP(DataNode dn, String bpid) throws IOException {
     return dn.getDNRegistrationForBP(bpid);

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java?rev=1308014&r1=1308013&r2=1308014&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java Sun Apr  1 03:41:41 2012
@@ -383,7 +383,7 @@ public class SimulatedFSDataset implemen
   public SimulatedFSDataset(DataNode datanode, DataStorage storage,
       Configuration conf) {
     if (storage != null) {
-      storage.createStorageID(datanode.getPort());
+      storage.createStorageID(datanode.getXferPort());
       this.storageId = storage.getStorageID();
     } else {
       this.storageId = "unknownStorageId" + new Random().nextInt();

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java?rev=1308014&r1=1308013&r2=1308014&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java Sun Apr  1 03:41:41 2012
@@ -679,8 +679,9 @@ public class TestBlockReport {
     assertEquals(datanodes.size(), 2);
 
     if(LOG.isDebugEnabled()) {
+      int lastDn = datanodes.size() - 1;
       LOG.debug("New datanode "
-          + cluster.getDataNodes().get(datanodes.size() - 1).getMachineName() 
+          + cluster.getDataNodes().get(lastDn).getDisplayName() 
           + " has been started");
     }
     if (waitReplicas) DFSTestUtil.waitReplication(fs, filePath, REPL_FACTOR);

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java?rev=1308014&r1=1308013&r2=1308014&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java Sun Apr  1 03:41:41 2012
@@ -183,7 +183,7 @@ public class TestDeleteBlockPool {
       Assert.assertEquals(1, dn1.getAllBpOs().length);
       
       DFSAdmin admin = new DFSAdmin(nn1Conf);
-      String dn1Address = dn1.getSelfAddr().getHostName()+":"+dn1.getIpcPort();
+      String dn1Address = dn1.getDatanodeId().getHost() + ":" + dn1.getIpcPort();
       String[] args = { "-deleteBlockPool", dn1Address, bpid2 };
       
       int ret = admin.run(args);

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java?rev=1308014&r1=1308013&r2=1308014&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java Sun Apr  1 03:41:41 2012
@@ -136,7 +136,7 @@ public class TestDiskError {
     DataNode datanode = cluster.getDataNodes().get(sndNode);
     
     // replicate the block to the second datanode
-    InetSocketAddress target = datanode.getSelfAddr();
+    InetSocketAddress target = datanode.getXferAddress();
     Socket s = new Socket(target.getAddress(), target.getPort());
     // write the header.
     DataOutputStream out = new DataOutputStream(s.getOutputStream());