Posted to common-commits@hadoop.apache.org by dh...@apache.org on 2007/12/05 20:38:53 UTC

svn commit: r601484 - in /lucene/hadoop/trunk: ./ conf/ src/java/org/apache/hadoop/dfs/ src/java/org/apache/hadoop/fs/ src/java/org/apache/hadoop/ipc/ src/java/org/apache/hadoop/mapred/ src/java/org/apache/hadoop/net/ src/test/org/apache/hadoop/dfs/ sr...

Author: dhruba
Date: Wed Dec  5 11:38:51 2007
New Revision: 601484

URL: http://svn.apache.org/viewvc?rev=601484&view=rev
Log:
HADOOP-2185.  RPC Server uses any available port if the specified port is
zero. Otherwise it uses the specified port.
(Konstantin Shvachko via dhruba)
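
For readers skimming the diff: the convention being standardized here is the
ordinary java.net one, where binding port 0 asks the kernel for a free port.
A minimal, self-contained sketch of that contract (the class name and the
sample ports are illustrative, not part of this patch):

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.net.ServerSocket;

    // Sketch of the contract this patch adopts: a nonzero port either
    // binds or fails with java.net.BindException; port 0 always picks
    // a free port, which getLocalPort() then reveals.
    public class PortZeroDemo {
      public static void main(String[] args) throws IOException {
        ServerSocket fixed = new ServerSocket();
        fixed.bind(new InetSocketAddress("127.0.0.1", 50013)); // may throw BindException
        ServerSocket any = new ServerSocket();
        any.bind(new InetSocketAddress("127.0.0.1", 0));       // never conflicts
        System.out.println("ephemeral port: " + any.getLocalPort());
        any.close();
        fixed.close();
      }
    }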


Added:
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestHDFSServerPorts.java   (with props)
    lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestMRServerPorts.java   (with props)
Modified:
    lucene/hadoop/trunk/CHANGES.txt
    lucene/hadoop/trunk/conf/hadoop-default.xml
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/BlockCrcUpgrade.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSck.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/HftpFileSystem.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/JspHelper.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/SecondaryNameNode.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/FileSystem.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/ipc/Client.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/ipc/Server.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/StatusHttpServer.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskTracker.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/net/NetUtils.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFS.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFSNamespaceLogging.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestBlockReplacement.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSShellGenericOptions.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDataTransferProtocol.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/MiniMRCluster.java
    lucene/hadoop/trunk/src/webapps/datanode/tail.jsp

Modified: lucene/hadoop/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/CHANGES.txt?rev=601484&r1=601483&r2=601484&view=diff
==============================================================================
--- lucene/hadoop/trunk/CHANGES.txt (original)
+++ lucene/hadoop/trunk/CHANGES.txt Wed Dec  5 11:38:51 2007
@@ -25,6 +25,10 @@
 
     HADOOP-2184.  RPC Support for user permissions and authentication.
     (Raghu Angadi via dhruba)
+
+    HADOOP-2185.  RPC Server uses any available port if the specified port is
+    zero. Otherwise it uses the specified port.
+    (Konstantin Shvachko via dhruba)
     
   NEW FEATURES
 

Modified: lucene/hadoop/trunk/conf/hadoop-default.xml
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/conf/hadoop-default.xml?rev=601484&r1=601483&r2=601484&view=diff
==============================================================================
--- lucene/hadoop/trunk/conf/hadoop-default.xml (original)
+++ lucene/hadoop/trunk/conf/hadoop-default.xml Wed Dec  5 11:38:51 2007
@@ -191,48 +191,38 @@
 </property>
 
 <property>
-  <name>dfs.secondary.info.port</name>
-  <value>50090</value>
-  <description>The base number for the Secondary namenode info port.
-  </description>
-</property>
-
-<property>
-  <name>dfs.secondary.info.bindAddress</name>
-  <value>0.0.0.0</value>
+  <name>dfs.secondary.http.bindAddress</name>
+  <value>0.0.0.0:50090</value>
   <description>
-    The address where the secondary namenode web UI will listen to.
+    The secondary namenode http server bind address and port.
+    If the port is 0 then the server will start on a free port.
   </description>
 </property>
 
 <property>
   <name>dfs.datanode.bindAddress</name>
-  <value>0.0.0.0</value>
+  <value>0.0.0.0:50010</value>
   <description>
-    the address where the datanode will listen to.
+    The datanode server bind address and port.
+    If the port is 0 then the server will start on a free port.
   </description>
 </property>
 
 <property>
-  <name>dfs.datanode.port</name>
-  <value>50010</value>
-  <description>The port number that the dfs datanode server uses as a starting 
-	       point to look for a free port to listen on.
-</description>
-</property>
-
-<property>
-  <name>dfs.info.bindAddress</name>
-  <value>0.0.0.0</value>
+  <name>dfs.datanode.http.bindAddress</name>
+  <value>0.0.0.0:50075</value>
   <description>
-    the address where the dfs namenode web ui will listen on.
+    The datanode http server bind address and port.
+    If the port is 0 then the server will start on a free port.
   </description>
 </property>
 
 <property>
-  <name>dfs.info.port</name>
-  <value>50070</value>
-  <description>The base port number for the dfs namenode web ui.
+  <name>dfs.http.bindAddress</name>
+  <value>0.0.0.0:50070</value>
+  <description>
+    The dfs namenode web ui bind address and port.
+    If the port is 0 then the server will start on a free port.
   </description>
 </property>
 
@@ -458,17 +448,11 @@
 </property>
 
 <property>
-  <name>mapred.job.tracker.info.bindAddress</name>
-  <value>0.0.0.0</value>
+  <name>mapred.job.tracker.http.bindAddress</name>
+  <value>0.0.0.0:50030</value>
   <description>
-    the address where the job tracker info webserver will be binded on.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.tracker.info.port</name>
-  <value>50030</value>
-  <description>The port that the MapReduce job tracker info webserver runs at.
+    The job tracker http server bind address and port.
+    If the port is 0 then the server will start on a free port.
   </description>
 </property>
 
@@ -483,7 +467,7 @@
 
 <property>
   <name>mapred.task.tracker.report.bindAddress</name>
-  <value>127.0.0.1</value>
+  <value>127.0.0.1:0</value>
   <description>The interface that task processes use to communicate
   with their parent tasktracker process.</description>
 </property>
@@ -709,17 +693,11 @@
 </property>
 
 <property>
-  <name>tasktracker.http.bindAddress</name>
-  <value>0.0.0.0</value>
+  <name>mapred.task.tracker.http.bindAddress</name>
+  <value>0.0.0.0:50060</value>
   <description>
-    the address where the task tracker http server will be binded on.
-  </description>
-</property>
-
-<property>
-  <name>tasktracker.http.port</name>
-  <value>50060</value>
-  <description>The default port for task trackers to use as their http server.
+    The task tracker http server bind address and port.
+    If the port is 0 then the server will start on a free port.
   </description>
 </property>
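
The net effect of this file's changes is that each former bindAddress/port
pair collapses into a single host:port key. A hedged sketch of how a test
might request ephemeral ports through the new keys (the key names are taken
from this diff; the wrapper class is illustrative):

    import org.apache.hadoop.conf.Configuration;

    public class EphemeralPortConfig {
      // Build a configuration in which every server listed here picks
      // a free port at startup (port 0 = ephemeral, per this patch).
      public static Configuration ephemeralConf() {
        Configuration conf = new Configuration();
        conf.set("dfs.http.bindAddress", "0.0.0.0:0");
        conf.set("dfs.datanode.bindAddress", "0.0.0.0:0");
        conf.set("dfs.datanode.http.bindAddress", "0.0.0.0:0");
        conf.set("mapred.job.tracker.http.bindAddress", "0.0.0.0:0");
        conf.set("mapred.task.tracker.http.bindAddress", "0.0.0.0:0");
        return conf;
      }
    }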
 

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/BlockCrcUpgrade.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/BlockCrcUpgrade.java?rev=601484&r1=601483&r2=601484&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/BlockCrcUpgrade.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/BlockCrcUpgrade.java Wed Dec  5 11:38:51 2007
@@ -26,6 +26,7 @@
 import org.apache.hadoop.io.*;
 import org.apache.hadoop.io.retry.*;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Daemon;
 
@@ -179,7 +180,7 @@
       dnStr += dn.getName() + " ";
       Socket dnSock = null;
       try {
-        InetSocketAddress dnAddr = DataNode.createSocketAddr(dn.getName());
+        InetSocketAddress dnAddr = NetUtils.createSocketAddr(dn.getName());
         dnSock = new Socket();
         dnSock.connect(dnAddr, FSConstants.READ_TIMEOUT);
         dnSock.setSoTimeout(FSConstants.READ_TIMEOUT);
@@ -843,7 +844,7 @@
     
     try {
       do {
-        InetSocketAddress dnAddr = DataNode.createSocketAddr(dnInfo.getName());
+        InetSocketAddress dnAddr = NetUtils.createSocketAddr(dnInfo.getName());
         dnSock = new Socket();
         dnSock.connect(dnAddr, FSConstants.READ_TIMEOUT);
         dnSock.setSoTimeout(FSConstants.READ_TIMEOUT);

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java?rev=601484&r1=601483&r2=601484&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java Wed Dec  5 11:38:51 2007
@@ -1119,7 +1119,8 @@
         DatanodeInfo[] nodes = block.getLocations();
         try {
           DatanodeInfo chosenNode = bestNode(nodes, deadNodes);
-          InetSocketAddress targetAddr = DataNode.createSocketAddr(chosenNode.getName());
+          InetSocketAddress targetAddr = 
+                            NetUtils.createSocketAddr(chosenNode.getName());
           return new DNAddrPair(chosenNode, targetAddr);
         } catch (IOException ie) {
           String blockInfo = block.getBlock() + " file=" + src;
@@ -1485,7 +1486,7 @@
         //
         // Connect to first DataNode in the list.  Abort if this fails.
         //
-        InetSocketAddress target = DataNode.createSocketAddr(nodes[0].getName());
+        InetSocketAddress target = NetUtils.createSocketAddr(nodes[0].getName());
         try {
           s = socketFactory.createSocket();
           s.connect(target, READ_TIMEOUT);

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSck.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSck.java?rev=601484&r1=601483&r2=601484&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSck.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSck.java Wed Dec  5 11:38:51 2007
@@ -20,7 +20,6 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
-import java.net.InetSocketAddress;
 import java.net.URL;
 import java.net.URLConnection;
 import java.net.URLEncoder;
@@ -67,10 +66,7 @@
   }
   
   private String getInfoServer() throws IOException {
-    InetSocketAddress addr = 
-      DataNode.createSocketAddr(getConf().get("fs.default.name"));
-    int infoPort = getConf().getInt("dfs.info.port", 50070);
-    return addr.getHostName() + ":" + infoPort;
+    return getConf().get("dfs.http.bindAddress", "0.0.0.0:50070");
   }
   
   /**

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java?rev=601484&r1=601483&r2=601484&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java Wed Dec  5 11:38:51 2007
@@ -19,13 +19,13 @@
 
 import org.apache.commons.logging.*;
 
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.*;
 import org.apache.hadoop.conf.*;
 import org.apache.hadoop.metrics.MetricsUtil;
 import org.apache.hadoop.net.DNS;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.util.*;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -83,30 +83,12 @@
   public static final Log LOG = LogFactory.getLog("org.apache.hadoop.dfs.DataNode");
 
   /**
-   * Util method to build socket addr from either:
-   *   <host>:<post>
-   *   <fs>://<host>:<port>/<path>
+   * Use {@link NetUtils#createSocketAddr(String)} instead.
    */
+  @Deprecated
   public static InetSocketAddress createSocketAddr(String target
                                                    ) throws IOException {
-    int colonIndex = target.indexOf(':');
-    if (colonIndex < 0) {
-      throw new RuntimeException("Not a host:port pair: " + target);
-    }
-    String hostname;
-    int port;
-    if (!target.contains("/")) {
-      // must be the old style <host>:<port>
-      hostname = target.substring(0, colonIndex);
-      port = Integer.parseInt(target.substring(colonIndex + 1));
-    } else {
-      // a new uri
-      URI addr = new Path(target).toUri();
-      hostname = addr.getHost();
-      port = addr.getPort();
-    }
-
-    return new InetSocketAddress(hostname, port);
+    return NetUtils.createSocketAddr(target);
   }
 
   DatanodeProtocol namenode = null;
@@ -242,13 +224,15 @@
     machineName = DNS.getDefaultHost(
                                      conf.get("dfs.datanode.dns.interface","default"),
                                      conf.get("dfs.datanode.dns.nameserver","default"));
-    InetSocketAddress nameNodeAddr = createSocketAddr(
-                                                      conf.get("fs.default.name", "local"));
+    InetSocketAddress nameNodeAddr = NetUtils.createSocketAddr(
+                                     conf.get("fs.default.name", "local"));
     
     this.defaultBytesPerChecksum = 
        Math.max(conf.getInt("io.bytes.per.checksum", 512), 1); 
     
-    int tmpPort = conf.getInt("dfs.datanode.port", 50010);
+    String address = conf.get("dfs.datanode.bindAddress", "0.0.0.0:50010");
+    InetSocketAddress socAddr = NetUtils.createSocketAddr(address);
+    int tmpPort = socAddr.getPort();
     storage = new DataStorage();
     // construct registration
     this.dnRegistration = new DatanodeRegistration(machineName + ":" + tmpPort);
@@ -289,19 +273,11 @@
     }
       
     // find free port
-    ServerSocket ss = null;
-    String bindAddress = conf.get("dfs.datanode.bindAddress", "0.0.0.0");
-    while (ss == null) {
-      try {
-        ss = new ServerSocket(tmpPort, 0, InetAddress.getByName(bindAddress));
-        LOG.info("Opened server at " + tmpPort);
-      } catch (IOException ie) {
-        LOG.info("Could not open server at " + tmpPort + ", trying new port");
-        tmpPort++;
-      }
-    }
+    ServerSocket ss = new ServerSocket(tmpPort, 0, socAddr.getAddress());
     // adjust machine name with the actual port
+    tmpPort = ss.getLocalPort();
     this.dnRegistration.setName(machineName + ":" + tmpPort);
+    LOG.info("Opened server at " + tmpPort);
       
     this.dataXceiveServer = new Daemon(new DataXceiveServer(ss));
 
@@ -313,9 +289,11 @@
     DataNode.nameNodeAddr = nameNodeAddr;
 
     //create a servlet to serve full-file content
-    int infoServerPort = conf.getInt("dfs.datanode.info.port", 50075);
-    String infoServerBindAddress = conf.get("dfs.datanode.info.bindAddress", "0.0.0.0");
-    this.infoServer = new StatusHttpServer("datanode", infoServerBindAddress, infoServerPort, true);
+    String infoAddr = conf.get("dfs.datanode.http.bindAddress", "0.0.0.0:50075");
+    InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
+    String infoHost = infoSocAddr.getHostName();
+    int tmpInfoPort = infoSocAddr.getPort();
+    this.infoServer = new StatusHttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0);
     this.infoServer.addServlet(null, "/streamFile/*", StreamFile.class);
     this.infoServer.start();
     // adjust info port
@@ -380,7 +358,7 @@
     return "<namenode>";
   }
 
-  private void setNewStorageID(DatanodeRegistration dnReg) {
+  static void setNewStorageID(DatanodeRegistration dnReg) {
     /* Return 
      * "DS-randInt-ipaddr-currentTimeMillis"
      * It is considered extremely rare for all these numbers to match
@@ -965,7 +943,7 @@
           InetSocketAddress mirrorTarget = null;
           // Connect to backup machine
           mirrorNode = targets[0].getName();
-          mirrorTarget = createSocketAddr(mirrorNode);
+          mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
           mirrorSock = new Socket();
           try {
             mirrorSock.connect(mirrorTarget, READ_TIMEOUT);
@@ -1098,7 +1076,7 @@
         blockSender = new BlockSender(block, 0, -1, false, false);
 
         // get the output stream to the target
-        InetSocketAddress targetAddr = createSocketAddr(target.getName());
+        InetSocketAddress targetAddr = NetUtils.createSocketAddr(target.getName());
         targetSock = new Socket();
         targetSock.connect(targetAddr, READ_TIMEOUT);
         targetSock.setSoTimeout(READ_TIMEOUT);
@@ -1661,7 +1639,7 @@
       
       try {
         InetSocketAddress curTarget = 
-          createSocketAddr(targets[0].getName());
+          NetUtils.createSocketAddr(targets[0].getName());
         sock = new Socket();  
         sock.connect(curTarget, READ_TIMEOUT);
         sock.setSoTimeout(targets.length*READ_TIMEOUT);
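
The DataNode hunks above replace the old try-a-port-then-increment scan with
a single bind followed by a read-back of the actual port. A minimal sketch of
that pattern, with names following the diff (the wrapper class and method are
illustrative):

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.net.ServerSocket;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.net.NetUtils;

    public class BindPatternSketch {
      static ServerSocket bindDataPort(Configuration conf) throws IOException {
        String address = conf.get("dfs.datanode.bindAddress", "0.0.0.0:50010");
        InetSocketAddress socAddr = NetUtils.createSocketAddr(address);
        // Bind once on the configured port; 0 means the OS picks one.
        ServerSocket ss =
            new ServerSocket(socAddr.getPort(), 0, socAddr.getAddress());
        // ss.getLocalPort() now reports the actual port, ephemeral or not.
        return ss;
      }
    }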

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java?rev=601484&r1=601483&r2=601484&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java Wed Dec  5 11:38:51 2007
@@ -23,6 +23,7 @@
 import org.apache.hadoop.dfs.BlocksWithLocations.BlockWithLocations;
 import org.apache.hadoop.util.*;
 import org.apache.hadoop.mapred.StatusHttpServer;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.ipc.Server;
@@ -32,6 +33,7 @@
 import java.io.FileWriter;
 import java.io.IOException;
 import java.io.PrintWriter;
+import java.net.InetSocketAddress;
 import java.util.*;
 import java.util.Map.Entry;
 import java.text.SimpleDateFormat;
@@ -119,7 +121,6 @@
   //
   StatusHttpServer infoServer;
   int infoPort;
-  String infoBindAddress;
   Date startTime;
     
   //
@@ -205,17 +206,26 @@
 
 
   /**
-   * dirs is a list of directories where the filesystem directory state 
-   * is stored
+   * FSNamesystem constructor.
    */
-  public FSNamesystem(String hostname,
-                      int port,
-                      NameNode nn, Configuration conf) throws IOException {
+  FSNamesystem(NameNode nn, Configuration conf) throws IOException {
     fsNamesystemObject = this;
+    try {
+      initialize(nn, conf);
+    } catch(IOException e) {
+      close();
+      throw e;
+    }
+  }
+
+  /**
+   * Initialize FSNamesystem.
+   */
+  private void initialize(NameNode nn, Configuration conf) throws IOException {
     setConfigurationParameters(conf);
 
-    this.localMachine = hostname;
-    this.port = port;
+    this.localMachine = nn.getNameNodeAddress().getHostName();
+    this.port = nn.getNameNodeAddress().getPort();
     this.dir = new FSDirectory(this, conf);
     StartupOption startOpt = NameNode.getStartupOption(conf);
     this.dir.loadFSImage(getNamespaceDirs(conf), startOpt);
@@ -223,7 +233,7 @@
     setBlockTotal();
     pendingReplications = new PendingReplicationBlocks(
                             conf.getInt("dfs.replication.pending.timeout.sec", 
-                                        -1) * 1000);
+                                        -1) * 1000L);
     this.hbthread = new Daemon(new HeartbeatMonitor());
     this.lmthread = new Daemon(new LeaseMonitor());
     this.replthread = new Daemon(new ReplicationMonitor());
@@ -232,15 +242,18 @@
     replthread.start();
     this.systemStart = now();
     this.startTime = new Date(systemStart); 
-        
+
     this.hostsReader = new HostsFileReader(conf.get("dfs.hosts",""),
                                            conf.get("dfs.hosts.exclude",""));
     this.dnthread = new Daemon(new DecommissionedMonitor());
     dnthread.start();
 
-    this.infoPort = conf.getInt("dfs.info.port", 50070);
-    this.infoBindAddress = conf.get("dfs.info.bindAddress", "0.0.0.0");
-    this.infoServer = new StatusHttpServer("dfs", infoBindAddress, infoPort, false);
+    String infoAddr = conf.get("dfs.http.bindAddress", "0.0.0.0:50070");
+    InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
+    String infoHost = infoSocAddr.getHostName();
+    int tmpInfoPort = infoSocAddr.getPort();
+    this.infoServer = new StatusHttpServer("dfs", infoHost, tmpInfoPort, 
+                                            tmpInfoPort == 0);
     this.infoServer.setAttribute("name.system", this);
     this.infoServer.setAttribute("name.node", nn);
     this.infoServer.setAttribute("name.conf", conf);
@@ -249,11 +262,11 @@
     this.infoServer.addServlet("listPaths", "/listPaths/*", ListPathsServlet.class);
     this.infoServer.addServlet("data", "/data/*", FileDataServlet.class);
     this.infoServer.start();
-        
+
     // The web-server port can be ephemeral... ensure we have the correct info
     this.infoPort = this.infoServer.getPort();
-    conf.setInt("dfs.info.port", this.infoPort); 
-    LOG.info("Web-server up at: " + conf.get("dfs.info.port"));
+    conf.set("dfs.http.bindAddress", infoHost + ":" + infoPort); 
+    LOG.info("Web-server up at: " + conf.get("dfs.http.bindAddress"));
   }
 
   static Collection<File> getNamespaceDirs(Configuration conf) {
@@ -563,7 +576,7 @@
    * total size is <code>size</code>
    * 
    * @param datanode on which blocks are located
-   * @parm size total size of blocks
+   * @param size total size of blocks
    */
   synchronized BlocksWithLocations getBlocks(DatanodeID datanode, long size)
       throws IOException {
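
The web-server idiom above recurs throughout this patch: findPort is passed
as true only when the configured port is 0, and the resolved address is
written back into the configuration so later readers see the real port. A
hedged sketch of the idiom in isolation (constructor and method names as used
in this diff; the wrapper class is illustrative):

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapred.StatusHttpServer;
    import org.apache.hadoop.net.NetUtils;

    public class HttpServerIdiom {
      static void startInfoServer(Configuration conf) throws IOException {
        String infoAddr = conf.get("dfs.http.bindAddress", "0.0.0.0:50070");
        InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
        String infoHost = infoSocAddr.getHostName();
        int tmpInfoPort = infoSocAddr.getPort();
        // Scan for a free port only when an ephemeral port was requested.
        StatusHttpServer infoServer =
            new StatusHttpServer("dfs", infoHost, tmpInfoPort, tmpInfoPort == 0);
        infoServer.start();
        // Publish the actual port back so clients read the right value.
        conf.set("dfs.http.bindAddress", infoHost + ":" + infoServer.getPort());
      }
    }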

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/HftpFileSystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/HftpFileSystem.java?rev=601484&r1=601483&r2=601484&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/HftpFileSystem.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/HftpFileSystem.java Wed Dec  5 11:38:51 2007
@@ -39,6 +39,7 @@
 import org.xml.sax.helpers.XMLReaderFactory;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FSInputStream;
@@ -67,9 +68,11 @@
   public void initialize(URI name, Configuration conf) throws IOException {
     setConf(conf);
     this.fshostname = name.getHost();
-    this.fsport = name.getPort() != -1
-      ? name.getPort()
-      : conf.getInt("dfs.info.port", -1);
+    this.fsport = name.getPort();
+    if(fsport >= 0)
+      return;
+    String infoAddr = conf.get("dfs.http.bindAddress", "0.0.0.0:50070");
+    this.fsport = NetUtils.createSocketAddr(infoAddr).getPort();
   }
 
   @Override

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/JspHelper.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/JspHelper.java?rev=601484&r1=601483&r2=601484&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/JspHelper.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/JspHelper.java Wed Dec  5 11:38:51 2007
@@ -34,6 +34,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.dfs.FSConstants.UpgradeAction;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.net.NetUtils;
 
 public class JspHelper {
   static FSNamesystem fsn = null;
@@ -73,7 +74,8 @@
       chosenNode = nodes[index];
 
       //just ping to check whether the node is alive
-      InetSocketAddress targetAddr = DataNode.createSocketAddr(chosenNode.getHost() + ":" + chosenNode.getInfoPort());
+      InetSocketAddress targetAddr = NetUtils.createSocketAddr(
+          chosenNode.getHost() + ":" + chosenNode.getInfoPort());
         
       try {
         s = new Socket();

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java?rev=601484&r1=601483&r2=601484&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java Wed Dec  5 11:38:51 2007
@@ -24,6 +24,7 @@
 import org.apache.hadoop.ipc.*;
 import org.apache.hadoop.conf.*;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
 
 import java.io.*;
@@ -108,16 +109,14 @@
   /**
    * Initialize the server
    * 
-   * @param hostname which hostname to bind to
-   * @param port the port number to bind to
+   * @param address hostname:port to bind to
    * @param conf the configuration
    */
-  private void init(String hostname, int port, 
-                    Configuration conf
-                    ) throws IOException {
+  private void initialize(String address, Configuration conf) throws IOException {
+    InetSocketAddress socAddr = NetUtils.createSocketAddr(address);
     this.handlerCount = conf.getInt("dfs.namenode.handler.count", 10);
-    this.server = RPC.getServer(this, hostname, port, handlerCount, 
-                                false, conf);
+    this.server = RPC.getServer(this, socAddr.getHostName(), socAddr.getPort(),
+                                handlerCount, false, conf);
 
     // The rpc-server port can be ephemeral... ensure we have the correct info
     this.nameNodeAddress = this.server.getListenerAddress(); 
@@ -125,19 +124,13 @@
     LOG.info("Namenode up at: " + this.nameNodeAddress);
 
     myMetrics = new NameNodeMetrics(conf);
-        
-    try {
-      this.namesystem = new FSNamesystem(this.nameNodeAddress.getHostName(), this.nameNodeAddress.getPort(), this, conf);
-      this.server.start();  //start RPC server   
-  
-      this.emptier = new Thread(new Trash(conf).getEmptier(), "Trash Emptier");
-      this.emptier.setDaemon(true);
-      this.emptier.start();
-    } catch (IOException e) {
-      this.server.stop();
-      throw e;
-    }
-    
+
+    this.namesystem = new FSNamesystem(this, conf);
+    this.server.start();  //start RPC server   
+
+    this.emptier = new Thread(new Trash(conf).getEmptier(), "Trash Emptier");
+    this.emptier.setDaemon(true);
+    this.emptier.start();
   }
     
   /**
@@ -163,9 +156,7 @@
    * @throws IOException
    */
   public NameNode(Configuration conf) throws IOException {
-    InetSocketAddress addr = 
-      DataNode.createSocketAddr(conf.get("fs.default.name"));
-    init(addr.getHostName(), addr.getPort(), conf);
+    this(conf.get("fs.default.name"), conf);
   }
 
   /**
@@ -175,10 +166,15 @@
    * the NameNode is up and running if the user passes the port as
    * <code>zero</code>.  
    */
-  public NameNode(String bindAddress, int port, 
+  public NameNode(String bindAddress,
                   Configuration conf
                   ) throws IOException {
-    init(bindAddress, port, conf);
+    try {
+      initialize(bindAddress, conf);
+    } catch (IOException e) {
+      this.stop();
+      throw e;
+    }
   }
 
   /**
@@ -196,12 +192,12 @@
    * Stop all NameNode threads and wait for all to finish.
    */
   public void stop() {
-    if (!stopRequested) {
-      stopRequested = true;
-      namesystem.close();
-      emptier.interrupt();
-      server.stop();
-    }
+    if (stopRequested)
+      return;
+    stopRequested = true;
+    if(namesystem != null) namesystem.close();
+    if(emptier != null) emptier.interrupt();
+    if(server != null) server.stop();
   }
   
   /////////////////////////////////////////////////////
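
The same two-step appears for the RPC server itself: request the configured
port (possibly 0), then take the authoritative endpoint from the listener
rather than from the configuration. A hedged sketch (the RPC.getServer
signature and getListenerAddress come from this diff; the wrapper class is
illustrative):

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ipc.RPC;
    import org.apache.hadoop.ipc.Server;
    import org.apache.hadoop.net.NetUtils;

    public class RpcPortSketch {
      // Start an RPC server for 'instance' on address ("host:port");
      // with port 0 the returned address carries the port that was chosen.
      static InetSocketAddress startRpc(Object instance, String address,
                                        Configuration conf) throws IOException {
        InetSocketAddress socAddr = NetUtils.createSocketAddr(address);
        Server server = RPC.getServer(instance, socAddr.getHostName(),
                                      socAddr.getPort(), 10, false, conf);
        server.start();
        return server.getListenerAddress();
      }
    }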

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java?rev=601484&r1=601483&r2=601484&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java Wed Dec  5 11:38:51 2007
@@ -31,6 +31,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.dfs.FSConstants.DatanodeReportType;
 
@@ -264,8 +265,8 @@
   
   private void lostFoundMove(DFSFileInfo file, LocatedBlocks blocks)
     throws IOException {
-    DFSClient dfs = new DFSClient(DataNode.createSocketAddr(
-                                                            conf.get("fs.default.name", "local")), conf);
+    DFSClient dfs = new DFSClient(NetUtils.createSocketAddr(
+                              conf.get("fs.default.name", "local")), conf);
     if (!lfInited) {
       lostFoundInit(dfs);
     }
@@ -296,12 +297,12 @@
         if (fos == null) {
           fos = dfs.create(target + "/" + chain, true);
           if (fos != null) chain++;
-        }
-        if (fos == null) {
-          LOG.warn(errmsg + ": could not store chain " + chain);
-          // perhaps we should bail out here...
-          // return;
-          continue;
+          else {
+            LOG.warn(errmsg + ": could not store chain " + chain);
+            // perhaps we should bail out here...
+            // return;
+            continue;
+          }
         }
         
         // copy the block. It's a pity it's not abstracted from DFSInputStream ...
@@ -344,7 +345,7 @@
       
       try {
         chosenNode = bestNode(dfs, lblock.getLocations(), deadNodes);
-        targetAddr = DataNode.createSocketAddr(chosenNode.getName());
+        targetAddr = NetUtils.createSocketAddr(chosenNode.getName());
       }  catch (IOException ie) {
         if (failures >= DFSClient.MAX_BLOCK_ACQUIRE_FAILURES) {
           throw new IOException("Could not obtain block " + lblock);
@@ -421,22 +422,10 @@
         (nodes.length - deadNodes.size() < 1)) {
       throw new IOException("No live nodes contain current block");
     }
-    DatanodeInfo chosenNode = null;
-    for (int i = 0; i < nodes.length; i++) {
-      if (deadNodes.contains(nodes[i])) {
-        continue;
-      }
-      String nodename = nodes[i].getName();
-      int colon = nodename.indexOf(':');
-      if (colon >= 0) {
-        nodename = nodename.substring(0, colon);
-      }
-    }
-    if (chosenNode == null) {
-      do  {
-        chosenNode = nodes[r.nextInt(nodes.length)];
-      } while (deadNodes.contains(chosenNode));
-    }
+    DatanodeInfo chosenNode;
+    do {
+      chosenNode = nodes[r.nextInt(nodes.length)];
+    } while (deadNodes.contains(chosenNode));
     return chosenNode;
   }
   

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/SecondaryNameNode.java?rev=601484&r1=601483&r2=601484&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/SecondaryNameNode.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/SecondaryNameNode.java Wed Dec  5 11:38:51 2007
@@ -91,8 +91,8 @@
     // Create connection to the namenode.
     //
     shouldRun = true;
-    nameNodeAddr = DataNode.createSocketAddr(
-                                             conf.get("fs.default.name", "local"));
+    nameNodeAddr = NetUtils.createSocketAddr(
+                              conf.get("fs.default.name", "local"));
     this.conf = conf;
     this.namenode =
         (ClientProtocol) RPC.waitForProxy(ClientProtocol.class,
@@ -101,9 +101,13 @@
     //
     // initialize the webserver for uploading files.
     //
-    infoPort = conf.getInt("dfs.secondary.info.port", 50090);
-    infoBindAddress = conf.get("dfs.secondary.info.bindAddress", "0.0.0.0");
-    infoServer = new StatusHttpServer("dfs", infoBindAddress, infoPort, true);
+    String infoAddr = conf.get("dfs.secondary.http.bindAddress", 
+                                "0.0.0.0:50090");
+    InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
+    infoBindAddress = infoSocAddr.getHostName();
+    int tmpInfoPort = infoSocAddr.getPort();
+    infoServer = new StatusHttpServer("dfs", infoBindAddress, tmpInfoPort, 
+                                      tmpInfoPort == 0);
     infoServer.setAttribute("name.secondary", this);
     this.infoServer.setAttribute("name.conf", conf);
     infoServer.addServlet("getimage", "/getimage", GetImageServlet.class);
@@ -111,8 +115,9 @@
 
     // The web-server port can be ephemeral... ensure we have the correct info
     infoPort = infoServer.getPort();
-    conf.setInt("dfs.secondary.info.port", infoPort);
-    LOG.info("Secondary Web-server up at: " + conf.get("dfs.secondary.info.port"));
+    conf.set("dfs.secondary.http.bindAddress", infoBindAddress + ":" +infoPort); 
+    LOG.info("Secondary Web-server up at: " 
+              + conf.get("dfs.secondary.http.bindAddress"));
 
     //
     // Initialize other scheduling parameters from the configuration
@@ -248,9 +253,7 @@
     if (fsName.equals("local")) {
       throw new IOException("This is not a DFS");
     }
-    String[] splits = fsName.split(":", 2);
-    int infoPort = conf.getInt("dfs.info.port", 50070);
-    return splits[0]+":"+infoPort;
+    return conf.get("dfs.http.bindAddress", "0.0.0.0:50070");
   }
 
   /*

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/FileSystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/FileSystem.java?rev=601484&r1=601483&r2=601484&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/FileSystem.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/FileSystem.java Wed Dec  5 11:38:51 2007
@@ -26,6 +26,7 @@
 
 import org.apache.hadoop.dfs.*;
 import org.apache.hadoop.conf.*;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.*;
 import org.apache.hadoop.fs.permission.FsPermission;
 
@@ -69,7 +70,7 @@
     String cmd = argv[i];
     if ("-dfs".equals(cmd)) {
       i++;
-      InetSocketAddress addr = DataNode.createSocketAddr(argv[i++]);
+      InetSocketAddress addr = NetUtils.createSocketAddr(argv[i++]);
       fs = new DistributedFileSystem(addr, conf);
     } else if ("-local".equals(cmd)) {
       i++;

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/ipc/Client.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/ipc/Client.java?rev=601484&r1=601483&r2=601484&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/ipc/Client.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/ipc/Client.java Wed Dec  5 11:38:51 2007
@@ -71,6 +71,7 @@
   private int maxIdleTime; //connections will be culled if it was idle for 
                            //maxIdleTime msecs
   private int maxRetries; //the max. no. of retries for socket connections
+  private Thread connectionCullerThread;
   private SocketFactory socketFactory;           // how to create sockets
 
   /** A call waiting for a value. */
@@ -450,12 +451,12 @@
     this.maxRetries = conf.getInt("ipc.client.connect.max.retries", 10);
     this.conf = conf;
     this.socketFactory = factory;
-    Thread t = new ConnectionCuller();
-    t.setDaemon(true);
-    t.setName(valueClass.getName() + " Connection Culler");
+    this.connectionCullerThread = new ConnectionCuller();
+    connectionCullerThread.setDaemon(true);
+    connectionCullerThread.setName(valueClass.getName() + " Connection Culler");
     LOG.debug(valueClass.getName() + 
               "Connection culler maxidletime= " + maxIdleTime + "ms");
-    t.start();
+    connectionCullerThread.start();
   }
 
   /**
@@ -472,6 +473,10 @@
   public void stop() {
     LOG.info("Stopping client");
     running = false;
+    connectionCullerThread.interrupt();
+    try {
+      connectionCullerThread.join();
+    } catch(InterruptedException e) {}
   }
 
   /** Sets the timeout used for network i/o. */

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/ipc/Server.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/ipc/Server.java?rev=601484&r1=601483&r2=601484&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/ipc/Server.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/ipc/Server.java Wed Dec  5 11:38:51 2007
@@ -358,7 +358,7 @@
     }
 
     InetSocketAddress getAddress() {
-      return new InetSocketAddress(acceptChannel.socket().getInetAddress(), acceptChannel.socket().getLocalPort());
+      return (InetSocketAddress)acceptChannel.socket().getLocalSocketAddress();
     }
     
     void doAccept(SelectionKey key) throws IOException,  OutOfMemoryError {
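
This one-line change matters once ports can be ephemeral: after binding port
0, only the bound socket knows the real port, so the local socket address is
the source of truth. A self-contained sketch (class name illustrative):

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.nio.channels.ServerSocketChannel;

    public class ListenerAddressDemo {
      public static void main(String[] args) throws IOException {
        ServerSocketChannel acceptChannel = ServerSocketChannel.open();
        acceptChannel.socket().bind(new InetSocketAddress("0.0.0.0", 0));
        // The requested port was 0; the channel reports the bound one.
        InetSocketAddress addr = (InetSocketAddress)
            acceptChannel.socket().getLocalSocketAddress();
        System.out.println("listening on " + addr);
        acceptChannel.close();
      }
    }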

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java?rev=601484&r1=601483&r2=601484&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java Wed Dec  5 11:38:51 2007
@@ -54,6 +54,7 @@
 import org.apache.hadoop.metrics.MetricsUtil;
 import org.apache.hadoop.metrics.Updater;
 import org.apache.hadoop.metrics.jvm.JvmMetrics;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.HostsFileReader;
 import org.apache.hadoop.util.StringUtils;
 
@@ -598,7 +599,6 @@
 
   // Used to provide an HTML view on Job, Task, and TaskTracker structures
   StatusHttpServer infoServer;
-  String infoBindAddress;
   int infoPort;
 
   Server interTrackerServer;
@@ -653,9 +653,13 @@
       }
     }
 
-    this.infoPort = conf.getInt("mapred.job.tracker.info.port", 50030);
-    this.infoBindAddress = conf.get("mapred.job.tracker.info.bindAddress","0.0.0.0");
-    infoServer = new StatusHttpServer("job", infoBindAddress, infoPort, false);
+    String infoAddr = conf.get("mapred.job.tracker.http.bindAddress",
+                                "0.0.0.0:50030");
+    InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
+    String infoBindAddress = infoSocAddr.getHostName();
+    int tmpInfoPort = infoSocAddr.getPort();
+    infoServer = new StatusHttpServer("job", infoBindAddress, tmpInfoPort, 
+                                      tmpInfoPort == 0);
     infoServer.setAttribute("job.tracker", this);
     infoServer.start();
 
@@ -671,7 +675,8 @@
     this.conf.set("mapred.job.tracker", (this.localMachine + ":" + this.port));
     LOG.info("JobTracker up at: " + this.port);
     this.infoPort = this.infoServer.getPort();
-    this.conf.setInt("mapred.job.tracker.info.port", this.infoPort); 
+    this.conf.set("mapred.job.tracker.http.bindAddress", 
+        infoBindAddress + ":" + this.infoPort); 
     LOG.info("JobTracker webserver: " + this.infoServer.getPort());
     this.systemDir = jobConf.getSystemDir();
 
@@ -705,13 +710,7 @@
   public static InetSocketAddress getAddress(Configuration conf) {
     String jobTrackerStr =
       conf.get("mapred.job.tracker", "localhost:8012");
-    int colon = jobTrackerStr.indexOf(":");
-    if (colon < 0) {
-      throw new RuntimeException("Bad mapred.job.tracker: "+jobTrackerStr);
-    }
-    String jobTrackerName = jobTrackerStr.substring(0, colon);
-    int jobTrackerPort = Integer.parseInt(jobTrackerStr.substring(colon+1));
-    return new InetSocketAddress(jobTrackerName, jobTrackerPort);
+    return NetUtils.createSocketAddr(jobTrackerStr);
   }
 
 
@@ -751,13 +750,15 @@
       LOG.info("Stopping expireTrackers");
       this.expireTrackers.stopTracker();
       try {
-        this.expireTrackersThread.interrupt();
-        this.expireTrackersThread.join();
+        if(expireTrackersThread != null) {
+          this.expireTrackersThread.interrupt();
+          this.expireTrackersThread.join();
+        }
       } catch (InterruptedException ex) {
         ex.printStackTrace();
       }
     }
-    if (this.retireJobs != null) {
+    if (this.retireJobsThread != null) {
       LOG.info("Stopping retirer");
       this.retireJobsThread.interrupt();
       try {
@@ -766,7 +767,7 @@
         ex.printStackTrace();
       }
     }
-    if (this.initJobs != null) {
+    if (this.initJobsThread != null) {
       LOG.info("Stopping initer");
       this.initJobsThread.interrupt();
       try {

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/StatusHttpServer.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/StatusHttpServer.java?rev=601484&r1=601483&r2=601484&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/StatusHttpServer.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/StatusHttpServer.java Wed Dec  5 11:38:51 2007
@@ -44,8 +44,6 @@
  *   "/" -> the jsp server code from (src/webapps/<name>)
  */
 public class StatusHttpServer {
-  private static final boolean isWindows = 
-    System.getProperty("os.name").startsWith("Windows");
   private org.mortbay.jetty.Server webServer;
   private SocketListener listener;
   private boolean findPort;
@@ -184,21 +182,20 @@
           webServer.start();
           break;
         } catch (org.mortbay.util.MultiException ex) {
-          // look for the multi exception containing a bind exception,
-          // in that case try the next port number.
+          // if the multi exception contains ONLY a bind exception,
+          // then try the next port number.
           boolean needNewPort = false;
-          for(int i=0; i < ex.size(); ++i) {
-            Exception sub = ex.getException(i);
+          if(ex.size() == 1) {
+            Exception sub = ex.getException(0);
             if (sub instanceof java.net.BindException) {
+              if(!findPort)
+                throw sub; // java.net.BindException
               needNewPort = true;
-              break;
             }
           }
-          if (!findPort || !needNewPort) {
+          if (!needNewPort)
             throw ex;
-          } else {
-            listener.setPort(listener.getPort() + 1);
-          }
+          listener.setPort(listener.getPort() + 1);
         }
       }
     } catch (IOException ie) {
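
The rewritten loop narrows the retry condition: the port is advanced only
when findPort is set and the failure is nothing but a bind conflict. A
hedged sketch of that policy in isolation (the class and its tryBind helper
are hypothetical stand-ins for webServer.start() plus its MultiException
unwrapping, not library code):

    import java.io.IOException;
    import java.net.BindException;
    import java.net.ServerSocket;

    public class FindPortPolicy {
      // Hypothetical stand-in for webServer.start(): bind, then release.
      static void tryBind(int port) throws IOException {
        new ServerSocket(port, 0, null).close();
      }

      // Advance the port only when findPort is set and the failure is a
      // plain bind conflict; otherwise fail fast, as the new loop does.
      static int bindSomewhere(int requestedPort, boolean findPort)
          throws IOException {
        int port = requestedPort;
        while (true) {
          try {
            tryBind(port);
            return port;             // bound successfully
          } catch (BindException be) {
            if (!findPort) throw be; // fixed port: fail fast
            port++;                  // ephemeral scan: probe next port
          }
        }
      }
    }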

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskTracker.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskTracker.java?rev=601484&r1=601483&r2=601484&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskTracker.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskTracker.java Wed Dec  5 11:38:51 2007
@@ -24,7 +24,6 @@
 import java.io.OutputStream;
 import java.io.PrintStream;
 import java.io.RandomAccessFile;
-import java.net.BindException;
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URISyntaxException; 
@@ -70,6 +69,7 @@
 import org.apache.hadoop.metrics.Updater;
 import org.apache.hadoop.metrics.jvm.JvmMetrics;
 import org.apache.hadoop.net.DNS;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.RunJar;
@@ -414,19 +414,22 @@
     this.myMetrics = new TaskTrackerMetrics();
     
     // bind address
-    String bindAddress =
-      this.fConf.get("mapred.task.tracker.report.bindAddress", "127.0.0.1");
+    String address = 
+      this.fConf.get("mapred.task.tracker.report.bindAddress", "127.0.0.1:0");
+    InetSocketAddress socAddr = NetUtils.createSocketAddr(address);
+    String bindAddress = socAddr.getHostName();
+    int tmpPort = socAddr.getPort();
 
     // RPC initialization
     int max = maxCurrentMapTasks > maxCurrentReduceTasks ? 
                        maxCurrentMapTasks : maxCurrentReduceTasks;
     this.taskReportServer =
-      RPC.getServer(this, bindAddress, 0, max, false, this.fConf);
+      RPC.getServer(this, bindAddress, tmpPort, max, false, this.fConf);
     this.taskReportServer.start();
 
     // get the assigned address
     this.taskReportAddress = taskReportServer.getListenerAddress();
-    this.fConf.set("mapred.task.tracker.report.address",
+    this.fConf.set("mapred.task.tracker.report.bindAddress",
                    taskReportAddress.toString());
     LOG.info("TaskTracker up at: " + this.taskReportAddress);
 
@@ -772,9 +775,13 @@
     this.jobTrackAddr = JobTracker.getAddress(conf);
     this.mapOutputFile = new MapOutputFile();
     this.mapOutputFile.setConf(conf);
-    int httpPort = conf.getInt("tasktracker.http.port", 50060);
-    String httpBindAddress = conf.get("tasktracker.http.bindAddress", "0.0.0.0");
-    this.server = new StatusHttpServer("task", httpBindAddress, httpPort, true);
+    String infoAddr = conf.get("mapred.task.tracker.http.bindAddress", 
+                                "0.0.0.0:50060");
+    InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
+    String httpBindAddress = infoSocAddr.getHostName();
+    int httpPort = infoSocAddr.getPort();
+    this.server = new StatusHttpServer(
+                        "task", httpBindAddress, httpPort, httpPort == 0);
     workerThreads = conf.getInt("tasktracker.http.threads", 40);
     this.shuffleServerMetrics = new ShuffleServerMetrics(fConf);
     server.setThreads(1, workerThreads);

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/net/NetUtils.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/net/NetUtils.java?rev=601484&r1=601483&r2=601484&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/net/NetUtils.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/net/NetUtils.java Wed Dec  5 11:38:51 2007
@@ -1,8 +1,11 @@
 package org.apache.hadoop.net;
 
+import java.net.InetSocketAddress;
+import java.net.URI;
 import javax.net.SocketFactory;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.ipc.VersionedProtocol;
 import org.apache.hadoop.util.ReflectionUtils;
 
@@ -74,5 +77,31 @@
     } catch (ClassNotFoundException cnfe) {
       throw new RuntimeException("Socket Factory class not found: " + cnfe);
     }
+  }
+
+  /**
+   * Util method to build socket addr from either:
+   *   <host>:<port>
+   *   <fs>://<host>:<port>/<path>
+   */
+  public static InetSocketAddress createSocketAddr(String target) {
+    int colonIndex = target.indexOf(':');
+    if (colonIndex < 0) {
+      throw new RuntimeException("Not a host:port pair: " + target);
+    }
+    String hostname;
+    int port;
+    if (!target.contains("/")) {
+      // must be the old style <host>:<port>
+      hostname = target.substring(0, colonIndex);
+      port = Integer.parseInt(target.substring(colonIndex + 1));
+    } else {
+      // a new uri
+      URI addr = new Path(target).toUri();
+      hostname = addr.getHost();
+      port = addr.getPort();
+    }
+  
+    return new InetSocketAddress(hostname, port);
   }
 }
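
Both input forms named in the javadoc keep working after the move from
DataNode to NetUtils; a short usage sketch (host names illustrative):

    import java.net.InetSocketAddress;
    import org.apache.hadoop.net.NetUtils;

    public class CreateSocketAddrDemo {
      public static void main(String[] args) {
        // Old style <host>:<port> pair.
        InetSocketAddress a = NetUtils.createSocketAddr("datanode1:50010");
        // URI style <fs>://<host>:<port>/<path>.
        InetSocketAddress b = NetUtils.createSocketAddr("hdfs://namenode:9000/tmp");
        System.out.println(a + " " + b);
      }
    }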

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFS.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFS.java?rev=601484&r1=601483&r2=601484&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFS.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFS.java Wed Dec  5 11:38:51 2007
@@ -28,7 +28,6 @@
 import org.apache.hadoop.conf.Configuration;
 
 import java.io.File;
-import java.io.FilenameFilter;
 import java.io.OutputStream;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
@@ -219,7 +218,7 @@
     int nameNodePort = 9000 + testCycleNumber++; // ToDo: settable base port
     String nameNodeSocketAddr = "localhost:" + nameNodePort;
     conf.set("dfs.name.dir", nameFSDir);
-    NameNode nameNodeDaemon = new NameNode("localhost", nameNodePort, conf);
+    NameNode nameNodeDaemon = new NameNode(nameNodeSocketAddr, conf);
     DFSClient dfsClient = null;
     try {
       //

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFSNamespaceLogging.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFSNamespaceLogging.java?rev=601484&r1=601483&r2=601484&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFSNamespaceLogging.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFSNamespaceLogging.java Wed Dec  5 11:38:51 2007
@@ -351,7 +351,7 @@
 	
     NameNode.format(conf);
     
-    nameNodeDaemon = new NameNode("localhost", nameNodePort, conf);
+    nameNodeDaemon = new NameNode(nameNodeSocketAddr, conf);
 
     //
     //        start DataNodes

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java?rev=601484&r1=601483&r2=601484&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java Wed Dec  5 11:38:51 2007
@@ -152,7 +152,7 @@
     
     // Setup the NameNode configuration
     conf.set("fs.default.name", "localhost:"+ Integer.toString(nameNodePort));
-    conf.setInt("dfs.info.port", 0);
+    conf.get("dfs.http.bindAddress", "0.0.0.0:0");
     if (manageDfsDirs) {
       conf.set("dfs.name.dir", new File(base_dir, "name1").getPath()+","+
                new File(base_dir, "name2").getPath());
@@ -235,7 +235,8 @@
     }
 
     // Set up the right ports for the datanodes
-    conf.setInt("dfs.datanode.info.port", 0);
+    conf.set("dfs.datanode.bindAddress", "0.0.0.0:0");
+    conf.set("dfs.datanode.http.bindAddress", "0.0.0.0:0");
     
     String[] args = (operation == null ||
                      operation == StartupOption.FORMAT ||
@@ -286,7 +287,6 @@
    * @param operation the operation with which to start the DataNodes.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
    * @param racks array of strings indicating the rack that each DataNode is on
-   * @param simulatedCapacities array of capacities of the simulated data nodes
    *
    * @throws IllegalStateException if NameNode has been shutdown
    */

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestBlockReplacement.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestBlockReplacement.java?rev=601484&r1=601483&r2=601484&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestBlockReplacement.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestBlockReplacement.java Wed Dec  5 11:38:51 2007
@@ -34,6 +34,7 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.net.NetUtils;
 
 import junit.framework.TestCase;
 /**
@@ -209,7 +210,7 @@
   private boolean replaceBlock( Block block, DatanodeInfo source,
       DatanodeInfo sourceProxy, DatanodeInfo destination) throws IOException {
     Socket sock = new Socket();
-    sock.connect(DataNode.createSocketAddr(
+    sock.connect(NetUtils.createSocketAddr(
         sourceProxy.getName()), FSConstants.READ_TIMEOUT);
     sock.setSoTimeout(FSConstants.READ_TIMEOUT);
     // sendRequest

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSShellGenericOptions.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSShellGenericOptions.java?rev=601484&r1=601483&r2=601484&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSShellGenericOptions.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSShellGenericOptions.java Wed Dec  5 11:38:51 2007
@@ -28,6 +28,7 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.ToolRunner;
 
 public class TestDFSShellGenericOptions extends TestCase {
@@ -100,7 +101,7 @@
     try {
       ToolRunner.run(shell, args);
       fs = new DistributedFileSystem(
-                                     DataNode.createSocketAddr(namenode), 
+                                     NetUtils.createSocketAddr(namenode), 
                                      shell.getConf());
       assertTrue("Directory does not get created", 
                  fs.isDirectory(new Path("/data")));

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDataTransferProtocol.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDataTransferProtocol.java?rev=601484&r1=601483&r2=601484&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDataTransferProtocol.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDataTransferProtocol.java Wed Dec  5 11:38:51 2007
@@ -28,6 +28,7 @@
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.dfs.DFSClient.DFSDataInputStream;
 import org.apache.hadoop.dfs.FSConstants.DatanodeReportType;
@@ -134,7 +135,7 @@
                  new InetSocketAddress("localhost", cluster.getNameNodePort()),
                  conf);                
     datanode = dfsClient.datanodeReport(DatanodeReportType.LIVE)[0];
-    dnAddr = DataNode.createSocketAddr(datanode.getName());
+    dnAddr = NetUtils.createSocketAddr(datanode.getName());
     FileSystem fileSys = cluster.getFileSystem();
     
     int fileLen = Math.min(conf.getInt("dfs.block.size", 4096), 4096);

Added: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestHDFSServerPorts.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestHDFSServerPorts.java?rev=601484&view=auto
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestHDFSServerPorts.java (added)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestHDFSServerPorts.java Wed Dec  5 11:38:51 2007
@@ -0,0 +1,201 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.dfs;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.ipc.RPC;
+
+/**
+ * This test checks correct port usage by the HDFS components:
+ * NameNode, DataNode, and SecondaryNameNode.
+ * 
+ * The correct behavior is:<br> 
+ * - when a specific port is provided, the server must either start on that port 
+ * or fail by throwing {@link java.net.BindException};<br>
+ * - if the port is 0 (ephemeral), the server should choose 
+ * a free port and start on it.
+ */
+public class TestHDFSServerPorts extends TestCase {
+  public static final String NAME_NODE_HOST = "localhost:";
+  public static final int    NAME_NODE_PORT = 50013;
+  public static final String NAME_NODE_ADDRESS = NAME_NODE_HOST 
+                                               + NAME_NODE_PORT;
+  public static final String NAME_NODE_HTTP_HOST = "0.0.0.0:";
+  public static final int    NAME_NODE_HTTP_PORT = 50073;
+  public static final String NAME_NODE_HTTP_ADDRESS = NAME_NODE_HTTP_HOST 
+                                                    + NAME_NODE_HTTP_PORT;
+
+  Configuration config;
+  File hdfsDir;
+
+  /**
+   * Start the name-node.
+   */
+  public NameNode startNameNode() throws IOException {
+    String dataDir = System.getProperty("test.build.data");
+    hdfsDir = new File(dataDir, "dfs");
+    if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
+      throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
+    }
+    config = new Configuration();
+    config.set("dfs.name.dir", new File(hdfsDir, "name1").getPath());
+    config.set("fs.default.name", NAME_NODE_ADDRESS);
+    config.set("dfs.http.bindAddress", NAME_NODE_HTTP_ADDRESS);
+    NameNode.format(config);
+
+    String[] args = new String[] {};
+    return NameNode.createNameNode(args, config);
+  }
+
+  public void stopNameNode(NameNode nn) {
+    nn.stop();
+    RPC.stopClient();
+  }
+
+  public Configuration getConfig() {
+    return this.config;
+  }
+
+  /**
+   * Check whether the name-node can be started.
+   */
+  private boolean canStartNameNode(Configuration conf) throws IOException {
+    NameNode nn2 = null;
+    try {
+      nn2 = NameNode.createNameNode(new String[]{}, conf);
+    } catch(IOException e) {
+      if (e instanceof java.net.BindException)
+        return false;
+      throw e;
+    }
+    stopNameNode(nn2);
+    return true;
+  }
+
+  /**
+   * Check whether the data-node can be started.
+   */
+  private boolean canStartDataNode(Configuration conf) throws IOException {
+    DataNode dn = null;
+    try {
+      dn = DataNode.createDataNode(new String[]{}, conf);
+    } catch(IOException e) {
+      if (e instanceof java.net.BindException)
+        return false;
+      throw e;
+    }
+    dn.shutdown();
+    return true;
+  }
+
+  /**
+   * Check whether the secondary name-node can be started.
+   */
+  private boolean canStartSecondaryNode(Configuration conf) throws IOException {
+    SecondaryNameNode sn = null;
+    try {
+      sn = new SecondaryNameNode(conf);
+    } catch(IOException e) {
+      if (e instanceof java.net.BindException)
+        return false;
+      throw e;
+    }
+    sn.shutdown();
+    return true;
+  }
+
+  /**
+   * Verify name-node port usage.
+   */
+  public void testNameNodePorts() throws Exception {
+    NameNode nn = startNameNode();
+
+    // start another namenode on the same port
+    Configuration conf2 = new Configuration(config);
+    conf2.set("dfs.name.dir", new File(hdfsDir, "name2").getPath());
+    NameNode.format(conf2);
+    boolean started = canStartNameNode(conf2);
+    assertFalse(started); // should fail
+
+    // use an ephemeral main port; the http port still conflicts
+    conf2.set("fs.default.name", NAME_NODE_HOST + 0);
+    started = canStartNameNode(conf2);
+    assertFalse(started); // should fail again
+
+    // different http port
+    conf2.set("dfs.http.bindAddress", NAME_NODE_HTTP_HOST + 0);
+    started = canStartNameNode(conf2);
+    assertTrue(started); // should start now
+
+    stopNameNode(nn);
+  }
+
+  /**
+   * Verify data-node port usage.
+   */
+  public void testDataNodePorts() throws Exception {
+    NameNode nn = startNameNode();
+
+    // start data-node on the same port as name-node
+    Configuration conf2 = new Configuration(config);
+    conf2.set("dfs.data.dir", new File(hdfsDir, "data").getPath());
+    conf2.set("dfs.datanode.bindAddress", NAME_NODE_ADDRESS);
+    conf2.set("dfs.datanode.http.bindAddress", NAME_NODE_HTTP_HOST + 0);
+    boolean started = canStartDataNode(conf2);
+    assertFalse(started); // should fail
+
+    // bind http server to the same port as name-node
+    conf2.set("dfs.datanode.bindAddress", NAME_NODE_HOST + 0);
+    conf2.set("dfs.datanode.http.bindAddress", NAME_NODE_HTTP_ADDRESS);
+    started = canStartDataNode(conf2);
+    assertFalse(started); // should fail
+    
+    // both ports are different from the name-node ones
+    conf2.set("dfs.datanode.bindAddress", NAME_NODE_HOST + 0);
+    conf2.set("dfs.datanode.http.bindAddress", NAME_NODE_HTTP_HOST + 0);
+    started = canStartDataNode(conf2);
+    assertTrue(started); // should start now
+    stopNameNode(nn);
+  }
+
+  /**
+   * Verify secondary name-node port usage.
+   */
+  public void testSecondaryNodePorts() throws Exception {
+    NameNode nn = startNameNode();
+
+    // bind http server to the same port as name-node
+    Configuration conf2 = new Configuration(config);
+    conf2.set("dfs.secondary.http.bindAddress", NAME_NODE_ADDRESS);
+    SecondaryNameNode.LOG.info("= Starting 1 on: " + conf2.get("dfs.secondary.http.bindAddress"));
+    boolean started = canStartSecondaryNode(conf2);
+    assertFalse(started); // should fail
+
+    // bind http server to a different port
+    conf2.set("dfs.secondary.http.bindAddress", NAME_NODE_HTTP_HOST + 0);
+    SecondaryNameNode.LOG.info("= Starting 2 on: " + conf2.get("dfs.secondary.http.bindAddress"));
+    started = canStartSecondaryNode(conf2);
+    assertTrue(started); // should start now
+    stopNameNode(nn);
+  }
+}

Propchange: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestHDFSServerPorts.java
------------------------------------------------------------------------------
    svn:eol-style = native

Propchange: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestHDFSServerPorts.java
------------------------------------------------------------------------------
    svn:keywords = Id Revision HeadURL
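
The contract the new test encodes -- an explicit port either binds or fails with
BindException, while port 0 lets the OS pick a free port -- can be demonstrated with
plain java.net.ServerSocket, independent of any Hadoop code:

    import java.net.BindException;
    import java.net.ServerSocket;

    public class EphemeralPortDemo {
      public static void main(String[] args) throws Exception {
        // Port 0 asks the OS for any free port; getLocalPort() reveals the choice.
        ServerSocket first = new ServerSocket(0);
        int chosen = first.getLocalPort();
        System.out.println("bound to ephemeral port " + chosen);

        // A second bind to the same explicit port must fail.
        try {
          ServerSocket second = new ServerSocket(chosen);
          second.close();
        } catch (BindException e) {
          System.out.println("expected: " + e.getMessage());
        }
        first.close();
      }
    }

This is exactly the distinction the canStart* helpers above probe: they treat
BindException as "port taken" and rethrow anything else.
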

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java?rev=601484&r1=601483&r2=601484&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java Wed Dec  5 11:38:51 2007
@@ -31,6 +31,7 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.NetUtils;
 
 /**
  * This class tests the replication of a DFS file.
@@ -64,10 +65,10 @@
     throws IOException {
     Configuration conf = fileSys.getConf();
     ClientProtocol namenode = (ClientProtocol) RPC.getProxy(
-                                                            ClientProtocol.class,
-                                                            ClientProtocol.versionID,
-                                                            DataNode.createSocketAddr(conf.get("fs.default.name")), 
-                                                            conf);
+                      ClientProtocol.class,
+                      ClientProtocol.versionID,
+                      NetUtils.createSocketAddr(conf.get("fs.default.name")), 
+                      conf);
       
     LocatedBlocks locations;
     boolean isReplicationDone;

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/MiniMRCluster.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/MiniMRCluster.java?rev=601484&r1=601483&r2=601484&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/MiniMRCluster.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/MiniMRCluster.java Wed Dec  5 11:38:51 2007
@@ -109,8 +109,9 @@
       this.numDir = numDir;
       localDirs = new String[numDir];
       conf = createJobConf();
-      conf.setInt("mapred.task.tracker.info.port", 0);
-      conf.setInt("mapred.task.tracker.report.port", taskTrackerPort);
+      conf.set("mapred.task.tracker.http.bindAddress", "0.0.0.0:0");
+      conf.set("mapred.task.tracker.report.bindAddress", 
+                "127.0.0.1:" + taskTrackerPort);
       File localDirBase = 
         new File(conf.get("mapred.local.dir")).getAbsoluteFile();
       localDirBase.mkdirs();
@@ -224,7 +225,8 @@
     JobConf result = new JobConf();
     result.set("fs.default.name", namenode);
     result.set("mapred.job.tracker", "localhost:"+jobTrackerPort);
-    result.setInt("mapred.job.tracker.info.port", jobTrackerInfoPort);
+    result.set("mapred.job.tracker.http.bindAddress", 
+                        "0.0.0.0:" + jobTrackerInfoPort);
     // for debugging have all task output sent to the test output
     JobClient.setTaskOutputFilter(result, JobClient.TaskStatusFilter.ALL);
     return result;
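
The MiniMRCluster hunk illustrates the configuration convention this commit moves
to: the old integer *.port keys are replaced by *.bindAddress keys carrying
"host:port" strings, where a port of 0 requests an ephemeral port. A hedged sketch
of how a daemon might resolve such a key (the default shown is an assumption, not
necessarily what the daemons use):

    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.net.NetUtils;

    public class BindAddressSketch {
      // Read a host:port bind address from the configuration and parse it.
      static InetSocketAddress httpBindAddress(Configuration conf) {
        String addr = conf.get("mapred.task.tracker.http.bindAddress",
                               "0.0.0.0:0");  // assumed default: any host, ephemeral port
        return NetUtils.createSocketAddr(addr);
      }
    }
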

Added: lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestMRServerPorts.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestMRServerPorts.java?rev=601484&view=auto
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestMRServerPorts.java (added)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestMRServerPorts.java Wed Dec  5 11:38:51 2007
@@ -0,0 +1,160 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapred;
+
+import java.io.IOException;
+import junit.framework.TestCase;
+import org.apache.hadoop.dfs.TestHDFSServerPorts;
+import org.apache.hadoop.dfs.NameNode;
+
+/**
+ * This test checks correct port usage by the mapreduce components:
+ * JobTracker and TaskTracker.
+ * 
+ * The correct behavior is:<br> 
+ * - when a specific port is provided, the server must either start on that port 
+ * or fail by throwing {@link java.net.BindException};<br>
+ * - if the port is 0 (ephemeral), the server should choose 
+ * a free port and start on it.
+ */
+public class TestMRServerPorts extends TestCase {
+  TestHDFSServerPorts hdfs = new TestHDFSServerPorts();
+
+  /**
+   * Start the JobTracker on ephemeral ports and record 
+   * the actual ports in the configuration.
+   */
+  private JobTracker startJobTracker(JobConf conf) 
+  throws IOException {
+    conf.set("mapred.job.tracker", "localhost:0");
+    conf.set("mapred.job.tracker.http.bindAddress", "0.0.0.0:0");
+    JobTracker jt = null;
+    try {
+      jt = JobTracker.startTracker(conf);
+      conf.set("mapred.job.tracker", "localhost:" + jt.getTrackerPort());
+      conf.set("mapred.job.tracker.http.bindAddress", 
+                            "0.0.0.0:" + jt.getInfoPort());
+    } catch(InterruptedException e) {
+      throw new IOException(e.getLocalizedMessage());
+    }
+    return jt;
+  }
+
+  /**
+   * Check whether the JobTracker can be started.
+   */
+  private boolean canStartJobTracker(JobConf conf) 
+  throws IOException, InterruptedException {
+    JobTracker jt = null;
+    try {
+      jt = JobTracker.startTracker(conf);
+    } catch(IOException e) {
+      if (e instanceof java.net.BindException)
+        return false;
+      throw e;
+    }
+    jt.fs.close();
+    jt.stopTracker();
+    return true;
+  }
+
+  /**
+   * Check whether the TaskTracker can be started.
+   */
+  private boolean canStartTaskTracker(JobConf conf) 
+  throws IOException, InterruptedException {
+    TaskTracker tt = null;
+    try {
+      tt = new TaskTracker(conf);
+    } catch(IOException e) {
+      if (e instanceof java.net.BindException)
+        return false;
+      throw e;
+    }
+    tt.shutdown();
+    return true;
+  }
+
+  /**
+   * Verify JobTracker port usage.
+   */
+  public void testJobTrackerPorts() throws Exception {
+    NameNode nn = hdfs.startNameNode();
+
+    // start job tracker on the same port as name-node
+    JobConf conf2 = new JobConf(hdfs.getConfig());
+    conf2.set("mapred.job.tracker", TestHDFSServerPorts.NAME_NODE_ADDRESS);
+    conf2.set("mapred.job.tracker.http.bindAddress",
+        TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
+    boolean started = canStartJobTracker(conf2);
+    assertFalse(started); // should fail
+
+    // bind http server to the same port as name-node
+    conf2.set("mapred.job.tracker", TestHDFSServerPorts.NAME_NODE_HOST + 0);
+    conf2.set("mapred.job.tracker.http.bindAddress",
+        TestHDFSServerPorts.NAME_NODE_HTTP_ADDRESS);
+    started = canStartJobTracker(conf2);
+    assertFalse(started); // should fail again
+
+    // both ports are different from the name-node ones
+    conf2.set("mapred.job.tracker", TestHDFSServerPorts.NAME_NODE_HOST + 0);
+    conf2.set("mapred.job.tracker.http.bindAddress",
+        TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
+    started = canStartJobTracker(conf2);
+    assertTrue(started); // should start now
+
+    hdfs.stopNameNode(nn);
+  }
+
+  /**
+   * Verify TaskTracker port usage.
+   */
+  public void testTaskTrackerPorts() throws Exception {
+    NameNode nn = hdfs.startNameNode();
+
+    JobConf conf2 = new JobConf(hdfs.getConfig());
+    JobTracker jt = startJobTracker(conf2);
+
+    // start the task tracker on the same port as the name-node
+    conf2.set("mapred.task.tracker.report.bindAddress",
+        TestHDFSServerPorts.NAME_NODE_ADDRESS);
+    conf2.set("mapred.task.tracker.http.bindAddress",
+        TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
+    boolean started = canStartTaskTracker(conf2);
+    assertFalse(started); // should fail
+
+    // bind http server to the same port as name-node
+    conf2.set("mapred.task.tracker.report.bindAddress",
+        TestHDFSServerPorts.NAME_NODE_HOST + 0);
+    conf2.set("mapred.task.tracker.http.bindAddress",
+        TestHDFSServerPorts.NAME_NODE_HTTP_ADDRESS);
+    started = canStartTaskTracker(conf2);
+    assertFalse(started); // should fail again
+
+    // both ports are different from the name-node ones
+    conf2.set("mapred.task.tracker.report.bindAddress",
+        TestHDFSServerPorts.NAME_NODE_HOST + 0);
+    conf2.set("mapred.task.tracker.http.bindAddress",
+        TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
+    started = canStartTaskTracker(conf2);
+    assertTrue(started); // should start now
+
+    jt.fs.close();
+    jt.stopTracker();
+    hdfs.stopNameNode(nn);
+  }
+}

Propchange: lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestMRServerPorts.java
------------------------------------------------------------------------------
    svn:eol-style = native

Propchange: lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestMRServerPorts.java
------------------------------------------------------------------------------
    svn:keywords = Id Revision HeadURL
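
Both new test classes share one probing idiom: try to start the daemon, report
false only on java.net.BindException, and propagate every other failure. Reduced
to a generic helper (names here are hypothetical, for illustration):

    import java.io.IOException;
    import java.net.BindException;

    public class PortProbe {
      interface Daemon {
        void start() throws IOException;
        void stop();
      }

      // Mirrors canStartNameNode / canStartDataNode / canStartJobTracker:
      // false means the requested port was taken; any other IOException escapes.
      static boolean canStart(Daemon d) throws IOException {
        try {
          d.start();
        } catch (BindException e) {
          return false;
        }
        d.stop();
        return true;
      }
    }

Note that startJobTracker in TestMRServerPorts goes one step further: after
starting on port 0 it reads the actual ports back via getTrackerPort() and
getInfoPort() and writes them into the configuration so the task trackers can
find the job tracker.
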

Modified: lucene/hadoop/trunk/src/webapps/datanode/tail.jsp
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/webapps/datanode/tail.jsp?rev=601484&r1=601483&r2=601484&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/webapps/datanode/tail.jsp (original)
+++ lucene/hadoop/trunk/src/webapps/datanode/tail.jsp Wed Dec  5 11:38:51 2007
@@ -9,6 +9,7 @@
   import="org.apache.hadoop.io.*"
   import="org.apache.hadoop.conf.*"
   import="org.apache.hadoop.net.DNS"
+  import="org.apache.hadoop.net.NetUtils"
   import="java.text.DateFormat"
 %>
 
@@ -85,7 +86,7 @@
       dfs.close();
       return;
     }      
-    InetSocketAddress addr = DataNode.createSocketAddr(chosenNode.getName());
+    InetSocketAddress addr = NetUtils.createSocketAddr(chosenNode.getName());
     //view the last chunkSizeToView bytes while Tailing
     if (blockSize >= chunkSizeToView)
       startOffset = blockSize - chunkSizeToView;