Posted to hdfs-commits@hadoop.apache.org by ha...@apache.org on 2010/06/19 00:48:53 UTC

svn commit: r956141 - in /hadoop/hdfs/trunk: ./ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/server/balancer/ src/java/org/apache/hadoop/hdfs/server/datanode/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/test/hdfs/org/apache...

Author: hairong
Date: Fri Jun 18 22:48:53 2010
New Revision: 956141

URL: http://svn.apache.org/viewvc?rev=956141&view=rev
Log:
HDFS-599. Allow NameNode to have a separate port for service requests from client requests. Contributed by Dmytro Molkov.
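
A deployment opts in by setting the new service RPC address key; when it
is unset, everything continues to flow through the single client RPC
port as before. A minimal sketch in Java (the host and port below are
illustrative, not values from this patch):

    Configuration conf = new HdfsConfiguration();
    // Clients keep using the fs.default.name authority; DataNodes, the
    // Balancer, the SecondaryNameNode and the BackupNode will use this:
    conf.set("dfs.namenode.servicerpc-address", "namenode.example.com:8021");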

Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestRestartDFS.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=956141&r1=956140&r2=956141&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Fri Jun 18 22:48:53 2010
@@ -7,6 +7,9 @@ Trunk (unreleased changes)
     HDFS-992. Re-factor block access token implementation to conform to the 
     generic Token interface in Common (Kan Zhang and Jitendra Pandey via jghoman)
 
+    HDFS-599. Allow NameNode to have a separate port for service requests
+    from client requests. (Dmytro Molkov via hairong)
+
   IMPROVEMENTS
 
     HDFS-1110. Reuses objects for commonly used file names in namenode to

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=956141&r1=956140&r2=956141&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Fri Jun 18 22:48:53 2010
@@ -43,10 +43,12 @@ public class DFSConfigKeys extends Commo
   public static final String  DFS_NAMENODE_BACKUP_ADDRESS_DEFAULT = "localhost:50100";
   public static final String  DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY = "dfs.namenode.backup.http-address";
   public static final String  DFS_NAMENODE_BACKUP_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50105";
+  public static final String  DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY = "dfs.namenode.backup.dnrpc-address";
   public static final String  DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY = "dfs.datanode.balance.bandwidthPerSec";
   public static final long    DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT = 1024*1024;
   public static final String  DFS_NAMENODE_HTTP_ADDRESS_KEY = "dfs.namenode.http-address";
   public static final String  DFS_NAMENODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50070";
+  public static final String  DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY = "dfs.namenode.servicerpc-address";
   public static final String  DFS_NAMENODE_MAX_OBJECTS_KEY = "dfs.namenode.max.objects";
   public static final long    DFS_NAMENODE_MAX_OBJECTS_DEFAULT = 0;
   public static final String  DFS_NAMENODE_SAFEMODE_EXTENSION_KEY = "dfs.namenode.safemode.extension";
@@ -164,6 +166,8 @@ public class DFSConfigKeys extends Commo
   public static final int     DFS_NAMENODE_DECOMMISSION_NODES_PER_INTERVAL_DEFAULT = 5;
   public static final String  DFS_NAMENODE_HANDLER_COUNT_KEY = "dfs.namenode.handler.count";
   public static final int     DFS_NAMENODE_HANDLER_COUNT_DEFAULT = 10;
+  public static final String  DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY = "dfs.namenode.service.handler.count";
+  public static final int     DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT = 10;
   public static final String  DFS_SUPPORT_APPEND_KEY = "dfs.support.append";
   public static final boolean DFS_SUPPORT_APPEND_DEFAULT = true;
   public static final String  DFS_HTTPS_ENABLE_KEY = "dfs.https.enable";
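
Three keys are added above. A short sketch of the configuration surface
they define (the key strings and the handler-count default of 10 come
from the lines above; reading them directly like this is for
illustration only):

    Configuration conf = new HdfsConfiguration();
    // The NameNode's dedicated service endpoint; unset means single-port mode:
    String serviceAddr = conf.get("dfs.namenode.servicerpc-address");
    // The BackupNode's own service endpoint:
    String backupAddr = conf.get("dfs.namenode.backup.dnrpc-address");
    // Handler threads for the service RPC server:
    int handlers = conf.getInt("dfs.namenode.service.handler.count", 10);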

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java?rev=956141&r1=956140&r2=956141&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java Fri Jun 18 22:48:53 2010
@@ -909,7 +909,7 @@ public class Balancer implements Tool {
    * set up the retry policy */ 
   private static NamenodeProtocol createNamenode(Configuration conf)
     throws IOException {
-    InetSocketAddress nameNodeAddr = NameNode.getAddress(conf);
+    InetSocketAddress nameNodeAddr = NameNode.getServiceAddress(conf, true);
     RetryPolicy timeoutPolicy = RetryPolicies.exponentialBackoffRetry(
         5, 200, TimeUnit.MILLISECONDS);
     Map<Class<? extends Exception>,RetryPolicy> exceptionToPolicyMap =
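
Because fallback is passed as true, the Balancer keeps working on
clusters that never configure the new key. A rough equivalent of the
resolution getServiceAddress performs (see its implementation in the
NameNode.java hunk below; this sketch assumes NameNode.DEFAULT_PORT is
accessible):

    String addr = conf.get(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
    InetSocketAddress nameNodeAddr = (addr == null || addr.isEmpty())
        ? NameNode.getAddress(conf)  // fall back to the client RPC address
        : NetUtils.createSocketAddr(addr, NameNode.DEFAULT_PORT);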

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=956141&r1=956140&r2=956141&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Fri Jun 18 22:48:53 2010
@@ -233,7 +233,7 @@ public class DataNode extends Configured
            final AbstractList<File> dataDirs) throws IOException {
     this(conf, dataDirs, (DatanodeProtocol)RPC.waitForProxy(DatanodeProtocol.class,
                        DatanodeProtocol.versionID,
-                       NameNode.getAddress(conf), 
+                       NameNode.getServiceAddress(conf, true), 
                        conf));
   }
   
@@ -284,7 +284,7 @@ public class DataNode extends Configured
                                      conf.get("dfs.datanode.dns.interface","default"),
                                      conf.get("dfs.datanode.dns.nameserver","default"));
     }
-    this.nameNodeAddr = NameNode.getAddress(conf);
+    this.nameNodeAddr = NameNode.getServiceAddress(conf, true);
     
     this.socketTimeout =  conf.getInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,
                                       HdfsConstants.READ_TIMEOUT);
@@ -305,7 +305,7 @@ public class DataNode extends Configured
 
     // connect to name node
     this.namenode = namenode;
-    
+
     // get version and id info from the name-node
     NamespaceInfo nsInfo = handshake();
     StartupOption startOpt = getStartupOption(conf);

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java?rev=956141&r1=956140&r2=956141&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java Fri Jun 18 22:48:53 2010
@@ -56,6 +56,7 @@ public class BackupNode extends NameNode
   private static final String BN_ADDRESS_DEFAULT = DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_DEFAULT;
   private static final String BN_HTTP_ADDRESS_NAME_KEY = DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY;
   private static final String BN_HTTP_ADDRESS_DEFAULT = DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_DEFAULT;
+  private static final String BN_SERVICE_RPC_ADDRESS_KEY = DFSConfigKeys.DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY;
 
   /** Name-node proxy */
   NamenodeProtocol namenode;
@@ -80,11 +81,27 @@ public class BackupNode extends NameNode
     String hostName = DNS.getDefaultHost("default");
     return new InetSocketAddress(hostName, port);
   }
+  
+  @Override
+  protected InetSocketAddress getServiceRpcServerAddress(Configuration conf) throws IOException {
+    String addr = conf.get(BN_SERVICE_RPC_ADDRESS_KEY);
+    if (addr == null || addr.isEmpty()) {
+      return null;
+    }
+    int port = NetUtils.createSocketAddr(addr).getPort();
+    String hostName = DNS.getDefaultHost("default");
+    return new InetSocketAddress(hostName, port);
+  }
 
   @Override // NameNode
   protected void setRpcServerAddress(Configuration conf) {
     conf.set(BN_ADDRESS_NAME_KEY, getHostPortString(rpcAddress));
   }
+  
+  @Override // NameNode
+  protected void setRpcServiceServerAddress(Configuration conf) {
+    conf.set(BN_SERVICE_RPC_ADDRESS_KEY, getHostPortString(serviceRPCAddress));
+  }
 
   @Override // NameNode
   protected InetSocketAddress getHttpServerAddress(Configuration conf) {
@@ -229,7 +246,7 @@ public class BackupNode extends NameNode
 
   private NamespaceInfo handshake(Configuration conf) throws IOException {
     // connect to name node
-    InetSocketAddress nnAddress = super.getRpcServerAddress(conf);
+    InetSocketAddress nnAddress = NameNode.getServiceAddress(conf, true);
     this.namenode =
       (NamenodeProtocol) RPC.waitForProxy(NamenodeProtocol.class,
           NamenodeProtocol.versionID, nnAddress, conf);
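
A BackupNode now deals with two service endpoints: the active NameNode's
address, which handshake() dials above, and its own listener. A sketch
of a configuration carrying both (the host:port values are illustrative):

    Configuration conf = new HdfsConfiguration();
    // Where the active NameNode accepts service traffic:
    conf.set("dfs.namenode.servicerpc-address", "nn.example.com:8021");
    // Where this BackupNode itself listens for service traffic:
    conf.set("dfs.namenode.backup.dnrpc-address", "backup.example.com:50101");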

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=956141&r1=956140&r2=956141&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Fri Jun 18 22:48:53 2010
@@ -136,7 +136,7 @@ public class NameNode implements Namenod
   }
   
   public long getProtocolVersion(String protocol, 
-                                 long clientVersion) throws IOException { 
+                                 long clientVersion) throws IOException {
     if (protocol.equals(ClientProtocol.class.getName())) {
       return ClientProtocol.versionID; 
     } else if (protocol.equals(DatanodeProtocol.class.getName())){
@@ -159,10 +159,18 @@ public class NameNode implements Namenod
 
   protected FSNamesystem namesystem; 
   protected NamenodeRole role;
-  /** RPC server */
+  /** RPC server. */
   protected Server server;
+  /** RPC server for HDFS services communication.
+      The BackupNode, DataNodes, and all other services
+      should connect to this server if it is
+      configured. Clients should only go to NameNode#server.
+  */
+  protected Server serviceRpcServer;
   /** RPC server address */
   protected InetSocketAddress rpcAddress = null;
+  /** Service RPC server address, used by DataNodes and other services */
+  protected InetSocketAddress serviceRPCAddress = null;
   /** httpServer */
   protected HttpServer httpServer;
   /** HTTP server address */
@@ -204,6 +212,32 @@ public class NameNode implements Namenod
     return NetUtils.createSocketAddr(address, DEFAULT_PORT);
   }
 
+  /**
+   * Set the configuration property for the service RPC address
+   * to the given address.
+   */
+  public static void setServiceAddress(Configuration conf,
+                                           String address) {
+    LOG.info("Setting ADDRESS " + address);
+    conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, address);
+  }
+  
+  /**
+   * Fetches the address for services to use when connecting to the
+   * namenode. Returns null if the service RPC address is not
+   * configured and fallback is false; with fallback true it returns
+   * the default namenode address used by both clients and services.
+   * Services here are datanodes, the backup node, and any non-client connection.
+   */
+  public static InetSocketAddress getServiceAddress(Configuration conf,
+                                                        boolean fallback) {
+    String addr = conf.get(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
+    if (addr == null || addr.isEmpty()) {
+      return fallback ? getAddress(conf) : null;
+    }
+    return getAddress(addr);
+  }
+
   public static InetSocketAddress getAddress(Configuration conf) {
     URI filesystemURI = FileSystem.getDefaultUri(conf);
     String authority = filesystemURI.getAuthority();
@@ -247,9 +281,25 @@ public class NameNode implements Namenod
     return role.equals(that);
   }
 
+  /**
+   * Given a configuration, get the address of the service RPC server.
+   * Returns null if the service RPC address is not configured.
+   */
+  protected InetSocketAddress getServiceRpcServerAddress(Configuration conf)
+    throws IOException {
+    return NameNode.getServiceAddress(conf, false);
+  }
+
   protected InetSocketAddress getRpcServerAddress(Configuration conf) throws IOException {
     return getAddress(conf);
   }
+  
+  /**
+   * Modifies the passed-in configuration to contain the service RPC address setting.
+   */
+  protected void setRpcServiceServerAddress(Configuration conf) {
+    setServiceAddress(conf, getHostPortString(serviceRPCAddress));
+  }
 
   protected void setRpcServerAddress(Configuration conf) {
     FileSystem.setDefaultUri(conf, getUri(rpcAddress));
@@ -298,7 +348,18 @@ public class NameNode implements Namenod
 
     NameNode.initMetrics(conf, this.getRole());
     loadNamesystem(conf);
-    // create rpc server 
+    // create rpc server
+    InetSocketAddress dnSocketAddr = getServiceRpcServerAddress(conf);
+    if (dnSocketAddr != null) {
+      int serviceHandlerCount =
+        conf.getInt(DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY,
+                    DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT);
+      this.serviceRpcServer = RPC.getServer(NamenodeProtocols.class, this,
+          dnSocketAddr.getHostName(), dnSocketAddr.getPort(), serviceHandlerCount,
+          false, conf, namesystem.getDelegationTokenSecretManager());
+      this.serviceRPCAddress = this.serviceRpcServer.getListenerAddress();
+      setRpcServiceServerAddress(conf);
+    }
     this.server = RPC.getServer(NamenodeProtocols.class, this,
                                 socAddr.getHostName(), socAddr.getPort(),
                                 handlerCount, false, conf, 
@@ -309,6 +370,9 @@ public class NameNode implements Namenod
 
     activate(conf);
     LOG.info(getRole() + " up at: " + rpcAddress);
+    if (serviceRPCAddress != null) {
+      LOG.info(getRole() + " service server is up at: " + serviceRPCAddress); 
+    }
   }
 
   /**
@@ -322,6 +386,9 @@ public class NameNode implements Namenod
     namesystem.activate(conf);
     startHttpServer(conf);
     server.start();  //start RPC server
+    if (serviceRpcServer != null) {
+      serviceRpcServer.start();      
+    }
     startTrashEmptier(conf);
     
     plugins = conf.getInstances("dfs.namenode.plugins", ServicePlugin.class);
@@ -471,6 +538,7 @@ public class NameNode implements Namenod
     if(namesystem != null) namesystem.close();
     if(emptier != null) emptier.interrupt();
     if(server != null) server.stop();
+    if(serviceRpcServer != null) serviceRpcServer.stop();
     if (myMetrics != null) {
       myMetrics.shutdown();
     }
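
Taken together, the NameNode may now run two RPC servers with separate
handler pools, so heavy client traffic cannot starve DataNode heartbeats
and block reports. A sketch summarizing who resolves which endpoint
(this is a summary of the patch, not code from it):

    // DFSClient, FsShell and other user-facing callers:
    InetSocketAddress clientAddr = NameNode.getAddress(conf);
    // DataNode, Balancer, SecondaryNameNode, BackupNode:
    InetSocketAddress serviceAddr = NameNode.getServiceAddress(conf, true);
    // With dfs.namenode.servicerpc-address unset the two are identical,
    // which keeps existing single-port deployments working unchanged.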

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=956141&r1=956140&r2=956141&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Fri Jun 18 22:48:53 2010
@@ -129,7 +129,7 @@ public class SecondaryNameNode implement
     
     // Create connection to the namenode.
     shouldRun = true;
-    nameNodeAddr = NameNode.getAddress(conf);
+    nameNodeAddr = NameNode.getServiceAddress(conf, true);
 
     this.conf = conf;
     this.namenode =

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDistributedFileSystem.java?rev=956141&r1=956140&r2=956141&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDistributedFileSystem.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDistributedFileSystem.java Fri Jun 18 22:48:53 2010
@@ -35,15 +35,26 @@ import org.apache.log4j.Level;
 public class TestDistributedFileSystem extends junit.framework.TestCase {
   private static final Random RAN = new Random();
 
+  private boolean dualPortTesting = false;
+  
+  private HdfsConfiguration getTestConfiguration() {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    if (dualPortTesting) {
+      conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+              "localhost:0");
+    }
+    return conf;
+  }
+
   public void testFileSystemCloseAll() throws Exception {
-    Configuration conf = new HdfsConfiguration();
+    Configuration conf = getTestConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 0, true, null);
     URI address = FileSystem.getDefaultUri(conf);
 
     try {
       FileSystem.closeAll();
 
-      conf = new HdfsConfiguration();
+      conf = getTestConfiguration();
       FileSystem.setDefaultUri(conf, address);
       FileSystem.get(conf);
       FileSystem.get(conf);
@@ -59,7 +70,7 @@ public class TestDistributedFileSystem e
    * multiple files are open.
    */
   public void testDFSClose() throws Exception {
-    Configuration conf = new HdfsConfiguration();
+    Configuration conf = getTestConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     FileSystem fileSys = cluster.getFileSystem();
 
@@ -76,7 +87,7 @@ public class TestDistributedFileSystem e
   }
 
   public void testDFSClient() throws Exception {
-    Configuration conf = new HdfsConfiguration();
+    Configuration conf = getTestConfiguration();
     MiniDFSCluster cluster = null;
 
     try {
@@ -165,7 +176,7 @@ public class TestDistributedFileSystem e
     System.out.println("seed=" + seed);
     RAN.setSeed(seed);
 
-    final Configuration conf = new HdfsConfiguration();
+    final Configuration conf = getTestConfiguration();
     conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
 
     final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
@@ -228,5 +239,15 @@ public class TestDistributedFileSystem e
         assertEquals(qfoocs, barcs);
       }
     }
+    cluster.shutdown();
+  }
+  
+  public void testAllWithDualPort() throws Exception {
+    dualPortTesting = true;
+
+    testFileSystemCloseAll();
+    testDFSClose();
+    testDFSClient();
+    testFileChecksum();
   }
 }
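
The "localhost:0" value works because the NameNode binds the service
server first and then writes the actual bound address back into the
configuration via setRpcServiceServerAddress(conf). A sketch of that
round trip (assuming MiniDFSCluster uses the configuration object it is
handed):

    conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "localhost:0");
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
    // After startup the configuration holds the real ephemeral port:
    InetSocketAddress bound = NameNode.getServiceAddress(conf, false);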

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java?rev=956141&r1=956140&r2=956141&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java Fri Jun 18 22:48:53 2010
@@ -83,10 +83,13 @@ public class TestHDFSServerPorts extends
     return System.getProperty("test.build.data", "build/test/data");
   }
   
+  public NameNode startNameNode() throws IOException {
+    return startNameNode(false);
+  }
   /**
    * Start the namenode.
    */
-  public NameNode startNameNode() throws IOException {
+  public NameNode startNameNode(boolean withService) throws IOException {
     String dataDir = getTestingDir();
     hdfsDir = new File(dataDir, "dfs");
     if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
@@ -96,6 +99,9 @@ public class TestHDFSServerPorts extends
     config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
         fileAsURI(new File(hdfsDir, "name1")).toString());
     FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
+    if (withService) {
+      NameNode.setServiceAddress(config, NAME_NODE_HOST + "0");      
+    }
     config.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, NAME_NODE_HTTP_HOST + "0");
     NameNode.format(config);
 
@@ -239,13 +245,17 @@ public class TestHDFSServerPorts extends
     return true;
   }
 
+  public void testNameNodePorts() throws Exception {
+    runTestNameNodePorts(false);
+    runTestNameNodePorts(true);
+  }
   /**
    * Verify namenode port usage.
    */
-  public void testNameNodePorts() throws Exception {
+  public void runTestNameNodePorts(boolean withService) throws Exception {
     NameNode nn = null;
     try {
-      nn = startNameNode();
+      nn = startNameNode(withService);
 
       // start another namenode on the same port
       Configuration conf2 = new HdfsConfiguration(config);
@@ -265,7 +275,18 @@ public class TestHDFSServerPorts extends
       // different http port
       conf2.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, NAME_NODE_HTTP_HOST + "0");
       started = canStartNameNode(conf2);
-      assertTrue(started); // should start now
+
+      if (withService) {
+        assertFalse("Should've failed on service port", started);
+
+        // reset conf2 since NameNode modifies it
+        FileSystem.setDefaultUri(conf2, "hdfs://"+NAME_NODE_HOST + "0");
+        conf2.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, NAME_NODE_HTTP_HOST + "0");
+        // Set Service address      
+        conf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, NAME_NODE_HOST + "0");
+        started = canStartNameNode(conf2);        
+      }
+      assertTrue(started);
     } finally {
       stopNameNode(nn);
     }
@@ -358,7 +379,8 @@ public class TestHDFSServerPorts extends
         LOG.info("= Starting 2 on: " + 
                                   backup_config.get(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
 
-        assertTrue(canStartBackupNode(backup_config)); // should start now
+        boolean started = canStartBackupNode(backup_config);
+        assertTrue("Backup Namenode should've started", started); // should start now
       } finally {
         stopNameNode(nn);
       }
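
Condensed, the dual-port assertions above say: even once the name and
HTTP ports are distinct, a second NameNode must fail while its service
address collides with the running one's, and must start once the
service address moves to a free port (a sketch using the test's own
helpers and NAME_NODE_HOST convention):

    assertFalse("Should've failed on service port", canStartNameNode(conf2));
    conf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
              NAME_NODE_HOST + "0");
    assertTrue(canStartNameNode(conf2));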

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestRestartDFS.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestRestartDFS.java?rev=956141&r1=956140&r2=956141&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestRestartDFS.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestRestartDFS.java Fri Jun 18 22:48:53 2010
@@ -29,9 +29,7 @@ import org.apache.hadoop.fs.Path;
  * A JUnit test for checking if restarting DFS preserves integrity.
  */
 public class TestRestartDFS extends TestCase {
-  /** check if DFS remains in proper condition after a restart */
-  public void testRestartDFS() throws Exception {
-    final Configuration conf = new HdfsConfiguration();
+  public void runTests(Configuration conf, boolean serviceTest) throws Exception {
     MiniDFSCluster cluster = null;
     DFSTestUtil files = new DFSTestUtil("TestRestartDFS", 20, 3, 8*1024);
 
@@ -44,6 +42,10 @@ public class TestRestartDFS extends Test
     FileStatus dirstatus;
 
     try {
+      if (serviceTest) {
+        conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+                 "localhost:0");
+      }
       cluster = new MiniDFSCluster(conf, 4, true, null);
       FileSystem fs = cluster.getFileSystem();
       files.createFiles(fs, dir);
@@ -58,8 +60,12 @@ public class TestRestartDFS extends Test
       if (cluster != null) { cluster.shutdown(); }
     }
     try {
+      if (serviceTest) {
+        conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+                 "localhost:0");
+      }
       // Here we restart the MiniDFScluster without formatting namenode
-      cluster = new MiniDFSCluster(conf, 4, false, null);
+      cluster = new MiniDFSCluster(conf, 4, false, null); 
       FileSystem fs = cluster.getFileSystem();
       assertTrue("Filesystem corrupted after restart.",
                  files.checkFiles(fs, dir));
@@ -77,6 +83,10 @@ public class TestRestartDFS extends Test
       if (cluster != null) { cluster.shutdown(); }
     }
     try {
+      if (serviceTest) {
+        conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+                 "localhost:0");
+      }
       // This is a second restart to check that after the first restart
       // the image written in parallel to both places did not get corrupted
       cluster = new MiniDFSCluster(conf, 4, false, null);
@@ -98,4 +108,17 @@ public class TestRestartDFS extends Test
       if (cluster != null) { cluster.shutdown(); }
     }
   }
+  /** Check if DFS remains in proper condition after a restart. */
+  public void testRestartDFS() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    runTests(conf, false);
+  }
+  
+  /** Check if DFS remains in proper condition after a restart.
+   * This rerun has both RPC ports enabled in the namenode.
+   */
+  public void testRestartDualPortDFS() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    runTests(conf, true);
+  }
 }