Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2011/10/27 21:57:51 UTC

svn commit: r1189979 - in /hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/server/namenode/ src/test/java/org/apache/hadoop/hdfs/ src/test/...

Author: szetszwo
Date: Thu Oct 27 19:57:50 2011
New Revision: 1189979

URL: http://svn.apache.org/viewvc?rev=1189979&view=rev
Log:
svn merge -c 1177100 from trunk for HDFS-2355.

Modified:
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java

Propchange: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Oct 27 19:57:50 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:1161777,1161781,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1166402,1167383,1167662,1170085,1170379,1170459,1170996,1171136,1171297,1171379,1171611,1172916,1173402,1173468,1175113,1176178,1176550,1176719,1176729,1176733,1177487,1177531,1177859,1177864,1177905,1179169,1179856,1179861,1180757,1183081,1183098,1183175,1183554,1186508,1187140,1189028,1189355,1189360,1189546,1189932
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:1161777,1161781,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1166402,1167383,1167662,1170085,1170379,1170459,1170996,1171136,1171297,1171379,1171611,1172916,1173402,1173468,1175113,1176178,1176550,1176719,1176729,1176733,1177100,1177487,1177531,1177859,1177864,1177905,1179169,1179856,1179861,1180757,1183081,1183098,1183175,1183554,1186508,1187140,1189028,1189355,1189360,1189546,1189932
 /hadoop/core/branches/branch-0.19/hdfs:713112
 /hadoop/hdfs/branches/HDFS-1052:987665-1095512
 /hadoop/hdfs/branches/HDFS-265:796829-820463

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1189979&r1=1189978&r2=1189979&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Oct 27 19:57:50 2011
@@ -739,6 +739,9 @@ Release 0.23.0 - Unreleased
     HDFS-1869. mkdirs should use the supplied permission for all of the created
     directories. (Daryn Sharp via szetszwo)
 
+    HDFS-2355. Federation: enable using the same configuration file across 
+    all the nodes in the cluster. (suresh)
+
   OPTIMIZATIONS
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image

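For illustration, a minimal sketch of the shared configuration this change enables. It is
not part of the commit; the class name, the nameservice IDs "ns1"/"ns2" and the addresses
are hypothetical placeholders, while the DFSUtil and DFSConfigKeys calls are the ones added
or used elsewhere in this change.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class SharedFederationConfExample {
      public static void main(String[] args) {
        // One configuration, distributed unchanged to every node in the cluster.
        Configuration conf = new HdfsConfiguration();
        conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "ns1,ns2");
        // Per-nameservice RPC addresses; "127.0.0.1" stands in for this node and
        // "192.0.2.1" for the other namenode host.
        conf.set(DFSUtil.getNameServiceIdKey(
            DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1"), "127.0.0.1:9000");
        conf.set(DFSUtil.getNameServiceIdKey(
            DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "ns2"), "192.0.2.1:9000");
        // Each namenode infers its own nameservice Id by matching the configured
        // addresses against its local interfaces, so DFS_FEDERATION_NAMESERVICE_ID
        // no longer has to be set differently on every node.
        System.out.println(DFSUtil.getNamenodeNameServiceId(conf)); // "ns1" here
      }
    }
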
Propchange: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Oct 27 19:57:50 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:1161777,1161781,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1166402,1167383,1167662,1170085,1170379,1170459,1170996,1171136,1171297,1171379,1171611,1172916,1173402,1173468,1175113,1176178,1176550,1176719,1176729,1176733,1177487,1177531,1177859,1177864,1177905,1179169,1179856,1179861,1180757,1183081,1183098,1183175,1183554,1186508,1187140,1189028,1189355,1189360,1189546,1189932
+/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:1161777,1161781,1162188,1162421,1162491,1162499,1162613,1162928,1162954,1162979,1163050,1163069,1163081,1163490,1163768,1164255,1164301,1164339,1166402,1167383,1167662,1170085,1170379,1170459,1170996,1171136,1171297,1171379,1171611,1172916,1173402,1173468,1175113,1176178,1176550,1176719,1176729,1176733,1177100,1177487,1177531,1177859,1177864,1177905,1179169,1179856,1179861,1180757,1183081,1183098,1183175,1183554,1186508,1187140,1189028,1189355,1189360,1189546,1189932
 /hadoop/core/branches/branch-0.19/hdfs/src/java:713112
 /hadoop/core/trunk/src/hdfs:776175-785643,785929-786278
 /hadoop/hdfs/branches/HDFS-1052/src/java:987665-1095512

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=1189979&r1=1189978&r2=1189979&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java Thu Oct 27 19:57:50 2011
@@ -38,6 +38,7 @@ import java.util.Random;
 import java.util.StringTokenizer;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
@@ -576,17 +577,6 @@ public class DFSUtil {
     }
   }
   
-  /**
-   * Returns the configured nameservice Id
-   * 
-   * @param conf
-   *          Configuration object to lookup the nameserviceId
-   * @return nameserviceId string from conf
-   */
-  public static String getNameServiceId(Configuration conf) {
-    return conf.get(DFS_FEDERATION_NAMESERVICE_ID);
-  }
-  
   /** Return used as percentage of capacity */
   public static float getPercentUsed(long used, long capacity) {
     return capacity <= 0 ? 100 : ((float)used * 100.0f)/(float)capacity; 
@@ -696,4 +686,77 @@ public class DFSUtil {
         ClientDatanodeProtocol.versionID, addr, ticket, confWithNoIpcIdle,
         NetUtils.getDefaultSocketFactory(conf), socketTimeout);
   }
+  
+  /**
+   * Get name service Id for the {@link NameNode} based on namenode RPC address
+   * matching the local node address.
+   */
+  public static String getNamenodeNameServiceId(Configuration conf) {
+    return getNameServiceId(conf, DFS_NAMENODE_RPC_ADDRESS_KEY);
+  }
+  
+  /**
+   * Get name service Id for the BackupNode based on backup node RPC address
+   * matching the local node address.
+   */
+  public static String getBackupNameServiceId(Configuration conf) {
+    return getNameServiceId(conf, DFS_NAMENODE_BACKUP_ADDRESS_KEY);
+  }
+  
+  /**
+   * Get name service Id for the secondary node based on secondary http address
+   * matching the local node address.
+   */
+  public static String getSecondaryNameServiceId(Configuration conf) {
+    return getNameServiceId(conf, DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
+  }
+  
+  /**
+   * Get the nameservice Id by matching the {@code addressKey} with the
+   * address of the local node.
+   * 
+   * If {@link DFSConfigKeys#DFS_FEDERATION_NAMESERVICE_ID} is not specifically
+   * configured, this method determines the nameservice Id by matching the local
+   * node's address with the configured addresses. When a match is found, it
+   * returns the nameservice Id from the corresponding configuration key.
+   * 
+   * @param conf Configuration
+   * @param addressKey configuration key to get the address.
+   * @return name service Id on success, null if federation is not configured.
+   * @throws HadoopIllegalArgumentException on error
+   */
+  private static String getNameServiceId(Configuration conf, String addressKey) {
+    String nameserviceId = conf.get(DFS_FEDERATION_NAMESERVICE_ID);
+    if (nameserviceId != null) {
+      return nameserviceId;
+    }
+    
+    Collection<String> ids = getNameServiceIds(conf);
+    if (ids == null || ids.size() == 0) {
+      // Not federation configuration, hence no nameservice Id
+      return null;
+    }
+    
+    // Match the rpc address with that of local address
+    int found = 0;
+    for (String id : ids) {
+      String addr = conf.get(getNameServiceIdKey(addressKey, id));
+      InetSocketAddress s = NetUtils.createSocketAddr(addr);
+      if (NetUtils.isLocalAddress(s.getAddress())) {
+        nameserviceId = id;
+        found++;
+      }
+    }
+    if (found > 1) { // Only one address must match the local address
+      throw new HadoopIllegalArgumentException(
+          "Configuration has multiple RPC addresses that match "
+              + "the local node's address. Please configure the system with "
+              + "the parameter " + DFS_FEDERATION_NAMESERVICE_ID);
+    }
+    if (found == 0) {
+      throw new HadoopIllegalArgumentException("No address configured under "
+          + addressKey + " matches the local node's address for any name service Id");
+    }
+    return nameserviceId;
+  }
 }

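The new DFSUtil.getNameServiceId(conf, addressKey) above fails fast when the address match
is ambiguous. A small sketch of that behaviour, not part of the commit and mirroring the
testGetNameServiceIdException test added below; the class name and nameservice IDs are
hypothetical:

    import org.apache.hadoop.HadoopIllegalArgumentException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class AmbiguousNameServiceExample {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "ns1,ns2");
        // Both RPC addresses resolve to the local host, so neither nameservice
        // can be picked unambiguously.
        conf.set(DFSUtil.getNameServiceIdKey(
            DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1"), "localhost:9000");
        conf.set(DFSUtil.getNameServiceIdKey(
            DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "ns2"), "localhost:9001");
        try {
          DFSUtil.getNamenodeNameServiceId(conf);
        } catch (HadoopIllegalArgumentException e) {
          // Expected: such a node must set DFS_FEDERATION_NAMESERVICE_ID explicitly
          // to disambiguate.
          System.err.println("Ambiguous federation configuration: " + e.getMessage());
        }
      }
    }
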
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java?rev=1189979&r1=1189978&r2=1189979&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java Thu Oct 27 19:57:50 2011
@@ -25,6 +25,7 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
@@ -386,4 +387,9 @@ public class BackupNode extends NameNode
   String getClusterId() {
     return clusterId;
   }
+  
+  @Override
+  protected String getNameServiceId(Configuration conf) {
+    return DFSUtil.getBackupNameServiceId(conf);
+  }
 }

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1189979&r1=1189978&r2=1189979&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Thu Oct 27 19:57:50 2011
@@ -30,6 +30,7 @@ import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -453,11 +454,14 @@ public class NameNode {
       throws IOException { 
     this.role = role;
     try {
-      initializeGenericKeys(conf);
+      initializeGenericKeys(conf, getNameServiceId(conf));
       initialize(conf);
     } catch (IOException e) {
       this.stop();
       throw e;
+    } catch (HadoopIllegalArgumentException e) {
+      this.stop();
+      throw e;
     }
   }
 
@@ -762,16 +766,16 @@ public class NameNode {
    * @param conf
    *          Configuration object to lookup specific key and to set the value
    *          to the key passed. Note the conf object is modified
+   * @param nameserviceId name service Id
    * @see DFSUtil#setGenericConf(Configuration, String, String...)
    */
-  public static void initializeGenericKeys(Configuration conf) {
-    final String nameserviceId = DFSUtil.getNameServiceId(conf);
+  public static void initializeGenericKeys(Configuration conf, String
+      nameserviceId) {
     if ((nameserviceId == null) || nameserviceId.isEmpty()) {
       return;
     }
     
     DFSUtil.setGenericConf(conf, nameserviceId, NAMESERVICE_SPECIFIC_KEYS);
-    
     if (conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) != null) {
       URI defaultUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
           + conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY));
@@ -779,6 +783,14 @@ public class NameNode {
     }
   }
     
+  /** 
+   * Get the name service Id for the node
+   * @return name service Id or null if federation is not configured
+   */
+  protected String getNameServiceId(Configuration conf) {
+    return DFSUtil.getNamenodeNameServiceId(conf);
+  }
+  
   /**
    */
   public static void main(String argv[]) throws Exception {
@@ -792,5 +804,4 @@ public class NameNode {
       System.exit(-1);
     }
   }
-  
 }

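A sketch of how the reworked initializeGenericKeys is now driven by the caller. Not part of
the commit; the class name, nameservice ID and address are hypothetical, and it is assumed
that DFS_NAMENODE_RPC_ADDRESS_KEY is among NAMESERVICE_SPECIFIC_KEYS, consistent with the
RPC-address handling in the hunk above and the testConfModification changes below:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.server.namenode.NameNode;

    public class GenericKeysExample {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "ns1");
        conf.set(DFSUtil.getNameServiceIdKey(
            DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1"), "127.0.0.1:9000");

        // The nameservice Id is no longer looked up inside initializeGenericKeys;
        // each node type resolves it first (here via the namenode RPC address).
        String nsId = DFSUtil.getNamenodeNameServiceId(conf);
        NameNode.initializeGenericKeys(conf, nsId);

        // The nameservice-specific value has been copied onto the generic key, and
        // the default filesystem URI is derived from the RPC address as shown above.
        System.out.println(conf.get(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY));
        System.out.println(conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY));
      }
    }

BackupNode and SecondaryNameNode follow the same pattern through
DFSUtil.getBackupNameServiceId and DFSUtil.getSecondaryNameServiceId, as the changes above
and below show.
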
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1189979&r1=1189978&r2=1189979&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Thu Oct 27 19:57:50 2011
@@ -38,10 +38,12 @@ import org.apache.commons.cli.ParseExcep
 import org.apache.commons.cli.PosixParser;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -171,12 +173,17 @@ public class SecondaryNameNode implement
   public SecondaryNameNode(Configuration conf,
       CommandLineOpts commandLineOpts) throws IOException {
     try {
-      NameNode.initializeGenericKeys(conf);
+      NameNode.initializeGenericKeys(conf,
+          DFSUtil.getSecondaryNameServiceId(conf));
       initialize(conf, commandLineOpts);
     } catch(IOException e) {
       shutdown();
       LOG.fatal("Failed to start secondary namenode. ", e);
       throw e;
+    } catch(HadoopIllegalArgumentException e) {
+      shutdown();
+      LOG.fatal("Failed to start secondary namenode. ", e);
+      throw e;
     }
   }
   

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java?rev=1189979&r1=1189978&r2=1189979&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java Thu Oct 27 19:57:50 2011
@@ -29,8 +29,7 @@ import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
 
-import junit.framework.Assert;
-
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -40,8 +39,7 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
-import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION;
-
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 
 public class TestDFSUtil {
   /**
@@ -76,79 +74,141 @@ public class TestDFSUtil {
       }
     }
 
-    assertTrue("expected 1 corrupt files but got " + corruptCount, 
-               corruptCount == 1);
-    
+    assertTrue("expected 1 corrupt files but got " + corruptCount,
+        corruptCount == 1);
+
     // test an empty location
     bs = DFSUtil.locatedBlocks2Locations(new LocatedBlocks());
     assertEquals(0, bs.length);
   }
 
-  /** 
-   * Test for
-   * {@link DFSUtil#getNameServiceIds(Configuration)}
-   * {@link DFSUtil#getNameServiceId(Configuration)}
-   * {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}
+
+  private Configuration setupAddress(String key) {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1");
+    conf.set(DFSUtil.getNameServiceIdKey(key, "nn1"), "localhost:9000");
+    return conf;
+  }
+
+  /**
+   * Test {@link DFSUtil#getNamenodeNameServiceId(Configuration)} to ensure
+   * the nameserviceId from the configuration is returned
    */
   @Test
-  public void testMultipleNamenodes() throws IOException {
+  public void getNameServiceId() {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.set(DFS_FEDERATION_NAMESERVICE_ID, "nn1");
+    assertEquals("nn1", DFSUtil.getNamenodeNameServiceId(conf));
+  }
+  
+  /**
+   * Test {@link DFSUtil#getNamenodeNameServiceId(Configuration)} to ensure
+   * nameserviceId for the namenode is determined by matching the address with
+   * the local node's address
+   */
+  @Test
+  public void getNameNodeNameServiceId() {
+    Configuration conf = setupAddress(DFS_NAMENODE_RPC_ADDRESS_KEY);
+    assertEquals("nn1", DFSUtil.getNamenodeNameServiceId(conf));
+  }
+
+  /**
+   * Test {@link DFSUtil#getBackupNameServiceId(Configuration)} to ensure
+   * nameserviceId for the backup node is determined by matching the address
+   * with the local node's address
+   */
+  @Test
+  public void getBackupNameServiceId() {
+    Configuration conf = setupAddress(DFS_NAMENODE_BACKUP_ADDRESS_KEY);
+    assertEquals("nn1", DFSUtil.getBackupNameServiceId(conf));
+  }
+
+  /**
+   * Test {@link DFSUtil#getSecondaryNameServiceId(Configuration)} to ensure
+   * nameserviceId for the secondary node is determined by matching the address
+   * with the local node's address
+   */
+  @Test
+  public void getSecondaryNameServiceId() {
+    Configuration conf = setupAddress(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
+    assertEquals("nn1", DFSUtil.getSecondaryNameServiceId(conf));
+  }
+
+  /**
+   * Test {@link DFSUtil#getNameServiceId(Configuration, String)} to ensure
+   * exception is thrown when multiple rpc addresses match the local node's
+   * address
+   */
+  @Test(expected = HadoopIllegalArgumentException.class)
+  public void testGetNameServiceIdException() {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
+    conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
+        "localhost:9000");
+    conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
+        "localhost:9001");
+    DFSUtil.getNamenodeNameServiceId(conf);
+    fail("Expected exception is not thrown");
+  }
+
+  /**
+   * Test {@link DFSUtil#getNameServiceIds(Configuration)}
+   */
+  @Test
+  public void testGetNameServiceIds() {
     HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
-    
-    // Test - The configured nameserviceIds are returned
+    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
     Collection<String> nameserviceIds = DFSUtil.getNameServiceIds(conf);
     Iterator<String> it = nameserviceIds.iterator();
     assertEquals(2, nameserviceIds.size());
     assertEquals("nn1", it.next().toString());
     assertEquals("nn2", it.next().toString());
-    
-    // Tests default nameserviceId is returned
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, "nn1");
-    assertEquals("nn1", DFSUtil.getNameServiceId(conf));
-    
+  }
+
+  /**
+   * Test for {@link DFSUtil#getNNServiceRpcAddresses(Configuration)} and
+   * {@link DFSUtil#getNameServiceIdFromAddress(Configuration,
+   * InetSocketAddress, String...)}
+   */
+  @Test
+  public void testMultipleNamenodes() throws IOException {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
     // Test - configured list of namenodes are returned
     final String NN1_ADDRESS = "localhost:9000";
     final String NN2_ADDRESS = "localhost:9001";
     final String NN3_ADDRESS = "localhost:9002";
-    conf.set(DFSUtil.getNameServiceIdKey(
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"), NN1_ADDRESS);
-    conf.set(DFSUtil.getNameServiceIdKey(
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"), NN2_ADDRESS);
-    
-    Collection<InetSocketAddress> nnAddresses = 
-      DFSUtil.getNNServiceRpcAddresses(conf);
+    conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
+        NN1_ADDRESS);
+    conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
+        NN2_ADDRESS);
+
+    Collection<InetSocketAddress> nnAddresses = DFSUtil
+        .getNNServiceRpcAddresses(conf);
     assertEquals(2, nnAddresses.size());
     Iterator<InetSocketAddress> iterator = nnAddresses.iterator();
-    assertEquals(2, nameserviceIds.size());
     InetSocketAddress addr = iterator.next();
     assertEquals("localhost", addr.getHostName());
     assertEquals(9000, addr.getPort());
     addr = iterator.next();
     assertEquals("localhost", addr.getHostName());
     assertEquals(9001, addr.getPort());
-    
+
     // Test - can look up nameservice ID from service address
-    InetSocketAddress testAddress1 = NetUtils.createSocketAddr(NN1_ADDRESS);
-    String nameserviceId = DFSUtil.getNameServiceIdFromAddress(
-        conf, testAddress1,
-        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
-    assertEquals("nn1", nameserviceId);
-    InetSocketAddress testAddress2 = NetUtils.createSocketAddr(NN2_ADDRESS);
-    nameserviceId = DFSUtil.getNameServiceIdFromAddress(
-        conf, testAddress2,
-        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
-    assertEquals("nn2", nameserviceId);
-    InetSocketAddress testAddress3 = NetUtils.createSocketAddr(NN3_ADDRESS);
-    nameserviceId = DFSUtil.getNameServiceIdFromAddress(
-        conf, testAddress3,
-        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
-    assertNull(nameserviceId);
+    checkNameServiceId(conf, NN1_ADDRESS, "nn1");
+    checkNameServiceId(conf, NN2_ADDRESS, "nn2");
+    checkNameServiceId(conf, NN3_ADDRESS, null);
   }
-  
-  /** 
+
+  public void checkNameServiceId(Configuration conf, String addr,
+      String expectedNameServiceId) {
+    InetSocketAddress s = NetUtils.createSocketAddr(addr);
+    String nameserviceId = DFSUtil.getNameServiceIdFromAddress(conf, s,
+        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
+    assertEquals(expectedNameServiceId, nameserviceId);
+  }
+
+  /**
    * Test for
    * {@link DFSUtil#isDefaultNamenodeAddress(Configuration, InetSocketAddress, String...)}
    */
@@ -157,27 +217,25 @@ public class TestDFSUtil {
     HdfsConfiguration conf = new HdfsConfiguration();
     final String DEFAULT_ADDRESS = "localhost:9000";
     final String NN2_ADDRESS = "localhost:9001";
-    conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, DEFAULT_ADDRESS);
-    
+    conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, DEFAULT_ADDRESS);
+
     InetSocketAddress testAddress1 = NetUtils.createSocketAddr(DEFAULT_ADDRESS);
     boolean isDefault = DFSUtil.isDefaultNamenodeAddress(conf, testAddress1,
-        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
+        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
     assertTrue(isDefault);
     InetSocketAddress testAddress2 = NetUtils.createSocketAddr(NN2_ADDRESS);
     isDefault = DFSUtil.isDefaultNamenodeAddress(conf, testAddress2,
-        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
+        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
     assertFalse(isDefault);
   }
-  
+
   /** Tests to ensure default namenode is used as fallback */
   @Test
   public void testDefaultNamenode() throws IOException {
     HdfsConfiguration conf = new HdfsConfiguration();
     final String hdfs_default = "hdfs://localhost:9999/";
-    conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, hdfs_default);
-    // If DFSConfigKeys.DFS_FEDERATION_NAMESERVICES is not set, verify that 
+    conf.set(FS_DEFAULT_NAME_KEY, hdfs_default);
+    // If DFS_FEDERATION_NAMESERVICES is not set, verify that
     // default namenode address is returned.
     List<InetSocketAddress> addrList = DFSUtil.getNNServiceRpcAddresses(conf);
     assertEquals(1, addrList.size());
@@ -191,26 +249,26 @@ public class TestDFSUtil {
   @Test
   public void testConfModification() throws IOException {
     final HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "nn1");
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, "nn1");
-    final String nameserviceId = DFSUtil.getNameServiceId(conf);
-    
+    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1");
+    conf.set(DFS_FEDERATION_NAMESERVICE_ID, "nn1");
+    final String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
+
     // Set the nameservice specific keys with nameserviceId in the config key
     for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
       // Note: value is same as the key
       conf.set(DFSUtil.getNameServiceIdKey(key, nameserviceId), key);
     }
-    
+
     // Initialize generic keys from specific keys
-    NameNode.initializeGenericKeys(conf);
-    
+    NameNode.initializeGenericKeys(conf, nameserviceId);
+
     // Retrieve the keys without nameserviceId and Ensure generic keys are set
     // to the correct value
     for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
       assertEquals(key, conf.get(key));
     }
   }
-  
+
   /**
    * Tests for empty configuration, an exception is thrown from
    * {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}
@@ -238,16 +296,16 @@ public class TestDFSUtil {
     } catch (IOException expected) {
     }
   }
-  
+
   @Test
-  public void testGetServerInfo(){
+  public void testGetServerInfo() {
     HdfsConfiguration conf = new HdfsConfiguration();
     conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
     UserGroupInformation.setConfiguration(conf);
     String httpsport = DFSUtil.getInfoServer(null, conf, true);
-    Assert.assertEquals("0.0.0.0:50470", httpsport);
+    assertEquals("0.0.0.0:50470", httpsport);
     String httpport = DFSUtil.getInfoServer(null, conf, false);
-    Assert.assertEquals("0.0.0.0:50070", httpport);
+    assertEquals("0.0.0.0:50070", httpport);
   }
 
 }
\ No newline at end of file

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java?rev=1189979&r1=1189978&r2=1189979&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java Thu Oct 27 19:57:50 2011
@@ -96,7 +96,8 @@ public class TestMulitipleNNDataBlockSca
 
       String bpidToShutdown = cluster.getNamesystem(2).getBlockPoolId();
       for (int i = 0; i < 2; i++) {
-        String nsId = DFSUtil.getNameServiceId(cluster.getConfiguration(i));
+        String nsId = DFSUtil.getNamenodeNameServiceId(cluster
+            .getConfiguration(i));
         namenodesBuilder.append(nsId);
         namenodesBuilder.append(",");
       }
@@ -116,7 +117,7 @@ public class TestMulitipleNNDataBlockSca
         LOG.info(ex.getMessage());
       }
 
-      namenodesBuilder.append(DFSUtil.getNameServiceId(cluster
+      namenodesBuilder.append(DFSUtil.getNamenodeNameServiceId(cluster
           .getConfiguration(2)));
       conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, namenodesBuilder
           .toString());