Posted to common-commits@hadoop.apache.org by ar...@apache.org on 2017/09/14 18:17:52 UTC

[1/4] hadoop git commit: Revert "HDFS-10391. Always enable NameNode service RPC port. Contributed by Gergely Novak."

Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 ecad18480 -> 5f6abb280
  refs/heads/trunk 66ca0a654 -> 65a941008


http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
index 4d04970..b3bb3dd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
@@ -171,18 +171,15 @@ public class TestEditLogTailer {
     for (int i = 0; i < 5; i++) {
       try {
         // Have to specify IPC ports so the NNs can talk to each other.
-        int[] ports = ServerSocketUtil.getPorts(6);
+        int[] ports = ServerSocketUtil.getPorts(3);
         MiniDFSNNTopology topology = new MiniDFSNNTopology()
             .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
                 .addNN(new MiniDFSNNTopology.NNConf("nn1")
-                    .setIpcPort(ports[0])
-                    .setServicePort(ports[1]))
+                    .setIpcPort(ports[0]))
                 .addNN(new MiniDFSNNTopology.NNConf("nn2")
-                    .setIpcPort(ports[2])
-                    .setServicePort(ports[3]))
+                    .setIpcPort(ports[1]))
                 .addNN(new MiniDFSNNTopology.NNConf("nn3")
-                    .setIpcPort(ports[4])
-                    .setServicePort(ports[5])));
+                    .setIpcPort(ports[2])));
 
         cluster = new MiniDFSCluster.Builder(conf)
           .nnTopology(topology)
@@ -222,14 +219,11 @@ public class TestEditLogTailer {
     MiniDFSNNTopology topology = new MiniDFSNNTopology()
         .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
             .addNN(new MiniDFSNNTopology.NNConf("nn1")
-                .setIpcPort(ServerSocketUtil.getPort(0, 100))
-                .setServicePort(ServerSocketUtil.getPort(0, 100)))
+                .setIpcPort(ServerSocketUtil.getPort(0, 100)))
             .addNN(new MiniDFSNNTopology.NNConf("nn2")
-                .setIpcPort(ServerSocketUtil.getPort(0, 100))
-                .setServicePort(ServerSocketUtil.getPort(0, 100)))
+                .setIpcPort(ServerSocketUtil.getPort(0, 100)))
             .addNN(new MiniDFSNNTopology.NNConf("nn3")
-                .setIpcPort(ServerSocketUtil.getPort(0, 100))
-                .setServicePort(ServerSocketUtil.getPort(0, 100))));
+                .setIpcPort(ServerSocketUtil.getPort(0, 100))));
 
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(topology)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
index f2274f9..f1f5793 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hdfs.tools;
 
-import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -180,11 +179,9 @@ public class TestDFSHAAdmin {
     Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol)
         .getServiceStatus();
     assertEquals(0, runTool("-getAllServiceState"));
-    assertOutputContains(String.format("%-50s %-10s", (HOST_A + ":" +
-            DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT),
+    assertOutputContains(String.format("%-50s %-10s", (HOST_A + ":" + 12345),
         STANDBY_READY_RESULT.getState()));
-    assertOutputContains(String.format("%-50s %-10s", (HOST_B + ":" +
-            DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT),
+    assertOutputContains(String.format("%-50s %-10s", (HOST_B + ":" + 12345),
         STANDBY_READY_RESULT.getState()));
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
index d49a3cf..a21a31d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
@@ -77,7 +77,7 @@ public class TestDFSHAAdminMiniCluster {
     tool.setErrOut(new PrintStream(errOutBytes));
     cluster.waitActive();
     
-    nn1Port = cluster.getNameNodeServicePort(0);
+    nn1Port = cluster.getNameNodePort(0);
   }
 
   @After

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
index 9943260..bbb787e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
@@ -88,11 +88,9 @@ public class TestDFSZKFailoverController extends ClientBaseWithFixes {
     MiniDFSNNTopology topology = new MiniDFSNNTopology()
     .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
         .addNN(new MiniDFSNNTopology.NNConf("nn1")
-            .setIpcPort(ServerSocketUtil.getPort(10021, 100))
-            .setServicePort(ServerSocketUtil.getPort(10025, 100)))
+            .setIpcPort(ServerSocketUtil.getPort(10021, 100)))
         .addNN(new MiniDFSNNTopology.NNConf("nn2")
-            .setIpcPort(ServerSocketUtil.getPort(10022, 100))
-            .setServicePort(ServerSocketUtil.getPort(10026, 100))));
+            .setIpcPort(ServerSocketUtil.getPort(10022, 100))));
     cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(topology)
         .numDataNodes(0)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java
index 36d57a9..942719e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java
@@ -24,7 +24,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
-import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
@@ -122,13 +121,13 @@ public class TestGetConf {
       TestType type, HdfsConfiguration conf) throws IOException {
     switch (type) {
     case NAMENODE:
-      return DFSUtil.getNNServiceRpcAddresses(conf);
+      return DFSUtil.getNNServiceRpcAddressesForCluster(conf);
     case BACKUP:
       return DFSUtil.getBackupNodeAddresses(conf);
     case SECONDARY:
       return DFSUtil.getSecondaryNameNodeAddresses(conf);
     case NNRPCADDRESSES:
-      return DFSUtil.getNNServiceRpcAddresses(conf);
+      return DFSUtil.getNNServiceRpcAddressesForCluster(conf);
     }
     return null;
   }
@@ -279,12 +278,10 @@ public class TestGetConf {
   public void testNonFederation() throws Exception {
     HdfsConfiguration conf = new HdfsConfiguration(false);
   
-    // Returned namenode address should match the default service address
+    // Returned namenode address should match default address
     conf.set(FS_DEFAULT_NAME_KEY, "hdfs://localhost:1000");
-    verifyAddresses(conf, TestType.NAMENODE, false, "localhost:" +
-        DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT);
-    verifyAddresses(conf, TestType.NNRPCADDRESSES, true, "localhost:" +
-        DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT);
+    verifyAddresses(conf, TestType.NAMENODE, false, "localhost:1000");
+    verifyAddresses(conf, TestType.NNRPCADDRESSES, true, "localhost:1000");
   
     // Returned address should match backupnode RPC address
     conf.set(DFS_NAMENODE_BACKUP_ADDRESS_KEY,"localhost:1001");
@@ -300,14 +297,12 @@ public class TestGetConf {
     conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "localhost:1001");
     verifyAddresses(conf, TestType.NAMENODE, false, "localhost:1000");
     verifyAddresses(conf, TestType.NNRPCADDRESSES, true, "localhost:1000");
-
-    // Returned namenode address should match the default service address
+  
+    // Returned address should match RPC address
     conf = new HdfsConfiguration();
     conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "localhost:1001");
-    verifyAddresses(conf, TestType.NAMENODE, false, "localhost:" +
-        DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT);
-    verifyAddresses(conf, TestType.NNRPCADDRESSES, true, "localhost:" +
-        DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT);
+    verifyAddresses(conf, TestType.NAMENODE, false, "localhost:1001");
+    verifyAddresses(conf, TestType.NNRPCADDRESSES, true, "localhost:1001");
   }
 
   /**
@@ -335,6 +330,23 @@ public class TestGetConf {
     verifyAddresses(conf, TestType.BACKUP, false, backupAddresses);
     verifyAddresses(conf, TestType.SECONDARY, false, secondaryAddresses);
     verifyAddresses(conf, TestType.NNRPCADDRESSES, true, nnAddresses);
+  
+    // Test to ensure namenode, backup, secondary namenode addresses and
+    // namenode rpc addresses are returned from federation configuration.
+    // Returned namenode addresses are based on regular RPC address
+    // in the absence of service RPC address.
+    conf = new HdfsConfiguration(false);
+    setupNameServices(conf, nsCount);
+    nnAddresses = setupAddress(conf,
+        DFS_NAMENODE_RPC_ADDRESS_KEY, nsCount, 1000);
+    backupAddresses = setupAddress(conf,
+        DFS_NAMENODE_BACKUP_ADDRESS_KEY, nsCount, 2000);
+    secondaryAddresses = setupAddress(conf,
+        DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, nsCount, 3000);
+    verifyAddresses(conf, TestType.NAMENODE, false, nnAddresses);
+    verifyAddresses(conf, TestType.BACKUP, false, backupAddresses);
+    verifyAddresses(conf, TestType.SECONDARY, false, secondaryAddresses);
+    verifyAddresses(conf, TestType.NNRPCADDRESSES, true, nnAddresses);
   }
   
   @Test(timeout=10000)
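
The fallback these assertions exercise can be sketched in a few lines
(illustrative only, assuming TestGetConf's existing static imports; it
condenses the restored DFSUtil behavior shown in the [4/4] diff below):

    // With no dfs.namenode.servicerpc-address configured, the restored
    // lookup resolves namenodes from dfs.namenode.rpc-address (or
    // fs.defaultFS as a last resort) instead of forcing port 9840.
    HdfsConfiguration conf = new HdfsConfiguration(false);
    conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "localhost:1001");
    Map<String, Map<String, InetSocketAddress>> nns =
        DFSUtil.getNNServiceRpcAddressesForCluster(conf);
    // nns now maps the default nameservice to localhost:1001, matching
    // the "localhost:1001" assertions above.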




[3/4] hadoop git commit: Revert "HDFS-10391. Always enable NameNode service RPC port. Contributed by Gergely Novak."

Posted by ar...@apache.org.
(The branch-3.0 commit 5f6abb28 applies the same diff as [1/4] above.)


[4/4] hadoop git commit: Revert "HDFS-10391. Always enable NameNode service RPC port. Contributed by Gergely Novak."

Posted by ar...@apache.org.
Revert "HDFS-10391. Always enable NameNode service RPC port. Contributed by Gergely Novak."

This reverts commit b4dc2fa2474088e105d8ef0d4a87b1ff1fc2549a.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f6abb28
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f6abb28
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f6abb28

Branch: refs/heads/branch-3.0
Commit: 5f6abb28033090bc9aca4b5bb70eaaf4bf9cd5bd
Parents: ecad184
Author: Arpit Agarwal <ar...@apache.org>
Authored: Thu Sep 14 11:17:23 2017 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Thu Sep 14 11:17:23 2017 -0700

----------------------------------------------------------------------
 .../hdfs/client/HdfsClientConfigKeys.java       |   1 -
 .../java/org/apache/hadoop/hdfs/DFSUtil.java    | 106 +++++++--------
 .../hdfs/server/datanode/BlockPoolManager.java  |   2 +-
 .../hadoop/hdfs/server/namenode/BackupNode.java |   2 +-
 .../hdfs/server/namenode/FSNamesystem.java      |   5 +-
 .../hadoop/hdfs/server/namenode/NameNode.java   |  29 ++--
 .../hdfs/server/namenode/NameNodeRpcServer.java | 129 ++++++++++--------
 .../hdfs/server/namenode/SecondaryNameNode.java |   2 +-
 .../hdfs/server/namenode/ha/EditLogTailer.java  |   3 +-
 .../server/namenode/ha/RemoteNameNodeInfo.java  |   2 +-
 .../server/namenode/ha/StandbyCheckpointer.java |   2 +-
 .../org/apache/hadoop/hdfs/tools/GetConf.java   |   4 +-
 .../src/main/resources/hdfs-default.xml         |   3 +-
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  |  66 ++-------
 .../apache/hadoop/hdfs/MiniDFSNNTopology.java   |  17 +--
 .../org/apache/hadoop/hdfs/TestDFSUtil.java     | 133 +++----------------
 .../apache/hadoop/hdfs/TestHDFSServerPorts.java |  85 ++++++------
 .../org/apache/hadoop/hdfs/TestSafeMode.java    |   2 +-
 .../hadoop/hdfs/qjournal/MiniQJMHACluster.java  |   9 +-
 .../balancer/TestBalancerWithHANameNodes.java   |   5 +-
 .../datanode/InternalDataNodeTestUtils.java     |   9 +-
 .../hdfs/server/datanode/TestBlockRecovery.java |   6 +-
 .../datanode/TestDataNodeMetricsLogger.java     |  10 +-
 .../TestDataNodeMultipleRegistrations.java      |   8 +-
 .../datanode/TestDataNodeReconfiguration.java   |  11 +-
 .../TestDatanodeProtocolRetryPolicy.java        |   6 +-
 .../server/datanode/TestRefreshNamenodes.java   |  17 +--
 .../hdfs/server/namenode/TestBackupNode.java    |   2 -
 .../hdfs/server/namenode/TestCheckpoint.java    |   6 +-
 .../server/namenode/TestNameNodeMXBean.java     |  11 +-
 .../namenode/TestNameNodeMetricsLogger.java     |   1 -
 .../TestValidateConfigurationSettings.java      |   2 -
 .../hdfs/server/namenode/ha/HATestUtil.java     |   1 +
 .../server/namenode/ha/TestEditLogTailer.java   |  20 +--
 .../hadoop/hdfs/tools/TestDFSHAAdmin.java       |   7 +-
 .../hdfs/tools/TestDFSHAAdminMiniCluster.java   |   2 +-
 .../hdfs/tools/TestDFSZKFailoverController.java |   6 +-
 .../apache/hadoop/hdfs/tools/TestGetConf.java   |  40 ++++--
 38 files changed, 297 insertions(+), 475 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6abb28/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index d6efb5c..e99b099 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -74,7 +74,6 @@ public interface HdfsClientConfigKeys {
   String  DFS_NAMENODE_HTTPS_ADDRESS_KEY = "dfs.namenode.https-address";
   String DFS_HA_NAMENODES_KEY_PREFIX = "dfs.ha.namenodes";
   int DFS_NAMENODE_RPC_PORT_DEFAULT = 9820;
-  int DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT = 9840;
   String DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY =
       "dfs.namenode.kerberos.principal";
   String  DFS_CLIENT_WRITE_PACKET_SIZE_KEY = "dfs.client-write-packet-size";
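
With DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT removed, only the client RPC
port keeps a compiled-in default; a brief illustrative sketch:

    // Only the client RPC port retains a default after the revert:
    int rpcPort = HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT; // 9820
    // A separate service RPC endpoint now exists only where
    // dfs.namenode.servicerpc-address is configured explicitly.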

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6abb28/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 3c71e76..7776dc2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -35,7 +35,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYPASSWORD_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY;
-import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT;
 
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
@@ -493,25 +492,61 @@ public class DFSUtil {
   }
 
   /**
+   * Returns list of InetSocketAddresses corresponding to namenodes from the
+   * configuration.
+   * 
+   * Returns namenode address specifically configured for datanodes (using
+   * service ports), if found. If not, regular RPC address configured for other
+   * clients is returned.
+   * 
+   * @param conf configuration
+   * @return list of InetSocketAddress
+   * @throws IOException on error
+   */
+  public static Map<String, Map<String, InetSocketAddress>> getNNServiceRpcAddresses(
+      Configuration conf) throws IOException {
+    // Use default address as fall back
+    String defaultAddress;
+    try {
+      defaultAddress = NetUtils.getHostPortString(
+          DFSUtilClient.getNNAddress(conf));
+    } catch (IllegalArgumentException e) {
+      defaultAddress = null;
+    }
+    
+    Map<String, Map<String, InetSocketAddress>> addressList =
+      DFSUtilClient.getAddresses(conf, defaultAddress,
+                                 DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+                                 DFS_NAMENODE_RPC_ADDRESS_KEY);
+    if (addressList.isEmpty()) {
+      throw new IOException("Incorrect configuration: namenode address "
+          + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or "  
+          + DFS_NAMENODE_RPC_ADDRESS_KEY
+          + " is not configured.");
+    }
+    return addressList;
+  }
+
+  /**
    * Returns list of InetSocketAddresses corresponding to the namenode
    * that manages this cluster. Note this is to be used by datanodes to get
    * the list of namenode addresses to talk to.
    *
-   * Returns namenode address specifically configured for datanodes
+   * Returns namenode address specifically configured for datanodes (using
+   * service ports), if found. If not, regular RPC address configured for other
+   * clients is returned.
    *
    * @param conf configuration
    * @return list of InetSocketAddress
    * @throws IOException on error
    */
   public static Map<String, Map<String, InetSocketAddress>>
-      getNNServiceRpcAddresses(Configuration conf) throws IOException {
+    getNNServiceRpcAddressesForCluster(Configuration conf) throws IOException {
     // Use default address as fall back
     String defaultAddress;
     try {
-      InetSocketAddress rpcAddress = DFSUtilClient.getNNAddress(conf);
-      InetSocketAddress serviceAddress = InetSocketAddress.createUnresolved(
-          rpcAddress.getHostName(), DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT);
-      defaultAddress = NetUtils.getHostPortString(serviceAddress);
+      defaultAddress = NetUtils.getHostPortString(
+          DFSUtilClient.getNNAddress(conf));
     } catch (IllegalArgumentException e) {
       defaultAddress = null;
     }
@@ -534,46 +569,16 @@ public class DFSUtil {
       }
     }
 
-    // If true, then replace the port numbers in the final address list
-    // with the default service RPC port.
-    boolean replacePortNumbers = false;
-
-    // First try to lookup using the service RPC address keys.
     Map<String, Map<String, InetSocketAddress>> addressList =
-            DFSUtilClient.getAddressesForNsIds(
-                conf, parentNameServices, null,
-                DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
-
-    // Next try to lookup using the RPC address key.
-    if (addressList.isEmpty()) {
-      replacePortNumbers = true;
-      addressList = DFSUtilClient.getAddressesForNsIds(
-          conf, parentNameServices, null, DFS_NAMENODE_RPC_ADDRESS_KEY);
-    }
-
-    // Finally, fallback to the default address.
-    // This will not yield the correct address in a federated/HA setup.
-    if (addressList.isEmpty()) {
-      addressList = DFSUtilClient.getAddressesForNsIds(
-          conf, parentNameServices, defaultAddress);
-    }
-
+            DFSUtilClient.getAddressesForNsIds(conf, parentNameServices,
+                                               defaultAddress,
+                                               DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+                                               DFS_NAMENODE_RPC_ADDRESS_KEY);
     if (addressList.isEmpty()) {
       throw new IOException("Incorrect configuration: namenode address "
-          + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or "
-          + DFS_NAMENODE_RPC_ADDRESS_KEY
-          + " is not configured.");
-    }
-
-    if (replacePortNumbers) {
-      // Replace the RPC port(s) with the default service RPC port(s)
-      addressList.forEach((nsId, addresses) -> {
-        addresses.forEach((nnId, address) -> {
-          InetSocketAddress serviceAddress = InetSocketAddress.createUnresolved(
-              address.getHostName(), DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT);
-          addresses.put(nnId, serviceAddress);
-        });
-      });
+              + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or "
+              + DFS_NAMENODE_RPC_ADDRESS_KEY
+              + " is not configured.");
     }
     return addressList;
   }
@@ -1225,17 +1230,12 @@ public class DFSUtil {
     String serviceAddrKey = DFSUtilClient.concatSuffixes(
         DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nsId, nnId);
 
+    String addrKey = DFSUtilClient.concatSuffixes(
+        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, nsId, nnId);
+
     String serviceRpcAddr = conf.get(serviceAddrKey);
     if (serviceRpcAddr == null) {
-      String addrKey = DFSUtilClient.concatSuffixes(
-          DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, nsId, nnId);
-      String rpcAddress = conf.get(addrKey);
-      if (rpcAddress != null) {
-        InetSocketAddress rpcAddr = NetUtils.createSocketAddr(rpcAddress);
-        InetSocketAddress serviceAddr = InetSocketAddress.createUnresolved(
-            rpcAddr.getHostName(), DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT);
-        serviceRpcAddr = NetUtils.getHostPortString(serviceAddr);
-      }
+      serviceRpcAddr = conf.get(addrKey);
     }
     return serviceRpcAddr;
   }
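
The per-namenode lookup restored above reduces to a two-key check; a
minimal sketch for a single nameservice/namenode id (the key suffixes
"ns1.nn1" are placeholders):

    // Restored order: prefer the service RPC key; otherwise fall back
    // to the client RPC key unchanged, with no rewrite to a default
    // service port.
    String addr = conf.get("dfs.namenode.servicerpc-address.ns1.nn1");
    if (addr == null) {
      addr = conf.get("dfs.namenode.rpc-address.ns1.nn1");
    }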

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6abb28/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
index 677559c..f6a11c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
@@ -150,7 +150,7 @@ class BlockPoolManager {
             (DFSConfigKeys.DFS_NAMESERVICES));
 
     Map<String, Map<String, InetSocketAddress>> newAddressMap = DFSUtil
-            .getNNServiceRpcAddresses(conf);
+            .getNNServiceRpcAddressesForCluster(conf);
     Map<String, Map<String, InetSocketAddress>> newLifelineAddressMap = DFSUtil
             .getNNLifelineRpcAddressesForCluster(conf);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6abb28/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
index 5c2dcdc..318d8e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
@@ -318,7 +318,7 @@ public class BackupNode extends NameNode {
 
   private NamespaceInfo handshake(Configuration conf) throws IOException {
     // connect to name node
-    InetSocketAddress nnAddress = NameNode.getServiceAddress(conf);
+    InetSocketAddress nnAddress = NameNode.getServiceAddress(conf, true);
     this.namenode = NameNodeProxies.createNonHAProxy(conf, nnAddress,
         NamenodeProtocol.class, UserGroupInformation.getCurrentUser(),
         true).getProxy();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6abb28/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 55695b3..d9f3c0e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1157,8 +1157,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       dir.setINodeAttributeProvider(inodeAttributeProvider);
     }
     snapshotManager.registerMXBean();
-    InetSocketAddress serviceAddress = NameNode.getServiceAddress(conf);
-    this.nameNodeHostName = serviceAddress.getHostName();
+    InetSocketAddress serviceAddress = NameNode.getServiceAddress(conf, true);
+    this.nameNodeHostName = (serviceAddress != null) ?
+        serviceAddress.getHostName() : "";
   }
   
   /** 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6abb28/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index d700439..79bbbc5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -505,17 +505,18 @@ public class NameNode extends ReconfigurableBase implements
   
   /**
    * Fetches the address for services to use when connecting to namenode
+   * based on the value of fallback: if the service RPC address is not
+   * specified, returns null when fallback is false, or the default
+   * namenode address (used by both clients and services) when it is true.
    * Services here are datanodes, backup node, any non client connection
    */
-  public static InetSocketAddress getServiceAddress(Configuration conf) {
-    String address = conf.getTrimmed(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
-    if (address == null || address.isEmpty()) {
-      InetSocketAddress rpcAddress = DFSUtilClient.getNNAddress(conf);
-      return NetUtils.createSocketAddr(rpcAddress.getHostName(),
-          HdfsClientConfigKeys.DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT);
+  public static InetSocketAddress getServiceAddress(Configuration conf,
+                                                        boolean fallback) {
+    String addr = conf.getTrimmed(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
+    if (addr == null || addr.isEmpty()) {
+      return fallback ? DFSUtilClient.getNNAddress(conf) : null;
     }
-    return NetUtils.createSocketAddr(address,
-        HdfsClientConfigKeys.DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT);
+    return DFSUtilClient.getNNAddress(addr);
   }
 
   //
@@ -553,7 +554,7 @@ public class NameNode extends ReconfigurableBase implements
    * If the service rpc is not configured returns null
    */
   protected InetSocketAddress getServiceRpcServerAddress(Configuration conf) {
-    return NameNode.getServiceAddress(conf);
+    return NameNode.getServiceAddress(conf, false);
   }
 
   protected InetSocketAddress getRpcServerAddress(Configuration conf) {
@@ -614,8 +615,7 @@ public class NameNode extends ReconfigurableBase implements
   }
 
   /**
-   * Modifies the configuration passed to contain the service rpc address
-   * setting.
+   * Modifies the configuration passed to contain the service rpc address setting
    */
   protected void setRpcServiceServerAddress(Configuration conf,
       InetSocketAddress serviceRPCAddress) {
@@ -1071,13 +1071,6 @@ public class NameNode extends ReconfigurableBase implements
   }
 
   /**
-   * @return NameNode service RPC address in "host:port" string form
-   */
-  public String getServiceRpcAddressHostPortString() {
-    return NetUtils.getHostPortString(getServiceRpcAddress());
-  }
-
-  /**
    * @return NameNode HTTP address, used by the Web UI, image transfer,
    *    and HTTP-based file system clients like WebHDFS
    */
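
The two fallback modes are used as follows elsewhere in this commit
(collected here for illustration from the SecondaryNameNode, BackupNode
and NameNodeRpcServer hunks):

    // Callers that must end up with some address pass fallback=true:
    InetSocketAddress nnAddr = NameNode.getServiceAddress(conf, true);
    // RPC server setup passes fallback=false, so an unset
    // dfs.namenode.servicerpc-address yields null and no separate
    // service RPC server is created:
    InetSocketAddress svcAddr = NameNode.getServiceAddress(conf, false);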

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6abb28/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 78790bd..1ef3f55 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -333,63 +333,66 @@ public class NameNodeRpcServer implements NamenodeProtocols {
         .newReflectiveBlockingService(traceAdminXlator);
 
     InetSocketAddress serviceRpcAddr = nn.getServiceRpcServerAddress(conf);
-    String bindHost = nn.getServiceRpcServerBindHost(conf);
-    if (bindHost == null) {
-      bindHost = serviceRpcAddr.getHostName();
-    }
+    if (serviceRpcAddr != null) {
+      String bindHost = nn.getServiceRpcServerBindHost(conf);
+      if (bindHost == null) {
+        bindHost = serviceRpcAddr.getHostName();
+      }
+      LOG.info("Service RPC server is binding to " + bindHost + ":" +
+          serviceRpcAddr.getPort());
+
+      int serviceHandlerCount =
+        conf.getInt(DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY,
+                    DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT);
+      serviceRpcServer = new RPC.Builder(conf)
+          .setProtocol(
+              org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB.class)
+          .setInstance(clientNNPbService)
+          .setBindAddress(bindHost)
+          .setPort(serviceRpcAddr.getPort())
+          .setNumHandlers(serviceHandlerCount)
+          .setVerbose(false)
+          .setSecretManager(namesystem.getDelegationTokenSecretManager())
+          .build();
 
-    LOG.info("Service RPC server is binding to " + bindHost + ":" +
-        serviceRpcAddr.getPort());
+      // Add all the RPC protocols that the namenode implements
+      DFSUtil.addPBProtocol(conf, HAServiceProtocolPB.class, haPbService,
+          serviceRpcServer);
+      DFSUtil.addPBProtocol(conf, ReconfigurationProtocolPB.class,
+          reconfigurationPbService, serviceRpcServer);
+      DFSUtil.addPBProtocol(conf, NamenodeProtocolPB.class, NNPbService,
+          serviceRpcServer);
+      DFSUtil.addPBProtocol(conf, DatanodeProtocolPB.class, dnProtoPbService,
+          serviceRpcServer);
+      DFSUtil.addPBProtocol(conf, RefreshAuthorizationPolicyProtocolPB.class,
+          refreshAuthService, serviceRpcServer);
+      DFSUtil.addPBProtocol(conf, RefreshUserMappingsProtocolPB.class, 
+          refreshUserMappingService, serviceRpcServer);
+      // We support Refreshing call queue here in case the client RPC queue is full
+      DFSUtil.addPBProtocol(conf, RefreshCallQueueProtocolPB.class,
+          refreshCallQueueService, serviceRpcServer);
+      DFSUtil.addPBProtocol(conf, GenericRefreshProtocolPB.class,
+          genericRefreshService, serviceRpcServer);
+      DFSUtil.addPBProtocol(conf, GetUserMappingsProtocolPB.class, 
+          getUserMappingService, serviceRpcServer);
+      DFSUtil.addPBProtocol(conf, TraceAdminProtocolPB.class,
+          traceAdminService, serviceRpcServer);
 
-    int serviceHandlerCount = conf.getInt(
-        DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY,
-        DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT);
-    serviceRpcServer = new RPC.Builder(conf)
-        .setProtocol(
-            org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB.class)
-        .setInstance(clientNNPbService)
-        .setBindAddress(bindHost)
-        .setPort(serviceRpcAddr.getPort())
-        .setNumHandlers(serviceHandlerCount)
-        .setVerbose(false)
-        .setSecretManager(namesystem.getDelegationTokenSecretManager())
-        .build();
-
-    // Add all the RPC protocols that the namenode implements
-    DFSUtil.addPBProtocol(conf, HAServiceProtocolPB.class, haPbService,
-        serviceRpcServer);
-    DFSUtil.addPBProtocol(conf, ReconfigurationProtocolPB.class,
-        reconfigurationPbService, serviceRpcServer);
-    DFSUtil.addPBProtocol(conf, NamenodeProtocolPB.class, NNPbService,
-        serviceRpcServer);
-    DFSUtil.addPBProtocol(conf, DatanodeProtocolPB.class, dnProtoPbService,
-        serviceRpcServer);
-    DFSUtil.addPBProtocol(conf, RefreshAuthorizationPolicyProtocolPB.class,
-        refreshAuthService, serviceRpcServer);
-    DFSUtil.addPBProtocol(conf, RefreshUserMappingsProtocolPB.class,
-        refreshUserMappingService, serviceRpcServer);
-    // We support Refreshing call queue here in case the client RPC queue
-    // is full.
-    DFSUtil.addPBProtocol(conf, RefreshCallQueueProtocolPB.class,
-        refreshCallQueueService, serviceRpcServer);
-    DFSUtil.addPBProtocol(conf, GenericRefreshProtocolPB.class,
-        genericRefreshService, serviceRpcServer);
-    DFSUtil.addPBProtocol(conf, GetUserMappingsProtocolPB.class,
-        getUserMappingService, serviceRpcServer);
-    DFSUtil.addPBProtocol(conf, TraceAdminProtocolPB.class,
-        traceAdminService, serviceRpcServer);
-
-    // Update the address with the correct port.
-    InetSocketAddress listenAddr = serviceRpcServer.getListenerAddress();
-    serviceRPCAddress = new InetSocketAddress(
-          serviceRpcAddr.getHostName(), listenAddr.getPort());
-    nn.setRpcServiceServerAddress(conf, serviceRPCAddress);
+      // Update the address with the correct port
+      InetSocketAddress listenAddr = serviceRpcServer.getListenerAddress();
+      serviceRPCAddress = new InetSocketAddress(
+            serviceRpcAddr.getHostName(), listenAddr.getPort());
+      nn.setRpcServiceServerAddress(conf, serviceRPCAddress);
+    } else {
+      serviceRpcServer = null;
+      serviceRPCAddress = null;
+    }
 
     InetSocketAddress lifelineRpcAddr = nn.getLifelineRpcServerAddress(conf);
     if (lifelineRpcAddr != null) {
       RPC.setProtocolEngine(conf, HAServiceProtocolPB.class,
           ProtobufRpcEngine.class);
-      bindHost = nn.getLifelineRpcServerBindHost(conf);
+      String bindHost = nn.getLifelineRpcServerBindHost(conf);
       if (bindHost == null) {
         bindHost = lifelineRpcAddr.getHostName();
       }
@@ -419,7 +422,7 @@ public class NameNodeRpcServer implements NamenodeProtocols {
           lifelineProtoPbService, lifelineRpcServer);
 
       // Update the address with the correct port
-      listenAddr = lifelineRpcServer.getListenerAddress();
+      InetSocketAddress listenAddr = lifelineRpcServer.getListenerAddress();
       lifelineRPCAddress = new InetSocketAddress(lifelineRpcAddr.getHostName(),
           listenAddr.getPort());
       nn.setRpcLifelineServerAddress(conf, lifelineRPCAddress);
@@ -429,7 +432,7 @@ public class NameNodeRpcServer implements NamenodeProtocols {
     }
 
     InetSocketAddress rpcAddr = nn.getRpcServerAddress(conf);
-    bindHost = nn.getRpcServerBindHost(conf);
+    String bindHost = nn.getRpcServerBindHost(conf);
     if (bindHost == null) {
       bindHost = rpcAddr.getHostName();
     }
@@ -473,14 +476,16 @@ public class NameNodeRpcServer implements NamenodeProtocols {
           conf.getBoolean(
             CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
       clientRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
-      serviceRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
+      if (serviceRpcServer != null) {
+        serviceRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
+      }
       if (lifelineRpcServer != null) {
         lifelineRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
       }
     }
 
     // The rpc-server port can be ephemeral... ensure we have the correct info
-    listenAddr = clientRpcServer.getListenerAddress();
+    InetSocketAddress listenAddr = clientRpcServer.getListenerAddress();
     clientRpcAddress = new InetSocketAddress(
         rpcAddr.getHostName(), listenAddr.getPort());
     nn.setRpcServerAddress(conf, clientRpcAddress);
@@ -518,7 +523,9 @@ public class NameNodeRpcServer implements NamenodeProtocols {
     clientRpcServer.addSuppressedLoggingExceptions(StandbyException.class);
 
     clientRpcServer.setTracer(nn.tracer);
-    serviceRpcServer.setTracer(nn.tracer);
+    if (serviceRpcServer != null) {
+      serviceRpcServer.setTracer(nn.tracer);
+    }
     if (lifelineRpcServer != null) {
       lifelineRpcServer.setTracer(nn.tracer);
     }
@@ -547,7 +554,9 @@ public class NameNodeRpcServer implements NamenodeProtocols {
    */
   void start() {
     clientRpcServer.start();
-    serviceRpcServer.start();
+    if (serviceRpcServer != null) {
+      serviceRpcServer.start();      
+    }
     if (lifelineRpcServer != null) {
       lifelineRpcServer.start();
     }
@@ -558,7 +567,9 @@ public class NameNodeRpcServer implements NamenodeProtocols {
    */
   void join() throws InterruptedException {
     clientRpcServer.join();
-    serviceRpcServer.join();
+    if (serviceRpcServer != null) {
+      serviceRpcServer.join();      
+    }
     if (lifelineRpcServer != null) {
       lifelineRpcServer.join();
     }
@@ -571,7 +582,9 @@ public class NameNodeRpcServer implements NamenodeProtocols {
     if (clientRpcServer != null) {
       clientRpcServer.stop();
     }
-    serviceRpcServer.stop();
+    if (serviceRpcServer != null) {
+      serviceRpcServer.stop();
+    }
     if (lifelineRpcServer != null) {
       lifelineRpcServer.stop();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6abb28/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index e8dfb72..ff83e34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -228,7 +228,7 @@ public class SecondaryNameNode implements Runnable,
 
     // Create connection to the namenode.
     shouldRun = true;
-    nameNodeAddr = NameNode.getServiceAddress(conf);
+    nameNodeAddr = NameNode.getServiceAddress(conf, true);
 
     this.conf = conf;
     this.namenode = NameNodeProxies.createNonHAProxy(conf, nameNodeAddr, 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6abb28/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
index fd5a70e..f57cb4b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
@@ -159,8 +159,7 @@ public class EditLogTailer {
 
       for (RemoteNameNodeInfo info : nns) {
         // overwrite the socket address, if we need to
-        InetSocketAddress ipc = NameNode.getServiceAddress(
-            info.getConfiguration());
+        InetSocketAddress ipc = NameNode.getServiceAddress(info.getConfiguration(), true);
         // sanity check the ipc address
         Preconditions.checkArgument(ipc.getPort() > 0,
             "Active NameNode must have an IPC port configured. " + "Got address '%s'", ipc);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6abb28/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RemoteNameNodeInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RemoteNameNodeInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RemoteNameNodeInfo.java
index 248be55..9a51190 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RemoteNameNodeInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RemoteNameNodeInfo.java
@@ -54,7 +54,7 @@ public class RemoteNameNodeInfo {
     for (Configuration otherNode : otherNodes) {
       String otherNNId = HAUtil.getNameNodeId(otherNode, nsId);
       // don't do any validation here as in some cases, it can be overwritten later
-      InetSocketAddress otherIpcAddr = NameNode.getServiceAddress(otherNode);
+      InetSocketAddress otherIpcAddr = NameNode.getServiceAddress(otherNode, true);
 
 
       final String scheme = DFSUtil.getHttpClientScheme(conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6abb28/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
index 3cbcd9c..789ed9c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
@@ -121,7 +121,7 @@ public class StandbyCheckpointer {
   
   private URL getHttpAddress(Configuration conf) throws IOException {
     final String scheme = DFSUtil.getHttpClientScheme(conf);
-    String defaultHost = NameNode.getServiceAddress(conf).getHostName();
+    String defaultHost = NameNode.getServiceAddress(conf, true).getHostName();
     URI addr = DFSUtil.getInfoServerWithDefaultHost(defaultHost, conf, scheme);
     return addr.toURL();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6abb28/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
index e780393..e6cf16c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
@@ -187,7 +187,7 @@ public class GetConf extends Configured implements Tool {
   static class NameNodesCommandHandler extends CommandHandler {
     @Override
     int doWorkInternal(GetConf tool, String []args) throws IOException {
-      tool.printMap(DFSUtil.getNNServiceRpcAddresses(tool.getConf()));
+      tool.printMap(DFSUtil.getNNServiceRpcAddressesForCluster(tool.getConf()));
       return 0;
     }
   }
@@ -224,7 +224,7 @@ public class GetConf extends Configured implements Tool {
     public int doWorkInternal(GetConf tool, String []args) throws IOException {
       Configuration config = tool.getConf();
       List<ConfiguredNNAddress> cnnlist = DFSUtil.flattenAddressMap(
-          DFSUtil.getNNServiceRpcAddresses(config));
+          DFSUtil.getNNServiceRpcAddressesForCluster(config));
       if (!cnnlist.isEmpty()) {
         for (ConfiguredNNAddress cnn : cnnlist) {
           InetSocketAddress rpc = cnn.getAddress();
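
Both handlers above now go through DFSUtil.getNNServiceRpcAddressesForCluster(conf), which resolves service RPC addresses per nameservice and honours dfs.internal.nameservices (the TestDFSUtil hunk later in this diff exercises the misconfigured case, expecting an IOException). A short usage sketch, assuming a Configuration that is already populated:

    Map<String, Map<String, InetSocketAddress>> addrs =
        DFSUtil.getNNServiceRpcAddressesForCluster(conf); // may throw IOException
    for (Map.Entry<String, Map<String, InetSocketAddress>> ns
        : addrs.entrySet()) {
      System.out.println(ns.getKey() + " -> " + ns.getValue());
    }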

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6abb28/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index ba72923..2d94aac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -61,7 +61,8 @@
     connecting to this address if it is configured. In the case of HA/Federation where multiple namenodes exist,
     the name service id is added to the name e.g. dfs.namenode.servicerpc-address.ns1
     dfs.namenode.rpc-address.EXAMPLENAMESERVICE
-    The value of this property will take the form of nn-host1:rpc-port. The NameNode's default service RPC port is 9840.
+    The value of this property will take the form of nn-host1:rpc-port.
+    If the value of this property is unset the value of dfs.namenode.rpc-address will be used as the default.
   </description>
 </property>
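
The restored description makes the fallback explicit: with no fixed service port default, an unset dfs.namenode.servicerpc-address resolves to the dfs.namenode.rpc-address value. A hedged illustration (host and port are example values taken from elsewhere in this commit, not mandated defaults):

    Configuration conf = new HdfsConfiguration();
    conf.set("dfs.namenode.rpc-address", "nn-host1:9820");
    // dfs.namenode.servicerpc-address deliberately left unset.
    String effective = conf.getTrimmed("dfs.namenode.servicerpc-address",
        conf.getTrimmed("dfs.namenode.rpc-address"));
    // effective == "nn-host1:9820"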
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6abb28/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index aa3ed30..0345cf5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -166,7 +166,6 @@ public class MiniDFSCluster implements AutoCloseable {
    */
   public static class Builder {
     private int nameNodePort = 0;
-    private int nameNodeServicePort = 0;
     private int nameNodeHttpPort = 0;
     private final Configuration conf;
     private int numDataNodes = 1;
@@ -209,14 +208,6 @@ public class MiniDFSCluster implements AutoCloseable {
       this.nameNodePort = val;
       return this;
     }
-
-    /**
-     * Default: 0
-     */
-    public Builder nameNodeServicePort(int val) {
-      this.nameNodeServicePort = val;
-      return this;
-    }
     
     /**
      * Default: 0
@@ -408,8 +399,8 @@ public class MiniDFSCluster implements AutoCloseable {
     }
 
     /**
-     * Default: false.
-     * When true the hosts file/include file for the cluster is setup.
+     * Default: false
+     * When true the hosts file/include file for the cluster is setup
      */
     public Builder setupHostsFile(boolean val) {
       this.setupHostsFile = val;
@@ -419,7 +410,7 @@ public class MiniDFSCluster implements AutoCloseable {
     /**
      * Default: a single namenode.
      * See {@link MiniDFSNNTopology#simpleFederatedTopology(int)} to set up
-     * federated nameservices.
+     * federated nameservices
      */
     public Builder nnTopology(MiniDFSNNTopology topology) {
       this.nnTopology = topology;
@@ -470,8 +461,7 @@ public class MiniDFSCluster implements AutoCloseable {
     if (builder.nnTopology == null) {
       // If no topology is specified, build a single NN. 
       builder.nnTopology = MiniDFSNNTopology.simpleSingleNN(
-          builder.nameNodePort, builder.nameNodeServicePort,
-          builder.nameNodeHttpPort);
+          builder.nameNodePort, builder.nameNodeHttpPort);
     }
     assert builder.storageTypes == null ||
            builder.storageTypes.length == builder.numDataNodes;
@@ -780,7 +770,7 @@ public class MiniDFSCluster implements AutoCloseable {
                        manageNameDfsDirs, true, manageDataDfsDirs, manageDataDfsDirs,
                        operation, null, racks, hosts,
                        null, simulatedCapacities, null, true, false,
-                       MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0, 0),
+                       MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0),
                        true, false, false, null, true, false);
   }
 
@@ -1259,11 +1249,6 @@ public class MiniDFSCluster implements AutoCloseable {
         DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId,
         nnConf.getNnId());
     conf.set(key, "127.0.0.1:" + nnConf.getIpcPort());
-
-    key = DFSUtil.addKeySuffixes(
-        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nameserviceId,
-        nnConf.getNnId());
-    conf.set(key, "127.0.0.1:" + nnConf.getServicePort());
   }
   
   private static String[] createArgs(StartupOption operation) {
@@ -1297,8 +1282,6 @@ public class MiniDFSCluster implements AutoCloseable {
     // the conf
     hdfsConf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
         nameserviceId, nnId), nn.getNameNodeAddressHostPortString());
-    hdfsConf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        nameserviceId, nnId), nn.getServiceRpcAddressHostPortString());
     if (nn.getHttpAddress() != null) {
       hdfsConf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTP_ADDRESS_KEY,
           nameserviceId, nnId), NetUtils.getHostPortString(nn.getHttpAddress()));
@@ -1354,14 +1337,6 @@ public class MiniDFSCluster implements AutoCloseable {
     return getNN(nnIndex).conf;
   }
 
-  /**
-   * Return the cluster-wide configuration.
-   * @return
-   */
-  public Configuration getClusterConfiguration() {
-    return conf;
-  }
-
   private NameNodeInfo getNN(int nnIndex) {
     int count = 0;
     for (NameNodeInfo nn : namenodes.values()) {
@@ -1955,16 +1930,6 @@ public class MiniDFSCluster implements AutoCloseable {
   }
 
   /**
-   * Gets the service rpc port used by the NameNode, because the caller
-   * supplied port is not necessarily the actual port used.
-   * Assumption: cluster has a single namenode
-   */
-  public int getNameNodeServicePort() {
-    checkSingleNameNode();
-    return getNameNodeServicePort(0);
-  }
-
-  /**
    * @return the service rpc port used by the NameNode at the given index.
    */     
   public int getNameNodeServicePort(int nnIndex) {
@@ -2591,14 +2556,12 @@ public class MiniDFSCluster implements AutoCloseable {
     }
 
     NameNodeInfo info = getNN(nnIndex);
-    InetSocketAddress nameNodeAddress = info.nameNode.getNameNodeAddress();
-    assert nameNodeAddress.getPort() != 0;
-    DFSClient client = new DFSClient(nameNodeAddress, conf);
+    InetSocketAddress addr = info.nameNode.getServiceRpcAddress();
+    assert addr.getPort() != 0;
+    DFSClient client = new DFSClient(addr, conf);
 
     // ensure all datanodes have registered and sent heartbeat to the namenode
-    InetSocketAddress serviceAddress = info.nameNode.getServiceRpcAddress();
-    while (shouldWait(client.datanodeReport(DatanodeReportType.LIVE),
-        serviceAddress)) {
+    while (shouldWait(client.datanodeReport(DatanodeReportType.LIVE), addr)) {
       try {
         LOG.info("Waiting for cluster to become active");
         Thread.sleep(100);
@@ -3093,18 +3056,13 @@ public class MiniDFSCluster implements AutoCloseable {
     }
   }
 
-  public void addNameNode(Configuration conf, int namenodePort)
-      throws IOException{
-    addNameNode(conf, namenodePort, 0);
-  }
-
   /**
    * Add a namenode to a federated cluster and start it. Configuration of
    * datanodes in the cluster is refreshed to register with the new namenode.
    * 
    * @return newly started namenode
    */
-  public void addNameNode(Configuration conf, int namenodePort, int servicePort)
+  public void addNameNode(Configuration conf, int namenodePort)
       throws IOException {
     if(!federation)
       throw new IOException("cannot add namenode to non-federated cluster");
@@ -3118,9 +3076,7 @@ public class MiniDFSCluster implements AutoCloseable {
   
     String nnId = null;
     initNameNodeAddress(conf, nameserviceId,
-        new NNConf(nnId)
-            .setIpcPort(namenodePort)
-            .setServicePort(servicePort));
+        new NNConf(nnId).setIpcPort(namenodePort));
     // figure out the current number of NNs
     NameNodeInfo[] infos = this.getNameNodeInfos(nameserviceId);
     int nnIndex = infos == null ? 0 : infos.length;
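
initNameNodeAddress above now writes only the per-NN client RPC key. The DFSUtil.addKeySuffixes helper used there composes such keys by appending the nameservice and namenode IDs to the base key; a small sketch with hypothetical IDs and port:

    String key = DFSUtil.addKeySuffixes(
        DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn1");
    // key is "dfs.namenode.rpc-address.ns1.nn1"
    conf.set(key, "127.0.0.1:9820"); // hypothetical port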

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6abb28/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java
index b1d609a..b9786a3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java
@@ -43,13 +43,12 @@ public class MiniDFSNNTopology {
    * Set up a simple non-federated non-HA NN.
    */
   public static MiniDFSNNTopology simpleSingleNN(
-      int rpcPort, int servicePort, int httpPort) {
+      int nameNodePort, int nameNodeHttpPort) {
     return new MiniDFSNNTopology()
       .addNameservice(new MiniDFSNNTopology.NSConf(null)
         .addNN(new MiniDFSNNTopology.NNConf(null)
-          .setIpcPort(rpcPort)
-          .setServicePort(servicePort)
-          .setHttpPort(httpPort)));
+          .setHttpPort(nameNodeHttpPort)
+          .setIpcPort(nameNodePort)));
   }
   
 
@@ -222,7 +221,6 @@ public class MiniDFSNNTopology {
     private final String nnId;
     private int httpPort;
     private int ipcPort;
-    private int servicePort;
     private String clusterId;
     
     public NNConf(String nnId) {
@@ -236,10 +234,6 @@ public class MiniDFSNNTopology {
     int getIpcPort() {
       return ipcPort;
     }
-
-    int getServicePort() {
-      return servicePort;
-    }
     
     int getHttpPort() {
       return httpPort;
@@ -259,11 +253,6 @@ public class MiniDFSNNTopology {
       return this;
     }
 
-    public NNConf setServicePort(int servicePort) {
-      this.servicePort = servicePort;
-      return this;
-    }
-
     public NNConf setClusterId(String clusterId) {
       this.clusterId = clusterId;
       return this;
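
With the service port gone from NNConf, a test topology is declared with IPC (and optionally HTTP) ports only; the pattern matches the call sites updated elsewhere in this commit, e.g. (ports hypothetical):

    MiniDFSNNTopology topology = new MiniDFSNNTopology()
        .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
            .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10001))
            .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10002)));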

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6abb28/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
index 4ae2a77..f811d3d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
@@ -33,7 +33,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYPASSWORD_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY;
-import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT;
 import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
 import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
 import static org.hamcrest.CoreMatchers.not;
@@ -84,9 +83,9 @@ import com.google.common.collect.Sets;
 
 public class TestDFSUtil {
 
-  private static final String NS1_NN_ADDR    = "ns1-nn.example.com:9820";
-  private static final String NS1_NN1_ADDR   = "ns1-nn1.example.com:9820";
-  private static final String NS1_NN2_ADDR   = "ns1-nn2.example.com:9820";
+  static final String NS1_NN_ADDR    = "ns1-nn.example.com:9820";
+  static final String NS1_NN1_ADDR   = "ns1-nn1.example.com:9820";
+  static final String NS1_NN2_ADDR   = "ns1-nn2.example.com:9820";
 
   /**
    * Reset to default UGI settings since some tests change them.
@@ -274,13 +273,13 @@ public class TestDFSUtil {
     assertEquals(1, nn1Map.size());
     InetSocketAddress addr = nn1Map.get(null);
     assertEquals("localhost", addr.getHostName());
-    assertEquals(DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT, addr.getPort());
+    assertEquals(9000, addr.getPort());
     
     Map<String, InetSocketAddress> nn2Map = nnMap.get("nn2");
     assertEquals(1, nn2Map.size());
     addr = nn2Map.get(null);
     assertEquals("localhost", addr.getHostName());
-    assertEquals(DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT, addr.getPort());
+    assertEquals(9001, addr.getPort());
 
     // Test - can look up nameservice ID from service address
     checkNameServiceId(conf, NN1_ADDRESS, "nn1");
@@ -315,8 +314,7 @@ public class TestDFSUtil {
     Map<String, InetSocketAddress> defaultNsMap = addrMap.get(null);
     assertEquals(1, defaultNsMap.size());
     
-    assertEquals(DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT,
-        defaultNsMap.get(null).getPort());
+    assertEquals(9999, defaultNsMap.get(null).getPort());
   }
   
   /**
@@ -493,10 +491,6 @@ public class TestDFSUtil {
     final String NS1_NN2_HOST = "ns1-nn2.example.com:9820";
     final String NS2_NN1_HOST = "ns2-nn1.example.com:9820";
     final String NS2_NN2_HOST = "ns2-nn2.example.com:9820";
-    final String NS1_NN1_SERVICE_HOST = "ns1-nn1.example.com:9840";
-    final String NS1_NN2_SERVICE_HOST = "ns1-nn2.example.com:9840";
-    final String NS2_NN1_SERVICE_HOST = "ns2-nn1.example.com:9840";
-    final String NS2_NN2_SERVICE_HOST = "ns2-nn2.example.com:9840";
     conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://ns1");
     
     // Two nameservices, each with two NNs.
@@ -530,14 +524,12 @@ public class TestDFSUtil {
     assertEquals(NS2_NN1_HOST, map.get("ns2").get("ns2-nn1").toString());
     assertEquals(NS2_NN2_HOST, map.get("ns2").get("ns2-nn2").toString());
     
-    assertEquals(NS1_NN1_SERVICE_HOST,
+    assertEquals(NS1_NN1_HOST, 
         DFSUtil.getNamenodeServiceAddr(conf, "ns1", "ns1-nn1"));
-    assertEquals(NS1_NN2_SERVICE_HOST,
+    assertEquals(NS1_NN2_HOST, 
         DFSUtil.getNamenodeServiceAddr(conf, "ns1", "ns1-nn2"));
-    assertEquals(NS2_NN1_SERVICE_HOST,
+    assertEquals(NS2_NN1_HOST, 
         DFSUtil.getNamenodeServiceAddr(conf, "ns2", "ns2-nn1"));
-    assertEquals(NS2_NN2_SERVICE_HOST,
-        DFSUtil.getNamenodeServiceAddr(conf, "ns2", "ns2-nn2"));
 
     // No nameservice was given and we can't determine which service addr
     // to use as two nameservices could share a namenode ID.
@@ -563,11 +555,9 @@ public class TestDFSUtil {
     
     // One nameservice with two NNs
     final String NS1_NN1_HOST = "ns1-nn1.example.com:9820";
-    final String NS1_NN1_HOST_SVC = "ns1-nn1.example.com:9821";
-    final String NS1_NN1_HOST_DEFAULT_SVC = "ns1-nn1.example.com:9840";
-    final String NS1_NN2_HOST = "ns1-nn2.example.com:9820";
+    final String NS1_NN1_HOST_SVC = "ns1-nn2.example.com:9821";
+    final String NS1_NN2_HOST = "ns1-nn1.example.com:9820";
     final String NS1_NN2_HOST_SVC = "ns1-nn2.example.com:9821";
-    final String NS1_NN2_HOST_DEFAULT_SVC = "ns1-nn2.example.com:9840";
    
     conf.set(DFS_NAMESERVICES, "ns1");
     conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),"nn1,nn2"); 
@@ -577,15 +567,12 @@ public class TestDFSUtil {
     conf.set(DFSUtil.addKeySuffixes(
         DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn2"), NS1_NN2_HOST);
 
-    // The default service rpc address is used if no service address is defined
-    assertEquals(NS1_NN1_HOST_DEFAULT_SVC,
-        DFSUtil.getNamenodeServiceAddr(conf, null, "nn1"));
-    assertEquals(NS1_NN2_HOST_DEFAULT_SVC,
-        DFSUtil.getNamenodeServiceAddr(conf, null, "nn2"));
+    // The rpc address is used if no service address is defined
+    assertEquals(NS1_NN1_HOST, DFSUtil.getNamenodeServiceAddr(conf, null, "nn1"));
+    assertEquals(NS1_NN2_HOST, DFSUtil.getNamenodeServiceAddr(conf, null, "nn2"));
 
     // A nameservice is specified explicitly
-    assertEquals(NS1_NN1_HOST_DEFAULT_SVC,
-        DFSUtil.getNamenodeServiceAddr(conf, "ns1", "nn1"));
+    assertEquals(NS1_NN1_HOST, DFSUtil.getNamenodeServiceAddr(conf, "ns1", "nn1"));
     assertEquals(null, DFSUtil.getNamenodeServiceAddr(conf, "invalid", "nn1"));
     
     // The service addrs are used when they are defined
@@ -1009,92 +996,6 @@ public class TestDFSUtil {
   }
 
   @Test
-  public void testGetNNServiceRpcAddresses() throws IOException {
-    Configuration conf = new HdfsConfiguration();
-    final String NN_HOST = "nn.example.com";
-    final String NN_ADDRESS = "hdfs://" + NN_HOST + ":9000/";
-    conf.set(FS_DEFAULT_NAME_KEY, NN_ADDRESS);
-
-    // No service RPC, no rpc
-    Map<String, Map<String, InetSocketAddress>> nsMap = DFSUtil
-        .getNNServiceRpcAddresses(conf);
-    assertEquals(1, nsMap.size());
-    InetSocketAddress address = nsMap.get(null).get(null);
-    assertEquals(DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT,
-        address.getPort());
-    assertEquals(NN_HOST, address.getHostName());
-
-    // No service RPC
-    final String RPC_ADDRESS = NN_HOST + ":9191";
-    conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, RPC_ADDRESS);
-    nsMap = DFSUtil.getNNServiceRpcAddresses(conf);
-    assertEquals(1, nsMap.size());
-    address = nsMap.get(null).get(null);
-    assertEquals(DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT,
-        address.getPort());
-    assertEquals(NN_HOST, address.getHostName());
-
-    // Service RPC present
-    final String SERVICE_RPC_ADDRESS = NN_HOST + ":9292";
-    conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, SERVICE_RPC_ADDRESS);
-    nsMap = DFSUtil.getNNServiceRpcAddresses(conf);
-    assertEquals(1, nsMap.size());
-    address = nsMap.get(null).get(null);
-    assertEquals(9292, address.getPort());
-    assertEquals(NN_HOST, address.getHostName());
-  }
-
-  @Test
-  public void testGetNNServiceRpcAddressesForHA() throws IOException {
-    Configuration conf = new HdfsConfiguration();
-
-    final String NS = "mycluster";
-    final String NN1_HOST = "nn1.example.com";
-    final String NN2_HOST = "nn2.example.com";
-    conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://mycluster");
-
-    conf.set(DFS_NAMESERVICES, NS);
-    conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, NS),
-        "nn1,nn2");
-    conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, NS, "nn1"),
-        NN1_HOST + ":9820");
-    conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, NS, "nn2"),
-        NN2_HOST + ":9820");
-
-    assertTrue(HAUtil.isHAEnabled(conf, NS));
-
-    // Without Service RPC keys
-    Map<String, Map<String, InetSocketAddress>> nsMap =
-        DFSUtil.getNNServiceRpcAddresses(conf);
-    assertEquals(1, nsMap.size());
-    Map<String, InetSocketAddress> nnMap = nsMap.get(NS);
-    assertEquals(2, nnMap.size());
-    InetSocketAddress nn1Address = nnMap.get("nn1");
-    assertEquals(NN1_HOST, nn1Address.getHostName());
-    assertEquals(DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT, nn1Address.getPort());
-    InetSocketAddress nn2Address = nnMap.get("nn2");
-    assertEquals(NN2_HOST, nn2Address.getHostName());
-    assertEquals(DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT, nn2Address.getPort());
-
-    // With Service RPC keys
-    final int CUSTOM_SERVICE_PORT = 9191;
-    conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        NS, "nn1"), NN1_HOST + ":" + CUSTOM_SERVICE_PORT);
-    conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        NS, "nn2"), NN2_HOST + ":" + CUSTOM_SERVICE_PORT);
-    nsMap = DFSUtil.getNNServiceRpcAddresses(conf);
-    assertEquals(1, nsMap.size());
-    nnMap = nsMap.get(NS);
-    assertEquals(2, nnMap.size());
-    nn1Address = nnMap.get("nn1");
-    assertEquals(NN1_HOST, nn1Address.getHostName());
-    assertEquals(CUSTOM_SERVICE_PORT, nn1Address.getPort());
-    nn2Address = nnMap.get("nn2");
-    assertEquals(NN2_HOST, nn2Address.getHostName());
-    assertEquals(CUSTOM_SERVICE_PORT, nn2Address.getPort());
-  }
-
-  @Test
   public void testGetNNServiceRpcAddressesForNsIds() throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.set(DFS_NAMESERVICES, "nn1,nn2");
@@ -1116,13 +1017,13 @@ public class TestDFSUtil {
     }
 
     Map<String, Map<String, InetSocketAddress>> nnMap = DFSUtil
-            .getNNServiceRpcAddresses(conf);
+            .getNNServiceRpcAddressesForCluster(conf);
     assertEquals(1, nnMap.size());
     assertTrue(nnMap.containsKey("nn1"));
 
     conf.set(DFS_INTERNAL_NAMESERVICES_KEY, "nn3");
     try {
-      DFSUtil.getNNServiceRpcAddresses(conf);
+      DFSUtil.getNNServiceRpcAddressesForCluster(conf);
       fail("Should fail for misconfiguration");
     } catch (IOException ignored) {
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6abb28/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
index 1914b78..59e8555 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.test.PathUtils;
-import org.junit.Ignore;
 import org.junit.Test;
 
 import java.io.File;
@@ -278,14 +277,17 @@ public class TestHDFSServerPorts {
       // different http port
       conf2.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, THIS_HOST);
       started = canStartNameNode(conf2);
-      assertFalse("Should've failed on service port", started);
 
-      // reset conf2 since NameNode modifies it
-      FileSystem.setDefaultUri(conf2, "hdfs://" + THIS_HOST);
-      conf2.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, THIS_HOST);
-      // Set Service address
-      conf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,  THIS_HOST);
-      started = canStartNameNode(conf2);
+      if (withService) {
+        assertFalse("Should've failed on service port", started);
+
+        // reset conf2 since NameNode modifies it
+        FileSystem.setDefaultUri(conf2, "hdfs://" + THIS_HOST);
+        conf2.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, THIS_HOST);
+        // Set Service address      
+        conf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,  THIS_HOST);
+        started = canStartNameNode(conf2);        
+      }
       assertTrue(started);
     } finally {
       stopNameNode(nn);
@@ -357,39 +359,38 @@ public class TestHDFSServerPorts {
     }
   }
     
-  /**
-   * Verify BackupNode port usage.
-   */
-  @Ignore
-  @Test(timeout = 300000)
-  public void testBackupNodePorts() throws Exception {
-    NameNode nn = null;
-    try {
-      nn = startNameNode();
-
-      Configuration backup_config = new HdfsConfiguration(config);
-      backup_config.set(
-          DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, THIS_HOST);
-      // bind http server to the same port as name-node
-      backup_config.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
-          backup_config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
-
-      LOG.info("= Starting 1 on: " + backup_config.get(
-          DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
-
-      assertFalse("Backup started on same port as Namenode",
-                         canStartBackupNode(backup_config)); // should fail
-
-      // bind http server to a different port
-      backup_config.set(
-          DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, THIS_HOST);
-      LOG.info("= Starting 2 on: " + backup_config.get(
-          DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
-
-      boolean started = canStartBackupNode(backup_config);
-      assertTrue("Backup Namenode should've started", started); // should start now
-    } finally {
-      stopNameNode(nn);
-    }
+    /**
+     * Verify BackupNode port usage.
+     */
+    @Test(timeout = 300000)
+    public void testBackupNodePorts() throws Exception {
+      NameNode nn = null;
+      try {
+        nn = startNameNode();
+
+        Configuration backup_config = new HdfsConfiguration(config);
+        backup_config.set(
+            DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, THIS_HOST);
+        // bind http server to the same port as name-node
+        backup_config.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, 
+            backup_config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
+
+        LOG.info("= Starting 1 on: " + backup_config.get(
+            DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
+
+        assertFalse("Backup started on same port as Namenode", 
+                           canStartBackupNode(backup_config)); // should fail
+
+        // bind http server to a different port
+        backup_config.set(
+            DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, THIS_HOST);
+        LOG.info("= Starting 2 on: " + backup_config.get(
+            DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
+
+        boolean started = canStartBackupNode(backup_config);
+        assertTrue("Backup Namenode should've started", started); // should start now
+      } finally {
+        stopNameNode(nn);
+      }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6abb28/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
index df6dc03..f25d28f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
@@ -324,7 +324,7 @@ public class TestSafeMode {
     } catch (RemoteException re) {
       assertEquals(SafeModeException.class.getName(), re.getClassName());
       GenericTestUtils.assertExceptionContains(
-          NameNode.getServiceAddress(conf).getHostName(), re);
+          NameNode.getServiceAddress(conf, true).getHostName(), re);
     } catch (IOException ioe) {
       fail("Encountered exception" + " " + StringUtils.stringifyException(ioe));
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6abb28/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
index 501ba77..c163894 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
@@ -77,9 +77,7 @@ public class MiniQJMHACluster {
   public static MiniDFSNNTopology createDefaultTopology(int nns, int startingPort) {
     MiniDFSNNTopology.NSConf nameservice = new MiniDFSNNTopology.NSConf(NAMESERVICE);
     for (int i = 0; i < nns; i++) {
-      nameservice.addNN(new MiniDFSNNTopology.NNConf("nn" + i)
-          .setIpcPort(startingPort++)
-          .setServicePort(startingPort++)
+      nameservice.addNN(new MiniDFSNNTopology.NNConf("nn" + i).setIpcPort(startingPort++)
           .setHttpPort(startingPort++));
     }
 
@@ -150,9 +148,8 @@ public class MiniQJMHACluster {
     int port = basePort;
     for (int i = 0; i < numNNs; i++) {
       nns.add("127.0.0.1:" + port);
-      // increment by 3 each time to account for the http and the service port
-      // in the config setting
-      port += 3;
+      // increment by 2 each time to account for the http port in the config setting
+      port += 2;
     }
 
     // use standard failover configurations
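
Each NN in createDefaultTopology now claims two consecutive ports from the starting port, IPC first and then HTTP, which is what the "+= 2" above mirrors when reconstructing the address list. A runnable sketch of the resulting layout for a hypothetical base port:

    public class PortLayoutSketch {
      public static void main(String[] args) {
        int port = 12000; // hypothetical basePort
        for (int i = 0; i < 2; i++) {
          System.out.printf("nn%d: ipc=%d http=%d%n", i, port, port + 1);
          port += 2; // IPC + HTTP per NN, no service port anymore
        }
      }
    }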

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6abb28/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
index 516f159..1444193 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
@@ -89,9 +89,8 @@ public class TestBalancerWithHANameNodes {
           / numOfDatanodes, (short) numOfDatanodes, 1);
 
       // start up an empty node with the same capacity and on the same rack
-      cluster.startDataNodes(cluster.getClusterConfiguration(),
-          1, true, null, new String[] {newNodeRack},
-          new long[] {newNodeCapacity});
+      cluster.startDataNodes(conf, 1, true, null, new String[] { newNodeRack },
+          new long[] { newNodeCapacity });
       totalCapacity += newNodeCapacity;
       TestBalancer.waitForHeartBeat(totalUsedSpace, totalCapacity, client,
           cluster);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6abb28/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java
index c199c9c..876a854 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java
@@ -105,11 +105,8 @@ public class InternalDataNodeTestUtils {
    *
    * @throws IOException
    */
-  public static DataNode startDNWithMockNN(
-      Configuration conf,
-      final InetSocketAddress nnSocketAddr,
-      final InetSocketAddress nnServiceAddr,
-      final String dnDataDir)
+  public static DataNode startDNWithMockNN(Configuration conf,
+      final InetSocketAddress nnSocketAddr, final String dnDataDir)
       throws IOException {
 
     FileSystem.setDefaultUri(conf, "hdfs://" + nnSocketAddr.getHostName() + ":"
@@ -152,7 +149,7 @@ public class InternalDataNodeTestUtils {
       @Override
       DatanodeProtocolClientSideTranslatorPB connectToNN(
           InetSocketAddress nnAddr) throws IOException {
-        Assert.assertEquals(nnServiceAddr, nnAddr);
+        Assert.assertEquals(nnSocketAddr, nnAddr);
         return namenode;
       }
     };

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6abb28/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
index 98450f6..311d5a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
@@ -124,6 +124,8 @@ public class TestBlockRecovery {
   private final static long RECOVERY_ID = 3000L;
   private final static String CLUSTER_ID = "testClusterID";
   private final static String POOL_ID = "BP-TEST";
+  private final static InetSocketAddress NN_ADDR = new InetSocketAddress(
+      "localhost", 5020);
   private final static long BLOCK_ID = 1000L;
   private final static long GEN_STAMP = 2000L;
   private final static long BLOCK_LEN = 3000L;
@@ -186,7 +188,7 @@ public class TestBlockRecovery {
     }
     conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
     FileSystem.setDefaultUri(conf,
-        "hdfs://localhost:5020");
+        "hdfs://" + NN_ADDR.getHostName() + ":" + NN_ADDR.getPort());
     ArrayList<StorageLocation> locations = new ArrayList<StorageLocation>();
     File dataDir = new File(DATA_DIR);
     FileUtil.fullyDelete(dataDir);
@@ -229,7 +231,7 @@ public class TestBlockRecovery {
       @Override
       DatanodeProtocolClientSideTranslatorPB connectToNN(
           InetSocketAddress nnAddr) throws IOException {
-        Assert.assertEquals("localhost:9840", nnAddr.toString());
+        Assert.assertEquals(NN_ADDR, nnAddr);
         return namenode;
       }
     };

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6abb28/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
index bee6c1d..32fda37 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
@@ -61,16 +61,11 @@ import com.google.common.base.Supplier;
 public class TestDataNodeMetricsLogger {
   static final Log LOG = LogFactory.getLog(TestDataNodeMetricsLogger.class);
 
-  @Rule
-  public Timeout globalTimeout = new Timeout(120_000);
-
   private static final String DATA_DIR = MiniDFSCluster.getBaseDirectory()
       + "data";
 
   private final static InetSocketAddress NN_ADDR = new InetSocketAddress(
       "localhost", 5020);
-  private final static InetSocketAddress NN_SERVICE_ADDR =
-      new InetSocketAddress("localhost", 5021);
 
   private DataNode dn;
 
@@ -91,13 +86,10 @@ public class TestDataNodeMetricsLogger {
     conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
     conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
     conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
-    conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        NN_SERVICE_ADDR.getHostName() + ":" + NN_SERVICE_ADDR.getPort());
     conf.setInt(DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_KEY,
         enableMetricsLogging ? 1 : 0); // If enabled, log early and log often
 
-    dn = InternalDataNodeTestUtils.startDNWithMockNN(
-        conf, NN_ADDR, NN_SERVICE_ADDR, DATA_DIR);
+    dn = InternalDataNodeTestUtils.startDNWithMockNN(conf, NN_ADDR, DATA_DIR);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6abb28/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
index 25650fd..8e1e236 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
@@ -109,16 +109,16 @@ public class TestDataNodeMultipleRegistrations {
       BPOfferService bpos2 = dn.getAllBpOs().get(1);
 
       // The order of bpos is not guaranteed, so fix the order
-      if (getNNSocketAddress(bpos1).equals(nn2.getServiceRpcAddress())) {
+      if (getNNSocketAddress(bpos1).equals(nn2.getNameNodeAddress())) {
         BPOfferService tmp = bpos1;
         bpos1 = bpos2;
         bpos2 = tmp;
       }
 
       assertEquals("wrong nn address", getNNSocketAddress(bpos1),
-          nn1.getServiceRpcAddress());
+          nn1.getNameNodeAddress());
       assertEquals("wrong nn address", getNNSocketAddress(bpos2),
-          nn2.getServiceRpcAddress());
+          nn2.getNameNodeAddress());
       assertEquals("wrong bpid", bpos1.getBlockPoolId(), bpid1);
       assertEquals("wrong bpid", bpos2.getBlockPoolId(), bpid2);
       assertEquals("wrong cid", dn.getClusterId(), cid1);
@@ -182,7 +182,7 @@ public class TestDataNodeMultipleRegistrations {
 
       assertEquals("wrong nn address",
           getNNSocketAddress(bpos1),
-          nn1.getServiceRpcAddress());
+          nn1.getNameNodeAddress());
       assertEquals("wrong bpid", bpos1.getBlockPoolId(), bpid1);
       assertEquals("wrong cid", dn.getClusterId(), cid1);
       cluster.shutdown();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6abb28/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java
index 884c93d..1dfd3c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java
@@ -51,10 +51,8 @@ public class TestDataNodeReconfiguration {
   private static final Log LOG = LogFactory.getLog(TestBlockRecovery.class);
   private static final String DATA_DIR = MiniDFSCluster.getBaseDirectory()
       + "data";
-  private final static InetSocketAddress NN_ADDR =
-      new InetSocketAddress("localhost", 5020);
-  private final static InetSocketAddress NN_SERVICE_ADDR =
-      new InetSocketAddress("localhost", 5021);
+  private final static InetSocketAddress NN_ADDR = new InetSocketAddress(
+      "localhost", 5020);
   private final int NUM_NAME_NODE = 1;
   private final int NUM_DATA_NODE = 10;
   private MiniDFSCluster cluster;
@@ -101,13 +99,10 @@ public class TestDataNodeReconfiguration {
     conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
     conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
     conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
-    conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        NN_SERVICE_ADDR.getHostName() + ":" + NN_SERVICE_ADDR.getPort());
 
     DataNode[] result = new DataNode[numDateNode];
     for (int i = 0; i < numDateNode; i++) {
-      result[i] = InternalDataNodeTestUtils.startDNWithMockNN(
-          conf, NN_ADDR, NN_SERVICE_ADDR, DATA_DIR);
+      result[i] = InternalDataNodeTestUtils.startDNWithMockNN(conf, NN_ADDR, DATA_DIR);
     }
     return result;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6abb28/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
index 5218021..bb1d9ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
@@ -78,6 +78,8 @@ public class TestDatanodeProtocolRetryPolicy {
   ArrayList<StorageLocation> locations = new ArrayList<StorageLocation>();
   private final static String CLUSTER_ID = "testClusterID";
   private final static String POOL_ID = "BP-TEST";
+  private final static InetSocketAddress NN_ADDR = new InetSocketAddress(
+      "localhost", 5020);
   private static DatanodeRegistration datanodeRegistration =
       DFSTestUtil.getLocalDatanodeRegistration();
 
@@ -99,7 +101,7 @@ public class TestDatanodeProtocolRetryPolicy {
     conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
     conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
     FileSystem.setDefaultUri(conf,
-        "hdfs://localhost:5020");
+        "hdfs://" + NN_ADDR.getHostName() + ":" + NN_ADDR.getPort());
     File dataDir = new File(DATA_DIR);
     FileUtil.fullyDelete(dataDir);
     dataDir.mkdirs();
@@ -226,7 +228,7 @@ public class TestDatanodeProtocolRetryPolicy {
       @Override
       DatanodeProtocolClientSideTranslatorPB connectToNN(
           InetSocketAddress nnAddr) throws IOException {
-        Assert.assertEquals("localhost:9840", nnAddr.toString());
+        Assert.assertEquals(NN_ADDR, nnAddr);
         return namenode;
       }
     };

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6abb28/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java
index 37d1b57..f8594ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java
@@ -44,11 +44,6 @@ public class TestRefreshNamenodes {
   private final int nnPort3 = 2227;
   private final int nnPort4 = 2230;
 
-  private final int nnServicePort1 = 2222;
-  private final int nnServicePort2 = 2225;
-  private final int nnServicePort3 = 2228;
-  private final int nnServicePort4 = 2231;
-
   @Test
   public void testRefreshNamenodes() throws IOException {
     // Start cluster with a single NN and DN
@@ -57,9 +52,7 @@ public class TestRefreshNamenodes {
     try {
       MiniDFSNNTopology topology = new MiniDFSNNTopology()
         .addNameservice(new NSConf("ns1").addNN(
-            new NNConf(null)
-                .setIpcPort(nnPort1)
-                .setServicePort(nnServicePort1)))
+            new NNConf(null).setIpcPort(nnPort1)))
         .setFederation(true);
       cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(topology)
@@ -68,20 +61,20 @@ public class TestRefreshNamenodes {
       DataNode dn = cluster.getDataNodes().get(0);
       assertEquals(1, dn.getAllBpOs().size());
 
-      cluster.addNameNode(conf, nnPort2, nnServicePort2);
+      cluster.addNameNode(conf, nnPort2);
       assertEquals(2, dn.getAllBpOs().size());
 
-      cluster.addNameNode(conf, nnPort3, nnServicePort3);
+      cluster.addNameNode(conf, nnPort3);
       assertEquals(3, dn.getAllBpOs().size());
 
-      cluster.addNameNode(conf, nnPort4, nnServicePort4);
+      cluster.addNameNode(conf, nnPort4);
 
       // Ensure a BPOfferService in the datanodes corresponds to
       // a namenode in the cluster
       Set<InetSocketAddress> nnAddrsFromCluster = Sets.newHashSet();
       for (int i = 0; i < 4; i++) {
         assertTrue(nnAddrsFromCluster.add(
-            cluster.getNameNode(i).getServiceRpcAddress()));
+            cluster.getNameNode(i).getNameNodeAddress()));
       }
       
       Set<InetSocketAddress> nnAddrsFromDN = Sets.newHashSet();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6abb28/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
index 5c58e0a..10d9f11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
@@ -54,7 +54,6 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.Before;
-import org.junit.Ignore;
 import org.junit.Test;
 
 import com.google.common.base.Supplier;
@@ -62,7 +61,6 @@ import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Lists;
 
-@Ignore("Temporarily disabling the BackupNode unit test.")
 public class TestBackupNode {
   public static final Log LOG = LogFactory.getLog(TestBackupNode.class);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6abb28/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
index 4282c22..2e49674 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
@@ -1364,9 +1364,9 @@ public class TestCheckpoint {
       Configuration snConf1 = new HdfsConfiguration(cluster.getConfiguration(0));
       Configuration snConf2 = new HdfsConfiguration(cluster.getConfiguration(1));
       InetSocketAddress nn1RpcAddress = cluster.getNameNode(0)
-          .getServiceRpcAddress();
+          .getNameNodeAddress();
       InetSocketAddress nn2RpcAddress = cluster.getNameNode(1)
-          .getServiceRpcAddress();
+          .getNameNodeAddress();
       String nn1 = nn1RpcAddress.getHostName() + ":" + nn1RpcAddress.getPort();
       String nn2 = nn2RpcAddress.getHostName() + ":" + nn2RpcAddress.getPort();
 
@@ -1923,7 +1923,6 @@ public class TestCheckpoint {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
           .format(true).build();
       int origPort = cluster.getNameNodePort();
-      int origServicePort = cluster.getNameNodeServicePort();
       int origHttpPort = cluster.getNameNode().getHttpAddress().getPort();
       Configuration snnConf = new Configuration(conf);
       File checkpointDir = new File(MiniDFSCluster.getBaseDirectory(),
@@ -1950,7 +1949,6 @@ public class TestCheckpoint {
       cluster = new MiniDFSCluster.Builder(conf)
           .numDataNodes(0)
           .nameNodePort(origPort)
-          .nameNodeServicePort(origServicePort)
           .nameNodeHttpPort(origHttpPort)
           .format(true).build();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6abb28/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index d21b275..36638e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -661,15 +661,12 @@ public class TestNameNodeMXBean {
     for (int i = 0; i < 5; i++) {
       try{
         // Have to specify IPC ports so the NNs can talk to each other.
-        int[] ports = ServerSocketUtil.getPorts(4);
+        int[] ports = ServerSocketUtil.getPorts(2);
         MiniDFSNNTopology topology = new MiniDFSNNTopology()
             .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-                .addNN(new MiniDFSNNTopology.NNConf("nn1")
-                    .setIpcPort(ports[0])
-                    .setServicePort(ports[1]))
-                .addNN(new MiniDFSNNTopology.NNConf("nn2")
-                    .setIpcPort(ports[2])
-                    .setServicePort(ports[3])));
+                .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(ports[0]))
+                .addNN(
+                    new MiniDFSNNTopology.NNConf("nn2").setIpcPort(ports[1])));
 
         cluster = new MiniDFSCluster.Builder(conf)
             .nnTopology(topology).numDataNodes(0)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6abb28/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java
index d7216c0..9a0e67c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java
@@ -110,7 +110,6 @@ public class TestNameNodeMetricsLogger {
       throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.set(FS_DEFAULT_NAME_KEY, "hdfs://localhost:0");
-    conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "0.0.0.0:0");
     conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
     conf.setInt(DFS_NAMENODE_METRICS_LOGGER_PERIOD_SECONDS_KEY,
         enableMetricsLogging ? 1 : 0);  // If enabled, log early and log often

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6abb28/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java
index 981785a..0cf1fed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java
@@ -125,8 +125,6 @@ public class TestValidateConfigurationSettings {
     // Set ephemeral ports 
     conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
         "127.0.0.1:0");
-    conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        "127.0.0.1:0");
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
         "127.0.0.1:0");
     

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f6abb28/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
index a367167..169bbee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.test.GenericTestUtils;




[2/4] hadoop git commit: Revert "HDFS-10391. Always enable NameNode service RPC port. Contributed by Gergely Novak."

Posted by ar...@apache.org.
Revert "HDFS-10391. Always enable NameNode service RPC port. Contributed by Gergely Novak."

This reverts commit aa4b6fbe754ab7e3cf8ee106598d550f6e14783e.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/65a94100
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/65a94100
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/65a94100

Branch: refs/heads/trunk
Commit: 65a941008d4bbf906772399d3f035f2a0da5abfa
Parents: 66ca0a6
Author: Arpit Agarwal <ar...@apache.org>
Authored: Thu Sep 14 11:17:08 2017 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Thu Sep 14 11:17:08 2017 -0700

----------------------------------------------------------------------
 .../hdfs/client/HdfsClientConfigKeys.java       |   1 -
 .../java/org/apache/hadoop/hdfs/DFSUtil.java    | 106 +++++++--------
 .../hdfs/server/datanode/BlockPoolManager.java  |   2 +-
 .../hadoop/hdfs/server/namenode/BackupNode.java |   2 +-
 .../hdfs/server/namenode/FSNamesystem.java      |   5 +-
 .../hadoop/hdfs/server/namenode/NameNode.java   |  29 ++--
 .../hdfs/server/namenode/NameNodeRpcServer.java | 129 ++++++++++--------
 .../hdfs/server/namenode/SecondaryNameNode.java |   2 +-
 .../hdfs/server/namenode/ha/EditLogTailer.java  |   3 +-
 .../server/namenode/ha/RemoteNameNodeInfo.java  |   2 +-
 .../server/namenode/ha/StandbyCheckpointer.java |   2 +-
 .../org/apache/hadoop/hdfs/tools/GetConf.java   |   4 +-
 .../src/main/resources/hdfs-default.xml         |   3 +-
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  |  66 ++-------
 .../apache/hadoop/hdfs/MiniDFSNNTopology.java   |  17 +--
 .../org/apache/hadoop/hdfs/TestDFSUtil.java     | 133 +++----------------
 .../apache/hadoop/hdfs/TestHDFSServerPorts.java |  85 ++++++------
 .../org/apache/hadoop/hdfs/TestSafeMode.java    |   2 +-
 .../hadoop/hdfs/qjournal/MiniQJMHACluster.java  |   9 +-
 .../balancer/TestBalancerWithHANameNodes.java   |   5 +-
 .../datanode/InternalDataNodeTestUtils.java     |   9 +-
 .../hdfs/server/datanode/TestBlockRecovery.java |   6 +-
 .../datanode/TestDataNodeMetricsLogger.java     |  10 +-
 .../TestDataNodeMultipleRegistrations.java      |   8 +-
 .../datanode/TestDataNodeReconfiguration.java   |  11 +-
 .../TestDatanodeProtocolRetryPolicy.java        |   6 +-
 .../server/datanode/TestRefreshNamenodes.java   |  17 +--
 .../hdfs/server/namenode/TestBackupNode.java    |   2 -
 .../hdfs/server/namenode/TestCheckpoint.java    |   6 +-
 .../server/namenode/TestNameNodeMXBean.java     |  11 +-
 .../namenode/TestNameNodeMetricsLogger.java     |   1 -
 .../TestValidateConfigurationSettings.java      |   2 -
 .../hdfs/server/namenode/ha/HATestUtil.java     |   1 +
 .../server/namenode/ha/TestEditLogTailer.java   |  20 +--
 .../hadoop/hdfs/tools/TestDFSHAAdmin.java       |   7 +-
 .../hdfs/tools/TestDFSHAAdminMiniCluster.java   |   2 +-
 .../hdfs/tools/TestDFSZKFailoverController.java |   6 +-
 .../apache/hadoop/hdfs/tools/TestGetConf.java   |  40 ++++--
 38 files changed, 297 insertions(+), 475 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index d6efb5c..e99b099 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -74,7 +74,6 @@ public interface HdfsClientConfigKeys {
   String  DFS_NAMENODE_HTTPS_ADDRESS_KEY = "dfs.namenode.https-address";
   String DFS_HA_NAMENODES_KEY_PREFIX = "dfs.ha.namenodes";
   int DFS_NAMENODE_RPC_PORT_DEFAULT = 9820;
-  int DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT = 9840;
   String DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY =
       "dfs.namenode.kerberos.principal";
   String  DFS_CLIENT_WRITE_PACKET_SIZE_KEY = "dfs.client-write-packet-size";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 3c71e76..7776dc2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -35,7 +35,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYPASSWORD_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY;
-import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT;
 
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
@@ -493,25 +492,61 @@ public class DFSUtil {
   }
 
   /**
+   * Returns list of InetSocketAddresses corresponding to namenodes from the
+   * configuration.
+   * 
+   * Returns namenode address specifically configured for datanodes (using
+   * service ports), if found. If not, regular RPC address configured for other
+   * clients is returned.
+   * 
+   * @param conf configuration
+   * @return list of InetSocketAddress
+   * @throws IOException on error
+   */
+  public static Map<String, Map<String, InetSocketAddress>> getNNServiceRpcAddresses(
+      Configuration conf) throws IOException {
+    // Use default address as fall back
+    String defaultAddress;
+    try {
+      defaultAddress = NetUtils.getHostPortString(
+          DFSUtilClient.getNNAddress(conf));
+    } catch (IllegalArgumentException e) {
+      defaultAddress = null;
+    }
+    
+    Map<String, Map<String, InetSocketAddress>> addressList =
+      DFSUtilClient.getAddresses(conf, defaultAddress,
+                                 DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+                                 DFS_NAMENODE_RPC_ADDRESS_KEY);
+    if (addressList.isEmpty()) {
+      throw new IOException("Incorrect configuration: namenode address "
+          + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or "  
+          + DFS_NAMENODE_RPC_ADDRESS_KEY
+          + " is not configured.");
+    }
+    return addressList;
+  }
+
+  /**
    * Returns list of InetSocketAddresses corresponding to the namenode
    * that manages this cluster. Note this is to be used by datanodes to get
    * the list of namenode addresses to talk to.
    *
-   * Returns namenode address specifically configured for datanodes
+   * Returns namenode address specifically configured for datanodes (using
+   * service ports), if found. If not, regular RPC address configured for other
+   * clients is returned.
    *
    * @param conf configuration
    * @return list of InetSocketAddress
    * @throws IOException on error
    */
   public static Map<String, Map<String, InetSocketAddress>>
-      getNNServiceRpcAddresses(Configuration conf) throws IOException {
+    getNNServiceRpcAddressesForCluster(Configuration conf) throws IOException {
     // Use default address as fall back
     String defaultAddress;
     try {
-      InetSocketAddress rpcAddress = DFSUtilClient.getNNAddress(conf);
-      InetSocketAddress serviceAddress = InetSocketAddress.createUnresolved(
-          rpcAddress.getHostName(), DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT);
-      defaultAddress = NetUtils.getHostPortString(serviceAddress);
+      defaultAddress = NetUtils.getHostPortString(
+          DFSUtilClient.getNNAddress(conf));
     } catch (IllegalArgumentException e) {
       defaultAddress = null;
     }
@@ -534,46 +569,16 @@ public class DFSUtil {
       }
     }
 
-    // If true, then replace the port numbers in the final address list
-    // with the default service RPC port.
-    boolean replacePortNumbers = false;
-
-    // First try to lookup using the service RPC address keys.
     Map<String, Map<String, InetSocketAddress>> addressList =
-            DFSUtilClient.getAddressesForNsIds(
-                conf, parentNameServices, null,
-                DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
-
-    // Next try to lookup using the RPC address key.
-    if (addressList.isEmpty()) {
-      replacePortNumbers = true;
-      addressList = DFSUtilClient.getAddressesForNsIds(
-          conf, parentNameServices, null, DFS_NAMENODE_RPC_ADDRESS_KEY);
-    }
-
-    // Finally, fallback to the default address.
-    // This will not yield the correct address in a federated/HA setup.
-    if (addressList.isEmpty()) {
-      addressList = DFSUtilClient.getAddressesForNsIds(
-          conf, parentNameServices, defaultAddress);
-    }
-
+            DFSUtilClient.getAddressesForNsIds(conf, parentNameServices,
+                                               defaultAddress,
+                                               DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+                                               DFS_NAMENODE_RPC_ADDRESS_KEY);
     if (addressList.isEmpty()) {
       throw new IOException("Incorrect configuration: namenode address "
-          + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or "
-          + DFS_NAMENODE_RPC_ADDRESS_KEY
-          + " is not configured.");
-    }
-
-    if (replacePortNumbers) {
-      // Replace the RPC port(s) with the default service RPC port(s)
-      addressList.forEach((nsId, addresses) -> {
-        addresses.forEach((nnId, address) -> {
-          InetSocketAddress serviceAddress = InetSocketAddress.createUnresolved(
-              address.getHostName(), DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT);
-          addresses.put(nnId, serviceAddress);
-        });
-      });
+              + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or "
+              + DFS_NAMENODE_RPC_ADDRESS_KEY
+              + " is not configured.");
     }
     return addressList;
   }
@@ -1225,17 +1230,12 @@ public class DFSUtil {
     String serviceAddrKey = DFSUtilClient.concatSuffixes(
         DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nsId, nnId);
 
+    String addrKey = DFSUtilClient.concatSuffixes(
+        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, nsId, nnId);
+
     String serviceRpcAddr = conf.get(serviceAddrKey);
     if (serviceRpcAddr == null) {
-      String addrKey = DFSUtilClient.concatSuffixes(
-          DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, nsId, nnId);
-      String rpcAddress = conf.get(addrKey);
-      if (rpcAddress != null) {
-        InetSocketAddress rpcAddr = NetUtils.createSocketAddr(rpcAddress);
-        InetSocketAddress serviceAddr = InetSocketAddress.createUnresolved(
-            rpcAddr.getHostName(), DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT);
-        serviceRpcAddr = NetUtils.getHostPortString(serviceAddr);
-      }
+      serviceRpcAddr = conf.get(addrKey);
     }
     return serviceRpcAddr;
   }
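
For readers skimming the hunk above: the restored behavior is a plain
two-key lookup with no port rewriting. The sketch below is an
illustrative condensation, not the actual DFSUtil code; the
configuration key names are real, while the helper class and method
are hypothetical:

    import org.apache.hadoop.conf.Configuration;

    public class ServiceAddrLookupSketch {
      // Condensed form of the restored getNamenodeServiceAddr logic:
      // prefer the service-specific key, else fall back to the client
      // RPC key verbatim, port included -- no default service port is
      // substituted any more.
      static String resolveServiceAddr(Configuration conf,
                                       String nsId, String nnId) {
        String suffix = (nsId == null ? "" : "." + nsId)
            + (nnId == null ? "" : "." + nnId);
        String addr = conf.get("dfs.namenode.servicerpc-address" + suffix);
        if (addr == null) {
          addr = conf.get("dfs.namenode.rpc-address" + suffix);
        }
        return addr;
      }
    }

This is also why the TestDFSUtil changes later in this message expect
the client RPC host:port, rather than an address rewritten to the old
default service port 9840, whenever the servicerpc key is unset.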

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
index 677559c..f6a11c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
@@ -150,7 +150,7 @@ class BlockPoolManager {
             (DFSConfigKeys.DFS_NAMESERVICES));
 
     Map<String, Map<String, InetSocketAddress>> newAddressMap = DFSUtil
-            .getNNServiceRpcAddresses(conf);
+            .getNNServiceRpcAddressesForCluster(conf);
     Map<String, Map<String, InetSocketAddress>> newLifelineAddressMap = DFSUtil
             .getNNLifelineRpcAddressesForCluster(conf);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
index 5c2dcdc..318d8e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
@@ -318,7 +318,7 @@ public class BackupNode extends NameNode {
 
   private NamespaceInfo handshake(Configuration conf) throws IOException {
     // connect to name node
-    InetSocketAddress nnAddress = NameNode.getServiceAddress(conf);
+    InetSocketAddress nnAddress = NameNode.getServiceAddress(conf, true);
     this.namenode = NameNodeProxies.createNonHAProxy(conf, nnAddress,
         NamenodeProtocol.class, UserGroupInformation.getCurrentUser(),
         true).getProxy();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 55695b3..d9f3c0e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1157,8 +1157,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       dir.setINodeAttributeProvider(inodeAttributeProvider);
     }
     snapshotManager.registerMXBean();
-    InetSocketAddress serviceAddress = NameNode.getServiceAddress(conf);
-    this.nameNodeHostName = serviceAddress.getHostName();
+    InetSocketAddress serviceAddress = NameNode.getServiceAddress(conf, true);
+    this.nameNodeHostName = (serviceAddress != null) ?
+        serviceAddress.getHostName() : "";
   }
   
   /** 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index d700439..79bbbc5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -505,17 +505,18 @@ public class NameNode extends ReconfigurableBase implements
   
   /**
    * Fetches the address for services to use when connecting to namenode
+   * based on the value of fallback: when the service-specific address
+   * is not configured, returns null (fallback false) or the default
+   * namenode address shared by clients and services (fallback true).
    * Services here are datanodes, backup node, any non client connection
    */
-  public static InetSocketAddress getServiceAddress(Configuration conf) {
-    String address = conf.getTrimmed(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
-    if (address == null || address.isEmpty()) {
-      InetSocketAddress rpcAddress = DFSUtilClient.getNNAddress(conf);
-      return NetUtils.createSocketAddr(rpcAddress.getHostName(),
-          HdfsClientConfigKeys.DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT);
+  public static InetSocketAddress getServiceAddress(Configuration conf,
+                                                        boolean fallback) {
+    String addr = conf.getTrimmed(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
+    if (addr == null || addr.isEmpty()) {
+      return fallback ? DFSUtilClient.getNNAddress(conf) : null;
     }
-    return NetUtils.createSocketAddr(address,
-        HdfsClientConfigKeys.DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT);
+    return DFSUtilClient.getNNAddress(addr);
   }
 
   //
@@ -553,7 +554,7 @@ public class NameNode extends ReconfigurableBase implements
    * If the service rpc is not configured returns null
    */
   protected InetSocketAddress getServiceRpcServerAddress(Configuration conf) {
-    return NameNode.getServiceAddress(conf);
+    return NameNode.getServiceAddress(conf, false);
   }
 
   protected InetSocketAddress getRpcServerAddress(Configuration conf) {
@@ -614,8 +615,7 @@ public class NameNode extends ReconfigurableBase implements
   }
 
   /**
-   * Modifies the configuration passed to contain the service rpc address
-   * setting.
+   * Modifies the configuration passed to contain the service rpc address setting
    */
   protected void setRpcServiceServerAddress(Configuration conf,
       InetSocketAddress serviceRPCAddress) {
@@ -1071,13 +1071,6 @@ public class NameNode extends ReconfigurableBase implements
   }
 
   /**
-   * @return NameNode service RPC address in "host:port" string form
-   */
-  public String getServiceRpcAddressHostPortString() {
-    return NetUtils.getHostPortString(getServiceRpcAddress());
-  }
-
-  /**
    * @return NameNode HTTP address, used by the Web UI, image transfer,
    *    and HTTP-based file system clients like WebHDFS
    */
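
The restored two-argument getServiceAddress makes the fallback
decision explicit at each call site: checkpointing and tailing code
passes true, while the RPC-server setup path passes false and treats
null as "no separate service RPC server". A minimal sketch of the two
modes, assuming a bare Configuration (the wrapper class and host name
are illustrative):

    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.server.namenode.NameNode;

    public class GetServiceAddressDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://nn.example.com:9820");
        // Note: dfs.namenode.servicerpc-address is deliberately unset.

        // fallback=true (SecondaryNameNode, BackupNode, EditLogTailer,
        // StandbyCheckpointer): reuse the client RPC address.
        InetSocketAddress a = NameNode.getServiceAddress(conf, true);

        // fallback=false (getServiceRpcServerAddress): null tells the
        // NameNode to skip creating a service RPC server entirely.
        InetSocketAddress b = NameNode.getServiceAddress(conf, false);

        System.out.println(a + " / " + b);  // client RPC address / null
      }
    }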

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 78790bd..1ef3f55 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -333,63 +333,66 @@ public class NameNodeRpcServer implements NamenodeProtocols {
         .newReflectiveBlockingService(traceAdminXlator);
 
     InetSocketAddress serviceRpcAddr = nn.getServiceRpcServerAddress(conf);
-    String bindHost = nn.getServiceRpcServerBindHost(conf);
-    if (bindHost == null) {
-      bindHost = serviceRpcAddr.getHostName();
-    }
+    if (serviceRpcAddr != null) {
+      String bindHost = nn.getServiceRpcServerBindHost(conf);
+      if (bindHost == null) {
+        bindHost = serviceRpcAddr.getHostName();
+      }
+      LOG.info("Service RPC server is binding to " + bindHost + ":" +
+          serviceRpcAddr.getPort());
+
+      int serviceHandlerCount =
+        conf.getInt(DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY,
+                    DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT);
+      serviceRpcServer = new RPC.Builder(conf)
+          .setProtocol(
+              org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB.class)
+          .setInstance(clientNNPbService)
+          .setBindAddress(bindHost)
+          .setPort(serviceRpcAddr.getPort())
+          .setNumHandlers(serviceHandlerCount)
+          .setVerbose(false)
+          .setSecretManager(namesystem.getDelegationTokenSecretManager())
+          .build();
 
-    LOG.info("Service RPC server is binding to " + bindHost + ":" +
-        serviceRpcAddr.getPort());
+      // Add all the RPC protocols that the namenode implements
+      DFSUtil.addPBProtocol(conf, HAServiceProtocolPB.class, haPbService,
+          serviceRpcServer);
+      DFSUtil.addPBProtocol(conf, ReconfigurationProtocolPB.class,
+          reconfigurationPbService, serviceRpcServer);
+      DFSUtil.addPBProtocol(conf, NamenodeProtocolPB.class, NNPbService,
+          serviceRpcServer);
+      DFSUtil.addPBProtocol(conf, DatanodeProtocolPB.class, dnProtoPbService,
+          serviceRpcServer);
+      DFSUtil.addPBProtocol(conf, RefreshAuthorizationPolicyProtocolPB.class,
+          refreshAuthService, serviceRpcServer);
+      DFSUtil.addPBProtocol(conf, RefreshUserMappingsProtocolPB.class, 
+          refreshUserMappingService, serviceRpcServer);
+      // We support Refreshing call queue here in case the client RPC queue is full
+      DFSUtil.addPBProtocol(conf, RefreshCallQueueProtocolPB.class,
+          refreshCallQueueService, serviceRpcServer);
+      DFSUtil.addPBProtocol(conf, GenericRefreshProtocolPB.class,
+          genericRefreshService, serviceRpcServer);
+      DFSUtil.addPBProtocol(conf, GetUserMappingsProtocolPB.class, 
+          getUserMappingService, serviceRpcServer);
+      DFSUtil.addPBProtocol(conf, TraceAdminProtocolPB.class,
+          traceAdminService, serviceRpcServer);
 
-    int serviceHandlerCount = conf.getInt(
-        DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY,
-        DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT);
-    serviceRpcServer = new RPC.Builder(conf)
-        .setProtocol(
-            org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB.class)
-        .setInstance(clientNNPbService)
-        .setBindAddress(bindHost)
-        .setPort(serviceRpcAddr.getPort())
-        .setNumHandlers(serviceHandlerCount)
-        .setVerbose(false)
-        .setSecretManager(namesystem.getDelegationTokenSecretManager())
-        .build();
-
-    // Add all the RPC protocols that the namenode implements
-    DFSUtil.addPBProtocol(conf, HAServiceProtocolPB.class, haPbService,
-        serviceRpcServer);
-    DFSUtil.addPBProtocol(conf, ReconfigurationProtocolPB.class,
-        reconfigurationPbService, serviceRpcServer);
-    DFSUtil.addPBProtocol(conf, NamenodeProtocolPB.class, NNPbService,
-        serviceRpcServer);
-    DFSUtil.addPBProtocol(conf, DatanodeProtocolPB.class, dnProtoPbService,
-        serviceRpcServer);
-    DFSUtil.addPBProtocol(conf, RefreshAuthorizationPolicyProtocolPB.class,
-        refreshAuthService, serviceRpcServer);
-    DFSUtil.addPBProtocol(conf, RefreshUserMappingsProtocolPB.class,
-        refreshUserMappingService, serviceRpcServer);
-    // We support Refreshing call queue here in case the client RPC queue
-    // is full.
-    DFSUtil.addPBProtocol(conf, RefreshCallQueueProtocolPB.class,
-        refreshCallQueueService, serviceRpcServer);
-    DFSUtil.addPBProtocol(conf, GenericRefreshProtocolPB.class,
-        genericRefreshService, serviceRpcServer);
-    DFSUtil.addPBProtocol(conf, GetUserMappingsProtocolPB.class,
-        getUserMappingService, serviceRpcServer);
-    DFSUtil.addPBProtocol(conf, TraceAdminProtocolPB.class,
-        traceAdminService, serviceRpcServer);
-
-    // Update the address with the correct port.
-    InetSocketAddress listenAddr = serviceRpcServer.getListenerAddress();
-    serviceRPCAddress = new InetSocketAddress(
-          serviceRpcAddr.getHostName(), listenAddr.getPort());
-    nn.setRpcServiceServerAddress(conf, serviceRPCAddress);
+      // Update the address with the correct port
+      InetSocketAddress listenAddr = serviceRpcServer.getListenerAddress();
+      serviceRPCAddress = new InetSocketAddress(
+            serviceRpcAddr.getHostName(), listenAddr.getPort());
+      nn.setRpcServiceServerAddress(conf, serviceRPCAddress);
+    } else {
+      serviceRpcServer = null;
+      serviceRPCAddress = null;
+    }
 
     InetSocketAddress lifelineRpcAddr = nn.getLifelineRpcServerAddress(conf);
     if (lifelineRpcAddr != null) {
       RPC.setProtocolEngine(conf, HAServiceProtocolPB.class,
           ProtobufRpcEngine.class);
-      bindHost = nn.getLifelineRpcServerBindHost(conf);
+      String bindHost = nn.getLifelineRpcServerBindHost(conf);
       if (bindHost == null) {
         bindHost = lifelineRpcAddr.getHostName();
       }
@@ -419,7 +422,7 @@ public class NameNodeRpcServer implements NamenodeProtocols {
           lifelineProtoPbService, lifelineRpcServer);
 
       // Update the address with the correct port
-      listenAddr = lifelineRpcServer.getListenerAddress();
+      InetSocketAddress listenAddr = lifelineRpcServer.getListenerAddress();
       lifelineRPCAddress = new InetSocketAddress(lifelineRpcAddr.getHostName(),
           listenAddr.getPort());
       nn.setRpcLifelineServerAddress(conf, lifelineRPCAddress);
@@ -429,7 +432,7 @@ public class NameNodeRpcServer implements NamenodeProtocols {
     }
 
     InetSocketAddress rpcAddr = nn.getRpcServerAddress(conf);
-    bindHost = nn.getRpcServerBindHost(conf);
+    String bindHost = nn.getRpcServerBindHost(conf);
     if (bindHost == null) {
       bindHost = rpcAddr.getHostName();
     }
@@ -473,14 +476,16 @@ public class NameNodeRpcServer implements NamenodeProtocols {
           conf.getBoolean(
             CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
       clientRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
-      serviceRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
+      if (serviceRpcServer != null) {
+        serviceRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
+      }
       if (lifelineRpcServer != null) {
         lifelineRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
       }
     }
 
     // The rpc-server port can be ephemeral... ensure we have the correct info
-    listenAddr = clientRpcServer.getListenerAddress();
+    InetSocketAddress listenAddr = clientRpcServer.getListenerAddress();
     clientRpcAddress = new InetSocketAddress(
         rpcAddr.getHostName(), listenAddr.getPort());
     nn.setRpcServerAddress(conf, clientRpcAddress);
@@ -518,7 +523,9 @@ public class NameNodeRpcServer implements NamenodeProtocols {
     clientRpcServer.addSuppressedLoggingExceptions(StandbyException.class);
 
     clientRpcServer.setTracer(nn.tracer);
-    serviceRpcServer.setTracer(nn.tracer);
+    if (serviceRpcServer != null) {
+      serviceRpcServer.setTracer(nn.tracer);
+    }
     if (lifelineRpcServer != null) {
       lifelineRpcServer.setTracer(nn.tracer);
     }
@@ -547,7 +554,9 @@ public class NameNodeRpcServer implements NamenodeProtocols {
    */
   void start() {
     clientRpcServer.start();
-    serviceRpcServer.start();
+    if (serviceRpcServer != null) {
+      serviceRpcServer.start();      
+    }
     if (lifelineRpcServer != null) {
       lifelineRpcServer.start();
     }
@@ -558,7 +567,9 @@ public class NameNodeRpcServer implements NamenodeProtocols {
    */
   void join() throws InterruptedException {
     clientRpcServer.join();
-    serviceRpcServer.join();
+    if (serviceRpcServer != null) {
+      serviceRpcServer.join();      
+    }
     if (lifelineRpcServer != null) {
       lifelineRpcServer.join();
     }
@@ -571,7 +582,9 @@ public class NameNodeRpcServer implements NamenodeProtocols {
     if (clientRpcServer != null) {
       clientRpcServer.stop();
     }
-    serviceRpcServer.stop();
+    if (serviceRpcServer != null) {
+      serviceRpcServer.stop();
+    }
     if (lifelineRpcServer != null) {
       lifelineRpcServer.stop();
     }
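
Every lifecycle touchpoint in the hunks above is now guarded by the
same null check. The idiom, stripped of HDFS specifics (the Server
interface below is a stand-in, not a Hadoop type):

    interface Server {
      void start();
      void stop();
    }

    class OptionalServerLifecycle {
      private final Server clientRpcServer;   // always created
      private final Server serviceRpcServer;  // null when unconfigured

      OptionalServerLifecycle(Server client, Server service) {
        this.clientRpcServer = client;
        this.serviceRpcServer = service;      // may be null
      }

      void start() {
        clientRpcServer.start();
        if (serviceRpcServer != null) {       // guard every touchpoint
          serviceRpcServer.start();
        }
      }

      void stop() {
        clientRpcServer.stop();
        if (serviceRpcServer != null) {
          serviceRpcServer.stop();
        }
      }
    }

The same guard wraps refreshServiceAcl, setTracer and join in the
actual hunks, so a NameNode without a servicerpc-address behaves as
it did before HDFS-10391.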

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index e8dfb72..ff83e34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -228,7 +228,7 @@ public class SecondaryNameNode implements Runnable,
 
     // Create connection to the namenode.
     shouldRun = true;
-    nameNodeAddr = NameNode.getServiceAddress(conf);
+    nameNodeAddr = NameNode.getServiceAddress(conf, true);
 
     this.conf = conf;
     this.namenode = NameNodeProxies.createNonHAProxy(conf, nameNodeAddr, 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
index fd5a70e..f57cb4b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
@@ -159,8 +159,7 @@ public class EditLogTailer {
 
       for (RemoteNameNodeInfo info : nns) {
         // overwrite the socket address, if we need to
-        InetSocketAddress ipc = NameNode.getServiceAddress(
-            info.getConfiguration());
+        InetSocketAddress ipc = NameNode.getServiceAddress(info.getConfiguration(), true);
         // sanity check the ipc address
         Preconditions.checkArgument(ipc.getPort() > 0,
             "Active NameNode must have an IPC port configured. " + "Got address '%s'", ipc);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RemoteNameNodeInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RemoteNameNodeInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RemoteNameNodeInfo.java
index 248be55..9a51190 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RemoteNameNodeInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RemoteNameNodeInfo.java
@@ -54,7 +54,7 @@ public class RemoteNameNodeInfo {
     for (Configuration otherNode : otherNodes) {
       String otherNNId = HAUtil.getNameNodeId(otherNode, nsId);
       // don't do any validation here as in some cases, it can be overwritten later
-      InetSocketAddress otherIpcAddr = NameNode.getServiceAddress(otherNode);
+      InetSocketAddress otherIpcAddr = NameNode.getServiceAddress(otherNode, true);
 
 
       final String scheme = DFSUtil.getHttpClientScheme(conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
index 3cbcd9c..789ed9c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
@@ -121,7 +121,7 @@ public class StandbyCheckpointer {
   
   private URL getHttpAddress(Configuration conf) throws IOException {
     final String scheme = DFSUtil.getHttpClientScheme(conf);
-    String defaultHost = NameNode.getServiceAddress(conf).getHostName();
+    String defaultHost = NameNode.getServiceAddress(conf, true).getHostName();
     URI addr = DFSUtil.getInfoServerWithDefaultHost(defaultHost, conf, scheme);
     return addr.toURL();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
index e780393..e6cf16c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
@@ -187,7 +187,7 @@ public class GetConf extends Configured implements Tool {
   static class NameNodesCommandHandler extends CommandHandler {
     @Override
     int doWorkInternal(GetConf tool, String []args) throws IOException {
-      tool.printMap(DFSUtil.getNNServiceRpcAddresses(tool.getConf()));
+      tool.printMap(DFSUtil.getNNServiceRpcAddressesForCluster(tool.getConf()));
       return 0;
     }
   }
@@ -224,7 +224,7 @@ public class GetConf extends Configured implements Tool {
     public int doWorkInternal(GetConf tool, String []args) throws IOException {
       Configuration config = tool.getConf();
       List<ConfiguredNNAddress> cnnlist = DFSUtil.flattenAddressMap(
-          DFSUtil.getNNServiceRpcAddresses(config));
+          DFSUtil.getNNServiceRpcAddressesForCluster(config));
       if (!cnnlist.isEmpty()) {
         for (ConfiguredNNAddress cnn : cnnlist) {
           InetSocketAddress rpc = cnn.getAddress();
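
The practical effect is that "hdfs getconf -namenodes" reports exactly
the addresses datanodes would connect to. A hedged sketch of the same
flatten-and-print flow using the methods from the hunk above (the
wrapper class is illustrative):

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class PrintNameNodeAddrs {
      public static void main(String[] args) throws IOException {
        Configuration conf = new HdfsConfiguration();
        // Flatten the per-nameservice, per-namenode map into one list.
        List<ConfiguredNNAddress> cnnlist = DFSUtil.flattenAddressMap(
            DFSUtil.getNNServiceRpcAddressesForCluster(conf));
        for (ConfiguredNNAddress cnn : cnnlist) {
          InetSocketAddress rpc = cnn.getAddress();
          System.out.println(rpc.getHostName() + ":" + rpc.getPort());
        }
      }
    }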

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 88c102a..af40a34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -61,7 +61,8 @@
     connecting to this address if it is configured. In the case of HA/Federation where multiple namenodes exist,
     the name service id is added to the name e.g. dfs.namenode.servicerpc-address.ns1
     dfs.namenode.rpc-address.EXAMPLENAMESERVICE
-    The value of this property will take the form of nn-host1:rpc-port. The NameNode's default service RPC port is 9840.
+    The value of this property will take the form of nn-host1:rpc-port.
+    If the value of this property is unset the value of dfs.namenode.rpc-address will be used as the default.
   </description>
 </property>
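
In practice the description above boils down to: set only the rpc key
and datanodes share the client port; set both keys and the traffic is
split. A sketch of the two layouts via Configuration (key names are
real; hosts and ports are placeholders, and 9821 is an arbitrary pick
since there is no longer a default service port):

    import org.apache.hadoop.conf.Configuration;

    public class ServiceRpcConfigSketch {
      public static void main(String[] args) {
        // Shared endpoint: clients and datanodes both dial 9820.
        Configuration shared = new Configuration();
        shared.set("dfs.namenode.rpc-address", "nn.example.com:9820");

        // Split endpoints: clients keep 9820; datanodes, ZKFC and
        // other service callers use the dedicated port 9821.
        Configuration split = new Configuration(shared);
        split.set("dfs.namenode.servicerpc-address", "nn.example.com:9821");
      }
    }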
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index aa3ed30..0345cf5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -166,7 +166,6 @@ public class MiniDFSCluster implements AutoCloseable {
    */
   public static class Builder {
     private int nameNodePort = 0;
-    private int nameNodeServicePort = 0;
     private int nameNodeHttpPort = 0;
     private final Configuration conf;
     private int numDataNodes = 1;
@@ -209,14 +208,6 @@ public class MiniDFSCluster implements AutoCloseable {
       this.nameNodePort = val;
       return this;
     }
-
-    /**
-     * Default: 0
-     */
-    public Builder nameNodeServicePort(int val) {
-      this.nameNodeServicePort = val;
-      return this;
-    }
     
     /**
      * Default: 0
@@ -408,8 +399,8 @@ public class MiniDFSCluster implements AutoCloseable {
     }
 
     /**
-     * Default: false.
-     * When true the hosts file/include file for the cluster is setup.
+     * Default: false
+     * When true the hosts file/include file for the cluster is setup
      */
     public Builder setupHostsFile(boolean val) {
       this.setupHostsFile = val;
@@ -419,7 +410,7 @@ public class MiniDFSCluster implements AutoCloseable {
     /**
      * Default: a single namenode.
      * See {@link MiniDFSNNTopology#simpleFederatedTopology(int)} to set up
-     * federated nameservices.
+     * federated nameservices
      */
     public Builder nnTopology(MiniDFSNNTopology topology) {
       this.nnTopology = topology;
@@ -470,8 +461,7 @@ public class MiniDFSCluster implements AutoCloseable {
     if (builder.nnTopology == null) {
       // If no topology is specified, build a single NN. 
       builder.nnTopology = MiniDFSNNTopology.simpleSingleNN(
-          builder.nameNodePort, builder.nameNodeServicePort,
-          builder.nameNodeHttpPort);
+          builder.nameNodePort, builder.nameNodeHttpPort);
     }
     assert builder.storageTypes == null ||
            builder.storageTypes.length == builder.numDataNodes;
@@ -780,7 +770,7 @@ public class MiniDFSCluster implements AutoCloseable {
                        manageNameDfsDirs, true, manageDataDfsDirs, manageDataDfsDirs,
                        operation, null, racks, hosts,
                        null, simulatedCapacities, null, true, false,
-                       MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0, 0),
+                       MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0),
                        true, false, false, null, true, false);
   }
 
@@ -1259,11 +1249,6 @@ public class MiniDFSCluster implements AutoCloseable {
         DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId,
         nnConf.getNnId());
     conf.set(key, "127.0.0.1:" + nnConf.getIpcPort());
-
-    key = DFSUtil.addKeySuffixes(
-        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nameserviceId,
-        nnConf.getNnId());
-    conf.set(key, "127.0.0.1:" + nnConf.getServicePort());
   }
   
   private static String[] createArgs(StartupOption operation) {
@@ -1297,8 +1282,6 @@ public class MiniDFSCluster implements AutoCloseable {
     // the conf
     hdfsConf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
         nameserviceId, nnId), nn.getNameNodeAddressHostPortString());
-    hdfsConf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        nameserviceId, nnId), nn.getServiceRpcAddressHostPortString());
     if (nn.getHttpAddress() != null) {
       hdfsConf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTP_ADDRESS_KEY,
           nameserviceId, nnId), NetUtils.getHostPortString(nn.getHttpAddress()));
@@ -1354,14 +1337,6 @@ public class MiniDFSCluster implements AutoCloseable {
     return getNN(nnIndex).conf;
   }
 
-  /**
-   * Return the cluster-wide configuration.
-   * @return
-   */
-  public Configuration getClusterConfiguration() {
-    return conf;
-  }
-
   private NameNodeInfo getNN(int nnIndex) {
     int count = 0;
     for (NameNodeInfo nn : namenodes.values()) {
@@ -1955,16 +1930,6 @@ public class MiniDFSCluster implements AutoCloseable {
   }
 
   /**
-   * Gets the service rpc port used by the NameNode, because the caller
-   * supplied port is not necessarily the actual port used.
-   * Assumption: cluster has a single namenode
-   */
-  public int getNameNodeServicePort() {
-    checkSingleNameNode();
-    return getNameNodeServicePort(0);
-  }
-
-  /**
    * @return the service rpc port used by the NameNode at the given index.
    */     
   public int getNameNodeServicePort(int nnIndex) {
@@ -2591,14 +2556,12 @@ public class MiniDFSCluster implements AutoCloseable {
     }
 
     NameNodeInfo info = getNN(nnIndex);
-    InetSocketAddress nameNodeAddress = info.nameNode.getNameNodeAddress();
-    assert nameNodeAddress.getPort() != 0;
-    DFSClient client = new DFSClient(nameNodeAddress, conf);
+    InetSocketAddress addr = info.nameNode.getServiceRpcAddress();
+    assert addr.getPort() != 0;
+    DFSClient client = new DFSClient(addr, conf);
 
     // ensure all datanodes have registered and sent heartbeat to the namenode
-    InetSocketAddress serviceAddress = info.nameNode.getServiceRpcAddress();
-    while (shouldWait(client.datanodeReport(DatanodeReportType.LIVE),
-        serviceAddress)) {
+    while (shouldWait(client.datanodeReport(DatanodeReportType.LIVE), addr)) {
       try {
         LOG.info("Waiting for cluster to become active");
         Thread.sleep(100);
@@ -3093,18 +3056,13 @@ public class MiniDFSCluster implements AutoCloseable {
     }
   }
 
-  public void addNameNode(Configuration conf, int namenodePort)
-      throws IOException{
-    addNameNode(conf, namenodePort, 0);
-  }
-
   /**
    * Add a namenode to a federated cluster and start it. Configuration of
    * datanodes in the cluster is refreshed to register with the new namenode.
    * 
    * @return newly started namenode
    */
-  public void addNameNode(Configuration conf, int namenodePort, int servicePort)
+  public void addNameNode(Configuration conf, int namenodePort)
       throws IOException {
     if(!federation)
       throw new IOException("cannot add namenode to non-federated cluster");
@@ -3118,9 +3076,7 @@ public class MiniDFSCluster implements AutoCloseable {
   
     String nnId = null;
     initNameNodeAddress(conf, nameserviceId,
-        new NNConf(nnId)
-            .setIpcPort(namenodePort)
-            .setServicePort(servicePort));
+        new NNConf(nnId).setIpcPort(namenodePort));
     // figure out the current number of NNs
     NameNodeInfo[] infos = this.getNameNodeInfos(nameserviceId);
     int nnIndex = infos == null ? 0 : infos.length;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java
index b1d609a..b9786a3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java
@@ -43,13 +43,12 @@ public class MiniDFSNNTopology {
    * Set up a simple non-federated non-HA NN.
    */
   public static MiniDFSNNTopology simpleSingleNN(
-      int rpcPort, int servicePort, int httpPort) {
+      int nameNodePort, int nameNodeHttpPort) {
     return new MiniDFSNNTopology()
       .addNameservice(new MiniDFSNNTopology.NSConf(null)
         .addNN(new MiniDFSNNTopology.NNConf(null)
-          .setIpcPort(rpcPort)
-          .setServicePort(servicePort)
-          .setHttpPort(httpPort)));
+          .setHttpPort(nameNodeHttpPort)
+          .setIpcPort(nameNodePort)));
   }
   
 
@@ -222,7 +221,6 @@ public class MiniDFSNNTopology {
     private final String nnId;
     private int httpPort;
     private int ipcPort;
-    private int servicePort;
     private String clusterId;
     
     public NNConf(String nnId) {
@@ -236,10 +234,6 @@ public class MiniDFSNNTopology {
     int getIpcPort() {
       return ipcPort;
     }
-
-    int getServicePort() {
-      return servicePort;
-    }
     
     int getHttpPort() {
       return httpPort;
@@ -259,11 +253,6 @@ public class MiniDFSNNTopology {
       return this;
     }
 
-    public NNConf setServicePort(int servicePort) {
-      this.servicePort = servicePort;
-      return this;
-    }
-
     public NNConf setClusterId(String clusterId) {
       this.clusterId = clusterId;
       return this;
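
For test authors, the upshot of the MiniDFSCluster and
MiniDFSNNTopology hunks is that a topology is now described by IPC and
HTTP ports alone. A sketch of the post-revert builder chain (it
mirrors the TestEditLogTailer hunk elsewhere in this commit; the
nameservice and ports are illustrative):

    import org.apache.hadoop.hdfs.MiniDFSNNTopology;

    public class TopologySketch {
      static MiniDFSNNTopology twoNameNodeTopology(int port1, int port2) {
        // Only setIpcPort() remains; setServicePort() was removed
        // together with the dedicated service port plumbing.
        return new MiniDFSNNTopology()
            .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
                .addNN(new MiniDFSNNTopology.NNConf("nn1")
                    .setIpcPort(port1))
                .addNN(new MiniDFSNNTopology.NNConf("nn2")
                    .setIpcPort(port2)));
      }
    }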

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
index 4ae2a77..f811d3d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
@@ -33,7 +33,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYPASSWORD_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY;
-import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT;
 import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
 import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
 import static org.hamcrest.CoreMatchers.not;
@@ -84,9 +83,9 @@ import com.google.common.collect.Sets;
 
 public class TestDFSUtil {
 
-  private static final String NS1_NN_ADDR    = "ns1-nn.example.com:9820";
-  private static final String NS1_NN1_ADDR   = "ns1-nn1.example.com:9820";
-  private static final String NS1_NN2_ADDR   = "ns1-nn2.example.com:9820";
+  static final String NS1_NN_ADDR    = "ns1-nn.example.com:9820";
+  static final String NS1_NN1_ADDR   = "ns1-nn1.example.com:9820";
+  static final String NS1_NN2_ADDR   = "ns1-nn2.example.com:9820";
 
   /**
    * Reset to default UGI settings since some tests change them.
@@ -274,13 +273,13 @@ public class TestDFSUtil {
     assertEquals(1, nn1Map.size());
     InetSocketAddress addr = nn1Map.get(null);
     assertEquals("localhost", addr.getHostName());
-    assertEquals(DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT, addr.getPort());
+    assertEquals(9000, addr.getPort());
     
     Map<String, InetSocketAddress> nn2Map = nnMap.get("nn2");
     assertEquals(1, nn2Map.size());
     addr = nn2Map.get(null);
     assertEquals("localhost", addr.getHostName());
-    assertEquals(DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT, addr.getPort());
+    assertEquals(9001, addr.getPort());
 
     // Test - can look up nameservice ID from service address
     checkNameServiceId(conf, NN1_ADDRESS, "nn1");
@@ -315,8 +314,7 @@ public class TestDFSUtil {
     Map<String, InetSocketAddress> defaultNsMap = addrMap.get(null);
     assertEquals(1, defaultNsMap.size());
     
-    assertEquals(DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT,
-        defaultNsMap.get(null).getPort());
+    assertEquals(9999, defaultNsMap.get(null).getPort());
   }
   
   /**
@@ -493,10 +491,6 @@ public class TestDFSUtil {
     final String NS1_NN2_HOST = "ns1-nn2.example.com:9820";
     final String NS2_NN1_HOST = "ns2-nn1.example.com:9820";
     final String NS2_NN2_HOST = "ns2-nn2.example.com:9820";
-    final String NS1_NN1_SERVICE_HOST = "ns1-nn1.example.com:9840";
-    final String NS1_NN2_SERVICE_HOST = "ns1-nn2.example.com:9840";
-    final String NS2_NN1_SERVICE_HOST = "ns2-nn1.example.com:9840";
-    final String NS2_NN2_SERVICE_HOST = "ns2-nn2.example.com:9840";
     conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://ns1");
     
     // Two nameservices, each with two NNs.
@@ -530,14 +524,12 @@ public class TestDFSUtil {
     assertEquals(NS2_NN1_HOST, map.get("ns2").get("ns2-nn1").toString());
     assertEquals(NS2_NN2_HOST, map.get("ns2").get("ns2-nn2").toString());
     
-    assertEquals(NS1_NN1_SERVICE_HOST,
+    assertEquals(NS1_NN1_HOST, 
         DFSUtil.getNamenodeServiceAddr(conf, "ns1", "ns1-nn1"));
-    assertEquals(NS1_NN2_SERVICE_HOST,
+    assertEquals(NS1_NN2_HOST, 
         DFSUtil.getNamenodeServiceAddr(conf, "ns1", "ns1-nn2"));
-    assertEquals(NS2_NN1_SERVICE_HOST,
+    assertEquals(NS2_NN1_HOST, 
         DFSUtil.getNamenodeServiceAddr(conf, "ns2", "ns2-nn1"));
-    assertEquals(NS2_NN2_SERVICE_HOST,
-        DFSUtil.getNamenodeServiceAddr(conf, "ns2", "ns2-nn2"));
 
     // No nameservice was given and we can't determine which service addr
     // to use as two nameservices could share a namenode ID.
@@ -563,11 +555,9 @@ public class TestDFSUtil {
     
     // One nameservice with two NNs
     final String NS1_NN1_HOST = "ns1-nn1.example.com:9820";
-    final String NS1_NN1_HOST_SVC = "ns1-nn1.example.com:9821";
-    final String NS1_NN1_HOST_DEFAULT_SVC = "ns1-nn1.example.com:9840";
-    final String NS1_NN2_HOST = "ns1-nn2.example.com:9820";
+    final String NS1_NN1_HOST_SVC = "ns1-nn2.example.com:9821";
+    final String NS1_NN2_HOST = "ns1-nn1.example.com:9820";
     final String NS1_NN2_HOST_SVC = "ns1-nn2.example.com:9821";
-    final String NS1_NN2_HOST_DEFAULT_SVC = "ns1-nn2.example.com:9840";
    
     conf.set(DFS_NAMESERVICES, "ns1");
     conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),"nn1,nn2"); 
@@ -577,15 +567,12 @@ public class TestDFSUtil {
     conf.set(DFSUtil.addKeySuffixes(
         DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn2"), NS1_NN2_HOST);
 
-    // The default service rpc address is used if no service address is defined
-    assertEquals(NS1_NN1_HOST_DEFAULT_SVC,
-        DFSUtil.getNamenodeServiceAddr(conf, null, "nn1"));
-    assertEquals(NS1_NN2_HOST_DEFAULT_SVC,
-        DFSUtil.getNamenodeServiceAddr(conf, null, "nn2"));
+    // The rpc address is used if no service address is defined
+    assertEquals(NS1_NN1_HOST, DFSUtil.getNamenodeServiceAddr(conf, null, "nn1"));
+    assertEquals(NS1_NN2_HOST, DFSUtil.getNamenodeServiceAddr(conf, null, "nn2"));
 
     // A nameservice is specified explicitly
-    assertEquals(NS1_NN1_HOST_DEFAULT_SVC,
-        DFSUtil.getNamenodeServiceAddr(conf, "ns1", "nn1"));
+    assertEquals(NS1_NN1_HOST, DFSUtil.getNamenodeServiceAddr(conf, "ns1", "nn1"));
     assertEquals(null, DFSUtil.getNamenodeServiceAddr(conf, "invalid", "nn1"));
     
     // The service addrs are used when they are defined
@@ -1009,92 +996,6 @@ public class TestDFSUtil {
   }
 
   @Test
-  public void testGetNNServiceRpcAddresses() throws IOException {
-    Configuration conf = new HdfsConfiguration();
-    final String NN_HOST = "nn.example.com";
-    final String NN_ADDRESS = "hdfs://" + NN_HOST + ":9000/";
-    conf.set(FS_DEFAULT_NAME_KEY, NN_ADDRESS);
-
-    // No service RPC, no rpc
-    Map<String, Map<String, InetSocketAddress>> nsMap = DFSUtil
-        .getNNServiceRpcAddresses(conf);
-    assertEquals(1, nsMap.size());
-    InetSocketAddress address = nsMap.get(null).get(null);
-    assertEquals(DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT,
-        address.getPort());
-    assertEquals(NN_HOST, address.getHostName());
-
-    // No service RPC
-    final String RPC_ADDRESS = NN_HOST + ":9191";
-    conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, RPC_ADDRESS);
-    nsMap = DFSUtil.getNNServiceRpcAddresses(conf);
-    assertEquals(1, nsMap.size());
-    address = nsMap.get(null).get(null);
-    assertEquals(DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT,
-        address.getPort());
-    assertEquals(NN_HOST, address.getHostName());
-
-    // Service RPC present
-    final String SERVICE_RPC_ADDRESS = NN_HOST + ":9292";
-    conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, SERVICE_RPC_ADDRESS);
-    nsMap = DFSUtil.getNNServiceRpcAddresses(conf);
-    assertEquals(1, nsMap.size());
-    address = nsMap.get(null).get(null);
-    assertEquals(9292, address.getPort());
-    assertEquals(NN_HOST, address.getHostName());
-  }
-
-  @Test
-  public void testGetNNServiceRpcAddressesForHA() throws IOException {
-    Configuration conf = new HdfsConfiguration();
-
-    final String NS = "mycluster";
-    final String NN1_HOST = "nn1.example.com";
-    final String NN2_HOST = "nn2.example.com";
-    conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://mycluster");
-
-    conf.set(DFS_NAMESERVICES, NS);
-    conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, NS),
-        "nn1,nn2");
-    conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, NS, "nn1"),
-        NN1_HOST + ":9820");
-    conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, NS, "nn2"),
-        NN2_HOST + ":9820");
-
-    assertTrue(HAUtil.isHAEnabled(conf, NS));
-
-    // Without Service RPC keys
-    Map<String, Map<String, InetSocketAddress>> nsMap =
-        DFSUtil.getNNServiceRpcAddresses(conf);
-    assertEquals(1, nsMap.size());
-    Map<String, InetSocketAddress> nnMap = nsMap.get(NS);
-    assertEquals(2, nnMap.size());
-    InetSocketAddress nn1Address = nnMap.get("nn1");
-    assertEquals(NN1_HOST, nn1Address.getHostName());
-    assertEquals(DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT, nn1Address.getPort());
-    InetSocketAddress nn2Address = nnMap.get("nn2");
-    assertEquals(NN2_HOST, nn2Address.getHostName());
-    assertEquals(DFS_NAMENODE_SERVICE_RPC_PORT_DEFAULT, nn2Address.getPort());
-
-    // With Service RPC keys
-    final int CUSTOM_SERVICE_PORT = 9191;
-    conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        NS, "nn1"), NN1_HOST + ":" + CUSTOM_SERVICE_PORT);
-    conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        NS, "nn2"), NN2_HOST + ":" + CUSTOM_SERVICE_PORT);
-    nsMap = DFSUtil.getNNServiceRpcAddresses(conf);
-    assertEquals(1, nsMap.size());
-    nnMap = nsMap.get(NS);
-    assertEquals(2, nnMap.size());
-    nn1Address = nnMap.get("nn1");
-    assertEquals(NN1_HOST, nn1Address.getHostName());
-    assertEquals(CUSTOM_SERVICE_PORT, nn1Address.getPort());
-    nn2Address = nnMap.get("nn2");
-    assertEquals(NN2_HOST, nn2Address.getHostName());
-    assertEquals(CUSTOM_SERVICE_PORT, nn2Address.getPort());
-  }
-
-  @Test
   public void testGetNNServiceRpcAddressesForNsIds() throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.set(DFS_NAMESERVICES, "nn1,nn2");
@@ -1116,13 +1017,13 @@ public class TestDFSUtil {
     }
 
     Map<String, Map<String, InetSocketAddress>> nnMap = DFSUtil
-            .getNNServiceRpcAddresses(conf);
+            .getNNServiceRpcAddressesForCluster(conf);
     assertEquals(1, nnMap.size());
     assertTrue(nnMap.containsKey("nn1"));
 
     conf.set(DFS_INTERNAL_NAMESERVICES_KEY, "nn3");
     try {
-      DFSUtil.getNNServiceRpcAddresses(conf);
+      DFSUtil.getNNServiceRpcAddressesForCluster(conf);
       fail("Should fail for misconfiguration");
     } catch (IOException ignored) {
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
index 1914b78..59e8555 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.test.PathUtils;
-import org.junit.Ignore;
 import org.junit.Test;
 
 import java.io.File;
@@ -278,14 +277,17 @@ public class TestHDFSServerPorts {
       // different http port
       conf2.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, THIS_HOST);
       started = canStartNameNode(conf2);
-      assertFalse("Should've failed on service port", started);
 
-      // reset conf2 since NameNode modifies it
-      FileSystem.setDefaultUri(conf2, "hdfs://" + THIS_HOST);
-      conf2.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, THIS_HOST);
-      // Set Service address
-      conf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,  THIS_HOST);
-      started = canStartNameNode(conf2);
+      if (withService) {
+        assertFalse("Should've failed on service port", started);
+
+        // reset conf2 since NameNode modifies it
+        FileSystem.setDefaultUri(conf2, "hdfs://" + THIS_HOST);
+        conf2.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, THIS_HOST);
+        // Set Service address      
+        conf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,  THIS_HOST);
+        started = canStartNameNode(conf2);        
+      }
       assertTrue(started);
     } finally {
       stopNameNode(nn);
@@ -357,39 +359,38 @@ public class TestHDFSServerPorts {
     }
   }
     
-  /**
-   * Verify BackupNode port usage.
-   */
-  @Ignore
-  @Test(timeout = 300000)
-  public void testBackupNodePorts() throws Exception {
-    NameNode nn = null;
-    try {
-      nn = startNameNode();
-
-      Configuration backup_config = new HdfsConfiguration(config);
-      backup_config.set(
-          DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, THIS_HOST);
-      // bind http server to the same port as name-node
-      backup_config.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
-          backup_config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
-
-      LOG.info("= Starting 1 on: " + backup_config.get(
-          DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
-
-      assertFalse("Backup started on same port as Namenode",
-                         canStartBackupNode(backup_config)); // should fail
-
-      // bind http server to a different port
-      backup_config.set(
-          DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, THIS_HOST);
-      LOG.info("= Starting 2 on: " + backup_config.get(
-          DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
-
-      boolean started = canStartBackupNode(backup_config);
-      assertTrue("Backup Namenode should've started", started); // should start now
-    } finally {
-      stopNameNode(nn);
-    }
+    /**
+     * Verify BackupNode port usage.
+     */
+    @Test(timeout = 300000)
+    public void testBackupNodePorts() throws Exception {
+      NameNode nn = null;
+      try {
+        nn = startNameNode();
+
+        Configuration backup_config = new HdfsConfiguration(config);
+        backup_config.set(
+            DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, THIS_HOST);
+        // bind http server to the same port as name-node
+        backup_config.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, 
+            backup_config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
+
+        LOG.info("= Starting 1 on: " + backup_config.get(
+            DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
+
+        assertFalse("Backup started on same port as Namenode", 
+                           canStartBackupNode(backup_config)); // should fail
+
+        // bind http server to a different port
+        backup_config.set(
+            DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, THIS_HOST);
+        LOG.info("= Starting 2 on: " + backup_config.get(
+            DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
+
+        boolean started = canStartBackupNode(backup_config);
+        assertTrue("Backup Namenode should've started", started); // should start now
+      } finally {
+        stopNameNode(nn);
+      }
   }
 }
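
Note on the TestHDFSServerPorts hunk above: the restored branch asserts a service-port collision only when withService is set, i.e. when the first NameNode was started with dfs.namenode.servicerpc-address configured; without it there is no service port to collide on. Condensed, the restored flow is (names from the hunk; the enclosing test setup is assumed, not shown in this patch):

    // Sketch only: conf2 describes a second NameNode differing from the first
    // in its HTTP address.
    boolean started = canStartNameNode(conf2);
    if (withService) {
      // First attempt collides on the first NN's service RPC port.
      assertFalse("Should've failed on service port", started);
      // The start attempt mutates conf2, so reset it and add a distinct
      // service address before retrying.
      FileSystem.setDefaultUri(conf2, "hdfs://" + THIS_HOST);
      conf2.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, THIS_HOST);
      conf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, THIS_HOST);
      started = canStartNameNode(conf2);
    }
    assertTrue(started);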

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
index df6dc03..f25d28f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
@@ -324,7 +324,7 @@ public class TestSafeMode {
     } catch (RemoteException re) {
       assertEquals(SafeModeException.class.getName(), re.getClassName());
       GenericTestUtils.assertExceptionContains(
-          NameNode.getServiceAddress(conf).getHostName(), re);
+          NameNode.getServiceAddress(conf, true).getHostName(), re);
     } catch (IOException ioe) {
       fail("Encountered exception" + " " + StringUtils.stringifyException(ioe));
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
index 501ba77..c163894 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
@@ -77,9 +77,7 @@ public class MiniQJMHACluster {
   public static MiniDFSNNTopology createDefaultTopology(int nns, int startingPort) {
     MiniDFSNNTopology.NSConf nameservice = new MiniDFSNNTopology.NSConf(NAMESERVICE);
     for (int i = 0; i < nns; i++) {
-      nameservice.addNN(new MiniDFSNNTopology.NNConf("nn" + i)
-          .setIpcPort(startingPort++)
-          .setServicePort(startingPort++)
+      nameservice.addNN(new MiniDFSNNTopology.NNConf("nn" + i).setIpcPort(startingPort++)
           .setHttpPort(startingPort++));
     }
 
@@ -150,9 +148,8 @@ public class MiniQJMHACluster {
     int port = basePort;
     for (int i = 0; i < numNNs; i++) {
       nns.add("127.0.0.1:" + port);
-      // increment by 3 each time to account for the http and the service port
-      // in the config setting
-      port += 3;
+      // increment by 2 each time to account for the http port in the config setting
+      port += 2;
     }
 
     // use standard failover configurations
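
Note on the MiniQJMHACluster hunks above: with the service port gone, each NameNode in the generated topology consumes two consecutive ports (RPC, then HTTP) instead of three, so the address-prediction loop steps by 2. A self-contained sketch of that arithmetic (the helper name and signature are illustrative, not part of the patch):

    import java.util.ArrayList;
    import java.util.List;

    // Sketch only: mirrors the reverted layout, where NN i takes its RPC port
    // at basePort + 2*i and its HTTP port at basePort + 2*i + 1.
    static List<String> expectedNNRpcAddresses(int basePort, int numNNs) {
      List<String> nns = new ArrayList<>();
      int port = basePort;
      for (int i = 0; i < numNNs; i++) {
        nns.add("127.0.0.1:" + port);
        port += 2; // skip over this NN's HTTP port
      }
      return nns;
    }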

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
index 516f159..1444193 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
@@ -89,9 +89,8 @@ public class TestBalancerWithHANameNodes {
           / numOfDatanodes, (short) numOfDatanodes, 1);
 
       // start up an empty node with the same capacity and on the same rack
-      cluster.startDataNodes(cluster.getClusterConfiguration(),
-          1, true, null, new String[] {newNodeRack},
-          new long[] {newNodeCapacity});
+      cluster.startDataNodes(conf, 1, true, null, new String[] { newNodeRack },
+          new long[] { newNodeCapacity });
       totalCapacity += newNodeCapacity;
       TestBalancer.waitForHeartBeat(totalUsedSpace, totalCapacity, client,
           cluster);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java
index c199c9c..876a854 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java
@@ -105,11 +105,8 @@ public class InternalDataNodeTestUtils {
    *
    * @throws IOException
    */
-  public static DataNode startDNWithMockNN(
-      Configuration conf,
-      final InetSocketAddress nnSocketAddr,
-      final InetSocketAddress nnServiceAddr,
-      final String dnDataDir)
+  public static DataNode startDNWithMockNN(Configuration conf,
+      final InetSocketAddress nnSocketAddr, final String dnDataDir)
       throws IOException {
 
     FileSystem.setDefaultUri(conf, "hdfs://" + nnSocketAddr.getHostName() + ":"
@@ -152,7 +149,7 @@ public class InternalDataNodeTestUtils {
       @Override
       DatanodeProtocolClientSideTranslatorPB connectToNN(
           InetSocketAddress nnAddr) throws IOException {
-        Assert.assertEquals(nnServiceAddr, nnAddr);
+        Assert.assertEquals(nnSocketAddr, nnAddr);
         return namenode;
       }
     };
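
Note on the InternalDataNodeTestUtils hunk above: startDNWithMockNN loses its separate service address, so the DataNode is now expected to dial back on the single client RPC address, which the overridden connectToNN asserts. A minimal call-site sketch under the reverted signature (the address and data dir are illustrative; the TestDataNodeMetricsLogger hunk below shows the real usage):

    // Sketch only: start a DataNode against a mocked NameNode at one RPC
    // endpoint; no dfs.namenode.servicerpc-address is configured.
    InetSocketAddress nnAddr = new InetSocketAddress("localhost", 5020);
    Configuration conf = new HdfsConfiguration();
    DataNode dn = InternalDataNodeTestUtils.startDNWithMockNN(
        conf, nnAddr, MiniDFSCluster.getBaseDirectory() + "data");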

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
index 98450f6..311d5a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
@@ -124,6 +124,8 @@ public class TestBlockRecovery {
   private final static long RECOVERY_ID = 3000L;
   private final static String CLUSTER_ID = "testClusterID";
   private final static String POOL_ID = "BP-TEST";
+  private final static InetSocketAddress NN_ADDR = new InetSocketAddress(
+      "localhost", 5020);
   private final static long BLOCK_ID = 1000L;
   private final static long GEN_STAMP = 2000L;
   private final static long BLOCK_LEN = 3000L;
@@ -186,7 +188,7 @@ public class TestBlockRecovery {
     }
     conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
     FileSystem.setDefaultUri(conf,
-        "hdfs://localhost:5020");
+        "hdfs://" + NN_ADDR.getHostName() + ":" + NN_ADDR.getPort());
     ArrayList<StorageLocation> locations = new ArrayList<StorageLocation>();
     File dataDir = new File(DATA_DIR);
     FileUtil.fullyDelete(dataDir);
@@ -229,7 +231,7 @@ public class TestBlockRecovery {
       @Override
       DatanodeProtocolClientSideTranslatorPB connectToNN(
           InetSocketAddress nnAddr) throws IOException {
-        Assert.assertEquals("localhost:9840", nnAddr.toString());
+        Assert.assertEquals(NN_ADDR, nnAddr);
         return namenode;
       }
     };

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
index bee6c1d..32fda37 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
@@ -61,16 +61,11 @@ import com.google.common.base.Supplier;
 public class TestDataNodeMetricsLogger {
   static final Log LOG = LogFactory.getLog(TestDataNodeMetricsLogger.class);
 
-  @Rule
-  public Timeout globalTimeout = new Timeout(120_000);
-
   private static final String DATA_DIR = MiniDFSCluster.getBaseDirectory()
       + "data";
 
   private final static InetSocketAddress NN_ADDR = new InetSocketAddress(
       "localhost", 5020);
-  private final static InetSocketAddress NN_SERVICE_ADDR =
-      new InetSocketAddress("localhost", 5021);
 
   private DataNode dn;
 
@@ -91,13 +86,10 @@ public class TestDataNodeMetricsLogger {
     conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
     conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
     conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
-    conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        NN_SERVICE_ADDR.getHostName() + ":" + NN_SERVICE_ADDR.getPort());
     conf.setInt(DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_KEY,
         enableMetricsLogging ? 1 : 0); // If enabled, log early and log often
 
-    dn = InternalDataNodeTestUtils.startDNWithMockNN(
-        conf, NN_ADDR, NN_SERVICE_ADDR, DATA_DIR);
+    dn = InternalDataNodeTestUtils.startDNWithMockNN(conf, NN_ADDR, DATA_DIR);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
index 25650fd..8e1e236 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
@@ -109,16 +109,16 @@ public class TestDataNodeMultipleRegistrations {
       BPOfferService bpos2 = dn.getAllBpOs().get(1);
 
       // The order of bpos is not guaranteed, so fix the order
-      if (getNNSocketAddress(bpos1).equals(nn2.getServiceRpcAddress())) {
+      if (getNNSocketAddress(bpos1).equals(nn2.getNameNodeAddress())) {
         BPOfferService tmp = bpos1;
         bpos1 = bpos2;
         bpos2 = tmp;
       }
 
       assertEquals("wrong nn address", getNNSocketAddress(bpos1),
-          nn1.getServiceRpcAddress());
+          nn1.getNameNodeAddress());
       assertEquals("wrong nn address", getNNSocketAddress(bpos2),
-          nn2.getServiceRpcAddress());
+          nn2.getNameNodeAddress());
       assertEquals("wrong bpid", bpos1.getBlockPoolId(), bpid1);
       assertEquals("wrong bpid", bpos2.getBlockPoolId(), bpid2);
       assertEquals("wrong cid", dn.getClusterId(), cid1);
@@ -182,7 +182,7 @@ public class TestDataNodeMultipleRegistrations {
 
       assertEquals("wrong nn address",
           getNNSocketAddress(bpos1),
-          nn1.getServiceRpcAddress());
+          nn1.getNameNodeAddress());
       assertEquals("wrong bpid", bpos1.getBlockPoolId(), bpid1);
       assertEquals("wrong cid", dn.getClusterId(), cid1);
       cluster.shutdown();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java
index 884c93d..1dfd3c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java
@@ -51,10 +51,8 @@ public class TestDataNodeReconfiguration {
   private static final Log LOG = LogFactory.getLog(TestBlockRecovery.class);
   private static final String DATA_DIR = MiniDFSCluster.getBaseDirectory()
       + "data";
-  private final static InetSocketAddress NN_ADDR =
-      new InetSocketAddress("localhost", 5020);
-  private final static InetSocketAddress NN_SERVICE_ADDR =
-      new InetSocketAddress("localhost", 5021);
+  private final static InetSocketAddress NN_ADDR = new InetSocketAddress(
+      "localhost", 5020);
   private final int NUM_NAME_NODE = 1;
   private final int NUM_DATA_NODE = 10;
   private MiniDFSCluster cluster;
@@ -101,13 +99,10 @@ public class TestDataNodeReconfiguration {
     conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
     conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
     conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
-    conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        NN_SERVICE_ADDR.getHostName() + ":" + NN_SERVICE_ADDR.getPort());
 
     DataNode[] result = new DataNode[numDateNode];
     for (int i = 0; i < numDateNode; i++) {
-      result[i] = InternalDataNodeTestUtils.startDNWithMockNN(
-          conf, NN_ADDR, NN_SERVICE_ADDR, DATA_DIR);
+      result[i] = InternalDataNodeTestUtils.startDNWithMockNN(conf, NN_ADDR, DATA_DIR);
     }
     return result;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
index 5218021..bb1d9ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
@@ -78,6 +78,8 @@ public class TestDatanodeProtocolRetryPolicy {
   ArrayList<StorageLocation> locations = new ArrayList<StorageLocation>();
   private final static String CLUSTER_ID = "testClusterID";
   private final static String POOL_ID = "BP-TEST";
+  private final static InetSocketAddress NN_ADDR = new InetSocketAddress(
+      "localhost", 5020);
   private static DatanodeRegistration datanodeRegistration =
       DFSTestUtil.getLocalDatanodeRegistration();
 
@@ -99,7 +101,7 @@ public class TestDatanodeProtocolRetryPolicy {
     conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
     conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
     FileSystem.setDefaultUri(conf,
-        "hdfs://localhost:5020");
+        "hdfs://" + NN_ADDR.getHostName() + ":" + NN_ADDR.getPort());
     File dataDir = new File(DATA_DIR);
     FileUtil.fullyDelete(dataDir);
     dataDir.mkdirs();
@@ -226,7 +228,7 @@ public class TestDatanodeProtocolRetryPolicy {
       @Override
       DatanodeProtocolClientSideTranslatorPB connectToNN(
           InetSocketAddress nnAddr) throws IOException {
-        Assert.assertEquals("localhost:9840", nnAddr.toString());
+        Assert.assertEquals(NN_ADDR, nnAddr);
         return namenode;
       }
     };

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java
index 37d1b57..f8594ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java
@@ -44,11 +44,6 @@ public class TestRefreshNamenodes {
   private final int nnPort3 = 2227;
   private final int nnPort4 = 2230;
 
-  private final int nnServicePort1 = 2222;
-  private final int nnServicePort2 = 2225;
-  private final int nnServicePort3 = 2228;
-  private final int nnServicePort4 = 2231;
-
   @Test
   public void testRefreshNamenodes() throws IOException {
     // Start cluster with a single NN and DN
@@ -57,9 +52,7 @@ public class TestRefreshNamenodes {
     try {
       MiniDFSNNTopology topology = new MiniDFSNNTopology()
         .addNameservice(new NSConf("ns1").addNN(
-            new NNConf(null)
-                .setIpcPort(nnPort1)
-                .setServicePort(nnServicePort1)))
+            new NNConf(null).setIpcPort(nnPort1)))
         .setFederation(true);
       cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(topology)
@@ -68,20 +61,20 @@ public class TestRefreshNamenodes {
       DataNode dn = cluster.getDataNodes().get(0);
       assertEquals(1, dn.getAllBpOs().size());
 
-      cluster.addNameNode(conf, nnPort2, nnServicePort2);
+      cluster.addNameNode(conf, nnPort2);
       assertEquals(2, dn.getAllBpOs().size());
 
-      cluster.addNameNode(conf, nnPort3, nnServicePort3);
+      cluster.addNameNode(conf, nnPort3);
       assertEquals(3, dn.getAllBpOs().size());
 
-      cluster.addNameNode(conf, nnPort4, nnServicePort4);
+      cluster.addNameNode(conf, nnPort4);
 
       // Ensure a BPOfferService in the datanodes corresponds to
       // a namenode in the cluster
       Set<InetSocketAddress> nnAddrsFromCluster = Sets.newHashSet();
       for (int i = 0; i < 4; i++) {
         assertTrue(nnAddrsFromCluster.add(
-            cluster.getNameNode(i).getServiceRpcAddress()));
+            cluster.getNameNode(i).getNameNodeAddress()));
       }
       
       Set<InetSocketAddress> nnAddrsFromDN = Sets.newHashSet();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
index 5c58e0a..10d9f11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
@@ -54,7 +54,6 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.Before;
-import org.junit.Ignore;
 import org.junit.Test;
 
 import com.google.common.base.Supplier;
@@ -62,7 +61,6 @@ import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Lists;
 
-@Ignore("Temporarily disabling the BackupNode unit test.")
 public class TestBackupNode {
   public static final Log LOG = LogFactory.getLog(TestBackupNode.class);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
index 4282c22..2e49674 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
@@ -1364,9 +1364,9 @@ public class TestCheckpoint {
       Configuration snConf1 = new HdfsConfiguration(cluster.getConfiguration(0));
       Configuration snConf2 = new HdfsConfiguration(cluster.getConfiguration(1));
       InetSocketAddress nn1RpcAddress = cluster.getNameNode(0)
-          .getServiceRpcAddress();
+          .getNameNodeAddress();
       InetSocketAddress nn2RpcAddress = cluster.getNameNode(1)
-          .getServiceRpcAddress();
+          .getNameNodeAddress();
       String nn1 = nn1RpcAddress.getHostName() + ":" + nn1RpcAddress.getPort();
       String nn2 = nn2RpcAddress.getHostName() + ":" + nn2RpcAddress.getPort();
 
@@ -1923,7 +1923,6 @@ public class TestCheckpoint {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
           .format(true).build();
       int origPort = cluster.getNameNodePort();
-      int origServicePort = cluster.getNameNodeServicePort();
       int origHttpPort = cluster.getNameNode().getHttpAddress().getPort();
       Configuration snnConf = new Configuration(conf);
       File checkpointDir = new File(MiniDFSCluster.getBaseDirectory(),
@@ -1950,7 +1949,6 @@ public class TestCheckpoint {
       cluster = new MiniDFSCluster.Builder(conf)
           .numDataNodes(0)
           .nameNodePort(origPort)
-          .nameNodeServicePort(origServicePort)
           .nameNodeHttpPort(origHttpPort)
           .format(true).build();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index d21b275..36638e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -661,15 +661,12 @@ public class TestNameNodeMXBean {
     for (int i = 0; i < 5; i++) {
       try{
         // Have to specify IPC ports so the NNs can talk to each other.
-        int[] ports = ServerSocketUtil.getPorts(4);
+        int[] ports = ServerSocketUtil.getPorts(2);
         MiniDFSNNTopology topology = new MiniDFSNNTopology()
             .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-                .addNN(new MiniDFSNNTopology.NNConf("nn1")
-                    .setIpcPort(ports[0])
-                    .setServicePort(ports[1]))
-                .addNN(new MiniDFSNNTopology.NNConf("nn2")
-                    .setIpcPort(ports[2])
-                    .setServicePort(ports[3])));
+                .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(ports[0]))
+                .addNN(
+                    new MiniDFSNNTopology.NNConf("nn2").setIpcPort(ports[1])));
 
         cluster = new MiniDFSCluster.Builder(conf)
             .nnTopology(topology).numDataNodes(0)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java
index d7216c0..9a0e67c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java
@@ -110,7 +110,6 @@ public class TestNameNodeMetricsLogger {
       throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.set(FS_DEFAULT_NAME_KEY, "hdfs://localhost:0");
-    conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "0.0.0.0:0");
     conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
     conf.setInt(DFS_NAMENODE_METRICS_LOGGER_PERIOD_SECONDS_KEY,
         enableMetricsLogging ? 1 : 0);  // If enabled, log early and log often

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java
index 981785a..0cf1fed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java
@@ -125,8 +125,6 @@ public class TestValidateConfigurationSettings {
     // Set ephemeral ports 
     conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
         "127.0.0.1:0");
-    conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        "127.0.0.1:0");
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
         "127.0.0.1:0");
     

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65a94100/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
index a367167..169bbee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.test.GenericTestUtils;

