Posted to common-commits@hadoop.apache.org by in...@apache.org on 2017/09/15 23:02:21 UTC

hadoop git commit: HDFS-12450. Fixing TestNamenodeHeartbeat and support non-HA. Contributed by Inigo Goiri.

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-10467 679e31aad -> 2d490d330


HDFS-12450. Fixing TestNamenodeHeartbeat and support non-HA. Contributed by Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d490d33
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d490d33
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d490d33

Branch: refs/heads/HDFS-10467
Commit: 2d490d33043ecdd18ac7d3700ca8ee1b9a7a7688
Parents: 679e31a
Author: Inigo Goiri <in...@apache.org>
Authored: Fri Sep 15 16:02:12 2017 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Fri Sep 15 16:02:12 2017 -0700

----------------------------------------------------------------------
 .../router/NamenodeHeartbeatService.java        | 47 ++++++++++++--------
 .../server/federation/RouterDFSCluster.java     | 23 +++++++++-
 2 files changed, 50 insertions(+), 20 deletions(-)
----------------------------------------------------------------------
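
The change lets the Router's NamenodeHeartbeatService monitor namenodes that are not configured for HA, i.e. cases where only a nameservice ID is known and the namenode ID is null. In configuration terms, that means suffixing keys with only the identifiers that are actually present. A minimal, self-contained illustration of that suffixing rule (a simplified stand-in for DFSUtil.addKeySuffixes, not the Hadoop helper itself):

public class KeySuffixSketch {

  /** Append only the non-null, non-empty suffixes, dot-separated. */
  static String addKeySuffixes(String key, String... suffixes) {
    StringBuilder sb = new StringBuilder(key);
    for (String suffix : suffixes) {
      if (suffix != null && !suffix.isEmpty()) {
        sb.append('.').append(suffix);
      }
    }
    return sb.toString();
  }

  public static void main(String[] args) {
    // HA namenode: both nameservice and namenode IDs are present
    System.out.println(addKeySuffixes("dfs.namenode.rpc-address", "ns0", "nn1"));
    // Non-HA namenode: only the nameservice ID is known
    System.out.println(addKeySuffixes("dfs.namenode.rpc-address", "ns0", null));
  }
}

With only the nameservice present, the key resolves to dfs.namenode.rpc-address.ns0, which is the lookup the relaxed "nsId != null || nnId != null" check in the patch below enables.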


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d490d33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
index fe4f939..38f63e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
@@ -94,8 +94,9 @@ public class NamenodeHeartbeatService extends PeriodicService {
    */
   public NamenodeHeartbeatService(
       ActiveNamenodeResolver resolver, String nsId, String nnId) {
-    super(NamenodeHeartbeatService.class.getSimpleName() + " " + nsId + " " +
-        nnId);
+    super(NamenodeHeartbeatService.class.getSimpleName() +
+        (nsId == null ? "" : " " + nsId) +
+        (nnId == null ? "" : " " + nnId));
 
     this.resolver = resolver;
 
@@ -109,28 +110,28 @@ public class NamenodeHeartbeatService extends PeriodicService {
 
     this.conf = configuration;
 
+    String nnDesc = nameserviceId;
     if (this.namenodeId != null && !this.namenodeId.isEmpty()) {
       this.localTarget = new NNHAServiceTarget(
           conf, nameserviceId, namenodeId);
+      nnDesc += "-" + namenodeId;
     } else {
       this.localTarget = null;
     }
 
     // Get the RPC address for the clients to connect
     this.rpcAddress = getRpcAddress(conf, nameserviceId, namenodeId);
-    LOG.info("{}-{} RPC address: {}",
-        nameserviceId, namenodeId, rpcAddress);
+    LOG.info("{} RPC address: {}", nnDesc, rpcAddress);
 
     // Get the Service RPC address for monitoring
     this.serviceAddress =
         DFSUtil.getNamenodeServiceAddr(conf, nameserviceId, namenodeId);
     if (this.serviceAddress == null) {
-      LOG.error("Cannot locate RPC service address for NN {}-{}, " +
-          "using RPC address {}", nameserviceId, namenodeId, this.rpcAddress);
+      LOG.error("Cannot locate RPC service address for NN {}, " +
+          "using RPC address {}", nnDesc, this.rpcAddress);
       this.serviceAddress = this.rpcAddress;
     }
-    LOG.info("{}-{} Service RPC address: {}",
-        nameserviceId, namenodeId, serviceAddress);
+    LOG.info("{} Service RPC address: {}", nnDesc, serviceAddress);
 
     // Get the Lifeline RPC address for faster monitoring
     this.lifelineAddress =
@@ -138,13 +139,12 @@ public class NamenodeHeartbeatService extends PeriodicService {
     if (this.lifelineAddress == null) {
       this.lifelineAddress = this.serviceAddress;
     }
-    LOG.info("{}-{} Lifeline RPC address: {}",
-        nameserviceId, namenodeId, lifelineAddress);
+    LOG.info("{} Lifeline RPC address: {}", nnDesc, lifelineAddress);
 
     // Get the Web address for UI
     this.webAddress =
         DFSUtil.getNamenodeWebAddr(conf, nameserviceId, namenodeId);
-    LOG.info("{}-{} Web address: {}", nameserviceId, namenodeId, webAddress);
+    LOG.info("{} Web address: {}", nnDesc, webAddress);
 
     this.setIntervalMs(conf.getLong(
         DFS_ROUTER_HEARTBEAT_INTERVAL_MS,
@@ -173,7 +173,7 @@ public class NamenodeHeartbeatService extends PeriodicService {
     String confKey = DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
     String ret = conf.get(confKey);
 
-    if (nsId != null && nnId != null) {
+    if (nsId != null || nnId != null) {
       // Get if for the proper nameservice and namenode
       confKey = DFSUtil.addKeySuffixes(confKey, nsId, nnId);
       ret = conf.get(confKey);
@@ -182,10 +182,16 @@ public class NamenodeHeartbeatService extends PeriodicService {
       if (ret == null) {
         Map<String, InetSocketAddress> rpcAddresses =
             DFSUtil.getRpcAddressesForNameserviceId(conf, nsId, null);
-        if (rpcAddresses.containsKey(nnId)) {
-          InetSocketAddress sockAddr = rpcAddresses.get(nnId);
+        InetSocketAddress sockAddr = null;
+        if (nnId != null) {
+          sockAddr = rpcAddresses.get(nnId);
+        } else if (rpcAddresses.size() == 1) {
+          // Get the only namenode in the namespace
+          sockAddr = rpcAddresses.values().iterator().next();
+        }
+        if (sockAddr != null) {
           InetAddress addr = sockAddr.getAddress();
-          ret = addr.getHostAddress() + ":" + sockAddr.getPort();
+          ret = addr.getHostName() + ":" + sockAddr.getPort();
         }
       }
     }
@@ -279,9 +285,14 @@ public class NamenodeHeartbeatService extends PeriodicService {
           HAServiceStatus status = haProtocol.getServiceStatus();
           report.setHAServiceState(status.getState());
         } catch (Throwable e) {
-          // Failed to fetch HA status, ignoring failure
-          LOG.error("Cannot fetch HA status for {}: {}",
-              getNamenodeDesc(), e.getMessage(), e);
+          if (e.getMessage().startsWith("HA for namenode is not enabled")) {
+            LOG.error("HA for {} is not enabled", getNamenodeDesc());
+            localTarget = null;
+          } else {
+            // Failed to fetch HA status, ignoring failure
+            LOG.error("Cannot fetch HA status for {}: {}",
+                getNamenodeDesc(), e.getMessage(), e);
+          }
         }
       }
     } catch(IOException e) {
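
The hunks above make the heartbeat service tolerate a missing namenode ID: the service name and log descriptions only append identifiers that are present, getRpcAddress() falls back to the single namenode of the nameservice when no ID is given (and now formats the result with getHostName() instead of getHostAddress()), and a namenode that reports "HA for namenode is not enabled" simply stops being polled for HA status. A small standalone sketch of the null-tolerant naming and address-selection logic, using plain Java collections rather than the actual Hadoop classes (the method names here are illustrative, not from the patch):

import java.net.InetSocketAddress;
import java.util.Map;

public class HeartbeatNamingSketch {

  /** Build a log/description string that simply omits a missing namenode ID. */
  static String namenodeDesc(String nsId, String nnId) {
    String desc = nsId;
    if (nnId != null && !nnId.isEmpty()) {
      desc += "-" + nnId;
    }
    return desc;
  }

  /** Pick an RPC address: by namenode ID if given, otherwise the only entry. */
  static InetSocketAddress pickRpcAddress(
      Map<String, InetSocketAddress> rpcAddresses, String nnId) {
    if (nnId != null) {
      return rpcAddresses.get(nnId);
    } else if (rpcAddresses.size() == 1) {
      // Non-HA nameservice: use the only namenode in the namespace
      return rpcAddresses.values().iterator().next();
    }
    return null;
  }

  public static void main(String[] args) {
    System.out.println(namenodeDesc("ns0", null));   // "ns0"
    System.out.println(namenodeDesc("ns0", "nn1"));  // "ns0-nn1"

    Map<String, InetSocketAddress> addrs =
        Map.of("nn1", new InetSocketAddress("127.0.0.1", 8020));
    // With no namenode ID, the single entry of the nameservice is used
    System.out.println(pickRpcAddress(addrs, null));
  }
}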

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d490d33/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/RouterDFSCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/RouterDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/RouterDFSCluster.java
index 1ee49d5..e79674d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/RouterDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/RouterDFSCluster.java
@@ -23,6 +23,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_BIND_HOST_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_KEY;
@@ -31,6 +33,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_CACHE_TIME_TO_LIVE
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HANDLER_COUNT_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HEARTBEAT_INTERVAL_MS;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_MONITOR_NAMENODE;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_RPC_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_RPC_BIND_HOST_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.FEDERATION_FILE_RESOLVER_CLIENT_CLASS;
@@ -136,8 +139,7 @@ public class RouterDFSCluster {
     private RouterClient adminClient;
     private URI fileSystemUri;
 
-    public RouterContext(Configuration conf, String nsId, String nnId)
-        throws URISyntaxException {
+    public RouterContext(Configuration conf, String nsId, String nnId) {
       this.conf = conf;
       this.nameserviceId = nsId;
       this.namenodeId = nnId;
@@ -397,10 +399,14 @@ public class RouterDFSCluster {
 
         conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY + "." + suffix,
             "127.0.0.1:" + context.rpcPort);
+        conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + "." + suffix,
+            "127.0.0.1:" + context.servicePort);
         conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY + "." + suffix,
             "127.0.0.1:" + context.httpPort);
         conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY + "." + suffix,
             "0.0.0.0");
+        conf.set(DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY + "." + suffix,
+            "0.0.0.0");
       }
     }
 
@@ -457,6 +463,19 @@ public class RouterDFSCluster {
       conf.set(DFS_HA_NAMENODE_ID_KEY, nnId);
     }
 
+    // Namenodes to monitor
+    StringBuilder sb = new StringBuilder();
+    for (String ns : this.nameservices) {
+      for (NamenodeContext context : getNamenodes(ns)) {
+        String suffix = context.getConfSuffix();
+        if (sb.length() != 0) {
+          sb.append(",");
+        }
+        sb.append(suffix);
+      }
+    }
+    conf.set(DFS_ROUTER_MONITOR_NAMENODE, sb.toString());
+
     // Add custom overrides if available
     if (this.routerOverrides != null) {
       for (Entry<String, String> entry : this.routerOverrides) {

