You are viewing a plain text version of this content. The canonical (HTML) version of this message is available in the Apache mailing list archives; the original hyperlink was lost in text extraction.
Posted to common-commits@hadoop.apache.org by li...@apache.org on 2020/04/25 21:09:35 UTC
[hadoop] branch branch-2.9 updated: HDFS-15281. Make sure ZKFC uses
dfs.namenode.rpc-address to bind to host address (#1964)
This is an automated email from the ASF dual-hosted git repository.
liuml07 pushed a commit to branch branch-2.9
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/branch-2.9 by this push:
new 110a528 HDFS-15281. Make sure ZKFC uses dfs.namenode.rpc-address to bind to host address (#1964)
110a528 is described below
commit 110a528a9849c8cbba3791fe6fe8d4ffc52e43a2
Author: Dhiraj <dh...@users.noreply.github.com>
AuthorDate: Sat Apr 25 13:04:32 2020 -0700
HDFS-15281. Make sure ZKFC uses dfs.namenode.rpc-address to bind to host address (#1964)
Contributed by Dhiraj Hegde.
Signed-off-by: Mingliang Liu <li...@apache.org>
Signed-off-by: Inigo Goiri <in...@apache.org>
---
.../org/apache/hadoop/ha/ZKFailoverController.java | 3 ++-
.../hadoop/hdfs/tools/DFSZKFailoverController.java | 28 ++++++++++++++++++----
.../hdfs/tools/TestDFSZKFailoverController.java | 24 +++++++++++++++++++
3 files changed, 49 insertions(+), 6 deletions(-)
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
index cf65ae1..9402665 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
@@ -315,9 +315,10 @@ public abstract class ZKFailoverController {
healthMonitor.addServiceStateCallback(new ServiceStateCallBacks());
healthMonitor.start();
}
-
+
protected void initRPC() throws IOException {
InetSocketAddress bindAddr = getRpcAddressToBindTo();
+ LOG.info("ZKFC RpcServer binding to {}", bindAddr);
rpcServer = new ZKFCRpcServer(conf, bindAddr, this, getPolicyProvider());
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
index a710748b..d4b1b2a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
@@ -108,21 +108,39 @@ public class DFSZKFailoverController extends ZKFailoverController {
@Override
protected InetSocketAddress getRpcAddressToBindTo() {
int zkfcPort = getZkfcPort(conf);
- return new InetSocketAddress(localTarget.getAddress().getAddress(),
- zkfcPort);
+ String zkfcBindAddr = getZkfcServerBindHost(conf);
+ if (zkfcBindAddr == null || zkfcBindAddr.isEmpty()) {
+ zkfcBindAddr = localTarget.getAddress().getAddress().getHostAddress();
+ }
+ return new InetSocketAddress(zkfcBindAddr, zkfcPort);
}
-
@Override
protected PolicyProvider getPolicyProvider() {
return new HDFSPolicyProvider();
}
-
+
static int getZkfcPort(Configuration conf) {
return conf.getInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY,
DFSConfigKeys.DFS_HA_ZKFC_PORT_DEFAULT);
}
-
+
+ /**
+ * Given a configuration get the bind host that could be used by ZKFC.
+ * We derive it from NN service rpc bind host or NN rpc bind host.
+ *
+ * @param conf input configuration
+ * @return the bind host address found in conf
+ */
+ private static String getZkfcServerBindHost(Configuration conf) {
+ String addr = conf.getTrimmed(
+ DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY);
+ if (addr == null || addr.isEmpty()) {
+ addr = conf.getTrimmed(DFSConfigKeys.DFS_NAMENODE_RPC_BIND_HOST_KEY);
+ }
+ return addr;
+ }
+
public static DFSZKFailoverController create(Configuration conf) {
Configuration localNNConf = DFSHAAdmin.addSecurityConfiguration(conf);
String nsId = DFSUtil.getNamenodeNameServiceId(conf);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
index bbb787e..b67ea5a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSZKFailoverController.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs.tools;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
@@ -50,6 +51,8 @@ import org.junit.Test;
import com.google.common.base.Supplier;
public class TestDFSZKFailoverController extends ClientBaseWithFixes {
+ private static final String LOCALHOST_SERVER_ADDRESS = "127.0.0.1";
+ private static final String WILDCARD_ADDRESS = "0.0.0.0";
private Configuration conf;
private MiniDFSCluster cluster;
private TestContext ctx;
@@ -199,6 +202,27 @@ public class TestDFSZKFailoverController extends ClientBaseWithFixes {
waitForHAState(0, HAServiceState.ACTIVE);
waitForHAState(1, HAServiceState.STANDBY);
}
+
+ @Test(timeout=30000)
+ public void testWithoutBindAddressSet() throws Exception {
+ DFSZKFailoverController zkfc = DFSZKFailoverController.create(
+ conf);
+
+ assertEquals("Bind address not expected to be wildcard by default.",
+ zkfc.getRpcAddressToBindTo().getHostString(),
+ LOCALHOST_SERVER_ADDRESS);
+ }
+
+ @Test(timeout=30000)
+ public void testWithBindAddressSet() throws Exception {
+ conf.set(DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY, WILDCARD_ADDRESS);
+ DFSZKFailoverController zkfc = DFSZKFailoverController.create(
+ conf);
+ String addr = zkfc.getRpcAddressToBindTo().getHostString();
+
+ assertEquals("Bind address " + addr + " is not wildcard.",
+ addr, WILDCARD_ADDRESS);
+ }
@Test(timeout=30000)
public void testManualFailoverWithDFSHAAdmin() throws Exception {
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org