You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by br...@apache.org on 2021/07/30 03:13:10 UTC

[hadoop] 04/05: HADOOP-12432. Add support for include/exclude lists on IPv6 setup. Contributed by Nemanja Matkovic and Hemanth Boyina.

This is an automated email from the ASF dual-hosted git repository.

brahma pushed a commit to branch HADOOP-17800
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit b30674140bc58b68760879fc9534e55eb8743753
Author: Brahma Reddy Battula <br...@apache.org>
AuthorDate: Fri Jul 30 08:31:31 2021 +0530

    HADOOP-12432. Add support for include/exclude lists on IPv6 setup. Contributed by Nemanja Matkovic and Hemanth Boyina.
---
 .../server/blockmanagement/HostFileManager.java    |  9 ++--
 .../blockmanagement/TestHostFileManager.java       | 49 +++++++++++++++-------
 .../hdfs/server/namenode/TestHostsFiles.java       |  9 ++--
 .../apache/hadoop/hdfs/util/HostsFileWriter.java   | 11 +++--
 4 files changed, 49 insertions(+), 29 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
index 57b6902..dcbd131 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
@@ -23,12 +23,11 @@ import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.HostsFileReader;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.net.URI;
-import java.net.URISyntaxException;
 import java.util.HashSet;
 
 /**
@@ -89,16 +88,14 @@ public class HostFileManager extends HostConfigManager {
   @VisibleForTesting
   static InetSocketAddress parseEntry(String type, String fn, String line) {
     try {
-      URI uri = new URI("dummy", line, null, null, null);
-      int port = uri.getPort() == -1 ? 0 : uri.getPort();
-      InetSocketAddress addr = new InetSocketAddress(uri.getHost(), port);
+      InetSocketAddress addr = NetUtils.createSocketAddr(line, 0);
       if (addr.isUnresolved()) {
         LOG.warn(String.format("Failed to resolve address `%s` in `%s`. " +
                 "Ignoring in the %s list.", line, fn, type));
         return null;
       }
       return addr;
-    } catch (URISyntaxException e) {
+    } catch (IllegalArgumentException e) {
       LOG.warn(String.format("Failed to parse `%s` in `%s`. " + "Ignoring in " +
               "the %s list.", line, fn, type));
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHostFileManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHostFileManager.java
index 38d0905..2139ac5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHostFileManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHostFileManager.java
@@ -110,13 +110,19 @@ public class TestHostFileManager {
     includedNodes.add(entry("127.0.0.1:12345"));
     includedNodes.add(entry("localhost:12345"));
     includedNodes.add(entry("127.0.0.1:12345"));
+
+    includedNodes.add(entry("[::1]:42"));
+    includedNodes.add(entry("[0:0:0:0:0:0:0:1]:42"));
+    includedNodes.add(entry("[::1]:42"));
+
     includedNodes.add(entry("127.0.0.2"));
 
     excludedNodes.add(entry("127.0.0.1:12346"));
     excludedNodes.add(entry("127.0.30.1:12346"));
+    excludedNodes.add(entry("[::1]:24"));
 
-    Assert.assertEquals(2, includedNodes.size());
-    Assert.assertEquals(2, excludedNodes.size());
+    Assert.assertEquals(3, includedNodes.size());
+    Assert.assertEquals(3, excludedNodes.size());
 
     hm.refresh(includedNodes, excludedNodes);
 
@@ -125,20 +131,33 @@ public class TestHostFileManager {
     Map<String, DatanodeDescriptor> dnMap = (Map<String,
             DatanodeDescriptor>) Whitebox.getInternalState(dm, "datanodeMap");
 
-    // After the de-duplication, there should be only one DN from the included
+    // After the de-duplication, there should be three DNs from the included
     // nodes declared as dead.
-    Assert.assertEquals(2, dm.getDatanodeListForReport(HdfsConstants
-            .DatanodeReportType.ALL).size());
-    Assert.assertEquals(2, dm.getDatanodeListForReport(HdfsConstants
-            .DatanodeReportType.DEAD).size());
-    dnMap.put("uuid-foo", new DatanodeDescriptor(new DatanodeID("127.0.0.1",
-            "localhost", "uuid-foo", 12345, 1020, 1021, 1022)));
-    Assert.assertEquals(1, dm.getDatanodeListForReport(HdfsConstants
-            .DatanodeReportType.DEAD).size());
-    dnMap.put("uuid-bar", new DatanodeDescriptor(new DatanodeID("127.0.0.2",
-            "127.0.0.2", "uuid-bar", 12345, 1020, 1021, 1022)));
-    Assert.assertEquals(0, dm.getDatanodeListForReport(HdfsConstants
-            .DatanodeReportType.DEAD).size());
+    Assert.assertEquals(3,
+        dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.ALL)
+            .size());
+    Assert.assertEquals(3,
+        dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.DEAD)
+            .size());
+    dnMap.put("uuid-foo", new DatanodeDescriptor(
+        new DatanodeID("127.0.0.1", "localhost", "uuid-foo", 12345, 1020, 1021,
+            1022)));
+    Assert.assertEquals(2,
+        dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.DEAD)
+            .size());
+    dnMap.put("uuid-bar", new DatanodeDescriptor(
+        new DatanodeID("127.0.0.2", "127.0.0.2", "uuid-bar", 12345, 1020, 1021,
+            1022)));
+    Assert.assertEquals(1,
+        dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.DEAD)
+            .size());
+    dnMap.put("uuid-baz", new DatanodeDescriptor(
+        new DatanodeID("[::1]", "localhost", "uuid-baz", 42, 1020, 1021,
+            1022)));
+    Assert.assertEquals(0,
+        dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.DEAD)
+            .size());
+
     DatanodeDescriptor spam = new DatanodeDescriptor(new DatanodeID("127.0.0" +
             ".3", "127.0.0.3", "uuid-spam", 12345, 1020, 1021, 1022));
     DFSTestUtil.setDatanodeDead(spam);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
index e86413d..4fa5a3f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
@@ -151,14 +151,15 @@ public class TestHostsFiles {
 
     HostsFileWriter hostsFileWriter = new HostsFileWriter();
     hostsFileWriter.initialize(conf, "temp/decommission");
-    hostsFileWriter.initIncludeHosts(new String[]
-        {"localhost:52","127.0.0.1:7777"});
+    hostsFileWriter.initIncludeHosts(
+        new String[] {"localhost:52", "127.0.0.1:7777", "[::1]:42",
+            "[0:0:0:0:0:0:0:1]:24"});
 
     MiniDFSCluster cluster = null;
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
       final FSNamesystem ns = cluster.getNameNode().getNamesystem();
-      assertTrue(ns.getNumDeadDataNodes() == 2);
+      assertTrue(ns.getNumDeadDataNodes() == 4);
       assertTrue(ns.getNumLiveDataNodes() == 0);
 
       // Testing using MBeans
@@ -166,7 +167,7 @@ public class TestHostsFiles {
       ObjectName mxbeanName = new ObjectName(
           "Hadoop:service=NameNode,name=FSNamesystemState");
       String nodes = mbs.getAttribute(mxbeanName, "NumDeadDataNodes") + "";
-      assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumDeadDataNodes") == 2);
+      assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumDeadDataNodes") == 4);
       assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumLiveDataNodes") == 0);
     } finally {
       if (cluster != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/HostsFileWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/HostsFileWriter.java
index e171e2b..d1c779c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/HostsFileWriter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/HostsFileWriter.java
@@ -141,11 +141,14 @@ public class HostsFileWriter {
           includeHosts.toString());
     } else {
       HashSet<DatanodeAdminProperties> allDNs = new HashSet<>();
-      for(String hostNameAndPort : hostNameAndPorts) {
-        String[] hostAndPort = hostNameAndPort.split(":");
+      for (String hostNameAndPort : hostNameAndPorts) {
+        int i = hostNameAndPort.lastIndexOf(':');
+        String port =
+            hostNameAndPort.substring(hostNameAndPort.lastIndexOf(":") + 1);
+        String addr = hostNameAndPort.substring(0, i);
         DatanodeAdminProperties dn = new DatanodeAdminProperties();
-        dn.setHostName(hostAndPort[0]);
-        dn.setPort(Integer.parseInt(hostAndPort[1]));
+        dn.setHostName(addr);
+        dn.setPort(Integer.parseInt(port));
         allDNs.add(dn);
       }
       CombinedHostsFileWriter.writeFile(combinedFile.toString(), allDNs);

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org