Posted to common-commits@hadoop.apache.org by ta...@apache.org on 2021/06/08 07:14:16 UTC

[hadoop] branch trunk updated: HDFS-16048. RBF: Print network topology on the router web (#3062)

This is an automated email from the ASF dual-hosted git repository.

tasanuma pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new c748fce  HDFS-16048. RBF: Print network topology on the router web (#3062)
c748fce is described below

commit c748fce17ace8b45ee0f3c3967d87893765eea61
Author: litao <to...@gmail.com>
AuthorDate: Tue Jun 8 15:14:06 2021 +0800

    HDFS-16048. RBF: Print network topology on the router web (#3062)
    
    Reviewed-by: Inigo Goiri <in...@apache.org>
    Reviewed-by: Hemanth Boyina <he...@apache.org>
    Reviewed-by: Akira Ajisaka <aa...@apache.org>
---
 .../server/federation/router/RouterHttpServer.java |   3 +
 .../router/RouterNetworkTopologyServlet.java       |  69 +++++++
 .../src/main/webapps/router/explorer.html          |   1 +
 .../src/main/webapps/router/federationhealth.html  |   1 +
 .../router/TestRouterNetworkTopologyServlet.java   | 210 +++++++++++++++++++++
 .../hdfs/server/namenode/NameNodeHttpServer.java   |   2 +-
 .../server/namenode/NetworkTopologyServlet.java    |   9 +-
 7 files changed, 290 insertions(+), 5 deletions(-)
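
This change, in brief: the Router registers a RouterNetworkTopologyServlet that subclasses the NameNode's NetworkTopologyServlet, fetches an aggregated datanode report through the router RPC server, and serves the result at /topology as plain text or JSON, selected via the Accept header. A minimal client sketch, not part of this commit (the host and port below are placeholders for a real router HTTP address):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class RouterTopologyClient {
      public static void main(String[] args) throws Exception {
        // Hypothetical router endpoint; substitute a real address.
        URL url = new URL("http://router-host:50071/topology");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        // Ask for JSON; omit this header to get the plain-text view.
        conn.setRequestProperty("Accept", "application/json");
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(conn.getInputStream(), "UTF-8"))) {
          String line;
          while ((line = in.readLine()) != null) {
            System.out.println(line);
          }
        }
      }
    }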

diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHttpServer.java
index b1fcc0c..8504439 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHttpServer.java
@@ -125,6 +125,9 @@ public class RouterHttpServer extends AbstractService {
         RouterFsckServlet.PATH_SPEC,
         RouterFsckServlet.class,
         true);
+    httpServer.addInternalServlet(RouterNetworkTopologyServlet.SERVLET_NAME,
+        RouterNetworkTopologyServlet.PATH_SPEC,
+        RouterNetworkTopologyServlet.class);
   }
 
   public InetSocketAddress getHttpAddress() {
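
The servlet name and path above come from constants the Router subclass inherits from NetworkTopologyServlet; SERVLET_NAME is introduced by this commit (see the final hunk). Substituting the constants, the registration is equivalent to this fragment:

    // Fragment only: the call above with the inherited constants
    // substituted (SERVLET_NAME = "topology", PATH_SPEC = "/topology").
    httpServer.addInternalServlet("topology", "/topology",
        RouterNetworkTopologyServlet.class);
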
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNetworkTopologyServlet.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNetworkTopologyServlet.java
new file mode 100644
index 0000000..e517066
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNetworkTopologyServlet.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.namenode.NetworkTopologyServlet;
+import org.apache.hadoop.net.Node;
+import org.apache.hadoop.util.StringUtils;
+
+import javax.servlet.ServletContext;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * A servlet to print out the network topology from the router.
+ */
+public class RouterNetworkTopologyServlet extends NetworkTopologyServlet {
+
+  @Override
+  public void doGet(HttpServletRequest request, HttpServletResponse response)
+      throws IOException {
+    final ServletContext context = getServletContext();
+
+    String format = parseAcceptHeader(request);
+    if (FORMAT_TEXT.equals(format)) {
+      response.setContentType("text/plain; charset=UTF-8");
+    } else if (FORMAT_JSON.equals(format)) {
+      response.setContentType("application/json; charset=UTF-8");
+    }
+
+    Router router = RouterHttpServer.getRouterFromContext(context);
+    DatanodeInfo[] datanodeReport =
+        router.getRpcServer().getDatanodeReport(
+            HdfsConstants.DatanodeReportType.ALL);
+    List<Node> datanodeInfos = Arrays.asList(datanodeReport);
+
+    try (PrintStream out = new PrintStream(
+            response.getOutputStream(), false, "UTF-8")) {
+      printTopology(out, datanodeInfos, format);
+    } catch (Throwable t) {
+      String errMsg = "Failed to print network topology. "
+              + StringUtils.stringifyException(t);
+      response.sendError(HttpServletResponse.SC_GONE, errMsg);
+      throw new IOException(errMsg);
+    } finally {
+      response.getOutputStream().close();
+    }
+  }
+}
\ No newline at end of file
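
Judging by the test assertions further down, the JSON response is an array of single-field objects, each mapping a rack path (prefixed with the owning nameservice, e.g. /ns0/rack1) to its list of datanodes. A minimal Jackson sketch for counting nodes under that assumed shape:

    import com.fasterxml.jackson.databind.JsonNode;
    import com.fasterxml.jackson.databind.ObjectMapper;
    import java.util.Iterator;
    import java.util.Map;

    public class TopologyJsonReader {
      // Counts datanodes in a response shaped like
      // [{"/ns0/rack1": [ ...nodes... ]}, {"/ns0/rack2": [...]}, ...].
      public static int countDatanodes(String json) throws Exception {
        JsonNode racks = new ObjectMapper().readTree(json);
        int count = 0;
        for (JsonNode rack : racks) {           // one array element per rack
          Iterator<Map.Entry<String, JsonNode>> fields = rack.fields();
          while (fields.hasNext()) {
            count += fields.next().getValue().size();  // nodes under the rack
          }
        }
        return count;
      }
    }
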
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.html b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.html
index 80b38e7..49c3e66 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.html
@@ -48,6 +48,7 @@
             <li><a href="jmx">Metrics</a></li>
             <li><a href="conf">Configuration</a></li>
             <li><a href="stacks">Process Thread Dump</a></li>
+            <li><a href="topology">Network Topology</a></li>
           </ul>
         </li>
       </ul>
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
index eca395f..80b4b3b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
@@ -52,6 +52,7 @@
         <li><a href="jmx">Metrics</a></li>
         <li><a href="conf">Configuration</a></li>
         <li><a href="stacks">Process Thread Dump</a></li>
+        <li><a href="topology">Network Topology</a></li>
       </ul>
     </li>
   </ul>
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNetworkTopologyServlet.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNetworkTopologyServlet.java
new file mode 100644
index 0000000..e120c69
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNetworkTopologyServlet.java
@@ -0,0 +1,210 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
+import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
+import org.apache.hadoop.hdfs.server.federation.resolver.MultipleDestinationMountTableResolver;
+import org.apache.hadoop.io.IOUtils;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.ByteArrayOutputStream;
+import java.net.HttpURLConnection;
+import java.net.URL;
+import java.util.Iterator;
+import java.util.Map;
+
+import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_HTTP_ENABLE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class TestRouterNetworkTopologyServlet {
+
+  private static StateStoreDFSCluster clusterWithDatanodes;
+  private static StateStoreDFSCluster clusterNoDatanodes;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    // Build the router configuration
+    Configuration routerConf =
+        new RouterConfigBuilder().stateStore().admin().quota().rpc().build();
+    routerConf.set(DFS_ROUTER_HTTP_ENABLE, "true");
+    Configuration hdfsConf = new Configuration(false);
+
+    // Build and start a federated cluster with datanodes
+    clusterWithDatanodes = new StateStoreDFSCluster(false, 2,
+        MultipleDestinationMountTableResolver.class);
+    clusterWithDatanodes.addNamenodeOverrides(hdfsConf);
+    clusterWithDatanodes.addRouterOverrides(routerConf);
+    clusterWithDatanodes.setNumDatanodesPerNameservice(9);
+    clusterWithDatanodes.setIndependentDNs();
+    clusterWithDatanodes.setRacks(
+        new String[] {"/rack1", "/rack1", "/rack1", "/rack2", "/rack2",
+            "/rack2", "/rack3", "/rack3", "/rack3", "/rack4", "/rack4",
+            "/rack4", "/rack5", "/rack5", "/rack5", "/rack6", "/rack6",
+            "/rack6"});
+    clusterWithDatanodes.startCluster();
+    clusterWithDatanodes.startRouters();
+    clusterWithDatanodes.waitClusterUp();
+    clusterWithDatanodes.waitActiveNamespaces();
+
+    // Build and start a federated cluster without datanodes
+    clusterNoDatanodes = new StateStoreDFSCluster(false, 2,
+        MultipleDestinationMountTableResolver.class);
+    clusterNoDatanodes.addNamenodeOverrides(hdfsConf);
+    clusterNoDatanodes.addRouterOverrides(routerConf);
+    clusterNoDatanodes.setNumDatanodesPerNameservice(0);
+    clusterNoDatanodes.setIndependentDNs();
+    clusterNoDatanodes.startCluster();
+    clusterNoDatanodes.startRouters();
+    clusterNoDatanodes.waitClusterUp();
+    clusterNoDatanodes.waitActiveNamespaces();
+  }
+
+  @Test
+  public void testPrintTopologyTextFormat() throws Exception {
+    // get the router HTTP address
+    String httpAddress = clusterWithDatanodes.getRandomRouter().getRouter()
+        .getHttpServerAddress().toString();
+
+    // send http request
+    URL url = new URL("http:/" + httpAddress + "/topology");
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    conn.setReadTimeout(20000);
+    conn.setConnectTimeout(20000);
+    conn.connect();
+
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
+    StringBuilder sb =
+        new StringBuilder("-- Network Topology -- \n");
+    sb.append(out);
+    sb.append("\n-- Network Topology -- ");
+    String topology = sb.toString();
+
+    // assert rack info
+    assertTrue(topology.contains("/ns0/rack1"));
+    assertTrue(topology.contains("/ns0/rack2"));
+    assertTrue(topology.contains("/ns0/rack3"));
+    assertTrue(topology.contains("/ns1/rack4"));
+    assertTrue(topology.contains("/ns1/rack5"));
+    assertTrue(topology.contains("/ns1/rack6"));
+
+    // assert node number
+    assertEquals(18,
+        topology.split("127.0.0.1").length - 1);
+  }
+
+  @Test
+  public void testPrintTopologyJsonFormat() throws Exception {
+    // get the router HTTP address
+    String httpAddress = clusterWithDatanodes.getRandomRouter().getRouter()
+            .getHttpServerAddress().toString();
+
+    // send http request
+    URL url = new URL("http:/" + httpAddress + "/topology");
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    conn.setReadTimeout(20000);
+    conn.setConnectTimeout(20000);
+    conn.setRequestProperty("Accept", "application/json");
+    conn.connect();
+
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
+    String topology = out.toString();
+
+    // parse json
+    JsonNode racks = new ObjectMapper().readTree(topology);
+
+    // assert rack number
+    assertEquals(6, racks.size());
+
+    // assert rack info
+    assertTrue(topology.contains("/ns0/rack1"));
+    assertTrue(topology.contains("/ns0/rack2"));
+    assertTrue(topology.contains("/ns0/rack3"));
+    assertTrue(topology.contains("/ns1/rack4"));
+    assertTrue(topology.contains("/ns1/rack5"));
+    assertTrue(topology.contains("/ns1/rack6"));
+
+    // assert node number
+    Iterator<JsonNode> elements = racks.elements();
+    int dataNodesCount = 0;
+    while (elements.hasNext()) {
+      JsonNode rack = elements.next();
+      Iterator<Map.Entry<String, JsonNode>> fields = rack.fields();
+      while (fields.hasNext()) {
+        dataNodesCount += fields.next().getValue().size();
+      }
+    }
+    assertEquals(18, dataNodesCount);
+  }
+
+  @Test
+  public void testPrintTopologyNoDatanodesTextFormat() throws Exception {
+    // get the router HTTP address
+    String httpAddress = clusterNoDatanodes.getRandomRouter().getRouter()
+        .getHttpServerAddress().toString();
+
+    // send http request
+    URL url = new URL("http:/" + httpAddress + "/topology");
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    conn.setReadTimeout(20000);
+    conn.setConnectTimeout(20000);
+    conn.connect();
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
+    StringBuilder sb =
+        new StringBuilder("-- Network Topology -- \n");
+    sb.append(out);
+    sb.append("\n-- Network Topology -- ");
+    String topology = sb.toString();
+
+    // assert node number
+    assertTrue(topology.contains("No DataNodes"));
+  }
+
+  @Test
+  public void testPrintTopologyNoDatanodesJsonFormat() throws Exception {
+    // get the router HTTP address
+    String httpAddress = clusterNoDatanodes.getRandomRouter().getRouter()
+        .getHttpServerAddress().toString();
+
+    // send http request
+    URL url = new URL("http:/" + httpAddress + "/topology");
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    conn.setReadTimeout(20000);
+    conn.setConnectTimeout(20000);
+    conn.setRequestProperty("Accept", "application/json");
+    conn.connect();
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
+    StringBuilder sb =
+        new StringBuilder("-- Network Topology -- \n");
+    sb.append(out);
+    sb.append("\n-- Network Topology -- ");
+    String topology = sb.toString();
+
+    // assert node number
+    assertTrue(topology.contains("No DataNodes"));
+  }
+}
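
A note on the URL construction in these tests: "http:/" with a single slash looks like a typo but is evidently deliberate; an InetSocketAddress built from a literal IP renders with a leading slash, so the concatenation produces a well-formed URL. A quick demonstration of that toString() behavior:

    import java.net.InetAddress;
    import java.net.InetSocketAddress;

    public class AddressToStringDemo {
      public static void main(String[] args) throws Exception {
        // An address built from a literal IP has no hostname part, so its
        // toString() starts with a slash.
        InetSocketAddress addr =
            new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 50071);
        System.out.println(addr);             // /127.0.0.1:50071
        System.out.println("http:/" + addr);  // http://127.0.0.1:50071
      }
    }
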
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
index 7ca5241..c05398a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
@@ -253,7 +253,7 @@ public class NameNodeHttpServer {
     httpServer.addInternalServlet(IsNameNodeActiveServlet.SERVLET_NAME,
         IsNameNodeActiveServlet.PATH_SPEC,
         IsNameNodeActiveServlet.class);
-    httpServer.addInternalServlet("topology",
+    httpServer.addInternalServlet(NetworkTopologyServlet.SERVLET_NAME,
         NetworkTopologyServlet.PATH_SPEC, NetworkTopologyServlet.class);
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java
index 5d08971..c07d596 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NetworkTopologyServlet.java
@@ -46,6 +46,7 @@ import java.util.TreeSet;
 @InterfaceAudience.Private
 public class NetworkTopologyServlet extends DfsServlet {
 
+  public static final String SERVLET_NAME = "topology";
   public static final String PATH_SPEC = "/topology";
 
   protected static final String FORMAT_JSON = "json";
@@ -90,7 +91,7 @@ public class NetworkTopologyServlet extends DfsServlet {
    * @param leaves leaf nodes under the base scope
    * @param format the response format
    */
-  public void printTopology(PrintStream stream, List<Node> leaves,
+  protected void printTopology(PrintStream stream, List<Node> leaves,
       String format) throws BadFormatException, IOException {
     if (leaves.isEmpty()) {
       stream.print("No DataNodes");
@@ -120,7 +121,7 @@ public class NetworkTopologyServlet extends DfsServlet {
     }
   }
 
-  private void printJsonFormat(PrintStream stream, Map<String,
+  protected void printJsonFormat(PrintStream stream, Map<String,
       TreeSet<String>> tree, ArrayList<String> racks) throws IOException {
     JsonFactory dumpFactory = new JsonFactory();
     JsonGenerator dumpGenerator = dumpFactory.createGenerator(stream);
@@ -152,7 +153,7 @@ public class NetworkTopologyServlet extends DfsServlet {
     }
   }
 
-  private void printTextFormat(PrintStream stream, Map<String,
+  protected void printTextFormat(PrintStream stream, Map<String,
       TreeSet<String>> tree, ArrayList<String> racks) {
     for(String r : racks) {
       stream.println("Rack: " + r);
@@ -171,7 +172,7 @@ public class NetworkTopologyServlet extends DfsServlet {
   }
 
   @VisibleForTesting
-  static String parseAcceptHeader(HttpServletRequest request) {
+  protected static String parseAcceptHeader(HttpServletRequest request) {
     String format = request.getHeader(HttpHeaders.ACCEPT);
     return format != null && format.contains(FORMAT_JSON) ?
             FORMAT_JSON : FORMAT_TEXT;
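
The negotiation rule above is deliberately loose: any Accept header containing "json" selects JSON, and everything else, including a missing header, falls back to plain text. A standalone sketch of the same rule:

    public class AcceptHeaderDemo {
      static final String FORMAT_JSON = "json";
      static final String FORMAT_TEXT = "text";

      // Mirrors parseAcceptHeader: substring match on "json", text otherwise.
      static String parse(String acceptHeader) {
        return acceptHeader != null && acceptHeader.contains(FORMAT_JSON)
            ? FORMAT_JSON : FORMAT_TEXT;
      }

      public static void main(String[] args) {
        System.out.println(parse("application/json"));  // json
        System.out.println(parse("text/plain"));        // text
        System.out.println(parse(null));                // text
      }
    }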
