Posted to common-commits@hadoop.apache.org by to...@apache.org on 2022/06/16 11:19:17 UTC

[hadoop] branch trunk updated: HDFS-16581. Print node status when executing printTopology. (#4321)

This is an automated email from the ASF dual-hosted git repository.

tomscut pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 6cbeae2e52c HDFS-16581. Print node status when executing printTopology. (#4321)
6cbeae2e52c is described below

commit 6cbeae2e52cc0c0c328c72c227c7b67dc48ec986
Author: jianghuazhu <74...@qq.com>
AuthorDate: Thu Jun 16 19:18:58 2022 +0800

    HDFS-16581. Print node status when executing printTopology. (#4321)
    
    Reviewed-by: Viraj Jasani <vj...@apache.org>
    Signed-off-by: Tao Li <to...@apache.org>
---
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java     | 60 ++++++++++++----------
 .../org/apache/hadoop/hdfs/tools/TestDFSAdmin.java | 48 +++++++++++++++++
 2 files changed, 80 insertions(+), 28 deletions(-)
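
For context: after this change, hdfs dfsadmin -printTopology prints each
datanode's admin state after its address and (when resolvable) hostname,
with a blank line separating racks. An illustrative example of the new
output format, derived from the code below (the addresses and hostnames
here are invented for illustration, not taken from the patch):

    Rack: /d1/r1
       127.0.0.1:9866 (localhost) NORMAL

    Rack: /d1/r2
       127.0.0.2:9866 (localhost) IN_MAINTENANCE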

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index a9eb5522133..1d3e8da77a3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -35,7 +35,6 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
-import java.util.TreeSet;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
@@ -1648,40 +1647,45 @@ public class DFSAdmin extends FsShell {
   * @throws IOException If an error occurs while getting the datanode report
    */
   public int printTopology() throws IOException {
-      DistributedFileSystem dfs = getDFS();
-      final DatanodeInfo[] report = dfs.getDataNodeStats();
-
-      // Build a map of rack -> nodes from the datanode report
-      HashMap<String, TreeSet<String> > tree = new HashMap<String, TreeSet<String>>();
-      for(DatanodeInfo dni : report) {
-        String location = dni.getNetworkLocation();
-        String name = dni.getName();
-        
-        if(!tree.containsKey(location)) {
-          tree.put(location, new TreeSet<String>());
-        }
+    DistributedFileSystem dfs = getDFS();
+    final DatanodeInfo[] report = dfs.getDataNodeStats();
+
+    // Build a map of rack -> nodes from the datanode report
+    Map<String, HashMap<String, String>> map = new HashMap<>();
+    for(DatanodeInfo dni : report) {
+      String location = dni.getNetworkLocation();
+      String name = dni.getName();
+      String dnState = dni.getAdminState().toString();
         
-        tree.get(location).add(name);
+      if(!map.containsKey(location)) {
+        map.put(location, new HashMap<>());
       }
+
+      Map<String, String> node = map.get(location);
+      node.put(name, dnState);
+    }
       
-      // Sort the racks (and nodes) alphabetically, display in order
-      ArrayList<String> racks = new ArrayList<String>(tree.keySet());
-      Collections.sort(racks);
+    // Sort the racks alphabetically and display them in order
+    List<String> racks = new ArrayList<>(map.keySet());
+    Collections.sort(racks);
       
-      for(String r : racks) {
-        System.out.println("Rack: " + r);
-        TreeSet<String> nodes = tree.get(r);
-
-        for(String n : nodes) {
-          System.out.print("   " + n);
-          String hostname = NetUtils.getHostNameOfIP(n);
-          if(hostname != null)
-            System.out.print(" (" + hostname + ")");
-          System.out.println();
+    for(String r : racks) {
+      System.out.println("Rack: " + r);
+      Map<String, String> nodes = map.get(r);
+
+      for(Map.Entry<String, String> entry : nodes.entrySet()) {
+        String n = entry.getKey();
+        System.out.print("   " + n);
+        String hostname = NetUtils.getHostNameOfIP(n);
+        if(hostname != null) {
+          System.out.print(" (" + hostname + ")");
         }
-
+        System.out.print(" " + entry.getValue());
         System.out.println();
       }
+
+      System.out.println();
+    }
     return 0;
   }
   
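An editorial note on the data structure: the patch replaces the old
TreeSet with a HashMap keyed by node name, so nodes within a rack are no
longer printed in sorted order (only the racks themselves are sorted).
Below is a minimal sketch of the grouping step that would keep per-rack
sorting, using TreeMap and Map.computeIfAbsent; this is a suggestion for
readers, not part of this commit:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.TreeMap;

    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

    // Sketch only: group datanodes by rack. The TreeMap keeps node names
    // sorted within each rack, matching the pre-patch TreeSet behavior.
    static Map<String, Map<String, String>> groupByRack(DatanodeInfo[] report) {
      Map<String, Map<String, String>> byRack = new HashMap<>();
      for (DatanodeInfo dni : report) {
        byRack.computeIfAbsent(dni.getNetworkLocation(), k -> new TreeMap<>())
            .put(dni.getName(), dni.getAdminState().toString());
      }
      return byRack;
    }

Using a TreeMap for the outer map as well would make the explicit
Collections.sort over the rack names unnecessary.
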
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index b4ae9bcaab2..73c976d88da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -70,6 +70,8 @@ import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
@@ -520,6 +522,52 @@ public class TestDFSAdmin {
     }
   }
 
+  @Test(timeout = 30000)
+  public void testPrintTopologyWithStatus() throws Exception {
+    redirectStream();
+    final Configuration dfsConf = new HdfsConfiguration();
+    final File baseDir = new File(
+            PathUtils.getTestDir(getClass()),
+            GenericTestUtils.getMethodName());
+    dfsConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
+
+    final int numDn = 4;
+    final String[] racks = {
+        "/d1/r1", "/d1/r2",
+        "/d2/r1", "/d2/r2"};
+
+    try (MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(dfsConf)
+            .numDataNodes(numDn).racks(racks).build()) {
+      miniCluster.waitActive();
+      assertEquals(numDn, miniCluster.getDataNodes().size());
+
+      DatanodeManager dm = miniCluster.getNameNode().getNamesystem().
+          getBlockManager().getDatanodeManager();
+      DatanodeDescriptor maintenanceNode = dm.getDatanode(
+          miniCluster.getDataNodes().get(1).getDatanodeId());
+      maintenanceNode.setInMaintenance();
+      DatanodeDescriptor decommissionedNode = dm.getDatanode(
+          miniCluster.getDataNodes().get(2).getDatanodeId());
+      decommissionedNode.setDecommissioned();
+
+      final DFSAdmin dfsAdmin = new DFSAdmin(dfsConf);
+
+      resetStream();
+      final int ret = ToolRunner.run(dfsAdmin, new String[] {"-printTopology"});
+
+      /* collect outputs */
+      final List<String> outs = Lists.newArrayList();
+      scanIntoList(out, outs);
+
+      /* verify results */
+      assertEquals(0, ret);
+      assertTrue(outs.get(1).contains(DatanodeInfo.AdminStates.NORMAL.toString()));
+      assertTrue(outs.get(4).contains(DatanodeInfo.AdminStates.IN_MAINTENANCE.toString()));
+      assertTrue(outs.get(7).contains(DatanodeInfo.AdminStates.DECOMMISSIONED.toString()));
+      assertTrue(outs.get(10).contains(DatanodeInfo.AdminStates.NORMAL.toString()));
+    }
+  }
+
   @Test(timeout = 30000)
   public void testNameNodeGetReconfigurationStatus() throws IOException,
       InterruptedException, TimeoutException {
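
To run just the new test locally, a conventional Maven Surefire invocation
from the hadoop-hdfs module should work (the module path comes from the
file paths above; the exact command is a usage sketch, not taken from this
email):

    cd hadoop-hdfs-project/hadoop-hdfs
    mvn test -Dtest=TestDFSAdmin#testPrintTopologyWithStatus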

