Posted to common-commits@hadoop.apache.org by ae...@apache.org on 2016/06/22 00:37:04 UTC

hadoop git commit: HDFS-10550. DiskBalancer: fix issue of order dependency in iteration in ReportCommand test. Contributed by Xiaobing Zhou.

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-1312 3a0a329c4 -> 90a032653


HDFS-10550. DiskBalancer: fix issue of order dependency in iteration in ReportCommand test. Contributed by Xiaobing Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/90a03265
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/90a03265
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/90a03265

Branch: refs/heads/HDFS-1312
Commit: 90a0326537cedc800abe4fa3623ddc0606e7e5e1
Parents: 3a0a329
Author: Anu Engineer <ae...@apache.org>
Authored: Tue Jun 21 17:34:58 2016 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Tue Jun 21 17:34:58 2016 -0700

----------------------------------------------------------------------
 .../diskbalancer/command/ReportCommand.java     | 17 +++--
 .../command/TestDiskBalancerCommand.java        | 74 +++++++++-----------
 2 files changed, 44 insertions(+), 47 deletions(-)
----------------------------------------------------------------------
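
For context, the heart of the ReportCommand change is to stop emitting per-volume lines in map-iteration order and instead collect, sort, and join them, so the rendered report (and the test that parses it by line index) no longer depends on HashMap ordering. Below is a minimal standalone sketch of that pattern, using plain JDK calls rather than the Guava Lists / commons-lang StringUtils helpers the patch itself uses, and with made-up volume strings:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class SortedVolumeReportSketch {
  public static void main(String[] args) {
    // Lines arrive in whatever order the underlying map's values() yields them.
    List<String> volumeLines = new ArrayList<>();
    volumeLines.add("[DISK: volume-/tmp/disk/xx3j3ph3zd] 0.72 used");
    volumeLines.add("[RAM_DISK: volume-/tmp/disk/BoBlQFxhfw] 0.60 used");
    volumeLines.add("[DISK: volume-/tmp/disk/KmHefYNURo] 0.20 used");

    // Sorting before joining makes the rendered block stable across runs and JVMs,
    // which is what lets the test assert on fixed output line indices.
    Collections.sort(volumeLines);
    System.out.println(String.join(System.lineSeparator(), volumeLines));
  }
}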


http://git-wip-us.apache.org/repos/asf/hadoop/blob/90a03265/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java
index eb6afcc..40729f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.diskbalancer.command;
 
 import java.io.PrintStream;
 import java.util.Collections;
+import java.util.List;
 import java.util.ListIterator;
 
 import org.apache.commons.cli.CommandLine;
@@ -32,6 +33,7 @@ import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSe
 import org.apache.hadoop.hdfs.tools.DiskBalancer;
 
 import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
 
 /**
  * Executes the report command.
@@ -164,9 +166,10 @@ public class ReportCommand extends Command {
             dbdn.getVolumeCount(),
             dbdn.getNodeDataDensity()));
 
+        List<String> volumeList = Lists.newArrayList();
         for (DiskBalancerVolumeSet vset : dbdn.getVolumeSets().values()) {
           for (DiskBalancerVolume vol : vset.getVolumes()) {
-            result.appendln(String.format(volumeFormat,
+            volumeList.add(String.format(volumeFormat,
                 vol.getStorageType(),
                 vol.getPath(),
                 vol.getUsedRatio(),
@@ -181,6 +184,10 @@ public class ReportCommand extends Command {
                 vol.isTransient() ? trueStr : falseStr));
           }
         }
+
+        Collections.sort(volumeList);
+        result.appendln(
+            StringUtils.join(volumeList.toArray(), System.lineSeparator()));
       }
     }
   }
@@ -194,13 +201,13 @@ public class ReportCommand extends Command {
         " datanode, or prints out the list of nodes that will benefit from " +
         "running disk balancer. Top defaults to " + getDefaultTop();
     String footer = ". E.g.:\n"
-        + "hdfs diskbalancer -uri http://namenode.uri -report\n"
-        + "hdfs diskbalancer -uri http://namenode.uri -report -top 5\n"
-        + "hdfs diskbalancer -uri http://namenode.uri -report "
+        + "hdfs diskbalancer -fs http://namenode.uri -report\n"
+        + "hdfs diskbalancer -fs http://namenode.uri -report -top 5\n"
+        + "hdfs diskbalancer -fs http://namenode.uri -report "
         + "-node {DataNodeID | IP | Hostname}";
 
     HelpFormatter helpFormatter = new HelpFormatter();
-    helpFormatter.printHelp("hdfs diskbalancer -uri http://namenode.uri " +
+    helpFormatter.printHelp("hdfs diskbalancer -fs http://namenode.uri " +
         "-report [options]",
         header, DiskBalancer.getReportOptions(), footer);
   }
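
As an aside, the help-text hunk above also switches the advertised flag from -uri to -fs. The following rough, self-contained sketch shows how such a usage message is produced with commons-cli's HelpFormatter; the option definitions here are illustrative stand-ins, not the actual DiskBalancer.getReportOptions():

import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;

public class ReportHelpSketch {
  public static void main(String[] args) {
    // Illustrative options only; the real ones come from DiskBalancer.getReportOptions().
    Options options = new Options();
    options.addOption("report", false, "Prints the volume report for datanodes");
    options.addOption("top", true, "Number of top over-utilized nodes to report");
    options.addOption("node", true, "DataNodeID, IP or hostname to report on");

    String header = "Report command reports the volume information of given"
        + " datanode(s), or prints out the list of nodes that will benefit from"
        + " running disk balancer.";
    String footer = ". E.g.:\n"
        + "hdfs diskbalancer -fs http://namenode.uri -report -top 5";

    // Same printHelp call shape as ReportCommand#printHelp, now showing -fs.
    new HelpFormatter().printHelp(
        "hdfs diskbalancer -fs http://namenode.uri -report [options]",
        header, options, footer);
  }
}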

http://git-wip-us.apache.org/repos/asf/hadoop/blob/90a03265/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
index 57e59f6..3accbc2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
@@ -71,8 +71,10 @@ public class TestDiskBalancerCommand {
     }
   }
 
-  private void testReportSimple() throws Exception {
-    final String cmdLine = String.format("hdfs diskbalancer -uri %s -report",
+  /* test basic report */
+  @Test(timeout=60000)
+  public void testReportSimple() throws Exception {
+    final String cmdLine = String.format("hdfs diskbalancer -fs %s -report",
         clusterJson.toString());
     final List<String> outputs = runCommand(cmdLine);
 
@@ -98,9 +100,11 @@ public class TestDiskBalancerCommand {
 
   }
 
-  private void testReportLessThanTotal() throws Exception {
+  /* test less than 64 DataNode(s) as total, e.g., -report -top 32 */
+  @Test(timeout=60000)
+  public void testReportLessThanTotal() throws Exception {
     final String cmdLine = String.format(
-        "hdfs diskbalancer -uri %s -report -top 32", clusterJson.toString());
+        "hdfs diskbalancer -fs %s -report -top 32", clusterJson.toString());
     final List<String> outputs = runCommand(cmdLine);
 
     assertThat(
@@ -120,9 +124,11 @@ public class TestDiskBalancerCommand {
             containsString("9 volumes with node data density 1.97"))));
   }
 
-  private void testReportMoreThanTotal() throws Exception {
+  /* test more than 64 DataNode(s) as total, e.g., -report -top 128 */
+  @Test(timeout=60000)
+  public void testReportMoreThanTotal() throws Exception {
     final String cmdLine = String.format(
-        "hdfs diskbalancer -uri %s -report -top 128", clusterJson.toString());
+        "hdfs diskbalancer -fs %s -report -top 128", clusterJson.toString());
     final List<String> outputs = runCommand(cmdLine);
 
     assertThat(
@@ -143,9 +149,11 @@ public class TestDiskBalancerCommand {
 
   }
 
-  private void testReportInvalidTopLimit() throws Exception {
+  /* test invalid top limit, e.g., -report -top xx */
+  @Test(timeout=60000)
+  public void testReportInvalidTopLimit() throws Exception {
     final String cmdLine = String.format(
-        "hdfs diskbalancer -uri %s -report -top xx", clusterJson.toString());
+        "hdfs diskbalancer -fs %s -report -top xx", clusterJson.toString());
     final List<String> outputs = runCommand(cmdLine);
 
     assertThat(
@@ -169,10 +177,12 @@ public class TestDiskBalancerCommand {
             containsString("9 volumes with node data density 1.97"))));
   }
 
-  private void testReportNode() throws Exception {
+  /* test -report -node DataNodeID */
+  @Test(timeout=60000)
+  public void testReportNode() throws Exception {
     final String cmdLine = String
         .format(
-            "hdfs diskbalancer -uri %s -report -node "
+            "hdfs diskbalancer -fs %s -report -node "
                 + "a87654a9-54c7-4693-8dd9-c9c7021dc340",
             clusterJson.toString());
     final List<String> outputs = runCommand(cmdLine);
@@ -192,9 +202,9 @@ public class TestDiskBalancerCommand {
     assertThat(
         outputs.get(3),
         is(allOf(containsString("DISK"),
-            containsString("/tmp/disk/xx3j3ph3zd"),
-            containsString("0.72 used: 289544224916/400000000000"),
-            containsString("0.28 free: 110455775084/400000000000"))));
+            containsString("/tmp/disk/KmHefYNURo"),
+            containsString("0.20 used: 39160240782/200000000000"),
+            containsString("0.80 free: 160839759218/200000000000"))));
     assertThat(
         outputs.get(4),
         is(allOf(containsString("DISK"),
@@ -204,16 +214,15 @@ public class TestDiskBalancerCommand {
     assertThat(
         outputs.get(5),
         is(allOf(containsString("DISK"),
-            containsString("DISK"),
-            containsString("/tmp/disk/KmHefYNURo"),
-            containsString("0.20 used: 39160240782/200000000000"),
-            containsString("0.80 free: 160839759218/200000000000"))));
+            containsString("/tmp/disk/xx3j3ph3zd"),
+            containsString("0.72 used: 289544224916/400000000000"),
+            containsString("0.28 free: 110455775084/400000000000"))));
     assertThat(
         outputs.get(6),
         is(allOf(containsString("RAM_DISK"),
-            containsString("/tmp/disk/MXRyYsCz3U"),
-            containsString("0.55 used: 438102096853/800000000000"),
-            containsString("0.45 free: 361897903147/800000000000"))));
+            containsString("/tmp/disk/BoBlQFxhfw"),
+            containsString("0.60 used: 477590453390/800000000000"),
+            containsString("0.40 free: 322409546610/800000000000"))));
     assertThat(
         outputs.get(7),
         is(allOf(containsString("RAM_DISK"),
@@ -223,9 +232,9 @@ public class TestDiskBalancerCommand {
     assertThat(
         outputs.get(8),
         is(allOf(containsString("RAM_DISK"),
-            containsString("/tmp/disk/BoBlQFxhfw"),
-            containsString("0.60 used: 477590453390/800000000000"),
-            containsString("0.40 free: 322409546610/800000000000"))));
+            containsString("/tmp/disk/MXRyYsCz3U"),
+            containsString("0.55 used: 438102096853/800000000000"),
+            containsString("0.45 free: 361897903147/800000000000"))));
     assertThat(
         outputs.get(9),
         is(allOf(containsString("SSD"),
@@ -247,25 +256,6 @@ public class TestDiskBalancerCommand {
   }
 
   @Test(timeout=60000)
-  public void testReportCommmand() throws Exception {
-
-    /* test basic report */
-    testReportSimple();
-
-    /* test less than 64 DataNode(s) as total, e.g., -report -top 32 */
-    testReportLessThanTotal();
-
-    /* test more than 64 DataNode(s) as total, e.g., -report -top 128 */
-    testReportMoreThanTotal();
-
-    /* test invalid top limit, e.g., -report -top xx */
-    testReportInvalidTopLimit();
-
-    /* test -report -node DataNodeID */
-    testReportNode();
-  }
-
-  @Test
   public void testReadClusterFromJson() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
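
The test-side change above does two things: it switches the command lines from -uri to -fs, and it promotes what used to be private helpers driven by a single testReportCommmand() into independent @Test(timeout=60000) methods. A minimal JUnit 4 sketch of that structure, with a hypothetical runReport() stub standing in for the real runCommand() helper and made-up assertion strings:

import static org.hamcrest.CoreMatchers.containsString;
import static org.junit.Assert.assertThat;

import org.junit.Test;

public class IndependentReportTestsSketch {

  // Each scenario runs on its own, so a failure is attributed to the exact
  // case that broke and no case depends on another having run first.
  @Test(timeout = 60000)
  public void reportSimple() throws Exception {
    assertThat(runReport("-report"), containsString("volumes with node data density"));
  }

  @Test(timeout = 60000)
  public void reportTop() throws Exception {
    assertThat(runReport("-report -top 32"), containsString("Reporting top 32 nodes"));
  }

  // Hypothetical stand-in for TestDiskBalancerCommand#runCommand(String).
  private String runReport(String args) {
    return "Reporting top 32 nodes: 9 volumes with node data density 1.97";
  }
}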


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org