Posted to common-commits@hadoop.apache.org by ar...@apache.org on 2016/06/24 06:06:07 UTC

[36/49] hadoop git commit: HDFS-10500. Diskbalancer: Print out information when a plan is not generated. Contributed by Anu Engineer.

HDFS-10500. Diskbalancer: Print out information when a plan is not generated. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/78a1032b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/78a1032b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/78a1032b

Branch: refs/heads/trunk
Commit: 78a1032b71af7672840da98808e2bebac3cc11d1
Parents: d2ff793
Author: Anu Engineer <ae...@apache.org>
Authored: Thu Jun 9 13:43:19 2016 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Thu Jun 23 18:21:08 2016 -0700

----------------------------------------------------------------------
 .../diskbalancer/command/CancelCommand.java     |  4 +-
 .../server/diskbalancer/command/Command.java    | 19 +-----
 .../diskbalancer/command/ExecuteCommand.java    |  2 +-
 .../diskbalancer/command/PlanCommand.java       | 61 ++++++++++----------
 4 files changed, 37 insertions(+), 49 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/78a1032b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java
index f395802..3834d9b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java
@@ -70,7 +70,7 @@ public class CancelCommand extends Command {
       // points us to the plan file, we can compute the hash as well as read
       // the address of the datanode from the plan file.
       String planFile = cmd.getOptionValue(DiskBalancer.CANCEL);
-      Preconditions.checkArgument(planFile == null || planFile.isEmpty(),
+      Preconditions.checkArgument(planFile != null && !planFile.isEmpty(),
           "Invalid plan file specified.");
       String planData = null;
       try (FSDataInputStream plan = open(planFile)) {
@@ -88,7 +88,7 @@ public class CancelCommand extends Command {
    */
   private void cancelPlan(String planData) throws IOException {
     Preconditions.checkNotNull(planData);
-    NodePlan plan = readPlan(planData);
+    NodePlan plan = NodePlan.parseJson(planData);
     String dataNodeAddress = plan.getNodeName() + ":" + plan.getPort();
     Preconditions.checkNotNull(dataNodeAddress);
     ClientDatanodeProtocol dataNode = getDataNodeProxy(dataNodeAddress);

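The flipped precondition above fixes an inverted validation: Guava's Preconditions.checkArgument throws IllegalArgumentException when its condition is false, so the old form rejected every real plan file and let a null or empty one through. A minimal standalone sketch of the corrected semantics (the plan-file path is a hypothetical value):

    import com.google.common.base.Preconditions;

    public class CheckArgumentDemo {
      public static void main(String[] args) {
        String planFile = "/tmp/datanode.plan.json"; // hypothetical value
        // checkArgument throws IllegalArgumentException iff the condition
        // is false, so the condition must describe the VALID state.
        Preconditions.checkArgument(planFile != null && !planFile.isEmpty(),
            "Invalid plan file specified.");
        System.out.println("Plan file accepted: " + planFile);
      }
    }
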
http://git-wip-us.apache.org/repos/asf/hadoop/blob/78a1032b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
index fb975a8..94a21d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
@@ -31,16 +31,13 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
-import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
-import org.apache.hadoop.hdfs.tools.DiskBalancer;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
-
+import org.apache.hadoop.hdfs.tools.DiskBalancer;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.htrace.fasterxml.jackson.databind.ObjectMapper;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -59,10 +56,10 @@ import java.util.Date;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.LinkedList;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeSet;
-import java.util.List;
 
 /**
  * Common interface for command handling.
@@ -394,16 +391,4 @@ public abstract class Command extends Configured {
   protected DiskBalancerCluster getCluster() {
     return cluster;
   }
-
-  /**
-   * Returns a plan from the Json Data.
-   *
-   * @param planData - Json String
-   * @return NodePlan
-   * @throws IOException
-   */
-  protected NodePlan readPlan(String planData) throws IOException {
-    ObjectMapper mapper = new ObjectMapper();
-    return mapper.readValue(planData, NodePlan.class);
-  }
 }

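These Command.java hunks reorder imports, drop the shaded Jackson ObjectMapper pulled in from the htrace package, and delete the readPlan helper; the call sites in CancelCommand and ExecuteCommand now use NodePlan.parseJson instead. Assuming parseJson simply centralizes the same deserialization the deleted helper performed, a sketch of what the moved logic could look like inside NodePlan:

    import java.io.IOException;
    import org.codehaus.jackson.map.ObjectMapper;

    public class NodePlan {
      // ... plan fields (nodeName, port, volumeSetPlans, ...) elided ...

      /** Hypothetical mirror of the deleted Command#readPlan helper. */
      public static NodePlan parseJson(String json) throws IOException {
        ObjectMapper mapper = new ObjectMapper();
        return mapper.readValue(json, NodePlan.class);
      }
    }
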
http://git-wip-us.apache.org/repos/asf/hadoop/blob/78a1032b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java
index 6d30e86..85f2a86 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java
@@ -79,7 +79,7 @@ public class ExecuteCommand extends Command {
    */
   private void submitPlan(String planData) throws IOException {
     Preconditions.checkNotNull(planData);
-    NodePlan plan = readPlan(planData);
+    NodePlan plan = NodePlan.parseJson(planData);
     String dataNodeAddress = plan.getNodeName() + ":" + plan.getPort();
     Preconditions.checkNotNull(dataNodeAddress);
     ClientDatanodeProtocol dataNode = getDataNodeProxy(dataNodeAddress);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78a1032b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
index 7cf0df1..91ab7fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
@@ -25,13 +25,12 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerConstants;
+import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet;
-import org.apache.hadoop.hdfs.tools.DiskBalancer;
-import org.apache.hadoop.hdfs.server.diskbalancer.datamodel
-    .DiskBalancerDataNode;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.Step;
+import org.apache.hadoop.hdfs.tools.DiskBalancer;
 import org.codehaus.jackson.map.ObjectMapper;
 
 import java.io.IOException;
@@ -122,11 +121,14 @@ public class PlanCommand extends Command {
     setNodesToProcess(node);
     populatePathNames(node);
 
+    NodePlan plan = null;
     List<NodePlan> plans = getCluster().computePlan(this.thresholdPercentage);
     setPlanParams(plans);
 
-    LOG.info("Writing plan to : {}", getOutputPath());
-    System.out.printf("Writing plan to : %s%n", getOutputPath());
+    if (plans.size() > 0) {
+      plan = plans.get(0);
+    }
+
 
     try (FSDataOutputStream beforeStream = create(String.format(
         DiskBalancer.BEFORE_TEMPLATE,
@@ -135,18 +137,24 @@ public class PlanCommand extends Command {
           .getBytes(StandardCharsets.UTF_8));
     }
 
-    try (FSDataOutputStream planStream = create(String.format(
-        DiskBalancer.PLAN_TEMPLATE,
-        cmd.getOptionValue(DiskBalancer.PLAN)))) {
-      planStream.write(getPlan(plans).getBytes(StandardCharsets.UTF_8));
+    if (plan != null) {
+      LOG.info("Writing plan to : {}", getOutputPath());
+      try (FSDataOutputStream planStream = create(String.format(
+          DiskBalancer.PLAN_TEMPLATE,
+          cmd.getOptionValue(DiskBalancer.PLAN)))) {
+        planStream.write(plan.toJson().getBytes(StandardCharsets.UTF_8));
+      }
+    } else {
+      LOG.info("No plan generated. DiskBalancing not needed for node: {} " +
+              "threshold used: {}", cmd.getOptionValue(DiskBalancer.PLAN),
+          this.thresholdPercentage);
     }
 
-    if (cmd.hasOption(DiskBalancer.VERBOSE)) {
+    if (cmd.hasOption(DiskBalancer.VERBOSE) && plans.size() > 0) {
       printToScreen(plans);
     }
   }
 
-
   /**
    * Reads the Physical path of the disks we are balancing. This is needed to
    * make the disk balancer human friendly and not used in balancing.
@@ -210,14 +218,21 @@ public class PlanCommand extends Command {
   static private void printToScreen(List<NodePlan> plans) {
     System.out.println("\nPlan :\n");
     System.out.println(StringUtils.repeat("=", 80));
-    System.out.println("Source Disk\t\t Dest.Disk\t\t Move Size\t Type\n ");
+
+    System.out.println(
+        StringUtils.center("Source Disk", 30) +
+        StringUtils.center("Dest.Disk", 30) +
+        StringUtils.center("Size", 10) +
+        StringUtils.center("Type", 10));
+
     for (NodePlan plan : plans) {
       for (Step step : plan.getVolumeSetPlans()) {
-        System.out.println(String.format("%s\t%s\t%s\t%s",
-            step.getSourceVolume().getPath(),
-            step.getDestinationVolume().getPath(),
-            step.getSizeString(step.getBytesToMove()),
-            step.getDestinationVolume().getStorageType()));
+        System.out.println(String.format("%s %s %s %s",
+            StringUtils.center(step.getSourceVolume().getPath(), 30),
+            StringUtils.center(step.getDestinationVolume().getPath(), 30),
+            StringUtils.center(step.getSizeString(step.getBytesToMove()), 10),
+            StringUtils.center(step.getDestinationVolume().getStorageType(),
+                10)));
       }
     }
 
@@ -243,16 +258,4 @@ public class PlanCommand extends Command {
       }
     }
   }
-
-  /**
-   * Returns a Json represenation of the plans.
-   *
-   * @param plan - List of plans.
-   * @return String.
-   * @throws IOException
-   */
-  private String getPlan(List<NodePlan> plan) throws IOException {
-    ObjectMapper mapper = new ObjectMapper();
-    return mapper.writeValueAsString(plan);
-  }
 }

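Removing getPlan also removes an asymmetry visible in the diff: the old helper serialized the whole List<NodePlan> with mapper.writeValueAsString, producing a JSON array, while cancelPlan and submitPlan parse the plan file back as a single NodePlan. Writing plan.toJson() for the first (and only expected) plan keeps the writer and the readers symmetric. Assuming toJson is simply the inverse of parseJson, a sketch of the instance method inside NodePlan:

    /** Hypothetical inverse of parseJson, as called by PlanCommand. */
    public String toJson() throws IOException {
      ObjectMapper mapper = new ObjectMapper();
      return mapper.writeValueAsString(this);
    }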

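The printToScreen changes replace tab-separated columns, whose alignment broke on long volume paths, with fixed-width columns padded by commons-lang StringUtils.center. A standalone sketch of the same layout, with hypothetical disk paths and sizes:

    import org.apache.commons.lang.StringUtils;

    public class PlanReportDemo {
      public static void main(String[] args) {
        // Header row: four fixed-width, center-padded columns.
        System.out.println(
            StringUtils.center("Source Disk", 30) +
            StringUtils.center("Dest.Disk", 30) +
            StringUtils.center("Size", 10) +
            StringUtils.center("Type", 10));
        // One data row with hypothetical values.
        System.out.println(String.format("%s %s %s %s",
            StringUtils.center("/data/disk1", 30),
            StringUtils.center("/data/disk2", 30),
            StringUtils.center("10 GB", 10),
            StringUtils.center("DISK", 10)));
      }
    }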