Posted to common-commits@hadoop.apache.org by ar...@apache.org on 2016/06/24 01:36:34 UTC

[38/49] hadoop git commit: HDFS-10478. DiskBalancer: resolve volume path names. Contributed by Anu Engineer.

HDFS-10478. DiskBalancer: resolve volume path names. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/64ccb232
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/64ccb232
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/64ccb232

Branch: refs/heads/HDFS-1312
Commit: 64ccb232ccf204991a28fa0211917fa935ad30c5
Parents: 47dcb0f
Author: Anu Engineer <ae...@apache.org>
Authored: Tue Jun 7 10:29:35 2016 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Thu Jun 23 18:21:08 2016 -0700

----------------------------------------------------------------------
 .../diskbalancer/command/PlanCommand.java       | 37 +++++++++++++++
 .../connectors/DBNameNodeConnector.java         |  1 -
 .../hdfs/server/diskbalancer/TestPlanner.java   | 47 ++++++++++++++++++++
 3 files changed, 84 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/64ccb232/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
index d346c84..7cf0df1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
@@ -23,6 +23,10 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
+import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerConstants;
+import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
+import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet;
 import org.apache.hadoop.hdfs.tools.DiskBalancer;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel
     .DiskBalancerDataNode;
@@ -32,7 +36,9 @@ import org.codehaus.jackson.map.ObjectMapper;
 
 import java.io.IOException;
 import java.nio.charset.StandardCharsets;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
 /**
  * Class that implements Plan Command.
@@ -111,7 +117,10 @@ public class PlanCommand extends Command {
           cmd.getOptionValue(DiskBalancer.PLAN));
     }
     this.thresholdPercentage = getThresholdPercentage(cmd);
+
+    LOG.debug("threshold Percentage is {}", this.thresholdPercentage);
     setNodesToProcess(node);
+    populatePathNames(node);
 
     List<NodePlan> plans = getCluster().computePlan(this.thresholdPercentage);
     setPlanParams(plans);
@@ -137,6 +146,32 @@ public class PlanCommand extends Command {
     }
   }
 
+
+  /**
+   * Reads the physical paths of the disks being balanced. The paths are
+   * used only to make the plan output human readable; they play no part
+   * in the balancing computation itself.
+   *
+   * @param node - Disk Balancer Node.
+   * @throws IOException if the volume names cannot be read from the datanode.
+   */
+  private void populatePathNames(DiskBalancerDataNode node) throws IOException {
+    String dnAddress = node.getDataNodeIP() + ":" + node.getDataNodePort();
+    ClientDatanodeProtocol dnClient = getDataNodeProxy(dnAddress);
+    String volumeNameJson = dnClient.getDiskBalancerSetting(
+        DiskBalancerConstants.DISKBALANCER_VOLUME_NAME);
+    ObjectMapper mapper = new ObjectMapper();
+
+    @SuppressWarnings("unchecked")
+    Map<String, String> volumeMap =
+        mapper.readValue(volumeNameJson, HashMap.class);
+    for (DiskBalancerVolumeSet set : node.getVolumeSets().values()) {
+      for (DiskBalancerVolume vol : set.getVolumes()) {
+        if (volumeMap.containsKey(vol.getUuid())) {
+          vol.setPath(volumeMap.get(vol.getUuid()));
+        }
+      }
+    }
+  }
+
   /**
    * Gets extended help for this command.
    *
@@ -198,9 +233,11 @@ public class PlanCommand extends Command {
     for (NodePlan plan : plans) {
       for (Step step : plan.getVolumeSetPlans()) {
         if (this.bandwidth > 0) {
+          LOG.debug("Setting bandwidth to {}", this.bandwidth);
           step.setBandwidth(this.bandwidth);
         }
         if (this.maxError > 0) {
+          LOG.debug("Setting max error to {}", this.maxError);
           step.setMaxDiskErrors(this.maxError);
         }
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/64ccb232/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java
index acf1fa1..b044baf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java
@@ -155,7 +155,6 @@ class DBNameNodeConnector implements ClusterConnector {
           .READ_ONLY_SHARED) || report.isFailed());
       volume.setStorageType(storage.getStorageType().name());
       volume.setIsTransient(storage.getStorageType().isTransient());
-      //volume.setPath(storage.getVolumePath());
       node.addVolume(volume);
     }
 

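Note that the line removed above was already commented out, so the connector's behavior does not change: with this patch the volume path is resolved on the client side instead, by the new populatePathNames() in PlanCommand (first diff above), which asks each datanode for its UUID-to-path mapping over ClientDatanodeProtocol rather than deriving it from the NameNode's storage report.
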
http://git-wip-us.apache.org/repos/asf/hadoop/blob/64ccb232/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestPlanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestPlanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestPlanner.java
index 77c2aa3..d01e8ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestPlanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestPlanner.java
@@ -456,4 +456,51 @@ public class TestPlanner {
     assertEquals(newPlan.getVolumeSetPlans().size(),
         copy.getVolumeSetPlans().size());
   }
+
+  @Test
+  public void testGreedyPlannerLargeDisksWithData() throws Exception {
+    NullConnector nullConnector = new NullConnector();
+    DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
+
+    DiskBalancerDataNode node =
+        new DiskBalancerDataNode(UUID.randomUUID().toString());
+
+    // Two disk capacities (1968 and 1563), with varying amounts of used space.
+    DiskBalancerVolume volume1 = createVolume("volume1", 1968, 88);
+    DiskBalancerVolume volume2 = createVolume("volume2", 1968, 88);
+    DiskBalancerVolume volume3 = createVolume("volume3", 1968, 111);
+    DiskBalancerVolume volume4 = createVolume("volume4", 1968, 111);
+    DiskBalancerVolume volume5 = createVolume("volume5", 1968, 30);
+    DiskBalancerVolume volume6 = createVolume("volume6", 1563, 30);
+    DiskBalancerVolume volume7 = createVolume("volume7", 1563, 30);
+    DiskBalancerVolume volume8 = createVolume("volume8", 1563, 30);
+    DiskBalancerVolume volume9 = createVolume("volume9", 1563, 210);
+
+    node.addVolume(volume1);
+    node.addVolume(volume2);
+    node.addVolume(volume3);
+
+    node.addVolume(volume4);
+    node.addVolume(volume5);
+    node.addVolume(volume6);
+
+    node.addVolume(volume7);
+    node.addVolume(volume8);
+    node.addVolume(volume9);
+
+    nullConnector.addNode(node);
+    cluster.readClusterInfo();
+    assertEquals(1, cluster.getNodes().size());
+
+    GreedyPlanner planner = new GreedyPlanner(1.0f, node);
+    NodePlan plan = new NodePlan(node.getDataNodeName(),
+        node.getDataNodePort());
+    planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
+
+    assertTrue(plan.getVolumeSetPlans().size() > 2);
+  }
 }
\ No newline at end of file

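The createVolume() helper used by the new test is defined elsewhere in TestPlanner and is not part of this diff. For orientation only, a plausible sketch of such a helper follows; setPath() and getUuid() appear in the PlanCommand diff and setStorageType() in the connector diff, but setCapacity()/setUsed() and the units of the two numeric arguments are assumptions.

  // Hypothetical helper: builds a volume with the given path, capacity,
  // and used space. Storage type "SSD" matches the volume set the test
  // fetches via node.getVolumeSets().get("SSD").
  private DiskBalancerVolume createVolume(String path, long capacity,
      long used) {
    DiskBalancerVolume volume = new DiskBalancerVolume();
    volume.setPath(path);
    volume.setUuid(UUID.randomUUID().toString());
    volume.setStorageType("SSD");
    volume.setCapacity(capacity); // assumed setter and unit
    volume.setUsed(used);         // assumed setter and unit
    return volume;
  }
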

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org