Posted to common-commits@hadoop.apache.org by ae...@apache.org on 2016/06/13 21:16:38 UTC

hadoop git commit: HDFS-10518. DiskBalancer: Pretty-print json in Query command. Contributed by Anu Engineer.

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-1312 84e8c0e27 -> 9d3bb1544


HDFS-10518. DiskBalancer: Pretty-print json in Query command. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9d3bb154
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9d3bb154
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9d3bb154

Branch: refs/heads/HDFS-1312
Commit: 9d3bb1544acdca45a1a3e1f9cfda2a99adf0d9f8
Parents: 84e8c0e
Author: Anu Engineer <ae...@apache.org>
Authored: Mon Jun 13 14:11:23 2016 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Mon Jun 13 14:11:23 2016 -0700

----------------------------------------------------------------------
 .../server/datanode/DiskBalancerWorkItem.java   | 42 ++++++++++++++++
 .../server/datanode/DiskBalancerWorkStatus.java |  2 +
 .../hdfs/server/datanode/DiskBalancer.java      | 53 ++++++++++++++++++--
 .../diskbalancer/command/QueryCommand.java      |  5 +-
 .../TestDiskBalancerWithMockMover.java          | 20 ++++++++
 5 files changed, 116 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d3bb154/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java
index fe908d8..f46a987 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java
@@ -34,6 +34,8 @@ import java.io.IOException;
 @InterfaceStability.Unstable
 @JsonInclude(JsonInclude.Include.NON_DEFAULT)
 public class DiskBalancerWorkItem {
+  private long startTime;
+  private long secondsElapsed;
   private long bytesToCopy;
   private long bytesCopied;
   private long errorCount;
@@ -242,4 +244,44 @@ public class DiskBalancerWorkItem {
   public void setBandwidth(long bandwidth) {
     this.bandwidth = bandwidth;
   }
+
+
+  /**
+   * Returns the start time of execution.
+   * @return startTime
+   */
+  public long getStartTime() {
+    return startTime;
+  }
+
+  /**
+   * Sets the start time.
+   * @param startTime - Timestamp for the start of execution.
+   */
+  public void setStartTime(long startTime) {
+    this.startTime = startTime;
+  }
+
+  /**
+   * Gets the number of seconds elapsed from the start time.
+   *
+   * We need this because of time skews. The client's current time may
+   * not match the server's time stamp, hence the elapsed seconds
+   * cannot be computed from startTime alone.
+   *
+   * @return seconds elapsed from start time.
+   */
+  public long getSecondsElapsed() {
+    return secondsElapsed;
+  }
+
+  /**
+   * Sets the number of seconds elapsed.
+   *
+   * This is updated whenever we update the other counters.
+   * @param secondsElapsed  - seconds elapsed.
+   */
+  public void setSecondsElapsed(long secondsElapsed) {
+    this.secondsElapsed = secondsElapsed;
+  }
 }
\ No newline at end of file

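The time-skew note in the new getSecondsElapsed() javadoc is the reason the
field exists at all: the datanode computes the elapsed time on its own clock
and ships the result, rather than letting the client subtract a server-side
startTime from its own, possibly skewed, clock. A minimal sketch of the
failure mode, with made-up timestamps:

    public class ClockSkewSketch {
      public static void main(String[] args) {
        long serverStart = 1000000L;             // stamped by the datanode
        long serverNow   = serverStart + 5000L;  // 5 seconds of real work
        long clientNow   = serverNow + 30000L;   // client clock 30s ahead

        // Wrong: mixes two clocks and reports 35 seconds elapsed.
        long skewed = (clientNow - serverStart) / 1000;

        // Right: computed on the datanode's clock and reported directly,
        // which is what setSecondsElapsed() records.
        long reported = (serverNow - serverStart) / 1000;

        System.out.println("skewed=" + skewed + "s reported=" + reported + "s");
      }
    }
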
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d3bb154/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java
index ca5e5f0..1f62f47 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java
@@ -24,6 +24,7 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.SerializationConfig;
 
 import static org.codehaus.jackson.map.type.TypeFactory.defaultInstance;
 
@@ -128,6 +129,7 @@ public class DiskBalancerWorkStatus {
    **/
   public String currentStateString() throws IOException {
     ObjectMapper mapper = new ObjectMapper();
+    mapper.enable(SerializationConfig.Feature.INDENT_OUTPUT);
     return mapper.writeValueAsString(currentState);
   }
 

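The single mapper.enable(...) line above is the whole fix: it switches
Jackson from compact single-line output to indented output. A minimal,
self-contained sketch of the effect, assuming Jackson 1.x (the
org.codehaus.jackson packages imported above) on the classpath:

    import java.io.IOException;
    import java.util.LinkedHashMap;
    import java.util.Map;

    import org.codehaus.jackson.map.ObjectMapper;
    import org.codehaus.jackson.map.SerializationConfig;

    public class IndentOutputSketch {
      public static void main(String[] args) throws IOException {
        Map<String, Object> state = new LinkedHashMap<String, Object>();
        state.put("bytesToCopy", 1024L);
        state.put("bytesCopied", 512L);

        ObjectMapper compact = new ObjectMapper();
        // Prints: {"bytesToCopy":1024,"bytesCopied":512}
        System.out.println(compact.writeValueAsString(state));

        ObjectMapper pretty = new ObjectMapper();
        pretty.enable(SerializationConfig.Feature.INDENT_OUTPUT);
        // Prints the same map with one field per line and indentation.
        System.out.println(pretty.writeValueAsString(state));
      }
    }
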
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d3bb154/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
index 7f768ea..5fde7c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
@@ -552,6 +552,21 @@ public class DiskBalancer {
      * @return FsDatasetSpi
      */
     FsDatasetSpi getDataset();
+
+    /**
+     * Returns time when this plan started executing.
+     *
+     * @return Start time in milliseconds.
+     */
+    long getStartTime();
+
+    /**
+     * Number of seconds elapsed.
+     *
+     * @return time in seconds
+     */
+    long getElapsedSeconds();
+
   }
 
   /**
@@ -622,6 +637,8 @@ public class DiskBalancer {
     private long maxDiskErrors;
     private int poolIndex;
     private AtomicBoolean shouldRun;
+    private long startTime;
+    private long secondsElapsed;
 
     /**
      * Constructs diskBalancerMover.
@@ -897,6 +914,9 @@ public class DiskBalancer {
       FsVolumeSpi source = pair.getSource();
       FsVolumeSpi dest = pair.getDest();
       List<FsVolumeSpi.BlockIterator> poolIters = new LinkedList<>();
+      startTime = Time.now();
+      item.setStartTime(startTime);
+      secondsElapsed = 0;
 
       if (source.isTransientStorage() || dest.isTransientStorage()) {
         return;
@@ -937,7 +957,7 @@ public class DiskBalancer {
             if (block == null) {
               this.setExitFlag();
               LOG.error("No source blocks, exiting the copy. Source: {}, " +
-                      "dest:{}", source.getBasePath(), dest.getBasePath());
+                  "dest:{}", source.getBasePath(), dest.getBasePath());
               continue;
             }
 
@@ -973,9 +993,6 @@ public class DiskBalancer {
                 block.getNumBytes(), source.getBasePath(),
                 dest.getBasePath());
 
-            item.incCopiedSoFar(block.getNumBytes());
-            item.incBlocksCopied();
-
             // Check for the max throughput constraint.
             // We sleep here to keep the promise that we will not
             // copy more than Max MB/sec. we sleep enough time
@@ -984,6 +1001,14 @@ public class DiskBalancer {
             // we exit via Thread Interrupted exception.
             Thread.sleep(computeDelay(block.getNumBytes(), timeUsed, item));
 
+            // We delay updating the counters until after the sleep so
+            // that the reported progress stays consistent with the
+            // throttled copy rate and does not confuse the user.
+            item.incCopiedSoFar(block.getNumBytes());
+            item.incBlocksCopied();
+            secondsElapsed = TimeUnit.MILLISECONDS.toSeconds(Time.now() -
+                startTime);
+            item.setSecondsElapsed(secondsElapsed);
           } catch (IOException ex) {
             LOG.error("Exception while trying to copy blocks. error: {}", ex);
             item.incErrorCount();
@@ -1009,5 +1034,25 @@ public class DiskBalancer {
     public FsDatasetSpi getDataset() {
       return dataset;
     }
+
+    /**
+     * Returns time when this plan started executing.
+     *
+     * @return Start time in milliseconds.
+     */
+    @Override
+    public long getStartTime() {
+      return startTime;
+    }
+
+    /**
+     * Number of seconds elapsed.
+     *
+     * @return time in seconds
+     */
+    @Override
+    public long getElapsedSeconds() {
+      return secondsElapsed;
+    }
   }
 }

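Two things worth noting in the hunks above. First, the counter updates were
moved below the Thread.sleep so progress is only reported once the throttling
delay has been paid. Second, the sleep itself is what caps the copy rate. The
sketch below is not the actual computeDelay implementation, only an
illustration of the idea the comment describes, with hypothetical names and a
cap given in MB/s:

    import java.util.concurrent.TimeUnit;

    public class ThrottleSketch {
      // Hypothetical throttle: given the bytes just copied, the time the
      // copy actually took, and a bandwidth cap in MB/s, return how long
      // to sleep so the average rate stays at or below the cap.
      static long computeDelayMillis(long bytesCopied, long timeUsedMillis,
                                     long maxBandwidthMB) {
        if (maxBandwidthMB <= 0) {
          return 0; // no cap configured, never sleep
        }
        long maxBytesPerSec = maxBandwidthMB * 1024L * 1024L;
        // Minimum wall-clock time this copy should have taken at the cap.
        long expectedMillis =
            TimeUnit.SECONDS.toMillis(1) * bytesCopied / maxBytesPerSec;
        return Math.max(0, expectedMillis - timeUsedMillis);
      }

      public static void main(String[] args) {
        // 64 MB copied in 1 second under a 10 MB/s cap: sleep ~5.4 s more.
        System.out.println(computeDelayMillis(64L * 1024 * 1024, 1000L, 10L));
      }
    }
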
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d3bb154/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
index 3a3b97f..6c759e2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
@@ -46,6 +46,7 @@ public class QueryCommand extends Command {
         " plan running on a given datanode.");
     addValidCommandParameters(DiskBalancer.VERBOSE, "Prints verbose results.");
   }
+
   /**
    * Executes the Client Calls.
    *
@@ -62,7 +63,7 @@ public class QueryCommand extends Command {
     String nodeAddress = nodeName;
 
     // if the string is not name:port format use the default port.
-    if(!nodeName.matches("^.*:\\d$")) {
+    if (!nodeName.matches("^.*:\\d$")) {
       int defaultIPC = NetUtils.createSocketAddr(
           getConf().getTrimmed(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,
               DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_DEFAULT)).getPort();
@@ -76,7 +77,7 @@ public class QueryCommand extends Command {
       System.out.printf("Plan ID: %s %nResult: %s%n", workStatus.getPlanID(),
           workStatus.getResult().toString());
 
-      if(cmd.hasOption(DiskBalancer.VERBOSE)) {
+      if (cmd.hasOption(DiskBalancer.VERBOSE)) {
         System.out.printf("%s", workStatus.currentStateString());
       }
     } catch (DiskBalancerException ex) {

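A subtlety in the name:port check above: \d matches exactly one digit, so
only a node name ending in a single digit after the colon passes the test,
and a name carrying a multi-digit port still falls into the default-port
branch. Presumably ^.*:\d+$ was intended. A minimal demonstration:

    public class PortCheckSketch {
      public static void main(String[] args) {
        String hasPort = "^.*:\\d$";  // the pattern used in QueryCommand
        System.out.println("dn1:9".matches(hasPort));     // true
        System.out.println("dn1:9867".matches(hasPort));  // false
        System.out.println("dn1".matches(hasPort));       // false
      }
    }
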
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d3bb154/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java
index 491fccb..b73b290 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java
@@ -436,6 +436,26 @@ public class TestDiskBalancerWithMockMover {
       return this.dataset;
     }
 
+    /**
+     * Returns time when this plan started executing.
+     *
+     * @return Start time in milliseconds.
+     */
+    @Override
+    public long getStartTime() {
+      return 0;
+    }
+
+    /**
+     * Number of seconds elapsed.
+     *
+     * @return time in seconds
+     */
+    @Override
+    public long getElapsedSeconds() {
+      return 0;
+    }
+
     public int getRunCount() {
       synchronized (runCount) {
         LOG.info("Run count : " + runCount.intValue());


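For context, the user-visible effect lands in the verbose query path. A
hypothetical session (the flag corresponds to the VERBOSE option registered
in QueryCommand; the JSON is an abridged illustration built from the fields
added above, not captured output):

    $ hdfs diskbalancer -query dn1.example.com:50020 -v
    Plan ID: <planID>
    Result: PLAN_UNDER_PROGRESS
    {
      "bytesToCopy" : 1024,
      "bytesCopied" : 512,
      "startTime" : 1465852283000,
      "secondsElapsed" : 45
    }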