Posted to common-commits@hadoop.apache.org by zh...@apache.org on 2015/05/26 21:34:51 UTC

[21/50] [abbrv] hadoop git commit: HDFS-8391. NN should consider current EC tasks handling count from DN while assigning new tasks. Contributed by Uma Maheswara Rao G.

HDFS-8391. NN should consider current EC tasks handling count from DN while assigning new tasks. Contributed by Uma Maheswara Rao G.
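Background for the change: the DataNode already tracks in-flight replication transfers in an AtomicInteger (xmitsInProgress) that it reports to the NameNode, and the NN uses that count when assigning new work. This patch makes erasure-coding reconstruction tasks bump the same counter so EC work is visible to that scheduling decision. Below is a minimal, self-contained sketch of the counting pattern; the xmitsInProgress field name comes from the diff, but the surrounding class and method names are illustrative, not Hadoop's actual API.

import java.util.concurrent.atomic.AtomicInteger;

// Illustrative sketch only: every replication/EC reconstruction task
// increments the counter on entry and decrements it in a finally block,
// so the value read by the heartbeat path always reflects the number of
// tasks currently in flight, even when a task fails.
class XmitTracker {
  private final AtomicInteger xmitsInProgress = new AtomicInteger(0);

  // Reported to the NameNode so it can throttle new task assignments.
  int getXmitsInProgress() {
    return xmitsInProgress.get();
  }

  void runTask(Runnable task) {
    xmitsInProgress.getAndIncrement();
    try {
      task.run();
    } finally {
      // Decrement even if the task throws, mirroring the try/finally
      // brackets added in the diff below.
      xmitsInProgress.getAndDecrement();
    }
  }
}

The try/finally bracket is the key point of the patch: both the replication DataTransfer.run() and the EC recovery run() now increment on entry and decrement on exit, so the count cannot drift when a transfer or reconstruction aborts.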


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c99c3379
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c99c3379
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c99c3379

Branch: refs/heads/HDFS-7285
Commit: c99c3379282779e11ebda88d845bb89407f2f350
Parents: bba15e0
Author: Uma Maheswara Rao G <um...@apache.org>
Authored: Thu May 14 11:27:48 2015 +0530
Committer: Zhe Zhang <zh...@cloudera.com>
Committed: Tue May 26 12:01:51 2015 -0700

----------------------------------------------------------------------
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt         |  3 +++
 .../hadoop/hdfs/server/datanode/DataNode.java    | 19 +++++++++++++++++--
 .../erasurecode/ErasureCodingWorker.java         |  4 +++-
 3 files changed, 23 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c99c3379/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 190ddd6..1456434 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -212,3 +212,6 @@
 
     HDFS-8364. Erasure coding: fix some minor bugs in EC CLI
     (Walter Su via vinayakumarb)
+
+    HDFS-8391. NN should consider current EC tasks handling count from DN while 
+    assigning new tasks. (umamahesh)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c99c3379/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 5eca2c7..a1a80ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1909,6 +1909,21 @@ public class DataNode extends ReconfigurableBase
   int getXmitsInProgress() {
     return xmitsInProgress.get();
   }
+  
+  /**
+   * Increments the xmitsInProgress count. xmitsInProgress count represents the
+   * number of data replication/reconstruction tasks running currently.
+   */
+  public void incrementXmitsInProgress() {
+    xmitsInProgress.getAndIncrement();
+  }
+
+  /**
+   * Decrements the xmitsInProgress count
+   */
+  public void decrementXmitsInProgress() {
+    xmitsInProgress.getAndDecrement();
+  }
 
   private void reportBadBlock(final BPOfferService bpos,
       final ExtendedBlock block, final String msg) {
@@ -2128,7 +2143,7 @@ public class DataNode extends ReconfigurableBase
      */
     @Override
     public void run() {
-      xmitsInProgress.getAndIncrement();
+      incrementXmitsInProgress();
       Socket sock = null;
       DataOutputStream out = null;
       DataInputStream in = null;
@@ -2207,7 +2222,7 @@ public class DataNode extends ReconfigurableBase
         // check if there are any disk problem
         checkDiskErrorAsync();
       } finally {
-        xmitsInProgress.getAndDecrement();
+        decrementXmitsInProgress();
         IOUtils.closeStream(blockSender);
         IOUtils.closeStream(out);
         IOUtils.closeStream(in);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c99c3379/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
index eedb191..7b3c24d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
@@ -312,6 +312,7 @@ public final class ErasureCodingWorker {
 
     @Override
     public void run() {
+      datanode.incrementXmitsInProgress();
       try {
         // Store the indices of successfully read source
         // This will be updated after doing real read.
@@ -397,8 +398,9 @@ public final class ErasureCodingWorker {
         // Currently we don't check the acks for packets, this is similar as
         // block replication.
       } catch (Throwable e) {
-        LOG.warn("Failed to recover striped block: " + blockGroup);
+        LOG.warn("Failed to recover striped block: " + blockGroup, e);
       } finally {
+        datanode.decrementXmitsInProgress();
         // close block readers
         for (StripedReader stripedReader : stripedReaders) {
           closeBlockReader(stripedReader.blockReader);