Posted to common-commits@hadoop.apache.org by ay...@apache.org on 2020/05/03 13:55:57 UTC

[hadoop] branch branch-3.3 updated: Erasure Coding: metric xmitsInProgress can become negative. Contributed by maobaolong and Toshihiko Uchida.

This is an automated email from the ASF dual-hosted git repository.

ayushsaxena pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
     new df16146  Erasure Coding: metric xmitsInProgress can become negative. Contributed by maobaolong and Toshihiko Uchida.
df16146 is described below

commit df16146e7f181d8da991fc195efa0f1983612120
Author: Ayush Saxena <ay...@apache.org>
AuthorDate: Sun May 3 19:09:14 2020 +0530

    Erasure Coding: metric xmitsInProgress can become negative. Contributed by maobaolong and Toshihiko Uchida.
---
 .../hdfs/server/datanode/erasurecode/ErasureCodingWorker.java       | 4 ++++
 .../hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java | 6 +++++-
 .../hdfs/server/datanode/erasurecode/StripedReconstructor.java      | 4 ++++
 .../java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java     | 6 ++++++
 4 files changed, 19 insertions(+), 1 deletion(-)
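
For context, the root cause is an asymmetry in the xmit accounting: when a
reconstruction task is submitted, its xmits are scaled by the configurable
xmit weight (dfs.datanode.ec.reconstruction.xmits.weight) and floored at 1,
but the decrement on completion used the raw getXmits() value, so any weight
below 1 eventually drives xmitsInProgress negative. The standalone sketch
below only illustrates that arithmetic; the class and variable names are
invented for this example and are not Hadoop code.

    public class XmitCounterSketch {
      public static void main(String[] args) {
        // Assumed setup: weight 0.5 and 6 raw xmits, e.g. the data units
        // of an RS-6-3 block group.
        float xmitWeight = 0.5f;
        int xmits = 6;
        int weighted = Math.max((int) (xmits * xmitWeight), 1); // = 3

        int counter = 0;
        counter += weighted;  // increment on task submission (weighted)
        counter -= xmits;     // old decrement on completion (unweighted)
        System.out.println("before the fix: " + counter); // prints -3

        counter = 0;
        counter += weighted;  // increment on task submission (weighted)
        counter -= weighted;  // fixed decrement: same weighted value
        System.out.println("after the fix: " + counter);  // prints 0
      }
    }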

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
index f9063b7..f4506cf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
@@ -170,4 +170,8 @@ public final class ErasureCodingWorker {
     stripedReconstructionPool.shutdown();
     stripedReadPool.shutdown();
   }
+
+  public float getXmitWeight() {
+    return xmitWeight;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
index 29c0078..1af2380 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
@@ -67,7 +67,11 @@ class StripedBlockReconstructor extends StripedReconstructor
       LOG.warn("Failed to reconstruct striped block: {}", getBlockGroup(), e);
       getDatanode().getMetrics().incrECFailedReconstructionTasks();
     } finally {
-      getDatanode().decrementXmitsInProgress(getXmits());
+      float xmitWeight = getErasureCodingWorker().getXmitWeight();
+      // If the weighted xmits is smaller than 1, xmitsSubmitted should be set
+      // to 1, because if it were zero we could not measure the xmits submitted.
+      int xmitsSubmitted = Math.max((int) (getXmits() * xmitWeight), 1);
+      getDatanode().decrementXmitsInProgress(xmitsSubmitted);
       final DataNodeMetrics metrics = getDatanode().getMetrics();
       metrics.incrECReconstructionTasks();
       metrics.incrECReconstructionBytesRead(getBytesRead());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java
index a1f4c7f..4c8be82 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java
@@ -275,4 +275,8 @@ abstract class StripedReconstructor {
   DataNode getDatanode() {
     return datanode;
   }
+
+  public ErasureCodingWorker getErasureCodingWorker() {
+    return erasureCodingWorker;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
index 2abfff7..b119e78 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
@@ -514,6 +514,8 @@ public class TestReconstructStripedFile {
 
   @Test(timeout = 180000)
   public void testErasureCodingWorkerXmitsWeight() throws Exception {
+    testErasureCodingWorkerXmitsWeight(0.5f,
+        (int) (ecPolicy.getNumDataUnits() * 0.5f));
     testErasureCodingWorkerXmitsWeight(1f, ecPolicy.getNumDataUnits());
     testErasureCodingWorkerXmitsWeight(0f, 1);
     testErasureCodingWorkerXmitsWeight(10f, 10 * ecPolicy.getNumDataUnits());
@@ -567,6 +569,10 @@ public class TestReconstructStripedFile {
     } finally {
       barrier.await();
       DataNodeFaultInjector.set(oldInjector);
+      for (final DataNode curDn : cluster.getDataNodes()) {
+        GenericTestUtils.waitFor(() -> curDn.getXceiverCount() <= 1, 10, 60000);
+        assertEquals(0, curDn.getXmitsInProgress());
+      }
     }
   }
 }
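
The test additions above pin down both the weighted submission count for each
weight and that xmitsInProgress drains back to zero once reconstruction
finishes. As a quick standalone cross-check of the expected counts per weight
(illustrative code, assuming the task's raw xmits equal the 6 data units of
RS-6-3, which is what the test expectations suggest):

    public class XmitWeightTable {
      public static void main(String[] args) {
        int numDataUnits = 6; // assumed RS-6-3 policy
        // Expected weighted xmits: 0.0f -> 1, 0.5f -> 3, 1.0f -> 6, 10.0f -> 60
        for (float weight : new float[] {0.0f, 0.5f, 1.0f, 10.0f}) {
          int weighted = Math.max((int) (numDataUnits * weight), 1);
          System.out.println(weight + " -> " + weighted);
        }
      }
    }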


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org