You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by li...@apache.org on 2021/03/20 03:38:10 UTC
[hadoop] branch trunk updated: HDFS-15904 : De-flake
TestBalancer#testBalancerWithSortTopNodes() (#2785)
This is an automated email from the ASF dual-hosted git repository.
liuml07 pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/trunk by this push:
new 261191c HDFS-15904 : De-flake TestBalancer#testBalancerWithSortTopNodes() (#2785)
261191c is described below
commit 261191cbc06cf28e656085e7e6633e80fc1f17a9
Author: Viraj Jasani <vj...@apache.org>
AuthorDate: Sat Mar 20 09:07:44 2021 +0530
HDFS-15904 : De-flake TestBalancer#testBalancerWithSortTopNodes() (#2785)
Contributed by Viraj Jasani.
Signed-off-by: Mingliang Liu <li...@apache.org>
Signed-off-by: Ayush Saxena <ay...@apache.org>
---
.../hadoop/hdfs/server/balancer/Balancer.java | 24 +++++++++++++++++----
.../hadoop/hdfs/server/balancer/Dispatcher.java | 7 +-----
.../hadoop/hdfs/server/balancer/TestBalancer.java | 25 ++++++++++++++++------
3 files changed, 40 insertions(+), 16 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index 6734c97..0024ba5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -634,10 +634,10 @@ public class Balancer {
}
static class Result {
- final ExitStatus exitStatus;
- final long bytesLeftToMove;
- final long bytesBeingMoved;
- final long bytesAlreadyMoved;
+ private final ExitStatus exitStatus;
+ private final long bytesLeftToMove;
+ private final long bytesBeingMoved;
+ private final long bytesAlreadyMoved;
Result(ExitStatus exitStatus, long bytesLeftToMove, long bytesBeingMoved,
long bytesAlreadyMoved) {
@@ -647,6 +647,22 @@ public class Balancer {
this.bytesAlreadyMoved = bytesAlreadyMoved;
}
+ public ExitStatus getExitStatus() {
+ return exitStatus;
+ }
+
+ public long getBytesLeftToMove() {
+ return bytesLeftToMove;
+ }
+
+ public long getBytesBeingMoved() {
+ return bytesBeingMoved;
+ }
+
+ public long getBytesAlreadyMoved() {
+ return bytesAlreadyMoved;
+ }
+
void print(int iteration, NameNodeConnector nnc, PrintStream out) {
out.printf("%-24s %10d %19s %18s %17s %s%n",
DateFormat.getDateTimeInstance().format(new Date()), iteration,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
index c34e6a3..17f0d8f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
@@ -1158,12 +1158,7 @@ public class Dispatcher {
p.proxySource.removePendingBlock(p);
return;
}
- moveExecutor.execute(new Runnable() {
- @Override
- public void run() {
- p.dispatch();
- }
- });
+ moveExecutor.execute(p::dispatch);
}
public boolean dispatchAndCheckContinue() throws InterruptedException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index b94cebc..343faf6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -1024,14 +1024,14 @@ public class TestBalancer {
// clean all lists
b.resetData(conf);
- if (r.exitStatus == ExitStatus.IN_PROGRESS) {
+ if (r.getExitStatus() == ExitStatus.IN_PROGRESS) {
done = false;
- } else if (r.exitStatus != ExitStatus.SUCCESS) {
+ } else if (r.getExitStatus() != ExitStatus.SUCCESS) {
//must be an error status, return.
- return r.exitStatus.getExitCode();
+ return r.getExitStatus().getExitCode();
} else {
if (iteration > 0) {
- assertTrue(r.bytesAlreadyMoved > 0);
+ assertTrue(r.getBytesAlreadyMoved() > 0);
}
}
}
@@ -1657,7 +1657,7 @@ public class TestBalancer {
// When a block move is not canceled in 2 seconds properly and then
// a block is moved unexpectedly, IN_PROGRESS will be reported.
assertEquals("We expect ExitStatus.NO_MOVE_PROGRESS to be reported.",
- ExitStatus.NO_MOVE_PROGRESS, r.exitStatus);
+ ExitStatus.NO_MOVE_PROGRESS, r.getExitStatus());
}
} finally {
for (NameNodeConnector nnc : connectors) {
@@ -2297,7 +2297,20 @@ public class TestBalancer {
maxUsage = Math.max(maxUsage, datanodeReport[i].getDfsUsed());
}
- assertEquals(200, balancerResult.bytesAlreadyMoved);
+ // The 95% usage DN will have 9 blocks of 100B and 1 block of 50B - all for the same file.
+ // The HDFS balancer will choose a block to move from this node randomly. More likely it will
+ // be a 100B block. Since 100B is greater than DFS_BALANCER_MAX_SIZE_TO_MOVE_KEY which is 99L,
+ // it will stop here. Total bytes moved from this 95% DN will be 1 block of size 100B.
+ // However, chances are the first block selected to be moved from this 95% DN is the 50B block.
+ // After this block is moved, the total moved size so far would be 50B which is smaller than
+ // DFS_BALANCER_MAX_SIZE_TO_MOVE_KEY (99L), hence it will try to move another block.
+ // The second block will always be of size 100B. So total bytes moved from this 95% DN will be
+ // 2 blocks of size (100B + 50B) 150B.
+ // Hence, overall total blocks moved by the HDFS balancer would be either of these 2 options:
+ // a) 2 blocks of total size (100B + 100B)
+ // b) 3 blocks of total size (50B + 100B + 100B)
+ assertTrue(balancerResult.getBytesAlreadyMoved() == 200
+ || balancerResult.getBytesAlreadyMoved() == 250);
// 100% and 95% used nodes will be balanced, so top used will be 900
assertEquals(900, maxUsage);
}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org