You are viewing a plain text version of this content; the canonical HTML version is available in the mailing list archive.
Posted to common-commits@hadoop.apache.org by ay...@apache.org on 2020/09/18 12:01:53 UTC
[hadoop] branch trunk updated: HDFS-15438. Setting
dfs.disk.balancer.max.disk.errors = 0 will fail the block copy. Contributed
by AMC-team.
This is an automated email from the ASF dual-hosted git repository.
ayushsaxena pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/trunk by this push:
new 2377df3 HDFS-15438. Setting dfs.disk.balancer.max.disk.errors = 0 will fail the block copy. Contributed by AMC-team.
2377df3 is described below
commit 2377df38ad2396f8b42d60031001a25d9d6fa32e
Author: Ayush Saxena <ay...@apache.org>
AuthorDate: Fri Sep 18 17:28:11 2020 +0530
HDFS-15438. Setting dfs.disk.balancer.max.disk.errors = 0 will fail the block copy. Contributed by AMC-team.
---
.../java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java | 4 ++--
.../org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java | 2 +-
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
index ac10e8f..d180f0b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
@@ -902,7 +902,7 @@ public class DiskBalancer {
*/
private ExtendedBlock getBlockToCopy(FsVolumeSpi.BlockIterator iter,
DiskBalancerWorkItem item) {
- while (!iter.atEnd() && item.getErrorCount() < getMaxError(item)) {
+ while (!iter.atEnd() && item.getErrorCount() <= getMaxError(item)) {
try {
ExtendedBlock block = iter.nextBlock();
if(null == block){
@@ -923,7 +923,7 @@ public class DiskBalancer {
item.incErrorCount();
}
}
- if (item.getErrorCount() >= getMaxError(item)) {
+ if (item.getErrorCount() > getMaxError(item)) {
item.setErrMsg("Error count exceeded.");
LOG.info("Maximum error count exceeded. Error count: {} Max error:{} ",
item.getErrorCount(), item.getMaxDiskErrors());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
index 6282024..3a17450 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
@@ -757,7 +757,7 @@ public class TestDiskBalancer {
}
}, 1000, 100000);
- assertTrue("Disk balancer operation hit max errors!", errorCount.get() <
+ assertTrue("Disk balancer operation hit max errors!", errorCount.get() <=
DFSConfigKeys.DFS_DISK_BALANCER_MAX_DISK_ERRORS_DEFAULT);
createWorkPlanLatch.await();
removeDiskLatch.await();
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org