You are viewing a plain-text version of this content; the hyperlink to the canonical (HTML) version was removed during text extraction and is available in the original mailing-list archive.
Posted to common-commits@hadoop.apache.org by we...@apache.org on 2022/03/16 01:32:50 UTC
[hadoop] branch trunk updated: HDFS-16502. Reconfigure Block Invalidate limit (#4064)
This is an automated email from the ASF dual-hosted git repository.
weichiu pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/trunk by this push:
new 1c0bc35 HDFS-16502. Reconfigure Block Invalidate limit (#4064)
1c0bc35 is described below
commit 1c0bc35305aea6ab8037241fab10862615f3e296
Author: Viraj Jasani <vj...@apache.org>
AuthorDate: Wed Mar 16 07:02:29 2022 +0530
HDFS-16502. Reconfigure Block Invalidate limit (#4064)
Signed-off-by: Wei-Chiu Chuang <we...@apache.org>
---
.../server/blockmanagement/DatanodeManager.java | 33 ++++++++++++++--------
.../hadoop/hdfs/server/namenode/NameNode.java | 27 +++++++++++++++++-
.../server/namenode/TestNameNodeReconfigure.java | 31 ++++++++++++++++++++
.../org/apache/hadoop/hdfs/tools/TestDFSAdmin.java | 21 ++++++++------
4 files changed, 91 insertions(+), 21 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index cfb1d83..cb601e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -314,18 +314,12 @@ public class DatanodeManager {
DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); // 5 minutes
this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval
+ 10 * 1000 * heartbeatIntervalSeconds;
- // Effected block invalidate limit is the bigger value between
- // value configured in hdfs-site.xml, and 20 * HB interval.
final int configuredBlockInvalidateLimit = conf.getInt(
DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY,
DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT);
- final int countedBlockInvalidateLimit = 20*(int)(heartbeatIntervalSeconds);
- this.blockInvalidateLimit = Math.max(countedBlockInvalidateLimit,
- configuredBlockInvalidateLimit);
- LOG.info(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY
- + ": configured=" + configuredBlockInvalidateLimit
- + ", counted=" + countedBlockInvalidateLimit
- + ", effected=" + blockInvalidateLimit);
+ // Block invalidate limit also has some dependency on heartbeat interval.
+ // Check setBlockInvalidateLimit().
+ setBlockInvalidateLimit(configuredBlockInvalidateLimit);
this.checkIpHostnameInRegistration = conf.getBoolean(
DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_KEY,
DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_DEFAULT);
@@ -2088,8 +2082,25 @@ public class DatanodeManager {
this.heartbeatRecheckInterval = recheckInterval;
this.heartbeatExpireInterval = 2L * recheckInterval + 10 * 1000
* intervalSeconds;
- this.blockInvalidateLimit = Math.max(20 * (int) (intervalSeconds),
- blockInvalidateLimit);
+ this.blockInvalidateLimit = getBlockInvalidateLimit(blockInvalidateLimit);
+ }
+
+ private int getBlockInvalidateLimitFromHBInterval() {
+ return 20 * (int) heartbeatIntervalSeconds;
+ }
+
+ private int getBlockInvalidateLimit(int configuredBlockInvalidateLimit) {
+ return Math.max(getBlockInvalidateLimitFromHBInterval(), configuredBlockInvalidateLimit);
+ }
+
+ public void setBlockInvalidateLimit(int configuredBlockInvalidateLimit) {
+ final int countedBlockInvalidateLimit = getBlockInvalidateLimitFromHBInterval();
+ // Effected block invalidate limit is the bigger value between
+ // value configured in hdfs-site.xml, and 20 * HB interval.
+ this.blockInvalidateLimit = getBlockInvalidateLimit(configuredBlockInvalidateLimit);
+ LOG.info("{} : configured={}, counted={}, effected={}",
+ DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY, configuredBlockInvalidateLimit,
+ countedBlockInvalidateLimit, this.blockInvalidateLimit);
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 8cd5d25..ef0eef8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -121,6 +121,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NN_NOT_BECOME_ACTIVE_IN_SAFEMODE;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NN_NOT_BECOME_ACTIVE_IN_SAFEMODE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_DEFAULT;
@@ -337,7 +338,8 @@ public class NameNode extends ReconfigurableBase implements
DFS_IMAGE_PARALLEL_LOAD_KEY,
DFS_NAMENODE_AVOID_SLOW_DATANODE_FOR_READ_KEY,
DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY,
- DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY));
+ DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY,
+ DFS_BLOCK_INVALIDATE_LIMIT_KEY));
private static final String USAGE = "Usage: hdfs namenode ["
+ StartupOption.BACKUP.getName() + "] | \n\t["
@@ -2210,6 +2212,8 @@ public class NameNode extends ReconfigurableBase implements
|| (property.equals(DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY))
|| (property.equals(DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY))) {
return reconfigureSlowNodesParameters(datanodeManager, property, newVal);
+ } else if (property.equals(DFS_BLOCK_INVALIDATE_LIMIT_KEY)) {
+ return reconfigureBlockInvalidateLimit(datanodeManager, property, newVal);
} else {
throw new ReconfigurationException(property, newVal, getConf().get(
property));
@@ -2434,6 +2438,27 @@ public class NameNode extends ReconfigurableBase implements
}
}
+ private String reconfigureBlockInvalidateLimit(final DatanodeManager datanodeManager,
+ final String property, final String newVal) throws ReconfigurationException {
+ namesystem.writeLock();
+ try {
+ if (newVal == null) {
+ datanodeManager.setBlockInvalidateLimit(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT);
+ } else {
+ datanodeManager.setBlockInvalidateLimit(Integer.parseInt(newVal));
+ }
+ final String updatedBlockInvalidateLimit =
+ String.valueOf(datanodeManager.getBlockInvalidateLimit());
+ LOG.info("RECONFIGURE* changed blockInvalidateLimit to {}", updatedBlockInvalidateLimit);
+ return updatedBlockInvalidateLimit;
+ } catch (NumberFormatException e) {
+ throw new ReconfigurationException(property, newVal, getConf().get(property), e);
+ } finally {
+ namesystem.writeUnlock();
+ }
+ }
+
+
@Override // ReconfigurableBase
protected Configuration getNewConf() {
return new HdfsConfiguration();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
index 33debdb..2f06918 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
@@ -453,6 +453,37 @@ public class TestNameNodeReconfigure {
assertEquals(10, datanodeManager.getMaxSlowpeerCollectNodes());
}
+ @Test
+ public void testBlockInvalidateLimit() throws ReconfigurationException {
+ final NameNode nameNode = cluster.getNameNode();
+ final DatanodeManager datanodeManager = nameNode.namesystem
+ .getBlockManager().getDatanodeManager();
+
+ assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY + " is not correctly set",
+ customizedBlockInvalidateLimit, datanodeManager.getBlockInvalidateLimit());
+
+ try {
+ nameNode.reconfigureProperty(DFS_BLOCK_INVALIDATE_LIMIT_KEY, "non-numeric");
+ fail("Should not reach here");
+ } catch (ReconfigurationException e) {
+ assertEquals(
+ "Could not change property dfs.block.invalidate.limit from '500' to 'non-numeric'",
+ e.getMessage());
+ }
+
+ nameNode.reconfigureProperty(DFS_BLOCK_INVALIDATE_LIMIT_KEY, "2500");
+
+ assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY + " is not honored after reconfiguration", 2500,
+ datanodeManager.getBlockInvalidateLimit());
+
+ nameNode.reconfigureProperty(DFS_HEARTBEAT_INTERVAL_KEY, "500");
+
+ // 20 * 500 (10000) > 2500
+ // Hence, invalid block limit should be reset to 10000
+ assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY + " is not reconfigured correctly", 10000,
+ datanodeManager.getBlockInvalidateLimit());
+ }
+
@After
public void shutDown() throws IOException {
if (cluster != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index 7859876..0f8c4cd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -31,6 +31,7 @@ import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
@@ -433,15 +434,17 @@ public class TestDFSAdmin {
final List<String> outs = Lists.newArrayList();
final List<String> errs = Lists.newArrayList();
getReconfigurableProperties("namenode", address, outs, errs);
- assertEquals(16, outs.size());
- assertEquals(DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY, outs.get(1));
- assertEquals(DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, outs.get(2));
- assertEquals(DFS_HEARTBEAT_INTERVAL_KEY, outs.get(3));
- assertEquals(DFS_IMAGE_PARALLEL_LOAD_KEY, outs.get(4));
- assertEquals(DFS_NAMENODE_AVOID_SLOW_DATANODE_FOR_READ_KEY, outs.get(5));
- assertEquals(DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY, outs.get(6));
- assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, outs.get(7));
- assertEquals(DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY, outs.get(8));
+ assertEquals(17, outs.size());
+ assertTrue(outs.get(0).contains("Reconfigurable properties:"));
+ assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY, outs.get(1));
+ assertEquals(DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY, outs.get(2));
+ assertEquals(DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, outs.get(3));
+ assertEquals(DFS_HEARTBEAT_INTERVAL_KEY, outs.get(4));
+ assertEquals(DFS_IMAGE_PARALLEL_LOAD_KEY, outs.get(5));
+ assertEquals(DFS_NAMENODE_AVOID_SLOW_DATANODE_FOR_READ_KEY, outs.get(6));
+ assertEquals(DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY, outs.get(7));
+ assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, outs.get(8));
+ assertEquals(DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY, outs.get(9));
assertEquals(errs.size(), 0);
}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org