You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by ar...@apache.org on 2017/07/31 18:36:04 UTC
[2/2] hadoop git commit: HDFS-12082. BlockInvalidateLimit value is
incorrectly set after namenode heartbeat interval reconfigured. Contributed
by Weiwei Yang.
HDFS-12082. BlockInvalidateLimit value is incorrectly set after namenode heartbeat interval reconfigured. Contributed by Weiwei Yang.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8c635809
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8c635809
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8c635809
Branch: refs/heads/branch-2
Commit: 8c6358098147bb8a9a8b21cbee393662d7bec522
Parents: b19415f
Author: Arpit Agarwal <ar...@apache.org>
Authored: Mon Jul 31 11:33:55 2017 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Mon Jul 31 11:34:00 2017 -0700
----------------------------------------------------------------------
.../server/blockmanagement/DatanodeManager.java | 20 +++++++----
.../namenode/TestNameNodeReconfigure.java | 36 ++++++++++++++++++++
2 files changed, 50 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c635809/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index bd9f420..7e82938 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -269,12 +269,19 @@ public class DatanodeManager {
DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); // 5 minutes
this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval
+ 10 * 1000 * heartbeatIntervalSeconds;
- final int blockInvalidateLimit = Math.max(20*(int)(heartbeatIntervalSeconds),
+
+ // The effective block invalidate limit is the larger of the value
+ // configured in hdfs-site.xml and 20 * heartbeat interval.
+ final int configuredBlockInvalidateLimit = conf.getInt(
+ DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY,
DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT);
- this.blockInvalidateLimit = conf.getInt(
- DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY, blockInvalidateLimit);
+ final int countedBlockInvalidateLimit = 20*(int)(heartbeatIntervalSeconds);
+ this.blockInvalidateLimit = Math.max(countedBlockInvalidateLimit,
+ configuredBlockInvalidateLimit);
LOG.info(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY
- + "=" + this.blockInvalidateLimit);
+ + ": configured=" + configuredBlockInvalidateLimit
+ + ", counted=" + countedBlockInvalidateLimit
+ + ", effected=" + blockInvalidateLimit);
this.checkIpHostnameInRegistration = conf.getBoolean(
DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_KEY,
@@ -382,7 +389,8 @@ public class DatanodeManager {
return fsClusterStats;
}
- int getBlockInvalidateLimit() {
+ @VisibleForTesting
+ public int getBlockInvalidateLimit() {
return blockInvalidateLimit;
}
@@ -1812,7 +1820,7 @@ public class DatanodeManager {
this.heartbeatExpireInterval = 2L * recheckInterval + 10 * 1000
* intervalSeconds;
this.blockInvalidateLimit = Math.max(20 * (int) (intervalSeconds),
- DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT);
+ blockInvalidateLimit);
}
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c635809/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
index 7b4061a..5793841 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
@@ -40,6 +40,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_BACKOFF_ENABLE_DEFAULT;
public class TestNameNodeReconfigure {
@@ -48,10 +49,13 @@ public class TestNameNodeReconfigure {
.getLog(TestNameNodeReconfigure.class);
private MiniDFSCluster cluster;
+ private final int customizedBlockInvalidateLimit = 500;
@Before
public void setUp() throws IOException {
Configuration conf = new HdfsConfiguration();
+ conf.setInt(DFS_BLOCK_INVALIDATE_LIMIT_KEY,
+ customizedBlockInvalidateLimit);
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
}
@@ -201,6 +205,38 @@ public class TestNameNodeReconfigure {
datanodeManager.getHeartbeatRecheckInterval());
}
+ @Test
+ public void testBlockInvalidateLimitAfterReconfigured()
+ throws ReconfigurationException {
+ final NameNode nameNode = cluster.getNameNode();
+ final DatanodeManager datanodeManager = nameNode.namesystem
+ .getBlockManager().getDatanodeManager();
+
+ assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY + " is not correctly set",
+ customizedBlockInvalidateLimit,
+ datanodeManager.getBlockInvalidateLimit());
+
+ nameNode.reconfigureProperty(DFS_HEARTBEAT_INTERVAL_KEY,
+ Integer.toString(6));
+
+ // 20 * 6 = 120 < 500
+ // Block invalidate limit should stay the same as before after reconfiguration.
+ assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY
+ + " is not honored after reconfiguration",
+ customizedBlockInvalidateLimit,
+ datanodeManager.getBlockInvalidateLimit());
+
+ nameNode.reconfigureProperty(DFS_HEARTBEAT_INTERVAL_KEY,
+ Integer.toString(50));
+
+ // 20 * 50 = 1000 > 500
+ // Block invalidate limit should be reset to 1000.
+ assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY
+ + " is not reconfigured correctly",
+ 1000,
+ datanodeManager.getBlockInvalidateLimit());
+ }
+
@After
public void shutDown() throws IOException {
if (cluster != null) {
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org