You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by we...@apache.org on 2021/04/13 08:09:03 UTC
[hadoop] branch branch-3.2 updated: HDFS-15815. If the required
storage types are unavailable,
log the failure reason while choosing a Datanode. Contributed by Yang Yun.
(#2882)
This is an automated email from the ASF dual-hosted git repository.
weichiu pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/branch-3.2 by this push:
new e6df0fb HDFS-15815. If the required storage types are unavailable, log the failure reason while choosing a Datanode. Contributed by Yang Yun. (#2882)
e6df0fb is described below
commit e6df0fb84cacc7242b0cce0c3343b95a99c15357
Author: Wei-Chiu Chuang <we...@apache.org>
AuthorDate: Mon Apr 12 23:55:32 2021 -0700
HDFS-15815. If the required storage types are unavailable, log the failure reason while choosing a Datanode. Contributed by Yang Yun. (#2882)
(cherry picked from commit e391844e8e414abf8c94f7bd4719053efa3b538a)
Co-authored-by: Ayush Saxena <ay...@apache.org>
(cherry picked from commit bfba6f1f3cd1966ff59655feb7e19503e06eeeae)
---
.../blockmanagement/BlockPlacementPolicyDefault.java | 8 ++++++--
.../server/blockmanagement/TestReplicationPolicy.java | 18 +++++++++++++++++-
2 files changed, 23 insertions(+), 3 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 66c60be..b59f0ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -76,7 +76,8 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
NODE_STALE("the node is stale"),
NODE_TOO_BUSY("the node is too busy"),
TOO_MANY_NODES_ON_RACK("the rack has too many chosen nodes"),
- NOT_ENOUGH_STORAGE_SPACE("not enough storage space to place the block");
+ NOT_ENOUGH_STORAGE_SPACE("not enough storage space to place the block"),
+ NO_REQUIRED_STORAGE_TYPE("required storage types are unavailable");
private final String text;
@@ -802,6 +803,9 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
includeType = type;
break;
}
+ logNodeIsNotChosen(null,
+ NodeNotChosenReason.NO_REQUIRED_STORAGE_TYPE,
+ " for storage type " + type);
}
} else {
chosenNode = chooseDataNode(scope, excludedNodes);
@@ -938,7 +942,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
if (LOG.isDebugEnabled()) {
// build the error message for later use.
debugLoggingBuilder.get()
- .append("\n Datanode ").append(node)
+ .append("\n Datanode ").append((node==null)?"None":node)
.append(" is not chosen since ").append(reason.getText());
if (reasonDetails != null) {
debugLoggingBuilder.get().append(" ").append(reasonDetails);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index 0980c2f..7f68142 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.StatefulBlockInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
@@ -1619,4 +1620,19 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
assertTrue(bppd.excludeNodeByLoad(node));
}
-}
+
+ @Test
+ public void testChosenFailureForStorageType() {
+ final LogVerificationAppender appender = new LogVerificationAppender();
+ final Logger logger = Logger.getRootLogger();
+ logger.addAppender(appender);
+
+ DatanodeStorageInfo[] targets = replicator.chooseTarget(filename, 1,
+ dataNodes[0], new ArrayList<DatanodeStorageInfo>(), false, null,
+ BLOCK_SIZE, TestBlockStoragePolicy.POLICY_SUITE.getPolicy(
+ HdfsConstants.COLD_STORAGE_POLICY_ID), null);
+ assertEquals(0, targets.length);
+ assertNotEquals(0,
+ appender.countLinesWithMessage("NO_REQUIRED_STORAGE_TYPE"));
+ }
+}
\ No newline at end of file
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org