You are viewing a plain-text version of this content. The canonical link for it was provided as a hyperlink in the original HTML message (lost in this plain-text rendering).
Posted to common-commits@hadoop.apache.org by we...@apache.org on 2022/04/14 08:17:30 UTC
[hadoop] branch branch-3.3 updated: HDFS-16509. Fix decommission UnsupportedOperationException (#4077). Contributed by daimin.
This is an automated email from the ASF dual-hosted git repository.
weichiu pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/branch-3.3 by this push:
new 0ef1a13f019 HDFS-16509. Fix decommission UnsupportedOperationException (#4077). Contributed by daimin.
0ef1a13f019 is described below
commit 0ef1a13f019c22a99300e6a683b0d1af5193663c
Author: daimin <da...@outlook.com>
AuthorDate: Thu Apr 14 11:07:06 2022 +0800
HDFS-16509. Fix decommission UnsupportedOperationException (#4077). Contributed by daimin.
(cherry picked from commit c65c383b7ebef48c638607f15ba35d61554982cb)
---
.../blockmanagement/DatanodeAdminDefaultMonitor.java | 6 ++++--
.../java/org/apache/hadoop/hdfs/TestDecommission.java | 17 +++++++++++++++++
2 files changed, 21 insertions(+), 2 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java
index 2da3de07147..3f7be83496c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java
@@ -390,8 +390,10 @@ public class DatanodeAdminDefaultMonitor extends DatanodeAdminMonitorBase
// Remove the block from the list if it's no longer in the block map,
// e.g. the containing file has been deleted
if (blockManager.blocksMap.getStoredBlock(block) == null) {
- LOG.trace("Removing unknown block {}", block);
- it.remove();
+ if (pruneReliableBlocks) {
+ LOG.trace("Removing unknown block {}", block);
+ it.remove();
+ }
continue;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index 9592a23c510..89e5cabc880 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
+import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
@@ -65,6 +66,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@@ -672,6 +674,21 @@ public class TestDecommission extends AdminStatesBaseTest {
fdos.close();
}
+ @Test(timeout = 20000)
+ public void testDecommissionWithUnknownBlock() throws IOException {
+ startCluster(1, 3);
+
+ FSNamesystem ns = getCluster().getNamesystem(0);
+ DatanodeManager datanodeManager = ns.getBlockManager().getDatanodeManager();
+
+ BlockInfo blk = new BlockInfoContiguous(new Block(1L), (short) 1);
+ DatanodeDescriptor dn = datanodeManager.getDatanodes().iterator().next();
+ dn.getStorageInfos()[0].addBlock(blk, blk);
+
+ datanodeManager.getDatanodeAdminManager().startDecommission(dn);
+ waitNodeState(dn, DatanodeInfo.AdminStates.DECOMMISSIONED);
+ }
+
private static String scanIntoString(final ByteArrayOutputStream baos) {
final TextStringBuilder sb = new TextStringBuilder();
final Scanner scanner = new Scanner(baos.toString());
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org