You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by su...@apache.org on 2019/11/09 16:58:46 UTC
[hadoop] branch branch-3.2 updated: HDFS-14720. DataNode shouldn't
report block as bad block if the block length is Long.MAX_VALUE.
Contributed by hemanthboyina.
This is an automated email from the ASF dual-hosted git repository.
surendralilhore pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/branch-3.2 by this push:
new c4e22e0 HDFS-14720. DataNode shouldn't report block as bad block if the block length is Long.MAX_VALUE. Contributed by hemanthboyina.
c4e22e0 is described below
commit c4e22e0c809e3b26dccaedd3698d65b576cd60ec
Author: Surendra Singh Lilhore <su...@apache.org>
AuthorDate: Sat Nov 9 22:14:35 2019 +0530
HDFS-14720. DataNode shouldn't report block as bad block if the block length is Long.MAX_VALUE. Contributed by hemanthboyina.
(cherry picked from commit 320008bb7cc558b1300398178bd2f48cbf0b6c80)
---
.../hdfs/server/blockmanagement/ReplicationWork.java | 15 +++++++++++----
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
index f250bcb..8f81286 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
+import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.net.Node;
import java.util.List;
@@ -43,10 +44,16 @@ class ReplicationWork extends BlockReconstructionWork {
assert getSrcNodes().length > 0
: "At least 1 source node should have been selected";
try {
- DatanodeStorageInfo[] chosenTargets = blockplacement.chooseTarget(
- getSrcPath(), getAdditionalReplRequired(), getSrcNodes()[0],
- getLiveReplicaStorages(), false, excludedNodes, getBlockSize(),
- storagePolicySuite.getPolicy(getStoragePolicyID()), null);
+ DatanodeStorageInfo[] chosenTargets = null;
+ // HDFS-14720: If the block is deleted, the block size will become
+ // BlockCommand.NO_ACK (Long.MAX_VALUE). This kind of block doesn't need
+ // to be sent for replication or reconstruction.
+ if (getBlock().getNumBytes() != BlockCommand.NO_ACK) {
+ chosenTargets = blockplacement.chooseTarget(getSrcPath(),
+ getAdditionalReplRequired(), getSrcNodes()[0],
+ getLiveReplicaStorages(), false, excludedNodes, getBlockSize(),
+ storagePolicySuite.getPolicy(getStoragePolicyID()), null);
+ }
setTargets(chosenTargets);
} finally {
getSrcNodes()[0].decrementPendingReplicationWithoutTargets();
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org