hadoop git commit: HDFS-13486. Backport HDFS-11817 (A faulty node can cause a lease leak and NPE on accessing data) to branch-2.7. Contributed by Kihwal Lee.
Repository: hadoop
Updated Branches:
refs/heads/branch-2.7 20c6b448a -> e8777342d
HDFS-13486. Backport HDFS-11817 (A faulty node can cause a lease leak and NPE on accessing data) to branch-2.7. Contributed by Kihwal Lee.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e8777342
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e8777342
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e8777342
Branch: refs/heads/branch-2.7
Commit: e8777342d3d916276a06bbd8cb2257159402bd58
Parents: 20c6b44
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Mon May 7 16:22:34 2018 -0700
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Mon May 7 16:23:41 2018 -0700
----------------------------------------------------------------------
.../BlockInfoContiguousUnderConstruction.java | 66 +++++++++++++-------
.../server/blockmanagement/DatanodeManager.java | 3 +-
.../hdfs/server/namenode/FSNamesystem.java | 5 +-
.../hdfs/server/namenode/LeaseManager.java | 18 +++++-
.../TestBlockInfoUnderConstruction.java | 8 +--
.../namenode/TestBlockUnderConstruction.java | 42 +++++++++++++
.../TestCommitBlockSynchronization.java | 2 +-
7 files changed, 111 insertions(+), 33 deletions(-)
----------------------------------------------------------------------
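The patch below hardens the under-construction block and lease paths so that a
faulty datanode (reporting a null or invalid storage) no longer leaves behind a
leaked lease or a NullPointerException when the data is later accessed. As a
minimal, self-contained illustration of the defensive pattern applied in
setExpectedLocations -- plain Java stand-ins, not the real Hadoop classes --
null expected-location entries are filtered out up front instead of being
stored and dereferenced later:

import java.util.ArrayList;
import java.util.List;

class ExpectedLocationsSketch {
  /** Stand-in for DatanodeStorageInfo; only its presence matters here. */
  static class StorageInfo {
    final String id;
    StorageInfo(String id) { this.id = id; }
  }

  private List<StorageInfo> replicas = new ArrayList<>();

  /** Mirrors the patched setExpectedLocations: null targets are skipped. */
  void setExpectedLocations(StorageInfo[] targets) {
    if (targets == null) {
      return; // nothing to record; keep the existing (possibly empty) list
    }
    List<StorageInfo> next = new ArrayList<>(targets.length);
    for (StorageInfo t : targets) {
      if (t != null) {       // a faulty node can surface as a null entry
        next.add(t);
      }
    }
    replicas = next;
  }

  int numExpectedLocations() {
    return replicas.size();  // safe even if every target was null
  }

  public static void main(String[] args) {
    ExpectedLocationsSketch b = new ExpectedLocationsSketch();
    b.setExpectedLocations(new StorageInfo[] { new StorageInfo("s1"), null });
    System.out.println(b.numExpectedLocations()); // prints 1, no NPE
  }
}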
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8777342/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
index e98ab7e..6b9b7c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
@@ -190,12 +190,23 @@ public class BlockInfoContiguousUnderConstruction extends BlockInfoContiguous {
/** Set expected locations */
public void setExpectedLocations(DatanodeStorageInfo[] targets) {
- int numLocations = targets == null ? 0 : targets.length;
+ if (targets == null) {
+ return;
+ }
+ int numLocations = 0;
+ for (DatanodeStorageInfo target : targets) {
+ if (target != null) {
+ numLocations++;
+ }
+ }
+
this.replicas = new ArrayList<ReplicaUnderConstruction>(numLocations);
- for(int i = 0; i < numLocations; i++)
+ for(int i = 0; i < targets.length; i++) {
+ // Only store non-null DatanodeStorageInfo.
if (targets[i] != null) {
replicas.add(new ReplicaUnderConstruction(this, targets[i], ReplicaState.RBW));
}
+ }
}
/**
@@ -286,10 +297,15 @@ public class BlockInfoContiguousUnderConstruction extends BlockInfoContiguous {
* Initialize lease recovery for this block.
* Find the first alive data-node starting from the previous primary and
* make it primary.
+ * @param recoveryId Recovery ID (new gen stamp)
+ * @param startRecovery Issue recovery command to datanode if true.
*/
- public void initializeBlockRecovery(long recoveryId) {
+ public void initializeBlockRecovery(long recoveryId, boolean startRecovery) {
setBlockUCState(BlockUCState.UNDER_RECOVERY);
blockRecoveryId = recoveryId;
+ if (!startRecovery) {
+ return;
+ }
if (replicas.size() == 0) {
NameNode.blockStateChangeLog.warn("BLOCK*"
+ " BlockInfoUnderConstruction.initLeaseRecovery:"
@@ -337,27 +353,33 @@ public class BlockInfoContiguousUnderConstruction extends BlockInfoContiguous {
void addReplicaIfNotPresent(DatanodeStorageInfo storage,
Block block,
ReplicaState rState) {
- Iterator<ReplicaUnderConstruction> it = replicas.iterator();
- while (it.hasNext()) {
- ReplicaUnderConstruction r = it.next();
- DatanodeStorageInfo expectedLocation = r.getExpectedStorageLocation();
- if(expectedLocation == storage) {
- // Record the gen stamp from the report
- r.setGenerationStamp(block.getGenerationStamp());
- return;
- } else if (expectedLocation != null &&
- expectedLocation.getDatanodeDescriptor() ==
- storage.getDatanodeDescriptor()) {
-
- // The Datanode reported that the block is on a different storage
- // than the one chosen by BlockPlacementPolicy. This can occur as
- // we allow Datanodes to choose the target storage. Update our
- // state by removing the stale entry and adding a new one.
- it.remove();
- break;
+ if (replicas == null) {
+ replicas = new ArrayList<ReplicaUnderConstruction>(1);
+ replicas.add(new ReplicaUnderConstruction(block, storage,
+ rState));
+ } else {
+ Iterator<ReplicaUnderConstruction> it = replicas.iterator();
+ while (it.hasNext()) {
+ ReplicaUnderConstruction r = it.next();
+ DatanodeStorageInfo expectedLocation = r.getExpectedStorageLocation();
+ if (expectedLocation == storage) {
+ // Record the gen stamp from the report
+ r.setGenerationStamp(block.getGenerationStamp());
+ return;
+ } else if (expectedLocation != null
+ && expectedLocation.getDatanodeDescriptor() ==
+ storage.getDatanodeDescriptor()) {
+
+ // The Datanode reported that the block is on a different storage
+ // than the one chosen by BlockPlacementPolicy. This can occur as
+ // we allow Datanodes to choose the target storage. Update our
+ // state by removing the stale entry and adding a new one.
+ it.remove();
+ break;
+ }
}
+ replicas.add(new ReplicaUnderConstruction(block, storage, rState));
}
- replicas.add(new ReplicaUnderConstruction(block, storage, rState));
}
@Override // BlockInfo
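The other behavioral change in this file is the new two-argument
initializeBlockRecovery. A hedged sketch of the contract (names simplified,
primary-replica selection and the recovery command elided; not the actual
Hadoop class): the boolean lets callers such as the new test below bump the
recovery generation stamp and mark the block UNDER_RECOVERY without issuing a
recovery command to a datanode that may be dead or faulty.

class BlockRecoverySketch {
  enum State { UNDER_CONSTRUCTION, UNDER_RECOVERY }

  private State state = State.UNDER_CONSTRUCTION;
  private long blockRecoveryId;

  void initializeBlockRecovery(long recoveryId, boolean startRecovery) {
    state = State.UNDER_RECOVERY;
    blockRecoveryId = recoveryId;
    if (!startRecovery) {
      return;               // record the id only; no command to a datanode
    }
    // ... choose the most recently heartbeating replica as primary and
    // enqueue a recovery command for it (elided in this sketch) ...
  }

  long getBlockRecoveryId() { return blockRecoveryId; }
  State getState() { return state; }
}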
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8777342/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 5d82186..e449585 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -505,10 +505,11 @@ public class DatanodeManager {
DatanodeID[] datanodeID, String[] storageIDs,
String format, Object... args) throws UnregisteredNodeException {
if (datanodeID.length != storageIDs.length) {
+ // Error for pre-2.0.0-alpha clients.
final String err = (storageIDs.length == 0?
"Missing storageIDs: It is likely that the HDFS client,"
+ " who made this call, is running in an older version of Hadoop"
- + " which does not support storageIDs."
+ + "(pre-2.0.0-alpha) which does not support storageIDs."
: "Length mismatched: storageIDs.length=" + storageIDs.length + " != "
) + " datanodeID.length=" + datanodeID.length;
throw new HadoopIllegalArgumentException(
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8777342/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index b458900..f3d9484 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2205,7 +2205,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
truncatedBlockUC.getTruncateBlock().getNumBytes(), truncatedBlockUC);
}
if (shouldRecoverNow) {
- truncatedBlockUC.initializeBlockRecovery(newBlock.getGenerationStamp());
+ truncatedBlockUC.initializeBlockRecovery(newBlock.getGenerationStamp(),
+ true);
}
return newBlock;
@@ -4105,7 +4106,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
} else if(truncateRecovery) {
recoveryBlock.setGenerationStamp(blockRecoveryId);
}
- uc.initializeBlockRecovery(blockRecoveryId);
+ uc.initializeBlockRecovery(blockRecoveryId, true);
leaseManager.renewLease(lease);
// Cannot close file right now, since the last block requires recovery.
// This may potentially cause infinite loop in lease recovery
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8777342/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
index fd7c42e..5270d0b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
@@ -491,8 +491,20 @@ public class LeaseManager {
try {
INodesInPath iip = fsnamesystem.getFSDirectory().getINodesInPath(p,
true);
- boolean completed = fsnamesystem.internalReleaseLease(leaseToCheck, p,
- iip, HdfsServerConstants.NAMENODE_LEASE_HOLDER);
+ // Sanity check to make sure the path is correct
+ if (!p.startsWith("/")) {
+ throw new IOException("Invalid path in the lease " + p);
+ }
+ boolean completed = false;
+ try {
+ completed = fsnamesystem.internalReleaseLease(
+ leaseToCheck, p, iip,
+ HdfsServerConstants.NAMENODE_LEASE_HOLDER);
+ } catch (IOException e) {
+ LOG.warn("Cannot release the path " + p + " in the lease "
+ + leaseToCheck + ". It will be retried.", e);
+ continue;
+ }
if (LOG.isDebugEnabled()) {
if (completed) {
LOG.debug("Lease recovery for " + p + " is complete. File closed.");
@@ -505,7 +517,7 @@ public class LeaseManager {
needSync = true;
}
} catch (IOException e) {
- LOG.error("Cannot release the path " + p + " in the lease "
+ LOG.warn("Removing lease with an invalid path: " + p + ","
+ leaseToCheck, e);
removing.add(p);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8777342/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
index a7ba293..c72a356 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
@@ -50,7 +50,7 @@ public class TestBlockInfoUnderConstruction {
DFSTestUtil.resetLastUpdatesWithOffset(dd1, -3 * 1000);
DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
DFSTestUtil.resetLastUpdatesWithOffset(dd3, -2 * 1000);
- blockInfo.initializeBlockRecovery(1);
+ blockInfo.initializeBlockRecovery(1, true);
BlockInfoContiguousUnderConstruction[] blockInfoRecovery = dd2.getLeaseRecoveryCommand(1);
assertEquals(blockInfoRecovery[0], blockInfo);
@@ -58,7 +58,7 @@ public class TestBlockInfoUnderConstruction {
DFSTestUtil.resetLastUpdatesWithOffset(dd1, -2 * 1000);
DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
DFSTestUtil.resetLastUpdatesWithOffset(dd3, -3 * 1000);
- blockInfo.initializeBlockRecovery(2);
+ blockInfo.initializeBlockRecovery(2, true);
blockInfoRecovery = dd1.getLeaseRecoveryCommand(1);
assertEquals(blockInfoRecovery[0], blockInfo);
@@ -66,7 +66,7 @@ public class TestBlockInfoUnderConstruction {
DFSTestUtil.resetLastUpdatesWithOffset(dd1, -2 * 1000);
DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
DFSTestUtil.resetLastUpdatesWithOffset(dd3, -3 * 1000);
- blockInfo.initializeBlockRecovery(3);
+ blockInfo.initializeBlockRecovery(3, true);
blockInfoRecovery = dd3.getLeaseRecoveryCommand(1);
assertEquals(blockInfoRecovery[0], blockInfo);
@@ -75,7 +75,7 @@ public class TestBlockInfoUnderConstruction {
DFSTestUtil.resetLastUpdatesWithOffset(dd1, -2 * 1000);
DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
DFSTestUtil.resetLastUpdatesWithOffset(dd3, 0);
- blockInfo.initializeBlockRecovery(3);
+ blockInfo.initializeBlockRecovery(3, true);
blockInfoRecovery = dd3.getLeaseRecoveryCommand(1);
assertEquals(blockInfoRecovery[0], blockInfo);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8777342/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
index 1fbe160..18215d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.junit.AfterClass;
@@ -181,4 +182,45 @@ public class TestBlockUnderConstruction {
// close file
out.close();
}
+
+ /**
+ * A storage ID can be invalid if the storage failed or the node
+ * reregisters. When the node heart-beats, the storage report in it
+ * causes storage volumes to be added back. An invalid storage ID
+ * should not cause an NPE.
+ */
+ @Test
+ public void testEmptyExpectedLocations() throws Exception {
+ final NamenodeProtocols namenode = cluster.getNameNodeRpc();
+ final FSNamesystem fsn = cluster.getNamesystem();
+ final BlockManager bm = fsn.getBlockManager();
+ final Path p = new Path(BASE_DIR, "file2.dat");
+ final String src = p.toString();
+ final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 1);
+ writeFile(p, out, 256);
+ out.hflush();
+
+ // make sure the block is readable
+ LocatedBlocks lbs = namenode.getBlockLocations(src, 0, 256);
+ LocatedBlock lastLB = lbs.getLocatedBlocks().get(0);
+ final Block b = lastLB.getBlock().getLocalBlock();
+
+ // fake a block recovery
+ long blockRecoveryId = fsn.getBlockIdManager().nextGenerationStamp(false);
+ BlockInfoContiguousUnderConstruction uc = bm.getStoredBlock(b).
+ convertToBlockUnderConstruction(BlockUCState.UNDER_CONSTRUCTION, null);
+ uc.initializeBlockRecovery(blockRecoveryId, false);
+
+ try {
+ String[] storages = { "invalid-storage-id1" };
+ fsn.commitBlockSynchronization(lastLB.getBlock(), blockRecoveryId, 256L,
+ true, false, lastLB.getLocations(), storages);
+ } catch (java.lang.IllegalStateException ise) {
+ // Although a failure is expected as of now, future commit policy
+ // changes may make it not fail. This is not critical to the test.
+ }
+
+ // Invalid storage should not trigger an exception.
+ lbs = namenode.getBlockLocations(src, 0, 256);
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8777342/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
index c29383a..aa8ed31 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
@@ -68,7 +68,7 @@ public class TestCommitBlockSynchronization {
block, (short) 1, HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);
blockInfo.setBlockCollection(file);
blockInfo.setGenerationStamp(genStamp);
- blockInfo.initializeBlockRecovery(genStamp);
+ blockInfo.initializeBlockRecovery(genStamp, true);
doReturn(true).when(file).removeLastBlock(any(Block.class));
doReturn(true).when(file).isUnderConstruction();
doReturn(new BlockInfoContiguous[1]).when(file).getBlocks();