You are viewing a plain text version of this content; the canonical (HTML) version is available from the mailing list archive.
Posted to common-commits@hadoop.apache.org by zj...@apache.org on 2015/04/22 01:21:41 UTC
[20/29] hadoop git commit: HDFS-8133. Improve readability of deleted
block check (Daryn Sharp via Colin P. McCabe)
HDFS-8133. Improve readability of deleted block check (Daryn Sharp via Colin P. McCabe)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39e7929c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39e7929c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39e7929c
Branch: refs/heads/YARN-2928
Commit: 39e7929ceceef0c3fc3830e68fde64f88abd8f54
Parents: 81ec672
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Tue Apr 21 11:41:22 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Tue Apr 21 16:16:56 2015 -0700
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../blockmanagement/BlockInfoContiguous.java | 4 ++++
.../server/blockmanagement/BlockManager.java | 19 ++++++++++---------
.../hdfs/server/blockmanagement/BlocksMap.java | 2 +-
.../hdfs/server/namenode/FSNamesystem.java | 5 ++---
.../server/blockmanagement/TestBlockInfo.java | 10 ++++++++++
6 files changed, 30 insertions(+), 13 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/39e7929c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e07e45d..e162d28 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -452,6 +452,9 @@ Release 2.8.0 - UNRELEASED
HDFS-8169. Move LocatedBlocks and related classes to hdfs-client. (wheat9)
+ HDFS-8133. Improve readability of deleted block check (Daryn Sharp via
+ Colin P. McCabe)
+
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
http://git-wip-us.apache.org/repos/asf/hadoop/blob/39e7929c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
index 48069c1..4314ab3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
@@ -86,6 +86,10 @@ public class BlockInfoContiguous extends Block
this.bc = bc;
}
+ public boolean isDeleted() {
+ return (bc == null);
+ }
+
public DatanodeDescriptor getDatanode(int index) {
DatanodeStorageInfo storage = getStorageInfo(index);
return storage == null ? null : storage.getDatanodeDescriptor();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/39e7929c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 2a7b02a..1db1356 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1165,13 +1165,14 @@ public class BlockManager {
DatanodeStorageInfo storageInfo,
DatanodeDescriptor node) throws IOException {
- BlockCollection bc = b.corrupted.getBlockCollection();
- if (bc == null) {
+ if (b.corrupted.isDeleted()) {
blockLog.info("BLOCK markBlockAsCorrupt: {} cannot be marked as" +
" corrupt as it does not belong to any file", b);
addToInvalidates(b.corrupted, node);
return;
}
+ short expectedReplicas =
+ b.corrupted.getBlockCollection().getBlockReplication();
// Add replica to the data-node if it is not already there
if (storageInfo != null) {
@@ -1183,13 +1184,13 @@ public class BlockManager {
b.reasonCode);
NumberReplicas numberOfReplicas = countNodes(b.stored);
- boolean hasEnoughLiveReplicas = numberOfReplicas.liveReplicas() >= bc
- .getBlockReplication();
+ boolean hasEnoughLiveReplicas = numberOfReplicas.liveReplicas() >=
+ expectedReplicas;
boolean minReplicationSatisfied =
numberOfReplicas.liveReplicas() >= minReplication;
boolean hasMoreCorruptReplicas = minReplicationSatisfied &&
(numberOfReplicas.liveReplicas() + numberOfReplicas.corruptReplicas()) >
- bc.getBlockReplication();
+ expectedReplicas;
boolean corruptedDuringWrite = minReplicationSatisfied &&
(b.stored.getGenerationStamp() > b.corrupted.getGenerationStamp());
// case 1: have enough number of live replicas
@@ -2509,7 +2510,7 @@ public class BlockManager {
} else {
storedBlock = block;
}
- if (storedBlock == null || storedBlock.getBlockCollection() == null) {
+ if (storedBlock == null || storedBlock.isDeleted()) {
// If this block does not belong to anyfile, then we are done.
blockLog.info("BLOCK* addStoredBlock: {} on {} size {} but it does not" +
" belong to any file", block, node, block.getNumBytes());
@@ -2794,8 +2795,7 @@ public class BlockManager {
* what happened with it.
*/
private MisReplicationResult processMisReplicatedBlock(BlockInfoContiguous block) {
- BlockCollection bc = block.getBlockCollection();
- if (bc == null) {
+ if (block.isDeleted()) {
// block does not belong to any file
addToInvalidates(block);
return MisReplicationResult.INVALID;
@@ -2806,7 +2806,8 @@ public class BlockManager {
return MisReplicationResult.UNDER_CONSTRUCTION;
}
// calculate current replication
- short expectedReplication = bc.getBlockReplication();
+ short expectedReplication =
+ block.getBlockCollection().getBlockReplication();
NumberReplicas num = countNodes(block);
int numCurrentReplica = num.liveReplicas();
// add to under-replicated queue if need to be
http://git-wip-us.apache.org/repos/asf/hadoop/blob/39e7929c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
index 806a4cb..5e7d34f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
@@ -193,7 +193,7 @@ class BlocksMap {
boolean removed = node.removeBlock(info);
if (info.getDatanode(0) == null // no datanodes left
- && info.getBlockCollection() == null) { // does not belong to a file
+ && info.isDeleted()) { // does not belong to a file
blocks.remove(b); // remove block from the map
}
return removed;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/39e7929c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index f174a4e..e400644 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4260,13 +4260,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
// this method to add a CloseOp to the edit log for an already deleted
// file (See HDFS-6825).
//
- BlockCollection blockCollection = storedBlock.getBlockCollection();
- if (blockCollection == null) {
+ if (storedBlock.isDeleted()) {
throw new IOException("The blockCollection of " + storedBlock
+ " is null, likely because the file owning this block was"
+ " deleted and the block removal is delayed");
}
- INodeFile iFile = ((INode)blockCollection).asFile();
+ INodeFile iFile = ((INode)storedBlock.getBlockCollection()).asFile();
if (isFileDeleted(iFile)) {
throw new FileNotFoundException("File not found: "
+ iFile.getFullPathName() + ", likely due to delayed block"
http://git-wip-us.apache.org/repos/asf/hadoop/blob/39e7929c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
index 7425c6a..6cee8b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.junit.Assert;
import org.junit.Test;
+import org.mockito.Mockito;
/**
* This class provides tests for BlockInfo class, which is used in BlocksMap.
@@ -46,6 +47,15 @@ public class TestBlockInfo {
private static final Log LOG = LogFactory
.getLog("org.apache.hadoop.hdfs.TestBlockInfo");
+ @Test
+ public void testIsDeleted() {
+ BlockInfoContiguous blockInfo = new BlockInfoContiguous((short) 3);
+ BlockCollection bc = Mockito.mock(BlockCollection.class);
+ blockInfo.setBlockCollection(bc);
+ Assert.assertFalse(blockInfo.isDeleted());
+ blockInfo.setBlockCollection(null);
+ Assert.assertTrue(blockInfo.isDeleted());
+ }
@Test
public void testAddStorage() throws Exception {