You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by in...@apache.org on 2017/05/11 16:59:16 UTC
[35/50] [abbrv] hadoop git commit: HDFS-11755. Underconstruction
blocks can be considered missing. Contributed by Nathan Roberts.
HDFS-11755. Underconstruction blocks can be considered missing. Contributed by Nathan Roberts.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ad1e3e4d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ad1e3e4d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ad1e3e4d
Branch: refs/heads/HDFS-10467
Commit: ad1e3e4d9f105fac246ce1bdae80e92e013b8ba5
Parents: fab7374
Author: Kihwal Lee <ki...@apache.org>
Authored: Wed May 10 14:15:57 2017 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Wed May 10 14:15:57 2017 -0500
----------------------------------------------------------------------
.../server/blockmanagement/BlockManager.java | 2 +-
.../hdfs/server/namenode/CacheManager.java | 2 +-
.../apache/hadoop/hdfs/TestFileCorruption.java | 1 +
.../blockmanagement/TestBlockManager.java | 72 ++++++++++++++++++--
4 files changed, 68 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad1e3e4d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 8f58e25..41662a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -4092,7 +4092,7 @@ public class BlockManager implements BlockStatsMXBean {
final int curReplicasDelta, int expectedReplicasDelta) {
namesystem.writeLock();
try {
- if (!isPopulatingReplQueues()) {
+ if (!isPopulatingReplQueues() || !block.isComplete()) {
return;
}
NumberReplicas repl = countNodes(block);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad1e3e4d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
index 515a363..ab026f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
@@ -100,7 +100,7 @@ import com.google.common.collect.Lists;
* caching directives, we will schedule caching and uncaching work.
*/
@InterfaceAudience.LimitedPrivate({"HDFS"})
-public final class CacheManager {
+public class CacheManager {
public static final Logger LOG = LoggerFactory.getLogger(CacheManager.class);
private static final float MIN_CACHED_BLOCKS_PERCENT = 0.001f;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad1e3e4d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
index 1f62414..1d9d402 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
@@ -275,6 +275,7 @@ public class TestFileCorruption {
out.write(outBuffer, 0, bufferSize);
out.close();
dfs.setReplication(filePath, (short) 10);
+ cluster.triggerBlockReports();
// underreplicated Blocks should be one after setrep
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override public Boolean get() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad1e3e4d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 36dafa5..beaef4a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -83,11 +83,15 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
+import org.apache.hadoop.hdfs.server.namenode.CacheManager;
+import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeId;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.server.namenode.TestINodeFile;
+import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
+import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
@@ -102,6 +106,8 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.MetricsAsserts;
+import org.apache.hadoop.util.GSet;
+import org.apache.hadoop.util.LightWeightGSet;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.junit.Assert;
@@ -146,7 +152,20 @@ public class TestBlockManager {
Mockito.doReturn(true).when(fsn).hasWriteLock();
Mockito.doReturn(true).when(fsn).hasReadLock();
Mockito.doReturn(true).when(fsn).isRunning();
+ // Make shouldPopulateReplQueues return true
+ HAContext haContext = Mockito.mock(HAContext.class);
+ HAState haState = Mockito.mock(HAState.class);
+ Mockito.when(haContext.getState()).thenReturn(haState);
+ Mockito.when(haState.shouldPopulateReplQueues()).thenReturn(true);
+ Mockito.when(fsn.getHAContext()).thenReturn(haContext);
bm = new BlockManager(fsn, false, conf);
+ bm.setInitializedReplQueues(true);
+ CacheManager cm = Mockito.mock(CacheManager.class);
+ Mockito.doReturn(cm).when(fsn).getCacheManager();
+ GSet<CachedBlock, CachedBlock> cb =
+ new LightWeightGSet<CachedBlock, CachedBlock>(1);
+ Mockito.when(cm.getCachedBlocks()).thenReturn(cb);
+
final String[] racks = {
"/rackA",
"/rackA",
@@ -522,7 +541,7 @@ public class TestBlockManager {
}
return ret;
}
-
+
private List<DatanodeDescriptor> startDecommission(int ... indexes) {
List<DatanodeDescriptor> nodes = getNodes(indexes);
for (DatanodeDescriptor node : nodes) {
@@ -918,6 +937,42 @@ public class TestBlockManager {
return builder.build();
}
+ @Test
+ public void testUCBlockNotConsideredMissing() throws Exception {
+ DatanodeDescriptor node = nodes.get(0);
+ DatanodeStorageInfo ds = node.getStorageInfos()[0];
+ node.setAlive(true);
+ DatanodeRegistration nodeReg =
+ new DatanodeRegistration(node, null, null, "");
+
+ // register new node
+ bm.getDatanodeManager().registerDatanode(nodeReg);
+ bm.getDatanodeManager().addDatanode(node);
+
+ // Build an incremental report
+ List<ReceivedDeletedBlockInfo> rdbiList = new ArrayList<>();
+
+ // blk_42 is under construction, finalizes on one node and is
+ // immediately deleted on same node
+ long blockId = 42; // arbitrary
+ BlockInfo receivedBlock = addUcBlockToBM(blockId);
+
+ rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivedBlock),
+ ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
+ rdbiList.add(new ReceivedDeletedBlockInfo(
+ new Block(blockId),
+ ReceivedDeletedBlockInfo.BlockStatus.DELETED_BLOCK, null));
+
+ // process IBR
+ StorageReceivedDeletedBlocks srdb =
+ new StorageReceivedDeletedBlocks(new DatanodeStorage(ds.getStorageID()),
+ rdbiList.toArray(new ReceivedDeletedBlockInfo[rdbiList.size()]));
+ bm.processIncrementalBlockReport(node, srdb);
+ // Needed replications should still be 0.
+ assertEquals("UC block was incorrectly added to needed Replications",
+ 0, bm.neededReconstruction.size());
+ }
+
private BlockInfo addBlockToBM(long blkId) {
Block block = new Block(blkId);
BlockInfo blockInfo = new BlockInfoContiguous(block, (short) 3);
@@ -1250,14 +1305,17 @@ public class TestBlockManager {
FileInputStream fstream = new FileInputStream(file);
DataInputStream in = new DataInputStream(fstream);
BufferedReader reader = new BufferedReader(new InputStreamReader(in));
+ String corruptBlocksLine;
+ Boolean foundIt = false;
try {
- for(int i =0;i<6;i++) {
- reader.readLine();
+ while ((corruptBlocksLine = reader.readLine()) != null) {
+ if (corruptBlocksLine.compareTo("Corrupt Blocks:") == 0) {
+ foundIt = true;
+ break;
+ }
}
- String corruptBlocksLine = reader.readLine();
- assertEquals("Unexpected text in metasave," +
- "was expecting corrupt blocks section!", 0,
- corruptBlocksLine.compareTo("Corrupt Blocks:"));
+ assertTrue("Unexpected text in metasave," +
+ "was expecting corrupt blocks section!", foundIt);
corruptBlocksLine = reader.readLine();
String regex = "Block=[0-9]+\\tNode=.*\\tStorageID=.*StorageState.*" +
"TotalReplicas=.*Reason=GENSTAMP_MISMATCH";
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org