You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by we...@apache.org on 2017/09/05 21:58:58 UTC
[4/4] hadoop git commit: HDFS-12182. BlockManager.metaSave does not
distinguish between "under replicated" and "missing" blocks. Contributed by
Wellington Chevreuil.
HDFS-12182. BlockManager.metaSave does not distinguish between "under replicated" and "missing" blocks. Contributed by Wellington Chevreuil.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a2f3e78
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a2f3e78
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a2f3e78
Branch: refs/heads/branch-2
Commit: 3a2f3e78fff443033078b17b18c936b91c5ec799
Parents: 92d9ad7
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Tue Sep 5 14:58:08 2017 -0700
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Tue Sep 5 14:58:08 2017 -0700
----------------------------------------------------------------------
.../server/blockmanagement/BlockManager.java | 29 +++++++++--
.../blockmanagement/TestBlockManager.java | 55 ++++++++++++++++++++
.../hdfs/server/namenode/TestMetaSave.java | 4 +-
3 files changed, 82 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a2f3e78/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 9bfbca8..4f0ec43 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -605,17 +605,36 @@ public class BlockManager implements BlockStatsMXBean {
datanodeManager.fetchDatanodes(live, dead, false);
out.println("Live Datanodes: " + live.size());
out.println("Dead Datanodes: " + dead.size());
+
//
- // Dump contents of neededReplication
+ // Need to iterate over all queues from neededReplications
+ // except for the QUEUE_WITH_CORRUPT_BLOCKS
//
synchronized (neededReplications) {
- out.println("Metasave: Blocks waiting for replication: " +
- neededReplications.size());
- for (Block block : neededReplications) {
+ out.println("Metasave: Blocks waiting for reconstruction: "
+ + neededReplications.getUnderReplicatedBlockCount());
+ for (int i = 0; i < neededReplications.LEVEL; i++) {
+ if (i != neededReplications.QUEUE_WITH_CORRUPT_BLOCKS) {
+ for (Iterator<BlockInfo> it = neededReplications.iterator(i);
+ it.hasNext();) {
+ Block block = it.next();
+ dumpBlockMeta(block, out);
+ }
+ }
+ }
+ //
+ // Now print corrupt blocks separately
+ //
+ out.println("Metasave: Blocks currently missing: " +
+ neededReplications.getCorruptBlockSize());
+ for (Iterator<BlockInfo> it = neededReplications.
+ iterator(neededReplications.QUEUE_WITH_CORRUPT_BLOCKS);
+ it.hasNext();) {
+ Block block = it.next();
dumpBlockMeta(block, out);
}
}
-
+
// Dump any postponed over-replicated blocks
out.println("Mis-replicated blocks that have been postponed:");
for (Block block : postponedMisreplicatedBlocks) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a2f3e78/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index f4fe38c..ad84805 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -1283,4 +1283,59 @@ public class TestBlockManager {
isReplicaCorrupt(Mockito.any(BlockInfo.class),
Mockito.any(DatanodeDescriptor.class));
}
+
+ @Test
+ public void testMetaSaveMissingReplicas() throws Exception {
+ List<DatanodeStorageInfo> origStorages = getStorages(0, 1);
+ List<DatanodeDescriptor> origNodes = getNodes(origStorages);
+ BlockInfo block = makeBlockReplicasMissing(0, origNodes);
+ File file = new File("test.log");
+ PrintWriter out = new PrintWriter(file);
+ bm.metaSave(out);
+ out.flush();
+ FileInputStream fstream = new FileInputStream(file);
+ DataInputStream in = new DataInputStream(fstream);
+ BufferedReader reader = new BufferedReader(new InputStreamReader(in));
+ StringBuffer buffer = new StringBuffer();
+ String line;
+ try {
+ while ((line = reader.readLine()) != null) {
+ buffer.append(line);
+ }
+ String output = buffer.toString();
+ assertTrue("Metasave output should have reported missing blocks.",
+ output.contains("Metasave: Blocks currently missing: 1"));
+ assertTrue("There should be 0 blocks waiting for reconstruction",
+ output.contains("Metasave: Blocks waiting for reconstruction: 0"));
+ String blockNameGS = block.getBlockName() + "_" +
+ block.getGenerationStamp();
+ assertTrue("Block " + blockNameGS + " should be MISSING.",
+ output.contains(blockNameGS + " MISSING"));
+ } finally {
+ reader.close();
+ file.delete();
+ }
+ }
+
+ private BlockInfo makeBlockReplicasMissing(long blockId,
+ List<DatanodeDescriptor> nodesList) throws IOException {
+ long inodeId = ++mockINodeId;
+ final INodeFile bc = TestINodeFile.createINodeFile(inodeId);
+
+ BlockInfo blockInfo = blockOnNodes(blockId, nodesList);
+ blockInfo.setReplication((short) 3);
+ blockInfo.setBlockCollectionId(inodeId);
+
+ Mockito.doReturn(bc).when(fsn).getBlockCollection(inodeId);
+ bm.blocksMap.addBlockCollection(blockInfo, bc);
+ bm.markBlockReplicasAsCorrupt(blockInfo,
+ blockInfo.getGenerationStamp() + 1,
+ blockInfo.getNumBytes(),
+ new DatanodeStorageInfo[]{});
+ BlockCollection mockedBc = Mockito.mock(BlockCollection.class);
+ Mockito.when(mockedBc.getBlocks()).thenReturn(new BlockInfo[]{blockInfo});
+ bm.checkReplication(mockedBc);
+ return blockInfo;
+ }
+
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a2f3e78/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
index 14f9382..22691c8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
@@ -162,7 +162,9 @@ public class TestMetaSave {
line = reader.readLine();
assertTrue(line.equals("Dead Datanodes: 1"));
line = reader.readLine();
- assertTrue(line.equals("Metasave: Blocks waiting for replication: 0"));
+ assertTrue(line.equals("Metasave: Blocks waiting for reconstruction: 0"));
+ line = reader.readLine();
+ assertTrue(line.equals("Metasave: Blocks currently missing: 0"));
line = reader.readLine();
assertTrue(line.equals("Mis-replicated blocks that have been postponed:"));
line = reader.readLine();
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org