You are viewing a plain text version of this content; the hyperlink to the canonical version was lost in conversion (see the revision URL below).
Posted to hdfs-commits@hadoop.apache.org by sh...@apache.org on 2013/06/11 03:40:52 UTC
svn commit: r1491672 - in
/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs: CHANGES.txt
src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
Author: shv
Date: Tue Jun 11 01:40:52 2013
New Revision: 1491672
URL: http://svn.apache.org/r1491672
Log:
HDFS-4878. On Remove Block, block is not removed from neededReplications queue. Contributed by Tao Luo.
Modified:
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1491672&r1=1491671&r2=1491672&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue Jun 11 01:40:52 2013
@@ -354,6 +354,9 @@ Release 2.1.0-beta - UNRELEASED
HDFS-4586. TestDataDirs.testGetDataDirsFromURIs fails with all directories
in dfs.datanode.data.dir are invalid. (Ivan Mitic via atm)
+ HDFS-4878. On Remove Block, block is not removed from neededReplications
+ queue. (Tao Luo via shv)
+
BREAKDOWN OF HDFS-347 SUBTASKS AND RELATED JIRAS
HDFS-4353. Encapsulate connections to peers in Peer and PeerServer classes.
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1491672&r1=1491671&r2=1491672&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Tue Jun 11 01:40:52 2013
@@ -2869,8 +2869,9 @@ assert storedBlock.findDatanode(dn) < 0
addToInvalidates(block);
corruptReplicas.removeFromCorruptReplicasMap(block);
blocksMap.removeBlock(block);
- // Remove the block from pendingReplications
+ // Remove the block from pendingReplications and neededReplications
pendingReplications.remove(block);
+ neededReplications.remove(block, UnderReplicatedBlocks.LEVEL);
if (postponedMisreplicatedBlocks.remove(block)) {
postponedMisreplicatedBlocksCount.decrementAndGet();
}
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java?rev=1491672&r1=1491671&r2=1491672&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java Tue Jun 11 01:40:52 2013
@@ -99,16 +99,71 @@ public class TestMetaSave {
+ "metasave.out.txt";
FileInputStream fstream = new FileInputStream(logFile);
DataInputStream in = new DataInputStream(fstream);
- BufferedReader reader = new BufferedReader(new InputStreamReader(in));
- String line = reader.readLine();
- assertTrue(line.equals("3 files and directories, 2 blocks = 5 total"));
- line = reader.readLine();
- assertTrue(line.equals("Live Datanodes: 1"));
- line = reader.readLine();
- assertTrue(line.equals("Dead Datanodes: 1"));
- line = reader.readLine();
- line = reader.readLine();
- assertTrue(line.matches("^/filestatus[01]:.*"));
+ BufferedReader reader = null;
+ try {
+ reader = new BufferedReader(new InputStreamReader(in));
+ String line = reader.readLine();
+ assertTrue(line.equals(
+ "3 files and directories, 2 blocks = 5 total"));
+ line = reader.readLine();
+ assertTrue(line.equals("Live Datanodes: 1"));
+ line = reader.readLine();
+ assertTrue(line.equals("Dead Datanodes: 1"));
+ line = reader.readLine();
+ line = reader.readLine();
+ assertTrue(line.matches("^/filestatus[01]:.*"));
+ } finally {
+ if (reader != null)
+ reader.close();
+ }
+ }
+
+ /**
+ * Tests metasave after delete, to make sure there are no orphaned blocks
+ */
+ @Test
+ public void testMetasaveAfterDelete()
+ throws IOException, InterruptedException {
+
+ final FSNamesystem namesystem = cluster.getNamesystem();
+
+ for (int i = 0; i < 2; i++) {
+ Path file = new Path("/filestatus" + i);
+ createFile(fileSys, file);
+ }
+
+ cluster.stopDataNode(1);
+ // wait for namenode to discover that a datanode is dead
+ Thread.sleep(15000);
+ namesystem.setReplication("/filestatus0", (short) 4);
+ namesystem.delete("/filestatus0", true);
+ namesystem.delete("/filestatus1", true);
+
+ namesystem.metaSave("metasaveAfterDelete.out.txt");
+
+ // Verification
+ String logFile = System.getProperty("hadoop.log.dir") + "/"
+ + "metasaveAfterDelete.out.txt";
+ BufferedReader reader = null;
+ try {
+ FileInputStream fstream = new FileInputStream(logFile);
+ DataInputStream in = new DataInputStream(fstream);
+ reader = new BufferedReader(new InputStreamReader(in));
+ reader.readLine();
+ String line = reader.readLine();
+ assertTrue(line.equals("Live Datanodes: 1"));
+ line = reader.readLine();
+ assertTrue(line.equals("Dead Datanodes: 1"));
+ line = reader.readLine();
+ assertTrue(line.equals("Metasave: Blocks waiting for replication: 0"));
+ line = reader.readLine();
+ assertTrue(line.equals("Mis-replicated blocks that have been postponed:"));
+ line = reader.readLine();
+ assertTrue(line.equals("Metasave: Blocks being replicated: 0"));
+ } finally {
+ if (reader != null)
+ reader.close();
+ }
}
@AfterClass