Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2009/12/04 23:50:52 UTC
svn commit: r887413 - in /hadoop/hdfs/trunk: CHANGES.txt src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
Author: suresh
Date: Fri Dec 4 22:50:52 2009
New Revision: 887413
URL: http://svn.apache.org/viewvc?rev=887413&view=rev
Log:
HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented. Contributed by Suresh Srinivas.
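[Editor's note] For readers skimming the diff below: the bug is a bookkeeping imbalance. The namenode raises the PendingDeletionBlocks gauge when block replicas are queued for deletion, but before this change nothing lowered it when the queued work was handed off to a datanode, so the metric only ever grew. A minimal, self-contained sketch of the invariant the one-line fix restores (an illustrative class only, not the actual BlockManager code):

    // Sketch: a gauge that must shrink when queued deletion work is dispatched.
    class PendingDeletionGauge {
      private long pendingDeletionBlocksCount;

      // Queuing side: replicas scheduled for deletion raise the gauge.
      synchronized void blocksQueuedForDeletion(int count) {
        pendingDeletionBlocksCount += count;
      }

      // Dispatch side: HDFS-781 adds the matching decrement when queued
      // blocks are sent to a datanode; without it the gauge never drains.
      synchronized int blocksDispatched(java.util.List<?> blocksToInvalidate) {
        pendingDeletionBlocksCount -= blocksToInvalidate.size();
        return blocksToInvalidate.size();
      }

      synchronized long get() {
        return pendingDeletionBlocksCount;
      }
    }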
Modified:
hadoop/hdfs/trunk/CHANGES.txt
hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=887413&r1=887412&r2=887413&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Fri Dec 4 22:50:52 2009
@@ -545,6 +545,9 @@
HDFS-691. Fix an overflow error in DFSClient.DFSInputStream.available().
(szetszwo)
+ HDFS-781. Namenode metrics PendingDeletionBlocks is not decremented.
+ (Suresh)
+
Release 0.20.2 - Unreleased
IMPROVEMENTS
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java?rev=887413&r1=887412&r2=887413&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java Fri Dec 4 22:50:52 2009
@@ -41,7 +41,6 @@
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas;
import org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks.BlockIterator;
import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.mortbay.log.Log;
/**
* Keeps information related to the blocks stored in the Hadoop cluster.
@@ -1614,6 +1613,7 @@
NameNode.stateChangeLog.info("BLOCK* ask " + dn.getName()
+ " to delete " + blockList);
}
+ pendingDeletionBlocksCount -= blocksToInvalidate.size();
return blocksToInvalidate.size();
}
}
Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java?rev=887413&r1=887412&r2=887413&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java Fri Dec 4 22:50:52 2009
@@ -39,11 +39,19 @@
*/
public class TestNameNodeMetrics extends TestCase {
private static final Configuration CONF = new HdfsConfiguration();
+ private static final int DFS_REPLICATION_INTERVAL = 1;
+ private static final Path TEST_ROOT_DIR_PATH =
+ new Path(System.getProperty("test.build.data", "build/test/data"));
+
+ // Number of datanodes in the cluster
+ private static final int DATANODE_COUNT = 3;
static {
CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 100);
CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1);
- CONF.setLong("dfs.heartbeat.interval", 1L);
- CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
+ CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
+ DFS_REPLICATION_INTERVAL);
+ CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
+ DFS_REPLICATION_INTERVAL);
}
private MiniDFSCluster cluster;
@@ -52,9 +60,13 @@
private Random rand = new Random();
private FSNamesystem namesystem;
+ private static Path getTestPath(String fileName) {
+ return new Path(TEST_ROOT_DIR_PATH, fileName);
+ }
+
@Override
protected void setUp() throws Exception {
- cluster = new MiniDFSCluster(CONF, 3, true, null);
+ cluster = new MiniDFSCluster(CONF, DATANODE_COUNT, true, null);
cluster.waitActive();
namesystem = cluster.getNamesystem();
fs = (DistributedFileSystem) cluster.getFileSystem();
@@ -67,9 +79,8 @@
}
/** create a file with a length of <code>fileLen</code> */
- private void createFile(String fileName, long fileLen, short replicas) throws IOException {
- Path filePath = new Path(fileName);
- DFSTestUtil.createFile(fs, filePath, fileLen, replicas, rand.nextLong());
+ private void createFile(Path file, long fileLen, short replicas) throws IOException {
+ DFSTestUtil.createFile(fs, file, fileLen, replicas, rand.nextLong());
}
private void updateMetrics() throws Exception {
@@ -82,7 +93,7 @@
/** Test metrics associated with addition of a file */
public void testFileAdd() throws Exception {
// Add files with 100 blocks
- final String file = "/tmp/t";
+ final Path file = getTestPath("testFileAdd");
createFile(file, 3200, (short)3);
final int blockCount = 32;
int blockCapacity = namesystem.getBlockCapacity();
@@ -96,27 +107,37 @@
blockCapacity <<= 1;
}
updateMetrics();
- assertEquals(3, metrics.filesTotal.get());
+ int filesTotal = file.depth() + 1; // Add 1 for root
+ assertEquals(filesTotal, metrics.filesTotal.get());
assertEquals(blockCount, metrics.blocksTotal.get());
assertEquals(blockCapacity, metrics.blockCapacity.get());
- fs.delete(new Path(file), true);
+ fs.delete(file, true);
+ filesTotal--; // reduce the filecount for deleted file
+
+ // Wait for more than DATANODE_COUNT replication intervals to ensure all
+ // the blocks pending deletion are sent for deletion to the datanodes.
+ Thread.sleep(DFS_REPLICATION_INTERVAL * (DATANODE_COUNT + 1) * 1000);
+ updateMetrics();
+ assertEquals(filesTotal, metrics.filesTotal.get());
+ assertEquals(0, metrics.blocksTotal.get());
+ assertEquals(0, metrics.pendingDeletionBlocks.get());
}
/** Corrupt a block and ensure metrics reflects it */
public void testCorruptBlock() throws Exception {
// Create a file with single block with two replicas
- String file = "/tmp/t";
+ final Path file = getTestPath("testCorruptBlock");
createFile(file, 100, (short)2);
// Corrupt first replica of the block
LocatedBlock block = NameNodeAdapter.getBlockLocations(
- cluster.getNameNode(), file, 0, 1).get(0);
+ cluster.getNameNode(), file.toString(), 0, 1).get(0);
namesystem.markBlockAsCorrupt(block.getBlock(), block.getLocations()[0]);
updateMetrics();
assertEquals(1, metrics.corruptBlocks.get());
assertEquals(1, metrics.pendingReplicationBlocks.get());
assertEquals(1, metrics.scheduledReplicationBlocks.get());
- fs.delete(new Path(file), true);
+ fs.delete(file, true);
updateMetrics();
assertEquals(0, metrics.corruptBlocks.get());
assertEquals(0, metrics.pendingReplicationBlocks.get());
@@ -127,30 +148,29 @@
* for a file and ensure metrics reflects it
*/
public void testExcessBlocks() throws Exception {
- String file = "/tmp/t";
+ Path file = getTestPath("testExcessBlocks");
createFile(file, 100, (short)2);
int totalBlocks = 1;
- namesystem.setReplication(file, (short)1);
+ namesystem.setReplication(file.toString(), (short)1);
updateMetrics();
assertEquals(totalBlocks, metrics.excessBlocks.get());
- assertEquals(totalBlocks, metrics.pendingDeletionBlocks.get());
- fs.delete(new Path(file), true);
+ fs.delete(file, true);
}
/** Test to ensure metrics reflects missing blocks */
public void testMissingBlock() throws Exception {
// Create a file with single block with two replicas
- String file = "/tmp/t";
+ Path file = getTestPath("testMissingBlocks");
createFile(file, 100, (short)1);
// Corrupt the only replica of the block to result in a missing block
LocatedBlock block = NameNodeAdapter.getBlockLocations(
- cluster.getNameNode(), file, 0, 1).get(0);
+ cluster.getNameNode(), file.toString(), 0, 1).get(0);
namesystem.markBlockAsCorrupt(block.getBlock(), block.getLocations()[0]);
updateMetrics();
assertEquals(1, metrics.underReplicatedBlocks.get());
assertEquals(1, metrics.missingBlocks.get());
- fs.delete(new Path(file), true);
+ fs.delete(file, true);
updateMetrics();
assertEquals(0, metrics.underReplicatedBlocks.get());
}
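[Editor's note] One observation on testFileAdd above: the fixed Thread.sleep assumes the invalidation work for all DATANODE_COUNT datanodes is dispatched within (DATANODE_COUNT + 1) replication intervals. A hypothetical helper (not part of this patch) that polls the metric instead, reusing the names already defined in TestNameNodeMetrics, might look roughly like:

    // Hypothetical alternative to the fixed sleep: poll until the gauge
    // drains or a deadline passes. Assumes the updateMetrics() method and
    // the metrics field defined in the test above.
    private void waitForZeroPendingDeletionBlocks(long timeoutMillis)
        throws Exception {
      long deadline = System.currentTimeMillis() + timeoutMillis;
      while (System.currentTimeMillis() < deadline) {
        updateMetrics();
        if (metrics.pendingDeletionBlocks.get() == 0) {
          return;
        }
        Thread.sleep(DFS_REPLICATION_INTERVAL * 1000L / 2);
      }
      fail("blocks still pending deletion after " + timeoutMillis + " ms");
    }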