Posted to hdfs-commits@hadoop.apache.org by sh...@apache.org on 2009/09/06 00:04:19 UTC
svn commit: r811720 - in /hadoop/hdfs/branches/HDFS-265: ./ lib/
src/java/org/apache/hadoop/hdfs/server/namenode/
src/test/hdfs/org/apache/hadoop/hdfs/
src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/
Author: shv
Date: Sat Sep 5 22:04:19 2009
New Revision: 811720
URL: http://svn.apache.org/viewvc?rev=811720&view=rev
Log:
HDFS-576. Merge -r 810334:811493 from trunk to the append branch.
Added:
hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestBlockReport.java (with props)
hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java (with props)
Modified:
hadoop/hdfs/branches/HDFS-265/CHANGES.txt
hadoop/hdfs/branches/HDFS-265/build.xml
hadoop/hdfs/branches/HDFS-265/lib/hadoop-core-0.21.0-dev.jar
hadoop/hdfs/branches/HDFS-265/lib/hadoop-core-test-0.21.0-dev.jar
hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlocksWithNotEnoughRacks.java
Modified: hadoop/hdfs/branches/HDFS-265/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/CHANGES.txt?rev=811720&r1=811719&r2=811720&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/CHANGES.txt (original)
+++ hadoop/hdfs/branches/HDFS-265/CHANGES.txt Sat Sep 5 22:04:19 2009
@@ -156,6 +156,16 @@
HDFS-581. Introduce an iterator over blocks in the block report array. (shv)
+ HDFS-549. Add a new target, run-with-fault-inject-testcaseonly, which
+ allows execution of non-FI tests in an FI-enabled environment. (Konstantin
+ Boudnik via szetszwo)
+
+ HDFS-173. Namenode no longer blocks until a large directory deletion
+ completes; it allows other operations while the deletion is in progress. (suresh)
+
+ HDFS-551. Create new functional test for a block report. (Konstantin
+ Boudnik via hairong)
+
BUG FIXES
HDFS-76. Better error message to users when commands fail because of
@@ -224,6 +234,9 @@
HDFS-15. All replicas end up on 1 rack. (Jitendra Nath Pandey via hairong)
+ HDFS-586. TestBlocksWithNotEnoughRacks sometimes fails.
+ (Jitendra Nath Pandey via hairong)
+
Release 0.20.1 - Unreleased
IMPROVEMENTS
Modified: hadoop/hdfs/branches/HDFS-265/build.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/build.xml?rev=811720&r1=811719&r2=811720&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/build.xml (original)
+++ hadoop/hdfs/branches/HDFS-265/build.xml Sat Sep 5 22:04:19 2009
@@ -358,6 +358,17 @@
<!--At this moment there's no special FI test suite thus the normal tests are -->
<!--being executed with faults injected in place-->
+ <!--This target is not included in the top-level list of targets
+ because it serves a special "regression" purpose: running non-FI tests
+ in an FI environment -->
+ <target name="run-with-fault-inject-testcaseonly">
+ <fail unless="testcase">Can't run this target without -Dtestcase setting!
+ </fail>
+ <subant buildpath="build.xml" target="run-test-hdfs-fault-inject">
+ <property name="special.fi.testcasesonly" value="yes"/>
+ </subant>
+ </target>
+
<target name="run-test-hdfs-fault-inject" depends="injectfaults"
description="Run Fault Injection related hdfs tests">
<subant buildpath="build.xml" target="run-test-hdfs">
@@ -637,6 +648,12 @@
<batchtest todir="${test.build.dir}" if="tests.testcase.fi">
<fileset dir="${test.src.dir}/aop" includes="**/${testcase}.java"/>
</batchtest>
+ <!--The following batch is for very special occasions only, when
+ non-FI tests need to be executed against an FI environment -->
+ <batchtest todir="${test.build.dir}" if="special.fi.testcasesonly">
+ <fileset dir="${test.src.dir}/aop" includes="**/${testcase}.java"/>
+ <fileset dir="${test.src.dir}/hdfs" includes="**/${testcase}.java"/>
+ </batchtest>
</junit>
<antcall target="checkfailure"/>
</sequential>
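A usage note on the new target: because it fails without -Dtestcase, a typical
invocation (using the TestBlockReport class added in this commit as the test
case) would look like

    ant run-with-fault-inject-testcaseonly -Dtestcase=TestBlockReport

which delegates to run-test-hdfs-fault-inject with special.fi.testcasesonly set,
so the named non-FI test runs with the fault-injection instrumentation in place.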
Modified: hadoop/hdfs/branches/HDFS-265/lib/hadoop-core-0.21.0-dev.jar
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/lib/hadoop-core-0.21.0-dev.jar?rev=811720&r1=811719&r2=811720&view=diff
==============================================================================
Binary files - no diff available.
Modified: hadoop/hdfs/branches/HDFS-265/lib/hadoop-core-test-0.21.0-dev.jar
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/lib/hadoop-core-test-0.21.0-dev.jar?rev=811720&r1=811719&r2=811720&view=diff
==============================================================================
Binary files - no diff available.
Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java?rev=811720&r1=811719&r2=811720&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java Sat Sep 5 22:04:19 2009
@@ -413,12 +413,13 @@
/**
* Adds block to list of blocks which will be invalidated on specified
- * datanode and log the move
+ * datanode
*
* @param b block
* @param dn datanode
+ * @param log true to create an entry in the log
*/
- void addToInvalidates(Block b, DatanodeInfo dn) {
+ void addToInvalidates(Block b, DatanodeInfo dn, boolean log) {
Collection<Block> invalidateSet = recentInvalidateSets
.get(dn.getStorageID());
if (invalidateSet == null) {
@@ -427,20 +428,39 @@
}
if (invalidateSet.add(b)) {
pendingDeletionBlocksCount++;
- NameNode.stateChangeLog.info("BLOCK* NameSystem.addToInvalidates: "
- + b.getBlockName() + " is added to invalidSet of " + dn.getName());
+ if (log) {
+ NameNode.stateChangeLog.info("BLOCK* NameSystem.addToInvalidates: "
+ + b.getBlockName() + " to " + dn.getName());
+ }
}
}
/**
+ * Adds block to list of blocks which will be invalidated on specified
+ * datanode and log the operation
+ *
+ * @param b block
+ * @param dn datanode
+ */
+ void addToInvalidates(Block b, DatanodeInfo dn) {
+ addToInvalidates(b, dn, true);
+ }
+
+ /**
* Adds block to list of blocks which will be invalidated on all its
* datanodes.
*/
private void addToInvalidates(Block b) {
+ StringBuilder datanodes = new StringBuilder();
for (Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(b); it
.hasNext();) {
DatanodeDescriptor node = it.next();
- addToInvalidates(b, node);
+ addToInvalidates(b, node, false);
+ datanodes.append(node.getName()).append(" ");
+ }
+ if (datanodes.length() != 0) {
+ NameNode.stateChangeLog.info("BLOCK* NameSystem.addToInvalidates: "
+ + b.getBlockName() + " to " + datanodes.toString());
}
}
@@ -1075,7 +1095,7 @@
// handle underReplication/overReplication
short fileReplication = fileINode.getReplication();
- if (numCurrentReplica >= fileReplication) {
+ if (!isNeededReplication(storedBlock, fileReplication, numCurrentReplica)) {
neededReplications.remove(storedBlock, numCurrentReplica,
num.decommissionedReplicas, fileReplication);
} else {
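The net effect of the addToInvalidates refactoring is one consolidated log entry
per block when a block is invalidated on all of its datanodes, instead of one
entry per datanode. For a block blk_100 replicated on two datanodes (hypothetical
names), the output would change roughly as follows:

    Before: BLOCK* NameSystem.addToInvalidates: blk_100 is added to invalidSet of host1:50010
            BLOCK* NameSystem.addToInvalidates: blk_100 is added to invalidSet of host2:50010
    After:  BLOCK* NameSystem.addToInvalidates: blk_100 to host1:50010 host2:50010

Separately, the replication check in the last hunk now delegates to
isNeededReplication() rather than comparing replica counts directly, presumably
so the decision can account for conditions beyond the raw count.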
Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=811720&r1=811719&r2=811720&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Sat Sep 5 22:04:19 2009
@@ -584,19 +584,26 @@
}
/**
- * Remove the file from management, return blocks
+ * Delete the target directory and collect the blocks under it
+ *
+ * @param src Path of a directory to delete
+ * @param collectedBlocks Blocks under the deleted directory
+ * @return true on successful deletion; else false
*/
- INode delete(String src) {
+ boolean delete(String src, List<Block>collectedBlocks) {
if (NameNode.stateChangeLog.isDebugEnabled()) {
- NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: "+src);
+ NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: " + src);
}
waitForReady();
long now = FSNamesystem.now();
- INode deletedNode = unprotectedDelete(src, now);
- if (deletedNode != null) {
- fsImage.getEditLog().logDelete(src, now);
- }
- return deletedNode;
+ INode removedNode = unprotectedDelete(src, collectedBlocks, now);
+ if (removedNode == null) {
+ return false;
+ }
+ // Blocks will be deleted later by the caller of this method
+ getFSNamesystem().removePathAndBlocks(src, null);
+ fsImage.getEditLog().logDelete(src, now);
+ return true;
}
/** Return if a directory is empty or not **/
@@ -622,12 +629,30 @@
/**
* Delete a path from the name space
* Update the count at each ancestor directory with quota
+ * <br>
+ * Note: This is to be used by {@link FSEditLog} only.
+ * <br>
+ * @param src a string representation of a path to an inode
+ * @param mtime the time the inode is removed
+ * @return deleted inode if deletion succeeds; else null
+ */
+ INode unprotectedDelete(String src, long mtime) {
+ List<Block> collectedBlocks = new ArrayList<Block>();
+ INode removedNode = unprotectedDelete(src, collectedBlocks, mtime);
+ getFSNamesystem().removePathAndBlocks(src, collectedBlocks);
+ return removedNode;
+ }
+
+ /**
+ * Delete a path from the name space
+ * Update the count at each ancestor directory with quota
* @param src a string representation of a path to an inode
- * @param modificationTime the time the inode is removed
- * @param deletedBlocks the place holder for the blocks to be removed
- * @return if the deletion succeeds
+ * @param collectedBlocks blocks collected from the deleted path
+ * @param mtime the time the inode is removed
+ * @return deleted inode if deletion succeeds; else null
*/
- INode unprotectedDelete(String src, long modificationTime) {
+ INode unprotectedDelete(String src, List<Block> collectedBlocks,
+ long mtime) {
src = normalizePath(src);
synchronized (rootDir) {
@@ -638,33 +663,34 @@
NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
+"failed to remove "+src+" because it does not exist");
return null;
- } else if (inodes.length == 1) { // src is the root
+ }
+ if (inodes.length == 1) { // src is the root
NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedDelete: " +
"failed to remove " + src +
" because the root is not allowed to be deleted");
return null;
- } else {
- try {
- // Remove the node from the namespace
- removeChild(inodes, inodes.length-1);
- // set the parent's modification time
- inodes[inodes.length-2].setModificationTime(modificationTime);
- // GC all the blocks underneath the node.
- ArrayList<Block> v = new ArrayList<Block>();
- int filesRemoved = targetNode.collectSubtreeBlocksAndClear(v);
- incrDeletedFileCount(filesRemoved);
- getFSNamesystem().removePathAndBlocks(src, v);
- if (NameNode.stateChangeLog.isDebugEnabled()) {
- NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
- +src+" is removed");
- }
- return targetNode;
- } catch(QuotaExceededException e) {
- NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedDelete: " +
- "failed to remove " + src + " because " + e.getMessage());
- return null;
- }
}
+ int pos = inodes.length - 1;
+ try {
+ // Remove the node from the namespace
+ targetNode = removeChild(inodes, pos);
+ } catch(QuotaExceededException e) {
+ NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedDelete: " +
+ "failed to remove " + src + " because " + e.getMessage());
+ return null;
+ }
+ if (targetNode == null) {
+ return null;
+ }
+ // set the parent's modification time
+ inodes[pos-1].setModificationTime(mtime);
+ int filesRemoved = targetNode.collectSubtreeBlocksAndClear(collectedBlocks);
+ incrDeletedFileCount(filesRemoved);
+ if (NameNode.stateChangeLog.isDebugEnabled()) {
+ NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
+ +src+" is removed");
+ }
+ return targetNode;
}
}
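The new delete(String, List<Block>) contract shifts block removal to the caller:
the directory entry is unlinked and the deletion is logged here, while the
collected blocks are released later, outside this method. A minimal sketch of the
calling pattern (the real caller is FSNamesystem.deleteInternal, in the
FSNamesystem.java changes below):

    List<Block> collectedBlocks = new ArrayList<Block>();
    if (dir.delete(src, collectedBlocks)) {
      // the caller now owns collectedBlocks and removes them from the
      // block manager, possibly in small batches outside the lock
    }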
Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=811720&r1=811719&r2=811720&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Sat Sep 5 22:04:19 2009
@@ -123,6 +123,7 @@
public static final Log auditLog = LogFactory.getLog(
FSNamesystem.class.getName() + ".audit");
+ static int BLOCK_DELETION_INCREMENT = 1000;
private boolean isPermissionEnabled;
private UserGroupInformation fsOwner;
private String supergroup;
@@ -1402,8 +1403,10 @@
if ((!recursive) && (!dir.isDirEmpty(src))) {
throw new IOException(src + " is non empty");
}
+ if (NameNode.stateChangeLog.isDebugEnabled()) {
+ NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + src);
+ }
boolean status = deleteInternal(src, true);
- getEditLog().logSync();
if (status && auditLog.isInfoEnabled()) {
logAuditEvent(UserGroupInformation.getCurrentUGI(),
Server.getRemoteIp(),
@@ -1413,25 +1416,68 @@
}
/**
- * Remove the indicated filename from the namespace. This may
- * invalidate some blocks that make up the file.
+ * Remove a file/directory from the namespace.
+ * <p>
+ * For large directories, deletion is incremental: the blocks under
+ * the directory are collected and deleted a small number at a time while
+ * holding the {@link FSNamesystem} lock.
+ * <p>
+ * For a small directory or a file, the deletion is done in one shot.
*/
- synchronized boolean deleteInternal(String src,
+ private boolean deleteInternal(String src,
boolean enforcePermission) throws IOException {
- if (NameNode.stateChangeLog.isDebugEnabled()) {
- NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + src);
+ boolean deleteNow = false;
+ ArrayList<Block> collectedBlocks = new ArrayList<Block>();
+ synchronized(this) {
+ if (isInSafeMode()) {
+ throw new SafeModeException("Cannot delete " + src, safeMode);
+ }
+ if (enforcePermission && isPermissionEnabled) {
+ checkPermission(src, false, null, FsAction.WRITE, null, FsAction.ALL);
+ }
+ // Unlink the target directory from directory tree
+ if (!dir.delete(src, collectedBlocks)) {
+ return false;
+ }
+ deleteNow = collectedBlocks.size() <= BLOCK_DELETION_INCREMENT;
+ if (deleteNow) { // Perform small deletes right away
+ removeBlocks(collectedBlocks);
+ }
}
- if (isInSafeMode())
- throw new SafeModeException("Cannot delete " + src, safeMode);
- if (enforcePermission && isPermissionEnabled) {
- checkPermission(src, false, null, FsAction.WRITE, null, FsAction.ALL);
+ // Log directory deletion to editlog
+ getEditLog().logSync();
+ if (!deleteNow) {
+ removeBlocks(collectedBlocks); // Incremental deletion of blocks
}
-
- return dir.delete(src) != null;
+ collectedBlocks.clear();
+ if (NameNode.stateChangeLog.isDebugEnabled()) {
+ NameNode.stateChangeLog.debug("DIR* Namesystem.delete: "
+ + src +" is removed");
+ }
+ return true;
}
+ /** From the given list, incrementally remove the blocks from blockManager */
+ private void removeBlocks(List<Block> blocks) {
+ int start = 0;
+ int end = 0;
+ while (start < blocks.size()) {
+ end = BLOCK_DELETION_INCREMENT + start;
+ end = end > blocks.size() ? blocks.size() : end;
+ synchronized(this) {
+ for (int i=start; i<end; i++) {
+ blockManager.removeBlock(blocks.get(i));
+ }
+ }
+ start = end;
+ }
+ }
+
void removePathAndBlocks(String src, List<Block> blocks) {
leaseManager.removeLeaseWithPrefixPath(src);
+ if (blocks == null) {
+ return;
+ }
for(Block b : blocks) {
blockManager.removeBlock(b);
}
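With BLOCK_DELETION_INCREMENT at its default of 1000, removeBlocks() takes and
releases the FSNamesystem lock once per batch. For example, deleting a directory
tree containing 10,500 blocks results in eleven short synchronized sections (ten
of 1,000 blocks and one of 500), with other namenode operations free to run in
between; a file or small directory of at most 1,000 blocks is still deleted in
one shot while the lock is held.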
Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java?rev=811720&r1=811719&r2=811720&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java Sat Sep 5 22:04:19 2009
@@ -112,8 +112,9 @@
int collectSubtreeBlocksAndClear(List<Block> v) {
parent = null;
- for (Block blk : blocks) {
+ for (BlockInfo blk : blocks) {
v.add(blk);
+ blk.setINode(null);
}
blocks = null;
return 1;
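Clearing the back-pointer with blk.setINode(null) matters for the incremental
delete path above: a collected block can now linger in the blocksMap until its
batch is processed by removeBlocks(), and nulling the INode reference ensures it
no longer resolves to the deleted file in the meantime.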
Added: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestBlockReport.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestBlockReport.java?rev=811720&view=auto
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestBlockReport.java (added)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestBlockReport.java Sat Sep 5 22:04:19 2009
@@ -0,0 +1,270 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.log4j.Level;
+import org.junit.After;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.FilenameFilter;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+
+/**
+ * This test simulates a variety of situations in which blocks are intentionally
+ * corrupted, unexpectedly modified, and so on before a block report happens
+ */
+public class TestBlockReport {
+ public static final Log LOG = LogFactory.getLog(TestBlockReport.class);
+
+ private static final short REPL_FACTOR = 1;
+ private static final int RAND_LIMIT = 2000;
+ private static final long DN_RESCAN_INTERVAL = 5000;
+ private static final long DN_RESCAN_EXTRA_WAIT = 2 * DN_RESCAN_INTERVAL;
+ private static final int DN_N0 = 0;
+ private static final int FILE_START = 0;
+
+ private MiniDFSCluster cluster;
+ private DistributedFileSystem fs;
+
+ Random rand = new Random(RAND_LIMIT);
+
+ private static Configuration conf;
+
+ static {
+ conf = new Configuration();
+ int customPerChecksumSize = 512;
+ int customBlockSize = customPerChecksumSize * 3;
+ conf.setInt("io.bytes.per.checksum", customPerChecksumSize);
+ conf.setLong("dfs.block.size", customBlockSize);
+ conf.setLong("dfs.datanode.directoryscan.interval", DN_RESCAN_INTERVAL);
+ }
+
+ @Before
+ public void startUpCluster() throws IOException {
+ cluster = new MiniDFSCluster(conf, REPL_FACTOR, true, null);
+ fs = (DistributedFileSystem) cluster.getFileSystem();
+ }
+
+ @After
+ public void shutDownCluster() throws IOException {
+ fs.close();
+ cluster.shutdownDataNodes();
+ cluster.shutdown();
+ }
+
+ /**
+ * Writes a file, verifies it, and closes it. Then the lengths of the blocks
+ * are messed up and a block report is forced.
+ * The block lengths in the NN's memory should be the same as those set by the DN
+ */
+ @Test
+ public void messWithBlocksLen() throws IOException {
+ final String METHOD_NAME = "messWithBlocksLen";
+ LOG.info("Running test " + METHOD_NAME);
+
+ Path filePath = new Path("/" + METHOD_NAME + ".dat");
+ DFSTestUtil.createFile(fs, filePath,
+ (long)AppendTestUtil.FILE_SIZE, REPL_FACTOR, rand.nextLong());
+
+ // Mess with the newly created blocks.
+ // DFSTestUtil.getAllBlocks(fs.open(filePath)) can't be used here because it
+ // would keep the file open, which would defeat the purpose of the test
+ Block[] blocks = locatedToBlocks(cluster.getNameNode().getBlockLocations(
+ filePath.toString(), FILE_START,
+ AppendTestUtil.FILE_SIZE).getLocatedBlocks(), null);
+
+ LOG.info("Number of blocks allocated " + blocks.length);
+ int[] newLengths = new int[blocks.length];
+ int tempLen;
+ for (int i = 0; i < blocks.length; i++) {
+ Block b = blocks[i];
+ LOG.debug("Block " + b.getBlockName() + " before\t" + "Size " +
+ b.getNumBytes());
+ LOG.debug("Setting new length");
+ tempLen = rand.nextInt(AppendTestUtil.BLOCK_SIZE);
+ b.set(b.getBlockId(), tempLen, b.getGenerationStamp());
+ LOG.debug("Block " + b.getBlockName() + " after\t " + "Size " +
+ b.getNumBytes());
+ newLengths[i] = tempLen;
+ }
+ cluster.getNameNode().blockReport(
+ cluster.listDataNodes()[DN_N0].dnRegistration,
+ BlockListAsLongs.convertToArrayLongs(blocks));
+
+ List<LocatedBlock> blocksAfterReport =
+ DFSTestUtil.getAllBlocks(fs.open(filePath));
+
+ LOG.info("After mods: Number of blocks allocated " +
+ blocksAfterReport.size());
+
+ for (int i = 0; i < blocksAfterReport.size(); i++) {
+ Block b = blocksAfterReport.get(i).getBlock();
+ assertEquals("Length of " + i + "th block is incorrect",
+ newLengths[i], b.getNumBytes());
+ }
+ }
+
+ /**
+ * Writes a file, verifies it, and closes it. Then a couple of random blocks
+ * are removed and a block report is forced; the FSNamesystem is pushed to
+ * recalculate the required DN activities, such as replications.
+ * The number of missing and under-replicated blocks should be the same in
+ * case of a single-DN cluster.
+ */
+ @Test
+ public void messWithBlockReplication() throws IOException {
+ final String METHOD_NAME = "messWithBlockReplication";
+ LOG.info("Running test " + METHOD_NAME);
+
+ Path filePath = new Path("/" + METHOD_NAME + ".dat");
+ DFSTestUtil.createFile(fs, filePath,
+ (long)AppendTestUtil.FILE_SIZE, REPL_FACTOR, rand.nextLong());
+
+ // Mess around with the newly created blocks and delete some of them
+ String testDataDirectory = System.getProperty("test.build.data");
+
+ File dataDir = new File(testDataDirectory);
+ assertTrue(dataDir.isDirectory());
+
+ List<Block> blocks2Remove = new ArrayList<Block>();
+ List<Integer> removedIndex = new ArrayList<Integer>();
+ List<LocatedBlock> lBlocks = cluster.getNameNode().getBlockLocations(
+ filePath.toString(), FILE_START,
+ AppendTestUtil.FILE_SIZE).getLocatedBlocks();
+
+ while (removedIndex.size() != 2) {
+ int newRemoveIndex = rand.nextInt(lBlocks.size());
+ if (!removedIndex.contains(newRemoveIndex))
+ removedIndex.add(newRemoveIndex);
+ }
+
+ for (Integer aRemovedIndex : removedIndex) {
+ blocks2Remove.add(lBlocks.get(aRemovedIndex).getBlock());
+ }
+ Block[] blocks = locatedToBlocks(lBlocks, removedIndex);
+
+ LOG.debug("Number of blocks allocated " + lBlocks.size());
+
+ for (Block b : blocks2Remove) {
+ LOG.debug("Removing the block " + b.getBlockName());
+ for (File f : findAllFiles(dataDir, new MyFileFilter(b.getBlockName()))) {
+ cluster.listDataNodes()[DN_N0].getFSDataset().unfinalizeBlock(b);
+ if (!f.delete())
+ LOG.warn("Couldn't delete " + b.getBlockName());
+ }
+ }
+
+ try { // Wait until the next re-scan
+ Thread.sleep(DN_RESCAN_EXTRA_WAIT);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+
+ cluster.getNameNode().blockReport(
+ cluster.listDataNodes()[DN_N0].dnRegistration,
+ BlockListAsLongs.convertToArrayLongs(blocks));
+
+ cluster.getNamesystem().computeDatanodeWork();
+
+ // Expect to see blocks2Remove.size() blocks reported as under-replicated
+ LOG.debug("Missing " + cluster.getNamesystem().getMissingBlocksCount());
+ LOG.debug("Corrupted " + cluster.getNamesystem().getCorruptReplicaBlocks());
+ LOG.debug("Under-replicated " + cluster.getNamesystem().
+ getUnderReplicatedBlocks());
+ LOG.debug("Pending delete " + cluster.getNamesystem().
+ getPendingDeletionBlocks());
+ LOG.debug("Pending replications " + cluster.getNamesystem().
+ getPendingReplicationBlocks());
+ LOG.debug("Excess " + cluster.getNamesystem().getExcessBlocks());
+ LOG.debug("Total " + cluster.getNamesystem().getBlocksTotal());
+
+ assertEquals("Wrong number of MissingBlocks is found",
+ blocks2Remove.size(), cluster.getNamesystem().getMissingBlocksCount());
+ assertEquals("Wrong number of UnderReplicatedBlocks is found",
+ blocks2Remove.size(), cluster.getNamesystem().getUnderReplicatedBlocks());
+ }
+
+ private Block[] locatedToBlocks(final List<LocatedBlock> locatedBlks,
+ List<Integer> positionsToRemove) {
+ int substructLen = 0;
+ if (positionsToRemove != null) { // Need to allocate a smaller array
+ substructLen = positionsToRemove.size();
+ }
+ Block[] ret = new Block[locatedBlks.size() - substructLen];
+ ArrayList<Block> newList = new ArrayList<Block>();
+ for (int i = 0; i < locatedBlks.size(); i++) {
+ if (positionsToRemove != null && positionsToRemove.contains(i)) {
+ LOG.debug(i + " block to be omitted");
+ continue;
+ }
+ newList.add(locatedBlks.get(i).getBlock());
+ }
+ return newList.toArray(ret);
+ }
+
+ private List<File> findAllFiles(File top, FilenameFilter mask) {
+ if (top == null) return null;
+ ArrayList<File> ret = new ArrayList<File>();
+ for (File f : top.listFiles()) {
+ if (f.isDirectory())
+ ret.addAll(findAllFiles(f, mask));
+ else if (mask.accept(f, f.getName()))
+ ret.add(f);
+ }
+ return ret;
+ }
+
+ private class MyFileFilter implements FilenameFilter {
+ private String nameToAccept = "";
+
+ public MyFileFilter(String nameToAccept) {
+ if (nameToAccept == null)
+ throw new IllegalArgumentException("Argument is not supposed to be null");
+ this.nameToAccept = nameToAccept;
+ }
+
+ public boolean accept(File file, String s) {
+ return s != null && s.contains(nameToAccept);
+ }
+ }
+
+ private static void initLoggers () {
+ ((Log4JLogger) NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
+ ((Log4JLogger) FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+ ((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.ALL);
+ ((Log4JLogger) TestBlockReport.LOG).getLogger().setLevel(Level.ALL);
+ }
+}
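One loose end in this version of the test: initLoggers() is defined but never
invoked, so the verbose log levels it configures are never applied. To take
effect it would typically be wired in from a static initializer, e.g. (a sketch,
not part of this commit):

    static {
      initLoggers();
    }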
Propchange: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestBlockReport.java
------------------------------------------------------------------------------
svn:mime-type = text/plain
Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlocksWithNotEnoughRacks.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlocksWithNotEnoughRacks.java?rev=811720&r1=811719&r2=811720&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlocksWithNotEnoughRacks.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlocksWithNotEnoughRacks.java Sat Sep 5 22:04:19 2009
@@ -29,6 +29,8 @@
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.log4j.Level;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import junit.framework.TestCase;
@@ -38,6 +40,8 @@
((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL) ;
}
+ private static final Log LOG =
+ LogFactory.getLog(TestBlocksWithNotEnoughRacks.class.getName());
//Creates a block with all datanodes on same rack
//Adds additional datanode on a different rack
//The block should be replicated to the new rack
@@ -61,20 +65,29 @@
Block b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
final FSNamesystem namesystem = cluster.getNamesystem();
int numRacks = namesystem.blockManager.getNumberOfRacks(b);
+ NumberReplicas number = namesystem.blockManager.countNodes(b);
+ int curReplicas = number.liveReplicas();
+ int neededReplicationSize =
+ namesystem.blockManager.neededReplications.size();
//Add a new datanode on a different rack
String newRacks[] = {"/rack2"} ;
cluster.startDataNodes(conf, 1, true, null, newRacks);
-
- Thread.sleep(5000);
-
- numRacks = namesystem.blockManager.getNumberOfRacks(b);
- NumberReplicas number = namesystem.blockManager.countNodes(b);
- int curReplicas = number.liveReplicas();
- System.out.println("curReplicas = " + curReplicas);
- System.out.println("numRacks = " + numRacks);
- System.out.println("Size = " + namesystem.blockManager.neededReplications.size());
+ while ( (numRacks < 2) || (curReplicas < REPLICATION_FACTOR) ||
+ (neededReplicationSize > 0) ) {
+ LOG.info("Waiting for replication");
+ Thread.sleep(600);
+ numRacks = namesystem.blockManager.getNumberOfRacks(b);
+ number = namesystem.blockManager.countNodes(b);
+ curReplicas = number.liveReplicas();
+ neededReplicationSize =
+ namesystem.blockManager.neededReplications.size();
+ }
+
+ LOG.info("curReplicas = " + curReplicas);
+ LOG.info("numRacks = " + numRacks);
+ LOG.info("Size = " + namesystem.blockManager.neededReplications.size());
assertEquals(2,numRacks);
assertTrue(curReplicas == REPLICATION_FACTOR);
@@ -106,22 +119,31 @@
Block b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
final FSNamesystem namesystem = cluster.getNamesystem();
int numRacks = namesystem.blockManager.getNumberOfRacks(b);
+ NumberReplicas number = namesystem.blockManager.countNodes(b);
+ int curReplicas = number.liveReplicas();
+ int neededReplicationSize =
+ namesystem.blockManager.neededReplications.size();
//Add a new datanode on a different rack
- String newRacks[] = {"/rack2","/rack2"} ;
- cluster.startDataNodes(conf, 2, true, null, newRacks);
+ String newRacks[] = {"/rack2","/rack2","/rack2"} ;
+ cluster.startDataNodes(conf, 3, true, null, newRacks);
REPLICATION_FACTOR = 5;
namesystem.setReplication(FILE_NAME, REPLICATION_FACTOR);
- Thread.sleep(30000);
-
-
- numRacks = namesystem.blockManager.getNumberOfRacks(b);
- NumberReplicas number = namesystem.blockManager.countNodes(b);
- int curReplicas = number.liveReplicas();
- System.out.println("curReplicas = " + curReplicas);
- System.out.println("numRacks = " + numRacks);
- System.out.println("Size = " + namesystem.blockManager.neededReplications.size());
+ while ( (numRacks < 2) || (curReplicas < REPLICATION_FACTOR) ||
+ (neededReplicationSize > 0) ) {
+ LOG.info("Waiting for replication");
+ Thread.sleep(600);
+ numRacks = namesystem.blockManager.getNumberOfRacks(b);
+ number = namesystem.blockManager.countNodes(b);
+ curReplicas = number.liveReplicas();
+ neededReplicationSize =
+ namesystem.blockManager.neededReplications.size();
+ }
+
+ LOG.info("curReplicas = " + curReplicas);
+ LOG.info("numRacks = " + numRacks);
+ LOG.info("Size = " + namesystem.blockManager.neededReplications.size());
assertEquals(2,numRacks);
assertTrue(curReplicas == REPLICATION_FACTOR);
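The fixed Thread.sleep(5000) and Thread.sleep(30000) waits were the likely
source of the intermittent failures addressed by HDFS-586: on a slow machine
replication may not finish in time. The replacement polls cluster state until
the expected condition holds. A minimal generic sketch of the same pattern
(hypothetical helper, not part of this commit):

    // Hypothetical helper: poll until a condition holds instead of
    // sleeping for a fixed interval.
    interface Condition { boolean reached(); }

    static void waitFor(Condition cond, long sleepMsecs)
        throws InterruptedException {
      while (!cond.reached()) {
        Thread.sleep(sleepMsecs);
      }
    }

Note that the loops above have no upper bound; if replication never completes,
they spin until the surrounding test harness times out.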
Added: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java?rev=811720&view=auto
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java (added)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java Sat Sep 5 22:04:19 2009
@@ -0,0 +1,152 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.Assert;
+import org.junit.Test;
+
+
+/**
+ * Ensure that during a large directory delete the namenode does not block
+ * until the deletion completes, and continues to handle requests from other clients
+ */
+public class TestLargeDirectoryDelete {
+ private static final Log LOG = LogFactory.getLog(TestLargeDirectoryDelete.class);
+ private static final Configuration CONF = new Configuration();
+ private static final int TOTAL_BLOCKS = 10000;
+ private MiniDFSCluster mc = null;
+ private int createOps = 0;
+ private int lockOps = 0;
+
+ static {
+ CONF.setLong("dfs.block.size", 1);
+ CONF.setInt("io.bytes.per.checksum", 1);
+ }
+
+ /** create a file with a length of <code>filelen</code> */
+ private void createFile(final String fileName, final long filelen) throws IOException {
+ FileSystem fs = mc.getFileSystem();
+ Path filePath = new Path(fileName);
+ DFSTestUtil.createFile(fs, filePath, filelen, (short) 1, 0);
+ }
+
+ /** Create a large number of directories and files */
+ private void createFiles() throws IOException {
+ Random rand = new Random();
+ // Create files in a directory with random depth
+ // ranging from 0-10.
+ for (int i = 0; i < TOTAL_BLOCKS; i+=100) {
+ String filename = "/root/";
+ int dirs = rand.nextInt(10); // Depth of the directory
+ for (int j=i; j >=(i-dirs); j--) {
+ filename += j + "/";
+ }
+ filename += "file" + i;
+ createFile(filename, 100);
+ }
+ }
+
+ private int getBlockCount() {
+ return (int)mc.getNamesystem().getBlocksTotal();
+ }
+
+ /** Run multiple threads doing simultaneous operations on the namenode
+ * while a large directory is being deleted.
+ */
+ private void runThreads() throws IOException {
+ final Thread threads[] = new Thread[2];
+
+ // Thread for creating files
+ threads[0] = new Thread() {
+ @Override
+ public void run() {
+ while(true) {
+ try {
+ int blockcount = getBlockCount();
+ if (blockcount < TOTAL_BLOCKS && blockcount > 0) {
+ String file = "/tmp" + createOps;
+ createFile(file, 1);
+ mc.getFileSystem().delete(new Path(file), true);
+ createOps++;
+ }
+ } catch (IOException ex) {
+ LOG.info("createFile exception ", ex);
+ break;
+ }
+ }
+ }
+ };
+
+ // Thread that periodically acquires the FSNamesystem lock
+ threads[1] = new Thread() {
+ @Override
+ public void run() {
+ while(true) {
+ try {
+ int blockcount = getBlockCount();
+ if (blockcount < TOTAL_BLOCKS && blockcount > 0) {
+ synchronized(mc.getNamesystem()) {
+ lockOps++;
+ }
+ Thread.sleep(1);
+ }
+ } catch (InterruptedException ex) {
+ LOG.info("lockOperation exception ", ex);
+ break;
+ }
+ }
+ }
+ };
+ threads[0].start();
+ threads[1].start();
+
+ final long start = System.currentTimeMillis();
+ FSNamesystem.BLOCK_DELETION_INCREMENT = 1;
+ mc.getFileSystem().delete(new Path("/root"), true); // recursive delete
+ final long end = System.currentTimeMillis();
+ threads[0].interrupt();
+ threads[1].interrupt();
+ LOG.info("Deletion took " + (end - start) + "msecs");
+ LOG.info("createOperations " + createOps);
+ LOG.info("lockOperations " + lockOps);
+ Assert.assertTrue(lockOps + createOps > 0);
+ }
+
+ @Test
+ public void largeDelete() throws IOException, InterruptedException {
+ mc = new MiniDFSCluster(CONF, 1, true, null);
+ try {
+ mc.waitActive();
+ createFiles();
+ Assert.assertEquals(TOTAL_BLOCKS, getBlockCount());
+ runThreads();
+ } finally {
+ mc.shutdown();
+ }
+ }
+}
\ No newline at end of file
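The test forces FSNamesystem.BLOCK_DELETION_INCREMENT down to 1 so the lock is
released after every single block removal, maximizing the opportunity for the
two background threads to interleave with the delete; the final assertion that
lockOps + createOps > 0 is the evidence that the namenode serviced other work
while the large delete was in progress.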
Propchange: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java
------------------------------------------------------------------------------
svn:mime-type = text/plain