Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2010/09/14 02:25:37 UTC
svn commit: r996727 [2/2] - in /hadoop/hdfs/branches/HDFS-1052: ./
src/c++/libhdfs/ src/contrib/hdfsproxy/ src/java/
src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/protocol/
src/java/org/apache/hadoop/hdfs/security/token/block/ src/ja...
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java?rev=996727&r1=996726&r2=996727&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java Tue Sep 14 00:25:35 2010
@@ -22,12 +22,7 @@ import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.RandomAccessFile;
-import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel;
import java.util.ArrayList;
-import java.util.Random;
import junit.framework.TestCase;
@@ -35,10 +30,8 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ChecksumException;
-import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
@@ -189,172 +182,4 @@ public class TestFileCorruption extends
// TODO:FEDERATION cleanup when BlockPoolID support in Datanode is complete
return new ExtendedBlock("TODO", blockId, blocks[idx].length(), blockTimeStamp);
}
-
- /** check if ClientProtocol.getCorruptFiles() returns a file that has missing blocks */
- public void testCorruptFilesMissingBlock() throws Exception {
- MiniDFSCluster cluster = null;
- try {
- Configuration conf = new HdfsConfiguration();
- conf.setInt("dfs.datanode.directoryscan.interval", 1); // datanode scans directories
- conf.setInt("dfs.blockreport.intervalMsec", 3 * 1000); // datanode sends block reports
- cluster = new MiniDFSCluster(conf, 1, true, null);
- FileSystem fs = cluster.getFileSystem();
-
- // create two files with one block each
- DFSTestUtil util = new DFSTestUtil("testCorruptFilesMissingBlock", 2, 1, 512);
- util.createFiles(fs, "/srcdat");
-
- // verify that there are no bad blocks.
- ClientProtocol namenode = DFSClient.createNamenode(conf);
- FileStatus[] badFiles = namenode.getCorruptFiles();
- assertTrue("Namenode has " + badFiles.length + " corrupt files. Expecting none.",
- badFiles.length == 0);
-
- // Now deliberately remove one block
- File data_dir = new File(System.getProperty("test.build.data"),
- "dfs/data/data1/current/finalized");
- assertTrue("data directory does not exist", data_dir.exists());
- File[] blocks = data_dir.listFiles();
- assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
- for (int idx = 0; idx < blocks.length; idx++) {
- if (!blocks[idx].getName().startsWith("blk_")) {
- continue;
- }
- LOG.info("Deliberately removing file "+blocks[idx].getName());
- assertTrue("Cannot remove file.", blocks[idx].delete());
- break;
- }
-
- badFiles = namenode.getCorruptFiles();
- while (badFiles.length == 0) {
- Thread.sleep(1000);
- badFiles = namenode.getCorruptFiles();
- }
- LOG.info("Namenode has bad files. " + badFiles.length);
- assertTrue("Namenode has " + badFiles.length + " bad files. Expecting 1.",
- badFiles.length == 1);
- util.cleanup(fs, "/srcdat");
- } finally {
- if (cluster != null) { cluster.shutdown(); }
- }
- }
-
- /** check if ClientProtocol.getCorruptFiles() returns the right limit */
- public void testMaxCorruptFiles() throws Exception {
- MiniDFSCluster cluster = null;
- try {
- Configuration conf = new HdfsConfiguration();
- conf.setInt("dfs.corruptfilesreturned.max", 2);
- conf.setInt("dfs.datanode.directoryscan.interval", 1); // datanode scans directories
- conf.setInt("dfs.blockreport.intervalMsec", 3 * 1000); // datanode sends block reports
- cluster = new MiniDFSCluster(conf, 1, true, null);
- FileSystem fs = cluster.getFileSystem();
-
- // create two files with one block each
- DFSTestUtil util = new DFSTestUtil("testMaxCorruptFiles", 4, 1, 512);
- util.createFiles(fs, "/srcdat2");
-
- // verify that there are no bad blocks.
- ClientProtocol namenode = DFSClient.createNamenode(conf);
- FileStatus[] badFiles = namenode.getCorruptFiles();
- assertTrue("Namenode has " + badFiles.length + " corrupt files. Expecting none.",
- badFiles.length == 0);
-
- // Now deliberately remove one block
- File data_dir = new File(System.getProperty("test.build.data"),
- "dfs/data/data1/current/finalized");
- assertTrue("data directory does not exist", data_dir.exists());
- File[] blocks = data_dir.listFiles();
- assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
- for (int idx = 0; idx < blocks.length; idx++) {
- if (!blocks[idx].getName().startsWith("blk_")) {
- continue;
- }
- LOG.info("Deliberately removing file "+blocks[idx].getName());
- assertTrue("Cannot remove file.", blocks[idx].delete());
- }
-
- badFiles = namenode.getCorruptFiles();
- while (badFiles.length < 2) {
- badFiles = namenode.getCorruptFiles();
- Thread.sleep(10000);
- }
- badFiles = namenode.getCorruptFiles(); // once more since time has passed
- LOG.info("Namenode has bad files. " + badFiles.length);
- assertTrue("Namenode has " + badFiles.length + " bad files. Expecting 2.",
- badFiles.length == 2);
- util.cleanup(fs, "/srcdat2");
- } finally {
- if (cluster != null) { cluster.shutdown(); }
- }
- }
-
- /** check if ClientProtocol.getCorruptFiles() returns a file that has corrupted blocks */
- public void testCorruptFilesCorruptedBlock() throws Exception {
- MiniDFSCluster cluster = null;
- Random random = new Random();
-
- try {
- Configuration conf = new HdfsConfiguration();
- conf.setInt("dfs.datanode.directoryscan.interval", 1); // datanode scans directories
- conf.setInt("dfs.blockreport.intervalMsec", 3 * 1000); // datanode sends block reports
- cluster = new MiniDFSCluster(conf, 1, true, null);
- FileSystem fs = cluster.getFileSystem();
-
- // create two files with one block each
- DFSTestUtil util = new DFSTestUtil("testCorruptFilesCorruptedBlock", 2, 1, 512);
- util.createFiles(fs, "/srcdat10");
-
- // fetch bad file list from namenode. There should be none.
- ClientProtocol namenode = DFSClient.createNamenode(conf);
- FileStatus[] badFiles = namenode.getCorruptFiles();
- assertTrue("Namenode has " + badFiles.length + " corrupt files. Expecting None.",
- badFiles.length == 0);
-
- // Now deliberately corrupt one block
- File data_dir = new File(System.getProperty("test.build.data"),
- "dfs/data/data1/current/finalized");
- assertTrue("data directory does not exist", data_dir.exists());
- File[] blocks = data_dir.listFiles();
- assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
- for (int idx = 0; idx < blocks.length; idx++) {
- if (blocks[idx].getName().startsWith("blk_") &&
- blocks[idx].getName().endsWith(".meta")) {
- //
- // shorten .meta file
- //
- RandomAccessFile file = new RandomAccessFile(blocks[idx], "rw");
- FileChannel channel = file.getChannel();
- long position = channel.size() - 2;
- int length = 2;
- byte[] buffer = new byte[length];
- random.nextBytes(buffer);
- channel.write(ByteBuffer.wrap(buffer), position);
- file.close();
- LOG.info("Deliberately corrupting file " + blocks[idx].getName() +
- " at offset " + position + " length " + length);
-
- // read all files to trigger detection of corrupted replica
- try {
- util.checkFiles(fs, "/srcdat10");
- } catch (BlockMissingException e) {
- System.out.println("Received BlockMissingException as expected.");
- } catch (IOException e) {
- assertTrue("Corrupted replicas not handled properly. Expecting BlockMissingException " +
- " but received IOException " + e, false);
- }
- break;
- }
- }
-
- // fetch bad file list from namenode. There should be one file.
- badFiles = namenode.getCorruptFiles();
- LOG.info("Namenode has bad files. " + badFiles.length);
- assertTrue("Namenode has " + badFiles.length + " bad files. Expecting 1.",
- badFiles.length == 1);
- util.cleanup(fs, "/srcdat10");
- } finally {
- if (cluster != null) { cluster.shutdown(); }
- }
- }
}
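
For context: the tests deleted above exercised ClientProtocol.getCorruptFiles(), which this commit removes; the TestCorruptFilesJsp and TestFsck hunks below switch callers to NameNode.listCorruptFileBlocks(path, cookie) instead. A minimal sketch of the replacement polling pattern, assuming the MiniDFSCluster setup of the deleted test (imports and the path name are illustrative, not part of this commit):

    import java.util.Collection;
    import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
    import org.apache.hadoop.hdfs.server.namenode.NameNode;

    // Ask the name-node directly instead of going through ClientProtocol.
    NameNode namenode = cluster.getNameNode();
    // A null cookie starts the listing from the beginning.
    Collection<FSNamesystem.CorruptFileBlockInfo> badFiles =
        namenode.listCorruptFileBlocks("/srcdat", null);
    // Block reports arrive asynchronously, so poll until corruption is seen.
    while (badFiles.size() == 0) {
      Thread.sleep(1000);
      badFiles = namenode.listCorruptFileBlocks("/srcdat", null);
    }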
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java?rev=996727&r1=996726&r2=996727&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java Tue Sep 14 00:25:35 2010
@@ -22,7 +22,6 @@ import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.net.InetSocketAddress;
-import java.util.Arrays;
import java.util.EnumSet;
import java.util.Set;
@@ -104,16 +103,15 @@ public class TestBlockToken {
Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
.getTokenIdentifiers();
assertEquals("Only one BlockTokenIdentifier expected", 1, tokenIds.size());
- long [] result = {0};
+ long result = 0;
for (TokenIdentifier tokenId : tokenIds) {
BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
LOG.info("Got: " + id.toString());
assertTrue("Received BlockTokenIdentifier is wrong", ident.equals(id));
sm.checkAccess(id, null, block, BlockTokenSecretManager.AccessMode.WRITE);
- result = id.getBlockIds();
+ result = id.getBlockId();
}
- assertEquals("Got more than one block back", 1, result.length);
- return result[0];
+ return result;
}
}
@@ -224,28 +222,4 @@ public class TestBlockToken {
}
}
- @Test
- public void collectionOfBlocksActsSanely() {
- final long[][] testBlockIds = new long [][] {{99l, 7l, -32l, 0l},
- {},
- {42l},
- {-5235l, 2352}};
- final long [] notBlockIds = new long [] { 32l, 1l, -23423423l};
-
- for(long [] bids : testBlockIds) {
- BlockTokenIdentifier bti = new BlockTokenIdentifier("Madame Butterfly",
- bids, EnumSet.noneOf(BlockTokenSecretManager.AccessMode.class));
-
- for(long bid : bids) assertTrue(bti.isBlockIncluded(bid));
-
- for(long nbid : notBlockIds) assertFalse(bti.isBlockIncluded(nbid));
-
- // BlockTokenIdentifiers maintain a sorted array of the block Ids.
- long[] sorted = Arrays.copyOf(bids, bids.length);
- Arrays.sort(sorted);
-
- assertTrue(Arrays.toString(bids)+" doesn't equal "+Arrays.toString(sorted),
- Arrays.equals(bti.getBlockIds(), sorted));
- }
- }
}
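
The hunks above replace BlockTokenIdentifier's sorted array of block IDs (and its isBlockIncluded membership test) with a single block ID per token. A two-line sketch of the accessor change, reusing the id variable from the test code above (illustrative only):

    long[] ids = id.getBlockIds();   // before: sorted array, queried via isBlockIncluded(bid)
    long blockId = id.getBlockId();  // after this commit: one block ID per token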
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=996727&r1=996726&r2=996727&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Tue Sep 14 00:25:35 2010
@@ -77,6 +77,8 @@ import org.apache.log4j.LogManager;
* {@link NameNode#refreshUserToGroupsMappings()} after
* every G operations, which purges the name-node's user group cache.
* By default the refresh is never called.</li>
+ * <li>-keepResults do not clean up the name-space after execution.</li>
+ * <li>-useExisting do not recreate the name-space, use existing data.</li>
* </ol>
*
* The benchmark first generates inputs for each thread so that the
@@ -92,7 +94,7 @@ public class NNThroughputBenchmark {
private static final Log LOG = LogFactory.getLog(NNThroughputBenchmark.class);
private static final int BLOCK_SIZE = 16;
private static final String GENERAL_OPTIONS_USAGE =
- " [-logLevel L] [-UGCacheRefreshCount G]";
+ " [-keepResults] | [-logLevel L] | [-UGCacheRefreshCount G]";
static Configuration config;
static NameNode nameNode;
@@ -139,8 +141,7 @@ public class NNThroughputBenchmark {
abstract class OperationStatsBase {
protected static final String BASE_DIR_NAME = "/nnThroughputBenchmark";
protected static final String OP_ALL_NAME = "all";
- protected static final String OP_ALL_USAGE = "-op all " +
- "<other ops options> [-keepResults]";
+ protected static final String OP_ALL_USAGE = "-op all <other ops options>";
protected String baseDir;
protected short replication;
@@ -672,6 +673,34 @@ public class NNThroughputBenchmark {
}
/**
+ * List file status statistics.
+ *
+ * Measure how many get-file-status calls the name-node can handle per second.
+ */
+ class FileStatusStats extends OpenFileStats {
+ // Operation types
+ static final String OP_FILE_STATUS_NAME = "fileStatus";
+ static final String OP_FILE_STATUS_USAGE =
+ "-op " + OP_FILE_STATUS_NAME + OP_USAGE_ARGS;
+
+ FileStatusStats(List<String> args) {
+ super(args);
+ }
+
+ String getOpName() {
+ return OP_FILE_STATUS_NAME;
+ }
+
+ long executeOp(int daemonId, int inputIdx, String ignore)
+ throws IOException {
+ long start = System.currentTimeMillis();
+ nameNode.getFileInfo(fileNames[daemonId][inputIdx]);
+ long end = System.currentTimeMillis();
+ return end-start;
+ }
+ }
+
+ /**
* Rename file statistics.
*
* Measure how many rename calls the name-node can handle per second.
@@ -1164,6 +1193,7 @@ public class NNThroughputBenchmark {
+ " | \n\t" + CreateFileStats.OP_CREATE_USAGE
+ " | \n\t" + OpenFileStats.OP_OPEN_USAGE
+ " | \n\t" + DeleteFileStats.OP_DELETE_USAGE
+ + " | \n\t" + FileStatusStats.OP_FILE_STATUS_USAGE
+ " | \n\t" + RenameFileStats.OP_RENAME_USAGE
+ " | \n\t" + BlockReportStats.OP_BLOCK_REPORT_USAGE
+ " | \n\t" + ReplicationStats.OP_REPLICATION_USAGE
@@ -1201,6 +1231,10 @@ public class NNThroughputBenchmark {
opStat = bench.new DeleteFileStats(args);
ops.add(opStat);
}
+ if(runAll || FileStatusStats.OP_FILE_STATUS_NAME.equals(type)) {
+ opStat = bench.new FileStatusStats(args);
+ ops.add(opStat);
+ }
if(runAll || RenameFileStats.OP_RENAME_NAME.equals(type)) {
opStat = bench.new RenameFileStats(args);
ops.add(opStat);
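
The new fileStatus op plugs into the benchmark's existing option parsing, so it should be invocable like the other ops. A hedged command-line example; the launcher script and the -threads/-files arguments are assumed from the inherited OP_USAGE_ARGS of OpenFileStats and are not shown in this diff:

    bin/hadoop org.apache.hadoop.hdfs.server.namenode.NNThroughputBenchmark \
        -op fileStatus -threads 4 -files 1000 -keepResults

Per the Javadoc hunk above, -keepResults leaves the generated name-space in place after the run and -useExisting reuses it on the next one.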
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java?rev=996727&r1=996726&r2=996727&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java Tue Sep 14 00:25:35 2010
@@ -20,20 +20,18 @@ package org.apache.hadoop.hdfs.server.na
import static org.junit.Assert.assertTrue;
import java.net.URL;
+import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.TestDatanodeBlockScanner;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.junit.Test;
/** A JUnit test for corrupt_files.jsp */
@@ -66,10 +64,11 @@ public class TestCorruptFilesJsp {
}
// verify there are not corrupt files
- ClientProtocol namenode = DFSClient.createNamenode(conf);
- FileStatus[] badFiles = namenode.getCorruptFiles();
- assertTrue("There are " + badFiles.length
- + " corrupt files, but expecting none", badFiles.length == 0);
+ final NameNode namenode = cluster.getNameNode();
+ Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = namenode
+ .listCorruptFileBlocks("/", null);
+ assertTrue("There are " + badFiles.size()
+ + " corrupt files, but expecting none", badFiles.size() == 0);
// Check if webui agrees
URL url = new URL("http://"
@@ -95,9 +94,9 @@ public class TestCorruptFilesJsp {
}
// verify if all corrupt files were reported to NN
- badFiles = namenode.getCorruptFiles();
- assertTrue("Expecting 3 corrupt files, but got " + badFiles.length,
- badFiles.length == 3);
+ badFiles = namenode.listCorruptFileBlocks("/", null);
+ assertTrue("Expecting 3 corrupt files, but got " + badFiles.size(),
+ badFiles.size() == 3);
// Check if webui agrees
url = new URL("http://"
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=996727&r1=996726&r2=996727&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Tue Sep 14 00:25:35 2010
@@ -28,6 +28,7 @@ import java.io.RandomAccessFile;
import java.net.InetSocketAddress;
import java.nio.channels.FileChannel;
import java.security.PrivilegedExceptionAction;
+import java.util.Collection;
import java.util.Random;
import java.util.regex.Pattern;
@@ -35,10 +36,7 @@ import junit.framework.TestCase;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.ChecksumException;
-import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
@@ -47,8 +45,6 @@ import org.apache.hadoop.hdfs.DFSConfigK
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.TestDatanodeBlockScanner;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.io.IOUtils;
@@ -455,121 +451,68 @@ public class TestFsck extends TestCase {
}
}
- /**
- * Check if NamenodeFsck.buildSummaryResultForListCorruptFiles constructs the
- * proper string according to the number of corrupt files
- */
- public void testbuildResultForListCorruptFile() {
- assertEquals("Verifying result for zero corrupt files",
- "Unable to locate any corrupt files under '/'.\n\n"
- + "Please run a complete fsck to confirm if '/' "
- + NamenodeFsck.HEALTHY_STATUS, NamenodeFsck
- .buildSummaryResultForListCorruptFiles(0, "/"));
-
- assertEquals("Verifying result for one corrupt file",
- "There is at least 1 corrupt file under '/', which "
- + NamenodeFsck.CORRUPT_STATUS, NamenodeFsck
- .buildSummaryResultForListCorruptFiles(1, "/"));
-
- assertEquals("Verifying result for than one corrupt file",
- "There are at least 100 corrupt files under '/', which "
- + NamenodeFsck.CORRUPT_STATUS, NamenodeFsck
- .buildSummaryResultForListCorruptFiles(100, "/"));
-
- try {
- NamenodeFsck.buildSummaryResultForListCorruptFiles(-1, "/");
- fail("NamenodeFsck.buildSummaryResultForListCorruptFiles should "
- + "have thrown IllegalArgumentException for non-positive argument");
- } catch (IllegalArgumentException e) {
- // expected result
- }
- }
-
/** check if option -list-corruptfiles of fsck command works properly */
- public void testCorruptFilesOption() throws Exception {
+ public void testFsckListCorruptFilesBlocks() throws Exception {
+ Configuration conf = new Configuration();
+ conf.setLong("dfs.blockreport.intervalMsec", 1000);
+ conf.setInt("dfs.datanode.directoryscan.interval", 1);
+ FileSystem fs = null;
+
MiniDFSCluster cluster = null;
try {
-
- final int FILE_SIZE = 512;
- // the files and directories are intentionally prefixes of each other in
- // order to verify if fsck can distinguish correctly whether the path
- // supplied by user is a file or a directory
- Path[] filepaths = { new Path("/audiobook"), new Path("/audio/audio1"),
- new Path("/audio/audio2"), new Path("/audio/audio") };
-
- Configuration conf = new HdfsConfiguration();
- conf.setInt("dfs.datanode.directoryscan.interval", 1); // datanode scans
- // directories
- conf.setInt("dfs.blockreport.intervalMsec", 3 * 1000); // datanode sends
- // block reports
cluster = new MiniDFSCluster(conf, 1, true, null);
- FileSystem fs = cluster.getFileSystem();
-
- // create files
- for (Path filepath : filepaths) {
- DFSTestUtil.createFile(fs, filepath, FILE_SIZE, (short) 1, 0L);
- DFSTestUtil.waitReplication(fs, filepath, (short) 1);
- }
-
- // verify there are not corrupt files
- ClientProtocol namenode = DFSClient.createNamenode(conf);
- FileStatus[] badFiles = namenode.getCorruptFiles();
- assertTrue("There are " + badFiles.length
- + " corrupt files, but expecting none", badFiles.length == 0);
-
- // Check if fsck -list-corruptfiles agree
- String outstr = runFsck(conf, 0, true, "/", "-list-corruptfiles");
- assertTrue(outstr.contains(NamenodeFsck
- .buildSummaryResultForListCorruptFiles(0, "/")));
-
- // Now corrupt all the files except for the last one
- for (int idx = 0; idx < filepaths.length - 1; idx++) {
- String blockName = DFSTestUtil.getFirstBlock(fs, filepaths[idx])
- .getBlockName();
- TestDatanodeBlockScanner.corruptReplica(blockName, 0);
-
- // read the file so that the corrupt block is reported to NN
- FSDataInputStream in = fs.open(filepaths[idx]);
- try {
- in.readFully(new byte[FILE_SIZE]);
- } catch (ChecksumException ignored) { // checksum error is expected.
+ cluster.waitActive();
+ fs = cluster.getFileSystem();
+ DFSTestUtil util = new DFSTestUtil("testGetCorruptFiles", 3, 1, 1024);
+ util.createFiles(fs, "/corruptData", (short) 1);
+ util.waitReplication(fs, "/corruptData", (short) 1);
+
+ // String outStr = runFsck(conf, 0, true, "/corruptData", "-list-corruptfileblocks");
+ String outStr = runFsck(conf, 0, false, "/corruptData", "-list-corruptfileblocks");
+ System.out.println("1. good fsck out: " + outStr);
+ assertTrue(outStr.contains("has 0 CORRUPT files"));
+ // delete the blocks
+ File baseDir = new File(System.getProperty("test.build.data",
+ "build/test/data"),"dfs/data");
+ for (int i=0; i<8; i++) {
+ File data_dir = new File(baseDir, "data" +(i+1)+ MiniDFSCluster.FINALIZED_DIR_NAME);
+ File[] blocks = data_dir.listFiles();
+ if (blocks == null)
+ continue;
+
+ for (int idx = 0; idx < blocks.length; idx++) {
+ if (!blocks[idx].getName().startsWith("blk_")) {
+ continue;
+ }
+ assertTrue("Cannot remove file.", blocks[idx].delete());
}
- in.close();
}
- // verify if all corrupt files were reported to NN
- badFiles = namenode.getCorruptFiles();
- assertTrue("Expecting 3 corrupt files, but got " + badFiles.length,
- badFiles.length == 3);
-
- // check the corrupt file
- String corruptFile = "/audiobook";
- outstr = runFsck(conf, 1, true, corruptFile, "-list-corruptfiles");
- assertTrue(outstr.contains(NamenodeFsck
- .buildSummaryResultForListCorruptFiles(1, corruptFile)));
-
- // check corrupt dir
- String corruptDir = "/audio";
- outstr = runFsck(conf, 1, true, corruptDir, "-list-corruptfiles");
- assertTrue(outstr.contains("/audio/audio1"));
- assertTrue(outstr.contains("/audio/audio2"));
- assertTrue(outstr.contains(NamenodeFsck
- .buildSummaryResultForListCorruptFiles(2, corruptDir)));
-
- // check healthy file
- String healthyFile = "/audio/audio";
- outstr = runFsck(conf, 0, true, healthyFile, "-list-corruptfiles");
- assertTrue(outstr.contains(NamenodeFsck
- .buildSummaryResultForListCorruptFiles(0, healthyFile)));
-
- // clean up
- for (Path filepath : filepaths) {
- fs.delete(filepath, false);
+ // wait for the namenode to see the corruption
+ final NameNode namenode = cluster.getNameNode();
+ Collection<FSNamesystem.CorruptFileBlockInfo> corruptFileBlocks = namenode
+ .listCorruptFileBlocks("/corruptData", null);
+ int numCorrupt = corruptFileBlocks.size();
+ while (numCorrupt == 0) {
+ Thread.sleep(1000);
+ corruptFileBlocks = namenode
+ .listCorruptFileBlocks("/corruptData", null);
+ numCorrupt = corruptFileBlocks.size();
}
+ outStr = runFsck(conf, -1, true, "/corruptData", "-list-corruptfileblocks");
+ System.out.println("2. bad fsck out: " + outStr);
+ assertTrue(outStr.contains("has 3 CORRUPT files"));
+
+ // Do a listing on a dir which doesn't have any corrupt blocks and validate
+ util.createFiles(fs, "/goodData");
+ outStr = runFsck(conf, 0, true, "/goodData", "-list-corruptfileblocks");
+ System.out.println("3. good fsck out: " + outStr);
+ assertTrue(outStr.contains("has 0 CORRUPT files"));
+ util.cleanup(fs,"/corruptData");
+ util.cleanup(fs, "/goodData");
} finally {
- if (cluster != null) {
- cluster.shutdown();
- }
+ if (cluster != null) {cluster.shutdown();}
}
}
+
}
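
The rewritten test drives fsck through the runFsck helper around the DFSck tool; the same check can presumably be issued against a live cluster from the command line (path illustrative):

    bin/hadoop fsck /corruptData -list-corruptfileblocks

On a healthy tree this reports "has 0 CORRUPT files"; after the block files are deleted, it lists one line per corrupt file block and the summary the test asserts on ("has 3 CORRUPT files").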
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java?rev=996727&r1=996726&r2=996727&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java Tue Sep 14 00:25:35 2010
@@ -19,18 +19,18 @@ package org.apache.hadoop.hdfs.server.na
import java.util.Arrays;
-import junit.framework.TestCase;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.junit.Test;
-public class TestNNThroughputBenchmark extends TestCase {
+public class TestNNThroughputBenchmark {
/**
* This test runs all benchmarks defined in {@link NNThroughputBenchmark}.
*/
+ @Test
public void testNNThroughput() throws Exception {
Configuration conf = new HdfsConfiguration();
FileSystem.setDefaultUri(conf, "hdfs://localhost:" + 0);
Propchange: hadoop/hdfs/branches/HDFS-1052/src/webapps/datanode/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Sep 14 00:25:35 2010
@@ -2,4 +2,4 @@
/hadoop/core/trunk/src/webapps/datanode:776175-784663
/hadoop/hdfs/branches/HDFS-265/src/webapps/datanode:796829-820463
/hadoop/hdfs/branches/branch-0.21/src/webapps/datanode:820487
-/hadoop/hdfs/trunk/src/webapps/datanode:987665-992489
+/hadoop/hdfs/trunk/src/webapps/datanode:987665-996725
Propchange: hadoop/hdfs/branches/HDFS-1052/src/webapps/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Sep 14 00:25:35 2010
@@ -2,4 +2,4 @@
/hadoop/core/trunk/src/webapps/hdfs:776175-784663
/hadoop/hdfs/branches/HDFS-265/src/webapps/hdfs:796829-820463
/hadoop/hdfs/branches/branch-0.21/src/webapps/hdfs:820487
-/hadoop/hdfs/trunk/src/webapps/hdfs:987665-992489
+/hadoop/hdfs/trunk/src/webapps/hdfs:987665-996725
Modified: hadoop/hdfs/branches/HDFS-1052/src/webapps/hdfs/corrupt_files.jsp
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/webapps/hdfs/corrupt_files.jsp?rev=996727&r1=996726&r2=996727&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/webapps/hdfs/corrupt_files.jsp (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/webapps/hdfs/corrupt_files.jsp Tue Sep 14 00:25:35 2010
@@ -23,6 +23,7 @@
import="org.apache.hadoop.fs.FileStatus"
import="org.apache.hadoop.fs.FileUtil"
import="org.apache.hadoop.fs.Path"
+ import="java.util.Collection"
import="java.util.Arrays" %>
<%!//for java.io.Serializable
private static final long serialVersionUID = 1L;%>
@@ -32,9 +33,9 @@
String namenodeRole = nn.getRole().toString();
String namenodeLabel = nn.getNameNodeAddress().getHostName() + ":"
+ nn.getNameNodeAddress().getPort();
- FileStatus[] corruptFileStatuses = nn.getCorruptFiles();
- Path[] corruptFilePaths = FileUtil.stat2Paths(corruptFileStatuses);
- int corruptFileCount = corruptFileStatuses.length;
+ Collection<FSNamesystem.CorruptFileBlockInfo> corruptFileBlocks =
+ nn.listCorruptFileBlocks("/", null);
+ int corruptFileCount = corruptFileBlocks.size();
%>
<html>
@@ -58,11 +59,10 @@
Please run fsck for a thorough health analysis.
<%
} else {
- Arrays.sort(corruptFilePaths);
- for (Path corruptFilePath : corruptFilePaths) {
- String currentPath = corruptFilePath.toString();
+ for (FSNamesystem.CorruptFileBlockInfo c : corruptFileBlocks) {
+ String currentFileBlock = c.toString();
%>
- <%=currentPath%><br>
+ <%=currentFileBlock%><br>
<%
}
%>
Propchange: hadoop/hdfs/branches/HDFS-1052/src/webapps/secondary/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Sep 14 00:25:35 2010
@@ -2,4 +2,4 @@
/hadoop/core/trunk/src/webapps/secondary:776175-784663
/hadoop/hdfs/branches/HDFS-265/src/webapps/secondary:796829-820463
/hadoop/hdfs/branches/branch-0.21/src/webapps/secondary:820487
-/hadoop/hdfs/trunk/src/webapps/secondary:987665-992489
+/hadoop/hdfs/trunk/src/webapps/secondary:987665-996725