You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2012/02/27 05:54:45 UTC
svn commit: r1294028 [6/6] - in
/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs: ./
src/main/java/ src/main/java/org/apache/hadoop/hdfs/
src/main/java/org/apache/hadoop/hdfs/protocol/
src/main/java/org/apache/hadoop/hdfs/protocolPB/...
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java?rev=1294028&r1=1294027&r2=1294028&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java Mon Feb 27 04:54:33 2012
@@ -19,17 +19,277 @@ package org.apache.hadoop.hdfs.server.na
import static org.junit.Assert.*;
-import java.io.IOException;
+import java.net.URI;
+import java.util.Collections;
+import java.util.Arrays;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.io.RandomAccessFile;
+import java.io.File;
+import java.io.FilenameFilter;
+import java.io.BufferedInputStream;
+import java.io.DataInputStream;
+import java.io.IOException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.security.SecurityUtil;
+import org.junit.Test;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Test;
+import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.setupEdits;
+import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec;
+import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.TXNS_PER_ROLL;
+import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.TXNS_PER_FAIL;
+import com.google.common.collect.ImmutableList;
import com.google.common.base.Joiner;
+import java.util.zip.CheckedInputStream;
+import java.util.zip.Checksum;
+
public class TestFileJournalManager {
+ /**
+ * Test the normal operation of loading transactions from
+ * file journal manager. 3 edits directories are set up without any
+ * failures. Test that we read in the expected number of transactions.
+ */
+ @Test
+ public void testNormalOperation() throws IOException {
+ File f1 = new File(TestEditLog.TEST_DIR + "/normtest0");
+ File f2 = new File(TestEditLog.TEST_DIR + "/normtest1");
+ File f3 = new File(TestEditLog.TEST_DIR + "/normtest2");
+
+ List<URI> editUris = ImmutableList.of(f1.toURI(), f2.toURI(), f3.toURI());
+ NNStorage storage = setupEdits(editUris, 5);
+
+ long numJournals = 0;
+ for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.EDITS)) {
+ FileJournalManager jm = new FileJournalManager(sd);
+ assertEquals(6*TXNS_PER_ROLL, jm.getNumberOfTransactions(1));
+ numJournals++;
+ }
+ assertEquals(3, numJournals);
+ }
+
+ /**
+ * Test that inprogress files are handled correctly. Set up a single
+ * edits directory. Fail after the last roll. Then verify that the
+ * logs have the expected number of transactions.
+ */
+ @Test
+ public void testInprogressRecovery() throws IOException {
+ File f = new File(TestEditLog.TEST_DIR + "/filejournaltest0");
+ // abort after the 5th roll
+ NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()),
+ 5, new AbortSpec(5, 0));
+ StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
+
+ FileJournalManager jm = new FileJournalManager(sd);
+ assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL,
+ jm.getNumberOfTransactions(1));
+ }
+
+ /**
+ * Test a mixture of inprogress files and finalised. Set up 3 edits
+ * directories and fail the second on the last roll. Verify that reading
+ * the transactions reads from the finalised directories.
+ */
+ @Test
+ public void testInprogressRecoveryMixed() throws IOException {
+ File f1 = new File(TestEditLog.TEST_DIR + "/mixtest0");
+ File f2 = new File(TestEditLog.TEST_DIR + "/mixtest1");
+ File f3 = new File(TestEditLog.TEST_DIR + "/mixtest2");
+
+ List<URI> editUris = ImmutableList.of(f1.toURI(), f2.toURI(), f3.toURI());
+
+ // abort after the 5th roll
+ NNStorage storage = setupEdits(editUris,
+ 5, new AbortSpec(5, 1));
+ Iterator<StorageDirectory> dirs = storage.dirIterator(NameNodeDirType.EDITS);
+ StorageDirectory sd = dirs.next();
+ FileJournalManager jm = new FileJournalManager(sd);
+ assertEquals(6*TXNS_PER_ROLL, jm.getNumberOfTransactions(1));
+
+ sd = dirs.next();
+ jm = new FileJournalManager(sd);
+ assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, jm.getNumberOfTransactions(1));
+
+ sd = dirs.next();
+ jm = new FileJournalManager(sd);
+ assertEquals(6*TXNS_PER_ROLL, jm.getNumberOfTransactions(1));
+ }
+
+ /**
+ * Test that FileJournalManager behaves correctly despite inprogress
+ * files in all its edit log directories. Set up 3 directories and fail
+ * all on the last roll. Verify that the correct number of transactions
+ * are then loaded.
+ */
+ @Test
+ public void testInprogressRecoveryAll() throws IOException {
+ File f1 = new File(TestEditLog.TEST_DIR + "/failalltest0");
+ File f2 = new File(TestEditLog.TEST_DIR + "/failalltest1");
+ File f3 = new File(TestEditLog.TEST_DIR + "/failalltest2");
+
+ List<URI> editUris = ImmutableList.of(f1.toURI(), f2.toURI(), f3.toURI());
+ // abort after the 5th roll
+ NNStorage storage = setupEdits(editUris, 5,
+ new AbortSpec(5, 0),
+ new AbortSpec(5, 1),
+ new AbortSpec(5, 2));
+ Iterator<StorageDirectory> dirs = storage.dirIterator(NameNodeDirType.EDITS);
+ StorageDirectory sd = dirs.next();
+ FileJournalManager jm = new FileJournalManager(sd);
+ assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, jm.getNumberOfTransactions(1));
+
+ sd = dirs.next();
+ jm = new FileJournalManager(sd);
+ assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, jm.getNumberOfTransactions(1));
+
+ sd = dirs.next();
+ jm = new FileJournalManager(sd);
+ assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, jm.getNumberOfTransactions(1));
+ }
+
+ /**
+ * Corrupt an edit log file after the start segment transaction
+ */
+ private void corruptAfterStartSegment(File f) throws IOException {
+ RandomAccessFile raf = new RandomAccessFile(f, "rw");
+ raf.seek(0x16); // skip version and first transaction and a bit of next transaction
+ for (int i = 0; i < 1000; i++) {
+ raf.writeInt(0xdeadbeef);
+ }
+ raf.close();
+ }
+
+ /**
+ * Test that we can read from a stream created by FileJournalManager.
+ * Create a single edits directory, failing it on the final roll.
+ * Then try loading from the point of the 3rd roll. Verify that we read
+ * the correct number of transactions from this point.
+ */
+ @Test
+ public void testReadFromStream() throws IOException {
+ File f = new File(TestEditLog.TEST_DIR + "/filejournaltest1");
+ // abort after 10th roll
+ NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()),
+ 10, new AbortSpec(10, 0));
+ StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
+
+ FileJournalManager jm = new FileJournalManager(sd);
+ long expectedTotalTxnCount = TXNS_PER_ROLL*10 + TXNS_PER_FAIL;
+ assertEquals(expectedTotalTxnCount, jm.getNumberOfTransactions(1));
+
+ long skippedTxns = (3*TXNS_PER_ROLL); // skip first 3 files
+ long startingTxId = skippedTxns + 1;
+
+ long numTransactionsToLoad = jm.getNumberOfTransactions(startingTxId);
+ long numLoaded = 0;
+ while (numLoaded < numTransactionsToLoad) {
+ EditLogInputStream editIn = jm.getInputStream(startingTxId);
+ FSEditLogLoader.EditLogValidation val = FSEditLogLoader.validateEditLog(editIn);
+ long count = val.getNumTransactions();
+
+ editIn.close();
+ startingTxId += count;
+ numLoaded += count;
+ }
+
+ assertEquals(expectedTotalTxnCount - skippedTxns, numLoaded);
+ }
+
+ /**
+ * Try to make a request with a start transaction id which doesn't
+ * match the start ID of some log segment.
+ * This should fail as edit logs must currently be treated as indivisible
+ * units.
+ */
+ @Test(expected=IOException.class)
+ public void testAskForTransactionsMidfile() throws IOException {
+ File f = new File(TestEditLog.TEST_DIR + "/filejournaltest2");
+ NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()),
+ 10);
+ StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
+
+ FileJournalManager jm = new FileJournalManager(sd);
+ jm.getNumberOfTransactions(2);
+ }
+
+ /**
+ * Test that we receive the correct number of transactions when we count
+ * the number of transactions around gaps.
+ * Set up a single edits directory, with no failures. Delete the 4th logfile.
+ * Test that getNumberOfTransactions returns the correct number of
+ * transactions before this gap and after this gap. Also verify that if you
+ * try to count on the gap that an exception is thrown.
+ */
+ @Test
+ public void testManyLogsWithGaps() throws IOException {
+ File f = new File(TestEditLog.TEST_DIR + "/filejournaltest3");
+ NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()), 10);
+ StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
+
+ final long startGapTxId = 3*TXNS_PER_ROLL + 1;
+ final long endGapTxId = 4*TXNS_PER_ROLL;
+ File[] files = new File(f, "current").listFiles(new FilenameFilter() {
+ public boolean accept(File dir, String name) {
+ if (name.startsWith(NNStorage.getFinalizedEditsFileName(startGapTxId, endGapTxId))) {
+ return true;
+ }
+ return false;
+ }
+ });
+ assertEquals(1, files.length);
+ assertTrue(files[0].delete());
+
+ FileJournalManager jm = new FileJournalManager(sd);
+ assertEquals(startGapTxId-1, jm.getNumberOfTransactions(1));
+
+ try {
+ jm.getNumberOfTransactions(startGapTxId);
+ fail("Should have thrown an exception by now");
+ } catch (IOException ioe) {
+ assertTrue(true);
+ }
+
+ // rolled 10 times so there should be 11 files.
+ assertEquals(11*TXNS_PER_ROLL - endGapTxId,
+ jm.getNumberOfTransactions(endGapTxId+1));
+ }
+
+ /**
+ * Test that we can load an edits directory with a corrupt inprogress file.
+ * The corrupt inprogress file should be moved to the side.
+ */
+ @Test
+ public void testManyLogsWithCorruptInprogress() throws IOException {
+ File f = new File(TestEditLog.TEST_DIR + "/filejournaltest5");
+ NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()), 10, new AbortSpec(10, 0));
+ StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
+
+ File[] files = new File(f, "current").listFiles(new FilenameFilter() {
+ public boolean accept(File dir, String name) {
+ if (name.startsWith("edits_inprogress")) {
+ return true;
+ }
+ return false;
+ }
+ });
+ assertEquals(files.length, 1);
+
+ corruptAfterStartSegment(files[0]);
+
+ FileJournalManager jm = new FileJournalManager(sd);
+ assertEquals(10*TXNS_PER_ROLL+1,
+ jm.getNumberOfTransactions(1));
+ }
+
@Test
public void testGetRemoteEditLog() throws IOException {
StorageDirectory sd = FSImageTestUtil.mockStorageDirectory(
@@ -58,5 +318,4 @@ public class TestFileJournalManager {
FileJournalManager fjm, long firstTxId) throws IOException {
return Joiner.on(",").join(fjm.getRemoteEditLogs(firstTxId));
}
-
}
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java?rev=1294028&r1=1294027&r2=1294028&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java Mon Feb 27 04:54:33 2012
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.na
import junit.framework.TestCase;
import java.io.*;
import java.util.Random;
+import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -80,10 +81,12 @@ public class TestNameEditsConfigs extend
assertTrue("Expect no images in " + dir, ins.foundImages.isEmpty());
}
+ List<FileJournalManager.EditLogFile> editlogs
+ = FileJournalManager.matchEditLogs(new File(dir, "current").listFiles());
if (shouldHaveEdits) {
- assertTrue("Expect edits in " + dir, ins.foundEditLogs.size() > 0);
+ assertTrue("Expect edits in " + dir, editlogs.size() > 0);
} else {
- assertTrue("Expect no edits in " + dir, ins.foundEditLogs.isEmpty());
+ assertTrue("Expect no edits in " + dir, editlogs.isEmpty());
}
}
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java?rev=1294028&r1=1294027&r2=1294028&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java Mon Feb 27 04:54:33 2012
@@ -84,7 +84,7 @@ public class TestSaveNamespace {
public Void answer(InvocationOnMock invocation) throws Throwable {
Object[] args = invocation.getArguments();
- StorageDirectory sd = (StorageDirectory)args[0];
+ StorageDirectory sd = (StorageDirectory)args[1];
if (count++ == 1) {
LOG.info("Injecting fault for sd: " + sd);
@@ -111,7 +111,7 @@ public class TestSaveNamespace {
Configuration conf = getConf();
NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
DFSTestUtil.formatNameNode(conf);
- FSNamesystem fsn = new FSNamesystem(conf);
+ FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
// Replace the FSImage with a spy
FSImage originalImage = fsn.dir.fsImage;
@@ -129,19 +129,22 @@ public class TestSaveNamespace {
case SAVE_SECOND_FSIMAGE_RTE:
// The spy throws a RuntimeException when writing to the second directory
doAnswer(new FaultySaveImage(true)).
- when(spyImage).saveFSImage((StorageDirectory)anyObject(), anyLong());
+ when(spyImage).saveFSImage(Mockito.eq(fsn),
+ (StorageDirectory)anyObject(), anyLong());
shouldFail = false;
break;
case SAVE_SECOND_FSIMAGE_IOE:
// The spy throws an IOException when writing to the second directory
doAnswer(new FaultySaveImage(false)).
- when(spyImage).saveFSImage((StorageDirectory)anyObject(), anyLong());
+ when(spyImage).saveFSImage(Mockito.eq(fsn),
+ (StorageDirectory)anyObject(), anyLong());
shouldFail = false;
break;
case SAVE_ALL_FSIMAGES:
// The spy throws IOException in all directories
doThrow(new RuntimeException("Injected")).
- when(spyImage).saveFSImage((StorageDirectory)anyObject(), anyLong());
+ when(spyImage).saveFSImage(Mockito.eq(fsn),
+ (StorageDirectory)anyObject(), anyLong());
shouldFail = true;
break;
case WRITE_STORAGE_ALL:
@@ -189,7 +192,7 @@ public class TestSaveNamespace {
// Start a new namesystem, which should be able to recover
// the namespace from the previous incarnation.
- fsn = new FSNamesystem(conf);
+ fsn = FSNamesystem.loadFromDisk(conf);
// Make sure the image loaded including our edits.
checkEditExists(fsn, 1);
@@ -214,7 +217,7 @@ public class TestSaveNamespace {
NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
DFSTestUtil.formatNameNode(conf);
- FSNamesystem fsn = new FSNamesystem(conf);
+ FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
// Replace the FSImage with a spy
FSImage originalImage = fsn.dir.fsImage;
@@ -268,7 +271,7 @@ public class TestSaveNamespace {
// Start a new namesystem, which should be able to recover
// the namespace from the previous incarnation.
LOG.info("Loading new FSmage from disk.");
- fsn = new FSNamesystem(conf);
+ fsn = FSNamesystem.loadFromDisk(conf);
// Make sure the image loaded including our edit.
LOG.info("Checking reloaded image.");
@@ -349,7 +352,7 @@ public class TestSaveNamespace {
Configuration conf = getConf();
NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
DFSTestUtil.formatNameNode(conf);
- FSNamesystem fsn = new FSNamesystem(conf);
+ FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
// Replace the FSImage with a spy
final FSImage originalImage = fsn.dir.fsImage;
@@ -365,8 +368,9 @@ public class TestSaveNamespace {
FSNamesystem.getNamespaceEditsDirs(conf));
doThrow(new IOException("Injected fault: saveFSImage")).
- when(spyImage).saveFSImage((StorageDirectory)anyObject(),
- Mockito.anyLong());
+ when(spyImage).saveFSImage(
+ Mockito.eq(fsn), (StorageDirectory)anyObject(),
+ Mockito.anyLong());
try {
doAnEdit(fsn, 1);
@@ -395,7 +399,7 @@ public class TestSaveNamespace {
// Start a new namesystem, which should be able to recover
// the namespace from the previous incarnation.
- fsn = new FSNamesystem(conf);
+ fsn = FSNamesystem.loadFromDisk(conf);
// Make sure the image loaded including our edits.
checkEditExists(fsn, 1);
@@ -411,7 +415,7 @@ public class TestSaveNamespace {
Configuration conf = getConf();
NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
DFSTestUtil.formatNameNode(conf);
- FSNamesystem fsn = new FSNamesystem(conf);
+ FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
try {
doAnEdit(fsn, 1);
@@ -430,7 +434,7 @@ public class TestSaveNamespace {
// Start a new namesystem, which should be able to recover
// the namespace from the previous incarnation.
- fsn = new FSNamesystem(conf);
+ fsn = FSNamesystem.loadFromDisk(conf);
// Make sure the image loaded including our edits.
checkEditExists(fsn, 1);
@@ -447,7 +451,7 @@ public class TestSaveNamespace {
Configuration conf = getConf();
NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
DFSTestUtil.formatNameNode(conf);
- FSNamesystem fsn = new FSNamesystem(conf);
+ FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
try {
// We have a BEGIN_LOG_SEGMENT txn to start
@@ -469,7 +473,7 @@ public class TestSaveNamespace {
assertEquals(5, fsn.getEditLog().getLastWrittenTxId());
fsn = null;
- fsn = new FSNamesystem(conf);
+ fsn = FSNamesystem.loadFromDisk(conf);
// 1 more txn to start new segment on restart
assertEquals(6, fsn.getEditLog().getLastWrittenTxId());
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java?rev=1294028&r1=1294027&r2=1294028&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java Mon Feb 27 04:54:33 2012
@@ -84,7 +84,7 @@ public class TestNNLeaseRecovery {
FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
DFSTestUtil.formatNameNode(conf);
- fsn = spy(new FSNamesystem(conf));
+ fsn = spy(FSNamesystem.loadFromDisk(conf));
}
/**
@@ -428,7 +428,6 @@ public class TestNNLeaseRecovery {
when(fsn.getFSImage()).thenReturn(fsImage);
when(fsn.getFSImage().getEditLog()).thenReturn(editLog);
- fsn.getFSImage().setFSNamesystem(fsn);
switch (fileBlocksNumber) {
case 0: