Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2012/10/19 04:28:07 UTC
svn commit: r1399950 [24/27] - in
/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project: ./
hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/dev-support/
hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/
hadoop-hdfs-httpfs/src/main/java/org/apac...
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java Fri Oct 19 02:25:55 2012
@@ -18,25 +18,31 @@
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
-import junit.framework.TestCase;
+import static org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.assertNNHasCheckpoints;
+import static org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.getNameNodeCurrentDirs;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
-import java.lang.management.ManagementFactory;
-import java.net.InetSocketAddress;
import java.io.File;
import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.lang.management.ThreadInfo;
+import java.lang.management.ThreadMXBean;
+import java.net.InetSocketAddress;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
-import java.util.Random;
import org.apache.commons.cli.ParseException;
-import org.apache.commons.logging.impl.Log4JLogger;
-import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
@@ -44,6 +50,8 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -53,6 +61,7 @@ import org.apache.hadoop.hdfs.server.com
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.CheckpointStorage;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
@@ -60,12 +69,16 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.apache.hadoop.util.StringUtils;
import org.apache.log4j.Level;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
import org.mockito.ArgumentMatcher;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
@@ -78,13 +91,10 @@ import com.google.common.collect.Immutab
import com.google.common.collect.Lists;
import com.google.common.primitives.Ints;
-import static org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.assertNNHasCheckpoints;
-import static org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.getNameNodeCurrentDirs;
-
/**
* This class tests the creation and validation of a checkpoint.
*/
-public class TestCheckpoint extends TestCase {
+public class TestCheckpoint {
static {
((Log4JLogger)FSImage.LOG).getLogger().setLevel(Level.ALL);
@@ -100,26 +110,29 @@ public class TestCheckpoint extends Test
private CheckpointFaultInjector faultInjector;
- @Override
+ @Before
public void setUp() throws IOException {
FileUtil.fullyDeleteContents(new File(MiniDFSCluster.getBaseDirectory()));
faultInjector = Mockito.mock(CheckpointFaultInjector.class);
CheckpointFaultInjector.instance = faultInjector;
}
-
- static void writeFile(FileSystem fileSys, Path name, int repl)
- throws IOException {
- FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
- .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
- (short) repl, blockSize);
- byte[] buffer = new byte[TestCheckpoint.fileSize];
- Random rand = new Random(TestCheckpoint.seed);
- rand.nextBytes(buffer);
- stm.write(buffer);
- stm.close();
- }
+ @After
+ public void checkForSNNThreads() {
+ ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
+
+ ThreadInfo[] infos = threadBean.getThreadInfo(threadBean.getAllThreadIds(), 20);
+ for (ThreadInfo info : infos) {
+ if (info == null) continue;
+ LOG.info("Check thread: " + info.getThreadName());
+ if (info.getThreadName().contains("SecondaryNameNode")) {
+ fail("Leaked thread: " + info + "\n" +
+ Joiner.on("\n").join(info.getStackTrace()));
+ }
+ }
+ LOG.info("--------");
+ }
static void checkFile(FileSystem fileSys, Path name, int repl)
throws IOException {
@@ -139,6 +152,7 @@ public class TestCheckpoint extends Test
/*
* Verify that the namenode does not start up if one namedir is bad.
*/
+ @Test
public void testNameDirError() throws IOException {
LOG.info("Starting testNameDirError");
Configuration conf = new HdfsConfiguration();
@@ -180,6 +194,7 @@ public class TestCheckpoint extends Test
* correctly (by removing the storage directory)
* See https://issues.apache.org/jira/browse/HDFS-2011
*/
+ @Test
public void testWriteTransactionIdHandlesIOE() throws Exception {
LOG.info("Check IOException handled correctly by writeTransactionIdFile");
ArrayList<URI> fsImageDirs = new ArrayList<URI>();
@@ -214,6 +229,7 @@ public class TestCheckpoint extends Test
/*
* Simulate namenode crashing after rolling edit log.
*/
+ @Test
public void testSecondaryNamenodeError1()
throws IOException {
LOG.info("Starting testSecondaryNamenodeError1");
@@ -247,7 +263,8 @@ public class TestCheckpoint extends Test
//
// Create a new file
//
- writeFile(fileSys, file1, replication);
+ DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+ replication, seed);
checkFile(fileSys, file1, replication);
} finally {
fileSys.close();
@@ -279,6 +296,7 @@ public class TestCheckpoint extends Test
/*
* Simulate a namenode crash after uploading new image
*/
+ @Test
public void testSecondaryNamenodeError2() throws IOException {
LOG.info("Starting testSecondaryNamenodeError2");
Configuration conf = new HdfsConfiguration();
@@ -310,7 +328,8 @@ public class TestCheckpoint extends Test
//
// Create a new file
//
- writeFile(fileSys, file1, replication);
+ DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+ replication, seed);
checkFile(fileSys, file1, replication);
} finally {
fileSys.close();
@@ -340,6 +359,7 @@ public class TestCheckpoint extends Test
/*
* Simulate a secondary namenode crash after rolling the edit log.
*/
+ @Test
public void testSecondaryNamenodeError3() throws IOException {
LOG.info("Starting testSecondaryNamenodeError3");
Configuration conf = new HdfsConfiguration();
@@ -380,7 +400,8 @@ public class TestCheckpoint extends Test
//
// Create a new file
//
- writeFile(fileSys, file1, replication);
+ DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+ replication, seed);
checkFile(fileSys, file1, replication);
} finally {
fileSys.close();
@@ -412,6 +433,7 @@ public class TestCheckpoint extends Test
* back to the name-node.
* Used to truncate primary fsimage file.
*/
+ @Test
public void testSecondaryFailsToReturnImage() throws IOException {
Mockito.doThrow(new IOException("If this exception is not caught by the " +
"name-node, fs image will be truncated."))
@@ -425,6 +447,7 @@ public class TestCheckpoint extends Test
* before even setting the length header. This used to cause image
* truncation. Regression test for HDFS-3330.
*/
+ @Test
public void testSecondaryFailsWithErrorBeforeSettingHeaders()
throws IOException {
Mockito.doThrow(new Error("If this exception is not caught by the " +
@@ -497,6 +520,7 @@ public class TestCheckpoint extends Test
* The length header in the HTTP transfer should prevent
* this from corrupting the NN.
*/
+ @Test
public void testNameNodeImageSendFailWrongSize()
throws IOException {
LOG.info("Starting testNameNodeImageSendFailWrongSize");
@@ -511,6 +535,7 @@ public class TestCheckpoint extends Test
* The digest header in the HTTP transfer should prevent
* this from corrupting the NN.
*/
+ @Test
public void testNameNodeImageSendFailWrongDigest()
throws IOException {
LOG.info("Starting testNameNodeImageSendFailWrongDigest");
@@ -528,7 +553,7 @@ public class TestCheckpoint extends Test
private void doSendFailTest(String exceptionSubstring)
throws IOException {
Configuration conf = new HdfsConfiguration();
- Path file1 = new Path("checkpoint-doSendFailTest-" + getName() + ".dat");
+ Path file1 = new Path("checkpoint-doSendFailTest-doSendFailTest.dat");
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(numDatanodes)
.build();
@@ -562,7 +587,8 @@ public class TestCheckpoint extends Test
//
// Create a new file
//
- writeFile(fileSys, file1, replication);
+ DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+ replication, seed);
checkFile(fileSys, file1, replication);
} finally {
fileSys.close();
@@ -574,6 +600,7 @@ public class TestCheckpoint extends Test
* Test that the NN locks its storage and edits directories, and won't start up
* if the directories are already locked
**/
+ @Test
public void testNameDirLocking() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
@@ -603,6 +630,7 @@ public class TestCheckpoint extends Test
* Test that, if the edits dir is separate from the name dir, it is
* properly locked.
**/
+ @Test
public void testSeparateEditsDirLocking() throws IOException {
Configuration conf = new HdfsConfiguration();
File editsDir = new File(MiniDFSCluster.getBaseDirectory() +
@@ -638,6 +666,7 @@ public class TestCheckpoint extends Test
/**
* Test that the SecondaryNameNode properly locks its storage directories.
*/
+ @Test
public void testSecondaryNameNodeLocking() throws Exception {
// Start a primary NN so that the secondary will start successfully
Configuration conf = new HdfsConfiguration();
@@ -687,6 +716,7 @@ public class TestCheckpoint extends Test
* Test that an attempt to lock a storage directory that is already locked
* logs an error message that includes the JVM name of the namenode that locked it.
*/
+ @Test
public void testStorageAlreadyLockedErrorMessage() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
@@ -763,6 +793,7 @@ public class TestCheckpoint extends Test
* 2. if the NN does not contain an image, importing a checkpoint
* succeeds and re-saves the image
*/
+ @Test
public void testImportCheckpoint() throws Exception {
Configuration conf = new HdfsConfiguration();
Path testPath = new Path("/testfile");
@@ -861,6 +892,7 @@ public class TestCheckpoint extends Test
/**
* Tests checkpoint in HDFS.
*/
+ @Test
public void testCheckpoint() throws IOException {
Path file1 = new Path("checkpoint.dat");
Path file2 = new Path("checkpoint2.dat");
@@ -882,7 +914,8 @@ public class TestCheckpoint extends Test
//
// Create file1
//
- writeFile(fileSys, file1, replication);
+ DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+ replication, seed);
checkFile(fileSys, file1, replication);
//
@@ -909,7 +942,8 @@ public class TestCheckpoint extends Test
cleanupFile(fileSys, file1);
// create new file file2
- writeFile(fileSys, file2, replication);
+ DFSTestUtil.createFile(fileSys, file2, fileSize, fileSize, blockSize,
+ replication, seed);
checkFile(fileSys, file2, replication);
//
@@ -951,6 +985,7 @@ public class TestCheckpoint extends Test
/**
* Tests save namespace.
*/
+ @Test
public void testSaveNamespace() throws IOException {
MiniDFSCluster cluster = null;
DistributedFileSystem fs = null;
@@ -974,7 +1009,8 @@ public class TestCheckpoint extends Test
}
// create new file
Path file = new Path("namespace.dat");
- writeFile(fs, file, replication);
+ DFSTestUtil.createFile(fs, file, fileSize, fileSize, blockSize,
+ replication, seed);
checkFile(fs, file, replication);
// create new link
@@ -1057,6 +1093,7 @@ public class TestCheckpoint extends Test
/* Test case to test CheckpointSignature */
@SuppressWarnings("deprecation")
+ @Test
public void testCheckpointSignature() throws IOException {
MiniDFSCluster cluster = null;
@@ -1091,6 +1128,7 @@ public class TestCheckpoint extends Test
* - it then fails again for the same reason
* - it then tries to checkpoint a third time
*/
+ @Test
public void testCheckpointAfterTwoFailedUploads() throws IOException {
MiniDFSCluster cluster = null;
SecondaryNameNode secondary = null;
@@ -1147,6 +1185,7 @@ public class TestCheckpoint extends Test
*
* @throws IOException
*/
+ @Test
public void testMultipleSecondaryNamenodes() throws IOException {
Configuration conf = new HdfsConfiguration();
String nameserviceId1 = "ns1";
@@ -1197,6 +1236,7 @@ public class TestCheckpoint extends Test
* Test that the secondary doesn't have to re-download image
* if it hasn't changed.
*/
+ @Test
public void testSecondaryImageDownload() throws IOException {
LOG.info("Starting testSecondaryImageDownload");
Configuration conf = new HdfsConfiguration();
@@ -1279,6 +1319,7 @@ public class TestCheckpoint extends Test
* It verifies that this works even though the earlier-txid checkpoint gets
* uploaded after the later-txid checkpoint.
*/
+ @Test
public void testMultipleSecondaryNNsAgainstSameNN() throws Exception {
Configuration conf = new HdfsConfiguration();
@@ -1364,6 +1405,7 @@ public class TestCheckpoint extends Test
* It verifies that one of the two gets an error that it's uploading a
* duplicate checkpoint, and the other one succeeds.
*/
+ @Test
public void testMultipleSecondaryNNsAgainstSameNN2() throws Exception {
Configuration conf = new HdfsConfiguration();
@@ -1386,6 +1428,7 @@ public class TestCheckpoint extends Test
final Answer<Object> delegator = new GenericTestUtils.DelegateAnswer(origNN);
NamenodeProtocol spyNN = Mockito.mock(NamenodeProtocol.class, delegator);
DelayAnswer delayer = new DelayAnswer(LOG) {
+ @Override
protected Object passThrough(InvocationOnMock invocation) throws Throwable {
return delegator.answer(invocation);
}
@@ -1456,6 +1499,7 @@ public class TestCheckpoint extends Test
* is running. The secondary should shut itself down if it talks to a NN
* with the wrong namespace.
*/
+ @Test
public void testReformatNNBetweenCheckpoints() throws IOException {
MiniDFSCluster cluster = null;
SecondaryNameNode secondary = null;
@@ -1513,6 +1557,7 @@ public class TestCheckpoint extends Test
* Test that the primary NN will not serve any files to a 2NN that doesn't
* share its namespace ID, and also will not accept any files from one.
*/
+ @Test
public void testNamespaceVerifiedOnFileTransfer() throws IOException {
MiniDFSCluster cluster = null;
@@ -1574,6 +1619,7 @@ public class TestCheckpoint extends Test
* the non-failed storage directory receives the checkpoint.
*/
@SuppressWarnings("deprecation")
+ @Test
public void testCheckpointWithFailedStorageDir() throws Exception {
MiniDFSCluster cluster = null;
SecondaryNameNode secondary = null;
@@ -1638,6 +1684,7 @@ public class TestCheckpoint extends Test
* @throws Exception
*/
@SuppressWarnings("deprecation")
+ @Test
public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception {
MiniDFSCluster cluster = null;
SecondaryNameNode secondary = null;
@@ -1710,6 +1757,7 @@ public class TestCheckpoint extends Test
/**
* Test that the 2NN triggers a checkpoint after the configurable interval
*/
+ @Test(timeout=30000)
public void testCheckpointTriggerOnTxnCount() throws Exception {
MiniDFSCluster cluster = null;
SecondaryNameNode secondary = null;
@@ -1723,8 +1771,7 @@ public class TestCheckpoint extends Test
.format(true).build();
FileSystem fs = cluster.getFileSystem();
secondary = startSecondaryNameNode(conf);
- Thread t = new Thread(secondary);
- t.start();
+ secondary.startCheckpointThread();
final NNStorage storage = secondary.getFSImage().getStorage();
// 2NN should checkpoint at startup
@@ -1763,6 +1810,7 @@ public class TestCheckpoint extends Test
* logs that connect the 2NN's old checkpoint to the current txid
* get archived. Then, the 2NN tries to checkpoint again.
*/
+ @Test
public void testSecondaryHasVeryOutOfDateImage() throws IOException {
MiniDFSCluster cluster = null;
SecondaryNameNode secondary = null;
@@ -1800,6 +1848,147 @@ public class TestCheckpoint extends Test
}
}
+ /**
+ * Regression test for HDFS-3678 "Edit log files are never being purged from 2NN"
+ */
+ @Test
+ public void testSecondaryPurgesEditLogs() throws IOException {
+ MiniDFSCluster cluster = null;
+ SecondaryNameNode secondary = null;
+
+ Configuration conf = new HdfsConfiguration();
+ conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 0);
+ try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+ .format(true).build();
+
+ FileSystem fs = cluster.getFileSystem();
+ fs.mkdirs(new Path("/foo"));
+
+ secondary = startSecondaryNameNode(conf);
+
+ // Checkpoint a few times. Doing this will cause a log roll, and thus
+ // leave several edit log segments on the 2NN.
+ for (int i = 0; i < 5; i++) {
+ secondary.doCheckpoint();
+ }
+
+ // Make sure there are no more edit log files than there should be.
+ List<File> checkpointDirs = getCheckpointCurrentDirs(secondary);
+ for (File checkpointDir : checkpointDirs) {
+ List<EditLogFile> editsFiles = FileJournalManager.matchEditLogs(
+ checkpointDir);
+ assertEquals("Edit log files were not purged from 2NN", 1,
+ editsFiles.size());
+ }
+
+ } finally {
+ if (secondary != null) {
+ secondary.shutdown();
+ }
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
+
+ /**
+ * Regression test for HDFS-3835 - "Long-lived 2NN cannot perform a
+ * checkpoint if security is enabled and the NN restarts without outstanding
+ * delegation tokens"
+ */
+ @Test
+ public void testSecondaryNameNodeWithDelegationTokens() throws IOException {
+ MiniDFSCluster cluster = null;
+ SecondaryNameNode secondary = null;
+
+ Configuration conf = new HdfsConfiguration();
+ conf.setBoolean(
+ DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
+ try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
+ .format(true).build();
+
+ assertNotNull(cluster.getNamesystem().getDelegationToken(new Text("atm")));
+
+ secondary = startSecondaryNameNode(conf);
+
+ // Checkpoint once, so the 2NN loads the DT into its in-memory state.
+ secondary.doCheckpoint();
+
+ // Perform a saveNamespace, so that the NN has a new fsimage, and the 2NN
+ // therefore needs to download a new fsimage the next time it performs a
+ // checkpoint.
+ cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+ cluster.getNameNodeRpc().saveNamespace();
+ cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+ // Ensure that the 2NN can still perform a checkpoint.
+ secondary.doCheckpoint();
+ } finally {
+ if (secondary != null) {
+ secondary.shutdown();
+ }
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
+
+ /**
+ * Regression test for HDFS-3849. This makes sure that when we re-load the
+ * FSImage in the 2NN, we clear the existing leases.
+ */
+ @Test
+ public void testSecondaryNameNodeWithSavedLeases() throws IOException {
+ MiniDFSCluster cluster = null;
+ SecondaryNameNode secondary = null;
+ FSDataOutputStream fos = null;
+ Configuration conf = new HdfsConfiguration();
+ try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
+ .format(true).build();
+ FileSystem fs = cluster.getFileSystem();
+ fos = fs.create(new Path("tmpfile"));
+ fos.write(new byte[] { 0, 1, 2, 3 });
+ fos.hflush();
+ assertEquals(1, cluster.getNamesystem().getLeaseManager().countLease());
+
+ secondary = startSecondaryNameNode(conf);
+ assertEquals(0, secondary.getFSNamesystem().getLeaseManager().countLease());
+
+ // Checkpoint once, so the 2NN loads the lease into its in-memory state.
+ secondary.doCheckpoint();
+ assertEquals(1, secondary.getFSNamesystem().getLeaseManager().countLease());
+ fos.close();
+ fos = null;
+
+ // Perform a saveNamespace, so that the NN has a new fsimage, and the 2NN
+ // therefore needs to download a new fsimage the next time it performs a
+ // checkpoint.
+ cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+ cluster.getNameNodeRpc().saveNamespace();
+ cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+ // Ensure that the 2NN can still perform a checkpoint.
+ secondary.doCheckpoint();
+
+ // And the leases have been cleared...
+ assertEquals(0, secondary.getFSNamesystem().getLeaseManager().countLease());
+ } finally {
+ if (fos != null) {
+ fos.close();
+ }
+ if (secondary != null) {
+ secondary.shutdown();
+ }
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
+
+ @Test
public void testCommandLineParsing() throws ParseException {
SecondaryNameNode.CommandLineOpts opts =
new SecondaryNameNode.CommandLineOpts();
@@ -1860,7 +2049,7 @@ public class TestCheckpoint extends Test
ImmutableSet.of("VERSION"));
}
- private List<File> getCheckpointCurrentDirs(SecondaryNameNode secondary) {
+ private static List<File> getCheckpointCurrentDirs(SecondaryNameNode secondary) {
List<File> ret = Lists.newArrayList();
for (URI u : secondary.getCheckpointDirs()) {
File checkpointDir = new File(u.getPath());
@@ -1869,7 +2058,7 @@ public class TestCheckpoint extends Test
return ret;
}
- private CheckpointStorage spyOnSecondaryImage(SecondaryNameNode secondary1) {
+ private static CheckpointStorage spyOnSecondaryImage(SecondaryNameNode secondary1) {
CheckpointStorage spy = Mockito.spy((CheckpointStorage)secondary1.getFSImage());
secondary1.setFSImage(spy);
return spy;
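
A note on the recurring writeFile replacement above: the tests now call the shared
seven-argument DFSTestUtil.createFile(fs, path, bufferLen, fileLen, blockSize,
replication, seed) overload instead of a private per-class helper. A minimal,
hypothetical sketch of the same pattern in isolation (class and file names are
illustrative, not from the commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSTestUtil;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.junit.Test;

    public class TestCreateFileSketch {
      @Test
      public void testCreateAndReplicate() throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        try {
          FileSystem fs = cluster.getFileSystem();
          Path file = new Path("sketch.dat");
          // bufferLen, fileLen, blockSize, replication, seed: the same
          // argument order used throughout the hunks above.
          DFSTestUtil.createFile(fs, file, 8192, 8192, 4096, (short) 1, 0L);
          DFSTestUtil.waitReplication(fs, file, (short) 1);
        } finally {
          cluster.shutdown();
        }
      }
    }

Centralizing file creation this way removes the Random/byte-buffer boilerplate
that each test class previously duplicated in its own writeFile method.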
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java Fri Oct 19 02:25:55 2012
@@ -19,8 +19,8 @@ package org.apache.hadoop.hdfs.server.na
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.ByteArrayInputStream;
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java Fri Oct 19 02:25:55 2012
@@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertTrue;
+
import java.net.URL;
import java.util.Collection;
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java Fri Oct 19 02:25:55 2012
@@ -17,11 +17,12 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
import java.io.IOException;
import java.util.concurrent.TimeoutException;
-import junit.framework.Assert;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -42,6 +43,7 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.util.Time;
import org.junit.After;
import org.junit.Test;
@@ -64,10 +66,10 @@ public class TestDeadDatanode {
*/
private void waitForDatanodeState(String nodeID, boolean alive, int waitTime)
throws TimeoutException, InterruptedException {
- long stopTime = System.currentTimeMillis() + waitTime;
+ long stopTime = Time.now() + waitTime;
FSNamesystem namesystem = cluster.getNamesystem();
String state = alive ? "alive" : "dead";
- while (System.currentTimeMillis() < stopTime) {
+ while (Time.now() < stopTime) {
final DatanodeDescriptor dd = BlockManagerTestUtil.getDatanode(
namesystem, nodeID);
if (dd.isAlive == alive) {
@@ -120,7 +122,7 @@ public class TestDeadDatanode {
// Ensure blockReceived call from dead datanode is rejected with IOException
try {
dnp.blockReceivedAndDeleted(reg, poolId, storageBlocks);
- Assert.fail("Expected IOException is not thrown");
+ fail("Expected IOException is not thrown");
} catch (IOException ex) {
// Expected
}
@@ -131,7 +133,7 @@ public class TestDeadDatanode {
new long[] { 0L, 0L, 0L }) };
try {
dnp.blockReport(reg, poolId, report);
- Assert.fail("Expected IOException is not thrown");
+ fail("Expected IOException is not thrown");
} catch (IOException ex) {
// Expected
}
@@ -141,8 +143,8 @@ public class TestDeadDatanode {
StorageReport[] rep = { new StorageReport(reg.getStorageID(), false, 0, 0,
0, 0) };
DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, rep, 0, 0, 0).getCommands();
- Assert.assertEquals(1, cmd.length);
- Assert.assertEquals(cmd[0].getAction(), RegisterCommand.REGISTER
+ assertEquals(1, cmd.length);
+ assertEquals(cmd[0].getAction(), RegisterCommand.REGISTER
.getAction());
}
}
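
The timing change in waitForDatanodeState above (Time.now() in place of
System.currentTimeMillis()) is the standard poll-until-deadline idiom. A
stripped-down sketch with a generic condition standing in for the datanode
liveness check; the PollSketch class and waitFor helper are illustrative, not
part of the commit:

    import java.util.concurrent.Callable;
    import java.util.concurrent.TimeoutException;

    import org.apache.hadoop.util.Time;

    public class PollSketch {
      // Poll once a second until the condition holds or the deadline passes.
      static void waitFor(Callable<Boolean> condition, int waitTimeMs)
          throws Exception {
        long stopTime = Time.now() + waitTimeMs;
        while (Time.now() < stopTime) {
          if (condition.call()) {
            return;
          }
          Thread.sleep(1000);
        }
        throw new TimeoutException("Condition not met in " + waitTimeMs + " ms");
      }
    }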
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java Fri Oct 19 02:25:55 2012
@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -116,19 +117,6 @@ public class TestDecommissioningStatus {
stm.close();
}
- private void writeFile(FileSystem fileSys, Path name, short repl)
- throws IOException {
- // create and write a file that contains three blocks of data
- FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
- .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), repl,
- blockSize);
- byte[] buffer = new byte[fileSize];
- Random rand = new Random(seed);
- rand.nextBytes(buffer);
- stm.write(buffer);
- stm.close();
- }
-
private FSDataOutputStream writeIncompleteFile(FileSystem fileSys, Path name,
short repl) throws IOException {
// create and write a file that contains three blocks of data
@@ -198,7 +186,8 @@ public class TestDecommissioningStatus {
// Decommission one node. Verify the decommission status
//
Path file1 = new Path("decommission.dat");
- writeFile(fileSys, file1, replicas);
+ DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+ replicas, seed);
Path file2 = new Path("decommission1.dat");
FSDataOutputStream st1 = writeIncompleteFile(fileSys, file2, replicas);
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Fri Oct 19 02:25:55 2012
@@ -17,60 +17,70 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
-import junit.framework.TestCase;
-import java.io.*;
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.BufferedInputStream;
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
+import java.io.File;
+import java.io.FilenameFilter;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.RandomAccessFile;
import java.net.URI;
+import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Collection;
+import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Arrays;
+import java.util.Random;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
-import java.util.Random;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
+import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.*;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
-import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
-import org.aspectj.util.FileUtil;
-
-import org.mockito.Mockito;
import org.junit.Test;
+import org.mockito.Mockito;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
-import static org.apache.hadoop.test.MetricsAsserts.*;
-
/**
* This class tests the creation and validation of the edit log.
*/
-public class TestEditLog extends TestCase {
+public class TestEditLog {
static {
((Log4JLogger)FSEditLog.LOG).getLogger().setLevel(Level.ALL);
@@ -108,6 +118,11 @@ public class TestEditLog extends TestCas
"a4ff 0000 0000 0000 0000 0000 0000 0000"
).replace(" ",""));
+ static {
+ // No need to fsync for the purposes of tests. This makes
+ // the tests run much faster.
+ EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
+ }
static final byte TRAILER_BYTE = FSEditLogOpCodes.OP_INVALID.getOpCode();
@@ -129,6 +144,7 @@ public class TestEditLog extends TestCas
}
// add a bunch of transactions.
+ @Override
public void run() {
PermissionStatus p = namesystem.createFsOwnerPermissions(
new FsPermission((short)0777));
@@ -161,6 +177,7 @@ public class TestEditLog extends TestCas
/**
* Test case for an empty edit log from a prior version of Hadoop.
*/
+ @Test
public void testPreTxIdEditLogNoEdits() throws Exception {
FSNamesystem namesys = Mockito.mock(FSNamesystem.class);
namesys.dir = Mockito.mock(FSDirectory.class);
@@ -174,6 +191,7 @@ public class TestEditLog extends TestCas
* Test case for loading a very simple edit log from a format
* prior to the inclusion of edit transaction IDs in the log.
*/
+ @Test
public void testPreTxidEditLogWithEdits() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
@@ -202,6 +220,7 @@ public class TestEditLog extends TestCas
/**
* Simple test for writing to and rolling the edit log.
*/
+ @Test
public void testSimpleEditLog() throws IOException {
// start a cluster
Configuration conf = new HdfsConfiguration();
@@ -246,6 +265,7 @@ public class TestEditLog extends TestCas
/**
* Tests transaction logging in dfs.
*/
+ @Test
public void testMultiThreadedEditLog() throws IOException {
testEditLog(2048);
// force edit buffer to automatically sync on each log of edit log entry
@@ -363,6 +383,7 @@ public class TestEditLog extends TestCas
final String filename) throws Exception
{
exec.submit(new Callable<Void>() {
+ @Override
public Void call() {
log.logSetReplication(filename, (short)1);
return null;
@@ -374,6 +395,7 @@ public class TestEditLog extends TestCas
throws Exception
{
exec.submit(new Callable<Void>() {
+ @Override
public Void call() {
log.logSync();
return null;
@@ -385,6 +407,7 @@ public class TestEditLog extends TestCas
throws Exception
{
exec.submit(new Callable<Void>() {
+ @Override
public Void call() throws Exception {
log.logSyncAll();
return null;
@@ -392,6 +415,7 @@ public class TestEditLog extends TestCas
}).get();
}
+ @Test
public void testSyncBatching() throws Exception {
// start a cluster
Configuration conf = new HdfsConfiguration();
@@ -454,6 +478,7 @@ public class TestEditLog extends TestCas
* This sequence is legal and can occur if enterSafeMode() is closely
* followed by saveNamespace.
*/
+ @Test
public void testBatchedSyncWithClosedLogs() throws Exception {
// start a cluster
Configuration conf = new HdfsConfiguration();
@@ -493,6 +518,7 @@ public class TestEditLog extends TestCas
}
}
+ @Test
public void testEditChecksum() throws Exception {
// start a cluster
Configuration conf = new HdfsConfiguration();
@@ -544,6 +570,7 @@ public class TestEditLog extends TestCas
* Test what happens if the NN crashes when it has started but
* had no transactions written.
*/
+ @Test
public void testCrashRecoveryNoTransactions() throws Exception {
testCrashRecovery(0);
}
@@ -552,6 +579,7 @@ public class TestEditLog extends TestCas
* Test what happens if the NN crashes when it has started and
* had a few transactions written
*/
+ @Test
public void testCrashRecoveryWithTransactions() throws Exception {
testCrashRecovery(150);
}
@@ -591,14 +619,14 @@ public class TestEditLog extends TestCas
LOG.info("Copying data directory aside to a hot backup");
File backupDir = new File(dfsDir.getParentFile(), "dfs.backup-while-running");
- FileUtil.copyDir(dfsDir, backupDir);;
+ FileUtils.copyDirectory(dfsDir, backupDir);
LOG.info("Shutting down cluster #1");
cluster.shutdown();
cluster = null;
// Now restore the backup
- FileUtil.deleteContents(dfsDir);
+ FileUtil.fullyDeleteContents(dfsDir);
backupDir.renameTo(dfsDir);
// Directory layout looks like:
@@ -661,22 +689,26 @@ public class TestEditLog extends TestCas
}
// should succeed - only one corrupt log dir
+ @Test
public void testCrashRecoveryEmptyLogOneDir() throws Exception {
doTestCrashRecoveryEmptyLog(false, true, true);
}
// should fail - seen_txid updated to 3, but no log dir contains txid 3
+ @Test
public void testCrashRecoveryEmptyLogBothDirs() throws Exception {
doTestCrashRecoveryEmptyLog(true, true, false);
}
// should succeed - only one corrupt log dir
+ @Test
public void testCrashRecoveryEmptyLogOneDirNoUpdateSeenTxId()
throws Exception {
doTestCrashRecoveryEmptyLog(false, false, true);
}
// should succeed - both log dirs corrupt, but seen_txid never updated
+ @Test
public void testCrashRecoveryEmptyLogBothDirsNoUpdateSeenTxId()
throws Exception {
doTestCrashRecoveryEmptyLog(true, false, true);
@@ -824,6 +856,7 @@ public class TestEditLog extends TestCas
}
}
+ @Test
public void testFailedOpen() throws Exception {
File logDir = new File(TEST_DIR, "testFailedOpen");
logDir.mkdirs();
@@ -845,6 +878,7 @@ public class TestEditLog extends TestCas
* Regression test for HDFS-1112/HDFS-3020. Ensures that, even if
* logSync isn't called periodically, the edit log will sync itself.
*/
+ @Test
public void testAutoSync() throws Exception {
File logDir = new File(TEST_DIR, "testAutoSync");
logDir.mkdirs();
@@ -1144,6 +1178,7 @@ public class TestEditLog extends TestCas
final long endGapTxId = 2*TXNS_PER_ROLL;
File[] files = new File(f1, "current").listFiles(new FilenameFilter() {
+ @Override
public boolean accept(File dir, String name) {
if (name.startsWith(NNStorage.getFinalizedEditsFileName(startGapTxId,
endGapTxId))) {
@@ -1185,7 +1220,7 @@ public class TestEditLog extends TestCas
elfos.create();
elfos.writeRaw(garbage, 0, garbage.length);
elfos.setReadyToFlush();
- elfos.flushAndSync();
+ elfos.flushAndSync(true);
elfos.close();
elfos = null;
file = new File(TEST_LOG_NAME);
@@ -1201,10 +1236,8 @@ public class TestEditLog extends TestCas
}
} catch (IOException e) {
} catch (Throwable t) {
- StringWriter sw = new StringWriter();
- t.printStackTrace(new PrintWriter(sw));
- fail("caught non-IOException throwable with message " +
- t.getMessage() + "\nstack trace\n" + sw.toString());
+ fail("Caught non-IOException throwable " +
+ StringUtils.stringifyException(t));
}
} finally {
if ((elfos != null) && (elfos.isOpen()))
@@ -1278,6 +1311,7 @@ public class TestEditLog extends TestCas
final long endErrorTxId = 2*TXNS_PER_ROLL;
File[] files = new File(f1, "current").listFiles(new FilenameFilter() {
+ @Override
public boolean accept(File dir, String name) {
if (name.startsWith(NNStorage.getFinalizedEditsFileName(startErrorTxId,
endErrorTxId))) {
@@ -1316,6 +1350,7 @@ public class TestEditLog extends TestCas
final long endErrorTxId = 2*TXNS_PER_ROLL;
File[] files = new File(f1, "current").listFiles(new FilenameFilter() {
+ @Override
public boolean accept(File dir, String name) {
if (name.startsWith(NNStorage.getFinalizedEditsFileName(startErrorTxId,
endErrorTxId))) {
@@ -1382,7 +1417,7 @@ public class TestEditLog extends TestCas
}
// How long does it take to read through all these edit logs?
- long startTime = System.currentTimeMillis();
+ long startTime = Time.now();
try {
cluster = new MiniDFSCluster.Builder(conf).
numDataNodes(NUM_DATA_NODES).build();
@@ -1392,7 +1427,7 @@ public class TestEditLog extends TestCas
cluster.shutdown();
}
}
- long endTime = System.currentTimeMillis();
+ long endTime = Time.now();
double delta = ((float)(endTime - startTime)) / 1000.0;
LOG.info(String.format("loaded %d edit log segments in %.2f seconds",
NUM_EDIT_LOG_ROLLS, delta));
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java Fri Oct 19 02:25:55 2012
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
import java.io.IOException;
import java.io.OutputStream;
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java Fri Oct 19 02:25:55 2012
@@ -40,6 +40,12 @@ public class TestEditLogFileOutputStream
final static int MIN_PREALLOCATION_LENGTH =
EditLogFileOutputStream.MIN_PREALLOCATION_LENGTH;
+ static {
+ // No need to fsync for the purposes of tests. This makes
+ // the tests run much faster.
+ EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
+ }
+
@Before
@After
public void deleteEditsFile() {
@@ -49,7 +55,7 @@ public class TestEditLogFileOutputStream
static void flushAndCheckLength(EditLogFileOutputStream elos,
long expectedLength) throws IOException {
elos.setReadyToFlush();
- elos.flushAndSync();
+ elos.flushAndSync(true);
assertEquals(expectedLength, elos.getFile().length());
}
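
The static block added here mirrors the one added to TestEditLog above: the
flag must be set in a static initializer so it takes effect before any
EditLogFileOutputStream is constructed. A sketch of the pattern for any other
edit-log-heavy test class (the class name is illustrative):

    package org.apache.hadoop.hdfs.server.namenode;

    public class SomeEditLogTestSketch {
      static {
        // Durability is irrelevant in tests, so skip the real fsync;
        // this makes edit-log-heavy tests run much faster.
        EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
      }
      // @Test methods that write edit logs would go here.
    }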
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java Fri Oct 19 02:25:55 2012
@@ -125,7 +125,8 @@ public class TestEditLogJournalFailures
} catch (RemoteException re) {
assertTrue(re.getClassName().contains("ExitException"));
GenericTestUtils.assertExceptionContains(
- "Could not sync enough journals to persistent storage. " +
+ "Could not sync enough journals to persistent storage due to " +
+ "No journals available to flush. " +
"Unsynced transactions: 1", re);
}
}
@@ -227,7 +228,8 @@ public class TestEditLogJournalFailures
} catch (RemoteException re) {
assertTrue(re.getClassName().contains("ExitException"));
GenericTestUtils.assertExceptionContains(
- "Could not sync enough journals to persistent storage. " +
+ "Could not sync enough journals to persistent storage due to " +
+ "setReadyToFlush failed for too many journals. " +
"Unsynced transactions: 1", re);
}
}
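
GenericTestUtils.assertExceptionContains, used above, passes whenever the given
fragment occurs anywhere in the exception text, so the tests only pin the parts
of the message they care about. A minimal self-contained sketch (the message
literal mirrors the first hunk; the class name is illustrative):

    import java.io.IOException;

    import org.apache.hadoop.test.GenericTestUtils;

    public class AssertContainsSketch {
      public static void main(String[] args) {
        try {
          throw new IOException(
              "Could not sync enough journals to persistent storage due to "
              + "No journals available to flush. Unsynced transactions: 1");
        } catch (IOException e) {
          // Substring match: passes without asserting the whole message.
          GenericTestUtils.assertExceptionContains(
              "No journals available to flush", e);
        }
      }
    }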
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java Fri Oct 19 02:25:55 2012
@@ -17,40 +17,41 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
-import org.apache.commons.logging.Log;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.spy;
-import java.io.*;
+import java.io.File;
+import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.permission.*;
-
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
+import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
-
-import static org.junit.Assert.*;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
-import static org.mockito.Mockito.*;
-
/**
* This class tests various synchronization bugs in FSEditLog rolling
* and namespace saving.
@@ -111,6 +112,7 @@ public class TestEditLogRace {
}
// add a bunch of transactions.
+ @Override
public void run() {
thr = Thread.currentThread();
PermissionStatus p = namesystem.createFsOwnerPermissions(
@@ -182,10 +184,7 @@ public class TestEditLogRace {
cluster.waitActive();
fileSys = cluster.getFileSystem();
final FSNamesystem namesystem = cluster.getNamesystem();
-
FSImage fsimage = namesystem.getFSImage();
- FSEditLog editLog = fsimage.getEditLog();
-
StorageDirectory sd = fsimage.getStorage().getStorageDir(0);
startTransactionWorkers(namesystem, caughtErr);
@@ -304,7 +303,7 @@ public class TestEditLogRace {
assertEquals(fsimage.getStorage().getMostRecentCheckpointTxId(),
editLog.getLastWrittenTxId() - 1);
- namesystem.leaveSafeMode(false);
+ namesystem.leaveSafeMode();
LOG.info("Save " + i + ": complete");
}
} finally {
@@ -367,6 +366,7 @@ public class TestEditLogRace {
final CountDownLatch waitToEnterFlush = new CountDownLatch(1);
final Thread doAnEditThread = new Thread() {
+ @Override
public void run() {
try {
LOG.info("Starting mkdirs");
@@ -410,9 +410,9 @@ public class TestEditLogRace {
LOG.info("Trying to enter safe mode.");
LOG.info("This should block for " + BLOCK_TIME + "sec, since flush will sleep that long");
- long st = System.currentTimeMillis();
+ long st = Time.now();
namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
- long et = System.currentTimeMillis();
+ long et = Time.now();
LOG.info("Entered safe mode");
// Make sure we really waited for the flush to complete!
assertTrue(et - st > (BLOCK_TIME - 1)*1000);
@@ -462,6 +462,7 @@ public class TestEditLogRace {
final CountDownLatch waitToEnterSync = new CountDownLatch(1);
final Thread doAnEditThread = new Thread() {
+ @Override
public void run() {
try {
LOG.info("Starting mkdirs");
@@ -503,9 +504,9 @@ public class TestEditLogRace {
LOG.info("Trying to enter safe mode.");
LOG.info("This should block for " + BLOCK_TIME + "sec, since we have pending edits");
- long st = System.currentTimeMillis();
+ long st = Time.now();
namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
- long et = System.currentTimeMillis();
+ long et = Time.now();
LOG.info("Entered safe mode");
// Make sure we really waited for the flush to complete!
assertTrue(et - st > (BLOCK_TIME - 1)*1000);
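
Both race tests above share one skeleton: a background thread starts an edit
whose flush or sync sleeps for BLOCK_TIME seconds, then the main thread asserts
that entering safe mode blocked at least that long. A self-contained sketch of
the latch-plus-timing shape, with a plain lock standing in for both the
edit-log flush and SAFEMODE_ENTER (everything here is illustrative):

    import static org.junit.Assert.assertTrue;

    import java.util.concurrent.CountDownLatch;

    import org.apache.hadoop.util.Time;

    public class SafeModeTimingSketch {
      static final int BLOCK_TIME = 3; // seconds the simulated flush is held

      public static void main(String[] args) throws Exception {
        final Object flushLock = new Object();
        final CountDownLatch editInFlight = new CountDownLatch(1);

        Thread editThread = new Thread() {
          @Override
          public void run() {
            synchronized (flushLock) {   // stands in for the edit-log flush
              editInFlight.countDown();  // signal: edit in flight
              try {
                Thread.sleep(BLOCK_TIME * 1000);
              } catch (InterruptedException ignored) {}
            }
          }
        };
        editThread.start();
        editInFlight.await();

        long st = Time.now();
        synchronized (flushLock) {       // stands in for SAFEMODE_ENTER
          // blocked until the in-flight "flush" completes
        }
        long et = Time.now();
        assertTrue("should have blocked", et - st > (BLOCK_TIME - 1) * 1000L);
        editThread.join();
      }
    }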
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java Fri Oct 19 02:25:55 2012
@@ -17,7 +17,10 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
import java.io.IOException;
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java Fri Oct 19 02:25:55 2012
@@ -21,6 +21,8 @@ package org.apache.hadoop.hdfs.server.na
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.spy;
import java.io.BufferedInputStream;
import java.io.File;
@@ -30,7 +32,6 @@ import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.channels.FileChannel;
import java.util.Map;
-import java.util.Set;
import java.util.SortedMap;
import org.apache.commons.logging.impl.Log4JLogger;
@@ -44,20 +45,14 @@ import org.apache.hadoop.hdfs.MiniDFSClu
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DeleteOp;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.io.IOUtils;
import org.apache.log4j.Level;
import org.junit.Test;
import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
import com.google.common.io.Files;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.spy;
-
public class TestFSEditLogLoader {
static {
@@ -121,7 +116,7 @@ public class TestFSEditLogLoader {
* automatically bumped up to the new minimum upon restart.
*/
@Test
- public void testReplicationAdjusted() throws IOException {
+ public void testReplicationAdjusted() throws Exception {
// start a cluster
Configuration conf = new HdfsConfiguration();
// Replicate and heartbeat fast to shave a few seconds off test
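
The spy/doNothing static imports moved to the top here support the usual
Mockito idiom: wrap a real object so unstubbed calls keep their real behavior,
and silence just one method. A minimal sketch on a plain list rather than the
class under test:

    import static org.mockito.Mockito.doNothing;
    import static org.mockito.Mockito.spy;

    import java.util.ArrayList;
    import java.util.List;

    public class SpySketch {
      public static void main(String[] args) {
        List<String> spied = spy(new ArrayList<String>());
        doNothing().when(spied).clear(); // clear() becomes a no-op
        spied.add("kept");               // real add() still runs
        spied.clear();                   // suppressed by the stub
        System.out.println(spied.size()); // prints 1
      }
    }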
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java Fri Oct 19 02:25:55 2012
@@ -17,19 +17,21 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
-import static org.junit.Assert.*;
+import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName;
+import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
+import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
-import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
-import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName;
-import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
-
import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile;
+import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.junit.Test;
public class TestFSImageStorageInspector {
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java Fri Oct 19 02:25:55 2012
@@ -18,14 +18,17 @@
package org.apache.hadoop.hdfs.server.namenode;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
-import static org.junit.Assert.*;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY;
+import static org.junit.Assert.assertEquals;
import java.io.IOException;
import java.net.URI;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.junit.Test;
public class TestFSNamesystem {
@@ -45,4 +48,20 @@ public class TestFSNamesystem {
assertEquals(2, editsDirs.size());
}
+ /**
+ * Test that FSNamesystem#clear clears all leases.
+ */
+ @Test
+ public void testFSNamespaceClearLeases() throws Exception {
+ Configuration conf = new HdfsConfiguration();
+ NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
+ DFSTestUtil.formatNameNode(conf);
+ FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
+ LeaseManager leaseMan = fsn.getLeaseManager();
+ leaseMan.addLease("client1", "importantFile");
+ assertEquals(1, leaseMan.countLease());
+ fsn.clear();
+ leaseMan = fsn.getLeaseManager();
+ assertEquals(0, leaseMan.countLease());
+ }
}
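The new test above boots a bare FSNamesystem without a MiniDFSCluster: metrics must be initialized and the name directories formatted before loadFromDisk() will succeed. A commented sketch of the same sequence (all names taken from the hunk above):

    // Bootstrap: metrics first, then format the name directory on disk.
    Configuration conf = new HdfsConfiguration();
    NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
    DFSTestUtil.formatNameNode(conf);
    FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);

    // Register one lease (holder, path), then verify clear() drops it.
    fsn.getLeaseManager().addLease("client1", "importantFile");
    assertEquals(1, fsn.getLeaseManager().countLease());
    fsn.clear();
    assertEquals(0, fsn.getLeaseManager().countLease());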
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java Fri Oct 19 02:25:55 2012
@@ -17,38 +17,46 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
-import static org.junit.Assert.*;
+import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.TXNS_PER_FAIL;
+import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.TXNS_PER_ROLL;
+import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.setupEdits;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import java.io.File;
+import java.io.FilenameFilter;
+import java.io.IOException;
+import java.io.RandomAccessFile;
import java.net.URI;
import java.util.Collections;
-import java.util.List;
import java.util.Iterator;
+import java.util.List;
import java.util.PriorityQueue;
-import java.io.RandomAccessFile;
-import java.io.File;
-import java.io.FilenameFilter;
-import java.io.IOException;
-import org.junit.Test;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.JournalManager.CorruptionException;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
+import org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
-import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.setupEdits;
-import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec;
-import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.TXNS_PER_ROLL;
-import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.TXNS_PER_FAIL;
+import org.junit.Test;
-import com.google.common.collect.ImmutableList;
import com.google.common.base.Joiner;
+import com.google.common.collect.ImmutableList;
public class TestFileJournalManager {
static final Log LOG = LogFactory.getLog(TestFileJournalManager.class);
+ static {
+ // No need to fsync for the purposes of tests. This makes
+ // the tests run much faster.
+ EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
+ }
+
/**
* Find out how many transactions we can read from a
* FileJournalManager, starting at a given transaction ID.
@@ -296,6 +304,7 @@ public class TestFileJournalManager {
final long startGapTxId = 3*TXNS_PER_ROLL + 1;
final long endGapTxId = 4*TXNS_PER_ROLL;
File[] files = new File(f, "current").listFiles(new FilenameFilter() {
+ @Override
public boolean accept(File dir, String name) {
if (name.startsWith(NNStorage.getFinalizedEditsFileName(startGapTxId, endGapTxId))) {
return true;
@@ -327,6 +336,7 @@ public class TestFileJournalManager {
StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
File[] files = new File(f, "current").listFiles(new FilenameFilter() {
+ @Override
public boolean accept(File dir, String name) {
if (name.startsWith("edits_inprogress")) {
return true;
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java Fri Oct 19 02:25:55 2012
@@ -17,44 +17,30 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
-import java.io.IOException;
-import java.util.Random;
+import static org.junit.Assert.assertTrue;
-import junit.framework.TestCase;
+import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.junit.Test;
/**
 * This class tests that a file system adheres to the configured
 * limit on the maximum number of files.
*/
-public class TestFileLimit extends TestCase {
+public class TestFileLimit {
static final long seed = 0xDEADBEEFL;
static final int blockSize = 8192;
boolean simulatedStorage = false;
- // creates a zero file.
- private void createFile(FileSystem fileSys, Path name)
- throws IOException {
- FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
- .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
- (short) 1, blockSize);
- byte[] buffer = new byte[1024];
- Random rand = new Random(seed);
- rand.nextBytes(buffer);
- stm.write(buffer);
- stm.close();
- }
-
private void waitForLimit(FSNamesystem namesys, long num)
{
// wait for number of blocks to decrease
@@ -75,6 +61,7 @@ public class TestFileLimit extends TestC
/**
 * Test that the configured limit on the number of namespace objects is enforced.
*/
+ @Test
public void testFileLimit() throws IOException {
Configuration conf = new HdfsConfiguration();
int maxObjects = 5;
@@ -104,7 +91,7 @@ public class TestFileLimit extends TestC
//
for (int i = 0; i < maxObjects/2; i++) {
Path file = new Path("/filestatus" + i);
- createFile(fs, file);
+ DFSTestUtil.createFile(fs, file, 1024, 1024, blockSize, (short) 1, seed);
System.out.println("Created file " + file);
currentNodes += 2; // two more objects for this creation.
}
@@ -113,7 +100,7 @@ public class TestFileLimit extends TestC
boolean hitException = false;
try {
Path file = new Path("/filestatus");
- createFile(fs, file);
+ DFSTestUtil.createFile(fs, file, 1024, 1024, blockSize, (short) 1, seed);
System.out.println("Created file " + file);
} catch (IOException e) {
hitException = true;
@@ -130,7 +117,7 @@ public class TestFileLimit extends TestC
waitForLimit(namesys, currentNodes);
// now, we should be able to create a new file
- createFile(fs, file0);
+ DFSTestUtil.createFile(fs, file0, 1024, 1024, blockSize, (short) 1, seed);
System.out.println("Created file " + file0 + " again.");
currentNodes += 2;
@@ -166,6 +153,7 @@ public class TestFileLimit extends TestC
}
}
+ @Test
public void testFileLimitSimulated() throws IOException {
simulatedStorage = true;
testFileLimit();
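The deleted createFile helper is subsumed by the shared DFSTestUtil.createFile utility. For reference, the positional arguments in the calls above line up as follows (the argument names in the comment are descriptive glosses, not necessarily the declared parameter names):

    // createFile(fs, path, bufferLen, fileLen, blockSize, replication, seed):
    // a 1024-byte file written through a 1024-byte buffer, 8192-byte blocks,
    // replication factor 1, contents seeded deterministically.
    DFSTestUtil.createFile(fs, new Path("/filestatus0"),
        1024, 1024, blockSize, (short) 1, seed);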
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java Fri Oct 19 02:25:55 2012
@@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.server.namenode;
+import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
import static org.junit.Assert.assertEquals;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Mockito.mock;
@@ -30,12 +31,11 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.FSLimitException;
import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
import org.junit.Before;
import org.junit.Test;
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Fri Oct 19 02:25:55 2012
@@ -18,7 +18,11 @@
package org.apache.hadoop.hdfs.server.namenode;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
import java.io.BufferedReader;
import java.io.ByteArrayOutputStream;
@@ -57,7 +61,6 @@ import org.apache.hadoop.hdfs.Distribute
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
-import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -92,6 +95,12 @@ public class TestFsck {
"ip=/\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\s" +
"cmd=fsck\\ssrc=\\/\\sdst=null\\s" +
"perm=null");
+ static final Pattern getfileinfoPattern = Pattern.compile(
+ "allowed=.*?\\s" +
+ "ugi=.*?\\s" +
+ "ip=/\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\s" +
+ "cmd=getfileinfo\\ssrc=\\/\\sdst=null\\s" +
+ "perm=null");
static final Pattern numCorruptBlocksPattern = Pattern.compile(
".*Corrupt blocks:\t\t([0123456789]*).*");
@@ -177,10 +186,14 @@ public class TestFsck {
Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
logger.setLevel(Level.OFF);
- // Ensure audit log has only one for FSCK
+ // Audit log should contain one getfileinfo and one fsck
BufferedReader reader = new BufferedReader(new FileReader(auditLogFile));
String line = reader.readLine();
assertNotNull(line);
+ assertTrue("Expected getfileinfo event not found in audit log",
+ getfileinfoPattern.matcher(line).matches());
+ line = reader.readLine();
+ assertNotNull(line);
assertTrue("Expected fsck event not found in audit log",
fsckPattern.matcher(line).matches());
assertNull("Unexpected event in audit log", reader.readLine());
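The extra expected line reflects that fsck now stats the target path before auditing the fsck command itself, so a getfileinfo event lands first. A quick check of the new pattern against an illustrative audit line (the sample string below is hand-written for shape, not captured output):

    // Tab-separated fields in the shape the audit logger emits.
    String sample = "allowed=true\tugi=hdfs\tip=/127.0.0.1\t"
        + "cmd=getfileinfo\tsrc=/\tdst=null\tperm=null";
    assertTrue(getfileinfoPattern.matcher(sample).matches());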
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java Fri Oct 19 02:25:55 2012
@@ -17,18 +17,18 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
-import org.junit.Test;
-
-import static org.mockito.Mockito.mock;
import static org.junit.Assert.*;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import static org.mockito.Mockito.mock;
+import java.io.IOException;
import java.net.URI;
import java.util.Collection;
-import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.junit.Test;
public class TestGenericJournalConf {
private static final String DUMMY_URI = "dummy://test";
@@ -123,6 +123,8 @@ public class TestGenericJournalConf {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
+ assertTrue(DummyJournalManager.shouldPromptCalled);
+ assertTrue(DummyJournalManager.formatCalled);
assertNotNull(DummyJournalManager.conf);
assertEquals(new URI(DUMMY_URI), DummyJournalManager.uri);
assertNotNull(DummyJournalManager.nsInfo);
@@ -139,6 +141,8 @@ public class TestGenericJournalConf {
static Configuration conf = null;
static URI uri = null;
static NamespaceInfo nsInfo = null;
+ static boolean formatCalled = false;
+ static boolean shouldPromptCalled = false;
public DummyJournalManager(Configuration conf, URI u,
NamespaceInfo nsInfo) {
@@ -149,6 +153,11 @@ public class TestGenericJournalConf {
}
@Override
+ public void format(NamespaceInfo nsInfo) throws IOException {
+ formatCalled = true;
+ }
+
+ @Override
public EditLogOutputStream startLogSegment(long txId) throws IOException {
return mock(EditLogOutputStream.class);
}
@@ -176,6 +185,12 @@ public class TestGenericJournalConf {
@Override
public void close() throws IOException {}
+
+ @Override
+ public boolean hasSomeData() throws IOException {
+ shouldPromptCalled = true;
+ return false;
+ }
}
public static class BadConstructorJournalManager extends DummyJournalManager {
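The two new flags pin down the format contract for pluggable journals: on startup the NameNode calls hasSomeData() on each journal (the prompt-before-reformat probe) and then format(). A journal manager is selected by the URI scheme of the edits dir; the sketch below wires the dummy implementation in by hand (the journal-plugin key name follows the usual convention but is not shown in these hunks, so treat it as an assumption):

    // Map the "dummy" URI scheme to DummyJournalManager, then point
    // the edits dir at it so the NameNode instantiates the plugin.
    Configuration conf = new Configuration();
    conf.set("dfs.namenode.edits.journal-plugin.dummy",
        DummyJournalManager.class.getName());
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, "dummy://test");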
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java Fri Oct 19 02:25:55 2012
@@ -17,7 +17,8 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
import java.io.IOException;
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java Fri Oct 19 02:25:55 2012
@@ -25,10 +25,11 @@ import org.apache.commons.logging.LogFac
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.util.Time;
import org.junit.Assert;
import org.junit.Test;
@@ -135,10 +136,10 @@ public class TestLargeDirectoryDelete {
threads[0].start();
threads[1].start();
- final long start = System.currentTimeMillis();
+ final long start = Time.now();
FSNamesystem.BLOCK_DELETION_INCREMENT = 1;
mc.getFileSystem().delete(new Path("/root"), true); // recursive delete
- final long end = System.currentTimeMillis();
+ final long end = Time.now();
threads[0].endThread();
threads[1].endThread();
LOG.info("Deletion took " + (end - start) + "msecs");
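Time.now() is the org.apache.hadoop.util.Time wrapper over the wall clock, so timing code stays on the common utility instead of raw System.currentTimeMillis() calls. The measurement idiom is unchanged:

    // Elapsed wall-clock milliseconds around an operation under test;
    // runDelete() is a placeholder standing in for the recursive delete
    // timed above.
    final long start = Time.now();
    runDelete();
    LOG.info("Deletion took " + (Time.now() - start) + " msecs");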
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java Fri Oct 19 02:25:55 2012
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
+import static org.junit.Assert.assertTrue;
+
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
@@ -25,9 +27,6 @@ import java.nio.channels.FileChannel;
import java.util.Collection;
import java.util.Random;
-import org.junit.Test;
-import static org.junit.Assert.assertTrue;
-
import org.apache.commons.logging.Log;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
@@ -35,13 +34,14 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.BlockMissingException;
import org.apache.hadoop.hdfs.CorruptFileBlockIterator;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.util.StringUtils;
+import org.junit.Test;
/**
* This class tests the listCorruptFileBlocks API.
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java Fri Oct 19 02:25:55 2012
@@ -17,27 +17,27 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
-import org.junit.Test;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import static org.junit.Assert.assertTrue;
+
import java.io.BufferedReader;
-import java.io.FileInputStream;
import java.io.DataInputStream;
-import java.io.InputStreamReader;
+import java.io.FileInputStream;
import java.io.IOException;
-import java.lang.InterruptedException;
+import java.io.InputStreamReader;
import java.util.Random;
-import static org.junit.Assert.assertTrue;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
/**
* This class tests the creation and validation of metasave
@@ -49,17 +49,6 @@ public class TestMetaSave {
private static MiniDFSCluster cluster = null;
private static FileSystem fileSys = null;
- private void createFile(FileSystem fileSys, Path name) throws IOException {
- FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
- .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
- (short) 2, blockSize);
- byte[] buffer = new byte[1024];
- Random rand = new Random(seed);
- rand.nextBytes(buffer);
- stm.write(buffer);
- stm.close();
- }
-
@BeforeClass
public static void setUp() throws IOException {
// start a cluster
@@ -85,7 +74,8 @@ public class TestMetaSave {
for (int i = 0; i < 2; i++) {
Path file = new Path("/filestatus" + i);
- createFile(fileSys, file);
+ DFSTestUtil.createFile(fileSys, file, 1024, 1024, blockSize, (short) 2,
+ seed);
}
cluster.stopDataNode(1);
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java Fri Oct 19 02:25:55 2012
@@ -17,6 +17,11 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
+import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName;
+import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
+import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
+import static org.apache.hadoop.test.GenericTestUtils.assertGlobEquals;
+
import java.io.File;
import java.io.IOException;
@@ -31,11 +36,6 @@ import org.junit.Test;
import com.google.common.base.Joiner;
-import static org.apache.hadoop.test.GenericTestUtils.assertGlobEquals;
-import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
-import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName;
-import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
-
/**
* Functional tests for NNStorageRetentionManager. This differs from