Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2013/04/17 21:41:53 UTC
svn commit: r1469042 - in
/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs: ./
src/main/java/ src/main/java/org/apache/hadoop/hdfs/server/namenode/
src/main/native/ src/main/webapps/datanode/ src/main/webapps/hdfs/
src/main/webapps/sec...
Author: szetszwo
Date: Wed Apr 17 19:41:50 2013
New Revision: 1469042
URL: http://svn.apache.org/r1469042
Log:
Merge r1467713 through r1469041 from trunk.
Modified:
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/ (props changed)
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1467713-1469041
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1469042&r1=1469041&r2=1469042&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Apr 17 19:41:50 2013
@@ -461,6 +461,9 @@ Release 2.0.5-beta - UNRELEASED
HDFS-4679. Namenode operation checks should be done in a consistent
manner. (suresh)
+ HDFS-4693. Some test cases in TestCheckpoint do not clean up after
+ themselves. (Arpit Agarwal, suresh via suresh)
+
OPTIMIZATIONS
BUG FIXES
@@ -565,6 +568,9 @@ Release 2.0.5-beta - UNRELEASED
HDFS-4639. startFileInternal() should not increment generation stamp.
(Plamen Jeliazkov via shv)
+ HDFS-4695. TestEditLog leaks open file handles between tests.
+ (Ivan Mitic via suresh)
+
Release 2.0.4-alpha - UNRELEASED
INCOMPATIBLE CHANGES
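Both changelog entries above are test-resource-leak fixes: HDFS-4693 makes TestCheckpoint tear down its clusters and secondary namenodes even when assertions fail, and HDFS-4695 closes file handles that TestEditLog left open between tests. A minimal sketch of the underlying pattern, in plain JUnit 4 with a hypothetical Closeable fixture rather than the actual Hadoop classes: do best-effort cleanup in @After (or a finally block), log and swallow teardown failures, and clear the reference so the next test starts clean.

import java.io.Closeable;
import java.io.IOException;

import org.junit.After;
import org.junit.Test;

public class CleanupPatternSketch {
  // Hypothetical stand-in for a heavyweight fixture such as a mini cluster.
  private Closeable resource;

  @After
  public void tearDown() {
    // Best-effort cleanup: a teardown failure must not mask the test
    // result or leak the resource into the next test.
    if (resource != null) {
      try {
        resource.close();
      } catch (IOException e) {
        System.err.println("Could not close resource: " + e);
      }
      resource = null;
    }
  }

  @Test
  public void testSomething() throws IOException {
    // resource = openResource(); ... exercise it; @After cleans up
    // whether or not the assertions below pass.
  }
}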
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1467713-1469041
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1469042&r1=1469041&r2=1469042&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Wed Apr 17 19:41:50 2013
@@ -290,6 +290,7 @@ public class SecondaryNameNode implement
try {
infoServer.join();
} catch (InterruptedException ie) {
+ LOG.debug("Exception ", ie);
}
}
@@ -309,15 +310,25 @@ public class SecondaryNameNode implement
}
}
try {
- if (infoServer != null) infoServer.stop();
+ if (infoServer != null) {
+ infoServer.stop();
+ infoServer = null;
+ }
} catch (Exception e) {
LOG.warn("Exception shutting down SecondaryNameNode", e);
}
try {
- if (checkpointImage != null) checkpointImage.close();
+ if (checkpointImage != null) {
+ checkpointImage.close();
+ checkpointImage = null;
+ }
} catch(IOException e) {
LOG.warn("Exception while closing CheckpointStorage", e);
}
+ if (namesystem != null) {
+ namesystem.shutdown();
+ namesystem = null;
+ }
}
@Override
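The SecondaryNameNode.shutdown() change above applies one pattern throughout: stop a component, clear the field once the stop has succeeded so a repeated shutdown() is a no-op, and keep each component's failure from blocking the others. A minimal sketch of that shape, with a hypothetical Component interface standing in for the real infoServer and checkpointImage fields:

public class IdempotentShutdownSketch {
  // Hypothetical component whose stop() may throw.
  interface Component { void stop() throws Exception; }

  private Component infoServer;      // stands in for the HTTP server
  private Component checkpointImage; // stands in for CheckpointStorage

  public synchronized void shutdown() {
    // Each component gets its own try/catch, so one failure does not
    // prevent stopping the rest; the field is cleared only after a
    // successful stop, making a second shutdown() call harmless.
    try {
      if (infoServer != null) {
        infoServer.stop();
        infoServer = null;
      }
    } catch (Exception e) {
      System.err.println("Exception shutting down info server: " + e);
    }
    try {
      if (checkpointImage != null) {
        checkpointImage.stop();
        checkpointImage = null;
      }
    } catch (Exception e) {
      System.err.println("Exception closing checkpoint image: " + e);
    }
  }
}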
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1467713-1469041
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1467713-1469041
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1467713-1469041
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1467713-1469041
Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1467713-1469041
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java?rev=1469042&r1=1469041&r2=1469042&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java Wed Apr 17 19:41:50 2013
@@ -109,6 +109,7 @@ public class UpgradeUtilities {
config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, namenodeStorage.toString());
config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, datanodeStorage.toString());
MiniDFSCluster cluster = null;
+ String bpid = null;
try {
// format data-node
createEmptyDirs(new String[] {datanodeStorage.toString()});
@@ -149,6 +150,7 @@ public class UpgradeUtilities {
// write more files
writeFile(fs, new Path(baseDir, "file3"), buffer, bufferSize);
writeFile(fs, new Path(baseDir, "file4"), buffer, bufferSize);
+ bpid = cluster.getNamesystem(0).getBlockPoolId();
} finally {
// shutdown
if (cluster != null) cluster.shutdown();
@@ -160,7 +162,6 @@ public class UpgradeUtilities {
File dnCurDir = new File(datanodeStorage, "current");
datanodeStorageChecksum = checksumContents(DATA_NODE, dnCurDir);
- String bpid = cluster.getNamesystem(0).getBlockPoolId();
File bpCurDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir),
"current");
blockPoolStorageChecksum = checksumContents(DATA_NODE, bpCurDir);
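The UpgradeUtilities fix moves the getBlockPoolId() call inside the try block: the old code asked the namesystem for the block-pool id after the finally clause had already shut the cluster down. The general rule is to capture whatever a later step needs from a resource before closing it; a minimal sketch with a plain InputStream (a hypothetical example, not the MiniDFSCluster API):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class CaptureBeforeShutdownSketch {
  public static void main(String[] args) throws IOException {
    InputStream in = new ByteArrayInputStream(new byte[] {42});
    int firstByte;
    try {
      // Capture everything a later step needs *inside* the try block,
      // while the resource is still usable.
      firstByte = in.read();
    } finally {
      in.close(); // after this point the resource must not be touched
    }
    System.out.println("captured before close: " + firstByte);
  }
}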
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java?rev=1469042&r1=1469041&r2=1469042&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java Wed Apr 17 19:41:50 2013
@@ -111,9 +111,8 @@ public class TestCheckpoint {
private CheckpointFaultInjector faultInjector;
@Before
- public void setUp() throws IOException {
+ public void setUp() {
FileUtil.fullyDeleteContents(new File(MiniDFSCluster.getBaseDirectory()));
-
faultInjector = Mockito.mock(CheckpointFaultInjector.class);
CheckpointFaultInjector.instance = faultInjector;
}
@@ -145,9 +144,8 @@ public class TestCheckpoint {
public void testNameDirError() throws IOException {
LOG.info("Starting testNameDirError");
Configuration conf = new HdfsConfiguration();
- MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
- .numDataNodes(0)
- .build();
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+ .build();
Collection<URI> nameDirs = cluster.getNameDirs(0);
cluster.shutdown();
@@ -159,20 +157,15 @@ public class TestCheckpoint {
try {
// Simulate the mount going read-only
dir.setWritable(false);
- cluster = new MiniDFSCluster.Builder(conf)
- .numDataNodes(0)
- .format(false)
- .build();
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+ .format(false).build();
fail("NN should have failed to start with " + dir + " set unreadable");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains(
- "storage directory does not exist or is not accessible",
- ioe);
+ "storage directory does not exist or is not accessible", ioe);
} finally {
- if (cluster != null) {
- cluster.shutdown();
- cluster = null;
- }
+ cleanup(cluster);
+ cluster = null;
dir.setWritable(true);
}
}
@@ -258,15 +251,13 @@ public class TestCheckpoint {
assertTrue("Another checkpoint should have reloaded image",
secondary.doCheckpoint());
} finally {
- if (secondary != null) {
- secondary.shutdown();
- }
if (fs != null) {
fs.close();
}
- if (cluster != null) {
- cluster.shutdown();
- }
+ cleanup(secondary);
+ secondary = null;
+ cleanup(cluster);
+ cluster = null;
Mockito.reset(faultInjector);
}
}
@@ -307,15 +298,13 @@ public class TestCheckpoint {
ExitUtil.resetFirstExitException();
assertEquals("Max retries", 1, secondary.getMergeErrorCount() - 1);
} finally {
- if (secondary != null) {
- secondary.shutdown();
- }
if (fs != null) {
fs.close();
}
- if (cluster != null) {
- cluster.shutdown();
- }
+ cleanup(secondary);
+ secondary = null;
+ cleanup(cluster);
+ cluster = null;
Mockito.reset(faultInjector);
}
}
@@ -329,17 +318,18 @@ public class TestCheckpoint {
LOG.info("Starting testSecondaryNamenodeError1");
Configuration conf = new HdfsConfiguration();
Path file1 = new Path("checkpointxx.dat");
- MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
- .numDataNodes(numDatanodes)
- .build();
- cluster.waitActive();
- FileSystem fileSys = cluster.getFileSystem();
+ MiniDFSCluster cluster = null;
+ FileSystem fileSys = null;
+ SecondaryNameNode secondary = null;
try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
+ .build();
+ cluster.waitActive();
+ fileSys = cluster.getFileSystem();
assertTrue(!fileSys.exists(file1));
- //
+
// Make the checkpoint fail after rolling the edits log.
- //
- SecondaryNameNode secondary = startSecondaryNameNode(conf);
+ secondary = startSecondaryNameNode(conf);
Mockito.doThrow(new IOException(
"Injecting failure after rolling edit logs"))
@@ -349,10 +339,10 @@ public class TestCheckpoint {
secondary.doCheckpoint(); // this should fail
assertTrue(false);
} catch (IOException e) {
+ // expected
}
Mockito.reset(faultInjector);
- secondary.shutdown();
//
// Create a new file
@@ -362,7 +352,10 @@ public class TestCheckpoint {
checkFile(fileSys, file1, replication);
} finally {
fileSys.close();
- cluster.shutdown();
+ cleanup(secondary);
+ secondary = null;
+ cleanup(cluster);
+ cluster = null;
}
//
@@ -370,20 +363,22 @@ public class TestCheckpoint {
// Then take another checkpoint to verify that the
// namenode restart accounted for the rolled edit logs.
//
- cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
- .format(false).build();
- cluster.waitActive();
-
- fileSys = cluster.getFileSystem();
try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
+ .format(false).build();
+ cluster.waitActive();
+ fileSys = cluster.getFileSystem();
checkFile(fileSys, file1, replication);
cleanupFile(fileSys, file1);
- SecondaryNameNode secondary = startSecondaryNameNode(conf);
+ secondary = startSecondaryNameNode(conf);
secondary.doCheckpoint();
secondary.shutdown();
} finally {
fileSys.close();
- cluster.shutdown();
+ cleanup(secondary);
+ secondary = null;
+ cleanup(cluster);
+ cluster = null;
}
}
@@ -395,17 +390,19 @@ public class TestCheckpoint {
LOG.info("Starting testSecondaryNamenodeError2");
Configuration conf = new HdfsConfiguration();
Path file1 = new Path("checkpointyy.dat");
- MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
- .numDataNodes(numDatanodes)
- .build();
- cluster.waitActive();
- FileSystem fileSys = cluster.getFileSystem();
+ MiniDFSCluster cluster = null;
+ FileSystem fileSys = null;
+ SecondaryNameNode secondary = null;
try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
+ .build();
+ cluster.waitActive();
+ fileSys = cluster.getFileSystem();
assertTrue(!fileSys.exists(file1));
//
// Make the checkpoint fail after uploading the new fsimage.
//
- SecondaryNameNode secondary = startSecondaryNameNode(conf);
+ secondary = startSecondaryNameNode(conf);
Mockito.doThrow(new IOException(
"Injecting failure after uploading new image"))
@@ -415,9 +412,9 @@ public class TestCheckpoint {
secondary.doCheckpoint(); // this should fail
assertTrue(false);
} catch (IOException e) {
+ // expected
}
Mockito.reset(faultInjector);
- secondary.shutdown();
//
// Create a new file
@@ -427,7 +424,10 @@ public class TestCheckpoint {
checkFile(fileSys, file1, replication);
} finally {
fileSys.close();
- cluster.shutdown();
+ cleanup(secondary);
+ secondary = null;
+ cleanup(cluster);
+ cluster = null;
}
//
@@ -435,18 +435,22 @@ public class TestCheckpoint {
// Then take another checkpoint to verify that the
// namenode restart accounted for the rolled edit logs.
//
- cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
- cluster.waitActive();
- fileSys = cluster.getFileSystem();
try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
+ .format(false).build();
+ cluster.waitActive();
+ fileSys = cluster.getFileSystem();
checkFile(fileSys, file1, replication);
cleanupFile(fileSys, file1);
- SecondaryNameNode secondary = startSecondaryNameNode(conf);
+ secondary = startSecondaryNameNode(conf);
secondary.doCheckpoint();
secondary.shutdown();
} finally {
fileSys.close();
- cluster.shutdown();
+ cleanup(secondary);
+ secondary = null;
+ cleanup(cluster);
+ cluster = null;
}
}
@@ -458,18 +462,19 @@ public class TestCheckpoint {
LOG.info("Starting testSecondaryNamenodeError3");
Configuration conf = new HdfsConfiguration();
Path file1 = new Path("checkpointzz.dat");
- MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
- .numDataNodes(numDatanodes)
- .build();
-
- cluster.waitActive();
- FileSystem fileSys = cluster.getFileSystem();
+ MiniDFSCluster cluster = null;
+ FileSystem fileSys = null;
+ SecondaryNameNode secondary = null;
try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
+ .build();
+ cluster.waitActive();
+ fileSys = cluster.getFileSystem();
assertTrue(!fileSys.exists(file1));
//
// Make the checkpoint fail after rolling the edit log.
//
- SecondaryNameNode secondary = startSecondaryNameNode(conf);
+ secondary = startSecondaryNameNode(conf);
Mockito.doThrow(new IOException(
"Injecting failure after rolling edit logs"))
@@ -479,6 +484,7 @@ public class TestCheckpoint {
secondary.doCheckpoint(); // this should fail
assertTrue(false);
} catch (IOException e) {
+ // expected
}
Mockito.reset(faultInjector);
secondary.shutdown(); // secondary namenode crash!
@@ -489,7 +495,6 @@ public class TestCheckpoint {
//
secondary = startSecondaryNameNode(conf);
secondary.doCheckpoint(); // this should work correctly
- secondary.shutdown();
//
// Create a new file
@@ -499,7 +504,10 @@ public class TestCheckpoint {
checkFile(fileSys, file1, replication);
} finally {
fileSys.close();
- cluster.shutdown();
+ cleanup(secondary);
+ secondary = null;
+ cleanup(cluster);
+ cluster = null;
}
//
@@ -507,18 +515,22 @@ public class TestCheckpoint {
// Then take another checkpoint to verify that the
// namenode restart accounted for the twice-rolled edit logs.
//
- cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
- cluster.waitActive();
- fileSys = cluster.getFileSystem();
try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
+ .format(false).build();
+ cluster.waitActive();
+ fileSys = cluster.getFileSystem();
checkFile(fileSys, file1, replication);
cleanupFile(fileSys, file1);
- SecondaryNameNode secondary = startSecondaryNameNode(conf);
+ secondary = startSecondaryNameNode(conf);
secondary.doCheckpoint();
secondary.shutdown();
} finally {
fileSys.close();
- cluster.shutdown();
+ cleanup(secondary);
+ secondary = null;
+ cleanup(cluster);
+ cluster = null;
}
}
@@ -555,13 +567,16 @@ public class TestCheckpoint {
LOG.info("Starting testSecondaryFailsToReturnImage");
Configuration conf = new HdfsConfiguration();
Path file1 = new Path("checkpointRI.dat");
- MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
- .numDataNodes(numDatanodes)
- .build();
- cluster.waitActive();
- FileSystem fileSys = cluster.getFileSystem();
- FSImage image = cluster.getNameNode().getFSImage();
+ MiniDFSCluster cluster = null;
+ FileSystem fileSys = null;
+ FSImage image = null;
+ SecondaryNameNode secondary = null;
try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
+ .build();
+ cluster.waitActive();
+ fileSys = cluster.getFileSystem();
+ image = cluster.getNameNode().getFSImage();
assertTrue(!fileSys.exists(file1));
StorageDirectory sd = image.getStorage().getStorageDir(0);
@@ -570,7 +585,7 @@ public class TestCheckpoint {
//
// Make the checkpoint
//
- SecondaryNameNode secondary = startSecondaryNameNode(conf);
+ secondary = startSecondaryNameNode(conf);
try {
secondary.doCheckpoint(); // this should fail
@@ -591,10 +606,12 @@ public class TestCheckpoint {
assertEquals(fsimageLength, len);
}
- secondary.shutdown();
} finally {
fileSys.close();
- cluster.shutdown();
+ cleanup(secondary);
+ secondary = null;
+ cleanup(cluster);
+ cluster = null;
}
}
@@ -648,17 +665,19 @@ public class TestCheckpoint {
throws IOException {
Configuration conf = new HdfsConfiguration();
Path file1 = new Path("checkpoint-doSendFailTest-doSendFailTest.dat");
- MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
- .numDataNodes(numDatanodes)
- .build();
- cluster.waitActive();
- FileSystem fileSys = cluster.getFileSystem();
+ MiniDFSCluster cluster = null;
+ FileSystem fileSys = null;
+ SecondaryNameNode secondary = null;
try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
+ .build();
+ cluster.waitActive();
+ fileSys = cluster.getFileSystem();
assertTrue(!fileSys.exists(file1));
//
// Make the checkpoint fail after rolling the edit log.
//
- SecondaryNameNode secondary = startSecondaryNameNode(conf);
+ secondary = startSecondaryNameNode(conf);
try {
secondary.doCheckpoint(); // this should fail
@@ -669,6 +688,7 @@ public class TestCheckpoint {
}
Mockito.reset(faultInjector);
secondary.shutdown(); // secondary namenode crash!
+ secondary = null;
// start new instance of secondary and verify that
// a new rollEditLog succeeds in spite of the fact that we had
@@ -676,7 +696,6 @@ public class TestCheckpoint {
//
secondary = startSecondaryNameNode(conf);
secondary.doCheckpoint(); // this should work correctly
- secondary.shutdown();
//
// Create a new file
@@ -686,7 +705,10 @@ public class TestCheckpoint {
checkFile(fileSys, file1, replication);
} finally {
fileSys.close();
- cluster.shutdown();
+ cleanup(secondary);
+ secondary = null;
+ cleanup(cluster);
+ cluster = null;
}
}
@@ -697,21 +719,21 @@ public class TestCheckpoint {
@Test
public void testNameDirLocking() throws IOException {
Configuration conf = new HdfsConfiguration();
- MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
- .numDataNodes(0)
- .build();
+ MiniDFSCluster cluster = null;
// Start a NN, and verify that lock() fails in all of the configured
// directories
StorageDirectory savedSd = null;
try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
for (StorageDirectory sd : storage.dirIterable(null)) {
assertLockFails(sd);
savedSd = sd;
}
} finally {
- cluster.shutdown();
+ cleanup(cluster);
+ cluster = null;
}
assertNotNull(savedSd);
@@ -732,15 +754,14 @@ public class TestCheckpoint {
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
editsDir.getAbsolutePath());
- MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
- .manageNameDfsDirs(false)
- .numDataNodes(0)
- .build();
+ MiniDFSCluster cluster = null;
// Start a NN, and verify that lock() fails in all of the configured
// directories
StorageDirectory savedSd = null;
try {
+ cluster = new MiniDFSCluster.Builder(conf).manageNameDfsDirs(false)
+ .numDataNodes(0).build();
NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.EDITS)) {
assertEquals(editsDir.getAbsoluteFile(), sd.getRoot());
@@ -748,7 +769,8 @@ public class TestCheckpoint {
savedSd = sd;
}
} finally {
- cluster.shutdown();
+ cleanup(cluster);
+ cluster = null;
}
assertNotNull(savedSd);
@@ -764,12 +786,10 @@ public class TestCheckpoint {
public void testSecondaryNameNodeLocking() throws Exception {
// Start a primary NN so that the secondary will start successfully
Configuration conf = new HdfsConfiguration();
- MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
- .numDataNodes(0)
- .build();
-
+ MiniDFSCluster cluster = null;
SecondaryNameNode secondary = null;
try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
StorageDirectory savedSd = null;
// Start a secondary NN, then make sure that all of its storage
// dirs got locked.
@@ -799,10 +819,10 @@ public class TestCheckpoint {
}
} finally {
- if (secondary != null) {
- secondary.shutdown();
- }
- cluster.shutdown();
+ cleanup(secondary);
+ secondary = null;
+ cleanup(cluster);
+ cluster = null;
}
}
@@ -813,12 +833,10 @@ public class TestCheckpoint {
@Test
public void testStorageAlreadyLockedErrorMessage() throws Exception {
Configuration conf = new HdfsConfiguration();
- MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
- .numDataNodes(0)
- .build();
-
+ MiniDFSCluster cluster = null;
StorageDirectory savedSd = null;
try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
for (StorageDirectory sd : storage.dirIterable(null)) {
assertLockFails(sd);
@@ -836,7 +854,8 @@ public class TestCheckpoint {
+ "'", logs.getOutput().contains(jvmName));
}
} finally {
- cluster.shutdown();
+ cleanup(cluster);
+ cluster = null;
}
}
@@ -864,18 +883,17 @@ public class TestCheckpoint {
Configuration conf, StorageDirectory sdToLock) throws IOException {
// Lock the edits dir, then start the NN, and make sure it fails to start
sdToLock.lock();
+ MiniDFSCluster cluster = null;
try {
- MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
- .format(false)
- .manageNameDfsDirs(false)
- .numDataNodes(0)
- .build();
+ cluster = new MiniDFSCluster.Builder(conf).format(false)
+ .manageNameDfsDirs(false).numDataNodes(0).build();
assertFalse("cluster should fail to start after locking " +
sdToLock, sdToLock.isLockSupported());
- cluster.shutdown();
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains("already locked", ioe);
} finally {
+ cleanup(cluster);
+ cluster = null;
sdToLock.unlock();
}
}
@@ -892,11 +910,12 @@ public class TestCheckpoint {
Configuration conf = new HdfsConfiguration();
Path testPath = new Path("/testfile");
SecondaryNameNode snn = null;
- MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
- .numDataNodes(0)
- .build();
- Collection<URI> nameDirs = cluster.getNameDirs(0);
+ MiniDFSCluster cluster = null;
+ Collection<URI> nameDirs = null;
try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+ nameDirs = cluster.getNameDirs(0);
+
// Make an entry in the namespace, used for verifying checkpoint
// later.
cluster.getFileSystem().mkdirs(testPath);
@@ -905,21 +924,16 @@ public class TestCheckpoint {
snn = startSecondaryNameNode(conf);
snn.doCheckpoint();
} finally {
- if (snn != null) {
- snn.shutdown();
- }
- cluster.shutdown();
+ cleanup(snn);
+ cleanup(cluster);
cluster = null;
}
LOG.info("Trying to import checkpoint when the NameNode already " +
"contains an image. This should fail.");
try {
- cluster = new MiniDFSCluster.Builder(conf)
- .numDataNodes(0)
- .format(false)
- .startupOption(StartupOption.IMPORT)
- .build();
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false)
+ .startupOption(StartupOption.IMPORT).build();
fail("NameNode did not fail to start when it already contained " +
"an image");
} catch (IOException ioe) {
@@ -927,10 +941,8 @@ public class TestCheckpoint {
GenericTestUtils.assertExceptionContains(
"NameNode already contains an image", ioe);
} finally {
- if (cluster != null) {
- cluster.shutdown();
- cluster = null;
- }
+ cleanup(cluster);
+ cluster = null;
}
LOG.info("Removing NN storage contents");
@@ -942,11 +954,8 @@ public class TestCheckpoint {
LOG.info("Trying to import checkpoint");
try {
- cluster = new MiniDFSCluster.Builder(conf)
- .format(false)
- .numDataNodes(0)
- .startupOption(StartupOption.IMPORT)
- .build();
+ cluster = new MiniDFSCluster.Builder(conf).format(false).numDataNodes(0)
+ .startupOption(StartupOption.IMPORT).build();
assertTrue("Path from checkpoint should exist after import",
cluster.getFileSystem().exists(testPath));
@@ -954,9 +963,8 @@ public class TestCheckpoint {
// Make sure that the image got saved on import
FSImageTestUtil.assertNNHasCheckpoints(cluster, Ints.asList(3));
} finally {
- if (cluster != null) {
- cluster.shutdown();
- }
+ cleanup(cluster);
+ cluster = null;
}
}
@@ -993,12 +1001,15 @@ public class TestCheckpoint {
Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
- MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
- .numDataNodes(numDatanodes).build();
- cluster.waitActive();
- FileSystem fileSys = cluster.getFileSystem();
-
+
+ MiniDFSCluster cluster = null;
+ FileSystem fileSys = null;
+ SecondaryNameNode secondary = null;
try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
+ numDatanodes).build();
+ cluster.waitActive();
+ fileSys = cluster.getFileSystem();
//
// verify that 'format' really blew away all pre-existing files
//
@@ -1015,22 +1026,26 @@ public class TestCheckpoint {
//
// Take a checkpoint
//
- SecondaryNameNode secondary = startSecondaryNameNode(conf);
+ secondary = startSecondaryNameNode(conf);
secondary.doCheckpoint();
- secondary.shutdown();
} finally {
fileSys.close();
- cluster.shutdown();
+ cleanup(secondary);
+ secondary = null;
+ cleanup(cluster);
+ cluster = null;
}
//
// Restart cluster and verify that file1 still exist.
//
- cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
- cluster.waitActive();
- fileSys = cluster.getFileSystem();
Path tmpDir = new Path("/tmp_tmp");
try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
+ .format(false).build();
+ cluster.waitActive();
+ fileSys = cluster.getFileSystem();
+
// check that file1 still exists
checkFile(fileSys, file1, replication);
cleanupFile(fileSys, file1);
@@ -1043,17 +1058,18 @@ public class TestCheckpoint {
//
// Take a checkpoint
//
- SecondaryNameNode secondary = startSecondaryNameNode(conf);
+ secondary = startSecondaryNameNode(conf);
secondary.doCheckpoint();
fileSys.delete(tmpDir, true);
fileSys.mkdirs(tmpDir);
secondary.doCheckpoint();
-
- secondary.shutdown();
} finally {
fileSys.close();
- cluster.shutdown();
+ cleanup(secondary);
+ secondary = null;
+ cleanup(cluster);
+ cluster = null;
}
//
@@ -1073,6 +1089,7 @@ public class TestCheckpoint {
} finally {
fileSys.close();
cluster.shutdown();
+ cluster = null;
}
}
@@ -1088,7 +1105,7 @@ public class TestCheckpoint {
Configuration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(true).build();
cluster.waitActive();
- fs = (DistributedFileSystem)(cluster.getFileSystem());
+ fs = (cluster.getFileSystem());
fc = FileContext.getFileContext(cluster.getURI(0));
// Saving image without safe mode should fail
@@ -1171,17 +1188,14 @@ public class TestCheckpoint {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
cluster.waitActive();
- fs = (DistributedFileSystem)(cluster.getFileSystem());
+ fs = (cluster.getFileSystem());
checkFile(fs, file, replication);
fc = FileContext.getFileContext(cluster.getURI(0));
assertTrue(fc.getFileLinkStatus(symlink).isSymlink());
} finally {
- try {
- if(fs != null) fs.close();
- if(cluster!= null) cluster.shutdown();
- } catch (Throwable t) {
- LOG.error("Failed to shutdown", t);
- }
+ if(fs != null) fs.close();
+ cleanup(cluster);
+ cluster = null;
}
}
@@ -1193,26 +1207,31 @@ public class TestCheckpoint {
MiniDFSCluster cluster = null;
Configuration conf = new HdfsConfiguration();
- cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
- .format(true).build();
- NameNode nn = cluster.getNameNode();
- NamenodeProtocols nnRpc = nn.getRpcServer();
-
- SecondaryNameNode secondary = startSecondaryNameNode(conf);
- // prepare checkpoint image
- secondary.doCheckpoint();
- CheckpointSignature sig = nnRpc.rollEditLog();
- // manipulate the CheckpointSignature fields
- sig.setBlockpoolID("somerandomebpid");
- sig.clusterID = "somerandomcid";
- try {
- sig.validateStorageInfo(nn.getFSImage()); // this should fail
- assertTrue("This test is expected to fail.", false);
- } catch (Exception ignored) {
- }
+ SecondaryNameNode secondary = null;
+ try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
+ .format(true).build();
+ NameNode nn = cluster.getNameNode();
+ NamenodeProtocols nnRpc = nn.getRpcServer();
- secondary.shutdown();
- cluster.shutdown();
+ secondary = startSecondaryNameNode(conf);
+ // prepare checkpoint image
+ secondary.doCheckpoint();
+ CheckpointSignature sig = nnRpc.rollEditLog();
+ // manipulate the CheckpointSignature fields
+ sig.setBlockpoolID("somerandomebpid");
+ sig.clusterID = "somerandomcid";
+ try {
+ sig.validateStorageInfo(nn.getFSImage()); // this should fail
+ assertTrue("This test is expected to fail.", false);
+ } catch (Exception ignored) {
+ }
+ } finally {
+ cleanup(secondary);
+ secondary = null;
+ cleanup(cluster);
+ cluster = null;
+ }
}
/**
@@ -1263,12 +1282,10 @@ public class TestCheckpoint {
secondary.doCheckpoint();
} finally {
- if (secondary != null) {
- secondary.shutdown();
- }
- if (cluster != null) {
- cluster.shutdown();
- }
+ cleanup(secondary);
+ secondary = null;
+ cleanup(cluster);
+ cluster = null;
}
}
@@ -1286,44 +1303,57 @@ public class TestCheckpoint {
String nameserviceId2 = "ns2";
conf.set(DFSConfigKeys.DFS_NAMESERVICES, nameserviceId1
+ "," + nameserviceId2);
- MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
- .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
- .build();
- Configuration snConf1 = new HdfsConfiguration(cluster.getConfiguration(0));
- Configuration snConf2 = new HdfsConfiguration(cluster.getConfiguration(1));
- InetSocketAddress nn1RpcAddress =
- cluster.getNameNode(0).getNameNodeAddress();
- InetSocketAddress nn2RpcAddress =
- cluster.getNameNode(1).getNameNodeAddress();
- String nn1 = nn1RpcAddress.getHostName() + ":" + nn1RpcAddress.getPort();
- String nn2 = nn2RpcAddress.getHostName() + ":" + nn2RpcAddress.getPort();
-
- // Set the Service Rpc address to empty to make sure the node specific
- // setting works
- snConf1.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "");
- snConf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "");
-
- // Set the nameserviceIds
- snConf1.set(DFSUtil.addKeySuffixes(
- DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nameserviceId1), nn1);
- snConf2.set(DFSUtil.addKeySuffixes(
- DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nameserviceId2), nn2);
-
- SecondaryNameNode secondary1 = startSecondaryNameNode(snConf1);
- SecondaryNameNode secondary2 = startSecondaryNameNode(snConf2);
-
- // make sure the two secondary namenodes are talking to correct namenodes.
- assertEquals(secondary1.getNameNodeAddress().getPort(), nn1RpcAddress.getPort());
- assertEquals(secondary2.getNameNodeAddress().getPort(), nn2RpcAddress.getPort());
- assertTrue(secondary1.getNameNodeAddress().getPort() != secondary2
- .getNameNodeAddress().getPort());
-
- // both should checkpoint.
- secondary1.doCheckpoint();
- secondary2.doCheckpoint();
- secondary1.shutdown();
- secondary2.shutdown();
- cluster.shutdown();
+ MiniDFSCluster cluster = null;
+ SecondaryNameNode secondary1 = null;
+ SecondaryNameNode secondary2 = null;
+ try {
+ cluster = new MiniDFSCluster.Builder(conf)
+ .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
+ .build();
+ Configuration snConf1 = new HdfsConfiguration(cluster.getConfiguration(0));
+ Configuration snConf2 = new HdfsConfiguration(cluster.getConfiguration(1));
+ InetSocketAddress nn1RpcAddress = cluster.getNameNode(0)
+ .getNameNodeAddress();
+ InetSocketAddress nn2RpcAddress = cluster.getNameNode(1)
+ .getNameNodeAddress();
+ String nn1 = nn1RpcAddress.getHostName() + ":" + nn1RpcAddress.getPort();
+ String nn2 = nn2RpcAddress.getHostName() + ":" + nn2RpcAddress.getPort();
+
+ // Set the Service Rpc address to empty to make sure the node specific
+ // setting works
+ snConf1.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "");
+ snConf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "");
+
+ // Set the nameserviceIds
+ snConf1.set(DFSUtil.addKeySuffixes(
+ DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nameserviceId1),
+ nn1);
+ snConf2.set(DFSUtil.addKeySuffixes(
+ DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nameserviceId2),
+ nn2);
+
+ secondary1 = startSecondaryNameNode(snConf1);
+ secondary2 = startSecondaryNameNode(snConf2);
+
+ // make sure the two secondary namenodes are talking to correct namenodes.
+ assertEquals(secondary1.getNameNodeAddress().getPort(),
+ nn1RpcAddress.getPort());
+ assertEquals(secondary2.getNameNodeAddress().getPort(),
+ nn2RpcAddress.getPort());
+ assertTrue(secondary1.getNameNodeAddress().getPort() != secondary2
+ .getNameNodeAddress().getPort());
+
+ // both should checkpoint.
+ secondary1.doCheckpoint();
+ secondary2.doCheckpoint();
+ } finally {
+ cleanup(secondary1);
+ secondary1 = null;
+ cleanup(secondary2);
+ secondary2 = null;
+ cleanup(cluster);
+ cluster = null;
+ }
}
/**
@@ -1342,12 +1372,13 @@ public class TestCheckpoint {
cluster.waitActive();
FileSystem fileSys = cluster.getFileSystem();
FSImage image = cluster.getNameNode().getFSImage();
+ SecondaryNameNode secondary = null;
try {
assertTrue(!fileSys.exists(dir));
//
// Make the checkpoint
//
- SecondaryNameNode secondary = startSecondaryNameNode(conf);
+ secondary = startSecondaryNameNode(conf);
File secondaryDir = new File(MiniDFSCluster.getBaseDirectory(), "namesecondary1");
File secondaryCurrent = new File(secondaryDir, "current");
@@ -1391,10 +1422,12 @@ public class TestCheckpoint {
imageFile.length() > fsimageLength);
}
- secondary.shutdown();
} finally {
fileSys.close();
- cluster.shutdown();
+ cleanup(secondary);
+ secondary = null;
+ cleanup(cluster);
+ cluster = null;
}
}
@@ -1409,13 +1442,10 @@ public class TestCheckpoint {
SecondaryNameNode secondary = null;
MiniDFSCluster cluster = null;
FileSystem fs = null;
- NameNode namenode = null;
-
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
.build();
cluster.waitActive();
- namenode = cluster.getNameNode();
fs = cluster.getFileSystem();
secondary = startSecondaryNameNode(conf);
fos = fs.create(new Path("tmpfile0"));
@@ -1439,15 +1469,13 @@ public class TestCheckpoint {
// Namenode should still restart successfully
cluster.restartNameNode();
} finally {
- if (secondary != null) {
- secondary.shutdown();
- }
if (fs != null) {
fs.close();
}
- if (cluster != null) {
- cluster.shutdown();
- }
+ cleanup(secondary);
+ secondary = null;
+ cleanup(cluster);
+ cluster = null;
Mockito.reset(faultInjector);
}
}
@@ -1470,13 +1498,12 @@ public class TestCheckpoint {
@Test
public void testMultipleSecondaryNNsAgainstSameNN() throws Exception {
Configuration conf = new HdfsConfiguration();
-
- MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
- .numDataNodes(0)
- .format(true).build();
-
+ MiniDFSCluster cluster = null;
SecondaryNameNode secondary1 = null, secondary2 = null;
try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true)
+ .build();
+
// Start 2NNs
secondary1 = startSecondaryNameNode(conf, 1);
secondary2 = startSecondaryNameNode(conf, 2);
@@ -1519,20 +1546,23 @@ public class TestCheckpoint {
// NN should have received new checkpoint
assertEquals(6, storage.getMostRecentCheckpointTxId());
+
+ // Validate invariant that files named the same are the same.
+ assertParallelFilesInvariant(cluster, ImmutableList.of(secondary1, secondary2));
+
+ // NN should have removed the checkpoint at txid 2 at this point, but has
+ // one at txid 6
+ assertNNHasCheckpoints(cluster, ImmutableList.of(4,6));
} finally {
cleanup(secondary1);
+ secondary1 = null;
cleanup(secondary2);
+ secondary2 = null;
if (cluster != null) {
cluster.shutdown();
+ cluster = null;
}
}
-
- // Validate invariant that files named the same are the same.
- assertParallelFilesInvariant(cluster, ImmutableList.of(secondary1, secondary2));
-
- // NN should have removed the checkpoint at txid 2 at this point, but has
- // one at txid 6
- assertNNHasCheckpoints(cluster, ImmutableList.of(4,6));
}
@@ -1556,13 +1586,12 @@ public class TestCheckpoint {
@Test
public void testMultipleSecondaryNNsAgainstSameNN2() throws Exception {
Configuration conf = new HdfsConfiguration();
-
- MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
- .numDataNodes(0)
- .format(true).build();
-
+ MiniDFSCluster cluster = null;
SecondaryNameNode secondary1 = null, secondary2 = null;
try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true)
+ .build();
+
// Start 2NNs
secondary1 = startSecondaryNameNode(conf, 1);
secondary2 = startSecondaryNameNode(conf, 2);
@@ -1627,19 +1656,20 @@ public class TestCheckpoint {
// NN should have received new checkpoint
assertEquals(8, storage.getMostRecentCheckpointTxId());
+
+ // Validate invariant that files named the same are the same.
+ assertParallelFilesInvariant(cluster, ImmutableList.of(secondary1, secondary2));
+ // Validate that the NN received checkpoints at expected txids
+ // (i.e that both checkpoints went through)
+ assertNNHasCheckpoints(cluster, ImmutableList.of(6,8));
} finally {
cleanup(secondary1);
+ secondary1 = null;
cleanup(secondary2);
- if (cluster != null) {
- cluster.shutdown();
- }
+ secondary2 = null;
+ cleanup(cluster);
+ cluster = null;
}
-
- // Validate invariant that files named the same are the same.
- assertParallelFilesInvariant(cluster, ImmutableList.of(secondary1, secondary2));
- // Validate that the NN received checkpoints at expected txids
- // (i.e that both checkpoints went through)
- assertNNHasCheckpoints(cluster, ImmutableList.of(6,8));
}
/**
@@ -1678,11 +1708,9 @@ public class TestCheckpoint {
}
// Start a new NN with the same host/port.
- cluster = new MiniDFSCluster.Builder(conf)
- .numDataNodes(0)
- .nameNodePort(origPort)
- .nameNodeHttpPort(origHttpPort)
- .format(true).build();
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+ .nameNodePort(origPort).nameNodeHttpPort(origHttpPort).format(true)
+ .build();
try {
secondary.doCheckpoint();
@@ -1692,12 +1720,10 @@ public class TestCheckpoint {
assertTrue(ioe.toString().contains("Inconsistent checkpoint"));
}
} finally {
- if (secondary != null) {
- secondary.shutdown();
- }
- if (cluster != null) {
- cluster.shutdown();
- }
+ cleanup(secondary);
+ secondary = null;
+ cleanup(cluster);
+ cluster = null;
}
}
@@ -1756,9 +1782,8 @@ public class TestCheckpoint {
assertTrue(msg, msg.contains("but the secondary expected"));
}
} finally {
- if (cluster != null) {
- cluster.shutdown();
- }
+ cleanup(cluster);
+ cluster = null;
}
}
@@ -1815,12 +1840,10 @@ public class TestCheckpoint {
if (currentDir != null) {
currentDir.setExecutable(true);
}
- if (secondary != null) {
- secondary.shutdown();
- }
- if (cluster != null) {
- cluster.shutdown();
- }
+ cleanup(secondary);
+ secondary = null;
+ cleanup(cluster);
+ cluster = null;
}
}
@@ -1850,10 +1873,8 @@ public class TestCheckpoint {
fileAsURI(new File(base_dir, "namesecondary1")).toString());
try {
- cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
- .format(true)
- .manageNameDfsDirs(false)
- .build();
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true)
+ .manageNameDfsDirs(false).build();
secondary = startSecondaryNameNode(conf);
@@ -1893,12 +1914,10 @@ public class TestCheckpoint {
if (currentDir != null) {
currentDir.setExecutable(true);
}
- if (secondary != null) {
- secondary.shutdown();
- }
- if (cluster != null) {
- cluster.shutdown();
- }
+ cleanup(secondary);
+ secondary = null;
+ cleanup(cluster);
+ cluster = null;
}
}
@@ -1945,9 +1964,9 @@ public class TestCheckpoint {
}, 200, 15000);
} finally {
cleanup(secondary);
- if (cluster != null) {
- cluster.shutdown();
- }
+ secondary = null;
+ cleanup(cluster);
+ cluster = null;
}
}
@@ -1962,7 +1981,6 @@ public class TestCheckpoint {
public void testSecondaryHasVeryOutOfDateImage() throws IOException {
MiniDFSCluster cluster = null;
SecondaryNameNode secondary = null;
-
Configuration conf = new HdfsConfiguration();
try {
@@ -1987,12 +2005,10 @@ public class TestCheckpoint {
secondary.doCheckpoint();
} finally {
- if (secondary != null) {
- secondary.shutdown();
- }
- if (cluster != null) {
- cluster.shutdown();
- }
+ cleanup(secondary);
+ secondary = null;
+ cleanup(cluster);
+ cluster = null;
}
}
@@ -2003,12 +2019,11 @@ public class TestCheckpoint {
public void testSecondaryPurgesEditLogs() throws IOException {
MiniDFSCluster cluster = null;
SecondaryNameNode secondary = null;
-
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 0);
try {
- cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
- .format(true).build();
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true)
+ .build();
FileSystem fs = cluster.getFileSystem();
fs.mkdirs(new Path("/foo"));
@@ -2031,12 +2046,10 @@ public class TestCheckpoint {
}
} finally {
- if (secondary != null) {
- secondary.shutdown();
- }
- if (cluster != null) {
- cluster.shutdown();
- }
+ cleanup(secondary);
+ secondary = null;
+ cleanup(cluster);
+ cluster = null;
}
}
@@ -2074,12 +2087,10 @@ public class TestCheckpoint {
// Ensure that the 2NN can still perform a checkpoint.
secondary.doCheckpoint();
} finally {
- if (secondary != null) {
- secondary.shutdown();
- }
- if (cluster != null) {
- cluster.shutdown();
- }
+ cleanup(secondary);
+ secondary = null;
+ cleanup(cluster);
+ cluster = null;
}
}
@@ -2127,12 +2138,10 @@ public class TestCheckpoint {
if (fos != null) {
fos.close();
}
- if (secondary != null) {
- secondary.shutdown();
- }
- if (cluster != null) {
- cluster.shutdown();
- }
+ cleanup(secondary);
+ secondary = null;
+ cleanup(cluster);
+ cluster = null;
}
}
@@ -2163,15 +2172,19 @@ public class TestCheckpoint {
try {
opts.parse("-geteditsize", "-checkpoint");
fail("Should have failed bad parsing for two actions");
- } catch (ParseException e) {}
+ } catch (ParseException e) {
+ LOG.warn("Encountered ", e);
+ }
try {
opts.parse("-checkpoint", "xx");
fail("Should have failed for bad checkpoint arg");
- } catch (ParseException e) {}
+ } catch (ParseException e) {
+ LOG.warn("Encountered ", e);
+ }
}
- private void cleanup(SecondaryNameNode snn) {
+ private static void cleanup(SecondaryNameNode snn) {
if (snn != null) {
try {
snn.shutdown();
@@ -2181,6 +2194,15 @@ public class TestCheckpoint {
}
}
+ private static void cleanup(MiniDFSCluster cluster) {
+ if (cluster != null) {
+ try {
+ cluster.shutdown();
+ } catch (Exception e) {
+ LOG.warn("Could not shutdown MiniDFSCluster ", e);
+ }
+ }
+ }
/**
* Assert that if any two files have the same name across the 2NNs
@@ -2240,3 +2262,4 @@ public class TestCheckpoint {
}
}
+
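The new static cleanup(MiniDFSCluster) helper at the end of the diff mirrors the existing cleanup(SecondaryNameNode): attempt the shutdown, log any exception instead of rethrowing, and let callers null the reference. A sketch of the same helper shape for an arbitrary AutoCloseable (hypothetical code, not the Hadoop helper itself):

public final class CleanupHelperSketch {
  private CleanupHelperSketch() {}

  // Best-effort teardown: log and swallow so a failing shutdown in a
  // finally block cannot replace the exception that failed the test.
  static void cleanup(AutoCloseable c) {
    if (c != null) {
      try {
        c.close();
      } catch (Exception e) {
        System.err.println("Could not shut down " + c + ": " + e);
      }
    }
  }

  public static void main(String[] args) {
    AutoCloseable resource = new AutoCloseable() {
      @Override public void close() { System.out.println("closing"); }
    };
    try {
      // ... exercise the resource ...
    } finally {
      cleanup(resource);
      resource = null; // callers also clear the reference, as in the patch
    }
  }
}

Swallowing inside the helper matters because these calls sit in finally blocks, where a thrown exception would otherwise mask whatever actually failed the test.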
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1469042&r1=1469041&r2=1469042&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Wed Apr 17 19:41:50 2013
@@ -67,6 +67,7 @@ import org.apache.hadoop.hdfs.server.com
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
@@ -634,6 +635,7 @@ public class TestEditLog {
// Now restore the backup
FileUtil.fullyDeleteContents(dfsDir);
+ dfsDir.delete();
backupDir.renameTo(dfsDir);
// Directory layout looks like:
@@ -760,19 +762,24 @@ public class TestEditLog {
File log = new File(currentDir,
NNStorage.getInProgressEditsFileName(3));
- new EditLogFileOutputStream(log, 1024).create();
- if (!inBothDirs) {
- break;
- }
-
- NNStorage storage = new NNStorage(conf,
- Collections.<URI>emptyList(),
- Lists.newArrayList(uri));
-
- if (updateTransactionIdFile) {
- storage.writeTransactionIdFileToStorage(3);
+ EditLogFileOutputStream stream = new EditLogFileOutputStream(log, 1024);
+ try {
+ stream.create();
+ if (!inBothDirs) {
+ break;
+ }
+
+ NNStorage storage = new NNStorage(conf,
+ Collections.<URI>emptyList(),
+ Lists.newArrayList(uri));
+
+ if (updateTransactionIdFile) {
+ storage.writeTransactionIdFileToStorage(3);
+ }
+ storage.close();
+ } finally {
+ stream.close();
}
- storage.close();
}
try {
@@ -1335,12 +1342,15 @@ public class TestEditLog {
FSEditLog editlog = getFSEditLog(storage);
editlog.initJournalsForWrite();
long startTxId = 1;
+ Collection<EditLogInputStream> streams = null;
try {
- readAllEdits(editlog.selectInputStreams(startTxId, 4*TXNS_PER_ROLL),
- startTxId);
+ streams = editlog.selectInputStreams(startTxId, 4*TXNS_PER_ROLL);
+ readAllEdits(streams, startTxId);
} catch (IOException e) {
LOG.error("edit log failover didn't work", e);
fail("Edit log failover didn't work");
+ } finally {
+ IOUtils.cleanup(null, streams.toArray(new EditLogInputStream[0]));
}
}
@@ -1382,12 +1392,15 @@ public class TestEditLog {
FSEditLog editlog = getFSEditLog(storage);
editlog.initJournalsForWrite();
long startTxId = 1;
+ Collection<EditLogInputStream> streams = null;
try {
- readAllEdits(editlog.selectInputStreams(startTxId, 4*TXNS_PER_ROLL),
- startTxId);
+ streams = editlog.selectInputStreams(startTxId, 4*TXNS_PER_ROLL);
+ readAllEdits(streams, startTxId);
} catch (IOException e) {
LOG.error("edit log failover didn't work", e);
fail("Edit log failover didn't work");
+ } finally {
+ IOUtils.cleanup(null, streams.toArray(new EditLogInputStream[0]));
}
}
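The TestEditLog change is what actually closes the leaked handles from HDFS-4695: every stream returned by selectInputStreams is now closed in a finally block via IOUtils.cleanup, which logs and swallows close() failures. A minimal sketch of the idiom with plain java.io streams, assuming only org.apache.hadoop.io.IOUtils.cleanup(Log, Closeable...), which this patch itself imports:

import java.io.Closeable;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.io.IOUtils;

public class StreamCleanupSketch {
  public static void main(String[] args) throws IOException {
    List<Closeable> streams = new ArrayList<Closeable>();
    try {
      for (String path : args) {
        streams.add(new FileInputStream(path)); // open, then read ...
      }
    } finally {
      // Close everything even if an open or read failed part-way through;
      // cleanup(null, ...) logs nowhere and never rethrows from close().
      IOUtils.cleanup(null, streams.toArray(new Closeable[0]));
    }
  }
}

Initializing the collection eagerly, rather than to null as the patched test does, also keeps the finally block safe if the step that opens the streams throws before the variable is assigned.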