You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by bo...@apache.org on 2012/10/16 19:52:01 UTC
svn commit: r1398901 - in
/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs: ./
src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/
src/main/java/org/apache/hadoop/hdfs/server/datanode/
src/main/java/org/apache/hadoop/hdfs/se...
Author: bobby
Date: Tue Oct 16 17:52:01 2012
New Revision: 1398901
URL: http://svn.apache.org/viewvc?rev=1398901&view=rev
Log:
HDFS-4016. back-port HDFS-3582 to branch-0.23 (Ivan A. Veselovsky via bobby)
Modified:
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSetUMask.java
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1398901&r1=1398900&r2=1398901&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue Oct 16 17:52:01 2012
@@ -8,6 +8,9 @@ Release 0.23.5 - UNRELEASED
IMPROVEMENTS
+ HDFS-4016. back-port HDFS-3582 to branch-0.23 (Ivan A. Veselovsky via
+ bobby)
+
OPTIMIZATIONS
BUG FIXES
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1398901&r1=1398900&r2=1398901&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Tue Oct 16 17:52:01 2012
@@ -50,6 +50,9 @@ import org.apache.hadoop.hdfs.protocol.U
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
+
+import static org.apache.hadoop.util.ExitUtil.terminate;
+
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.common.Util;
@@ -2596,8 +2599,8 @@ public class BlockManager {
} catch (IOException ie) {
LOG.warn("ReplicationMonitor thread received exception. " , ie);
} catch (Throwable t) {
- LOG.warn("ReplicationMonitor thread received Runtime exception. ", t);
- Runtime.getRuntime().exit(-1);
+ LOG.fatal("ReplicationMonitor thread received Runtime exception. ", t);
+ terminate(1);
}
}
}
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1398901&r1=1398900&r2=1398901&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Tue Oct 16 17:52:01 2012
@@ -113,6 +113,9 @@ import org.apache.hadoop.hdfs.server.com
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.common.Util;
+
+import static org.apache.hadoop.util.ExitUtil.terminate;
+
import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
import org.apache.hadoop.hdfs.server.datanode.web.resources.DatanodeWebHdfsMethods;
@@ -1715,7 +1718,7 @@ public class DataNode extends Configured
if ("-r".equalsIgnoreCase(cmd) || "--rack".equalsIgnoreCase(cmd)) {
LOG.error("-r, --rack arguments are not supported anymore. RackID " +
"resolution is handled by the NameNode.");
- System.exit(-1);
+ terminate(1);
} else if ("-rollback".equalsIgnoreCase(cmd)) {
startOpt = StartupOption.ROLLBACK;
} else if ("-regular".equalsIgnoreCase(cmd)) {
@@ -1764,15 +1767,15 @@ public class DataNode extends Configured
if (datanode != null)
datanode.join();
} catch (Throwable e) {
- LOG.error("Exception in secureMain", e);
- System.exit(-1);
+ LOG.fatal("Exception in secureMain", e);
+ terminate(1);
} finally {
- // We need to add System.exit here because either shutdown was called or
- // some disk related conditions like volumes tolerated or volumes required
+ // We need to terminate the process here because either shutdown was called
+ // or some disk related conditions like volumes tolerated or volumes required
// condition was not met. Also, In secure mode, control will go to Jsvc
- // and Datanode process hangs without System.exit.
+ // and Datanode process hangs if it does not exit.
LOG.warn("Exiting Datanode");
- System.exit(0);
+ terminate(0);
}
}
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1398901&r1=1398900&r2=1398901&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Tue Oct 16 17:52:01 2012
@@ -31,6 +31,9 @@ import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+
+import static org.apache.hadoop.util.ExitUtil.terminate;
+
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import static org.apache.hadoop.hdfs.server.common.Util.now;
@@ -102,10 +105,6 @@ public class FSEditLog {
// is an automatic sync scheduled?
private volatile boolean isAutoSyncScheduled = false;
- // Used to exit in the event of a failure to sync to all journals. It's a
- // member variable so it can be swapped out for testing.
- private Runtime runtime = Runtime.getRuntime();
-
// these are statistics counters.
private long numTransactions; // number of transactions
private long numTransactionsBatchedInSync;
@@ -440,15 +439,19 @@ public class FSEditLog {
// Prevent RuntimeException from blocking other log edit sync
synchronized (this) {
if (sync) {
- if (badJournals.size() >= journals.size()) {
- LOG.fatal("Could not sync any journal to persistent storage. " +
- "Unsynced transactions: " + (txid - synctxid),
- new Exception());
- runtime.exit(1);
+ try {
+ if (badJournals.size() >= journals.size()) {
+ final String msg =
+ "Could not sync enough journals to persistent storage. "
+ + "Unsynced transactions: " + (txid - synctxid);
+ LOG.fatal(msg, new Exception());
+ terminate(1, msg);
+ }
+ } finally {
+ synctxid = syncStart;
+ // NB: do this in the finally block because even if terminate(1) is called above, we must unlock the waiting threads:
+ isSyncRunning = false;
}
-
- synctxid = syncStart;
- isSyncRunning = false;
}
this.notifyAll();
}
@@ -721,14 +724,6 @@ public class FSEditLog {
}
/**
- * Used only by unit tests.
- */
- @VisibleForTesting
- synchronized void setRuntimeForTesting(Runtime runtime) {
- this.runtime = runtime;
- }
-
- /**
* Return a manifest of what finalized edit logs are available
*/
public synchronized RemoteEditLogManifest getEditLogManifest(
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1398901&r1=1398900&r2=1398901&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Tue Oct 16 17:52:01 2012
@@ -52,6 +52,8 @@ import org.apache.hadoop.security.UserGr
import org.apache.hadoop.util.ServicePlugin;
import org.apache.hadoop.util.StringUtils;
+import static org.apache.hadoop.util.ExitUtil.terminate;
+
/**********************************************************
* NameNode serves as both directory namespace manager and
* "inode table" for the Hadoop DFS. There is a single NameNode
@@ -731,25 +733,26 @@ public class NameNode {
switch (startOpt) {
case FORMAT:
boolean aborted = format(conf, true);
- System.exit(aborted ? 1 : 0);
+ terminate(aborted ? 1 : 0);
return null; // avoid javac warning
case GENCLUSTERID:
System.err.println("Generating new cluster id:");
System.out.println(NNStorage.newClusterID());
- System.exit(0);
+ terminate(0);
return null;
case FINALIZE:
aborted = finalize(conf, true);
- System.exit(aborted ? 1 : 0);
+ terminate(aborted ? 1 : 0);
return null; // avoid javac warning
case BACKUP:
case CHECKPOINT:
NamenodeRole role = startOpt.toNodeRole();
DefaultMetricsSystem.initialize(role.toString().replace(" ", ""));
return new BackupNode(conf, role);
- default:
+ default: {
DefaultMetricsSystem.initialize("NameNode");
return new NameNode(conf);
+ }
}
}
@@ -801,8 +804,8 @@ public class NameNode {
if (namenode != null)
namenode.join();
} catch (Throwable e) {
- LOG.error("Exception in namenode join", e);
- System.exit(-1);
+ LOG.fatal("Exception in namenode join", e);
+ terminate(1);
}
}
}
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1398901&r1=1398900&r2=1398901&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Tue Oct 16 17:52:01 2012
@@ -53,6 +53,9 @@ import org.apache.hadoop.hdfs.server.com
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.Storage.StorageState;
+
+import static org.apache.hadoop.util.ExitUtil.terminate;
+
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
@@ -336,7 +339,7 @@ public class SecondaryNameNode implement
} catch (IOException e) {
LOG.error("Exception while getting login user", e);
e.printStackTrace();
- Runtime.getRuntime().exit(-1);
+ terminate(-1);
}
ugi.doAs(new PrivilegedAction<Object>() {
@Override
@@ -385,9 +388,9 @@ public class SecondaryNameNode implement
LOG.error("Exception in doCheckpoint", e);
e.printStackTrace();
} catch (Throwable e) {
- LOG.error("Throwable Exception in doCheckpoint", e);
+ LOG.fatal("Throwable Exception in doCheckpoint", e);
e.printStackTrace();
- Runtime.getRuntime().exit(-1);
+ terminate(1);
}
}
}
@@ -593,7 +596,7 @@ public class SecondaryNameNode implement
//
// This is an error returned by hadoop server. Print
// out the first line of the error message, ignore the stack trace.
- exitCode = -1;
+ exitCode = 1;
try {
String[] content;
content = e.getLocalizedMessage().split("\n");
@@ -605,7 +608,7 @@ public class SecondaryNameNode implement
//
// IO exception encountered locally.
//
- exitCode = -1;
+ exitCode = 1;
LOG.error(cmd + ": " + e.getLocalizedMessage());
} finally {
// Does the RPC connection need to be closed?
@@ -633,7 +636,8 @@ public class SecondaryNameNode implement
public static void main(String[] argv) throws Exception {
CommandLineOpts opts = SecondaryNameNode.parseArgs(argv);
if (opts == null) {
- System.exit(-1);
+ LOG.fatal("Failed to parse options");
+ terminate(1);
}
StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG);
@@ -642,7 +646,7 @@ public class SecondaryNameNode implement
if (opts.getCommand() != null) {
int ret = secondary.processStartupCommand(opts);
- System.exit(ret);
+ terminate(ret);
}
// Create a never ending daemon
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSetUMask.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSetUMask.java?rev=1398901&r1=1398900&r2=1398901&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSetUMask.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSetUMask.java Tue Oct 16 17:52:01 2012
@@ -94,7 +94,9 @@ public class TestFcHdfsSetUMask {
@AfterClass
public static void ClusterShutdownAtEnd() throws Exception {
- cluster.shutdown();
+ if (cluster != null) {
+ cluster.shutdown();
+ }
}
{
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1398901&r1=1398900&r2=1398901&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Tue Oct 16 17:52:01 2012
@@ -75,6 +75,8 @@ import org.apache.hadoop.security.author
import org.apache.hadoop.tools.GetUserMappingsProtocol;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.util.ExitUtil;
+
/**
* This class creates a single-process DFS cluster for junit testing.
@@ -115,6 +117,7 @@ public class MiniDFSCluster {
private boolean waitSafeMode = true;
private boolean setupHostsFile = false;
private boolean federation = false;
+ private boolean checkExitOnShutdown = true;
public Builder(Configuration conf) {
this.conf = conf;
@@ -232,7 +235,7 @@ public class MiniDFSCluster {
this.waitSafeMode = val;
return this;
}
-
+
/**
* Default: null
*/
@@ -249,7 +252,15 @@ public class MiniDFSCluster {
this.setupHostsFile = val;
return this;
}
-
+
+ /**
+ * Default: true
+ */
+ public Builder checkExitOnShutdown(boolean val) {
+ this.checkExitOnShutdown = val;
+ return this;
+ }
+
/**
* Construct the actual MiniDFSCluster
*/
@@ -283,7 +294,8 @@ public class MiniDFSCluster {
builder.clusterId,
builder.waitSafeMode,
builder.setupHostsFile,
- builder.federation);
+ builder.federation,
+ builder.checkExitOnShutdown);
}
public class DataNodeProperties {
@@ -307,6 +319,7 @@ public class MiniDFSCluster {
private File data_dir;
private boolean federation = false;
private boolean waitSafeMode = true;
+ private boolean checkExitOnShutdown = true;
/**
* Stores the information related to a namenode in the cluster
@@ -499,7 +512,7 @@ public class MiniDFSCluster {
this.nameNodes = new NameNodeInfo[1]; // Single namenode in the cluster
initMiniDFSCluster(nameNodePort, 0, conf, numDataNodes, format,
manageNameDfsDirs, true, manageDataDfsDirs, operation, racks, hosts,
- simulatedCapacities, null, true, false, false);
+ simulatedCapacities, null, true, false, false, true);
}
private void initMiniDFSCluster(int nameNodePort, int nameNodeHttpPort,
@@ -508,13 +521,16 @@ public class MiniDFSCluster {
boolean enableManagedDfsDirsRedundancy,
boolean manageDataDfsDirs, StartupOption operation, String[] racks,
String[] hosts, long[] simulatedCapacities, String clusterId,
- boolean waitSafeMode, boolean setupHostsFile, boolean federation)
+ boolean waitSafeMode, boolean setupHostsFile, boolean federation, boolean checkExitOnShutdown)
throws IOException {
+ ExitUtil.disableSystemExit();
+
this.conf = conf;
base_dir = new File(determineDfsBaseDir());
data_dir = new File(base_dir, "data");
this.federation = federation;
this.waitSafeMode = waitSafeMode;
+ this.checkExitOnShutdown = checkExitOnShutdown;
// use alternate RPC engine if spec'd
String rpcEngineName = System.getProperty("hdfs.rpc.engine");
@@ -1130,6 +1146,11 @@ public class MiniDFSCluster {
*/
public void shutdown() {
LOG.info("Shutting down the Mini HDFS Cluster");
+ if (checkExitOnShutdown) {
+ if (ExitUtil.terminateCalled()) {
+ throw new AssertionError("Test resulted in an unexpected exit");
+ }
+ }
shutdownDataNodes();
for (NameNodeInfo nnInfo : nameNodes) {
NameNode nameNode = nnInfo.nameNode;
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java?rev=1398901&r1=1398900&r2=1398901&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java Tue Oct 16 17:52:01 2012
@@ -26,6 +26,7 @@ import java.net.URI;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
+import java.util.List;
import java.util.Properties;
import org.apache.commons.logging.Log;
@@ -35,6 +36,8 @@ import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.util.ExitUtil;
+import org.apache.hadoop.util.ExitUtil.ExitException;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -43,6 +46,7 @@ import org.junit.Test;
public class TestClusterId {
private static final Log LOG = LogFactory.getLog(TestClusterId.class);
File hdfsDir;
+ Configuration config;
private String getClusterId(Configuration config) throws IOException {
// see if cluster id not empty.
@@ -58,33 +62,34 @@ public class TestClusterId {
LOG.info("successfully formated : sd="+sd.getCurrentDir() + ";cid="+cid);
return cid;
}
-
+
@Before
public void setUp() throws IOException {
+ ExitUtil.disableSystemExit();
+
String baseDir = System.getProperty("test.build.data", "build/test/data");
- hdfsDir = new File(baseDir, "dfs");
- if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
- throw new IOException("Could not delete test directory '" +
- hdfsDir + "'");
+ hdfsDir = new File(baseDir, "dfs/name");
+ if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
+ throw new IOException("Could not delete test directory '" + hdfsDir + "'");
}
LOG.info("hdfsdir is " + hdfsDir.getAbsolutePath());
+
+ config = new Configuration();
+ config.set(DFS_NAMENODE_NAME_DIR_KEY, hdfsDir.getPath());
}
-
+
@After
public void tearDown() throws IOException {
- if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
- throw new IOException("Could not tearDown test directory '" +
- hdfsDir + "'");
+ if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
+ throw new IOException("Could not tearDown test directory '" + hdfsDir
+ + "'");
}
}
-
+
@Test
public void testFormatClusterIdOption() throws IOException {
- Configuration config = new Configuration();
- config.set(DFS_NAMENODE_NAME_DIR_KEY, new File(hdfsDir, "name").getPath());
-
// 1. should format without cluster id
//StartupOption.FORMAT.setClusterId("");
NameNode.format(config);
Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java?rev=1398901&r1=1398900&r2=1398901&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java Tue Oct 16 17:52:01 2012
@@ -19,12 +19,9 @@ package org.apache.hadoop.hdfs.server.na
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
-import static org.mockito.Matchers.anyInt;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
import java.io.IOException;
@@ -33,17 +30,19 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.ExitUtil.ExitException;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
+import static org.junit.Assert.fail;
public class TestEditLogJournalFailures {
private int editsPerformed = 0;
- private Configuration conf;
private MiniDFSCluster cluster;
private FileSystem fs;
- private Runtime runtime;
/**
* Create the mini cluster for testing and sub in a custom runtime so that
@@ -51,22 +50,30 @@ public class TestEditLogJournalFailures
*/
@Before
public void setUpMiniCluster() throws IOException {
- conf = new HdfsConfiguration();
- cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+ setUpMiniCluster(new HdfsConfiguration(), true);
+ }
+
+ private void setUpMiniCluster(Configuration conf, boolean manageNameDfsDirs)
+ throws IOException {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+ .manageNameDfsDirs(manageNameDfsDirs).checkExitOnShutdown(false).build();
cluster.waitActive();
fs = cluster.getFileSystem();
-
- runtime = Runtime.getRuntime();
- runtime = spy(runtime);
- doNothing().when(runtime).exit(anyInt());
-
- cluster.getNameNode().getFSImage().getEditLog().setRuntimeForTesting(runtime);
}
@After
public void shutDownMiniCluster() throws IOException {
- fs.close();
- cluster.shutdown();
+ if (fs != null) {
+ fs.close();
+ }
+ if (cluster != null) {
+ try {
+ cluster.shutdown();
+ } catch (ExitException ee) {
+ // Ignore ExitExceptions as the tests may result in the
+ // NameNode doing an immediate shutdown.
+ }
+ }
}
@Test
@@ -74,11 +81,9 @@ public class TestEditLogJournalFailures
assertTrue(doAnEdit());
// Invalidate one edits journal.
invalidateEditsDirAtIndex(0, true);
- // Make sure runtime.exit(...) hasn't been called at all yet.
- assertExitInvocations(0);
+ // The NN has not terminated (no ExitException thrown)
assertTrue(doAnEdit());
- // A single journal failure should not result in a call to runtime.exit(...).
- assertExitInvocations(0);
+ // The NN has not terminated (no ExitException thrown)
assertFalse(cluster.getNameNode().isInSafeMode());
}
@@ -88,12 +93,17 @@ public class TestEditLogJournalFailures
// Invalidate both edits journals.
invalidateEditsDirAtIndex(0, true);
invalidateEditsDirAtIndex(1, true);
- // Make sure runtime.exit(...) hasn't been called at all yet.
- assertExitInvocations(0);
- assertTrue(doAnEdit());
- // The previous edit could not be synced to any persistent storage, should
- // have halted the NN.
- assertExitInvocations(1);
+ // The NN has not terminated (no ExitException thrown)
+ try {
+ doAnEdit();
+ fail("The previous edit could not be synced to any persistent storage, "
+ + "should have halted the NN");
+ } catch (RemoteException re) {
+ assertTrue(re.toString().contains("ExitException"));
+ GenericTestUtils.assertExceptionContains(
+ "Could not sync enough journals to persistent storage. " +
+ "Unsynced transactions: 1", re);
+ }
}
@Test
@@ -101,11 +111,9 @@ public class TestEditLogJournalFailures
assertTrue(doAnEdit());
// Invalidate one edits journal.
invalidateEditsDirAtIndex(0, false);
- // Make sure runtime.exit(...) hasn't been called at all yet.
- assertExitInvocations(0);
+ // The NN has not terminated (no ExitException thrown)
assertTrue(doAnEdit());
- // A single journal failure should not result in a call to runtime.exit(...).
- assertExitInvocations(0);
+ // A single journal failure should not result in a call to terminate
assertFalse(cluster.getNameNode().isInSafeMode());
}
@@ -141,21 +149,6 @@ public class TestEditLogJournalFailures
}
/**
- * Restore the journal at index <code>index</code> with the passed
- * {@link EditLogOutputStream}.
- *
- * @param index index of the journal to restore.
- * @param elos the {@link EditLogOutputStream} to put at that index.
- */
- private void restoreEditsDirAtIndex(int index, EditLogOutputStream elos) {
- FSImage fsimage = cluster.getNamesystem().getFSImage();
- FSEditLog editLog = fsimage.getEditLog();
-
- FSEditLog.JournalAndStream jas = editLog.getJournals().get(index);
- jas.setCurrentStreamForTests(elos);
- }
-
- /**
* Do a mutative metadata operation on the file system.
*
* @return true if the operation was successful, false otherwise.
@@ -164,13 +157,4 @@ public class TestEditLogJournalFailures
return fs.mkdirs(new Path("/tmp", Integer.toString(editsPerformed++)));
}
- /**
- * Make sure that Runtime.exit(...) has been called
- * <code>expectedExits<code> number of times.
- *
- * @param expectedExits the number of times Runtime.exit(...) should have been called.
- */
- private void assertExitInvocations(int expectedExits) {
- verify(runtime, times(expectedExits)).exit(anyInt());
- }
}