You are viewing a plain text version of this content. The canonical (hyperlinked) version of this message is available in the Apache mailing list archives; the original link was lost in plain-text conversion.
Posted to hdfs-commits@hadoop.apache.org by at...@apache.org on 2012/12/08 00:52:09 UTC
svn commit: r1418559 - in
/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: CHANGES.txt
src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
Author: atm
Date: Fri Dec 7 23:52:08 2012
New Revision: 1418559
URL: http://svn.apache.org/viewvc?rev=1418559&view=rev
Log:
HDFS-4279. NameNode does not initialize generic conf keys when started with -recover. Contributed by Colin Patrick McCabe.
Modified:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1418559&r1=1418558&r2=1418559&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Dec 7 23:52:08 2012
@@ -578,6 +578,9 @@ Release 2.0.3-alpha - Unreleased
HDFS-4236. Remove artificial limit on username length introduced in
HDFS-4171. (tucu via suresh)
+ HDFS-4279. NameNode does not initialize generic conf keys when started
+ with -recover. (Colin Patrick McCabe via atm)
+
BREAKDOWN OF HDFS-3077 SUBTASKS
HDFS-3077. Quorum-based protocol for reading and writing edit logs.
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1418559&r1=1418558&r2=1418559&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Fri Dec 7 23:52:08 2012
@@ -1050,6 +1050,9 @@ public class NameNode {
private static void doRecovery(StartupOption startOpt, Configuration conf)
throws IOException {
+ String nsId = DFSUtil.getNamenodeNameServiceId(conf);
+ String namenodeId = HAUtil.getNameNodeId(conf, nsId);
+ initializeGenericKeys(conf, nsId, namenodeId);
if (startOpt.getForce() < MetaRecoveryContext.FORCE_ALL) {
if (!confirmPrompt("You have selected Metadata Recovery mode. " +
"This mode is intended to recover lost metadata on a corrupt " +
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java?rev=1418559&r1=1418558&r2=1418559&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java Fri Dec 7 23:52:08 2012
@@ -30,11 +30,16 @@ import java.io.RandomAccessFile;
import java.util.HashSet;
import java.util.Set;
+import junit.framework.Assert;
+
+import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
@@ -438,6 +443,39 @@ public class TestNameNodeRecovery {
}
}
+ /**
+ * Create a test configuration that will exercise the initializeGenericKeys
+ * code path. This is a regression test for HDFS-4279.
+ */
+ static void setupRecoveryTestConf(Configuration conf) throws IOException {
+ conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
+ conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");
+ conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX,
+ "ns1"), "nn1,nn2");
+ String baseDir = System.getProperty(
+ MiniDFSCluster.PROP_TEST_BUILD_DATA, "build/test/data") + "/dfs/";
+ File nameDir = new File(baseDir, "nameR");
+ File secondaryDir = new File(baseDir, "namesecondaryR");
+ conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.
+ DFS_NAMENODE_NAME_DIR_KEY, "ns1", "nn1"),
+ nameDir.getCanonicalPath());
+ conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.
+ DFS_NAMENODE_CHECKPOINT_DIR_KEY, "ns1", "nn1"),
+ secondaryDir.getCanonicalPath());
+ conf.unset(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
+ conf.unset(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
+ FileUtils.deleteQuietly(nameDir);
+ if (!nameDir.mkdirs()) {
+ throw new RuntimeException("failed to make directory " +
+ nameDir.getAbsolutePath());
+ }
+ FileUtils.deleteQuietly(secondaryDir);
+ if (!secondaryDir.mkdirs()) {
+ throw new RuntimeException("failed to make directory " +
+ secondaryDir.getAbsolutePath());
+ }
+ }
+
static void testNameNodeRecoveryImpl(Corruptor corruptor, boolean finalize)
throws IOException {
final String TEST_PATH = "/test/path/dir";
@@ -446,12 +484,13 @@ public class TestNameNodeRecovery {
// start a cluster
Configuration conf = new HdfsConfiguration();
+ setupRecoveryTestConf(conf);
MiniDFSCluster cluster = null;
FileSystem fileSys = null;
StorageDirectory sd = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
- .enableManagedDfsDirsRedundancy(false).build();
+ .manageNameDfsDirs(false).build();
cluster.waitActive();
if (!finalize) {
// Normally, the in-progress edit log would be finalized by