You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by bo...@apache.org on 2010/09/22 02:12:36 UTC
svn commit: r999695 - in /hadoop/hdfs/branches/HDFS-1052: ./
src/java/org/apache/hadoop/hdfs/server/common/
src/java/org/apache/hadoop/hdfs/server/datanode/
src/java/org/apache/hadoop/hdfs/server/namenode/
src/test/hdfs/org/apache/hadoop/hdfs/ src/test...
Author: boryas
Date: Wed Sep 22 00:12:35 2010
New Revision: 999695
URL: http://svn.apache.org/viewvc?rev=999695&view=rev
Log:
HDFS-1394. modify -format option for namenode to generate a new blockpool id and accept a new cluster id
Added:
hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
Modified:
hadoop/hdfs/branches/HDFS-1052/CHANGES.txt
hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java
hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/Storage.java
hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java
hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestReplicationPolicy.java
hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/test/GenericTestUtils.java
hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java
Modified: hadoop/hdfs/branches/HDFS-1052/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/CHANGES.txt?rev=999695&r1=999694&r2=999695&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/CHANGES.txt (original)
+++ hadoop/hdfs/branches/HDFS-1052/CHANGES.txt Wed Sep 22 00:12:35 2010
@@ -39,6 +39,9 @@ Trunk (unreleased changes)
HDFS-1365.HDFS federation: propose ClusterID and BlockPoolID format (tanping via boryas)
+ HDFS-1394. modify -format option for namenode to generated new blockpool id
+ and accept newcluster (boryas)
+
IMPROVEMENTS
HDFS-1096. fix for prev. commit. (boryas)
Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java?rev=999695&r1=999694&r2=999695&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java Wed Sep 22 00:12:35 2010
@@ -41,6 +41,8 @@ public interface HdfsConstants {
/** Startup options */
static public enum StartupOption{
FORMAT ("-format"),
+ CLUSTERID ("-clusterid"),
+ GENCLUSTERID ("-genclusterid"),
REGULAR ("-regular"),
BACKUP ("-backup"),
CHECKPOINT("-checkpoint"),
Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/Storage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/Storage.java?rev=999695&r1=999694&r2=999695&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/Storage.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/common/Storage.java Wed Sep 22 00:12:35 2010
@@ -232,22 +232,26 @@ public abstract class Storage extends St
public void read() throws IOException {
read(getVersionFile());
}
-
public void read(File from) throws IOException {
+ Properties props = readFrom(from);
+ getFields(props, this);
+ }
+
+ public Properties readFrom(File from) throws IOException {
RandomAccessFile file = new RandomAccessFile(from, "rws");
FileInputStream in = null;
+ Properties props = new Properties();
try {
in = new FileInputStream(file.getFD());
file.seek(0);
- Properties props = new Properties();
props.load(in);
- getFields(props, this);
} finally {
if (in != null) {
in.close();
}
file.close();
}
+ return props;
}
/**
Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=999695&r1=999694&r2=999695&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Wed Sep 22 00:12:35 2010
@@ -25,6 +25,7 @@ import java.io.DataOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
+import java.lang.management.ManagementFactory;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.Socket;
@@ -49,6 +50,9 @@ import java.util.Random;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
+import javax.management.MBeanServer;
+import javax.management.ObjectName;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -65,6 +69,7 @@ import org.apache.hadoop.hdfs.protocol.B
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
+import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -72,17 +77,16 @@ import org.apache.hadoop.hdfs.protocol.F
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Util;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.FSDataset.VolumeInfo;
import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
@@ -92,6 +96,7 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.server.namenode.StreamFile;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
+import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -101,7 +106,6 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
-import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.http.HttpServer;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RPC;
@@ -117,20 +121,15 @@ import org.apache.hadoop.security.token.
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.DiskChecker;
+import org.apache.hadoop.util.DiskChecker.DiskErrorException;
+import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.ServicePlugin;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.VersionInfo;
-import org.apache.hadoop.util.DiskChecker.DiskErrorException;
-import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
import org.mortbay.util.ajax.JSON;
-import java.lang.management.ManagementFactory;
-
-import javax.management.MBeanServer;
-import javax.management.ObjectName;
-
/**********************************************************
* DataNode is a class (and program) that stores a set of
* blocks for a DFS deployment. A single deployment can
@@ -364,6 +363,7 @@ public class DataNode extends Configured
} else { // real storage
// read storage info, lock data dirs and transition fs state if necessary
storage.recoverTransitionRead(nsInfo, dataDirs, startOpt);
+
// adjust
this.dnRegistration.setStorageInfo(storage);
// initialize data node internal structure
Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=999695&r1=999694&r2=999695&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Wed Sep 22 00:12:35 2010
@@ -150,7 +150,7 @@ class FSDirectory implements Closeable {
// format before starting up if requested
if (startOpt == StartupOption.FORMAT) {
fsImage.setStorageDirectories(dataDirs, editsDirs);
- fsImage.format();
+ fsImage.format(fsImage.determineClusterId()); // reuse current id
startOpt = StartupOption.REGULAR;
}
try {
Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=999695&r1=999694&r2=999695&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java Wed Sep 22 00:12:35 2010
@@ -1470,9 +1470,8 @@ public class FSImage extends Storage {
*
* @return new clusterID
*/
- private String newClusterID() {
- this.clusterID = "cid-" + UUID.randomUUID().toString();
- return this.clusterID;
+ public static String newClusterID() {
+ return "CID-" + UUID.randomUUID().toString();
}
/**
@@ -1514,10 +1513,10 @@ public class FSImage extends Storage {
+ " has been successfully formatted.");
}
- public void format() throws IOException {
+ public void format(String clusterId) throws IOException {;
this.layoutVersion = FSConstants.LAYOUT_VERSION;
this.namespaceID = newNamespaceID();
- this.clusterID = newClusterID();
+ this.clusterID = clusterId;
this.blockpoolID = newBlockPoolID();
this.cTime = 0L;
this.checkpointTime = now();
@@ -2037,6 +2036,33 @@ public class FSImage extends Storage {
}
return list.toArray(new File[list.size()]);
}
+
+ /**
+ * try to find current cluster id in the VERSION files
+ * returns first cluster id found in any VERSION file
+ * null in case none found
+ * @return clusterId or null in case no cluster id found
+ */
+ public String determineClusterId() {
+ String cid = null;
+ Iterator<StorageDirectory> sdit = dirIterator(NameNodeDirType.IMAGE);
+ while(sdit.hasNext()) {
+ StorageDirectory sd = sdit.next();
+ try {
+ Properties props = sd.readFrom(sd.getVersionFile());
+ cid = props.getProperty("clusterID");
+ LOG.info("current cluster id for sd="+sd.getCurrentDir() +
+ ";lv=" + layoutVersion + ";cid=" + cid);
+
+ if(cid != null && !cid.equals(""))
+ return cid;
+ } catch (Exception e) {
+ LOG.warn("this sd not available: " + e.getLocalizedMessage());
+ } //ignore
+ }
+ LOG.warn("couldn't find any VERSION file containing valid ClusterId");
+ return null;
+ }
/**
* DatanodeImage is used to store persistent information
Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=999695&r1=999694&r2=999695&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Wed Sep 22 00:12:35 2010
@@ -76,9 +76,6 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.NodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
-import org.apache.hadoop.hdfs.server.namenode.GetDelegationTokenServlet;
-import org.apache.hadoop.hdfs.server.namenode.CancelDelegationTokenServlet;
-import org.apache.hadoop.hdfs.server.namenode.RenewDelegationTokenServlet;
import org.apache.hadoop.http.HttpServer;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.Text;
@@ -164,6 +161,7 @@ public class NameNode implements Namenod
public static final Log LOG = LogFactory.getLog(NameNode.class.getName());
public static final Log stateChangeLog = LogFactory.getLog("org.apache.hadoop.hdfs.StateChange");
+ public static String clusterIdStr;
protected FSNamesystem namesystem;
protected NamenodeRole role;
@@ -1376,7 +1374,7 @@ public class NameNode implements Namenod
continue;
if (isConfirmationNeeded) {
System.err.print("Re-format filesystem in " + curDir +" ? (Y or N) ");
- if (!(System.in.read() == 'Y')) {
+ if (System.in.read() != 'Y') {
System.err.println("Format aborted in "+ curDir);
return true;
}
@@ -1384,9 +1382,25 @@ public class NameNode implements Namenod
}
}
- FSNamesystem nsys = new FSNamesystem(new FSImage(dirsToFormat,
- editDirsToFormat), conf);
- nsys.dir.fsImage.format();
+ FSImage fsImage = new FSImage(dirsToFormat, editDirsToFormat);
+ FSNamesystem nsys = new FSNamesystem(fsImage, conf);
+ //new cluster id
+ // if not provided - see if you can find the current one
+ if(clusterIdStr == null || clusterIdStr.equals("")) {
+ // try to get one from the existing storage
+ clusterIdStr = fsImage.determineClusterId();
+ if (clusterIdStr == null || clusterIdStr.equals("")) {
+ throw new IllegalArgumentException("Format must be provided with clusterid");
+ }
+ if(isConfirmationNeeded) {
+ System.err.print("Use existing cluster id=" + clusterIdStr + "? (Y or N)");
+ if(System.in.read() != 'Y') {
+ throw new IllegalArgumentException("Format must be provided with clusterid");
+ }
+ while(System.in.read() != '\n'); // discard the enter-key
+ }
+ }
+ nsys.dir.fsImage.format(clusterIdStr);
return false;
}
@@ -1443,7 +1457,8 @@ public class NameNode implements Namenod
"Usage: java NameNode [" +
StartupOption.BACKUP.getName() + "] | [" +
StartupOption.CHECKPOINT.getName() + "] | [" +
- StartupOption.FORMAT.getName() + "] | [" +
+ StartupOption.FORMAT.getName() + "[" + StartupOption.CLUSTERID.getName() +
+ " cid ]] | [" +
StartupOption.UPGRADE.getName() + "] | [" +
StartupOption.ROLLBACK.getName() + "] | [" +
StartupOption.FINALIZE.getName() + "] | [" +
@@ -1457,6 +1472,13 @@ public class NameNode implements Namenod
String cmd = args[i];
if (StartupOption.FORMAT.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.FORMAT;
+ // might be followed by two args
+ if(i+2<argsLen && args[i+1].equalsIgnoreCase(StartupOption.CLUSTERID.getName())) {
+ i+=2;
+ clusterIdStr = args[i];
+ }
+ } else if (StartupOption.GENCLUSTERID.getName().equalsIgnoreCase(cmd)) {
+ startOpt = StartupOption.GENCLUSTERID;
} else if (StartupOption.REGULAR.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.REGULAR;
} else if (StartupOption.BACKUP.getName().equalsIgnoreCase(cmd)) {
@@ -1502,6 +1524,11 @@ public class NameNode implements Namenod
boolean aborted = format(conf, true);
System.exit(aborted ? 1 : 0);
return null; // avoid javac warning
+ case GENCLUSTERID:
+ System.err.println("Generating new cluster id:");
+ System.out.println(FSImage.newClusterID());
+ System.exit(0);
+ return null;
case FINALIZE:
aborted = finalize(conf, true);
System.exit(aborted ? 1 : 0);
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=999695&r1=999694&r2=999695&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java Wed Sep 22 00:12:35 2010
@@ -60,6 +60,7 @@ import org.apache.hadoop.security.Refres
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
+import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.ToolRunner;
@@ -302,7 +303,7 @@ public class MiniDFSCluster {
if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
throw new IOException("Cannot remove data directory: " + data_dir);
}
- NameNode.format(conf);
+ GenericTestUtils.formatNamenode(conf);
}
// Start the NameNode
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java?rev=999695&r1=999694&r2=999695&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java Wed Sep 22 00:12:35 2010
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.net.DNS;
+import org.apache.hadoop.test.GenericTestUtils;
/**
* This test checks correctness of port usage by hdfs components:
@@ -103,7 +104,7 @@ public class TestHDFSServerPorts extends
NameNode.setServiceAddress(config, NAME_NODE_HOST + "0");
}
config.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, NAME_NODE_HTTP_HOST + "0");
- NameNode.format(config);
+ GenericTestUtils.formatNamenode(config);
String[] args = new String[] {};
// NameNode will modify config with the ports it bound to
@@ -261,7 +262,7 @@ public class TestHDFSServerPorts extends
Configuration conf2 = new HdfsConfiguration(config);
conf2.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
fileAsURI(new File(hdfsDir, "name2")).toString());
- NameNode.format(conf2);
+ GenericTestUtils.formatNamenode(conf2);
boolean started = canStartNameNode(conf2);
assertFalse(started); // should fail
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java?rev=999695&r1=999694&r2=999695&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java Wed Sep 22 00:12:35 2010
@@ -47,6 +47,8 @@ import org.apache.hadoop.hdfs.server.com
import org.apache.hadoop.hdfs.server.datanode.DataStorage;
import org.apache.hadoop.hdfs.server.namenode.FSImage;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
+import org.apache.hadoop.test.GenericTestUtils;
/**
* This class defines a number of static helper methods used by the
@@ -101,7 +103,7 @@ public class UpgradeUtilities {
createEmptyDirs(new String[] {datanodeStorage.toString()});
// format and start NameNode and start DataNode
- NameNode.format(config);
+ GenericTestUtils.formatNamenode(config);
cluster = new MiniDFSCluster(config, 1, StartupOption.REGULAR);
NameNode namenode = cluster.getNameNode();
Added: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java?rev=999695&view=auto
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java (added)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java Wed Sep 22 00:12:35 2010
@@ -0,0 +1,119 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.Properties;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeDirType;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+
+public class TestClusterId {
+ private static final Log LOG = LogFactory.getLog(TestClusterId.class);
+ File hdfsDir;
+
+ @Before
+ public void setUp() throws IOException {
+ String baseDir = System.getProperty("test.build.data", "build/test/data");
+
+ hdfsDir = new File(baseDir, "dfs");
+ if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
+ throw new IOException("Could not delete test directory '" + hdfsDir + "'");
+ }
+ LOG.info("hdfsdir is " + hdfsDir.getAbsolutePath());
+ }
+
+ @After
+ public void tearDown() throws IOException {
+ if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
+ throw new IOException("Could not tearDown test directory '" + hdfsDir + "'");
+ }
+ }
+
+ @Test
+ public void testFormatClusterIdOption() throws IOException {
+ Configuration config = new Configuration();
+
+ config.set(DFS_NAMENODE_NAME_DIR_KEY, new File(hdfsDir, "name").getPath());
+
+ // 1. should fail to format without cluster id
+ NameNode.clusterIdStr = null;
+ try {
+ NameNode.format(config);
+ fail("should fail to format without cluster id");
+ } catch (IllegalArgumentException e) {
+ LOG.info("correctly thrown IllegalArgumentException ");
+ } catch (Exception e) {
+ fail("failed with a wrong exception:" + e.getLocalizedMessage());
+ }
+
+ // 2. successful format
+ NameNode.clusterIdStr = "mycluster";
+ try {
+ NameNode.format(config);
+ } catch (Exception e) {
+ fail("failed to format namenode:"+e.getLocalizedMessage());
+ }
+ // see if cluster id not empty.
+ Collection<URI> dirsToFormat = FSNamesystem.getNamespaceDirs(config);
+ Collection<URI> editsToFormat = new ArrayList<URI>(0);
+ FSImage fsImage = new FSImage(dirsToFormat, editsToFormat);
+
+ Iterator<StorageDirectory> sdit = fsImage.dirIterator(NameNodeDirType.IMAGE);
+ StorageDirectory sd = sdit.next();
+ Properties props = sd.readFrom(sd.getVersionFile());
+ String cid = props.getProperty("clusterID");
+ LOG.info("successfully formated : sd="+sd.getCurrentDir() + ";cid="+cid);
+ if(cid == null || cid.equals("")) {
+ fail("didn't get new ClusterId");
+ }
+
+
+ // 3. format with existing cluster id
+ NameNode.clusterIdStr="";
+ try {
+ NameNode.format(config);
+ } catch (Exception e) {
+ fail("failed to format namenode:"+e.getLocalizedMessage());
+ }
+ props = sd.readFrom(sd.getVersionFile());
+ String newCid = props.getProperty("clusterID");
+ LOG.info("successfully formated with new cid: sd="+sd.getCurrentDir() + ";cid="+newCid);
+ if(newCid == null || newCid.equals("")) {
+ fail("didn't get new ClusterId");
+ }
+ assertTrue("should be the same", newCid.equals(cid));
+ }
+}
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java?rev=999695&r1=999694&r2=999695&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java Wed Sep 22 00:12:35 2010
@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.server.com
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile;
+import org.apache.hadoop.test.GenericTestUtils;
import static org.junit.Assert.*;
import org.junit.Test;
@@ -320,7 +321,7 @@ public class TestEditLogRace {
public void testSaveImageWhileSyncInProgress() throws Exception {
Configuration conf = getConf();
NameNode.initMetrics(conf, NamenodeRole.ACTIVE);
- NameNode.format(conf);
+ GenericTestUtils.formatNamenode(conf);
final FSNamesystem namesystem = new FSNamesystem(conf);
try {
@@ -410,7 +411,7 @@ public class TestEditLogRace {
public void testSaveRightBeforeSync() throws Exception {
Configuration conf = getConf();
NameNode.initMetrics(conf, NamenodeRole.ACTIVE);
- NameNode.format(conf);
+ GenericTestUtils.formatNamenode(conf);
final FSNamesystem namesystem = new FSNamesystem(conf);
try {
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java?rev=999695&r1=999694&r2=999695&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java Wed Sep 22 00:12:35 2010
@@ -23,6 +23,7 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;
public class TestNNThroughputBenchmark {
@@ -35,7 +36,7 @@ public class TestNNThroughputBenchmark {
Configuration conf = new HdfsConfiguration();
FileSystem.setDefaultUri(conf, "hdfs://localhost:" + 0);
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
- NameNode.format(conf);
+ GenericTestUtils.formatNamenode(conf);
String[] args = new String[] {"-op", "all"};
NNThroughputBenchmark.runBenchmark(conf, Arrays.asList(args));
}
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestReplicationPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestReplicationPolicy.java?rev=999695&r1=999694&r2=999695&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestReplicationPolicy.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestReplicationPolicy.java Wed Sep 22 00:12:35 2010
@@ -26,6 +26,7 @@ import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
+import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.FSConstants;
@@ -59,7 +60,7 @@ public class TestReplicationPolicy exten
try {
FileSystem.setDefaultUri(CONF, "hdfs://localhost:0");
CONF.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
- NameNode.format(CONF);
+ GenericTestUtils.formatNamenode(CONF);
namenode = new NameNode(CONF);
} catch (IOException e) {
e.printStackTrace();
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java?rev=999695&r1=999694&r2=999695&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java Wed Sep 22 00:12:35 2010
@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.MiniDFSClu
import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
@@ -88,7 +89,7 @@ public class TestSaveNamespace {
private void saveNamespaceWithInjectedFault(Fault fault) throws IOException {
Configuration conf = getConf();
NameNode.initMetrics(conf, NamenodeRole.ACTIVE);
- NameNode.format(conf);
+ GenericTestUtils.formatNamenode(conf);
FSNamesystem fsn = new FSNamesystem(conf);
// Replace the FSImage with a spy
@@ -162,7 +163,7 @@ public class TestSaveNamespace {
public void testSaveWhileEditsRolled() throws Exception {
Configuration conf = getConf();
NameNode.initMetrics(conf, NamenodeRole.ACTIVE);
- NameNode.format(conf);
+ GenericTestUtils.formatNamenode(conf);
FSNamesystem fsn = new FSNamesystem(conf);
// Replace the FSImage with a spy
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/test/GenericTestUtils.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/test/GenericTestUtils.java?rev=999695&r1=999694&r2=999695&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/test/GenericTestUtils.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/test/GenericTestUtils.java Wed Sep 22 00:12:35 2010
@@ -17,6 +17,11 @@
*/
package org.apache.hadoop.test;
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+
/**
* Test provides some very generic helpers which might be used across the tests
*/
@@ -28,4 +33,14 @@ public abstract class GenericTestUtils {
public static String getMethodName() {
return Thread.currentThread().getStackTrace()[2].getMethodName();
}
+
+ /**
+ * when formating a namenode - we must provide clusterid.
+ * @param conf
+ * @throws IOException
+ */
+ public static void formatNamenode(Configuration conf) throws IOException {
+ NameNode.clusterIdStr = "testClusterId";
+ NameNode.format(conf);
+ }
}
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java?rev=999695&r1=999694&r2=999695&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java Wed Sep 22 00:12:35 2010
@@ -75,7 +75,7 @@ public class TestNNLeaseRecovery {
FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
- NameNode.format(conf);
+ GenericTestUtils.formatNamenode(conf);
fsn = spy(new FSNamesystem(conf));
}