You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2012/04/13 01:23:43 UTC
svn commit: r1325570 - in
/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project: hadoop-hdfs-httpfs/
hadoop-hdfs/ hadoop-hdfs/src/contrib/bkjournal/
hadoop-hdfs/src/contrib/fuse-dfs/ hadoop-hdfs/src/main/java/
hadoop-hdfs/src/main/java/org/apache/hadoo...
Author: szetszwo
Date: Thu Apr 12 23:23:39 2012
New Revision: 1325570
URL: http://svn.apache.org/viewvc?rev=1325570&view=rev
Log:
Merge r1325052 through r1325569 from trunk.
Added:
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/VersionUtil.java
- copied unchanged from r1325569, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/VersionUtil.java
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestVersionUtil.java
- copied unchanged from r1325569, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestVersionUtil.java
Modified:
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/ (props changed)
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/ (props changed)
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/ (props changed)
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/ (props changed)
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/ (props changed)
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/native/ (props changed)
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/ (props changed)
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/ (props changed)
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/ (props changed)
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/ (props changed)
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java
Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1325052-1325569
Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs-httpfs/
------------------------------------------------------------------------------
--- svn:ignore (original)
+++ svn:ignore Thu Apr 12 23:23:39 2012
@@ -1 +1,4 @@
target
+.classpath
+.project
+.settings
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1325570&r1=1325569&r2=1325570&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Apr 12 23:23:39 2012
@@ -370,6 +370,12 @@ Release 2.0.0 - UNRELEASED
HDFS-3179. Improve the exception message thrown by DataStreamer when
it failed to add a datanode. (szetszwo)
+ HDFS-2983. Relax the build version check to permit rolling upgrades within
+ a release. (atm)
+
+ HDFS-3259. NameNode#initializeSharedEdits should populate shared edits dir
+ with edit log segments. (atm)
+
OPTIMIZATIONS
HDFS-3024. Improve performance of stringification in addStoredBlock (todd)
@@ -495,6 +501,14 @@ Release 2.0.0 - UNRELEASED
HDFS-2696. Fix the fuse-fds build. (Bruno Mahé via eli)
+ HDFS-3260. TestDatanodeRegistration should set minimum DN version in
+ addition to minimum NN version. (atm)
+
+ HDFS-3255. HA DFS returns wrong token service (Daryn Sharp via todd)
+
+ HDFS-3256. HDFS considers blocks under-replicated if topology script is
+ configured with only 1 rack. (atm)
+
BREAKDOWN OF HDFS-1623 SUBTASKS
HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)
Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/
------------------------------------------------------------------------------
--- svn:ignore (original)
+++ svn:ignore Thu Apr 12 23:23:39 2012
@@ -1 +1,4 @@
target
+.classpath
+.project
+.settings
Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/
------------------------------------------------------------------------------
--- svn:ignore (added)
+++ svn:ignore Thu Apr 12 23:23:39 2012
@@ -0,0 +1,4 @@
+target
+.classpath
+.project
+.settings
Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1325052-1325569
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1325570&r1=1325569&r2=1325570&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Thu Apr 12 23:23:39 2012
@@ -146,6 +146,8 @@ public class DFSConfigKeys extends Commo
public static final int DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_DEFAULT = 2;
public static final String DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY = "dfs.namenode.num.extra.edits.retained";
public static final int DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_DEFAULT = 1000000; //1M
+ public static final String DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY = "dfs.namenode.min.supported.datanode.version";
+ public static final String DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_DEFAULT = "3.0.0";
public static final String DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY = "dfs.namenode.edits.dir.minimum";
public static final int DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT = 1;
@@ -265,6 +267,8 @@ public class DFSConfigKeys extends Commo
public static final String DFS_DATANODE_IPC_ADDRESS_KEY = "dfs.datanode.ipc.address";
public static final int DFS_DATANODE_IPC_DEFAULT_PORT = 50020;
public static final String DFS_DATANODE_IPC_ADDRESS_DEFAULT = "0.0.0.0" + DFS_DATANODE_IPC_DEFAULT_PORT;
+ public static final String DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY = "dfs.datanode.min.supported.namenode.version";
+ public static final String DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT = "3.0.0";
public static final String DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY = "dfs.block.access.token.enable";
public static final boolean DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT = false;
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=1325570&r1=1325569&r2=1325570&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java Thu Apr 12 23:23:39 2012
@@ -848,8 +848,9 @@ public class DistributedFileSystem exten
*/
@Override
public String getCanonicalServiceName() {
- if (HAUtil.isLogicalUri(getConf(), getUri())) {
- return getUri().getHost();
+ URI uri = getUri();
+ if (HAUtil.isLogicalUri(getConf(), uri)) {
+ return HAUtil.buildTokenServiceForLogicalUri(uri).toString();
} else {
return super.getCanonicalServiceName();
}
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java?rev=1325570&r1=1325569&r2=1325570&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java Thu Apr 12 23:23:39 2012
@@ -386,7 +386,7 @@ public class PBHelper {
StorageInfoProto storage = info.getStorageInfo();
return new NamespaceInfo(storage.getNamespceID(), storage.getClusterID(),
info.getBlockPoolID(), storage.getCTime(), info.getDistUpgradeVersion(),
- info.getBuildVersion());
+ info.getBuildVersion(), info.getSoftwareVersion());
}
public static NamenodeCommand convert(NamenodeCommandProto cmd) {
@@ -612,13 +612,14 @@ public class PBHelper {
.newBuilder();
return builder.setDatanodeID(PBHelper.convert((DatanodeID) registration))
.setStorageInfo(PBHelper.convert(registration.getStorageInfo()))
- .setKeys(PBHelper.convert(registration.getExportedKeys())).build();
+ .setKeys(PBHelper.convert(registration.getExportedKeys()))
+ .setSoftwareVersion(registration.getSoftwareVersion()).build();
}
public static DatanodeRegistration convert(DatanodeRegistrationProto proto) {
return new DatanodeRegistration(PBHelper.convert(proto.getDatanodeID()),
PBHelper.convert(proto.getStorageInfo()), PBHelper.convert(proto
- .getKeys()));
+ .getKeys()), proto.getSoftwareVersion());
}
public static DatanodeCommand convert(DatanodeCommandProto proto) {
@@ -894,7 +895,8 @@ public class PBHelper {
.setBlockPoolID(info.getBlockPoolID())
.setBuildVersion(info.getBuildVersion())
.setDistUpgradeVersion(info.getDistributedUpgradeVersion())
- .setStorageInfo(PBHelper.convert((StorageInfo)info)).build();
+ .setStorageInfo(PBHelper.convert((StorageInfo)info))
+ .setSoftwareVersion(info.getSoftwareVersion()).build();
}
// Located Block Arrays and Lists
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1325570&r1=1325569&r2=1325570&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Thu Apr 12 23:23:39 2012
@@ -247,8 +247,7 @@ public class BlockManager {
this.maxReplicationStreams = conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY,
DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT);
- this.shouldCheckForEnoughRacks = conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY) == null ? false
- : true;
+ this.shouldCheckForEnoughRacks = conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY) != null;
this.replicationRecheckInterval =
conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
@@ -2829,7 +2828,9 @@ assert storedBlock.findDatanode(dn) < 0
DatanodeDescriptor cur = it.next();
if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()) {
if ((corruptNodes == null ) || !corruptNodes.contains(cur)) {
- if (numExpectedReplicas == 1) {
+ if (numExpectedReplicas == 1 ||
+ (numExpectedReplicas > 1 &&
+ !datanodeManager.hasClusterEverBeenMultiRack())) {
enoughRacks = true;
break;
}
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java?rev=1325570&r1=1325569&r2=1325570&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java Thu Apr 12 23:23:39 2012
@@ -71,6 +71,7 @@ import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.HostsFileReader;
import org.apache.hadoop.util.ReflectionUtils;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.net.InetAddresses;
/**
@@ -126,6 +127,12 @@ public class DatanodeManager {
/** Ask Datanode only up to this many blocks to delete. */
final int blockInvalidateLimit;
+ /**
+ * Whether or not this cluster has ever consisted of more than 1 rack,
+ * according to the NetworkTopology.
+ */
+ private boolean hasClusterEverBeenMultiRack = false;
+
DatanodeManager(final BlockManager blockManager,
final Namesystem namesystem, final Configuration conf
) throws IOException {
@@ -331,6 +338,7 @@ public class DatanodeManager {
host2DatanodeMap.add(node);
networktopology.add(node);
+ checkIfClusterIsNowMultiRack(node);
if (LOG.isDebugEnabled()) {
LOG.debug(getClass().getSimpleName() + ".addDatanode: "
@@ -769,6 +777,42 @@ public class DatanodeManager {
}
/**
+ * @return true if this cluster has ever consisted of multiple racks, even if
+ * it is not now a multi-rack cluster.
+ */
+ boolean hasClusterEverBeenMultiRack() {
+ return hasClusterEverBeenMultiRack;
+ }
+
+ /**
+ * Check if the cluster now consists of multiple racks. If it does, and this
+ * is the first time it's consisted of multiple racks, then process blocks
+ * that may now be misreplicated.
+ *
+ * @param node DN which caused cluster to become multi-rack. Used for logging.
+ */
+ @VisibleForTesting
+ void checkIfClusterIsNowMultiRack(DatanodeDescriptor node) {
+ if (!hasClusterEverBeenMultiRack && networktopology.getNumOfRacks() > 1) {
+ String message = "DN " + node + " joining cluster has expanded a formerly " +
+ "single-rack cluster to be multi-rack. ";
+ if (namesystem.isPopulatingReplQueues()) {
+ message += "Re-checking all blocks for replication, since they should " +
+ "now be replicated cross-rack";
+ LOG.info(message);
+ } else {
+ message += "Not checking for mis-replicated blocks because this NN is " +
+ "not yet processing repl queues.";
+ LOG.debug(message);
+ }
+ hasClusterEverBeenMultiRack = true;
+ if (namesystem.isPopulatingReplQueues()) {
+ blockManager.processMisReplicatedBlocks();
+ }
+ }
+ }
+
+ /**
* Parse a DatanodeID from a hosts file entry
* @param hostLine of form [hostname|ip][:port]?
* @return DatanodeID constructed from the given string
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java?rev=1325570&r1=1325569&r2=1325570&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java Thu Apr 12 23:23:39 2012
@@ -32,7 +32,19 @@ import org.apache.hadoop.hdfs.protocol.H
@InterfaceStability.Evolving
public class IncorrectVersionException extends IOException {
private static final long serialVersionUID = 1L;
+
+ public IncorrectVersionException(String message) {
+ super(message);
+ }
+ public IncorrectVersionException(String minimumVersion, String reportedVersion,
+ String remoteDaemon, String thisDaemon) {
+ this("The reported " + remoteDaemon + " version is too low to communicate" +
+ " with this " + thisDaemon + ". " + remoteDaemon + " version: '" +
+ reportedVersion + "' Minimum " + remoteDaemon + " version: '" +
+ minimumVersion + "'");
+ }
+
public IncorrectVersionException(int versionReported, String ofWhat) {
this(versionReported, ofWhat, HdfsConstants.LAYOUT_VERSION);
}
@@ -40,16 +52,9 @@ public class IncorrectVersionException e
public IncorrectVersionException(int versionReported,
String ofWhat,
int versionExpected) {
- super("Unexpected version "
- + (ofWhat==null ? "" : "of " + ofWhat) + ". Reported: "
- + versionReported + ". Expecting = " + versionExpected + ".");
+ this("Unexpected version "
+ + (ofWhat==null ? "" : "of " + ofWhat) + ". Reported: "
+ + versionReported + ". Expecting = " + versionExpected + ".");
}
- public IncorrectVersionException(String versionReported,
- String ofWhat,
- String versionExpected) {
- super("Unexpected version "
- + (ofWhat==null ? "" : "of " + ofWhat) + ". Reported: "
- + versionReported + ". Expecting = " + versionExpected + ".");
- }
}
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java?rev=1325570&r1=1325569&r2=1325570&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java Thu Apr 12 23:23:39 2012
@@ -37,7 +37,6 @@ import org.apache.hadoop.hdfs.protocol.L
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
-import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -49,9 +48,11 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.hdfs.util.VersionUtil;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.VersionInfo;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Maps;
@@ -178,17 +179,23 @@ class BPServiceActor implements Runnable
private void checkNNVersion(NamespaceInfo nsInfo)
throws IncorrectVersionException {
// build and layout versions should match
- String nsBuildVer = nsInfo.getBuildVersion();
- String stBuildVer = Storage.getBuildVersion();
- if (!nsBuildVer.equals(stBuildVer)) {
- LOG.warn("Data-node and name-node Build versions must be the same. " +
- "Namenode build version: " + nsBuildVer + "Datanode " +
- "build version: " + stBuildVer);
- throw new IncorrectVersionException(nsBuildVer, "namenode", stBuildVer);
+ String nnVersion = nsInfo.getSoftwareVersion();
+ String minimumNameNodeVersion = dnConf.getMinimumNameNodeVersion();
+ if (VersionUtil.compareVersions(nnVersion, minimumNameNodeVersion) < 0) {
+ IncorrectVersionException ive = new IncorrectVersionException(
+ minimumNameNodeVersion, nnVersion, "NameNode", "DataNode");
+ LOG.warn(ive.getMessage());
+ throw ive;
+ }
+ String dnVersion = VersionInfo.getVersion();
+ if (!nnVersion.equals(dnVersion)) {
+ LOG.info("Reported NameNode version '" + nnVersion + "' does not match " +
+ "DataNode version '" + dnVersion + "' but is within acceptable " +
+ "limits. Note: This is normal during a rolling upgrade.");
}
if (HdfsConstants.LAYOUT_VERSION != nsInfo.getLayoutVersion()) {
- LOG.warn("Data-node and name-node layout versions must be the same." +
+ LOG.warn("DataNode and NameNode layout versions must be the same." +
" Expected: "+ HdfsConstants.LAYOUT_VERSION +
" actual "+ nsInfo.getLayoutVersion());
throw new IncorrectVersionException(
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java?rev=1325570&r1=1325569&r2=1325570&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java Thu Apr 12 23:23:39 2012
@@ -31,6 +31,8 @@ import static org.apache.hadoop.hdfs.DFS
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_TRANSFERTO_ALLOWED_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -58,6 +60,8 @@ class DNConf {
final long deleteReportInterval;
final long initialBlockReportDelay;
final int writePacketSize;
+
+ final String minimumNameNodeVersion;
public DNConf(Configuration conf) {
socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
@@ -111,5 +115,12 @@ class DNConf {
this.syncOnClose = conf.getBoolean(DFS_DATANODE_SYNCONCLOSE_KEY,
DFS_DATANODE_SYNCONCLOSE_DEFAULT);
+ this.minimumNameNodeVersion = conf.get(DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY,
+ DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT);
+ }
+
+ // We get minimumNameNodeVersion via a method so it can be mocked out in tests.
+ String getMinimumNameNodeVersion() {
+ return this.minimumNameNodeVersion;
}
}
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1325570&r1=1325569&r2=1325570&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Thu Apr 12 23:23:39 2012
@@ -673,6 +673,7 @@ public class DataNode extends Configured
bpRegistration.setIpcPort(getIpcPort());
bpRegistration.setHostName(hostName);
bpRegistration.setStorageID(getStorageId());
+ bpRegistration.setSoftwareVersion(VersionInfo.getVersion());
StorageInfo storageInfo = storage.getBPStorage(nsInfo.getBlockPoolID());
if (storageInfo == null) {
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java?rev=1325570&r1=1325569&r2=1325570&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java Thu Apr 12 23:23:39 2012
@@ -242,7 +242,7 @@ public class BackupNode extends NameNode
*/
private void verifyJournalRequest(JournalInfo journalInfo)
throws IOException {
- verifyVersion(journalInfo.getLayoutVersion());
+ verifyLayoutVersion(journalInfo.getLayoutVersion());
String errorMsg = null;
int expectedNamespaceID = namesystem.getNamespaceInfo().getNamespaceID();
if (journalInfo.getNamespaceId() != expectedNamespaceID) {
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1325570&r1=1325569&r2=1325570&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Thu Apr 12 23:23:39 2012
@@ -311,10 +311,12 @@ public class FSEditLog {
endCurrentLogSegment(true);
}
- try {
- journalSet.close();
- } catch (IOException ioe) {
- LOG.warn("Error closing journalSet", ioe);
+ if (!journalSet.isEmpty()) {
+ try {
+ journalSet.close();
+ } catch (IOException ioe) {
+ LOG.warn("Error closing journalSet", ioe);
+ }
}
state = State.CLOSED;
@@ -813,9 +815,8 @@ public class FSEditLog {
}
/**
- * Used only by unit tests.
+ * Get all the journals this edit log is currently operating on.
*/
- @VisibleForTesting
synchronized List<JournalAndStream> getJournals() {
return journalSet.getAllJournalStreams();
}
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java?rev=1325570&r1=1325569&r2=1325570&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java Thu Apr 12 23:23:39 2012
@@ -344,7 +344,7 @@ class FileJournalManager implements Jour
}
}
- private List<EditLogFile> getLogFiles(long fromTxId) throws IOException {
+ List<EditLogFile> getLogFiles(long fromTxId) throws IOException {
File currentDir = sd.getCurrentDir();
List<EditLogFile> allLogFiles = matchEditLogs(currentDir);
List<EditLogFile> logFiles = Lists.newArrayList();
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1325570&r1=1325569&r2=1325570&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Thu Apr 12 23:23:39 2012
@@ -18,14 +18,17 @@
package org.apache.hadoop.hdfs.server.namenode;
import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.net.URI;
import java.util.Arrays;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -41,7 +44,6 @@ import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Trash;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -49,6 +51,9 @@ import org.apache.hadoop.hdfs.protocol.C
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
+import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
import org.apache.hadoop.hdfs.server.namenode.ha.ActiveState;
import org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby;
import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
@@ -61,6 +66,8 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.util.AtomicFileOutputStream;
+import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.net.NetUtils;
@@ -749,9 +756,10 @@ public class NameNode {
boolean force) {
return initializeSharedEdits(conf, force, false);
}
-
+
/**
- * Format a new shared edits dir.
+ * Format a new shared edits dir and copy in enough edit log segments so that
+ * the standby NN can start up.
*
* @param conf configuration
* @param force format regardless of whether or not the shared edits dir exists
@@ -785,8 +793,19 @@ public class NameNode {
existingStorage.getBlockPoolID(),
existingStorage.getCTime(),
existingStorage.getDistributedUpgradeVersion()));
- } catch (Exception e) {
- LOG.error("Could not format shared edits dir", e);
+
+ // Need to make sure the edit log segments are in good shape to initialize
+ // the shared edits dir.
+ fsns.getFSImage().getEditLog().close();
+ fsns.getFSImage().getEditLog().initJournalsForWrite();
+ fsns.getFSImage().getEditLog().recoverUnclosedStreams();
+
+ if (copyEditLogSegmentsToSharedDir(fsns, sharedEditsDirs,
+ newSharedStorage, conf)) {
+ return true; // aborted
+ }
+ } catch (IOException ioe) {
+ LOG.error("Could not initialize shared edits dir", ioe);
return true; // aborted
} finally {
// Have to unlock storage explicitly for the case when we're running in a
@@ -802,6 +821,44 @@ public class NameNode {
}
return false; // did not abort
}
+
+ private static boolean copyEditLogSegmentsToSharedDir(FSNamesystem fsns,
+ Collection<URI> sharedEditsDirs, NNStorage newSharedStorage,
+ Configuration conf) throws FileNotFoundException, IOException {
+ // Copy edit log segments into the new shared edits dir.
+ for (JournalAndStream jas : fsns.getFSImage().getEditLog().getJournals()) {
+ FileJournalManager fjm = null;
+ if (!(jas.getManager() instanceof FileJournalManager)) {
+ LOG.error("Cannot populate shared edits dir from non-file " +
+ "journal manager: " + jas.getManager());
+ return true; // aborted
+ } else {
+ fjm = (FileJournalManager) jas.getManager();
+ }
+ for (EditLogFile elf : fjm.getLogFiles(fsns.getFSImage()
+ .getMostRecentCheckpointTxId())) {
+ File editLogSegment = elf.getFile();
+ for (URI sharedEditsUri : sharedEditsDirs) {
+ StorageDirectory sharedEditsDir = newSharedStorage
+ .getStorageDirectory(sharedEditsUri);
+ File targetFile = new File(sharedEditsDir.getCurrentDir(),
+ editLogSegment.getName());
+ if (!targetFile.exists()) {
+ InputStream in = null;
+ OutputStream out = null;
+ try {
+ in = new FileInputStream(editLogSegment);
+ out = new AtomicFileOutputStream(targetFile);
+ IOUtils.copyBytes(in, out, conf);
+ } finally {
+ IOUtils.cleanup(LOG, in, out);
+ }
+ }
+ }
+ }
+ }
+ return false; // did not abort
+ }
private static boolean finalize(Configuration conf,
boolean isConfirmationNeeded
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java?rev=1325570&r1=1325569&r2=1325570&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java Thu Apr 12 23:23:39 2012
@@ -47,6 +47,7 @@ import org.apache.hadoop.ha.ServiceFaile
import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceProtocolService;
import org.apache.hadoop.ha.protocolPB.HAServiceProtocolPB;
import org.apache.hadoop.ha.protocolPB.HAServiceProtocolServerSideTranslatorPB;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -107,6 +108,7 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
+import org.apache.hadoop.hdfs.util.VersionUtil;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
@@ -121,6 +123,7 @@ import org.apache.hadoop.security.author
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.VersionInfo;
import com.google.protobuf.BlockingService;
@@ -147,6 +150,8 @@ class NameNodeRpcServer implements Namen
/** The RPC server that listens to requests from clients */
protected final RPC.Server clientRpcServer;
protected final InetSocketAddress clientRpcAddress;
+
+ private final String minimumDataNodeVersion;
public NameNodeRpcServer(Configuration conf, NameNode nn)
throws IOException {
@@ -261,6 +266,10 @@ class NameNodeRpcServer implements Namen
// The rpc-server port can be ephemeral... ensure we have the correct info
this.clientRpcAddress = this.clientRpcServer.getListenerAddress();
nn.setRpcServerAddress(conf, clientRpcAddress);
+
+ this.minimumDataNodeVersion = conf.get(
+ DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY,
+ DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_DEFAULT);
}
/**
@@ -326,7 +335,7 @@ class NameNodeRpcServer implements Namen
@Override // NamenodeProtocol
public NamenodeRegistration register(NamenodeRegistration registration)
throws IOException {
- verifyVersion(registration.getVersion());
+ verifyLayoutVersion(registration.getVersion());
NamenodeRegistration myRegistration = nn.setRegistration();
namesystem.registerBackupNode(registration, myRegistration);
return myRegistration;
@@ -829,9 +838,10 @@ class NameNodeRpcServer implements Namen
@Override // DatanodeProtocol
- public DatanodeRegistration registerDatanode(DatanodeRegistration nodeReg
- ) throws IOException {
- verifyVersion(nodeReg.getVersion());
+ public DatanodeRegistration registerDatanode(DatanodeRegistration nodeReg)
+ throws IOException {
+ verifyLayoutVersion(nodeReg.getVersion());
+ verifySoftwareVersion(nodeReg);
namesystem.registerDatanode(nodeReg);
return nodeReg;
}
@@ -916,7 +926,7 @@ class NameNodeRpcServer implements Namen
* @throws UnregisteredNodeException if the registration is invalid
*/
void verifyRequest(NodeRegistration nodeReg) throws IOException {
- verifyVersion(nodeReg.getVersion());
+ verifyLayoutVersion(nodeReg.getVersion());
if (!namesystem.getRegistrationID().equals(nodeReg.getRegistrationID())) {
LOG.warn("Invalid registrationID - expected: "
+ namesystem.getRegistrationID() + " received: "
@@ -989,10 +999,39 @@ class NameNodeRpcServer implements Namen
* @param version
* @throws IOException
*/
- void verifyVersion(int version) throws IOException {
+ void verifyLayoutVersion(int version) throws IOException {
if (version != HdfsConstants.LAYOUT_VERSION)
throw new IncorrectVersionException(version, "data node");
}
+
+ private void verifySoftwareVersion(DatanodeRegistration dnReg)
+ throws IncorrectVersionException {
+ String dnVersion = dnReg.getSoftwareVersion();
+ if (VersionUtil.compareVersions(dnVersion, minimumDataNodeVersion) < 0) {
+ IncorrectVersionException ive = new IncorrectVersionException(
+ minimumDataNodeVersion, dnVersion, "DataNode", "NameNode");
+ LOG.warn(ive.getMessage() + " DN: " + dnReg);
+ throw ive;
+ }
+ String nnVersion = VersionInfo.getVersion();
+ if (!dnVersion.equals(nnVersion)) {
+ String messagePrefix = "Reported DataNode version '" + dnVersion +
+ "' of DN " + dnReg + " does not match NameNode version '" +
+ nnVersion + "'";
+ long nnCTime = nn.getFSImage().getStorage().getCTime();
+ long dnCTime = dnReg.getStorageInfo().getCTime();
+ if (nnCTime != dnCTime) {
+ IncorrectVersionException ive = new IncorrectVersionException(
+ messagePrefix + " and CTime of DN ('" + dnCTime +
+ "') does not match CTime of NN ('" + nnCTime + "')");
+ LOG.warn(ive);
+ throw ive;
+ } else {
+ LOG.info(messagePrefix +
+ ". Note: This is normal during a rolling upgrade.");
+ }
+ }
+ }
private static String getClientMachine() {
String clientMachine = NamenodeWebHdfsMethods.getRemoteAddress();
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java?rev=1325570&r1=1325569&r2=1325570&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java Thu Apr 12 23:23:39 2012
@@ -37,12 +37,14 @@ public class DatanodeRegistration extend
private StorageInfo storageInfo;
private ExportedBlockKeys exportedKeys;
+ private String softwareVersion;
public DatanodeRegistration(DatanodeID dn, StorageInfo info,
- ExportedBlockKeys keys) {
+ ExportedBlockKeys keys, String softwareVersion) {
super(dn);
this.storageInfo = info;
this.exportedKeys = keys;
+ this.softwareVersion = softwareVersion;
}
public DatanodeRegistration(String ipAddr, int xferPort) {
@@ -71,6 +73,14 @@ public class DatanodeRegistration extend
public ExportedBlockKeys getExportedKeys() {
return exportedKeys;
}
+
+ public void setSoftwareVersion(String softwareVersion) {
+ this.softwareVersion = softwareVersion;
+ }
+
+ public String getSoftwareVersion() {
+ return softwareVersion;
+ }
@Override // NodeRegistration
public int getVersion() {
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java?rev=1325570&r1=1325569&r2=1325570&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java Thu Apr 12 23:23:39 2012
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.protocol.H
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
+import org.apache.hadoop.util.VersionInfo;
/**
* NamespaceInfo is returned by the name-node in reply
@@ -38,6 +39,7 @@ public class NamespaceInfo extends Stora
String buildVersion;
int distributedUpgradeVersion;
String blockPoolID = ""; // id of the block pool
+ String softwareVersion;
public NamespaceInfo() {
super();
@@ -45,16 +47,18 @@ public class NamespaceInfo extends Stora
}
public NamespaceInfo(int nsID, String clusterID, String bpID,
- long cT, int duVersion, String buildVersion) {
+ long cT, int duVersion, String buildVersion, String softwareVersion) {
super(HdfsConstants.LAYOUT_VERSION, nsID, clusterID, cT);
blockPoolID = bpID;
this.buildVersion = buildVersion;
this.distributedUpgradeVersion = duVersion;
+ this.softwareVersion = softwareVersion;
}
public NamespaceInfo(int nsID, String clusterID, String bpID,
long cT, int duVersion) {
- this(nsID, clusterID, bpID, cT, duVersion, Storage.getBuildVersion());
+ this(nsID, clusterID, bpID, cT, duVersion, Storage.getBuildVersion(),
+ VersionInfo.getVersion());
}
public String getBuildVersion() {
@@ -68,6 +72,10 @@ public class NamespaceInfo extends Stora
public String getBlockPoolID() {
return blockPoolID;
}
+
+ public String getSoftwareVersion() {
+ return softwareVersion;
+ }
public String toString(){
return super.toString() + ";bpid=" + blockPoolID;
Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1325052-1325569
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto?rev=1325570&r1=1325569&r2=1325570&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto Thu Apr 12 23:23:39 2012
@@ -33,6 +33,7 @@ message DatanodeRegistrationProto {
required DatanodeIDProto datanodeID = 1; // Datanode information
required StorageInfoProto storageInfo = 2; // Node information
required ExportedBlockKeysProto keys = 3; // Block keys
+ required string softwareVersion = 4; // Software version of the DN, e.g. "2.0.0"
}
/**
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto?rev=1325570&r1=1325569&r2=1325570&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto Thu Apr 12 23:23:39 2012
@@ -303,10 +303,11 @@ message RemoteEditLogManifestProto {
* Namespace information that describes namespace on a namenode
*/
message NamespaceInfoProto {
- required string buildVersion = 1; // Software build version
+ required string buildVersion = 1; // Software revision version (e.g. an svn or git revision)
required uint32 distUpgradeVersion = 2; // Distributed upgrade version
required string blockPoolID = 3; // block pool used by the namespace
- required StorageInfoProto storageInfo = 4;// Noe information
+ required StorageInfoProto storageInfo = 4;// Node information
+ required string softwareVersion = 5; // Software version number (e.g. 2.0.0)
}
/**
Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1325052-1325569
Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1325052-1325569
Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1325052-1325569
Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1325052-1325569
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java?rev=1325570&r1=1325569&r2=1325570&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java Thu Apr 12 23:23:39 2012
@@ -17,24 +17,40 @@
*/
package org.apache.hadoop.hdfs;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.*;
+
import java.net.InetSocketAddress;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.DFSClient;
-import junit.framework.TestCase;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.VersionInfo;
+import org.junit.Test;
/**
* This class tests that a file need not be closed before its
* data can be read by another client.
*/
-public class TestDatanodeRegistration extends TestCase {
+public class TestDatanodeRegistration {
+
+ public static final Log LOG = LogFactory.getLog(TestDatanodeRegistration.class);
/**
* Regression test for HDFS-894 ensures that, when datanodes
* are restarted, the new IPC port is registered with the
* namenode.
*/
+ @Test
public void testChangeIpcPort() throws Exception {
HdfsConfiguration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
@@ -74,4 +90,102 @@ public class TestDatanodeRegistration ex
}
}
}
+
+ @Test
+ public void testRegistrationWithDifferentSoftwareVersions() throws Exception {
+ Configuration conf = new HdfsConfiguration();
+ conf.set(DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY, "3.0.0");
+ conf.set(DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY, "3.0.0");
+ MiniDFSCluster cluster = null;
+ try {
+ cluster = new MiniDFSCluster.Builder(conf)
+ .numDataNodes(0)
+ .build();
+
+ NamenodeProtocols rpcServer = cluster.getNameNodeRpc();
+
+ long nnCTime = cluster.getNamesystem().getFSImage().getStorage().getCTime();
+ StorageInfo mockStorageInfo = mock(StorageInfo.class);
+ doReturn(nnCTime).when(mockStorageInfo).getCTime();
+
+ DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
+ doReturn(HdfsConstants.LAYOUT_VERSION).when(mockDnReg).getVersion();
+ doReturn("fake-storage-id").when(mockDnReg).getStorageID();
+ doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();
+
+ // Should succeed when software versions are the same.
+ doReturn("3.0.0").when(mockDnReg).getSoftwareVersion();
+ rpcServer.registerDatanode(mockDnReg);
+
+ // Should succeed when software version of DN is above minimum required by NN.
+ doReturn("4.0.0").when(mockDnReg).getSoftwareVersion();
+ rpcServer.registerDatanode(mockDnReg);
+
+ // Should fail when software version of DN is below minimum required by NN.
+ doReturn("2.0.0").when(mockDnReg).getSoftwareVersion();
+ try {
+ rpcServer.registerDatanode(mockDnReg);
+ fail("Should not have been able to register DN with too-low version.");
+ } catch (IncorrectVersionException ive) {
+ GenericTestUtils.assertExceptionContains(
+ "The reported DataNode version is too low", ive);
+ LOG.info("Got expected exception", ive);
+ }
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
+
+ @Test
+ public void testRegistrationWithDifferentSoftwareVersionsDuringUpgrade()
+ throws Exception {
+ Configuration conf = new HdfsConfiguration();
+ conf.set(DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY, "1.0.0");
+ MiniDFSCluster cluster = null;
+ try {
+ cluster = new MiniDFSCluster.Builder(conf)
+ .numDataNodes(0)
+ .build();
+
+ NamenodeProtocols rpcServer = cluster.getNameNodeRpc();
+
+ long nnCTime = cluster.getNamesystem().getFSImage().getStorage().getCTime();
+ StorageInfo mockStorageInfo = mock(StorageInfo.class);
+ doReturn(nnCTime).when(mockStorageInfo).getCTime();
+
+ DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
+ doReturn(HdfsConstants.LAYOUT_VERSION).when(mockDnReg).getVersion();
+ doReturn("fake-storage-id").when(mockDnReg).getStorageID();
+ doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();
+
+ // Should succeed when software versions are the same and CTimes are the
+ // same.
+ doReturn(VersionInfo.getVersion()).when(mockDnReg).getSoftwareVersion();
+ rpcServer.registerDatanode(mockDnReg);
+
+ // Should succeed when software versions are the same and CTimes are
+ // different.
+ doReturn(nnCTime + 1).when(mockStorageInfo).getCTime();
+ rpcServer.registerDatanode(mockDnReg);
+
+ // Should fail when software version of DN is different from NN and CTimes
+ // are different.
+ doReturn(VersionInfo.getVersion() + ".1").when(mockDnReg).getSoftwareVersion();
+ try {
+ rpcServer.registerDatanode(mockDnReg);
+ fail("Should not have been able to register DN with different software" +
+ " versions and CTimes");
+ } catch (IncorrectVersionException ive) {
+ GenericTestUtils.assertExceptionContains(
+ "does not match CTime of NN", ive);
+ LOG.info("Got expected exception", ive);
+ }
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
}
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java?rev=1325570&r1=1325569&r2=1325570&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java Thu Apr 12 23:23:39 2012
@@ -429,12 +429,13 @@ public class TestPBHelper {
ExportedBlockKeys expKeys = new ExportedBlockKeys(true, 9, 10,
getBlockKey(1), keys);
DatanodeRegistration reg = new DatanodeRegistration(dnId,
- new StorageInfo(), expKeys);
+ new StorageInfo(), expKeys, "3.0.0");
DatanodeRegistrationProto proto = PBHelper.convert(reg);
DatanodeRegistration reg2 = PBHelper.convert(proto);
compare(reg.getStorageInfo(), reg2.getStorageInfo());
compare(reg.getExportedKeys(), reg2.getExportedKeys());
compare((DatanodeID)reg, (DatanodeID)reg2);
+ assertEquals(reg.getSoftwareVersion(), reg2.getSoftwareVersion());
}
@Test
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java?rev=1325570&r1=1325569&r2=1325570&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java Thu Apr 12 23:23:39 2012
@@ -92,6 +92,7 @@ public class TestBlockManager {
dn.updateHeartbeat(
2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0);
+ bm.getDatanodeManager().checkIfClusterIsNowMultiRack(dn);
}
}
@@ -310,6 +311,32 @@ public class TestBlockManager {
rackB.contains(pipeline[1]));
}
+ @Test
+ public void testBlocksAreNotUnderreplicatedInSingleRack() throws Exception {
+ List<DatanodeDescriptor> nodes = ImmutableList.of(
+ new DatanodeDescriptor(new DatanodeID("h1", 5020), "/rackA"),
+ new DatanodeDescriptor(new DatanodeID("h2", 5020), "/rackA"),
+ new DatanodeDescriptor(new DatanodeID("h3", 5020), "/rackA"),
+ new DatanodeDescriptor(new DatanodeID("h4", 5020), "/rackA"),
+ new DatanodeDescriptor(new DatanodeID("h5", 5020), "/rackA"),
+ new DatanodeDescriptor(new DatanodeID("h6", 5020), "/rackA")
+ );
+ addNodes(nodes);
+ List<DatanodeDescriptor> origNodes = nodes.subList(0, 3);
+ for (int i = 0; i < NUM_TEST_ITERS; i++) {
+ doTestSingleRackClusterIsSufficientlyReplicated(i, origNodes);
+ }
+ }
+
+ private void doTestSingleRackClusterIsSufficientlyReplicated(int testIndex,
+ List<DatanodeDescriptor> origNodes)
+ throws Exception {
+ assertEquals(0, bm.numOfUnderReplicatedBlocks());
+ addBlockOnNodes((long)testIndex, origNodes);
+ bm.processMisReplicatedBlocks();
+ assertEquals(0, bm.numOfUnderReplicatedBlocks());
+ }
+
/**
* Tell the block manager that replication is completed for the given
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java?rev=1325570&r1=1325569&r2=1325570&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java Thu Apr 12 23:23:39 2012
@@ -97,7 +97,7 @@ public class TestBlocksWithNotEnoughRack
final FileSystem fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
- DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 1);
+ DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 0);
// Add a new datanode on a different rack
String newRacks[] = {"/rack2"};
@@ -165,7 +165,7 @@ public class TestBlocksWithNotEnoughRack
final FileSystem fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
- DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 1);
+ DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 0);
// Add new datanodes on a different rack and increase the
// replication factor so the block is underreplicated and make
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java?rev=1325570&r1=1325569&r2=1325570&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java Thu Apr 12 23:23:39 2012
@@ -18,48 +18,105 @@
package org.apache.hadoop.hdfs.server.datanode;
-import java.net.InetSocketAddress;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.VersionInfo;
+import org.junit.Before;
import org.junit.Test;
-import org.mockito.Mockito;
-
public class TestDatanodeRegister {
public static final Log LOG = LogFactory.getLog(TestDatanodeRegister.class);
// Invalid address
- static final InetSocketAddress INVALID_ADDR =
+ private static final InetSocketAddress INVALID_ADDR =
new InetSocketAddress("127.0.0.1", 1);
-
- @Test
- public void testDataNodeRegister() throws Exception {
+
+ private BPServiceActor actor;
+ NamespaceInfo fakeNsInfo;
+ DNConf mockDnConf;
+
+ @Before
+ public void setUp() throws IOException {
+ mockDnConf = mock(DNConf.class);
+ doReturn(VersionInfo.getVersion()).when(mockDnConf).getMinimumNameNodeVersion();
+
DataNode mockDN = mock(DataNode.class);
- Mockito.doReturn(true).when(mockDN).shouldRun();
+ doReturn(true).when(mockDN).shouldRun();
+ doReturn(mockDnConf).when(mockDN).getDnConf();
- BPOfferService mockBPOS = Mockito.mock(BPOfferService.class);
- Mockito.doReturn(mockDN).when(mockBPOS).getDataNode();
+ BPOfferService mockBPOS = mock(BPOfferService.class);
+ doReturn(mockDN).when(mockBPOS).getDataNode();
- BPServiceActor actor = new BPServiceActor(INVALID_ADDR, mockBPOS);
+ actor = new BPServiceActor(INVALID_ADDR, mockBPOS);
- NamespaceInfo fakeNSInfo = mock(NamespaceInfo.class);
- when(fakeNSInfo.getBuildVersion()).thenReturn("NSBuildVersion");
- DatanodeProtocolClientSideTranslatorPB fakeDNProt =
+ fakeNsInfo = mock(NamespaceInfo.class);
+ // Return a good software version.
+ doReturn(VersionInfo.getVersion()).when(fakeNsInfo).getSoftwareVersion();
+ // Return a good layout version for now.
+ doReturn(HdfsConstants.LAYOUT_VERSION).when(fakeNsInfo).getLayoutVersion();
+
+ DatanodeProtocolClientSideTranslatorPB fakeDnProt =
mock(DatanodeProtocolClientSideTranslatorPB.class);
- when(fakeDNProt.versionRequest()).thenReturn(fakeNSInfo);
+ when(fakeDnProt.versionRequest()).thenReturn(fakeNsInfo);
+ actor.setNameNode(fakeDnProt);
+ }
- actor.setNameNode( fakeDNProt );
- try {
+ @Test
+ public void testSoftwareVersionDifferences() throws Exception {
+ // We expect no exception to be thrown when the software versions match.
+ assertEquals(VersionInfo.getVersion(),
+ actor.retrieveNamespaceInfo().getSoftwareVersion());
+
+ // We expect no exception to be thrown when the min NN version is below the
+ // reported NN version.
+ doReturn("4.0.0").when(fakeNsInfo).getSoftwareVersion();
+ doReturn("3.0.0").when(mockDnConf).getMinimumNameNodeVersion();
+ assertEquals("4.0.0", actor.retrieveNamespaceInfo().getSoftwareVersion());
+
+ // When the NN reports a version that's too low, throw an exception.
+ doReturn("3.0.0").when(fakeNsInfo).getSoftwareVersion();
+ doReturn("4.0.0").when(mockDnConf).getMinimumNameNodeVersion();
+ try {
+ actor.retrieveNamespaceInfo();
+ fail("Should have thrown an exception for NN with too-low version");
+ } catch (IncorrectVersionException ive) {
+ GenericTestUtils.assertExceptionContains(
+ "The reported NameNode version is too low", ive);
+ LOG.info("Got expected exception", ive);
+ }
+ }
+
+ @Test
+ public void testDifferentLayoutVersions() throws Exception {
+ // We expect no exceptions to be thrown when the layout versions match.
+ assertEquals(HdfsConstants.LAYOUT_VERSION,
+ actor.retrieveNamespaceInfo().getLayoutVersion());
+
+ // We expect an exception to be thrown when the NN reports a layout version
+ // different from that of the DN.
+ doReturn(HdfsConstants.LAYOUT_VERSION * 1000).when(fakeNsInfo)
+ .getLayoutVersion();
+ try {
actor.retrieveNamespaceInfo();
- fail("register() did not throw exception! " +
- "Expected: IncorrectVersionException");
- } catch (IncorrectVersionException ie) {
- LOG.info("register() returned correct Exception: IncorrectVersionException");
+ fail("Should have failed to retrieve NS info from DN with bad layout version");
+ } catch (IncorrectVersionException ive) {
+ GenericTestUtils.assertExceptionContains(
+ "Unexpected version of namenode", ive);
+ LOG.info("Got expected exception", ive);
}
}
}
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=1325570&r1=1325569&r2=1325570&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Thu Apr 12 23:23:39 2012
@@ -58,6 +58,7 @@ import org.apache.hadoop.net.DNS;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.security.Groups;
import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.VersionInfo;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
@@ -783,6 +784,7 @@ public class NNThroughputBenchmark {
String hostName = DNS.getDefaultHost("default", "default");
dnRegistration = new DatanodeRegistration(ipAddr, getNodePort(dnIdx));
dnRegistration.setHostName(hostName);
+ dnRegistration.setSoftwareVersion(VersionInfo.getVersion());
this.blocks = new ArrayList<Block>(blockCapacity);
this.nrBlocks = 0;
}
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java?rev=1325570&r1=1325569&r2=1325570&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java Thu Apr 12 23:23:39 2012
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.security.t
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
@@ -196,8 +197,7 @@ public class TestDelegationTokensWithHA
// check that the token selected for one of the physical IPC addresses
// matches the one we received
InetSocketAddress addr = nn0.getNameNodeAddress();
- Text ipcDtService = new Text(
- addr.getAddress().getHostAddress() + ":" + addr.getPort());
+ Text ipcDtService = SecurityUtil.buildTokenService(addr);
Token<DelegationTokenIdentifier> token2 =
DelegationTokenSelector.selectHdfsDelegationToken(ipcDtService, ugi);
assertNotNull(token2);
@@ -212,8 +212,15 @@ public class TestDelegationTokensWithHA
*/
@Test
public void testDFSGetCanonicalServiceName() throws Exception {
- assertEquals(fs.getCanonicalServiceName(),
- HATestUtil.getLogicalUri(cluster).getHost());
+ URI hAUri = HATestUtil.getLogicalUri(cluster);
+ String haService = HAUtil.buildTokenServiceForLogicalUri(hAUri).toString();
+ assertEquals(haService, dfs.getCanonicalServiceName());
+ Token<?> token = dfs.getDelegationToken(
+ UserGroupInformation.getCurrentUser().getShortUserName());
+ assertEquals(haService, token.getService().toString());
+ // make sure the logical uri is handled correctly
+ token.renew(dfs.getConf());
+ token.cancel(dfs.getConf());
}
enum TokenTestAction {
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java?rev=1325570&r1=1325569&r2=1325570&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java Thu Apr 12 23:23:39 2012
@@ -19,17 +19,22 @@ package org.apache.hadoop.hdfs.server.na
import java.io.File;
import java.io.IOException;
+import java.net.URISyntaxException;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.ha.ServiceFailedException;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Before;
@@ -48,7 +53,10 @@ public class TestInitializeSharedEdits {
@Before
public void setupCluster() throws IOException {
conf = new Configuration();
-
+ conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
+ conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+ HAUtil.setAllowStandbyReads(conf, true);
+
MiniDFSNNTopology topology = MiniDFSNNTopology.simpleHATopology();
cluster = new MiniDFSCluster.Builder(conf)
@@ -56,11 +64,8 @@ public class TestInitializeSharedEdits {
.numDataNodes(0)
.build();
cluster.waitActive();
-
- cluster.shutdownNameNode(0);
- cluster.shutdownNameNode(1);
- File sharedEditsDir = new File(cluster.getSharedEditsDir(0, 1));
- assertTrue(FileUtil.fullyDelete(sharedEditsDir));
+
+ shutdownClusterAndRemoveSharedEditsDir();
}
@After
@@ -70,8 +75,14 @@ public class TestInitializeSharedEdits {
}
}
- @Test
- public void testInitializeSharedEdits() throws Exception {
+ private void shutdownClusterAndRemoveSharedEditsDir() throws IOException {
+ cluster.shutdownNameNode(0);
+ cluster.shutdownNameNode(1);
+ File sharedEditsDir = new File(cluster.getSharedEditsDir(0, 1));
+ assertTrue(FileUtil.fullyDelete(sharedEditsDir));
+ }
+
+ private void assertCannotStartNameNodes() {
// Make sure we can't currently start either NN.
try {
cluster.restartNameNode(0, false);
@@ -89,24 +100,27 @@ public class TestInitializeSharedEdits {
GenericTestUtils.assertExceptionContains(
"Cannot start an HA namenode with name dirs that need recovery", ioe);
}
-
- // Initialize the shared edits dir.
- assertFalse(NameNode.initializeSharedEdits(conf));
-
+ }
+
+ private void assertCanStartHaNameNodes(String pathSuffix)
+ throws ServiceFailedException, IOException, URISyntaxException,
+ InterruptedException {
// Now should be able to start both NNs. Pass "false" here so that we don't
// try to waitActive on all NNs, since the second NN doesn't exist yet.
cluster.restartNameNode(0, false);
cluster.restartNameNode(1, true);
// Make sure HA is working.
- cluster.transitionToActive(0);
+ cluster.getNameNode(0).getRpcServer().transitionToActive();
FileSystem fs = null;
try {
+ Path newPath = new Path(TEST_PATH, pathSuffix);
fs = HATestUtil.configureFailoverFs(cluster, conf);
- assertTrue(fs.mkdirs(TEST_PATH));
- cluster.transitionToStandby(0);
- cluster.transitionToActive(1);
- assertTrue(fs.isDirectory(TEST_PATH));
+ assertTrue(fs.mkdirs(newPath));
+ HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
+ cluster.getNameNode(1));
+ assertTrue(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),
+ newPath.toString(), false).isDir());
} finally {
if (fs != null) {
fs.close();
@@ -115,6 +129,29 @@ public class TestInitializeSharedEdits {
}
@Test
+ public void testInitializeSharedEdits() throws Exception {
+ assertCannotStartNameNodes();
+
+ // Initialize the shared edits dir.
+ assertFalse(NameNode.initializeSharedEdits(cluster.getConfiguration(0)));
+
+ assertCanStartHaNameNodes("1");
+
+ // Now that we've done a metadata operation, make sure that deleting and
+ // re-initializing the shared edits dir will let the standby still start.
+
+ shutdownClusterAndRemoveSharedEditsDir();
+
+ assertCannotStartNameNodes();
+
+ // Re-initialize the shared edits dir.
+ assertFalse(NameNode.initializeSharedEdits(cluster.getConfiguration(0)));
+
+ // Should *still* be able to start both NNs
+ assertCanStartHaNameNodes("2");
+ }
+
+ @Test
public void testDontOverWriteExistingDir() {
assertFalse(NameNode.initializeSharedEdits(conf, false));
assertTrue(NameNode.initializeSharedEdits(conf, false));