Posted to hdfs-commits@hadoop.apache.org by to...@apache.org on 2012/06/07 23:25:39 UTC
svn commit: r1347804 - in /hadoop/common/branches/branch-2/hadoop-hdfs-project: ./ hadoop-hdfs/ hadoop-hdfs/dev-support/ hadoop-hdfs/src/main/bin/ hadoop-hdfs/src/main/java/ hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ hadoop-hdfs/src/main/java/or...
Author: todd
Date: Thu Jun 7 21:25:34 2012
New Revision: 1347804
URL: http://svn.apache.org/viewvc?rev=1347804&view=rev
Log:
Merge HDFS-3042 (automatic failover) to branch-2 from trunk
Added:
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-3042.txt
- copied unchanged from r1342112, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-3042.txt
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
- copied unchanged from r1342112, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HAZKInfo.proto
- copied unchanged from r1342112, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HAZKInfo.proto
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java
- copied unchanged from r1342112, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java
Modified:
hadoop/common/branches/branch-2/hadoop-hdfs-project/ (props changed)
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/ (props changed)
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/pom.xml
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/ (props changed)
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/ (props changed)
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/ (props changed)
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/ (props changed)
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/ (props changed)
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/ (props changed)
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestNNHealthCheck.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-policy.xml
Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project:r1342112
Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs:r1306184-1342109
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1342112
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml?rev=1347804&r1=1347803&r2=1347804&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml Thu Jun 7 21:25:34 2012
@@ -6,6 +6,9 @@
<Package name="org.apache.hadoop.hdfs.protocol.proto" />
</Match>
<Match>
+ <Package name="org.apache.hadoop.hdfs.server.namenode.ha.proto" />
+ </Match>
+ <Match>
<Bug pattern="EI_EXPOSE_REP" />
</Match>
<Match>
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/pom.xml?rev=1347804&r1=1347803&r2=1347804&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/pom.xml (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/pom.xml Thu Jun 7 21:25:34 2012
@@ -102,6 +102,33 @@ http://maven.apache.org/xsd/maven-4.0.0.
<artifactId>ant</artifactId>
<scope>provided</scope>
</dependency>
+ <dependency>
+ <groupId>org.apache.zookeeper</groupId>
+ <artifactId>zookeeper</artifactId>
+ <version>3.4.2</version>
+ <exclusions>
+ <exclusion>
+ <!-- otherwise seems to drag in junit 3.8.1 via jline -->
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>com.sun.jdmk</groupId>
+ <artifactId>jmxtools</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>com.sun.jmx</groupId>
+ <artifactId>jmxri</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.zookeeper</groupId>
+ <artifactId>zookeeper</artifactId>
+ <version>3.4.2</version>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
</dependencies>
<build>
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs?rev=1347804&r1=1347803&r2=1347804&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs Thu Jun 7 21:25:34 2012
@@ -30,6 +30,7 @@ function print_usage(){
echo " namenode -format format the DFS filesystem"
echo " secondarynamenode run the DFS secondary namenode"
echo " namenode run the DFS namenode"
+ echo " zkfc run the ZK Failover Controller daemon"
echo " datanode run a DFS datanode"
echo " dfsadmin run a DFS admin client"
echo " haadmin run a DFS HA admin client"
@@ -76,6 +77,9 @@ fi
if [ "$COMMAND" = "namenode" ] ; then
CLASS='org.apache.hadoop.hdfs.server.namenode.NameNode'
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_NAMENODE_OPTS"
+elif [ "$COMMAND" = "zkfc" ] ; then
+ CLASS='org.apache.hadoop.hdfs.tools.DFSZKFailoverController'
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_ZKFC_OPTS"
elif [ "$COMMAND" = "secondarynamenode" ] ; then
CLASS='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode'
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_SECONDARYNAMENODE_OPTS"
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh?rev=1347804&r1=1347803&r2=1347804&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh Thu Jun 7 21:25:34 2012
@@ -85,4 +85,15 @@ if [ -n "$SECONDARY_NAMENODES" ]; then
--script "$bin/hdfs" start secondarynamenode
fi
+#---------------------------------------------------------
+# ZK Failover controllers, if auto-HA is enabled
+AUTOHA_ENABLED=$($HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.ha.automatic-failover.enabled)
+if [ "$(echo "$AUTOHA_ENABLED" | tr A-Z a-z)" = "true" ]; then
+ echo "Starting ZK Failover Controllers on NN hosts [$NAMENODES]"
+ "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+ --config "$HADOOP_CONF_DIR" \
+ --hostnames "$NAMENODES" \
+ --script "$bin/hdfs" start zkfc
+fi
+
# eof
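The guard above shells out to "hdfs getconf -confKey dfs.ha.automatic-failover.enabled" and lower-cases the result before comparing it to "true". A minimal sketch of the same lookup in Java, using the constants this commit adds to DFSConfigKeys:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class CheckAutoHa {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration(); // picks up hdfs-site.xml
        boolean autoHa = conf.getBoolean(
            DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY,      // "dfs.ha.automatic-failover.enabled"
            DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_DEFAULT); // false
        System.out.println("auto-HA enabled: " + autoHa);
      }
    }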
Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
Merged /hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1306184-1342109
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1342112
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1347804&r1=1347803&r2=1347804&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Thu Jun 7 21:25:34 2012
@@ -348,4 +348,8 @@ public class DFSConfigKeys extends Commo
public static final String DFS_HA_TAILEDITS_PERIOD_KEY = "dfs.ha.tail-edits.period";
public static final int DFS_HA_TAILEDITS_PERIOD_DEFAULT = 60; // 1m
public static final String DFS_HA_FENCE_METHODS_KEY = "dfs.ha.fencing.methods";
+ public static final String DFS_HA_AUTO_FAILOVER_ENABLED_KEY = "dfs.ha.automatic-failover.enabled";
+ public static final boolean DFS_HA_AUTO_FAILOVER_ENABLED_DEFAULT = false;
+ public static final String DFS_HA_ZKFC_PORT_KEY = "dfs.ha.zkfc.port";
+ public static final int DFS_HA_ZKFC_PORT_DEFAULT = 8019;
}
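Like the other HA keys, the new ones can be scoped per nameservice or per namenode by suffixing the key (see the NAMENODE_SPECIFIC_KEYS change below). A hedged fragment reusing the conf from the sketch above; "ns1" and "nn1" are hypothetical ids, and DFSUtil.addKeySuffixes is the same helper the tests in this commit use:

    // Builds "dfs.ha.zkfc.port.ns1.nn1", then reads it back with the 8019 default.
    String key = DFSUtil.addKeySuffixes(
        DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY, "ns1", "nn1");
    conf.setInt(key, 8019);
    int zkfcPort = conf.getInt(key, DFSConfigKeys.DFS_HA_ZKFC_PORT_DEFAULT);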
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java?rev=1347804&r1=1347803&r2=1347804&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java Thu Jun 7 21:25:34 2012
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ha.HAServiceProtocol;
+import org.apache.hadoop.ha.ZKFCProtocol;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
@@ -47,6 +48,8 @@ public class HDFSPolicyProvider extends
new Service("security.namenode.protocol.acl", NamenodeProtocol.class),
new Service(CommonConfigurationKeys.SECURITY_HA_SERVICE_PROTOCOL_ACL,
HAServiceProtocol.class),
+ new Service(CommonConfigurationKeys.SECURITY_ZKFC_PROTOCOL_ACL,
+ ZKFCProtocol.class),
new Service(
CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_REFRESH_POLICY,
RefreshAuthorizationPolicyProtocol.class),
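Registering ZKFCProtocol here puts it under service-level authorization, keyed by security.zkfc.protocol.acl (the matching default entry is added to hadoop-policy.xml at the end of this commit). A hedged fragment, assuming the usual user/group ACL syntax:

    // Restrict ZKFCProtocol calls (cedeActive, gracefulFailover) to the hdfs user.
    conf.set(CommonConfigurationKeys.SECURITY_ZKFC_PROTOCOL_ACL, "hdfs");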
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1347804&r1=1347803&r2=1347804&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Thu Jun 7 21:25:34 2012
@@ -36,6 +36,7 @@ import org.apache.hadoop.HadoopIllegalAr
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
import org.apache.hadoop.ha.HAServiceStatus;
import org.apache.hadoop.ha.HealthCheckFailedException;
import org.apache.hadoop.ha.ServiceFailedException;
@@ -69,6 +70,7 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.util.AtomicFileOutputStream;
import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.net.NetUtils;
@@ -145,17 +147,25 @@ public class NameNode {
}
/**
- * HDFS federation configuration can have two types of parameters:
+ * HDFS configuration can have three types of parameters:
* <ol>
- * <li>Parameter that is common for all the name services in the cluster.</li>
- * <li>Parameters that are specific to a name service. This keys are suffixed
+ * <li>Parameters that are common for all the name services in the cluster.</li>
+ * <li>Parameters that are specific to a name service. These keys are suffixed
* with nameserviceId in the configuration. For example,
* "dfs.namenode.rpc-address.nameservice1".</li>
+ * <li>Parameters that are specific to a single name node. These keys are suffixed
+ * with nameserviceId and namenodeId in the configuration. for example,
+ * "dfs.namenode.rpc-address.nameservice1.namenode1"</li>
* </ol>
*
- * Following are nameservice specific keys.
+ * In the latter cases, operators may specify the configuration without
+ * any suffix, with a nameservice suffix, or with a nameservice and namenode
+ * suffix. The more specific suffix will take precedence.
+ *
+ * These keys are specific to a given namenode, and thus may be configured
+ * globally, for a nameservice, or for a specific namenode within a nameservice.
*/
- public static final String[] NAMESERVICE_SPECIFIC_KEYS = {
+ public static final String[] NAMENODE_SPECIFIC_KEYS = {
DFS_NAMENODE_RPC_ADDRESS_KEY,
DFS_NAMENODE_NAME_DIR_KEY,
DFS_NAMENODE_EDITS_DIR_KEY,
@@ -170,8 +180,19 @@ public class NameNode {
DFS_NAMENODE_BACKUP_ADDRESS_KEY,
DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY,
+ DFS_NAMENODE_USER_NAME_KEY,
DFS_HA_FENCE_METHODS_KEY,
- DFS_NAMENODE_USER_NAME_KEY
+ DFS_HA_ZKFC_PORT_KEY,
+ DFS_HA_FENCE_METHODS_KEY
+ };
+
+ /**
+ * @see #NAMENODE_SPECIFIC_KEYS
+ * These keys are specific to a nameservice, but may not be overridden
+ * for a specific namenode.
+ */
+ public static final String[] NAMESERVICE_SPECIFIC_KEYS = {
+ DFS_HA_AUTO_FAILOVER_ENABLED_KEY
};
public long getProtocolVersion(String protocol,
@@ -1145,8 +1166,11 @@ public class NameNode {
}
DFSUtil.setGenericConf(conf, nameserviceId, namenodeId,
+ NAMENODE_SPECIFIC_KEYS);
+ DFSUtil.setGenericConf(conf, nameserviceId, null,
NAMESERVICE_SPECIFIC_KEYS);
}
+
if (conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) != null) {
URI defaultUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
+ conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY));
@@ -1362,4 +1386,43 @@ public class NameNode {
public boolean isStandbyState() {
return (state.equals(STANDBY_STATE));
}
+
+ /**
+ * Check that a request to change this node's HA state is valid.
+ * In particular, verifies that, if auto failover is enabled, non-forced
+ * requests from the HAAdmin CLI are rejected, and vice versa.
+ *
+ * @param req the request to check
+ * @throws AccessControlException if the request is disallowed
+ */
+ void checkHaStateChange(StateChangeRequestInfo req)
+ throws AccessControlException {
+ boolean autoHaEnabled = conf.getBoolean(DFS_HA_AUTO_FAILOVER_ENABLED_KEY,
+ DFS_HA_AUTO_FAILOVER_ENABLED_DEFAULT);
+ switch (req.getSource()) {
+ case REQUEST_BY_USER:
+ if (autoHaEnabled) {
+ throw new AccessControlException(
+ "Manual HA control for this NameNode is disallowed, because " +
+ "automatic HA is enabled.");
+ }
+ break;
+ case REQUEST_BY_USER_FORCED:
+ if (autoHaEnabled) {
+ LOG.warn("Allowing manual HA control from " +
+ Server.getRemoteAddress() +
+ " even though automatic HA is enabled, because the user " +
+ "specified the force flag");
+ }
+ break;
+ case REQUEST_BY_ZKFC:
+ if (!autoHaEnabled) {
+ throw new AccessControlException(
+ "Request from ZK failover controller at " +
+ Server.getRemoteAddress() + " denied since automatic HA " +
+ "is not enabled");
+ }
+ break;
+ }
+ }
}
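With automatic HA enabled, the switch above produces three distinct outcomes depending on the request source. A caller-side summary, using the HAServiceProtocol types imported at the top of this file:

    // Assuming dfs.ha.automatic-failover.enabled = true:
    new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER);
    // -> AccessControlException: manual HA control is disallowed
    new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER_FORCED);
    // -> permitted, with a warning logged naming the remote address
    new StateChangeRequestInfo(RequestSource.REQUEST_BY_ZKFC);
    // -> permitted; with auto-HA disabled this is the source that is rejected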
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java?rev=1347804&r1=1347803&r2=1347804&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java Thu Jun 7 21:25:34 2012
@@ -979,14 +979,16 @@ class NameNodeRpcServer implements Namen
}
@Override // HAServiceProtocol
- public synchronized void transitionToActive()
+ public synchronized void transitionToActive(StateChangeRequestInfo req)
throws ServiceFailedException, AccessControlException {
+ nn.checkHaStateChange(req);
nn.transitionToActive();
}
@Override // HAServiceProtocol
- public synchronized void transitionToStandby()
+ public synchronized void transitionToStandby(StateChangeRequestInfo req)
throws ServiceFailedException, AccessControlException {
+ nn.checkHaStateChange(req);
nn.transitionToStandby();
}
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java?rev=1347804&r1=1347803&r2=1347804&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java Thu Jun 7 21:25:34 2012
@@ -207,7 +207,6 @@ public class BootstrapStandby implements
return 0;
}
-
private boolean checkLogsAvailableForRead(FSImage image, long imageTxId,
long curTxIdOnOtherNode) {
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java?rev=1347804&r1=1347803&r2=1347804&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java Thu Jun 7 21:25:34 2012
@@ -21,6 +21,7 @@ import java.net.InetSocketAddress;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.BadFencingConfigurationException;
import org.apache.hadoop.ha.HAServiceTarget;
import org.apache.hadoop.ha.NodeFencer;
@@ -44,12 +45,14 @@ public class NNHAServiceTarget extends H
private static final String NAMENODE_ID_KEY = "namenodeid";
private final InetSocketAddress addr;
+ private InetSocketAddress zkfcAddr;
private NodeFencer fencer;
private BadFencingConfigurationException fenceConfigError;
private final String nnId;
private final String nsId;
-
- public NNHAServiceTarget(HdfsConfiguration conf,
+ private final boolean autoFailoverEnabled;
+
+ public NNHAServiceTarget(Configuration conf,
String nsId, String nnId) {
Preconditions.checkNotNull(nnId);
@@ -75,12 +78,24 @@ public class NNHAServiceTarget extends H
}
this.addr = NetUtils.createSocketAddr(serviceAddr,
NameNode.DEFAULT_PORT);
+
+ this.autoFailoverEnabled = targetConf.getBoolean(
+ DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY,
+ DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_DEFAULT);
+ if (autoFailoverEnabled) {
+ int port = DFSZKFailoverController.getZkfcPort(targetConf);
+ if (port != 0) {
+ setZkfcPort(port);
+ }
+ }
+
try {
this.fencer = NodeFencer.create(targetConf,
DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY);
} catch (BadFencingConfigurationException e) {
this.fenceConfigError = e;
}
+
this.nnId = nnId;
this.nsId = nsId;
}
@@ -94,10 +109,29 @@ public class NNHAServiceTarget extends H
}
@Override
+ public InetSocketAddress getZKFCAddress() {
+ Preconditions.checkState(autoFailoverEnabled,
+ "ZKFC address not relevant when auto failover is off");
+ assert zkfcAddr != null;
+
+ return zkfcAddr;
+ }
+
+ void setZkfcPort(int port) {
+ assert autoFailoverEnabled;
+
+ this.zkfcAddr = new InetSocketAddress(addr.getAddress(), port);
+ }
+
+ @Override
public void checkFencingConfigured() throws BadFencingConfigurationException {
if (fenceConfigError != null) {
throw fenceConfigError;
}
+ if (fencer == null) {
+ throw new BadFencingConfigurationException(
+ "No fencer configured for " + this);
+ }
}
@Override
@@ -125,4 +159,9 @@ public class NNHAServiceTarget extends H
ret.put(NAMESERVICE_ID_KEY, getNameServiceId());
ret.put(NAMENODE_ID_KEY, getNameNodeId());
}
+
+ @Override
+ public boolean isAutoFailoverEnabled() {
+ return autoFailoverEnabled;
+ }
}
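A hedged construction sketch for the updated target; it assumes an HA configuration in which dfs.namenode.rpc-address.ns1.nn1 is already set (the constructor resolves the service address from the conf), with "ns1" and "nn1" as hypothetical ids:

    NNHAServiceTarget target = new NNHAServiceTarget(conf, "ns1", "nn1");
    if (target.isAutoFailoverEnabled()) {
      // NameNode host plus dfs.ha.zkfc.port (default 8019), per setZkfcPort above.
      InetSocketAddress zkfcAddr = target.getZKFCAddress();
    }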
Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1342112
Merged /hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1306184-1342109
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1347804&r1=1347803&r2=1347804&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Thu Jun 7 21:25:34 2012
@@ -829,6 +829,16 @@
</property>
<property>
+ <name>dfs.ha.automatic-failover.enabled</name>
+ <value>false</value>
+ <description>
+ Whether automatic failover is enabled. See the HDFS High
+ Availability documentation for details on automatic HA
+ configuration.
+ </description>
+</property>
+
+<property>
<name>dfs.support.append</name>
<value>true</value>
<description>
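The property defaults to off, and turning it on is only half of an automatic-failover setup, since the failover controllers also need a ZooKeeper quorum. A minimal hedged fragment; ha.zookeeper.quorum is the hadoop-common ZKFC key rather than part of this diff, and the hostnames are hypothetical:

    conf.setBoolean("dfs.ha.automatic-failover.enabled", true);
    conf.set("ha.zookeeper.quorum", "zk1:2181,zk2:2181,zk3:2181");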
Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1342112
Merged /hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1306184-1342109
Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1306184-1342109
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1342112
Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1342112
Merged /hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1306184-1342109
Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1306184-1342109
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1342112
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1347804&r1=1347803&r2=1347804&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Thu Jun 7 21:25:34 2012
@@ -67,8 +67,10 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.ha.HAServiceProtocol;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
import org.apache.hadoop.ha.HAServiceProtocolHelper;
import org.apache.hadoop.ha.ServiceFailedException;
+import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
import org.apache.hadoop.ha.protocolPB.HAServiceProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.MiniDFSNNTopology.NNConf;
import org.apache.hadoop.hdfs.protocol.Block;
@@ -1672,12 +1674,14 @@ public class MiniDFSCluster {
public void transitionToActive(int nnIndex) throws IOException,
ServiceFailedException {
- getNameNode(nnIndex).getRpcServer().transitionToActive();
+ getNameNode(nnIndex).getRpcServer().transitionToActive(
+ new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER_FORCED));
}
public void transitionToStandby(int nnIndex) throws IOException,
ServiceFailedException {
- getNameNode(nnIndex).getRpcServer().transitionToStandby();
+ getNameNode(nnIndex).getRpcServer().transitionToStandby(
+ new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER_FORCED));
}
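Because the helpers now pass REQUEST_BY_USER_FORCED, HA tests can drive state transitions without tripping the new checkHaStateChange guard even when auto-HA is configured. A hedged test sketch; MiniDFSNNTopology.simpleHATopology() is the usual two-NameNode topology builder in this codebase, treated as an assumption here:

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(0)
        .build();
    try {
      cluster.transitionToActive(0);   // sends REQUEST_BY_USER_FORCED
      cluster.transitionToStandby(0);
    } finally {
      cluster.shutdown();
    }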
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java?rev=1347804&r1=1347803&r2=1347804&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java Thu Jun 7 21:25:34 2012
@@ -274,7 +274,7 @@ public class TestDFSUtil {
conf.set(DFS_NAMESERVICE_ID, nsId);
// Set the nameservice specific keys with nameserviceId in the config key
- for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
+ for (String key : NameNode.NAMENODE_SPECIFIC_KEYS) {
// Note: value is same as the key
conf.set(DFSUtil.addKeySuffixes(key, nsId), key);
}
@@ -284,7 +284,7 @@ public class TestDFSUtil {
// Retrieve the keys without nameserviceId and Ensure generic keys are set
// to the correct value
- for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
+ for (String key : NameNode.NAMENODE_SPECIFIC_KEYS) {
assertEquals(key, conf.get(key));
}
}
@@ -304,7 +304,7 @@ public class TestDFSUtil {
conf.set(DFS_HA_NAMENODES_KEY_PREFIX + "." + nsId, nnId);
// Set the nameservice specific keys with nameserviceId in the config key
- for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
+ for (String key : NameNode.NAMENODE_SPECIFIC_KEYS) {
// Note: value is same as the key
conf.set(DFSUtil.addKeySuffixes(key, nsId, nnId), key);
}
@@ -314,7 +314,7 @@ public class TestDFSUtil {
// Retrieve the keys without nameserviceId and Ensure generic keys are set
// to the correct value
- for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
+ for (String key : NameNode.NAMENODE_SPECIFIC_KEYS) {
assertEquals(key, conf.get(key));
}
}
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java?rev=1347804&r1=1347803&r2=1347804&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java Thu Jun 7 21:25:34 2012
@@ -71,7 +71,7 @@ public class TestEditLogsDuringFailover
// Set the first NN to active, make sure it creates edits
// in its own dirs and the shared dir. The standby
// should still have no edits!
- cluster.getNameNode(0).getRpcServer().transitionToActive();
+ cluster.transitionToActive(0);
assertEditFiles(cluster.getNameDirs(0),
NNStorage.getInProgressEditsFileName(1));
@@ -107,7 +107,7 @@ public class TestEditLogsDuringFailover
// If we restart NN0, it'll come back as standby, and we can
// transition NN1 to active and make sure it reads edits correctly at this point.
cluster.restartNameNode(0);
- cluster.getNameNode(1).getRpcServer().transitionToActive();
+ cluster.transitionToActive(1);
// NN1 should have both the edits that came before its restart, and the edits that
// came after its restart.
@@ -134,7 +134,7 @@ public class TestEditLogsDuringFailover
NNStorage.getInProgressEditsFileName(1));
// Transition one of the NNs to active
- cluster.getNameNode(0).getRpcServer().transitionToActive();
+ cluster.transitionToActive(0);
// In the transition to active, it should have read the log -- and
// hence see one of the dirs we made in the fake log.
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java?rev=1347804&r1=1347803&r2=1347804&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java Thu Jun 7 21:25:34 2012
@@ -34,6 +34,8 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HAUtil;
@@ -129,7 +131,8 @@ public class TestHASafeMode {
DFSTestUtil
.createFile(fs, new Path("/test"), 3 * BLOCK_SIZE, (short) 3, 1L);
restartActive();
- nn0.getRpcServer().transitionToActive();
+ nn0.getRpcServer().transitionToActive(
+ new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));
FSNamesystem namesystem = nn0.getNamesystem();
String status = namesystem.getSafemode();
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java?rev=1347804&r1=1347803&r2=1347804&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java Thu Jun 7 21:25:34 2012
@@ -37,6 +37,8 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
+import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -71,6 +73,8 @@ public class TestHAStateTransitions {
private static final String TEST_FILE_STR = TEST_FILE_PATH.toUri().getPath();
private static final String TEST_FILE_DATA =
"Hello state transitioning world";
+ private static final StateChangeRequestInfo REQ_INFO = new StateChangeRequestInfo(
+ RequestSource.REQUEST_BY_USER_FORCED);
static {
((Log4JLogger)EditLogTailer.LOG).getLogger().setLevel(Level.ALL);
@@ -481,19 +485,19 @@ public class TestHAStateTransitions {
assertFalse(isDTRunning(nn));
banner("Transition 1->3. Should not start secret manager.");
- nn.getRpcServer().transitionToActive();
+ nn.getRpcServer().transitionToActive(REQ_INFO);
assertFalse(nn.isStandbyState());
assertTrue(nn.isInSafeMode());
assertFalse(isDTRunning(nn));
banner("Transition 3->1. Should not start secret manager.");
- nn.getRpcServer().transitionToStandby();
+ nn.getRpcServer().transitionToStandby(REQ_INFO);
assertTrue(nn.isStandbyState());
assertTrue(nn.isInSafeMode());
assertFalse(isDTRunning(nn));
banner("Transition 1->3->4. Should start secret manager.");
- nn.getRpcServer().transitionToActive();
+ nn.getRpcServer().transitionToActive(REQ_INFO);
NameNodeAdapter.leaveSafeMode(nn, false);
assertFalse(nn.isStandbyState());
assertFalse(nn.isInSafeMode());
@@ -514,13 +518,13 @@ public class TestHAStateTransitions {
for (int i = 0; i < 20; i++) {
// Loop the last check to suss out races.
banner("Transition 4->2. Should stop secret manager.");
- nn.getRpcServer().transitionToStandby();
+ nn.getRpcServer().transitionToStandby(REQ_INFO);
assertTrue(nn.isStandbyState());
assertFalse(nn.isInSafeMode());
assertFalse(isDTRunning(nn));
banner("Transition 2->4. Should start secret manager");
- nn.getRpcServer().transitionToActive();
+ nn.getRpcServer().transitionToActive(REQ_INFO);
assertFalse(nn.isStandbyState());
assertFalse(nn.isInSafeMode());
assertTrue(isDTRunning(nn));
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java?rev=1347804&r1=1347803&r2=1347804&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java Thu Jun 7 21:25:34 2012
@@ -27,6 +27,8 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
import org.apache.hadoop.ha.ServiceFailedException;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
@@ -111,7 +113,8 @@ public class TestInitializeSharedEdits {
cluster.restartNameNode(1, true);
// Make sure HA is working.
- cluster.getNameNode(0).getRpcServer().transitionToActive();
+ cluster.getNameNode(0).getRpcServer().transitionToActive(
+ new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));
FileSystem fs = null;
try {
Path newPath = new Path(TEST_PATH, pathSuffix);
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestNNHealthCheck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestNNHealthCheck.java?rev=1347804&r1=1347803&r2=1347804&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestNNHealthCheck.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestNNHealthCheck.java Thu Jun 7 21:25:34 2012
@@ -22,6 +22,8 @@ import static org.junit.Assert.fail;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
+import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
import org.apache.hadoop.ha.HealthCheckFailedException;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java?rev=1347804&r1=1347803&r2=1347804&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java Thu Jun 7 21:25:34 2012
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.tools;
import static org.junit.Assert.*;
+import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
@@ -32,14 +33,17 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.ha.HAServiceProtocol;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
+import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
import org.apache.hadoop.ha.HAServiceStatus;
import org.apache.hadoop.ha.HAServiceTarget;
import org.apache.hadoop.ha.HealthCheckFailedException;
-import org.apache.hadoop.ha.NodeFencer;
+import org.apache.hadoop.ha.ZKFCProtocol;
import org.apache.hadoop.test.MockitoUtil;
import org.junit.Before;
import org.junit.Test;
+import org.mockito.ArgumentCaptor;
import org.mockito.Mockito;
import com.google.common.base.Charsets;
@@ -52,6 +56,7 @@ public class TestDFSHAAdmin {
private ByteArrayOutputStream errOutBytes = new ByteArrayOutputStream();
private String errOutput;
private HAServiceProtocol mockProtocol;
+ private ZKFCProtocol mockZkfcProtocol;
private static final String NSID = "ns1";
@@ -59,6 +64,9 @@ public class TestDFSHAAdmin {
new HAServiceStatus(HAServiceState.STANDBY)
.setReadyToBecomeActive();
+ private ArgumentCaptor<StateChangeRequestInfo> reqInfoCaptor =
+ ArgumentCaptor.forClass(StateChangeRequestInfo.class);
+
private static String HOST_A = "1.2.3.1";
private static String HOST_B = "1.2.3.2";
@@ -81,6 +89,7 @@ public class TestDFSHAAdmin {
@Before
public void setup() throws IOException {
mockProtocol = MockitoUtil.mockProtocol(HAServiceProtocol.class);
+ mockZkfcProtocol = MockitoUtil.mockProtocol(ZKFCProtocol.class);
tool = new DFSHAAdmin() {
@Override
@@ -90,7 +99,9 @@ public class TestDFSHAAdmin {
// OVerride the target to return our mock protocol
try {
Mockito.doReturn(mockProtocol).when(spy).getProxy(
- Mockito.<Configuration>any(), Mockito.anyInt());
+ Mockito.<Configuration>any(), Mockito.anyInt());
+ Mockito.doReturn(mockZkfcProtocol).when(spy).getZKFCProxy(
+ Mockito.<Configuration>any(), Mockito.anyInt());
} catch (IOException e) {
throw new AssertionError(e); // mock setup doesn't really throw
}
@@ -139,13 +150,89 @@ public class TestDFSHAAdmin {
@Test
public void testTransitionToActive() throws Exception {
assertEquals(0, runTool("-transitionToActive", "nn1"));
- Mockito.verify(mockProtocol).transitionToActive();
+ Mockito.verify(mockProtocol).transitionToActive(
+ reqInfoCaptor.capture());
+ assertEquals(RequestSource.REQUEST_BY_USER,
+ reqInfoCaptor.getValue().getSource());
+ }
+
+ /**
+ * Test that, if automatic HA is enabled, none of the mutative operations
+ * will succeed, unless the -forcemanual flag is specified.
+ * @throws Exception
+ */
+ @Test
+ public void testMutativeOperationsWithAutoHaEnabled() throws Exception {
+ Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
+
+ // Turn on auto-HA in the config
+ HdfsConfiguration conf = getHAConf();
+ conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true);
+ conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
+ tool.setConf(conf);
+
+ // Should fail without the forcemanual flag
+ assertEquals(-1, runTool("-transitionToActive", "nn1"));
+ assertTrue(errOutput.contains("Refusing to manually manage"));
+ assertEquals(-1, runTool("-transitionToStandby", "nn1"));
+ assertTrue(errOutput.contains("Refusing to manually manage"));
+
+ Mockito.verify(mockProtocol, Mockito.never())
+ .transitionToActive(anyReqInfo());
+ Mockito.verify(mockProtocol, Mockito.never())
+ .transitionToStandby(anyReqInfo());
+
+ // Force flag should bypass the check and change the request source
+ // for the RPC
+ setupConfirmationOnSystemIn();
+ assertEquals(0, runTool("-transitionToActive", "-forcemanual", "nn1"));
+ setupConfirmationOnSystemIn();
+ assertEquals(0, runTool("-transitionToStandby", "-forcemanual", "nn1"));
+
+ Mockito.verify(mockProtocol, Mockito.times(1)).transitionToActive(
+ reqInfoCaptor.capture());
+ Mockito.verify(mockProtocol, Mockito.times(1)).transitionToStandby(
+ reqInfoCaptor.capture());
+
+ // All of the RPCs should have had the "force" source
+ for (StateChangeRequestInfo ri : reqInfoCaptor.getAllValues()) {
+ assertEquals(RequestSource.REQUEST_BY_USER_FORCED, ri.getSource());
+ }
+ }
+
+ /**
+ * Setup System.in with a stream that feeds a "yes" answer on the
+ * next prompt.
+ */
+ private static void setupConfirmationOnSystemIn() {
+ // Answer "yes" to the prompt about transition to active
+ System.setIn(new ByteArrayInputStream("yes\n".getBytes()));
+ }
+
+ /**
+ * Test that, even if automatic HA is enabled, the monitoring operations
+ * still function correctly.
+ */
+ @Test
+ public void testMonitoringOperationsWithAutoHaEnabled() throws Exception {
+ Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
+
+ // Turn on auto-HA
+ HdfsConfiguration conf = getHAConf();
+ conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true);
+ tool.setConf(conf);
+
+ assertEquals(0, runTool("-checkHealth", "nn1"));
+ Mockito.verify(mockProtocol).monitorHealth();
+
+ assertEquals(0, runTool("-getServiceState", "nn1"));
+ Mockito.verify(mockProtocol).getServiceStatus();
}
@Test
public void testTransitionToStandby() throws Exception {
assertEquals(0, runTool("-transitionToStandby", "nn1"));
- Mockito.verify(mockProtocol).transitionToStandby();
+ Mockito.verify(mockProtocol).transitionToStandby(anyReqInfo());
}
@Test
@@ -213,6 +300,19 @@ public class TestDFSHAAdmin {
tool.setConf(conf);
assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
}
+
+ @Test
+ public void testFailoverWithAutoHa() throws Exception {
+ Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
+ // Turn on auto-HA in the config
+ HdfsConfiguration conf = getHAConf();
+ conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true);
+ conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
+ tool.setConf(conf);
+
+ assertEquals(0, runTool("-failover", "nn1", "nn2"));
+ Mockito.verify(mockZkfcProtocol).gracefulFailover();
+ }
@Test
public void testForceFenceOptionListedBeforeArgs() throws Exception {
@@ -283,4 +383,8 @@ public class TestDFSHAAdmin {
LOG.info("Output:\n" + errOutput);
return ret;
}
+
+ private StateChangeRequestInfo anyReqInfo() {
+ return Mockito.<StateChangeRequestInfo>any();
+ }
}
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-policy.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-policy.xml?rev=1347804&r1=1347803&r2=1347804&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-policy.xml (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-policy.xml Thu Jun 7 21:25:34 2012
@@ -116,5 +116,11 @@
<description>ACL for HAService protocol used by HAAdmin to manage the
active and stand-by states of namenode.</description>
</property>
+ <property>
+ <name>security.zkfc.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for access to the ZK Failover Controller
+ </description>
+ </property>
</configuration>