Posted to common-commits@hadoop.apache.org by to...@apache.org on 2012/04/02 09:28:48 UTC
svn commit: r1308260 - in
/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common: ./
src/main/bin/ src/main/conf/ src/main/docs/ src/main/java/
src/main/java/org/apache/hadoop/fs/ src/main/java/org/apache/hadoop/ha/
src/main/java/org/apa...
Author: todd
Date: Mon Apr 2 07:28:42 2012
New Revision: 1308260
URL: http://svn.apache.org/viewvc?rev=1308260&view=rev
Log:
Merge trunk into auto-failover branch
Modified:
hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/CHANGES.txt (contents, props changed)
hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh
hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/bin/start-all.sh
hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/bin/stop-all.sh
hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/docs/ (props changed)
hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/ (props changed)
hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh
hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties
hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/site/apt/DeprecatedProperties.apt.vm
hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/core/ (props changed)
hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestFailoverController.java
hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/CHANGES.txt Mon Apr 2 07:28:42 2012
@@ -231,10 +231,20 @@ Release 2.0.0 - UNRELEASED
HADOOP-8216. Address log4j.properties inconsistencies btw main and
template dirs. (Patrick Hunt via eli)
+ HADOOP-8149. Cap space usage of default log4j rolling policy.
+ (Patrick Hunt via eli)
+
+ HADOOP-8211. Update commons-net version to 3.1. (eli)
+
+ HADOOP-8236. haadmin should have configurable timeouts for failover
+ commands. (todd)
+
OPTIMIZATIONS
BUG FIXES
+ HADOOP-8199. Fix issues in start-all.sh and stop-all.sh (Devaraj K via umamahesh)
+
HADOOP-7635. RetryInvocationHandler should release underlying resources on
close. (atm)
@@ -295,6 +305,9 @@ Release 2.0.0 - UNRELEASED
HADOOP-8218. RPC.closeProxy shouldn't throw error when closing a mock
(todd)
+ HADOOP-8238. NetUtils#getHostNameOfIP blows up if given ip:port
+ string w/o port. (eli)
+
BREAKDOWN OF HADOOP-7454 SUBTASKS
HADOOP-7455. HA: Introduce HA Service Protocol Interface. (suresh)
@@ -445,6 +458,8 @@ Release 0.23.2 - UNRELEASED
HADOOP-8088. User-group mapping cache incorrectly does negative caching on
transient failures (Kihwal Lee via bobby)
+ HADOOP-8208. Disallow self failover. (eli)
+
Release 0.23.1 - 2012-02-17
INCOMPATIBLE CHANGES
Propchange: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/CHANGES.txt
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt:r1306587-1308235
Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh Mon Apr 2 07:28:42 2012
@@ -107,8 +107,8 @@ fi
# some variables
export HADOOP_LOGFILE=hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.log
-export HADOOP_ROOT_LOGGER="INFO,DRFA"
-export HADOOP_SECURITY_LOGGER="INFO,DRFAS"
+export HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-"INFO,RFA"}
+export HADOOP_SECURITY_LOGGER=${HADOOP_SECURITY_LOGGER:-"INFO,RFAS"}
log=$HADOOP_LOG_DIR/hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.out
pid=$HADOOP_PID_DIR/hadoop-$HADOOP_IDENT_STRING-$command.pid
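(A note on the ${VAR:-default} form above: it is standard shell parameter expansion, so HADOOP_ROOT_LOGGER and HADOOP_SECURITY_LOGGER now default to INFO,RFA and INFO,RFAS only when the environment has not already set them; the old lines unconditionally overwrote any value the caller had exported.)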
Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/bin/start-all.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/bin/start-all.sh?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/bin/start-all.sh (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/bin/start-all.sh Mon Apr 2 07:28:42 2012
@@ -18,7 +18,7 @@
# Start all hadoop daemons. Run this on master node.
-echo "This script is Deprecated. Instead use start-dfs.sh and start-mapred.sh"
+echo "This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh"
bin=`dirname "${BASH_SOURCE-$0}"`
bin=`cd "$bin"; pwd`
@@ -28,6 +28,11 @@ HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:
. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
# start hdfs daemons if hdfs is present
-if [ -f "${HADOOP_HDFS_HOME}"/bin/start-dfs.sh ]; then
- "${HADOOP_HDFS_HOME}"/bin/start-dfs.sh --config $HADOOP_CONF_DIR
+if [ -f "${HADOOP_HDFS_HOME}"/sbin/start-dfs.sh ]; then
+ "${HADOOP_HDFS_HOME}"/sbin/start-dfs.sh --config $HADOOP_CONF_DIR
+fi
+
+# start yarn daemons if yarn is present
+if [ -f "${YARN_HOME}"/sbin/start-dfs.sh ]; then
+ "${YARN_HOME}"/sbin/start-yarn.sh --config $HADOOP_CONF_DIR
fi
Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/bin/stop-all.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/bin/stop-all.sh?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/bin/stop-all.sh (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/bin/stop-all.sh Mon Apr 2 07:28:42 2012
@@ -18,7 +18,7 @@
# Stop all hadoop daemons. Run this on master node.
-echo "This script is Deprecated. Instead use stop-dfs.sh and stop-mapred.sh"
+echo "This script is Deprecated. Instead use stop-dfs.sh and stop-yarn.sh"
bin=`dirname "${BASH_SOURCE-$0}"`
bin=`cd "$bin"; pwd`
@@ -28,6 +28,11 @@ HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:
. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
# stop hdfs daemons if hdfs is present
-if [ -f "${HADOOP_HDFS_HOME}"/bin/stop-dfs.sh ]; then
- "${HADOOP_HDFS_HOME}"/bin/stop-dfs.sh --config $HADOOP_CONF_DIR
+if [ -f "${HADOOP_HDFS_HOME}"/sbin/stop-dfs.sh ]; then
+ "${HADOOP_HDFS_HOME}"/sbin/stop-dfs.sh --config $HADOOP_CONF_DIR
+fi
+
+# stop yarn daemons if yarn is present
+if [ -f "${HADOOP_HDFS_HOME}"/sbin/stop-yarn.sh ]; then
+ "${HADOOP_HDFS_HOME}"/sbin/stop-yarn.sh --config $HADOOP_CONF_DIR
fi
Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties Mon Apr 2 07:28:42 2012
@@ -21,7 +21,6 @@ hadoop.root.logger=INFO,console
hadoop.log.dir=.
hadoop.log.file=hadoop.log
-
# Define the root logger to the system property "hadoop.root.logger".
log4j.rootLogger=${hadoop.root.logger}, EventCounter
@@ -32,6 +31,25 @@ log4j.threshold=ALL
log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
#
+# Rolling File Appender - cap space usage at 5gb.
+#
+hadoop.log.maxfilesize=256MB
+hadoop.log.maxbackupindex=20
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
+log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
# Daily Rolling File Appender
#
@@ -85,54 +103,55 @@ log4j.appender.TLA.layout.ConversionPatt
#Security appender
#
hadoop.security.logger=INFO,console
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
log4j.category.SecurityLogger=${hadoop.security.logger}
hadoop.security.log.file=SecurityAuth.audit
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+#
+# Daily Rolling Security appender
+#
log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
-
#
# hdfs audit logging
#
hdfs.audit.logger=INFO,console
+hdfs.audit.log.maxfilesize=256MB
+hdfs.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
-log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
-log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
+log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}
+log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
#
# mapred audit logging
#
mapred.audit.logger=INFO,console
+mapred.audit.log.maxfilesize=256MB
+mapred.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
-log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.MRAUDIT=org.apache.log4j.RollingFileAppender
log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
-
-#
-# Rolling File Appender
-#
-
-#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Logfile size and and 30-day backups
-#log4j.appender.RFA.MaxFileSize=1MB
-#log4j.appender.RFA.MaxBackupIndex=30
-
-#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
+log4j.appender.MRAUDIT.MaxFileSize=${mapred.audit.log.maxfilesize}
+log4j.appender.MRAUDIT.MaxBackupIndex=${mapred.audit.log.maxbackupindex}
# Custom Logging levels
@@ -153,16 +172,19 @@ log4j.appender.EventCounter=org.apache.h
# Job Summary Appender
#
# Use following logger to send summary to separate file defined by
-# hadoop.mapreduce.jobsummary.log.file rolled daily:
+# hadoop.mapreduce.jobsummary.log.file :
# hadoop.mapreduce.jobsummary.logger=INFO,JSA
#
hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
+hadoop.mapreduce.jobsummary.log.maxfilesize=256MB
+hadoop.mapreduce.jobsummary.log.maxbackupindex=20
+log4j.appender.JSA=org.apache.log4j.RollingFileAppender
log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
+log4j.appender.JSA.MaxFileSize=${hadoop.mapreduce.jobsummary.log.maxfilesize}
+log4j.appender.JSA.MaxBackupIndex=${hadoop.mapreduce.jobsummary.log.maxbackupindex}
log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-log4j.appender.JSA.DatePattern=.yyyy-MM-dd
log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
@@ -174,7 +196,7 @@ log4j.additivity.org.apache.hadoop.mapre
# Set the ResourceManager summary log level and appender
#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
-# Appender for ResourceManager Application Summary Log - rolled daily
+# Appender for ResourceManager Application Summary Log
# Requires the following properties to be set
# - hadoop.log.dir (Hadoop Log directory)
# - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
@@ -182,8 +204,9 @@ log4j.additivity.org.apache.hadoop.mapre
#log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
#log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
-#log4j.appender.RMSUMMARY=org.apache.log4j.DailyRollingFileAppender
+#log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
#log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
+#log4j.appender.RMSUMMARY.MaxFileSize=256MB
+#log4j.appender.RMSUMMARY.MaxBackupIndex=20
#log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
#log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-#log4j.appender.RMSUMMARY.DatePattern=.yyyy-MM-dd
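(On the "cap space usage at 5gb" comment above: RollingFileAppender keeps the active file plus MaxBackupIndex rolled files, so the defaults of 20 backups at 256MB each bound a log at 5GB, roughly 5.25GB counting the in-progress file. Lowering hadoop.log.maxfilesize or hadoop.log.maxbackupindex shrinks the cap proportionally.)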
Propchange: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/docs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/docs:r1306587-1308235
Propchange: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java:r1306587-1308235
Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java Mon Apr 2 07:28:42 2012
@@ -145,5 +145,21 @@ public class CommonConfigurationKeys ext
public static final String HA_HM_RPC_TIMEOUT_KEY =
"ha.health-monitor.rpc-timeout.ms";
public static final int HA_HM_RPC_TIMEOUT_DEFAULT = 45000;
+
+ /* Timeout that the FC waits for the new active to become active */
+ public static final String HA_FC_NEW_ACTIVE_TIMEOUT_KEY =
+ "ha.failover-controller.new-active.rpc-timeout.ms";
+ public static final int HA_FC_NEW_ACTIVE_TIMEOUT_DEFAULT = 60000;
+
+ /* Timeout that the FC waits for the old active to go to standby */
+ public static final String HA_FC_GRACEFUL_FENCE_TIMEOUT_KEY =
+ "ha.failover-controller.graceful-fence.rpc-timeout.ms";
+ public static final int HA_FC_GRACEFUL_FENCE_TIMEOUT_DEFAULT = 5000;
+
+ /* Timeout that the CLI (manual) FC waits for monitorHealth, getServiceState */
+ public static final String HA_FC_CLI_CHECK_TIMEOUT_KEY =
+ "ha.failover-controller.cli-check.rpc-timeout.ms";
+ public static final int HA_FC_CLI_CHECK_TIMEOUT_DEFAULT = 20000;
+
}
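A minimal sketch of tuning the new keys; the values are arbitrary illustrations, not recommendations:

    Configuration conf = new Configuration();
    // Give a slow old active 10s (default is 5s) to go to standby gracefully.
    conf.setInt(CommonConfigurationKeys.HA_FC_GRACEFUL_FENCE_TIMEOUT_KEY, 10000);
    // Allow the new active up to two minutes (default is one) to come up.
    conf.setInt(CommonConfigurationKeys.HA_FC_NEW_ACTIVE_TIMEOUT_KEY, 120000);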
Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java Mon Apr 2 07:28:42 2012
@@ -25,6 +25,7 @@ import org.apache.commons.logging.LogFac
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ipc.RPC;
@@ -42,7 +43,22 @@ public class FailoverController {
private static final Log LOG = LogFactory.getLog(FailoverController.class);
- private static final int GRACEFUL_FENCE_TIMEOUT = 5000;
+ private final int gracefulFenceTimeout;
+ private final int rpcTimeoutToNewActive;
+
+ private final Configuration conf;
+
+
+ public FailoverController(Configuration conf) {
+ this.conf = conf;
+
+ this.gracefulFenceTimeout = conf.getInt(
+ CommonConfigurationKeys.HA_FC_GRACEFUL_FENCE_TIMEOUT_KEY,
+ CommonConfigurationKeys.HA_FC_GRACEFUL_FENCE_TIMEOUT_DEFAULT);
+ this.rpcTimeoutToNewActive = conf.getInt(
+ CommonConfigurationKeys.HA_FC_NEW_ACTIVE_TIMEOUT_KEY,
+ CommonConfigurationKeys.HA_FC_NEW_ACTIVE_TIMEOUT_DEFAULT);
+ }
/**
* Perform pre-failover checks on the given service we plan to
@@ -54,18 +70,25 @@ public class FailoverController {
* allow it to become active, eg because it triggers a log roll
* so the standby can learn about new blocks and leave safemode.
*
+ * @param from currently active service
* @param target service to make active
* @param forceActive ignore toSvc if it reports that it is not ready
* @throws FailoverFailedException if we should avoid failover
*/
- private static void preFailoverChecks(HAServiceTarget target,
- boolean forceActive)
+ private void preFailoverChecks(HAServiceTarget from,
+ HAServiceTarget target,
+ boolean forceActive)
throws FailoverFailedException {
HAServiceStatus toSvcStatus;
HAServiceProtocol toSvc;
+ if (from.getAddress().equals(target.getAddress())) {
+ throw new FailoverFailedException(
+ "Can't failover a service to itself");
+ }
+
try {
- toSvc = target.getProxy();
+ toSvc = target.getProxy(conf, rpcTimeoutToNewActive);
toSvcStatus = toSvc.getServiceStatus();
} catch (IOException e) {
String msg = "Unable to get service state for " + target;
@@ -108,11 +131,10 @@ public class FailoverController {
* and no retries. Its only purpose is to avoid fencing a node that
* has already restarted.
*/
- static boolean tryGracefulFence(Configuration conf,
- HAServiceTarget svc) {
+ boolean tryGracefulFence(HAServiceTarget svc) {
HAServiceProtocol proxy = null;
try {
- proxy = svc.getProxy(conf, GRACEFUL_FENCE_TIMEOUT);
+ proxy = svc.getProxy(conf, gracefulFenceTimeout);
proxy.transitionToStandby();
return true;
} catch (ServiceFailedException sfe) {
@@ -139,19 +161,19 @@ public class FailoverController {
* @param forceActive try to make toSvc active even if it is not ready
* @throws FailoverFailedException if the failover fails
*/
- public static void failover(HAServiceTarget fromSvc,
- HAServiceTarget toSvc,
- boolean forceFence,
- boolean forceActive)
+ public void failover(HAServiceTarget fromSvc,
+ HAServiceTarget toSvc,
+ boolean forceFence,
+ boolean forceActive)
throws FailoverFailedException {
Preconditions.checkArgument(fromSvc.getFencer() != null,
"failover requires a fencer");
- preFailoverChecks(toSvc, forceActive);
+ preFailoverChecks(fromSvc, toSvc, forceActive);
// Try to make fromSvc standby
boolean tryFence = true;
- if (tryGracefulFence(new Configuration(), fromSvc)) {
+ if (tryGracefulFence(fromSvc)) {
tryFence = forceFence;
}
@@ -167,7 +189,8 @@ public class FailoverController {
boolean failed = false;
Throwable cause = null;
try {
- HAServiceProtocolHelper.transitionToActive(toSvc.getProxy());
+ HAServiceProtocolHelper.transitionToActive(
+ toSvc.getProxy(conf, rpcTimeoutToNewActive));
} catch (ServiceFailedException sfe) {
LOG.error("Unable to make " + toSvc + " active (" +
sfe.getMessage() + "). Failing back.");
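As the HAAdmin and ZKFailoverController hunks below show, failover moves from a static method to an instance method on a controller constructed with the caller's Configuration. A hedged sketch, where fromNode and toNode stand for already-resolved HAServiceTargets:

    FailoverController fc = new FailoverController(conf);
    // forceFence=false, forceActive=false: an ordinary graceful failover.
    fc.failover(fromNode, toNode, false, false);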
Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java Mon Apr 2 07:28:42 2012
@@ -30,7 +30,9 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
@@ -49,6 +51,8 @@ public abstract class HAAdmin extends Co
private static final String FORCEACTIVE = "forceactive";
private static final Log LOG = LogFactory.getLog(HAAdmin.class);
+ private int rpcTimeoutForChecks = -1;
+
private static Map<String, UsageInfo> USAGE =
ImmutableMap.<String, UsageInfo>builder()
.put("-transitionToActive",
@@ -165,9 +169,10 @@ public abstract class HAAdmin extends Co
HAServiceTarget fromNode = resolveTarget(args[0]);
HAServiceTarget toNode = resolveTarget(args[1]);
+ FailoverController fc = new FailoverController(getConf());
+
try {
- FailoverController.failover(fromNode, toNode,
- forceFence, forceActive);
+ fc.failover(fromNode, toNode, forceFence, forceActive);
out.println("Failover from "+args[0]+" to "+args[1]+" successful");
} catch (FailoverFailedException ffe) {
errOut.println("Failover failed: " + ffe.getLocalizedMessage());
@@ -184,7 +189,8 @@ public abstract class HAAdmin extends Co
return -1;
}
- HAServiceProtocol proto = resolveTarget(argv[1]).getProxy();
+ HAServiceProtocol proto = resolveTarget(argv[1]).getProxy(
+ getConf(), rpcTimeoutForChecks);
try {
HAServiceProtocolHelper.monitorHealth(proto);
} catch (HealthCheckFailedException e) {
@@ -202,7 +208,8 @@ public abstract class HAAdmin extends Co
return -1;
}
- HAServiceProtocol proto = resolveTarget(argv[1]).getProxy();
+ HAServiceProtocol proto = resolveTarget(argv[1]).getProxy(
+ getConf(), rpcTimeoutForChecks);
out.println(proto.getServiceStatus().getState());
return 0;
}
@@ -216,6 +223,16 @@ public abstract class HAAdmin extends Co
}
@Override
+ public void setConf(Configuration conf) {
+ super.setConf(conf);
+ if (conf != null) {
+ rpcTimeoutForChecks = conf.getInt(
+ CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_KEY,
+ CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_DEFAULT);
+ }
+ }
+
+ @Override
public int run(String[] argv) throws Exception {
try {
return runCmd(argv);
Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java Mon Apr 2 07:28:42 2012
@@ -330,8 +330,8 @@ public abstract class ZKFailoverControll
HAServiceTarget target = dataToTarget(data);
LOG.info("Should fence: " + target);
- boolean gracefulWorked =
- FailoverController.tryGracefulFence(conf, target);
+ boolean gracefulWorked = new FailoverController(conf)
+ .tryGracefulFence(target);
if (gracefulWorked) {
// It's possible that it's in standby but just about to go into active,
// no? Is there some race here?
Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java Mon Apr 2 07:28:42 2012
@@ -570,31 +570,29 @@ public class NetUtils {
}
}
- private static final Pattern ipPattern = // Pattern for matching hostname to ip:port
- Pattern.compile("\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}:?\\d*");
+ private static final Pattern ipPortPattern = // Pattern for matching ip[:port]
+ Pattern.compile("\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}(:\\d+)?");
/**
- * Attempt to obtain the host name of a name specified by ip address.
- * Check that the node name is an ip addr and if so, attempt to determine
- * its host name. If the name is not an IP addr, or the actual name cannot
- * be determined, return null.
+ * Attempt to obtain the host name of the given string which contains
+ * an IP address and an optional port.
*
- * @return Host name or null
+ * @param ipPort string of form ip[:port]
+ * @return Host name or null if the name can not be determined
*/
- public static String getHostNameOfIP(String ip) {
- // If name is not an ip addr, don't bother looking it up
- if(!ipPattern.matcher(ip).matches())
+ public static String getHostNameOfIP(String ipPort) {
+ if (null == ipPort || !ipPortPattern.matcher(ipPort).matches()) {
return null;
+ }
- String hostname = "";
try {
- String n = ip.substring(0, ip.indexOf(':'));
- hostname = InetAddress.getByName(n).getHostName();
+ int colonIdx = ipPort.indexOf(':');
+ String ip = (-1 == colonIdx) ? ipPort
+ : ipPort.substring(0, ipPort.indexOf(':'));
+ return InetAddress.getByName(ip).getHostName();
} catch (UnknownHostException e) {
return null;
}
-
- return hostname;
}
/**
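Per the new javadoc and the TestNetUtils cases added at the end of this commit, the port is now optional. An illustrative sketch of the patched behavior:

    NetUtils.getHostNameOfIP("127.0.0.1");      // now resolves: bare IP, no port
    NetUtils.getHostNameOfIP("127.0.0.1:8020"); // still resolves: the port is stripped
    NetUtils.getHostNameOfIP("127.0.0.1:");     // null: a colon without digits fails the pattern
    NetUtils.getHostNameOfIP("crazytown");      // null: not an IP address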
Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh Mon Apr 2 07:28:42 2012
@@ -48,10 +48,10 @@ done
export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true $HADOOP_CLIENT_OPTS"
# Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_NAMENODE_OPTS"
-HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,DRFAS $HADOOP_DATANODE_OPTS"
+export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=INFO,RFAAUDIT $HADOOP_NAMENODE_OPTS"
+export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
-export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_SECONDARYNAMENODE_OPTS"
+export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=INFO,RFAAUDIT $HADOOP_SECONDARYNAMENODE_OPTS"
# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
export HADOOP_CLIENT_OPTS="-Xmx128m $HADOOP_CLIENT_OPTS"
Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties Mon Apr 2 07:28:42 2012
@@ -21,7 +21,6 @@ hadoop.root.logger=INFO,console
hadoop.log.dir=.
hadoop.log.file=hadoop.log
-
# Define the root logger to the system property "hadoop.root.logger".
log4j.rootLogger=${hadoop.root.logger}, EventCounter
@@ -32,6 +31,25 @@ log4j.threshold=ALL
log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
#
+# Rolling File Appender - cap space usage at 5gb.
+#
+hadoop.log.maxfilesize=256MB
+hadoop.log.maxbackupindex=20
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
+log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
# Daily Rolling File Appender
#
@@ -85,54 +103,55 @@ log4j.appender.TLA.layout.ConversionPatt
#Security appender
#
hadoop.security.logger=INFO,console
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
log4j.category.SecurityLogger=${hadoop.security.logger}
hadoop.security.log.file=SecurityAuth.audit
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+#
+# Daily Rolling Security appender
+#
log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
-
#
# hdfs audit logging
#
hdfs.audit.logger=INFO,console
+hdfs.audit.log.maxfilesize=256MB
+hdfs.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
-log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
-log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
+log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}
+log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
#
# mapred audit logging
#
mapred.audit.logger=INFO,console
+mapred.audit.log.maxfilesize=256MB
+mapred.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
-log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.MRAUDIT=org.apache.log4j.RollingFileAppender
log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
-
-#
-# Rolling File Appender
-#
-
-#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Logfile size and and 30-day backups
-#log4j.appender.RFA.MaxFileSize=1MB
-#log4j.appender.RFA.MaxBackupIndex=30
-
-#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
+log4j.appender.MRAUDIT.MaxFileSize=${mapred.audit.log.maxfilesize}
+log4j.appender.MRAUDIT.MaxBackupIndex=${mapred.audit.log.maxbackupindex}
# Custom Logging levels
@@ -153,16 +172,19 @@ log4j.appender.EventCounter=org.apache.h
# Job Summary Appender
#
# Use following logger to send summary to separate file defined by
-# hadoop.mapreduce.jobsummary.log.file rolled daily:
+# hadoop.mapreduce.jobsummary.log.file :
# hadoop.mapreduce.jobsummary.logger=INFO,JSA
#
hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
+hadoop.mapreduce.jobsummary.log.maxfilesize=256MB
+hadoop.mapreduce.jobsummary.log.maxbackupindex=20
+log4j.appender.JSA=org.apache.log4j.RollingFileAppender
log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
+log4j.appender.JSA.MaxFileSize=${hadoop.mapreduce.jobsummary.log.maxfilesize}
+log4j.appender.JSA.MaxBackupIndex=${hadoop.mapreduce.jobsummary.log.maxbackupindex}
log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-log4j.appender.JSA.DatePattern=.yyyy-MM-dd
log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
@@ -174,7 +196,7 @@ log4j.additivity.org.apache.hadoop.mapre
# Set the ResourceManager summary log level and appender
#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
-# Appender for ResourceManager Application Summary Log - rolled daily
+# Appender for ResourceManager Application Summary Log
# Requires the following properties to be set
# - hadoop.log.dir (Hadoop Log directory)
# - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
@@ -182,8 +204,9 @@ log4j.additivity.org.apache.hadoop.mapre
#log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
#log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
-#log4j.appender.RMSUMMARY=org.apache.log4j.DailyRollingFileAppender
+#log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
#log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
+#log4j.appender.RMSUMMARY.MaxFileSize=256MB
+#log4j.appender.RMSUMMARY.MaxBackupIndex=20
#log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
#log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-#log4j.appender.RMSUMMARY.DatePattern=.yyyy-MM-dd
Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/site/apt/DeprecatedProperties.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/site/apt/DeprecatedProperties.apt.vm?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/site/apt/DeprecatedProperties.apt.vm (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/site/apt/DeprecatedProperties.apt.vm Mon Apr 2 07:28:42 2012
@@ -86,8 +86,6 @@ Deprecated Properties
*---+---+
|dfs.socket.timeout | dfs.client.socket-timeout
*---+---+
-|dfs.upgrade.permission | dfs.namenode.upgrade.permission
-*---+---+
|dfs.write.packet.size | dfs.client-write-packet-size
*---+---+
|fs.checkpoint.dir | dfs.namenode.checkpoint.dir
Propchange: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/core/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/core:r1306587-1308235
Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestFailoverController.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestFailoverController.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestFailoverController.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestFailoverController.java Mon Apr 2 07:28:42 2012
@@ -25,11 +25,13 @@ import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ha.TestNodeFencer.AlwaysSucceedFencer;
import org.apache.hadoop.ha.TestNodeFencer.AlwaysFailFencer;
import static org.apache.hadoop.ha.TestNodeFencer.setupFencer;
import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.test.MockitoUtil;
import org.junit.Test;
import org.mockito.Mockito;
@@ -40,6 +42,8 @@ import static org.junit.Assert.*;
public class TestFailoverController {
private InetSocketAddress svc1Addr = new InetSocketAddress("svc1", 1234);
private InetSocketAddress svc2Addr = new InetSocketAddress("svc2", 5678);
+
+ private Configuration conf = new Configuration();
HAServiceStatus STATE_NOT_READY = new HAServiceStatus(HAServiceState.STANDBY)
.setNotReadyToBecomeActive("injected not ready");
@@ -51,13 +55,13 @@ public class TestFailoverController {
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
AlwaysSucceedFencer.fenceCalled = 0;
- FailoverController.failover(svc1, svc2, false, false);
+ doFailover(svc1, svc2, false, false);
assertEquals(0, TestNodeFencer.AlwaysSucceedFencer.fenceCalled);
assertEquals(HAServiceState.STANDBY, svc1.state);
assertEquals(HAServiceState.ACTIVE, svc2.state);
AlwaysSucceedFencer.fenceCalled = 0;
- FailoverController.failover(svc2, svc1, false, false);
+ doFailover(svc2, svc1, false, false);
assertEquals(0, TestNodeFencer.AlwaysSucceedFencer.fenceCalled);
assertEquals(HAServiceState.ACTIVE, svc1.state);
assertEquals(HAServiceState.STANDBY, svc2.state);
@@ -69,7 +73,7 @@ public class TestFailoverController {
DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
- FailoverController.failover(svc1, svc2, false, false);
+ doFailover(svc1, svc2, false, false);
assertEquals(HAServiceState.STANDBY, svc1.state);
assertEquals(HAServiceState.ACTIVE, svc2.state);
}
@@ -81,7 +85,7 @@ public class TestFailoverController {
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
try {
- FailoverController.failover(svc1, svc2, false, false);
+ doFailover(svc1, svc2, false, false);
fail("Can't failover to an already active service");
} catch (FailoverFailedException ffe) {
// Expected
@@ -102,7 +106,7 @@ public class TestFailoverController {
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
try {
- FailoverController.failover(svc1, svc2, false, false);
+ doFailover(svc1, svc2, false, false);
fail("Can't failover when access is denied");
} catch (FailoverFailedException ffe) {
assertTrue(ffe.getCause().getMessage().contains("Access denied"));
@@ -118,7 +122,7 @@ public class TestFailoverController {
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
try {
- FailoverController.failover(svc1, svc2, false, false);
+ doFailover(svc1, svc2, false, false);
fail("Can't failover to a service that's not ready");
} catch (FailoverFailedException ffe) {
// Expected
@@ -131,7 +135,7 @@ public class TestFailoverController {
assertEquals(HAServiceState.STANDBY, svc2.state);
// Forcing it means we ignore readyToBecomeActive
- FailoverController.failover(svc1, svc2, false, true);
+ doFailover(svc1, svc2, false, true);
assertEquals(HAServiceState.STANDBY, svc1.state);
assertEquals(HAServiceState.ACTIVE, svc2.state);
}
@@ -145,7 +149,7 @@ public class TestFailoverController {
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
try {
- FailoverController.failover(svc1, svc2, false, false);
+ doFailover(svc1, svc2, false, false);
fail("Failover to unhealthy service");
} catch (FailoverFailedException ffe) {
// Expected
@@ -165,7 +169,7 @@ public class TestFailoverController {
AlwaysSucceedFencer.fenceCalled = 0;
try {
- FailoverController.failover(svc1, svc2, false, false);
+ doFailover(svc1, svc2, false, false);
} catch (FailoverFailedException ffe) {
fail("Faulty active prevented failover");
}
@@ -188,7 +192,7 @@ public class TestFailoverController {
AlwaysFailFencer.fenceCalled = 0;
try {
- FailoverController.failover(svc1, svc2, false, false);
+ doFailover(svc1, svc2, false, false);
fail("Failed over even though fencing failed");
} catch (FailoverFailedException ffe) {
// Expected
@@ -208,7 +212,7 @@ public class TestFailoverController {
AlwaysFailFencer.fenceCalled = 0;
try {
- FailoverController.failover(svc1, svc2, true, false);
+ doFailover(svc1, svc2, true, false);
fail("Failed over even though fencing requested and failed");
} catch (FailoverFailedException ffe) {
// Expected
@@ -232,16 +236,26 @@ public class TestFailoverController {
.defaultAnswer(new ThrowsException(
new IOException("Could not connect to host")))
.extraInterfaces(Closeable.class));
- Mockito.doReturn(errorThrowingProxy).when(svc1).getProxy();
+ Mockito.doNothing().when((Closeable)errorThrowingProxy).close();
+
+ Mockito.doReturn(errorThrowingProxy).when(svc1).getProxy(
+ Mockito.<Configuration>any(),
+ Mockito.anyInt());
DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
try {
- FailoverController.failover(svc1, svc2, false, false);
+ doFailover(svc1, svc2, false, false);
} catch (FailoverFailedException ffe) {
fail("Non-existant active prevented failover");
}
-
+ // Verify that the proxy created to try to make it go to standby
+ // gracefully used the right rpc timeout
+ Mockito.verify(svc1).getProxy(
+ Mockito.<Configuration>any(),
+ Mockito.eq(
+ CommonConfigurationKeys.HA_FC_GRACEFUL_FENCE_TIMEOUT_DEFAULT));
+
// Don't check svc1 because we can't reach it, but that's OK, it's been fenced.
assertEquals(HAServiceState.ACTIVE, svc2.state);
}
@@ -256,7 +270,7 @@ public class TestFailoverController {
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
try {
- FailoverController.failover(svc1, svc2, false, false);
+ doFailover(svc1, svc2, false, false);
fail("Failed over to a non-existant standby");
} catch (FailoverFailedException ffe) {
// Expected
@@ -274,7 +288,7 @@ public class TestFailoverController {
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
try {
- FailoverController.failover(svc1, svc2, false, false);
+ doFailover(svc1, svc2, false, false);
fail("Failover to already active service");
} catch (FailoverFailedException ffe) {
// Expected
@@ -296,7 +310,7 @@ public class TestFailoverController {
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
try {
- FailoverController.failover(svc1, svc2, true, false);
+ doFailover(svc1, svc2, true, false);
fail("Failed over to service that won't transition to active");
} catch (FailoverFailedException ffe) {
// Expected
@@ -318,7 +332,7 @@ public class TestFailoverController {
AlwaysSucceedFencer.fenceCalled = 0;
try {
- FailoverController.failover(svc1, svc2, false, false);
+ doFailover(svc1, svc2, false, false);
fail("Failed over to service that won't transition to active");
} catch (FailoverFailedException ffe) {
// Expected
@@ -342,7 +356,7 @@ public class TestFailoverController {
AlwaysFailFencer.fenceCalled = 0;
try {
- FailoverController.failover(svc1, svc2, false, false);
+ doFailover(svc1, svc2, false, false);
fail("Failed over to service that won't transition to active");
} catch (FailoverFailedException ffe) {
// Expected
@@ -368,7 +382,7 @@ public class TestFailoverController {
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
try {
- FailoverController.failover(svc1, svc2, false, false);
+ doFailover(svc1, svc2, false, false);
fail("Failover to already active service");
} catch (FailoverFailedException ffe) {
// Expected
@@ -377,4 +391,37 @@ public class TestFailoverController {
assertEquals(HAServiceState.STANDBY, svc1.state);
assertEquals(HAServiceState.STANDBY, svc2.state);
}
+
+ @Test
+ public void testSelfFailoverFails() throws Exception {
+ DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
+ DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
+ svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
+ AlwaysSucceedFencer.fenceCalled = 0;
+
+ try {
+ doFailover(svc1, svc1, false, false);
+ fail("Can't failover to yourself");
+ } catch (FailoverFailedException ffe) {
+ // Expected
+ }
+ assertEquals(0, TestNodeFencer.AlwaysSucceedFencer.fenceCalled);
+ assertEquals(HAServiceState.ACTIVE, svc1.state);
+
+ try {
+ doFailover(svc2, svc2, false, false);
+ fail("Can't failover to yourself");
+ } catch (FailoverFailedException ffe) {
+ // Expected
+ }
+ assertEquals(0, TestNodeFencer.AlwaysSucceedFencer.fenceCalled);
+ assertEquals(HAServiceState.STANDBY, svc2.state);
+ }
+
+ private void doFailover(HAServiceTarget tgt1, HAServiceTarget tgt2,
+ boolean forceFence, boolean forceActive) throws FailoverFailedException {
+ FailoverController fc = new FailoverController(conf);
+ fc.failover(tgt1, tgt2, forceFence, forceActive);
+ }
+
}
Modified: hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java?rev=1308260&r1=1308259&r2=1308260&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java Mon Apr 2 07:28:42 2012
@@ -499,6 +499,18 @@ public class TestNetUtils {
assertEquals("scheme://host.a.b/path", uri.toString());
}
+ @Test
+ public void testGetHostNameOfIP() {
+ assertNull(NetUtils.getHostNameOfIP(null));
+ assertNull(NetUtils.getHostNameOfIP(""));
+ assertNull(NetUtils.getHostNameOfIP("crazytown"));
+ assertNull(NetUtils.getHostNameOfIP("127.0.0.1:")); // no port
+ assertNull(NetUtils.getHostNameOfIP("127.0.0.1:-1")); // bogus port
+ assertNull(NetUtils.getHostNameOfIP("127.0.0.1:A")); // bogus port
+ assertNotNull(NetUtils.getHostNameOfIP("127.0.0.1"));
+ assertNotNull(NetUtils.getHostNameOfIP("127.0.0.1:1"));
+ }
+
private <T> void assertBetterArrayEquals(T[] expect, T[]got) {
String expectStr = StringUtils.join(expect, ", ");
String gotStr = StringUtils.join(got, ", ");