You are viewing a plain-text version of this content; the canonical (HTML) link for it was included in the original message but is not preserved in this extraction.
Posted to mapreduce-commits@hadoop.apache.org by tg...@apache.org on 2012/02/11 00:26:37 UTC
svn commit: r1242977 - in
/hadoop/common/branches/branch-0.23/hadoop-mapreduce-project: CHANGES.txt
bin/mr-jobhistory-daemon.sh
hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm
Author: tgraves
Date: Fri Feb 10 23:26:37 2012
New Revision: 1242977
URL: http://svn.apache.org/viewvc?rev=1242977&view=rev
Log:
merge -r 1242975:1242976 from trunk. FIXES: MAPREDUCE-3843
Modified:
hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/CHANGES.txt
hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh
hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm
Modified: hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/CHANGES.txt?rev=1242977&r1=1242976&r2=1242977&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/CHANGES.txt (original)
+++ hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/CHANGES.txt Fri Feb 10 23:26:37 2012
@@ -753,6 +753,9 @@ Release 0.23.1 - 2012-02-08
MAPREDUCE-3770. Zombie.getJobConf() results into NPE. (amarrk)
+ MAPREDUCE-3843. Job summary log file found missing on the RM host
+ (Anupam Seth via tgraves)
+
Release 0.23.0 - 2011-11-01
INCOMPATIBLE CHANGES
Modified: hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh?rev=1242977&r1=1242976&r2=1242977&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh (original)
+++ hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh Fri Feb 10 23:26:37 2012
@@ -20,6 +20,9 @@
#
# Environment Variables
#
+# HADOOP_LOGFILE Hadoop log file.
+# HADOOP_ROOT_LOGGER Hadoop root logger.
+# HADOOP_JHS_LOGGER Hadoop JobSummary logger.
# YARN_CONF_DIR Alternate conf dir. Default is ${YARN_HOME}/conf.
# YARN_LOG_DIR Where log files are stored. PWD by default.
# YARN_MASTER host:path where hadoop code should be rsync'd from
@@ -86,8 +89,9 @@ if [ "$YARN_PID_DIR" = "" ]; then
fi
# some variables
-export YARN_LOGFILE=yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.log
-export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,DRFA}
+export HADOOP_LOGFILE=yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.log
+export HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-INFO,DRFA}
+export HADOOP_JHS_LOGGER=${HADOOP_JHS_LOGGER:-INFO,JSA}
log=$YARN_LOG_DIR/yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.out
pid=$YARN_PID_DIR/yarn-$YARN_IDENT_STRING-$command.pid
Modified: hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm?rev=1242977&r1=1242976&r2=1242977&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm (original)
+++ hadoop/common/branches/branch-0.23/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm Fri Feb 10 23:26:37 2012
@@ -437,32 +437,32 @@ Hadoop MapReduce Next Generation - Clust
Format a new distributed filesystem:
----
- $ $HADOOP_PREFIX_HOME/bin/hdfs namenode -format <cluster_name>
+ $ $HADOOP_PREFIX/bin/hdfs namenode -format <cluster_name>
----
Start the HDFS with the following command, run on the designated NameNode:
----
- $ $HADOOP_PREFIX_HOME/bin/hdfs start namenode --config $HADOOP_CONF_DIR
+ $ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs start namenode
----
Run a script to start DataNodes on all slaves:
----
- $ $HADOOP_PREFIX_HOME/bin/hdfs start datanode --config $HADOOP_CONF_DIR
+ $ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs start datanode
----
Start the YARN with the following command, run on the designated
ResourceManager:
----
- $ $YARN_HOME/bin/yarn start resourcemanager --config $HADOOP_CONF_DIR
+ $ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR start resourcemanager
----
Run a script to start NodeManagers on all slaves:
----
- $ $YARN_HOME/bin/yarn start nodemanager --config $HADOOP_CONF_DIR
+ $ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR start nodemanager
----
Start a standalone WebAppProxy server. If multiple servers
@@ -476,7 +476,7 @@ Hadoop MapReduce Next Generation - Clust
designated server:
----
- $ $YARN_HOME/bin/mapred start historyserver --config $YARN_CONF_DIR
+ $ $HADOOP_PREFIX/sbin/mr-jobhistory-daemon.sh start historyserver --config $HADOOP_CONF_DIR
----
* Hadoop Shutdown
@@ -485,26 +485,26 @@ Hadoop MapReduce Next Generation - Clust
NameNode:
----
- $ $HADOOP_PREFIX_HOME/bin/hdfs stop namenode --config $HADOOP_CONF_DIR
+ $ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs stop namenode
----
Run a script to stop DataNodes on all slaves:
----
- $ $HADOOP_PREFIX_HOME/bin/hdfs stop datanode --config $HADOOP_CONF_DIR
+ $ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs stop datanode
----
Stop the ResourceManager with the following command, run on the designated
ResourceManager:
----
- $ $YARN_HOME/bin/yarn stop resourcemanager --config $HADOOP_CONF_DIR
+ $ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR stop resourcemanager
----
Run a script to stop NodeManagers on all slaves:
----
- $ $YARN_HOME/bin/yarn stop nodemanager --config $HADOOP_CONF_DIR
+ $ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR stop nodemanager
----
Stop the WebAppProxy server. If multiple servers are used with load
@@ -519,7 +519,7 @@ Hadoop MapReduce Next Generation - Clust
designated server:
----
- $ $YARN_HOME/bin/mapred stop historyserver --config $YARN_CONF_DIR
+ $ $HADOOP_PREFIX/sbin/mr-jobhistory-daemon.sh stop historyserver --config $HADOOP_CONF_DIR
----
@@ -978,34 +978,34 @@ KVNO Timestamp Principal
Format a new distributed filesystem as <hdfs>:
----
-[hdfs]$ $HADOOP_PREFIX_HOME/bin/hdfs namenode -format <cluster_name>
+[hdfs]$ $HADOOP_PREFIX/bin/hdfs namenode -format <cluster_name>
----
Start the HDFS with the following command, run on the designated NameNode
as <hdfs>:
----
-[hdfs]$ $HADOOP_PREFIX_HOME/bin/hdfs start namenode --config $HADOOP_CONF_DIR
+[hdfs]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs start namenode
----
Run a script to start DataNodes on all slaves as <root> with a special
environment variable <<<HADOOP_SECURE_DN_USER>>> set to <hdfs>:
----
-[root]$ HADOOP_SECURE_DN_USER=hdfs $HADOOP_PREFIX_HOME/bin/hdfs start datanode --config $HADOOP_CONF_DIR
+[root]$ HADOOP_SECURE_DN_USER=hdfs $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs start datanode
----
Start the YARN with the following command, run on the designated
ResourceManager as <yarn>:
----
-[yarn]$ $YARN_HOME/bin/yarn start resourcemanager --config $HADOOP_CONF_DIR
+[yarn]$ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR start resourcemanager
----
Run a script to start NodeManagers on all slaves as <yarn>:
----
-[yarn]$ $YARN_HOME/bin/yarn start nodemanager --config $HADOOP_CONF_DIR
+[yarn]$ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR start nodemanager
----
Start a standalone WebAppProxy server. Run on the WebAppProxy
@@ -1020,7 +1020,7 @@ KVNO Timestamp Principal
designated server as <mapred>:
----
-[mapred]$ $YARN_HOME/bin/mapred start historyserver --config $YARN_CONF_DIR
+[mapred]$ $HADOOP_PREFIX/sbin/mr-jobhistory-daemon.sh start historyserver --config $HADOOP_CONF_DIR
----
* Hadoop Shutdown
@@ -1029,26 +1029,26 @@ KVNO Timestamp Principal
as <hdfs>:
----
-[hdfs]$ $HADOOP_PREFIX_HOME/bin/hdfs stop namenode --config $HADOOP_CONF_DIR
+[hdfs]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs stop namenode
----
Run a script to stop DataNodes on all slaves as <root>:
----
-[root]$ $HADOOP_PREFIX_HOME/bin/hdfs stop datanode --config $HADOOP_CONF_DIR
+[root]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs stop datanode
----
Stop the ResourceManager with the following command, run on the designated
ResourceManager as <yarn>:
----
-[yarn]$ $YARN_HOME/bin/yarn stop resourcemanager --config $HADOOP_CONF_DIR
+[yarn]$ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR stop resourcemanager
----
Run a script to stop NodeManagers on all slaves as <yarn>:
----
-[yarn]$ $YARN_HOME/bin/yarn stop nodemanager --config $HADOOP_CONF_DIR
+[yarn]$ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR stop nodemanager
----
Stop the WebAppProxy server. Run on the WebAppProxy server as
@@ -1063,7 +1063,7 @@ KVNO Timestamp Principal
designated server as <mapred>:
----
-[mapred]$ $YARN_HOME/bin/mapred stop historyserver --config $YARN_CONF_DIR
+[mapred]$ $HADOOP_PREFIX/sbin/mr-jobhistory-daemon.sh stop historyserver --config $HADOOP_CONF_DIR
----
* {Web Interfaces}