Posted to common-commits@hadoop.apache.org by zj...@apache.org on 2015/05/09 02:42:35 UTC

[41/50] hadoop git commit: HADOOP-11590. Update sbin commands and documentation to use new --slaves option (aw)

HADOOP-11590. Update sbin commands and documentation to use new --slaves option (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b2bab285
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b2bab285
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b2bab285

Branch: refs/heads/YARN-2928
Commit: b2bab285ab4260ab457c7cd4929651ff9c57c9d4
Parents: 2ad75e2
Author: Allen Wittenauer <aw...@apache.org>
Authored: Fri May 8 15:27:25 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Fri May 8 17:40:23 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../src/site/markdown/CommandsManual.md         |  9 +-
 .../hadoop-hdfs/src/main/bin/start-dfs.sh       | 87 ++++++++++++--------
 .../src/main/bin/start-secure-dns.sh            |  6 +-
 .../hadoop-hdfs/src/main/bin/stop-dfs.sh        | 75 +++++++++++------
 .../hadoop-hdfs/src/main/bin/stop-secure-dns.sh | 12 ++-
 .../markdown/HDFSHighAvailabilityWithQJM.md     |  2 +-
 .../hadoop-yarn/bin/start-yarn.sh               | 48 +++++++++--
 .../hadoop-yarn/bin/stop-yarn.sh                | 49 +++++++++--
 9 files changed, 209 insertions(+), 82 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2bab285/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 00ef0be..ed674ec 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -212,6 +212,9 @@ Trunk (Unreleased)
     HADOOP-11813. releasedocmaker.py should use today's date instead of
     unreleased (Darrell Taylor via aw)
 
+    HADOOP-11590. Update sbin commands and documentation to use new --slaves
+    option (aw)
+
   BUG FIXES
 
     HADOOP-11473. test-patch says "-1 overall" even when all checks are +1

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2bab285/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
index 207160e..35081a6 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
@@ -64,13 +64,14 @@ All of the shell commands will accept a common set of options. For some commands
 | SHELL\_OPTION | Description |
 |:---- |:---- |
 | `--buildpaths` | Enables developer versions of jars. |
-| `--config confdir` | Overwrites the default Configuration directory. Default is `$HADOOP_PREFIX/conf`. |
-| `--daemon mode` | If the command supports daemonization (e.g., `hdfs namenode`), execute in the appropriate mode. Supported modes are `start` to start the process in daemon mode, `stop` to stop the process, and `status` to determine the active status of the process. `status` will return an [LSB-compliant](http://refspecs.linuxbase.org/LSB_3.0.0/LSB-generic/LSB-generic/iniscrptact.html) result code. If no option is provided, commands that support daemonization will run in the foreground. |
+| `--config confdir` | Overwrites the default Configuration directory. Default is `$HADOOP_PREFIX/etc/hadoop`. |
+| `--daemon mode` | If the command supports daemonization (e.g., `hdfs namenode`), execute in the appropriate mode. Supported modes are `start` to start the process in daemon mode, `stop` to stop the process, and `status` to determine the active status of the process. `status` will return an [LSB-compliant](http://refspecs.linuxbase.org/LSB_3.0.0/LSB-generic/LSB-generic/iniscrptact.html) result code. If no option is provided, commands that support daemonization will run in the foreground. For commands that do not support daemonization, this option is ignored. |
 | `--debug` | Enables shell level configuration debugging information |
 | `--help` | Shell script usage information. |
-| `--hostnames` | A space delimited list of hostnames where to execute a multi-host subcommand. By default, the content of the `slaves` file is used. |
-| `--hosts` | A file that contains a list of hostnames where to execute a multi-host subcommand. By default, the content of the `slaves` file is used. |
+| `--hostnames` | When `--slaves` is used, override the slaves file with a space-delimited list of hostnames on which to execute a multi-host subcommand. If `--slaves` is not used, this option is ignored. |
+| `--hosts` | When `--slaves` is used, override the slaves file with another file that contains a list of hostnames on which to execute a multi-host subcommand. If `--slaves` is not used, this option is ignored. |
 | `--loglevel loglevel` | Overrides the log level. Valid log levels are FATAL, ERROR, WARN, INFO, DEBUG, and TRACE. Default is INFO. |
+| `--slaves` | If possible, execute this command on all hosts in the `slaves` file. |
 
 ### Generic Options
 

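For illustration, these options compose the way the updated sbin scripts below use them; a minimal sketch (the host names and config dir are placeholders, not part of this commit):

    # Start a datanode daemon on every host listed in the slaves file
    hdfs --slaves --config "${HADOOP_CONF_DIR}" --daemon start datanode

    # Same, but override the slaves file with an explicit host list
    hdfs --slaves --hostnames "nn1.example.com nn2.example.com" \
        --config "${HADOOP_CONF_DIR}" --daemon start namenode
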
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2bab285/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh
index 275c1c0..f5ca410 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh
@@ -65,7 +65,7 @@ fi
 
 
 #Add other possible options
-nameStartOpt="$nameStartOpt $@"
+nameStartOpt="$nameStartOpt $*"
 
 #---------------------------------------------------------
 # namenodes
@@ -76,28 +76,32 @@ if [[ -z "${NAMENODES}" ]]; then
   NAMENODES=$(hostname)
 fi
 
-echo "Starting namenodes on [$NAMENODES]"
+echo "Starting namenodes on [${NAMENODES}]"
 
-"${bin}/hadoop-daemons.sh" \
---config "${HADOOP_CONF_DIR}" \
---hostnames "${NAMENODES}" \
-start namenode ${nameStartOpt}
+"${HADOOP_HDFS_HOME}/bin/hdfs" \
+    --slaves \
+    --config "${HADOOP_CONF_DIR}" \
+    --hostnames "${NAMENODES}" \
+    --daemon start \
+    namenode ${nameStartOpt}
 
 #---------------------------------------------------------
 # datanodes (using default slaves file)
 
 if [[ -n "${HADOOP_SECURE_DN_USER}" ]] &&
-[[ -z "${HADOOP_SECURE_COMMAND}" ]]; then
-  echo "ERROR: Attempting to start secure cluster, skipping datanodes. "
-  echo "Run start-secure-dns.sh as root or configure "
-  echo "\${HADOOP_SECURE_COMMAND} to complete startup."
+   [[ -z "${HADOOP_SECURE_COMMAND}" ]]; then
+    hadoop_error "ERROR: Attempting to start secure cluster, skipping datanodes. "
+    hadoop_error "ERROR: Run start-secure-dns.sh as root or configure "
+    hadoop_error "ERROR: \${HADOOP_SECURE_COMMAND} to complete startup."
 else
-  
+
   echo "Starting datanodes"
-  
-  "${bin}/hadoop-daemons.sh" \
-  --config "${HADOOP_CONF_DIR}" \
-  start datanode ${dataStartOpt}
+
+  "${HADOOP_HDFS_HOME}/bin/hdfs" \
+    --slaves \
+    --config "${HADOOP_CONF_DIR}" \
+    --daemon start \
+    datanode ${dataStartOpt}
 fi
 
 #---------------------------------------------------------
@@ -105,17 +109,28 @@ fi
 
 SECONDARY_NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -secondarynamenodes 2>/dev/null)
 
-if [[ "${SECONDARY_NAMENODES}" == "0.0.0.0" ]]; then
-  SECONDARY_NAMENODES=$(hostname)
-fi
-
 if [[ -n "${SECONDARY_NAMENODES}" ]]; then
-  echo "Starting secondary namenodes [${SECONDARY_NAMENODES}]"
-  
-  "${bin}/hadoop-daemons.sh" \
-  --config "${HADOOP_CONF_DIR}" \
-  --hostnames "${SECONDARY_NAMENODES}" \
-  start secondarynamenode
+
+  if [[ "${NAMENODES}" =~ , ]]; then
+
+    hadoop_error "ERROR: Highly available NameNode is configured."
+    hadoop_error "ERROR: Skipping SecondaryNameNode."
+
+  else
+
+    if [[ "${SECONDARY_NAMENODES}" == "0.0.0.0" ]]; then
+      SECONDARY_NAMENODES=$(hostname)
+    fi
+
+    echo "Starting secondary namenodes [${SECONDARY_NAMENODES}]"
+
+    "${HADOOP_HDFS_HOME}/bin/hdfs" \
+      --slaves \
+      --config "${HADOOP_CONF_DIR}" \
+      --hostnames "${SECONDARY_NAMENODES}" \
+      --daemon start \
+      secondarynamenode
+  fi
 fi
 
 #---------------------------------------------------------
@@ -127,10 +142,13 @@ case "${SHARED_EDITS_DIR}" in
   qjournal://*)
     JOURNAL_NODES=$(echo "${SHARED_EDITS_DIR}" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g')
     echo "Starting journal nodes [${JOURNAL_NODES}]"
-    "${bin}/hadoop-daemons.sh" \
-    --config "${HADOOP_CONF_DIR}" \
-    --hostnames "${JOURNAL_NODES}" \
-    start journalnode
+
+    "${HADOOP_HDFS_HOME}/bin/hdfs" \
+      --slaves \
+      --config "${HADOOP_CONF_DIR}" \
+      --hostnames "${JOURNAL_NODES}" \
+      --daemon start \
+      journalnode
   ;;
 esac
 
@@ -139,10 +157,13 @@ esac
 AUTOHA_ENABLED=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey dfs.ha.automatic-failover.enabled | tr '[:upper:]' '[:lower:]')
 if [[ "${AUTOHA_ENABLED}" = "true" ]]; then
   echo "Starting ZK Failover Controllers on NN hosts [${NAMENODES}]"
-  "${bin}/hadoop-daemons.sh" \
-  --config "${HADOOP_CONF_DIR}" \
-  --hostnames "${NAMENODES}" \
-  start zkfc
+
+  "${HADOOP_HDFS_HOME}/bin/hdfs" \
+    --slaves \
+    --config "${HADOOP_CONF_DIR}" \
+    --hostnames "${NAMENODES}" \
+    --daemon start \
+    zkfc
 fi
 
 # eof

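Worth noting: the sed pipeline in the journalnode stanza parses the JournalNode hosts out of a qjournal:// shared-edits URI. A minimal sketch with placeholder hosts:

    SHARED_EDITS_DIR="qjournal://jn1.example.com:8485;jn2.example.com:8485;jn3.example.com:8485/mycluster"
    # Strip the scheme and path, split on ';', then drop the port numbers
    echo "${SHARED_EDITS_DIR}" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g'
    # -> jn1.example.com jn2.example.com jn3.example.com
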
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2bab285/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-secure-dns.sh
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-secure-dns.sh b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-secure-dns.sh
index ab69cc2..82aa7aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-secure-dns.sh
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-secure-dns.sh
@@ -43,7 +43,11 @@ else
 fi
 
 if [[ "${EUID}" -eq 0 ]] && [[ -n "${HADOOP_SECURE_DN_USER}" ]]; then
-  exec "${bin}/hadoop-daemons.sh" --config "${HADOOP_CONF_DIR}" start datanode "${dataStartOpt}"
+  exec "${HADOOP_HDFS_HOME}/bin/hdfs" \
+     --config "${HADOOP_CONF_DIR}" \
+     --slaves \
+     --daemon start \
+     datanode
 else
   echo hadoop_usage_and_exit 1
 fi

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2bab285/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh
index 4f4d4f4..549e893 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh
@@ -15,9 +15,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+
+# Stop hadoop dfs daemons.
+# Run this on master node.
+
 function hadoop_usage
 {
-  echo "Usage: start-balancer.sh [--config confdir]  [-policy <policy>] [-threshold <threshold>]"
+  echo "Usage: stop-dfs.sh [-upgrade|-rollback] [-clusterId]"
 }
 
 this="${BASH_SOURCE-$0}"
@@ -43,28 +47,38 @@ fi
 #---------------------------------------------------------
 # namenodes
 
-NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -namenodes)
+NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -namenodes 2>/dev/null)
+
+if [[ -z "${NAMENODES}" ]]; then
+  NAMENODES=$(hostname)
+fi
 
-echo "Stopping namenodes on [$NAMENODES]"
+echo "Stopping namenodes on [${NAMENODES}]"
 
-"${bin}/hadoop-daemons.sh" \
---config "${HADOOP_CONF_DIR}" \
---hostnames "${NAMENODES}" \
-stop namenode
+  "${HADOOP_HDFS_HOME}/bin/hdfs" \
+    --slaves \
+    --config "${HADOOP_CONF_DIR}" \
+    --hostnames "${NAMENODES}" \
+    --daemon stop \
+    namenode
 
 #---------------------------------------------------------
 # datanodes (using default slaves file)
 
 if [[ -n "${HADOOP_SECURE_DN_USER}" ]] &&
 [[ -z "${HADOOP_SECURE_COMMAND}" ]]; then
-  echo \
-  "ERROR: Attempting to stop secure cluster, skipping datanodes. " \
-  "Run stop-secure-dns.sh as root to complete shutdown."
+  echo "ERROR: Attempting to stop secure cluster, skipping datanodes. "
+  echo "Run stop-secure-dns.sh as root or configure "
+  echo "\${HADOOP_SECURE_COMMAND} to complete stop."
 else
-  
+
   echo "Stopping datanodes"
-  
-  "${bin}/hadoop-daemons.sh" --config "${HADOOP_CONF_DIR}" stop datanode
+
+  "${HADOOP_HDFS_HOME}/bin/hdfs" \
+    --slaves \
+    --config "${HADOOP_CONF_DIR}" \
+    --daemon stop \
+    datanode
 fi
 
 #---------------------------------------------------------
@@ -78,11 +92,13 @@ fi
 
 if [[ -n "${SECONDARY_NAMENODES}" ]]; then
   echo "Stopping secondary namenodes [${SECONDARY_NAMENODES}]"
-  
-  "${bin}/hadoop-daemons.sh" \
-  --config "${HADOOP_CONF_DIR}" \
-  --hostnames "${SECONDARY_NAMENODES}" \
-  stop secondarynamenode
+
+  "${HADOOP_HDFS_HOME}/bin/hdfs" \
+    --slaves \
+    --config "${HADOOP_CONF_DIR}" \
+    --hostnames "${SECONDARY_NAMENODES}" \
+    --daemon stop \
+    secondarynamenode
 fi
 
 #---------------------------------------------------------
@@ -94,10 +110,13 @@ case "${SHARED_EDITS_DIR}" in
   qjournal://*)
     JOURNAL_NODES=$(echo "${SHARED_EDITS_DIR}" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g')
     echo "Stopping journal nodes [${JOURNAL_NODES}]"
-    "${bin}/hadoop-daemons.sh" \
-    --config "${HADOOP_CONF_DIR}" \
-    --hostnames "${JOURNAL_NODES}" \
-    stop journalnode
+
+    "${HADOOP_HDFS_HOME}/bin/hdfs" \
+      --slaves \
+      --config "${HADOOP_CONF_DIR}" \
+      --hostnames "${JOURNAL_NODES}" \
+      --daemon stop \
+      journalnode
   ;;
 esac
 
@@ -106,9 +125,13 @@ esac
 AUTOHA_ENABLED=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey dfs.ha.automatic-failover.enabled | tr '[:upper:]' '[:lower:]')
 if [[ "${AUTOHA_ENABLED}" = "true" ]]; then
   echo "Stopping ZK Failover Controllers on NN hosts [${NAMENODES}]"
-  "${bin}/hadoop-daemons.sh" \
-  --config "${HADOOP_CONF_DIR}" \
-  --hostnames "${NAMENODES}" \
-  stop zkfc
+
+  "${HADOOP_HDFS_HOME}/bin/hdfs" \
+    --slaves \
+    --config "${HADOOP_CONF_DIR}" \
+    --hostnames "${NAMENODES}" \
+    --daemon stop \
+    zkfc
 fi
+
 # eof

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2bab285/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-secure-dns.sh
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-secure-dns.sh b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-secure-dns.sh
index efce92e..e263580 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-secure-dns.sh
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-secure-dns.sh
@@ -15,11 +15,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# Run as root to start secure datanodes in a security-enabled cluster.
+# Run as root to stop secure datanodes in a security-enabled cluster.
 
 
 function hadoop_usage {
-  echo "Usage (run as root in order to stop secure datanodes): stop-secure-dns.sh"
+  echo "Usage: stop-secure-dns.sh"
 }
 
 this="${BASH_SOURCE-$0}"
@@ -43,7 +43,11 @@ else
 fi
 
 if [[ "${EUID}" -eq 0 ]] && [[ -n "${HADOOP_SECURE_DN_USER}" ]]; then
-  "${bin}/hadoop-daemons.sh" --config "${HADOOP_CONF_DIR}" stop datanode
+  exec "${HADOOP_HDFS_HOME}/bin/hdfs" \
+     --config "${HADOOP_CONF_DIR}" \
+     --slaves \
+     --daemon stop \
+     datanode
 else
-  hadoop_exit_with_usage 1
+  echo hadoop_usage_and_exit 1
 fi

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2bab285/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
index 63813d9..06e1bb1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
@@ -355,7 +355,7 @@ The order in which you set these configurations is unimportant, but the values y
 
 ### Deployment details
 
-After all of the necessary configuration options have been set, you must start the JournalNode daemons on the set of machines where they will run. This can be done by running the command "*hadoop-daemon.sh start journalnode*" and waiting for the daemon to start on each of the relevant machines.
+After all of the necessary configuration options have been set, you must start the JournalNode daemons on the set of machines where they will run. This can be done by running the command "*hdfs --daemon start journalnode*" and waiting for the daemon to start on each of the relevant machines.
 
 Once the JournalNodes have been started, one must initially synchronize the two HA NameNodes' on-disk metadata.
 
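On a single machine, the daemon lifecycle described in the CommandsManual applies; a minimal sketch:

    hdfs --daemon start journalnode     # daemonize
    hdfs --daemon status journalnode    # LSB-style result code
    hdfs --daemon stop journalnode      # shut down
    hdfs journalnode                    # no --daemon: run in the foreground
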

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2bab285/hadoop-yarn-project/hadoop-yarn/bin/start-yarn.sh
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/start-yarn.sh b/hadoop-yarn-project/hadoop-yarn/bin/start-yarn.sh
index 3d0e772..ac18089 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/start-yarn.sh
+++ b/hadoop-yarn-project/hadoop-yarn/bin/start-yarn.sh
@@ -42,10 +42,46 @@ else
 fi
 
 # start resourceManager
-echo "Starting resourcemanager" 
-"${HADOOP_YARN_HOME}/bin/yarn" --config "${HADOOP_CONF_DIR}" --daemon start resourcemanager
-# start nodeManager
-echo "Starting nodemanagers" 
-"${bin}/yarn-daemons.sh" --config "${HADOOP_CONF_DIR}"  start nodemanager
+HARM=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey yarn.resourcemanager.ha.enabled 2>&-)
+if [[ ${HARM} = "false" ]]; then
+  echo "Starting resourcemanager"
+  "${HADOOP_YARN_HOME}/bin/yarn" \
+      --config "${HADOOP_CONF_DIR}" \
+      --daemon start \
+      resourcemanager
+else
+  logicals=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey yarn.resourcemanager.ha.rm-ids 2>&-)
+  logicals=${logicals//,/ }
+  for id in ${logicals}
+  do
+      rmhost=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey "yarn.resourcemanager.hostname.${id}" 2>&-)
+      RMHOSTS="${RMHOSTS} ${rmhost}"
+  done
+  echo "Starting resourcemanagers on [${RMHOSTS}]"
+  "${HADOOP_YARN_HOME}/bin/yarn" \
+      --config "${HADOOP_CONF_DIR}" \
+      --daemon start \
+      --slaves \
+      --hostnames "${RMHOSTS}" \
+      resourcemanager
+fi
+
+# start nodemanager
+echo "Starting nodemanagers"
+"${HADOOP_YARN_HOME}/bin/yarn" \
+    --config "${HADOOP_CONF_DIR}" \
+    --slaves \
+    --daemon start \
+    nodemanager
+
 # start proxyserver
-#"${HADOOP_YARN_HOME}/bin/yarn" --config "${HADOOP_CONF_DIR}" --daemon start proxyserver
+PROXYSERVER=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey  yarn.web-proxy.address 2>&- | cut -f1 -d:)
+if [[ -n ${PROXYSERVER} ]]; then
+  "${HADOOP_YARN_HOME}/bin/yarn" \
+      --config "${HADOOP_CONF_DIR}" \
+      --slaves \
+      --hostnames "${PROXYSERVER}" \
+      --daemon start \
+      proxyserver
+fi
+

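The HA branch above resolves ResourceManager hosts from configuration. A sketch of the expansion, assuming placeholder yarn-site.xml values (not part of this commit):

    #   yarn.resourcemanager.ha.enabled   = true
    #   yarn.resourcemanager.ha.rm-ids    = rm1,rm2
    #   yarn.resourcemanager.hostname.rm1 = rm1.example.com
    #   yarn.resourcemanager.hostname.rm2 = rm2.example.com
    logicals="rm1,rm2"
    logicals=${logicals//,/ }            # "rm1 rm2"
    for id in ${logicals}; do
      rmhost=$(hdfs getconf -confKey "yarn.resourcemanager.hostname.${id}" 2>&-)
      RMHOSTS="${RMHOSTS} ${rmhost}"     # " rm1.example.com rm2.example.com"
    done
    # --hostnames "${RMHOSTS}" then targets both ResourceManager hosts
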
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2bab285/hadoop-yarn-project/hadoop-yarn/bin/stop-yarn.sh
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/stop-yarn.sh b/hadoop-yarn-project/hadoop-yarn/bin/stop-yarn.sh
index 6feb4a7..d85b44e 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/stop-yarn.sh
+++ b/hadoop-yarn-project/hadoop-yarn/bin/stop-yarn.sh
@@ -16,8 +16,6 @@
 # limitations under the License.
 
 
-# Stop all yarn daemons.  Run this on master node.
-
 function hadoop_usage
 {
   echo "Usage: stop-yarn.sh [--config confdir]"
@@ -44,10 +42,47 @@ else
 fi
 
 # stop resourceManager
-echo "Stopping resourcemanager"
-"${HADOOP_YARN_HOME}/bin/yarn" --config "${HADOOP_CONF_DIR}" --daemon stop resourcemanager
-# stop nodeManager
+HARM=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey yarn.resourcemanager.ha.enabled 2>&-)
+if [[ ${HARM} = "false" ]]; then
+  echo "Stopping resourcemanager"
+  "${HADOOP_YARN_HOME}/bin/yarn" \
+      --config "${HADOOP_CONF_DIR}" \
+      --daemon stop \
+      resourcemanager
+else
+  logicals=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey yarn.resourcemanager.ha.rm-ids 2>&-)
+  logicals=${logicals//,/ }
+  for id in ${logicals}
+  do
+      rmhost=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey "yarn.resourcemanager.hostname.${id}" 2>&-)
+      RMHOSTS="${RMHOSTS} ${rmhost}"
+  done
+  echo "Stopping resourcemanagers on [${RMHOSTS}]"
+  "${HADOOP_YARN_HOME}/bin/yarn" \
+      --config "${HADOOP_CONF_DIR}" \
+      --daemon stop \
+      --slaves \
+      --hostnames "${RMHOSTS}" \
+      resourcemanager
+fi
+
+# stop nodemanager
 echo "Stopping nodemanagers"
-"${bin}/yarn-daemons.sh" --config "${HADOOP_CONF_DIR}"  stop nodemanager
+"${HADOOP_YARN_HOME}/bin/yarn" \
+    --config "${HADOOP_CONF_DIR}" \
+    --slaves \
+    --daemon stop \
+    nodemanager
+
 # stop proxyserver
-#"${HADOOP_YARN_HOME}/bin/yarn" --config "${HADOOP_CONF_DIR}" --daemon stop proxyserver
+PROXYSERVER=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey  yarn.web-proxy.address 2>&- | cut -f1 -d:)
+if [[ -n ${PROXYSERVER} ]]; then
+  echo "Stopping proxy server [${PROXYSERVER}]"
+  "${HADOOP_YARN_HOME}/bin/yarn" \
+      --config "${HADOOP_CONF_DIR}" \
+      --slaves \
+      --hostnames "${PROXYSERVER}" \
+      --daemon stop \
+      proxyserver
+fi
+
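Both yarn scripts derive the proxy server host the same way: yarn.web-proxy.address holds host:port, and cut keeps only the host part for --hostnames. A minimal sketch with a placeholder address:

    echo "proxy.example.com:9099" | cut -f1 -d:    # -> proxy.example.com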