Posted to common-commits@hadoop.apache.org by aw...@apache.org on 2015/07/31 23:53:41 UTC

[01/10] hadoop git commit: HDFS-8821. Explain message "Operation category X is not supported in state standby". Contributed by Gautam Gopalakrishnan.

Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-12111 e395a3aad -> 21e21b990


HDFS-8821. Explain message "Operation category X is not supported in state standby". Contributed by Gautam Gopalakrishnan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5caa25b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5caa25b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5caa25b

Branch: refs/heads/HADOOP-12111
Commit: c5caa25b8f2953e2b7a9d2c9dcbdbf1fed95c10b
Parents: 88d8736
Author: Harsh J <ha...@cloudera.com>
Authored: Fri Jul 31 08:58:22 2015 +0530
Committer: Harsh J <ha...@cloudera.com>
Committed: Fri Jul 31 08:58:22 2015 +0530

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                       | 3 +++
 .../org/apache/hadoop/hdfs/server/namenode/ha/StandbyState.java   | 3 ++-
 2 files changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5caa25b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7f04125..69e4dd6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -357,6 +357,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+    HDFS-8821. Explain message "Operation category X is not supported
+    in state standby" (Gautam Gopalakrishnan via harsh)
+
     HDFS-3918. EditLogTailer shouldn't log WARN when other node
     is in standby mode (todd via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5caa25b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyState.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyState.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyState.java
index 60e8371..d782bdf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyState.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyState.java
@@ -82,8 +82,9 @@ public class StandbyState extends HAState {
         (op == OperationCategory.READ && context.allowStaleReads())) {
       return;
     }
+    String faq = ". Visit https://s.apache.org/sbnn-error";
     String msg = "Operation category " + op + " is not supported in state "
-        + context.getState();
+        + context.getState() + faq;
     throw new StandbyException(msg);
   }
 

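The net effect for clients: a request that lands on a standby NameNode still
fails with a StandbyException, but the message now carries a pointer to the
HA FAQ. For the READ category, for example, the full text becomes:

    Operation category READ is not supported in state standby. Visit https://s.apache.org/sbnn-error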

[08/10] hadoop git commit: HADOOP-10854. unit tests for the shell scripts (aw)

Posted by aw...@apache.org.
HADOOP-10854. unit tests for the shell scripts (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a890a315
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a890a315
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a890a315

Branch: refs/heads/HADOOP-12111
Commit: a890a31529cc625326cd3749a4960ad7c02fc6fe
Parents: 666cafc
Author: Allen Wittenauer <aw...@apache.org>
Authored: Fri Jul 31 14:34:48 2015 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Fri Jul 31 14:34:48 2015 -0700

----------------------------------------------------------------------
 BUILDING.txt                                    |   4 +-
 dev-support/docker/Dockerfile                   |   8 +
 hadoop-common-project/hadoop-common/CHANGES.txt |   2 +
 hadoop-common-project/hadoop-common/pom.xml     |  33 +++
 .../src/main/bin/hadoop-functions.sh            | 114 +++++++---
 .../scripts/hadoop-functions_test_helper.bash   |  56 +++++
 .../src/test/scripts/hadoop_add_classpath.bats  | 100 +++++++++
 .../src/test/scripts/hadoop_add_colonpath.bats  |  96 +++++++++
 .../scripts/hadoop_add_common_to_classpath.bats |  71 +++++++
 .../test/scripts/hadoop_add_javalibpath.bats    |  98 +++++++++
 .../src/test/scripts/hadoop_add_ldlibpath.bats  |  97 +++++++++
 .../src/test/scripts/hadoop_add_param.bats      |  49 +++++
 .../hadoop_add_to_classpath_userpath.bats       |  98 +++++++++
 .../src/test/scripts/hadoop_basic_init.bats     |  94 +++++++++
 .../src/test/scripts/hadoop_bootstrap.bats      |  51 +++++
 .../src/test/scripts/hadoop_confdir.bats        |  92 +++++++++
 .../test/scripts/hadoop_deprecate_envvar.bats   |  32 +++
 .../src/test/scripts/hadoop_finalize.bats       | 206 +++++++++++++++++++
 .../scripts/hadoop_finalize_catalina_opts.bats  |  56 +++++
 .../test/scripts/hadoop_finalize_classpath.bats |  64 ++++++
 .../scripts/hadoop_finalize_hadoop_heap.bats    |  87 ++++++++
 .../scripts/hadoop_finalize_hadoop_opts.bats    |  52 +++++
 .../test/scripts/hadoop_finalize_libpaths.bats  |  30 +++
 .../src/test/scripts/hadoop_java_setup.bats     |  47 +++++
 .../src/test/scripts/hadoop_os_tricks.bats      |  34 +++
 .../src/test/scripts/hadoop_rotate_log.bats     |  52 +++++
 .../src/test/scripts/hadoop_shellprofile.bats   |  91 ++++++++
 .../src/test/scripts/hadoop_slaves.bats         |  37 ++++
 .../src/test/scripts/hadoop_ssh.bats            |  51 +++++
 .../scripts/hadoop_translate_cygwin_path.bats   |  48 +++++
 .../test/scripts/hadoop_validate_classname.bats |  26 +++
 .../hadoop-common/src/test/scripts/run-bats.sh  |  43 ++++
 32 files changed, 1988 insertions(+), 31 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a890a315/BUILDING.txt
----------------------------------------------------------------------
diff --git a/BUILDING.txt b/BUILDING.txt
index 2aeade4..ee6e680 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -14,6 +14,8 @@ Requirements:
 * Jansson C XML parsing library ( if compiling libwebhdfs )
 * Linux FUSE (Filesystem in Userspace) version 2.6 or above ( if compiling fuse_dfs )
 * Internet connection for first build (to fetch all Maven and Hadoop dependencies)
+* python (for releasedocs)
+* bats (for shell code testing)
 
 ----------------------------------------------------------------------------------
 The easiest way to get an environment with all the appropriate tools is by means
@@ -106,7 +108,7 @@ Maven build goals:
 
  * Clean                     : mvn clean [-Preleasedocs]
  * Compile                   : mvn compile [-Pnative]
- * Run tests                 : mvn test [-Pnative]
+ * Run tests                 : mvn test [-Pnative] [-Pshelltest]
  * Create JAR                : mvn package
  * Run findbugs              : mvn compile findbugs:findbugs
  * Run checkstyle            : mvn compile checkstyle:checkstyle

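A quick sketch of exercising the new shell tests, assuming bats is on the
PATH (the single-file invocation is plain bats usage, not something this
patch adds):

    # through Maven, via the profile added below
    cd hadoop-common-project/hadoop-common
    mvn test -Pshelltest

    # or run one test file straight from the scripts directory
    cd src/test/scripts
    bats hadoop_add_param.bats
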
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a890a315/dev-support/docker/Dockerfile
----------------------------------------------------------------------
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index f761f8b..c8453cc 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -63,6 +63,14 @@ ENV FINDBUGS_HOME /opt/findbugs
 RUN apt-get install -y cabal-install
 RUN cabal update && cabal install shellcheck --global
 
+#####
+# bats
+#####
+
+RUN add-apt-repository ppa:duggan/bats --yes
+RUN apt-get update -qq
+RUN apt-get install -qq bats
+
 # Fixing the Apache commons / Maven dependency problem under Ubuntu:
 # See http://wiki.apache.org/commons/VfsProblems
 RUN cd /usr/share/maven/lib && ln -s ../../java/commons-lang.jar .

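For developers building outside the Docker image: bats is packaged for some
distributions; otherwise the upstream install script works. A sketch, assuming
the original sstephenson/bats repository (presumably what the PPA above
packages):

    git clone https://github.com/sstephenson/bats.git
    cd bats
    sudo ./install.sh /usr/local
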
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a890a315/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 8d0795b..5020e91 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -51,6 +51,8 @@ Trunk (Unreleased)
     HADOOP-7947. Validate XMLs if a relevant tool is available, when using
     scripts (Kengo Seki via aw)
 
+    HADOOP-10854. unit tests for the shell scripts (aw)
+
   IMPROVEMENTS
 
     HADOOP-11203. Allow distcp to accept bandwidth in fraction MegaBytes

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a890a315/hadoop-common-project/hadoop-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index 6b1388a..282735d 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -958,6 +958,39 @@
       </build>
     </profile>
 
+    <!-- profile to test shell code -->
+    <profile>
+      <id>shelltest</id>
+      <activation>
+        <activeByDefault>true</activeByDefault>
+      </activation>
+      <build>
+        <plugins>
+          <plugin>
+            <artifactId>maven-antrun-plugin</artifactId>
+            <executions>
+                <execution>
+                    <id>common-test-bats-driver</id>
+                    <phase>process-test-classes</phase>
+                    <goals>
+                        <goal>run</goal>
+                    </goals>
+                    <configuration>
+                      <target>
+                          <exec dir="src/test/scripts"
+                           executable="bash"
+                           failonerror="true">
+                           <arg value="./run-bats.sh" />
+                         </exec>
+                      </target>
+                    </configuration>
+                </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+
   </profiles>
 </project>
 

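Note that the profile is marked activeByDefault, so a plain "mvn test" runs
the bats suite as well. Skipping it is ordinary Maven profile deactivation,
nothing specific to this patch:

    mvn test -P '!shelltest'
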
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a890a315/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index 5e2a2e8..b9b7919 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -358,6 +358,7 @@ function hadoop_import_shellprofiles
 
   if [[ -d "${HADOOP_LIBEXEC_DIR}/shellprofile.d" ]]; then
     files1=(${HADOOP_LIBEXEC_DIR}/shellprofile.d/*.sh)
+    hadoop_debug "shellprofiles: ${files1[*]}"
   else
     hadoop_error "WARNING: ${HADOOP_LIBEXEC_DIR}/shellprofile.d doesn't exist. Functionality may not work."
   fi
@@ -368,7 +369,8 @@ function hadoop_import_shellprofiles
 
   for i in "${files1[@]}" "${files2[@]}"
   do
-    if [[ -n "${i}" ]]; then
+    if [[ -n "${i}"
+      && -f "${i}" ]]; then
       hadoop_debug "Profiles: importing ${i}"
       . "${i}"
     fi
@@ -490,6 +492,26 @@ function hadoop_basic_init
     export HADOOP_MAPRED_HOME="${HADOOP_PREFIX}"
   fi
 
+  if [[ ! -d "${HADOOP_COMMON_HOME}" ]]; then
+    hadoop_error "ERROR: Invalid HADOOP_COMMON_HOME"
+    exit 1
+  fi
+
+  if [[ ! -d "${HADOOP_HDFS_HOME}" ]]; then
+    hadoop_error "ERROR: Invalid HADOOP_HDFS_HOME"
+    exit 1
+  fi
+
+  if [[ ! -d "${HADOOP_YARN_HOME}" ]]; then
+    hadoop_error "ERROR: Invalid HADOOP_YARN_HOME"
+    exit 1
+  fi
+
+  if [[ ! -d "${HADOOP_MAPRED_HOME}" ]]; then
+    hadoop_error "ERROR: Invalid HADOOP_MAPRED_HOME"
+    exit 1
+  fi
+
   HADOOP_IDENT_STRING=${HADOOP_IDENT_STRING:-$USER}
   HADOOP_LOG_DIR=${HADOOP_LOG_DIR:-"${HADOOP_PREFIX}/logs"}
   HADOOP_LOGFILE=${HADOOP_LOGFILE:-hadoop.log}
@@ -670,7 +692,7 @@ function hadoop_common_slave_mode_execute
   # to prevent loops
   # Also remove --hostnames and --hosts along with arg values
   local argsSize=${#argv[@]};
-  for (( i = 0; i < $argsSize; i++ ))
+  for (( i = 0; i < argsSize; i++ ))
   do
     if [[ "${argv[$i]}" =~ ^--slaves$ ]]; then
       unset argv[$i]
@@ -681,6 +703,10 @@ function hadoop_common_slave_mode_execute
       unset argv[$i];
     fi
   done
+  if [[ ${QATESTMODE} = true ]]; then
+    echo "${argv[@]}"
+    return
+  fi
   hadoop_connect_to_hosts -- "${argv[@]}"
 }
 
@@ -727,8 +753,12 @@ function hadoop_add_param
   # delimited
   #
   if [[ ! ${!1} =~ $2 ]] ; then
-    # shellcheck disable=SC2086
-    eval $1="'${!1} $3'"
+    #shellcheck disable=SC2140
+    eval "$1"="'${!1} $3'"
+    if [[ ${!1:0:1} = ' ' ]]; then
+      #shellcheck disable=SC2140
+      eval "$1"="'${!1# }'"
+    fi
     hadoop_debug "$1 accepted $3"
   else
     hadoop_debug "$1 declined $3"
@@ -766,7 +796,8 @@ function hadoop_add_classpath
   # for wildcard at end, we can
   # at least check the dir exists
   if [[ $1 =~ ^.*\*$ ]]; then
-    local mp=$(dirname "$1")
+    local mp
+    mp=$(dirname "$1")
     if [[ ! -d "${mp}" ]]; then
       hadoop_debug "Rejected CLASSPATH: $1 (not a dir)"
       return 1
@@ -825,7 +856,7 @@ function hadoop_add_colonpath
       hadoop_debug "Prepend colonpath($1): $2"
     else
       # shellcheck disable=SC2086
-      eval $1+="'$2'"
+      eval $1+=":'$2'"
       hadoop_debug "Append colonpath($1): $2"
     fi
     return 0
@@ -864,11 +895,14 @@ function hadoop_add_javalibpath
 ## @return       1 = failure (doesn't exist or some other reason)
 function hadoop_add_ldlibpath
 {
+  local status
   # specialized function for a common use case
   hadoop_add_colonpath LD_LIBRARY_PATH "$1" "$2"
+  status=$?
 
   # note that we export this
   export LD_LIBRARY_PATH
+  return ${status}
 }
 
 ## @description  Add the common/core Hadoop components to the
@@ -876,21 +910,29 @@ function hadoop_add_ldlibpath
 ## @audience     private
 ## @stability    evolving
 ## @replaceable  yes
+## @returns      1 on failure, may exit
+## @returns      0 on success
 function hadoop_add_common_to_classpath
 {
   #
   # get all of the common jars+config in the path
   #
 
+  if [[ -z "${HADOOP_COMMON_HOME}"
+    || -z "${HADOOP_COMMON_DIR}"
+    || -z "${HADOOP_COMMON_LIB_JARS_DIR}" ]]; then
+    hadoop_debug "COMMON_HOME=${HADOOP_COMMON_HOME}"
+    hadoop_debug "COMMON_DIR=${HADOOP_COMMON_DIR}"
+    hadoop_debug "COMMON_LIB_JARS_DIR=${HADOOP_COMMON_LIB_JARS_DIR}"
+    hadoop_error "ERROR: HADOOP_COMMON_HOME or related vars are not configured."
+    exit 1
+  fi
+
   # developers
   if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
     hadoop_add_classpath "${HADOOP_COMMON_HOME}/hadoop-common/target/classes"
   fi
 
-  if [[ -d "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR}/webapps" ]]; then
-    hadoop_add_classpath "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR}"
-  fi
-
   hadoop_add_classpath "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_LIB_JARS_DIR}"'/*'
   hadoop_add_classpath "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR}"'/*'
 }
@@ -909,27 +951,27 @@ function hadoop_add_to_classpath_userpath
   # set env-var HADOOP_USER_CLASSPATH_FIRST
   # we'll also dedupe it, because we're cool like that.
   #
-  local c
-  local array
-  local i
-  local j
-  let c=0
+  declare -a array
+  declare -i c=0
+  declare -i j
+  declare -i i
+  declare idx
 
   if [[ -n "${HADOOP_CLASSPATH}" ]]; then
     # I wonder if Java runs on VMS.
-    for i in $(echo "${HADOOP_CLASSPATH}" | tr : '\n'); do
-      array[$c]=$i
-      let c+=1
+    for idx in $(echo "${HADOOP_CLASSPATH}" | tr : '\n'); do
+      array[${c}]=${idx}
+      ((c=c+1))
     done
-    let j=c-1
+    ((j=c-1))
 
     if [[ -z "${HADOOP_USE_CLIENT_CLASSLOADER}" ]]; then
       if [[ -z "${HADOOP_USER_CLASSPATH_FIRST}" ]]; then
-        for ((i=j; i>=0; i--)); do
+        for ((i=0; i<=j; i++)); do
           hadoop_add_classpath "${array[$i]}" after
         done
       else
-        for ((i=0; i<=j; i++)); do
+        for ((i=j; i>=0; i--)); do
           hadoop_add_classpath "${array[$i]}" before
         done
       fi
@@ -951,18 +993,32 @@ function hadoop_os_tricks
     Darwin)
       if [[ -z "${JAVA_HOME}" ]]; then
         if [[ -x /usr/libexec/java_home ]]; then
-          export JAVA_HOME="$(/usr/libexec/java_home)"
+          JAVA_HOME="$(/usr/libexec/java_home)"
+          export JAVA_HOME
         else
-          export JAVA_HOME=/Library/Java/Home
+          JAVA_HOME=/Library/Java/Home
+          export JAVA_HOME
         fi
       fi
     ;;
     Linux)
-      bindv6only=$(/sbin/sysctl -n net.ipv6.bindv6only 2> /dev/null)
+
+      # Newer versions of glibc use an arena memory allocator that
+      # causes virtual memory usage to explode. This interacts badly
+      # with the many threads that we use in Hadoop. Tune the variable
+      # down to prevent vmem explosion.
+      export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4}
+      # we put this in QA test mode off so that non-Linux can test
+      if [[ "${QATESTMODE}" = true ]]; then
+        return
+      fi
 
       # NOTE! HADOOP_ALLOW_IPV6 is a developer hook.  We leave it
       # undocumented in hadoop-env.sh because we don't want users to
       # shoot themselves in the foot while devs make IPv6 work.
+
+      bindv6only=$(/sbin/sysctl -n net.ipv6.bindv6only 2> /dev/null)
+
       if [[ -n "${bindv6only}" ]] &&
          [[ "${bindv6only}" -eq "1" ]] &&
          [[ "${HADOOP_ALLOW_IPV6}" != "yes" ]]; then
@@ -971,11 +1027,6 @@ function hadoop_os_tricks
         hadoop_error "ERROR: For more info: http://wiki.apache.org/hadoop/HadoopIPv6"
         exit 1
       fi
-      # Newer versions of glibc use an arena memory allocator that
-      # causes virtual memory usage to explode. This interacts badly
-      # with the many threads that we use in Hadoop. Tune the variable
-      # down to prevent vmem explosion.
-      export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4}
     ;;
     CYGWIN*)
       # Flag that we're running on Cygwin to trigger path translation later.
@@ -1019,7 +1070,7 @@ function hadoop_finalize_libpaths
   if [[ -n "${JAVA_LIBRARY_PATH}" ]]; then
     hadoop_translate_cygwin_path JAVA_LIBRARY_PATH
     hadoop_add_param HADOOP_OPTS java.library.path \
-    "-Djava.library.path=${JAVA_LIBRARY_PATH}"
+      "-Djava.library.path=${JAVA_LIBRARY_PATH}"
     export LD_LIBRARY_PATH
   fi
 }
@@ -1168,6 +1219,7 @@ function hadoop_exit_with_usage
   if [[ -z $exitcode ]]; then
     exitcode=1
   fi
+  # shellcheck disable=SC2034
   if declare -F hadoop_usage >/dev/null ; then
     hadoop_usage
   elif [[ -x /usr/bin/cowsay ]]; then
@@ -1464,6 +1516,7 @@ function hadoop_start_secure_daemon
   hadoop_rotate_log "${daemonoutfile}"
   hadoop_rotate_log "${daemonerrfile}"
 
+  # shellcheck disable=SC2153
   jsvc="${JSVC_HOME}/jsvc"
   if [[ ! -f "${jsvc}" ]]; then
     hadoop_error "JSVC_HOME is not set or set incorrectly. jsvc is required to run secure"
@@ -1490,6 +1543,7 @@ function hadoop_start_secure_daemon
     hadoop_error "ERROR:  Cannot write ${daemonname} pid ${privpidfile}."
   fi
 
+  # shellcheck disable=SC2086
   exec "${jsvc}" \
     "-Dproc_${daemonname}" \
     -outfile "${daemonoutfile}" \

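The hadoop_add_param change above also trims the leading space that used to
creep in when the target variable started out empty. A minimal interactive
sketch, sourcing the function library from the top of the source tree (the
second call is declined as a duplicate):

    $ . hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
    $ hadoop_add_param HADOOP_OPTS java.library.path "-Djava.library.path=/opt/lib"
    $ hadoop_add_param HADOOP_OPTS java.library.path "-Djava.library.path=/opt/lib"
    $ echo ">${HADOOP_OPTS}<"
    >-Djava.library.path=/opt/lib<
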
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a890a315/hadoop-common-project/hadoop-common/src/test/scripts/hadoop-functions_test_helper.bash
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop-functions_test_helper.bash b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop-functions_test_helper.bash
new file mode 100755
index 0000000..f718345
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop-functions_test_helper.bash
@@ -0,0 +1,56 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+setup() {
+
+  TMP=../../../target/test-dir/bats.$$.${RANDOM}
+  mkdir -p ${TMP}
+  TMP=$(cd -P -- "${TMP}" >/dev/null && pwd -P)
+  export TMP
+  TESTBINDIR=$(cd -P -- "$(pwd)" >/dev/null && pwd -P)
+  HADOOP_LIBEXEC_DIR=${TESTBINDIR}/../../main/bin
+  HADOOP_LIBEXEC_DIR=$(cd -P -- "${HADOOP_LIBEXEC_DIR}" >/dev/null && pwd -P)
+
+  # shellcheck disable=SC2034
+  HADOOP_SHELL_SCRIPT_DEBUG=true
+  unset HADOOP_CONF_DIR
+  unset HADOOP_HOME
+  unset HADOOP_PREFIX
+
+  echo "bindir: ${TESTBINDIR}" 2>&1
+
+  mkdir -p "${TMP}"
+
+  # shellcheck disable=SC2034
+  QATESTMODE=true
+
+  . ../../main/bin/hadoop-functions.sh
+  pushd "${TMP}" >/dev/null
+}
+
+teardown() {
+  popd >/dev/null
+  rm -rf "${TMP}"
+}
+
+
+strstr() {
+  if [ "${1#*$2}" != "${1}" ]; then
+    echo true
+  else
+    echo false
+  fi
+}
\ No newline at end of file

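The strstr helper reports substring containment via parameter expansion:
stripping the shortest "*$2" prefix changes the string only when $2 occurs
in it. For instance:

    $ strstr "hadoop-functions" "func"
    true
    $ strstr "hadoop-functions" "nope"
    false
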
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a890a315/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_classpath.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_classpath.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_classpath.bats
new file mode 100644
index 0000000..8bc50d0
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_classpath.bats
@@ -0,0 +1,100 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_add_classpath (simple not exist)" {
+  run hadoop_add_classpath testvar
+  [ "${status}" -eq 1 ]
+}
+
+@test "hadoop_add_classpath (simple wildcard not exist)" {
+  run hadoop_add_classpath testvar/*
+  [ "${status}" -eq 1 ]
+}
+
+@test "hadoop_add_classpath (simple exist)" {
+  run hadoop_add_classpath "${TMP}"
+  [ "${status}" -eq 0 ]
+}
+
+@test "hadoop_add_classpath (simple wildcard exist)" {
+  run hadoop_add_classpath "${TMP}/*"
+  [ "${status}" -eq 0 ]
+}
+
+@test "hadoop_add_classpath (simple dupecheck)" {
+  hadoop_add_classpath "${TMP}/*"
+  hadoop_add_classpath "${TMP}/*"
+  echo ">${CLASSPATH}<"
+  [ "${CLASSPATH}" = "${TMP}/*" ]
+}
+
+@test "hadoop_add_classpath (default order)" {
+  hadoop_add_classpath "${TMP}/*"
+  hadoop_add_classpath "/tmp"
+  echo ">${CLASSPATH}<"
+  [ "${CLASSPATH}" = "${TMP}/*:/tmp" ]
+}
+
+@test "hadoop_add_classpath (after order)" {
+  hadoop_add_classpath "${TMP}/*"
+  hadoop_add_classpath "/tmp" after
+  echo ">${CLASSPATH}<"
+  [ "${CLASSPATH}" = "${TMP}/*:/tmp" ]
+}
+
+@test "hadoop_add_classpath (before order)" {
+  hadoop_add_classpath "${TMP}/*"
+  hadoop_add_classpath "/tmp" before
+  echo ">${CLASSPATH}<"
+  [ "${CLASSPATH}" = "/tmp:${TMP}/*" ]
+}
+
+@test "hadoop_add_classpath (simple dupecheck 2)" {
+  hadoop_add_classpath "${TMP}/*"
+  hadoop_add_classpath "/tmp"
+  hadoop_add_classpath "${TMP}/*"
+  echo ">${CLASSPATH}<"
+  [ "${CLASSPATH}" = "${TMP}/*:/tmp" ]
+}
+
+@test "hadoop_add_classpath (dupecheck 3)" {
+  hadoop_add_classpath "${TMP}/*"
+  hadoop_add_classpath "/tmp" before
+  hadoop_add_classpath "${TMP}/*"
+  hadoop_add_classpath "/tmp" after
+  echo ">${CLASSPATH}<"
+  [ "${CLASSPATH}" = "/tmp:${TMP}/*" ]
+}
+
+@test "hadoop_add_classpath (complex ordering)" {
+  local j
+  local style="after"
+
+  # 1 -> 2:1 -> 2:1:3 -> 4:2:1:3 -> 4:2:1:3:5
+
+  for j in {1..5}; do
+    mkdir ${TMP}/${j}
+    hadoop_add_classpath "${TMP}/${j}" "${style}"
+    if [ "${style}" = "after" ]; then
+      style=before
+    else
+      style=after
+    fi
+  done
+  echo ">${CLASSPATH}<"
+  [ "${CLASSPATH}" = "${TMP}/4:${TMP}/2:${TMP}/1:${TMP}/3:${TMP}/5" ]
+}
\ No newline at end of file

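For readers new to bats: "run" executes a command and captures its exit code
and output in $status and $output instead of failing the test immediately,
which is what the bracket assertions then check. Reduced to a standalone
sketch (demo.bats here is hypothetical, not part of this patch):

    #!/usr/bin/env bats

    @test "run captures a failure instead of aborting" {
      run false
      [ "${status}" -eq 1 ]
    }
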
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a890a315/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_colonpath.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_colonpath.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_colonpath.bats
new file mode 100644
index 0000000..e6c59ad
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_colonpath.bats
@@ -0,0 +1,96 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_add_colonpath (simple not exist)" {
+  run hadoop_add_colonpath testvar
+  [ "${status}" -eq 1 ]
+}
+
+@test "hadoop_add_colonpath (simple exist)" {
+  run hadoop_add_colonpath testvar "${TMP}"
+  [ "${status}" -eq 0 ]
+}
+
+@test "hadoop_add_colonpath (simple dupecheck)" {
+  set +e
+  hadoop_add_colonpath testvar "${TMP}"
+  hadoop_add_colonpath testvar "${TMP}"
+  set -e
+  echo ">${testvar}<"
+  [ "${testvar}" = "${TMP}" ]
+}
+
+@test "hadoop_add_colonpath (default order)" {
+  hadoop_add_colonpath testvar "${TMP}"
+  hadoop_add_colonpath testvar "/tmp"
+  echo ">${testvar}<"
+  [ "${testvar}" = "${TMP}:/tmp" ]
+}
+
+@test "hadoop_add_colonpath (after order)" {
+  hadoop_add_colonpath testvar "${TMP}"
+  hadoop_add_colonpath testvar "/tmp" after
+  echo ">${testvar}<"
+  [ "${testvar}" = "${TMP}:/tmp" ]
+}
+
+@test "hadoop_add_colonpath (before order)" {
+  hadoop_add_colonpath testvar "${TMP}"
+  hadoop_add_colonpath testvar "/tmp" before
+  echo ">${testvar}<"
+  [ "${testvar}" = "/tmp:${TMP}" ]
+}
+
+@test "hadoop_add_colonpath (simple dupecheck 2)" {
+  set +e
+  hadoop_add_colonpath testvar "${TMP}"
+  hadoop_add_colonpath testvar "/tmp"
+  hadoop_add_colonpath testvar "${TMP}"
+  set -e
+  echo ">${testvar}<"
+  [ "${testvar}" = "${TMP}:/tmp" ]
+}
+
+@test "hadoop_add_colonpath (dupecheck 3)" {
+  set +e
+  hadoop_add_colonpath testvar "${TMP}"
+  hadoop_add_colonpath testvar "/tmp" before
+  hadoop_add_colonpath testvar "${TMP}"
+  hadoop_add_colonpath testvar "/tmp" after
+  set -e
+  echo ">${testvar}<"
+  [ "${testvar}" = "/tmp:${TMP}" ]
+}
+
+@test "hadoop_add_colonpath (complex ordering)" {
+  local j
+  local style="after"
+
+  # 1 -> 2:1 -> 2:1:3 -> 4:2:1:3 -> 4:2:1:3:5
+
+  for j in {1..5}; do
+    mkdir ${TMP}/${j}
+    hadoop_add_colonpath testvar "${TMP}/${j}" "${style}"
+    if [ "${style}" = "after" ]; then
+      style=before
+    else
+      style=after
+    fi
+  done
+  echo ">${testvar}<"
+  [ "${testvar}" = "${TMP}/4:${TMP}/2:${TMP}/1:${TMP}/3:${TMP}/5" ]
+}
\ No newline at end of file

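Aside on the set +e / set -e pairs in the dupe-check cases: these functions
return non-zero when they decline a duplicate, and bats fails a test on the
first command that returns non-zero, so the guards keep the declined call
from aborting the test before the assertion runs.
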
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a890a315/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_common_to_classpath.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_common_to_classpath.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_common_to_classpath.bats
new file mode 100644
index 0000000..14e75a6
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_common_to_classpath.bats
@@ -0,0 +1,71 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+freetheclasses () {
+  local j
+
+  for j in HADOOP_CLASSPATH  \
+        HADOOP_ENABLE_BUILD_PATHS \
+        CLASSPATH HADOOP_COMMON_DIR \
+        HADOOP_COMMON_HOME \
+        HADOOP_COMMON_LIB_JARS_DIR \
+        HADOOP_ENABLE_BUILD_PATHS ; do
+      unset "${j}"
+  done
+}
+
+createdirs () {
+  local j
+
+  for j in hadoop-common/target/classes \
+           commondir/webapps commonlibjars ; do
+    mkdir -p "${TMP}/${j}"
+    touch "${TMP}/${j}/fake.jar"
+  done
+  HADOOP_COMMON_HOME=${TMP}
+  HADOOP_COMMON_DIR=commondir
+  HADOOP_COMMON_LIB_JARS_DIR=commonlibjars
+}
+
+@test "hadoop_add_common_to_classpath (negative)" {
+   freetheclasses
+   createdirs
+   unset HADOOP_COMMON_HOME
+   run hadoop_add_common_to_classpath
+   [ "${status}" -eq 1 ]
+}
+
+@test "hadoop_add_common_to_classpath (positive)" {
+   freetheclasses
+   createdirs
+   set +e
+   hadoop_add_common_to_classpath
+   set -e
+   echo ">${CLASSPATH}<"
+   [ "${CLASSPATH}" = "${TMP}/commonlibjars/*:${TMP}/commondir/*" ]
+}
+
+@test "hadoop_add_common_to_classpath (build paths)" {
+   freetheclasses
+   createdirs
+   HADOOP_ENABLE_BUILD_PATHS=true
+   set +e
+   hadoop_add_common_to_classpath
+   set -e
+   echo ">${CLASSPATH}<"
+   [ "${CLASSPATH}" = "${TMP}/hadoop-common/target/classes:${TMP}/commonlibjars/*:${TMP}/commondir/*" ]
+ }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a890a315/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_javalibpath.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_javalibpath.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_javalibpath.bats
new file mode 100644
index 0000000..b17b546
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_javalibpath.bats
@@ -0,0 +1,98 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_add_javalibpath (simple not exist)" {
+  run hadoop_add_javalibpath "${TMP}/foo"
+  [ "${status}" -eq 1 ]
+}
+
+
+@test "hadoop_add_javalibpath (simple exist)" {
+  run hadoop_add_javalibpath "${TMP}"
+  [ "${status}" -eq 0 ]
+}
+
+
+@test "hadoop_add_javalibpath (simple dupecheck)" {
+  set +e
+  hadoop_add_javalibpath "${TMP}"
+  hadoop_add_javalibpath "${TMP}"
+  set -e
+  echo ">${JAVA_LIBRARY_PATH}<"
+  [ "${JAVA_LIBRARY_PATH}" = "${TMP}" ]
+}
+
+@test "hadoop_add_javalibpath (default order)" {
+  hadoop_add_javalibpath "${TMP}"
+  hadoop_add_javalibpath "/tmp"
+  echo ">${JAVA_LIBRARY_PATH}<"
+  [ "${JAVA_LIBRARY_PATH}" = "${TMP}:/tmp" ]
+}
+
+@test "hadoop_add_javalibpath (after order)" {
+  hadoop_add_javalibpath "${TMP}"
+  hadoop_add_javalibpath "/tmp" after
+  echo ">${JAVA_LIBRARY_PATH}<"
+  [ "${JAVA_LIBRARY_PATH}" = "${TMP}:/tmp" ]
+}
+
+@test "hadoop_add_javalibpath (before order)" {
+  hadoop_add_javalibpath "${TMP}"
+  hadoop_add_javalibpath "/tmp" before
+  echo ">${JAVA_LIBRARY_PATH}<"
+  [ "${JAVA_LIBRARY_PATH}" = "/tmp:${TMP}" ]
+}
+
+@test "hadoop_add_javalibpath (simple dupecheck 2)" {
+  set +e
+  hadoop_add_javalibpath "${TMP}"
+  hadoop_add_javalibpath "/tmp"
+  hadoop_add_javalibpath "${TMP}"
+  set -e
+  echo ">${JAVA_LIBRARY_PATH}<"
+  [ "${JAVA_LIBRARY_PATH}" = "${TMP}:/tmp" ]
+}
+
+@test "hadoop_add_javalibpath (dupecheck 3)" {
+  set +e
+  hadoop_add_javalibpath "${TMP}"
+  hadoop_add_javalibpath "/tmp" before
+  hadoop_add_javalibpath "${TMP}"
+  hadoop_add_javalibpath "/tmp" after
+  set -e
+  echo ">${JAVA_LIBRARY_PATH}<"
+  [ "${JAVA_LIBRARY_PATH}" = "/tmp:${TMP}" ]
+}
+
+@test "hadoop_add_javalibpath (complex ordering)" {
+  local j
+  local style="after"
+
+  # 1 -> 2:1 -> 2:1:3 -> 4:2:1:3 -> 4:2:1:3:5
+
+  for j in {1..5}; do
+    mkdir ${TMP}/${j}
+    hadoop_add_javalibpath "${TMP}/${j}" "${style}"
+    if [ "${style}" = "after" ]; then
+      style=before
+    else
+      style=after
+    fi
+  done
+  echo ">${JAVA_LIBRARY_PATH}<"
+  [ "${JAVA_LIBRARY_PATH}" = "${TMP}/4:${TMP}/2:${TMP}/1:${TMP}/3:${TMP}/5" ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a890a315/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_ldlibpath.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_ldlibpath.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_ldlibpath.bats
new file mode 100644
index 0000000..4f909e2
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_ldlibpath.bats
@@ -0,0 +1,97 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_add_ldlibpath (simple not exist)" {
+  run hadoop_add_ldlibpath ${TMP}/foo
+  [ "${status}" -eq 1 ]
+}
+
+
+@test "hadoop_add_ldlibpath (simple exist)" {
+  run hadoop_add_ldlibpath "${TMP}"
+  [ "${status}" -eq 0 ]
+}
+
+@test "hadoop_add_ldlibpath (simple dupecheck)" {
+  set +e
+  hadoop_add_ldlibpath "${TMP}"
+  hadoop_add_ldlibpath "${TMP}"
+  set -e
+  echo ">${LD_LIBRARY_PATH}<"
+  [ "${LD_LIBRARY_PATH}" = "${TMP}" ]
+}
+
+@test "hadoop_add_ldlibpath (default order)" {
+  hadoop_add_ldlibpath "${TMP}"
+  hadoop_add_ldlibpath "/tmp"
+  echo ">${LD_LIBRARY_PATH}<"
+  [ "${LD_LIBRARY_PATH}" = "${TMP}:/tmp" ]
+}
+
+@test "hadoop_add_ldlibpath (after order)" {
+  hadoop_add_ldlibpath "${TMP}"
+  hadoop_add_ldlibpath "/tmp" after
+  echo ">${LD_LIBRARY_PATH}<"
+  [ "${LD_LIBRARY_PATH}" = "${TMP}:/tmp" ]
+}
+
+@test "hadoop_add_ldlibpath (before order)" {
+  hadoop_add_ldlibpath "${TMP}"
+  hadoop_add_ldlibpath "/tmp" before
+  echo ">${LD_LIBRARY_PATH}<"
+  [ "${LD_LIBRARY_PATH}" = "/tmp:${TMP}" ]
+}
+
+@test "hadoop_add_ldlibpath (simple dupecheck 2)" {
+  set +e
+  hadoop_add_ldlibpath "${TMP}"
+  hadoop_add_ldlibpath "/tmp"
+  hadoop_add_ldlibpath "${TMP}"
+  set -e
+  echo ">${LD_LIBRARY_PATH}<"
+  [ "${LD_LIBRARY_PATH}" = "${TMP}:/tmp" ]
+}
+
+@test "hadoop_add_ldlibpath (dupecheck 3)" {
+  set +e
+  hadoop_add_ldlibpath "${TMP}"
+  hadoop_add_ldlibpath "/tmp" before
+  hadoop_add_ldlibpath "${TMP}"
+  hadoop_add_ldlibpath "/tmp" after
+  set -e
+  echo ">${LD_LIBRARY_PATH}<"
+  [ "${LD_LIBRARY_PATH}" = "/tmp:${TMP}" ]
+}
+
+@test "hadoop_add_ldlibpath (complex ordering)" {
+  local j
+  local style="after"
+
+  # 1 -> 2:1 -> 2:1:3 -> 4:2:1:3 -> 4:2:1:3:5
+
+  for j in {1..5}; do
+    mkdir ${TMP}/${j}
+    hadoop_add_ldlibpath "${TMP}/${j}" "${style}"
+    if [ "${style}" = "after" ]; then
+      style=before
+    else
+      style=after
+    fi
+  done
+  echo ">${LD_LIBRARY_PATH}<"
+  [ "${LD_LIBRARY_PATH}" = "${TMP}/4:${TMP}/2:${TMP}/1:${TMP}/3:${TMP}/5" ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a890a315/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_param.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_param.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_param.bats
new file mode 100644
index 0000000..5d65db0
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_param.bats
@@ -0,0 +1,49 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_add_param (positive 1)" {
+  hadoop_add_param testvar foo foo
+  echo ">${testvar}<"
+  [ "${testvar}" = "foo" ]
+}
+
+@test "hadoop_add_param (negative)" {
+  hadoop_add_param testvar foo foo
+  hadoop_add_param testvar foo foo
+  echo ">${testvar}<"
+  [ "${testvar}" = "foo" ]
+}
+
+@test "hadoop_add_param (positive 2)" {
+  hadoop_add_param testvar foo foo
+  hadoop_add_param testvar foo foo
+  hadoop_add_param testvar bar bar
+  echo ">${testvar}<"
+  [ "${testvar}" = "foo bar" ]
+}
+
+@test "hadoop_add_param (positive 3)" {
+  hadoop_add_param testvar foo foo
+  hadoop_add_param testvar foo foo
+  hadoop_add_param testvar bar bar
+  hadoop_add_param testvar bar bar
+  hadoop_add_param testvar baz baz
+  hadoop_add_param testvar baz baz
+
+  echo ">${testvar}<"
+  [ "${testvar}" = "foo bar baz" ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a890a315/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_to_classpath_userpath.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_to_classpath_userpath.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_to_classpath_userpath.bats
new file mode 100644
index 0000000..4d6667f
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_to_classpath_userpath.bats
@@ -0,0 +1,98 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+freetheclasses () {
+  local j
+
+  for j in HADOOP_CLASSPATH  \
+      HADOOP_USE_CLIENT_CLASSLOADER \
+      HADOOP_USER_CLASSPATH_FIRST \
+      CLASSPATH; do
+      unset "${j}"
+  done
+}
+
+createdirs () {
+  local j
+
+  for j in new old foo bar baz; do
+    mkdir -p "${TMP}/${j}"
+  done
+}
+
+@test "hadoop_add_to_classpath_userpath (nothing)" {
+   freetheclasses
+   hadoop_add_to_classpath_userpath
+   [ -z "${CLASSPATH}" ]
+}
+
+@test "hadoop_add_to_classpath_userpath (none)" {
+   freetheclasses
+   CLASSPATH=test
+   hadoop_add_to_classpath_userpath
+   [ "${CLASSPATH}" = "test" ]
+}
+
+@test "hadoop_add_to_classpath_userpath (only)" {
+   freetheclasses
+   createdirs
+   HADOOP_CLASSPATH="${TMP}/new"
+   hadoop_add_to_classpath_userpath
+   [ "${CLASSPATH}" = "${TMP}/new" ]
+}
+
+@test "hadoop_add_to_classpath_userpath (classloader)" {
+   freetheclasses
+   createdirs
+   HADOOP_CLASSPATH="${TMP}/new"
+   HADOOP_USE_CLIENT_CLASSLOADER="true"
+   hadoop_add_to_classpath_userpath
+   [ -z "${CLASSPATH}" ]
+}
+
+@test "hadoop_add_to_classpath_userpath (1+1 dupe)" {
+   freetheclasses
+   createdirs
+   CLASSPATH=${TMP}/foo
+   HADOOP_CLASSPATH=${TMP}/foo
+   HADOOP_USER_CLASSPATH_FIRST=""
+   hadoop_add_to_classpath_userpath
+   echo ">${CLASSPATH}<"
+   [ ${CLASSPATH} = "${TMP}/foo" ]
+}
+
+@test "hadoop_add_to_classpath_userpath (3+2 after)" {
+   freetheclasses
+   createdirs
+   CLASSPATH=${TMP}/foo:${TMP}/bar:${TMP}/baz
+   HADOOP_CLASSPATH=${TMP}/new:${TMP}/old
+   HADOOP_USER_CLASSPATH_FIRST=""
+   hadoop_add_to_classpath_userpath
+   echo ">${CLASSPATH}<"
+   [ ${CLASSPATH} = "${TMP}/foo:${TMP}/bar:${TMP}/baz:${TMP}/new:${TMP}/old" ]
+}
+
+@test "hadoop_add_to_classpath_userpath (3+2 before)" {
+   freetheclasses
+   createdirs
+   CLASSPATH=${TMP}/foo:${TMP}/bar:${TMP}/baz
+   HADOOP_CLASSPATH=${TMP}/new:${TMP}/old
+   HADOOP_USER_CLASSPATH_FIRST="true"
+   hadoop_add_to_classpath_userpath
+   echo ">${CLASSPATH}<"
+   [ ${CLASSPATH} = "${TMP}/new:${TMP}/old:${TMP}/foo:${TMP}/bar:${TMP}/baz" ]
+}
\ No newline at end of file

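Operationally, these are the two knobs the tests pin down. A hypothetical
user session:

    # append user jars after Hadoop's own entries (the default)
    export HADOOP_CLASSPATH=/opt/myjars/'*'
    hadoop classpath

    # or put them in front
    export HADOOP_USER_CLASSPATH_FIRST=true
    hadoop classpath
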
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a890a315/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_basic_init.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_basic_init.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_basic_init.bats
new file mode 100644
index 0000000..74e2497
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_basic_init.bats
@@ -0,0 +1,94 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+
+
+basicinitsetup () {
+  local j
+
+  testvars="HADOOP_IDENT_STRING \
+        HADOOP_LOG_DIR \
+        HADOOP_LOGFILE \
+        HADOOP_LOGLEVEL \
+        HADOOP_NICENESS \
+        HADOOP_STOP_TIMEOUT \
+        HADOOP_PID_DIR \
+        HADOOP_ROOT_LOGGER \
+        HADOOP_DAEMON_ROOT_LOGGER \
+        HADOOP_SECURITY_LOGGER \
+        HADOOP_SSH_OPTS \
+        HADOOP_SECURE_LOG_DIR \
+        HADOOP_SECURE_PID_DIR \
+        HADOOP_SSH_PARALLEL"
+
+  dirvars="HADOOP_COMMON_HOME \
+        HADOOP_MAPRED_HOME \
+        HADOOP_HDFS_HOME \
+        HADOOP_YARN_HOME"
+
+  for j in ${testvars}; do
+    unset "${j}"
+  done
+
+  HADOOP_PREFIX=${TMP}
+}
+
+check_var_values () {
+  for j in ${testvars}; do
+    echo "Verifying ${j} has a value"
+    [ -n "${!j}" ]
+  done
+}
+
+@test "hadoop_basic_init (bad dir errors)" {
+  local j
+  local i
+  # we need to do these in the same order for
+  # the unit test, so that the tests are easier
+  # to write/test
+  basicinitsetup
+  for j in ${dirvars}; do
+    echo "testing ${j}"
+    i=${TMP}/${j}
+    mkdir -p "${i}"
+    #shellcheck disable=SC2086
+    eval ${j}=${i}
+    hadoop_basic_init
+    echo "Verifying $j has >${i}< >${!j}<"
+    [ ${!j} = ${i} ]
+  done
+}
+
+
+@test "hadoop_basic_init (no non-dir overrides)" {
+  basicinitsetup
+  hadoop_basic_init
+  check_var_values
+}
+
+@test "hadoop_basic_init (test non-dir overrides)" {
+  local j
+  for j in ${testvars}; do
+    basicinitsetup
+    echo testing ${j}
+    eval ${j}=foo
+    hadoop_basic_init
+    check_var_values
+    echo "Verifying $j has foo >${!j}<"
+    [ "${!j}" = foo ]
+  done
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a890a315/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_bootstrap.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_bootstrap.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_bootstrap.bats
new file mode 100644
index 0000000..0fd5d21
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_bootstrap.bats
@@ -0,0 +1,51 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_deprecate_envvar (no libexec)" {
+  unset HADOOP_LIBEXEC_DIR
+  run hadoop_bootstrap
+  [ "${status}" -eq 1 ]
+}
+
+@test "hadoop_deprecate_envvar (libexec)" {
+  unset   HADOOP_PREFIX
+  unset   HADOOP_COMMON_DIR
+  unset   HADOOP_COMMON_LIB_JARS_DIR
+  unset   HDFS_DIR
+  unset   HDFS_LIB_JARS_DIR
+  unset   YARN_DIR
+  unset   YARN_LIB_JARS_DIR
+  unset   MAPRED_DIR
+  unset   MAPRED_LIB_JARS_DIR
+  unset   TOOL_PATH
+  unset   HADOOP_OS_TYPE
+
+  hadoop_bootstrap
+
+  # all of these should be set
+  [ -n "${HADOOP_PREFIX}" ]
+  [ -n "${HADOOP_COMMON_DIR}" ]
+  [ -n "${HADOOP_COMMON_LIB_JARS_DIR}" ]
+  [ -n "${HDFS_DIR}" ]
+  [ -n "${HDFS_LIB_JARS_DIR}" ]
+  [ -n "${YARN_DIR}" ]
+  [ -n "${YARN_LIB_JARS_DIR}" ]
+  [ -n "${MAPRED_DIR}" ]
+  [ -n "${MAPRED_LIB_JARS_DIR}" ]
+  [ -n "${TOOL_PATH}" ]
+  [ -n "${HADOOP_OS_TYPE}" ]
+} 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a890a315/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_confdir.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_confdir.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_confdir.bats
new file mode 100644
index 0000000..3e42da9
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_confdir.bats
@@ -0,0 +1,92 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+create_fake_dirs () {
+  HADOOP_PREFIX=${TMP}
+  for j in conf etc/hadoop; do
+    mkdir -p "${HADOOP_PREFIX}/${j}"
+    echo "unittest=${j}" > "${HADOOP_PREFIX}/${j}/hadoop-env.sh"
+  done
+}
+
+@test "hadoop_find_confdir (default)" {
+  create_fake_dirs
+  hadoop_find_confdir
+  [ -n "${HADOOP_CONF_DIR}" ]
+}
+
+@test "hadoop_find_confdir (bw compat: conf)" {
+  create_fake_dirs
+  hadoop_find_confdir
+  echo ">${HADOOP_CONF_DIR}< >${HADOOP_PREFIX}/conf<"
+  [ "${HADOOP_CONF_DIR}" = ${HADOOP_PREFIX}/conf ]
+}
+
+@test "hadoop_find_confdir (etc/hadoop)" {
+  create_fake_dirs
+  rm -rf "${HADOOP_PREFIX}/conf"
+  hadoop_find_confdir
+  [ "${HADOOP_CONF_DIR}" = ${HADOOP_PREFIX}/etc/hadoop ]
+}
+
+@test "hadoop_verify_confdir (negative) " {
+  create_fake_dirs
+  HADOOP_CONF_DIR=${HADOOP_PREFIX}/conf
+  run hadoop_verify_confdir
+  [ -n "${output}" ]
+}
+
+@test "hadoop_verify_confdir (positive) " {
+  create_fake_dirs
+  HADOOP_CONF_DIR=${HADOOP_PREFIX}/conf
+  touch "${HADOOP_CONF_DIR}/log4j.properties"
+  run hadoop_verify_confdir
+  [ -z "${output}" ]
+}
+
+@test "hadoop_exec_hadoopenv (positive) " {
+  create_fake_dirs
+  HADOOP_CONF_DIR=${HADOOP_PREFIX}/conf
+  hadoop_exec_hadoopenv
+  [ -n "${HADOOP_ENV_PROCESSED}" ]
+  [ "${unittest}" = conf ]
+}
+
+@test "hadoop_exec_hadoopenv (negative) " {
+  create_fake_dirs
+  HADOOP_CONF_DIR=${HADOOP_PREFIX}/conf
+  HADOOP_ENV_PROCESSED=true
+  hadoop_exec_hadoopenv
+  [ -z "${unittest}" ]
+}
+
+@test "hadoop_exec_userfuncs" {
+  create_fake_dirs
+  HADOOP_CONF_DIR=${HADOOP_PREFIX}/conf
+  echo "unittest=userfunc" > "${HADOOP_CONF_DIR}/hadoop-user-functions.sh"
+  hadoop_exec_userfuncs
+  [ "${unittest}" = "userfunc" ]
+}
+
+@test "hadoop_exec_hadooprc" {
+  HOME=${TMP}
+  echo "unittest=hadooprc" > "${TMP}/.hadooprc"
+  hadoop_exec_hadooprc
+  [ ${unittest} = "hadooprc" ]
+}
+
+

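hadoop_exec_hadooprc sources ${HOME}/.hadooprc, which gives users a per-login
hook where the shell function API is already loaded. A hypothetical example:

    # ~/.hadooprc
    hadoop_add_classpath /opt/local/jars/'*'
    hadoop_add_param HADOOP_OPTS java.net.preferIPv4Stack -Djava.net.preferIPv4Stack=true
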
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a890a315/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_deprecate_envvar.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_deprecate_envvar.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_deprecate_envvar.bats
new file mode 100644
index 0000000..ae02c1f
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_deprecate_envvar.bats
@@ -0,0 +1,32 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_deprecate_envvar (replace)" {
+  OLD=value1
+  NEW=value2
+  hadoop_deprecate_envvar OLD NEW
+  [ "${NEW}" = "${OLD}" ]
+}
+
+
+@test "hadoop_deprecate_envvar (no replace)" {
+  OLD=
+  NEW=value2
+  hadoop_deprecate_envvar OLD NEW
+  [ "${NEW}" = value2 ]
+}
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a890a315/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_finalize.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_finalize.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_finalize.bats
new file mode 100644
index 0000000..668c115
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_finalize.bats
@@ -0,0 +1,206 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_finalize (shellprofiles)" {
+  HADOOP_IS_CYGWIN=false
+
+  hadoop_shellprofiles_finalize () { testvar=shell; }
+  hadoop_finalize_classpath () { true; }
+  hadoop_finalize_libpaths () { true; }
+  hadoop_finalize_hadoop_heap () { true; }
+  hadoop_finalize_hadoop_opts () { true; }
+  hadoop_translate_cygwin_path () { true; }
+
+  hadoop_finalize
+
+  [ "${testvar}" = "shell" ];
+}
+
+@test "hadoop_finalize (classpath)" {
+  HADOOP_IS_CYGWIN=false
+
+  hadoop_shellprofiles_finalize () { true; }
+  hadoop_finalize_classpath () {  testvar=class; }
+  hadoop_finalize_libpaths () { true; }
+  hadoop_finalize_hadoop_heap () { true; }
+  hadoop_finalize_hadoop_opts () { true; }
+  hadoop_translate_cygwin_path () { true; }
+
+  hadoop_finalize
+
+  [ "${testvar}" = "class" ];
+}
+
+@test "hadoop_finalize (libpaths)" {
+  HADOOP_IS_CYGWIN=false
+
+  hadoop_shellprofiles_finalize () { true; }
+  hadoop_finalize_classpath () {  true; }
+  hadoop_finalize_libpaths () { testvar=libpaths; }
+  hadoop_finalize_hadoop_heap () { true; }
+  hadoop_finalize_hadoop_opts () { true; }
+  hadoop_translate_cygwin_path () { true; }
+
+  hadoop_finalize
+
+  [ "${testvar}" = "libpaths" ];
+}
+
+
+@test "hadoop_finalize (heap)" {
+  HADOOP_IS_CYGWIN=false
+
+  hadoop_shellprofiles_finalize () { true; }
+  hadoop_finalize_classpath () {  true; }
+  hadoop_finalize_libpaths () { true; }
+  hadoop_finalize_hadoop_heap () { testvar=heap; }
+  hadoop_finalize_hadoop_opts () { true; }
+  hadoop_translate_cygwin_path () { true; }
+
+  hadoop_finalize
+
+  [ "${testvar}" = "heap" ];
+}
+
+@test "hadoop_finalize (opts)" {
+  HADOOP_IS_CYGWIN=false
+
+  hadoop_shellprofiles_finalize () { true; }
+  hadoop_finalize_classpath () {  true; }
+  hadoop_finalize_libpaths () { true; }
+  hadoop_finalize_hadoop_heap () { true; }
+  hadoop_finalize_hadoop_opts () { testvar=opts; }
+  hadoop_translate_cygwin_path () { true; }
+
+  hadoop_finalize
+
+  [ "${testvar}" = "opts" ];
+}
+
+@test "hadoop_finalize (cygwin prefix)" {
+  HADOOP_IS_CYGWIN=false
+
+  hadoop_shellprofiles_finalize () { true; }
+  hadoop_finalize_classpath () {  true; }
+  hadoop_finalize_libpaths () { true; }
+  hadoop_finalize_hadoop_heap () { true; }
+  hadoop_finalize_hadoop_opts () { true; }
+  hadoop_translate_cygwin_path () {
+    if [ $1 = HADOOP_PREFIX ]; then
+      testvar=prefix;
+    fi
+  }
+
+  hadoop_finalize
+
+  [ "${testvar}" = "prefix" ];
+}
+
+@test "hadoop_finalize (cygwin conf dir)" {
+  HADOOP_IS_CYGWIN=false
+
+  hadoop_shellprofiles_finalize () { true; }
+  hadoop_finalize_classpath () {  true; }
+  hadoop_finalize_libpaths () { true; }
+  hadoop_finalize_hadoop_heap () { true; }
+  hadoop_finalize_hadoop_opts () { true; }
+  hadoop_translate_cygwin_path () {
+    if [ $1 = HADOOP_CONF_DIR ]; then
+      testvar=confdir;
+    fi
+  }
+
+  hadoop_finalize
+
+  [ "${testvar}" = "confdir" ];
+}
+
+@test "hadoop_finalize (cygwin common)" {
+  HADOOP_IS_CYGWIN=false
+
+  hadoop_shellprofiles_finalize () { true; }
+  hadoop_finalize_classpath () {  true; }
+  hadoop_finalize_libpaths () { true; }
+  hadoop_finalize_hadoop_heap () { true; }
+  hadoop_finalize_hadoop_opts () { true; }
+  hadoop_translate_cygwin_path () {
+    if [ $1 = HADOOP_COMMON_HOME ]; then
+      testvar=common;
+    fi
+  }
+
+  hadoop_finalize
+
+  [ "${testvar}" = "common" ];
+}
+
+@test "hadoop_finalize (cygwin hdfs)" {
+  HADOOP_IS_CYGWIN=false
+
+  hadoop_shellprofiles_finalize () { true; }
+  hadoop_finalize_classpath () {  true; }
+  hadoop_finalize_libpaths () { true; }
+  hadoop_finalize_hadoop_heap () { true; }
+  hadoop_finalize_hadoop_opts () { true; }
+  hadoop_translate_cygwin_path () {
+    if [ $1 = HADOOP_HDFS_HOME ]; then
+      testvar=hdfs;
+    fi
+  }
+
+  hadoop_finalize
+
+  [ "${testvar}" = "hdfs" ];
+}
+
+@test "hadoop_finalize (cygwin yarn)" {
+  HADOOP_IS_CYGWIN=false
+
+  hadoop_shellprofiles_finalize () { true; }
+  hadoop_finalize_classpath () {  true; }
+  hadoop_finalize_libpaths () { true; }
+  hadoop_finalize_hadoop_heap () { true; }
+  hadoop_finalize_hadoop_opts () { true; }
+  hadoop_translate_cygwin_path () {
+    if [ $1 = HADOOP_YARN_HOME ]; then
+      testvar=yarn;
+    fi
+  }
+
+  hadoop_finalize
+
+  [ "${testvar}" = "yarn" ];
+}
+
+@test "hadoop_finalize (cygwin mapred)" {
+  HADOOP_IS_CYGWIN=false
+
+  hadoop_shellprofiles_finalize () { true; }
+  hadoop_finalize_classpath () {  true; }
+  hadoop_finalize_libpaths () { true; }
+  hadoop_finalize_hadoop_heap () { true; }
+  hadoop_finalize_hadoop_opts () { true; }
+  hadoop_translate_cygwin_path () {
+    if [ $1 = HADOOP_MAPRED_HOME ]; then
+      testvar=mapred;
+    fi
+  }
+
+  hadoop_finalize
+
+  [ "${testvar}" = "mapred" ];
+}
\ No newline at end of file
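Each test above stubs out exactly one helper and asserts it ran, so together they pin the orchestration. A sketch of the call sequence the stubs exercise (the real hadoop_finalize may do more; note the Cygwin translations are invoked unconditionally and are expected to no-op when not on Cygwin):

```bash
hadoop_finalize ()
{
  hadoop_shellprofiles_finalize
  hadoop_finalize_classpath
  hadoop_finalize_libpaths
  hadoop_finalize_hadoop_heap
  hadoop_finalize_hadoop_opts

  # Each translation is expected to be a no-op unless HADOOP_IS_CYGWIN=true.
  hadoop_translate_cygwin_path HADOOP_PREFIX
  hadoop_translate_cygwin_path HADOOP_CONF_DIR
  hadoop_translate_cygwin_path HADOOP_COMMON_HOME
  hadoop_translate_cygwin_path HADOOP_HDFS_HOME
  hadoop_translate_cygwin_path HADOOP_YARN_HOME
  hadoop_translate_cygwin_path HADOOP_MAPRED_HOME
}
```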

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a890a315/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_finalize_catalina_opts.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_finalize_catalina_opts.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_finalize_catalina_opts.bats
new file mode 100644
index 0000000..d91223e
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_finalize_catalina_opts.bats
@@ -0,0 +1,56 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_finalize_catalina_opts (raw)" {
+  local j
+
+  HADOOP_IS_CYGWIN=false
+  HADOOP_CATALINA_PREFIX=test
+  CATALINA_OPTS=""
+  hadoop_finalize_catalina_opts
+  for j in test.home.dir \
+        test.config.dir \
+        test.log.dir \
+        test.admin.port \
+        test.http.port \
+        test.max.threads \
+        test.ssl.keystore.file; do
+    [ "${CATALINA_OPTS#*${j}}" != "${CATALINA_OPTS}" ]
+  done
+}
+
+@test "hadoop_finalize_catalina_opts (cygwin)" {
+  local j
+
+  skip "catalina commands not supported under cygwin yet"
+
+  HADOOP_IS_CYGWIN=true
+  HADOOP_CATALINA_PREFIX=test
+  CATALINA_OPTS=""
+
+  catalina_translate_cygwin_path () {
+    eval ${1}="foobarbaz"
+  }
+
+  hadoop_finalize_catalina_opts
+  for j in test.home.dir \
+        test.config.dir \
+        test.log.dir \
+        test.ssl.keystore.file; do
+    [ "${CATALINA_OPTS#*${j}=foobarbaz}" != "${CATALINA_OPTS}" ]
+  done
+}
\ No newline at end of file
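The raw test only asserts that each -D key ends up somewhere in CATALINA_OPTS, so an append along these lines must happen. The right-hand-side variable names below are assumptions for illustration, not verified against the real function:

```bash
hadoop_finalize_catalina_opts ()
{
  local prefix=${HADOOP_CATALINA_PREFIX}

  # Source variables (HADOOP_CATALINA_HOME, etc.) are hypothetical names;
  # only the -D key prefixes are pinned by the test.
  CATALINA_OPTS="${CATALINA_OPTS} -D${prefix}.home.dir=${HADOOP_CATALINA_HOME}"
  CATALINA_OPTS="${CATALINA_OPTS} -D${prefix}.config.dir=${HADOOP_CONF_DIR}"
  CATALINA_OPTS="${CATALINA_OPTS} -D${prefix}.log.dir=${HADOOP_LOG_DIR}"
  CATALINA_OPTS="${CATALINA_OPTS} -D${prefix}.admin.port=${HADOOP_CATALINA_ADMIN_PORT}"
  CATALINA_OPTS="${CATALINA_OPTS} -D${prefix}.http.port=${HADOOP_CATALINA_HTTP_PORT}"
  CATALINA_OPTS="${CATALINA_OPTS} -D${prefix}.max.threads=${HADOOP_CATALINA_MAX_THREADS}"
  CATALINA_OPTS="${CATALINA_OPTS} -D${prefix}.ssl.keystore.file=${HADOOP_CATALINA_SSL_KEYSTORE_FILE}"
}
```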

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a890a315/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_finalize_classpath.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_finalize_classpath.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_finalize_classpath.bats
new file mode 100644
index 0000000..ac0d4c1
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_finalize_classpath.bats
@@ -0,0 +1,64 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_finalize_classpath (only conf dir)" {
+  CLASSPATH=""
+  HADOOP_CONF_DIR="${TMP}"
+
+  hadoop_translate_cygwin_path () { true; }
+  hadoop_add_to_classpath_userpath () { true; }
+
+  hadoop_finalize_classpath
+
+  [ "${CLASSPATH}" = "${TMP}" ]
+
+}
+
+@test "hadoop_finalize_classpath (before conf dir)" {
+  CLASSPATH="1"
+  HADOOP_CONF_DIR="${TMP}"
+
+  hadoop_translate_cygwin_path () { true; }
+  hadoop_add_to_classpath_userpath () { true; }
+
+  hadoop_finalize_classpath
+
+  [ "${CLASSPATH}" = "${TMP}:1" ]
+}
+
+@test "hadoop_finalize_classpath (adds user)" {
+  CLASSPATH=""
+  HADOOP_CONF_DIR="${TMP}"
+
+  hadoop_translate_cygwin_path () { true; }
+  hadoop_add_to_classpath_userpath () { testvar=true; }
+
+  hadoop_finalize_classpath
+
+  [ "${testvar}" = "true" ]
+}
+
+@test "hadoop_finalize_classpath (calls cygwin)" {
+  CLASSPATH=""
+  HADOOP_CONF_DIR="${TMP}"
+  HADOOP_IS_CYGWIN=true
+
+  hadoop_translate_cygwin_path () { [ $1 = CLASSPATH ]; }
+  hadoop_add_to_classpath_userpath () { true; }
+
+  hadoop_finalize_classpath
+}
\ No newline at end of file
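The positive tests fix the ordering: HADOOP_CONF_DIR is prepended to whatever CLASSPATH already holds, user paths are merged in afterwards, and the result is handed to the Cygwin translator. A sketch consistent with all four assertions (the second argument to the translator, marking a path list, is an assumption):

```bash
hadoop_finalize_classpath ()
{
  # The conf dir always wins the front of the classpath.
  if [[ -n "${CLASSPATH}" ]]; then
    CLASSPATH="${HADOOP_CONF_DIR}:${CLASSPATH}"
  else
    CLASSPATH="${HADOOP_CONF_DIR}"
  fi

  hadoop_add_to_classpath_userpath

  # Expected to no-op unless HADOOP_IS_CYGWIN=true.
  hadoop_translate_cygwin_path CLASSPATH true
}
```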

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a890a315/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_finalize_hadoop_heap.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_finalize_hadoop_heap.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_finalize_hadoop_heap.bats
new file mode 100644
index 0000000..ef49d5b
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_finalize_hadoop_heap.bats
@@ -0,0 +1,87 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+resetops () {
+  unset HADOOP_HEAPSIZE_MAX
+  unset HADOOP_HEAPSIZE
+  unset HADOOP_HEAPSIZE_MIN
+  unset HADOOP_OPTS
+}
+
+@test "hadoop_finalize_hadoop_heap (negative)" {
+  resetops
+  hadoop_finalize_hadoop_heap
+  [ -z "${HADOOP_OPTS}" ]
+}
+
+@test "hadoop_finalize_hadoop_heap (no unit max)" {
+  resetops
+  HADOOP_HEAPSIZE_MAX=1000
+  hadoop_finalize_hadoop_heap
+  echo ">${HADOOP_OPTS}<"
+  [ "${HADOOP_OPTS}" = "-Xmx1000m" ]
+}
+
+@test "hadoop_finalize_hadoop_heap (no unit old)" {
+  resetops
+  HADOOP_HEAPSIZE=1000
+  hadoop_finalize_hadoop_heap
+  echo ">${HADOOP_OPTS}<"
+  [ "${HADOOP_OPTS}" = "-Xmx1000m" ]
+}
+
+@test "hadoop_finalize_hadoop_heap (unit max)" {
+  resetops
+  HADOOP_HEAPSIZE_MAX=10g
+  hadoop_finalize_hadoop_heap
+  echo ">${HADOOP_OPTS}<"
+  [ "${HADOOP_OPTS}" = "-Xmx10g" ]
+}
+
+@test "hadoop_finalize_hadoop_heap (unit old)" {
+  resetops
+  HADOOP_HEAPSIZE=10g
+  hadoop_finalize_hadoop_heap
+  echo ">${HADOOP_OPTS}<"
+  [ "${HADOOP_OPTS}" = "-Xmx10g" ]
+}
+
+@test "hadoop_finalize_hadoop_heap (no unit min)" {
+  resetops
+  HADOOP_HEAPSIZE_MIN=1000
+  hadoop_finalize_hadoop_heap
+  echo ">${HADOOP_OPTS}<"
+  [ "${HADOOP_OPTS}" = "-Xms1000m" ]
+}
+
+@test "hadoop_finalize_hadoop_heap (unit min)" {
+  resetops
+  HADOOP_HEAPSIZE_MIN=10g
+  hadoop_finalize_hadoop_heap
+  echo ">${HADOOP_OPTS}<"
+  [ "${HADOOP_OPTS}" = "-Xms10g" ]
+}
+
+@test "hadoop_finalize_hadoop_heap (dedupe)" {
+  resetops
+  HADOOP_HEAPSIZE_MAX=1000
+  HADOOP_OPTS="-Xmx5g"
+  hadoop_finalize_hadoop_heap
+  hadoop_finalize_hadoop_heap
+  echo ">${HADOOP_OPTS}<"
+  [ "${HADOOP_OPTS}" = "-Xmx5g" ]
+}
\ No newline at end of file
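The suite pins three rules: a bare number gets an "m" suffix, HADOOP_HEAPSIZE is honored as a legacy alias for HADOOP_HEAPSIZE_MAX, and a flag already present in HADOOP_OPTS wins (the dedupe case). A self-contained sketch that satisfies all eight assertions:

```bash
hadoop_finalize_hadoop_heap ()
{
  local max=${HADOOP_HEAPSIZE_MAX:-${HADOOP_HEAPSIZE}}
  local min=${HADOOP_HEAPSIZE_MIN}

  if [[ -n "${max}" ]]; then
    # Bare numbers are interpreted as megabytes.
    if [[ "${max}" =~ ^[0-9]+$ ]]; then
      max="${max}m"
    fi
    # An -Xmx already present in HADOOP_OPTS wins (the dedupe case).
    if [[ "${HADOOP_OPTS}" != *-Xmx* ]]; then
      HADOOP_OPTS="${HADOOP_OPTS:+${HADOOP_OPTS} }-Xmx${max}"
    fi
  fi

  if [[ -n "${min}" ]]; then
    if [[ "${min}" =~ ^[0-9]+$ ]]; then
      min="${min}m"
    fi
    if [[ "${HADOOP_OPTS}" != *-Xms* ]]; then
      HADOOP_OPTS="${HADOOP_OPTS:+${HADOOP_OPTS} }-Xms${min}"
    fi
  fi
}
```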

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a890a315/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_finalize_hadoop_opts.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_finalize_hadoop_opts.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_finalize_hadoop_opts.bats
new file mode 100644
index 0000000..3acb1a5
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_finalize_hadoop_opts.bats
@@ -0,0 +1,52 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_finalize_hadoop_opts (raw)" {
+  local j
+
+  HADOOP_IS_CYGWIN=false
+  HADOOP_OPTS=""
+  hadoop_finalize_hadoop_opts
+  for j in hadoop.log.dir \
+        hadoop.log.file \
+        hadoop.home.dir \
+        hadoop.root.logger \
+        hadoop.policy.file \
+        hadoop.security.logger \
+        hadoop.id.str; do
+
+    [ "${HADOOP_OPTS#*${j}}" != "${HADOOP_OPTS}" ]
+  done
+}
+
+@test "hadoop_finalize_hadoop_opts (cygwin)" {
+  local j
+
+  HADOOP_IS_CYGWIN=true
+  HADOOP_OPTS=""
+
+  hadoop_translate_cygwin_path () {
+    eval ${1}="foobarbaz"
+  }
+
+  hadoop_finalize_hadoop_opts
+  for j in hadoop.log.dir \
+        hadoop.home.dir; do
+    echo "${j} from >${HADOOP_OPTS}<"
+    [ "${HADOOP_OPTS#*${j}=foobarbaz}" != "${HADOOP_OPTS}" ]
+  done
+}
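The raw test asserts each java property key is present; the cygwin test additionally requires that HADOOP_LOG_DIR and HADOOP_PREFIX pass through the path translator before being baked into -D options. A sketch of that shape (the right-hand-side variable names are conventional Hadoop names but unverified here):

```bash
hadoop_finalize_hadoop_opts ()
{
  # Translate path-valued variables before they are frozen into -D options.
  hadoop_translate_cygwin_path HADOOP_LOG_DIR
  hadoop_translate_cygwin_path HADOOP_PREFIX

  HADOOP_OPTS="${HADOOP_OPTS} -Dhadoop.log.dir=${HADOOP_LOG_DIR}"
  HADOOP_OPTS="${HADOOP_OPTS} -Dhadoop.log.file=${HADOOP_LOGFILE}"
  HADOOP_OPTS="${HADOOP_OPTS} -Dhadoop.home.dir=${HADOOP_PREFIX}"
  HADOOP_OPTS="${HADOOP_OPTS} -Dhadoop.id.str=${HADOOP_IDENT_STRING}"
  HADOOP_OPTS="${HADOOP_OPTS} -Dhadoop.root.logger=${HADOOP_ROOT_LOGGER}"
  HADOOP_OPTS="${HADOOP_OPTS} -Dhadoop.policy.file=${HADOOP_POLICYFILE}"
  HADOOP_OPTS="${HADOOP_OPTS} -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER}"
}
```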

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a890a315/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_finalize_libpaths.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_finalize_libpaths.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_finalize_libpaths.bats
new file mode 100644
index 0000000..48ba773
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_finalize_libpaths.bats
@@ -0,0 +1,30 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_finalize_libpaths (negative)" {
+  unset JAVA_LIBRARY_PATH
+  unset HADOOP_OPTS
+  hadoop_finalize_libpaths
+  [ -z "${HADOOP_OPTS}" ]
+}
+
+@test "hadoop_finalize_libpaths (positive)" {
+  JAVA_LIBRARY_PATH=test
+  unset HADOOP_OPTS
+  hadoop_finalize_libpaths
+  [ "${HADOOP_OPTS}" = "-Djava.library.path=test" ]
+}
\ No newline at end of file
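Both assertions fit a two-line rule: do nothing unless JAVA_LIBRARY_PATH is set, otherwise append exactly one -Djava.library.path option. Sketch:

```bash
hadoop_finalize_libpaths ()
{
  if [[ -n "${JAVA_LIBRARY_PATH}" ]]; then
    HADOOP_OPTS="${HADOOP_OPTS:+${HADOOP_OPTS} }-Djava.library.path=${JAVA_LIBRARY_PATH}"
  fi
}
```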

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a890a315/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_java_setup.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_java_setup.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_java_setup.bats
new file mode 100644
index 0000000..5a6ee10
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_java_setup.bats
@@ -0,0 +1,47 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_java_setup (negative not set)" {
+  unset JAVA_HOME
+  run hadoop_java_setup
+  [ "${status}" -eq 1 ]
+}
+
+@test "hadoop_java_setup (negative not a dir)" {
+  touch ${TMP}/foo
+  JAVA_HOME="${TMP}/foo"
+  run hadoop_java_setup
+  [ "${status}" -eq 1 ]
+}
+
+@test "hadoop_java_setup (negative not exec)" {
+  mkdir -p "${TMP}/bin"
+  touch "${TMP}/bin/java"
+  JAVA_HOME="${TMP}"
+  chmod a-x "${TMP}/bin/java"
+  run hadoop_java_setup
+  [ "${status}" -eq 1 ]
+}
+
+@test "hadoop_java_setup (positive)" {
+  mkdir -p "${TMP}/bin"
+  touch "${TMP}/bin/java"
+  JAVA_HOME="${TMP}"
+  chmod a+x "${TMP}/bin/java"
+  run hadoop_java_setup
+  [ "${status}" -eq 0 ]
+}
\ No newline at end of file
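The three negative tests enumerate the failure modes in order: JAVA_HOME unset, JAVA_HOME not a directory, and an unexecutable java binary. A sketch with illustrative error text:

```bash
hadoop_java_setup ()
{
  if [[ -z "${JAVA_HOME}" ]]; then
    echo "ERROR: JAVA_HOME is not set." 1>&2
    exit 1
  fi

  if [[ ! -d "${JAVA_HOME}" ]]; then
    echo "ERROR: JAVA_HOME (${JAVA_HOME}) is not a directory." 1>&2
    exit 1
  fi

  JAVA="${JAVA_HOME}/bin/java"

  if [[ ! -x "${JAVA}" ]]; then
    echo "ERROR: ${JAVA} is not executable." 1>&2
    exit 1
  fi
}
```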

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a890a315/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_os_tricks.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_os_tricks.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_os_tricks.bats
new file mode 100644
index 0000000..ae04f72
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_os_tricks.bats
@@ -0,0 +1,34 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_os_tricks (cygwin sets cygwin)" {
+  HADOOP_OS_TYPE=CYGWIN-IS-GNU-USER-LAND
+  hadoop_os_tricks
+  [ "${HADOOP_IS_CYGWIN}" = "true" ]
+}
+
+@test "hadoop_os_tricks (linux sets arena max)" {
+  HADOOP_OS_TYPE=Linux
+  hadoop_os_tricks
+  [ -n "${MALLOC_ARENA_MAX}" ]
+}
+
+@test "hadoop_os_tricks (osx sets java_home)" {
+  HADOOP_OS_TYPE=Darwin
+  hadoop_os_tricks
+  [ -n "${JAVA_HOME}" ]
+}
\ No newline at end of file
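These tests sample one observable side effect per platform. A simplified dispatch consistent with them (values are illustrative; the real function sets more, and the arena default of 4 is an assumption):

```bash
hadoop_os_tricks ()
{
  HADOOP_IS_CYGWIN=false
  case ${HADOOP_OS_TYPE} in
    CYGWIN*)
      HADOOP_IS_CYGWIN=true
      ;;
    Linux)
      # Bound glibc malloc arenas for the JVM; 4 is an assumed default.
      export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4}
      ;;
    Darwin)
      # macOS ships a JAVA_HOME locator; use it when nothing is set.
      if [[ -z "${JAVA_HOME}" ]]; then
        JAVA_HOME=$(/usr/libexec/java_home)
      fi
      ;;
  esac
}
```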

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a890a315/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_rotate_log.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_rotate_log.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_rotate_log.bats
new file mode 100644
index 0000000..f73fea6
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_rotate_log.bats
@@ -0,0 +1,52 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_rotate_log (defaults)" {
+  touch "${TMP}/log"
+  hadoop_rotate_log "${TMP}/log"
+  [ -f "${TMP}/log.1" ]
+  [ ! -f "${TMP}/log" ]
+}
+
+@test "hadoop_rotate_log (one archive log)" {
+  touch "${TMP}/log"
+  hadoop_rotate_log "${TMP}/log" 1
+  [ -f "${TMP}/log.1" ]
+  [ ! -f "${TMP}/log" ]
+}
+
+@test "hadoop_rotate_log (default five archive logs)" {
+  local i
+  for i in {1..5}; do
+    echo "Testing ${i}"
+    touch "${TMP}/log"
+    hadoop_rotate_log "${TMP}/log"
+    ls "${TMP}"
+    [ -f "${TMP}/log.${i}" ]
+  done
+}
+
+@test "hadoop_rotate_log (ten archive logs)" {
+  local i
+  for i in {1..10}; do
+    echo "Testing ${i}"
+    touch "${TMP}/log"
+    hadoop_rotate_log "${TMP}/log" 10
+    ls "${TMP}"
+    [ -f "${TMP}/log.${i}" ]
+  done
+}
\ No newline at end of file
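The loops above demonstrate a classic shift-and-rename rotation: with a cap of num archives (default 5, per the test name), log.i moves to log.(i+1) from the top down, then the live log becomes log.1. Sketch:

```bash
hadoop_rotate_log ()
{
  local log=$1
  local num=${2:-5}
  local i

  if [[ -f "${log}" ]]; then
    # Shift existing archives upward, oldest first.
    for ((i = num - 1; i > 0; i--)); do
      if [[ -f "${log}.${i}" ]]; then
        mv "${log}.${i}" "${log}.$((i + 1))"
      fi
    done
    mv "${log}" "${log}.1"
  fi
}
```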

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a890a315/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_shellprofile.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_shellprofile.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_shellprofile.bats
new file mode 100644
index 0000000..d6e0a25
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_shellprofile.bats
@@ -0,0 +1,91 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+shellprofilesetup () {
+  HADOOP_LIBEXEC_DIR="${TMP}/libexec"
+  HADOOP_CONF_DIR="${TMP}/conf"
+  mkdir -p "${HADOOP_LIBEXEC_DIR}/shellprofile.d" "${HADOOP_CONF_DIR}/shellprofile.d"
+}
+
+_test_hadoop_init () {
+  unittest=init
+}
+
+_test_hadoop_classpath () {
+  unittest=classpath
+}
+
+_test_hadoop_nativelib () {
+  unittest=nativelib
+}
+
+_test_hadoop_finalize () {
+  unittest=finalize
+}
+
+@test "hadoop_import_shellprofiles (negative)" {
+  shellprofilesetup
+  unset HADOOP_LIBEXEC_DIR
+  run hadoop_import_shellprofiles
+  [ -n "${output}" ]
+}
+
+@test "hadoop_import_shellprofiles (libexec sh import)" {
+  shellprofilesetup
+  echo "unittest=libexec" > "${HADOOP_LIBEXEC_DIR}/shellprofile.d/test.sh"
+  hadoop_import_shellprofiles
+  [ "${unittest}" = libexec ]
+}
+
+@test "hadoop_import_shellprofiles (libexec conf sh import+override)" {
+  shellprofilesetup
+  echo "unittest=libexec" > "${HADOOP_LIBEXEC_DIR}/shellprofile.d/test.sh"
+  echo "unittest=conf" > "${HADOOP_CONF_DIR}/shellprofile.d/test.sh"
+  hadoop_import_shellprofiles
+  [ "${unittest}" = conf ]
+}
+
+@test "hadoop_import_shellprofiles (libexec no cmd import)" {
+  shellprofilesetup
+  echo "unittest=libexec" > "${HADOOP_LIBEXEC_DIR}/shellprofile.d/test.cmd"
+  hadoop_import_shellprofiles
+  [ -z "${unittest}" ]
+}
+
+@test "hadoop_add_profile+hadoop_shellprofiles_init" {
+  hadoop_add_profile test
+  hadoop_shellprofiles_init
+  [ "${unittest}" = init ]
+}
+
+@test "hadoop_add_profile+hadoop_shellprofiles_classpath" {
+  hadoop_add_profile test
+  hadoop_shellprofiles_classpath
+  [ "${unittest}" = classpath ]
+}
+
+@test "hadoop_add_profile+hadoop_shellprofiles_nativelib" {
+  hadoop_add_profile test
+  hadoop_shellprofiles_nativelib
+  [ "${unittest}" = nativelib ]
+}
+
+@test "hadoop_add_profile+hadoop_shellprofiles_finalize" {
+  hadoop_add_profile test
+  hadoop_shellprofiles_finalize
+  [ "${unittest}" = finalize ]
+}
\ No newline at end of file
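Two mechanisms are under test: an import step that sources shellprofile.d/*.sh from libexec first and conf second (so conf wins and .cmd files are ignored), and a dispatch step that runs _NAME_hadoop_PHASE for each registered profile. A sketch of both (the real import also errors when HADOOP_LIBEXEC_DIR is unset; that validation is omitted here):

```bash
hadoop_import_shellprofiles ()
{
  local i
  # conf is sourced after libexec, so its definitions override.
  for i in "${HADOOP_LIBEXEC_DIR}"/shellprofile.d/*.sh \
           "${HADOOP_CONF_DIR}"/shellprofile.d/*.sh; do
    if [[ -f "${i}" ]]; then
      . "${i}"
    fi
  done
}

hadoop_add_profile ()
{
  HADOOP_SHELL_PROFILES="${HADOOP_SHELL_PROFILES} $1"
}

hadoop_shellprofiles_init ()
{
  local profile
  for profile in ${HADOOP_SHELL_PROFILES}; do
    # Call the per-profile hook if the sourced profile defined it.
    if declare -F "_${profile}_hadoop_init" > /dev/null; then
      "_${profile}_hadoop_init"
    fi
  done
}
```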

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a890a315/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_slaves.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_slaves.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_slaves.bats
new file mode 100644
index 0000000..cc33f0e
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_slaves.bats
@@ -0,0 +1,37 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_populate_slaves_file (specific file)" {
+  touch "${TMP}/file"
+  hadoop_populate_slaves_file "${TMP}/file"
+  [ "${HADOOP_SLAVES}" = "${TMP}/file" ]
+}
+
+@test "hadoop_populate_slaves_file (specific conf dir file)" {
+  HADOOP_CONF_DIR=${TMP}/1
+  mkdir -p "${HADOOP_CONF_DIR}"
+  touch "${HADOOP_CONF_DIR}/file"
+  hadoop_populate_slaves_file "file"
+  echo "${HADOOP_SLAVES}"
+  [ "${HADOOP_SLAVES}" = "${HADOOP_CONF_DIR}/file" ]
+}
+
+@test "hadoop_populate_slaves_file (no file)" {
+  HADOOP_CONF_DIR=${TMP}
+  run hadoop_populate_slaves_file "foo"
+  [ "${status}" -eq 1 ]
+}
\ No newline at end of file
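Resolution order per the tests: a path that exists as given wins; otherwise the name is looked up under HADOOP_CONF_DIR; otherwise fail. Sketch (error text illustrative):

```bash
hadoop_populate_slaves_file ()
{
  local slavesfile=$1

  if [[ -f "${slavesfile}" ]]; then
    HADOOP_SLAVES="${slavesfile}"
  elif [[ -f "${HADOOP_CONF_DIR}/${slavesfile}" ]]; then
    HADOOP_SLAVES="${HADOOP_CONF_DIR}/${slavesfile}"
  else
    echo "ERROR: Cannot find hosts file \"${slavesfile}\"" 1>&2
    exit 1
  fi
}
```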

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a890a315/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_ssh.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_ssh.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_ssh.bats
new file mode 100644
index 0000000..53e86ce
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_ssh.bats
@@ -0,0 +1,51 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_actual_ssh" {
+  skip "Not implemented"
+  hadoop_actual_ssh
+}
+
+@test "hadoop_connect_to_hosts" {
+  skip "Not implemented"
+  hadoop_connect_to_hosts
+}
+
+@test "hadoop_connect_to_hosts_without_pdsh" {
+  skip "Not implemented"
+  hadoop_connect_to_hosts_without_pdsh
+}
+
+@test "hadoop_common_slave_mode_execute (--slaves 1)" {
+  run  hadoop_common_slave_mode_execute --slaves command
+  [ "${output}" = command ]
+}
+
+@test "hadoop_common_slave_mode_execute (--slaves 2)" {
+  run  hadoop_common_slave_mode_execute --slaves command1 command2
+  [ "${output}" = "command1 command2" ]
+}
+
+@test "hadoop_common_slave_mode_execute (--hosts)" {
+  run  hadoop_common_slave_mode_execute --hosts filename command
+  [ "${output}" = command ]
+}
+
+@test "hadoop_common_slave_mode_execute (--hostnames 2)" {
+  run  hadoop_common_slave_mode_execute --hostnames "host1,host2" command1 command2
+  [ "${output}" = "command1 command2" ]
+}
\ No newline at end of file
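The four active tests only pin the argument stripping: the mode flag is consumed (plus its value for --hosts/--hostnames) and the remainder is treated as the command. A deliberately simplified sketch that just echoes what would be run remotely, which is all the assertions check:

```bash
hadoop_common_slave_mode_execute ()
{
  local argv=("$@")

  case ${argv[0]} in
    --slaves)
      argv=("${argv[@]:1}")
      ;;
    --hosts|--hostnames)
      # These flags carry a value (a filename or a host list).
      argv=("${argv[@]:2}")
      ;;
  esac

  echo "${argv[@]}"
}
```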

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a890a315/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_translate_cygwin_path.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_translate_cygwin_path.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_translate_cygwin_path.bats
new file mode 100644
index 0000000..e5f6aec
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_translate_cygwin_path.bats
@@ -0,0 +1,48 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_translate_cygwin_path (negative)" {
+  HADOOP_IS_CYGWIN=false
+  testvar="/this/path/is/cool"
+  hadoop_translate_cygwin_path testvar
+  [ "${testvar}" = "/this/path/is/cool" ]
+}
+
+@test "hadoop_translate_cygwin_path (positive)" {
+  HADOOP_IS_CYGWIN=true
+  testvar="/this/path/is/cool"
+
+  cygpath () {
+    echo "test"
+  }
+
+  hadoop_translate_cygwin_path testvar
+  [ "${testvar}" = "test" ]
+}
+
+
+@test "hadoop_translate_cygwin_path (path positive)" {
+  HADOOP_IS_CYGWIN=true
+  testvar="/this/path/is/cool"
+
+  cygpath () {
+    echo "test"
+  }
+
+  hadoop_translate_cygwin_path testvar true
+  [ "${testvar}" = "test" ]
+}
\ No newline at end of file
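The stubbed cygpath shows the mechanism: the function rewrites the variable named by its first argument, but only under Cygwin. The second argument plausibly selects path-list (-p) conversion, though these tests cannot distinguish that; treat it as an assumption:

```bash
hadoop_translate_cygwin_path ()
{
  local varname=$1
  local ispathlist=${2:-false}

  if [[ "${HADOOP_IS_CYGWIN}" = "true" ]]; then
    if [[ "${ispathlist}" = "true" ]]; then
      eval "${varname}=\"\$(cygpath -p -w \"\${${varname}}\")\""
    else
      eval "${varname}=\"\$(cygpath -w \"\${${varname}}\")\""
    fi
  fi
}
```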

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a890a315/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_validate_classname.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_validate_classname.bats b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_validate_classname.bats
new file mode 100644
index 0000000..1ba5b32
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_validate_classname.bats
@@ -0,0 +1,26 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_validate_classname (negative)" {
+  run hadoop_validate_classname fakeclass
+  [ ${status} -eq 1 ]
+}
+
+@test "hadoop_validate_classname (positive)" {
+  run hadoop_validate_classname org.apache.hadoop.io.Text
+  [ ${status} -eq 0 ]
+}
\ No newline at end of file
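A sanity check this loose can be as simple as requiring a package-qualified name: a bare word like fakeclass fails while org.apache.hadoop.io.Text passes. The exact rule is an assumption:

```bash
hadoop_validate_classname ()
{
  local class=$1

  # Require at least one dot, i.e. a package-qualified name.
  if [[ ! ${class} =~ \. ]]; then
    echo "ERROR: ${class} is not a valid fully-qualified class name." 1>&2
    return 1
  fi
  return 0
}
```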

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a890a315/hadoop-common-project/hadoop-common/src/test/scripts/run-bats.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/run-bats.sh b/hadoop-common-project/hadoop-common/src/test/scripts/run-bats.sh
new file mode 100755
index 0000000..566f47a
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/run-bats.sh
@@ -0,0 +1,43 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+targetdir=../../../target
+mkdir -p ${targetdir}/surefire-reports ${targetdir}/tap
+
+batsexe=$(which bats 2>/dev/null)
+
+if [[ -z ${batsexe} ]]; then
+  echo "not ok - no bats executable found" >  "${targetdir}/tap/shelltest.tap"
+  echo ""
+  echo ""
+  echo "ERROR: bats not installed. Skipping bash tests."
+  echo "ERROR: Please install bats as soon as possible."
+  echo ""
+  echo ""
+  exit 0
+fi
+
+for j in *.bats; do
+  echo Running bats -t "${j}"
+  bats -t "${j}" 2>&1 | tee "${targetdir}/tap/${j}.tap"
+  result=${PIPESTATUS[0]}
+  ((exitcode=exitcode+result))
+done
+
+if [[ ${exitcode} -gt 0 ]]; then
+  exit 1
+fi
+exit 0
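When run from the scripts directory, the harness emits one TAP file per .bats suite. A typical invocation (paths assumed; bats must be on PATH):

```bash
cd hadoop-common-project/hadoop-common/src/test/scripts
bash run-bats.sh
ls ../../../target/tap   # one <suite>.bats.tap file per test file
```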


[04/10] hadoop git commit: MAPREDUCE-6433. launchTime may be negative. Contributed by Zhihai Xu

Posted by aw...@apache.org.
MAPREDUCE-6433. launchTime may be negative. Contributed by Zhihai Xu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/93d50b78
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/93d50b78
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/93d50b78

Branch: refs/heads/HADOOP-12111
Commit: 93d50b782494af7eef980c4d596a59ff4e11646e
Parents: ab80e27
Author: Zhihai Xu <zx...@apache.org>
Authored: Thu Jul 30 23:07:31 2015 -0700
Committer: Zhihai Xu <zx...@apache.org>
Committed: Thu Jul 30 23:07:31 2015 -0700

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt            |  2 +
 .../hadoop/mapreduce/v2/app/MRAppMaster.java    |  2 +-
 .../v2/app/job/event/JobStartEvent.java         |  2 +-
 .../mapreduce/v2/app/job/impl/JobImpl.java      |  2 +-
 .../mapreduce/v2/app/TestMRAppMaster.java       | 88 +++++++++++++++++++-
 .../mapreduce/jobhistory/EventWriter.java       | 19 ++++-
 6 files changed, 107 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/93d50b78/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 398ffc6..738dea5 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -542,6 +542,8 @@ Release 2.8.0 - UNRELEASED
 
     MAPREDUCE-6427. Fix typo in JobHistoryEventHandler. (Ray Chiang via cdouglas)
 
+    MAPREDUCE-6433. launchTime may be negative. (Zhihai Xu)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/93d50b78/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
index f199ecb..6dc830f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
@@ -233,7 +233,7 @@ public class MRAppMaster extends CompositeService {
   JobStateInternal forcedState = null;
   private final ScheduledExecutorService logSyncer;
 
-  private long recoveredJobStartTime = 0;
+  private long recoveredJobStartTime = -1L;
   private static boolean mainStarted = false;
 
   @VisibleForTesting

http://git-wip-us.apache.org/repos/asf/hadoop/blob/93d50b78/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobStartEvent.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobStartEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobStartEvent.java
index 39051da..a142c31 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobStartEvent.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobStartEvent.java
@@ -25,7 +25,7 @@ public class JobStartEvent extends JobEvent {
   long recoveredJobStartTime;
 
   public JobStartEvent(JobId jobID) {
-    this(jobID, 0);
+    this(jobID, -1L);
   }
 
   public JobStartEvent(JobId jobID, long recoveredJobStartTime) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/93d50b78/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
index 4c3b3fe..9d141eb 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
@@ -1629,7 +1629,7 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
     @Override
     public void transition(JobImpl job, JobEvent event) {
       JobStartEvent jse = (JobStartEvent) event;
-      if (jse.getRecoveredJobStartTime() != 0) {
+      if (jse.getRecoveredJobStartTime() != -1L) {
         job.startTime = jse.getRecoveredJobStartTime();
       } else {
         job.startTime = job.clock.getTime();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/93d50b78/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java
index 63b201d..9e0dafc 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java
@@ -31,9 +31,11 @@ import static org.mockito.Mockito.times;
 
 import java.io.File;
 import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
 import java.io.IOException;
 import java.lang.reflect.Field;
 import java.util.Collections;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -44,16 +46,21 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.OutputCommitter;
 import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.jobhistory.EventType;
+import org.apache.hadoop.mapreduce.jobhistory.EventWriter;
 import org.apache.hadoop.mapreduce.jobhistory.HistoryEvent;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler;
+import org.apache.hadoop.mapreduce.jobhistory.JobInitedEvent;
 import org.apache.hadoop.mapreduce.jobhistory.JobUnsuccessfulCompletionEvent;
+import org.apache.hadoop.mapreduce.split.JobSplitWriter;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
 import org.apache.hadoop.mapreduce.v2.app.commit.CommitterEvent;
@@ -61,6 +68,8 @@ import org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler;
 import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal;
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
 import org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.security.AccessControlException;
@@ -111,7 +120,7 @@ public class TestMRAppMaster {
     }
     dir.mkdirs();
   }
-  
+
   @Test
   public void testMRAppMasterForDifferentUser() throws IOException,
       InterruptedException {
@@ -170,7 +179,46 @@ public class TestMRAppMaster {
     // verify the final status is FAILED
     verifyFailedStatus((MRAppMasterTest)appMaster, "FAILED");
   }
-  
+
+  @Test
+  public void testMRAppMasterJobLaunchTime() throws IOException,
+      InterruptedException {
+    String applicationAttemptIdStr = "appattempt_1317529182569_0004_000002";
+    String containerIdStr = "container_1317529182569_0004_000002_1";
+    String userName = "TestAppMasterUser";
+    JobConf conf = new JobConf();
+    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
+    conf.setInt(MRJobConfig.NUM_REDUCES, 0);
+    conf.set(JHAdminConfig.MR_HS_JHIST_FORMAT, "json");
+    ApplicationAttemptId applicationAttemptId = ConverterUtils
+        .toApplicationAttemptId(applicationAttemptIdStr);
+    JobId jobId = TypeConverter.toYarn(
+        TypeConverter.fromYarn(applicationAttemptId.getApplicationId()));
+
+    File dir = new File(MRApps.getStagingAreaDir(conf, userName).toString(),
+        jobId.toString());
+    dir.mkdirs();
+    File historyFile = new File(JobHistoryUtils.getStagingJobHistoryFile(
+        new Path(dir.toURI().toString()), jobId,
+        (applicationAttemptId.getAttemptId() - 1)).toUri().getRawPath());
+    historyFile.createNewFile();
+    FSDataOutputStream out = new FSDataOutputStream(
+        new FileOutputStream(historyFile), null);
+    EventWriter writer = new EventWriter(out, EventWriter.WriteMode.JSON);
+    writer.close();
+    FileSystem fs = FileSystem.get(conf);
+    JobSplitWriter.createSplitFiles(new Path(dir.getAbsolutePath()), conf,
+        fs, new org.apache.hadoop.mapred.InputSplit[0]);
+    ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
+    MRAppMasterTestLaunchTime appMaster =
+        new MRAppMasterTestLaunchTime(applicationAttemptId, containerId,
+            "host", -1, -1, System.currentTimeMillis());
+    MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);
+    appMaster.stop();
+    assertTrue("Job launch time should not be negative.",
+            appMaster.jobLaunchTime.get() >= 0);
+  }
+
   @Test
   public void testMRAppMasterSuccessLock() throws IOException,
       InterruptedException {
@@ -585,3 +633,39 @@ class MRAppMasterTest extends MRAppMaster {
     return spyHistoryService;
   }
 }
+
+class MRAppMasterTestLaunchTime extends MRAppMasterTest {
+  final AtomicLong jobLaunchTime = new AtomicLong(0L);
+  public MRAppMasterTestLaunchTime(ApplicationAttemptId applicationAttemptId,
+      ContainerId containerId, String host, int port, int httpPort,
+      long submitTime) {
+    super(applicationAttemptId, containerId, host, port, httpPort,
+        submitTime, false, false);
+  }
+
+  @Override
+  protected EventHandler<CommitterEvent> createCommitterEventHandler(
+      AppContext context, OutputCommitter committer) {
+    return new CommitterEventHandler(context, committer,
+        getRMHeartbeatHandler()) {
+      @Override
+      public void handle(CommitterEvent event) {
+      }
+    };
+  }
+
+  @Override
+  protected EventHandler<JobHistoryEvent> createJobHistoryHandler(
+      AppContext context) {
+    return new JobHistoryEventHandler(context, getStartCount()) {
+      @Override
+      public void handle(JobHistoryEvent event) {
+        if (event.getHistoryEvent().getEventType() == EventType.JOB_INITED) {
+          JobInitedEvent jie = (JobInitedEvent) event.getHistoryEvent();
+          jobLaunchTime.set(jie.getLaunchTime());
+        }
+        super.handle(event);
+      }
+    };
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/93d50b78/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
index 29489a5..b1cb6dc 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
@@ -29,19 +29,25 @@ import org.apache.avro.specific.SpecificDatumWriter;
 import org.apache.avro.util.Utf8;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.mapreduce.Counter;
 import org.apache.hadoop.mapreduce.CounterGroup;
 import org.apache.hadoop.mapreduce.Counters;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Event Writer is an utility class used to write events to the underlying
  * stream. Typically, one event writer (which translates to one stream) 
  * is created per job 
  * 
  */
-class EventWriter {
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class EventWriter {
   static final String VERSION = "Avro-Json";
   static final String VERSION_BINARY = "Avro-Binary";
 
@@ -50,11 +56,17 @@ class EventWriter {
     new SpecificDatumWriter<Event>(Event.class);
   private Encoder encoder;
   private static final Log LOG = LogFactory.getLog(EventWriter.class);
+
+  /**
+   * avro encoding format supported by EventWriter.
+   */
   public enum WriteMode { JSON, BINARY }
   private final WriteMode writeMode;
   private final boolean jsonOutput;  // Cache value while we have 2 modes
 
-  EventWriter(FSDataOutputStream out, WriteMode mode) throws IOException {
+  @VisibleForTesting
+  public EventWriter(FSDataOutputStream out, WriteMode mode)
+      throws IOException {
     this.out = out;
     this.writeMode = mode;
     if (this.writeMode==WriteMode.JSON) {
@@ -93,7 +105,8 @@ class EventWriter {
     out.hflush();
   }
 
-  void close() throws IOException {
+  @VisibleForTesting
+  public void close() throws IOException {
     try {
       encoder.flush();
       out.close();


[02/10] hadoop git commit: HADOOP-12271. Hadoop Jar Error Should Be More Explanatory. Contributed by Josh Elser.

Posted by aw...@apache.org.
HADOOP-12271. Hadoop Jar Error Should Be More Explanatory. Contributed by Josh Elser.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2087eaf6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2087eaf6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2087eaf6

Branch: refs/heads/HADOOP-12111
Commit: 2087eaf684d9fb14b5390e21bf17e93ac8fea7f8
Parents: c5caa25
Author: Harsh J <ha...@cloudera.com>
Authored: Fri Jul 31 10:12:32 2015 +0530
Committer: Harsh J <ha...@cloudera.com>
Committed: Fri Jul 31 10:12:32 2015 +0530

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt                   | 3 +++
 .../src/main/java/org/apache/hadoop/util/RunJar.java              | 3 ++-
 2 files changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2087eaf6/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 098194c..3c7e5c3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -536,6 +536,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+    HADOOP-12271. Hadoop Jar Error Should Be More Explanatory
+    (Josh Elser via harsh)
+
     HADOOP-6842. "hadoop fs -text" does not give a useful text representation
     of MapWritable objects (Akira Ajisaka via bobby)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2087eaf6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
index 4b26b76..ccb114b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
@@ -148,7 +148,8 @@ public class RunJar {
     String fileName = args[firstArg++];
     File file = new File(fileName);
     if (!file.exists() || !file.isFile()) {
-      System.err.println("Not a valid JAR: " + file.getCanonicalPath());
+      System.err.println("JAR does not exist or is not a normal file: " +
+          file.getCanonicalPath());
       System.exit(-1);
     }
     String mainClassName = null;
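What the change looks like from the command line; the jar path below is hypothetical, the message text is the one introduced by this patch:

```bash
$ hadoop jar /tmp/does-not-exist.jar
JAR does not exist or is not a normal file: /tmp/does-not-exist.jar
```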


[10/10] hadoop git commit: HADOOP-12130. document features added in 12113 (aw)

Posted by aw...@apache.org.
HADOOP-12130. document features added in 12113 (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/21e21b99
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/21e21b99
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/21e21b99

Branch: refs/heads/HADOOP-12111
Commit: 21e21b990902c99c5bb33464b9b096c28c30edf4
Parents: 659deae
Author: Allen Wittenauer <aw...@apache.org>
Authored: Fri Jul 31 14:53:29 2015 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Fri Jul 31 14:53:29 2015 -0700

----------------------------------------------------------------------
 dev-support/docs/precommit-advanced.md     | 68 ++++++++++++++----
 dev-support/docs/precommit-architecture.md | 31 ++++----
 dev-support/docs/precommit-basic.md        | 94 +++++++++++++++++++++----
 3 files changed, 149 insertions(+), 44 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/21e21b99/dev-support/docs/precommit-advanced.md
----------------------------------------------------------------------
diff --git a/dev-support/docs/precommit-advanced.md b/dev-support/docs/precommit-advanced.md
index 0a7eac5..a424199 100644
--- a/dev-support/docs/precommit-advanced.md
+++ b/dev-support/docs/precommit-advanced.md
@@ -16,22 +16,35 @@ test-patch
 ==========
 
 * [Docker Support](#Docker_Support)
-* [Maven Profiles](#Maven_Profiles)
+* [Maven Specific](#Maven_Specific)
+* [Ant Specific](#Ant_Specific)
 * [Plug-ins](#Plug-ins)
 * [Configuring for Other Projects](#Configuring_for_Other_Projects)
+* [Important Variables](#Important_Variables)
 
 # Docker Support
 
-By default, test-patch runs in the same shell where it was launched.  It can alternatively use Docker to launch itself into a container.  This is particularly useful if running under a QA environment that does not provide all the necessary binaries. For example, the patch requires a newer version of Java.
+By default, test-patch runs in the same shell where it was launched.  It can alternatively use Docker to launch itself into a container.  This is particularly useful if running under a QA environment that does not provide all the necessary binaries, for example when the patch requires a newer version of Java.
 
-The `--docker` parameter tells test-patch to run in Docker mode. The `--dockerfile` parameter allows one to provide a custom Dockerfile.  Be aware that test-patch will copy this file and append its necessary hooks in order to execute.
+The `--docker` parameter tells test-patch to run in Docker mode. The `--dockerfile` parameter allows one to provide a custom Dockerfile. The Dockerfile should contain all of the necessary binaries and tooling needed to run the test.  However, be aware that test-patch will copy this file and append its necessary hooks to re-launch itself prior to executing docker.
 
-test-patch includes code to automatically manage broken/stale container images that are hanging around if it is run in --jenkins mode.  In this way, if Docker fails to build the image, the disk space should eventually return.
+Dockerfile images will be named with a test-patch prefix and suffixed with either a date or a git commit hash. By using this information, test-patch will automatically manage broken/stale container images that are hanging around if it is run in --jenkins mode.  In this way, if Docker fails to build the image, the disk space should eventually be cleaned up and returned to the system.
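A hedged example invocation combining the flags described above (the patch file name and Dockerfile path are illustrative):

```bash
./test-patch.sh --docker --dockerfile=/path/to/Dockerfile --jenkins HADOOP-12345.patch
```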
 
-# Maven Profiles
+# Maven Specific
 
-By default, test-patch will pass -Ptest-patch and -D${PROJECT_NAME}PatchProcess to Maven. This will allow you to configure special actions that should only happen when running underneath test-patch.
+## Command Arguments
 
+test-patch always passes --batch-mode to maven to force it into non-interactive mode.  Additionally, some tests will also force -fae in order to get all of the messages/errors during that run.  It *does not* pass -DskipTests.  Additional arguments should be handled via the personality.
+
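+As a sketch, a personality (see [Configuring for Other Projects](#Configuring_for_Other_Projects)) might pass extra Maven arguments when it enqueues a module; the flag shown here is illustrative rather than required:
+
+```bash
+function personality_modules
+{
+  clear_personality_queue
+  # illustrative: skip javadoc generation during these runs
+  personality_enqueue_module . -Dmaven.javadoc.skip=true
+}
+```
+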
+## Test Profile
+
+By default, test-patch will pass -Ptest-patch to Maven. This will allow you to configure special actions that should only happen when running underneath test-patch.
+
+# Ant Specific
+
+## Command Arguments
+
+test-patch always passes -noinput to Ant.  This forces Ant to be non-interactive.
 
 # Plug-ins
 
@@ -58,7 +71,7 @@ Similarly, there are other functions that may be defined during the test-patch r
     - executed prior to the patch being applied but after the git repository is setup.  This is useful for any early error checking that might need to be done before any heavier work.
 
 * pluginname_preapply
-    - executed prior to the patch being applied.  This is useful for any "before"-type data collection for later comparisons
+    - executed prior to the patch being applied.  This is useful for any "before"-type data collection for later comparisons.
 
 * pluginname_postapply
     - executed after the patch has been applied.  This is useful for any "after"-type data collection.
@@ -79,12 +92,12 @@ If the plug-in has some specific options, one can use following functions:
 
     - executed prior to any other above functions except for pluginname_usage. This is useful for parsing the arguments passed from the user and setting up the execution environment.
 
-    HINT: It is recommend to make the pluginname relatively small, 10 characters at the most.  Otherwise the ASCII output table may be skewed.
+    HINT: It is recommended to make the pluginname relatively small, 10 characters at the most.  Otherwise, the ASCII output table may be skewed.
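+
+Putting these hooks together, the before/after pair of a minimal plug-in named `example` might look like the sketch below (the name, paths, and check are hypothetical):
+
+```bash
+function example_preapply
+{
+  # hypothetical "before" data collection for a later comparison
+  wc -l dev-support/*.sh > "${PATCH_DIR}/example-branch.txt"
+}
+
+function example_postapply
+{
+  # hypothetical "after" data collection
+  wc -l dev-support/*.sh > "${PATCH_DIR}/example-patch.txt"
+}
+```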
 
 
 # Configuring for Other Projects
 
-It is impossible for any general framework to be predictive about what types of special rules any given project may have, especially when it comes to ordering and Maven profiles.  In order to assist non-Hadoop projects, a project `personality` should be added that enacts these custom rules.
+It is impossible for any general framework to be predictive about what types of special rules any given project may have, especially when it comes to ordering and Maven profiles.  In order to direct test-patch to do the correct action, a project `personality` should be added that enacts these custom rules.
 
 A personality consists of two functions. One that determines which test types to run and another that allows a project to dictate ordering rules, flags, and profiles on a per-module, per-test run.
 
@@ -92,7 +105,7 @@ There can be only **one** of each personality function defined.
 
 ## Test Determination
 
-The `personality_file_tests` function determines which tests to turn on based upon the file name.  It is realtively simple.  For example, to turn on a full suite of tests for Java files:
+The `personality_file_tests` function determines which tests to turn on based upon the file name.  It is relatively simple.  For example, to turn on a full suite of tests for Java files:
 
 ```bash
 function personality_file_tests
@@ -131,19 +144,19 @@ function personality_modules
 
 It takes exactly two parameters `repostatus` and `testtype`.
 
-The `repostatus` parameter tells the `personality` function exactly what state the repository is in.  It can only be in one of two states:  `branch` or `patch`.  `branch` means the patch has not been applied.  The `patch` state is after the patch has been applied.
+The `repostatus` parameter tells the `personality` function exactly what state the source repository is in.  It can only be in one of two states:  `branch` or `patch`.  `branch` means the patch has not been applied.  The `patch` state is after the patch has been applied.
 
 The `testtype` state tells the personality exactly which test is about to be executed.
 
 In order to communicate back to test-patch, there are two functions for the personality to use.
 
-The first is `clear_personality_queue`. This removes the previous test's configuration so that a new module queue may be built.
+The first is `clear_personality_queue`. This removes the previous test's configuration so that a new module queue may be built. Custom personality_modules will almost always want to do this as the first action.
 
 The second is `personality_enqueue_module`.  This function takes two parameters.  The first parameter is the name of the module to add to this test's queue.  The second parameter is an optional list of additional flags to pass to Maven when processing it. `personality_enqueue_module` may be called as many times as necessary for your project.
 
     NOTE: A module name of . signifies the root of the repository.
 
-For example, let's say your project uses a special configuration to skip unit tests (-DskipTests).  Running unit tests during a javadoc build isn't very interesting. We can write a simple personality check to disable the unit tests:
+For example, let's say your project uses a special configuration to skip unit tests (-DskipTests).  Running unit tests during a javadoc build isn't very useful and wastes a lot of time. We can write a simple personality check to disable the unit tests:
 
 
 ```bash
@@ -160,5 +173,32 @@ function personality_modules
 
 ```
 
-This function will tell test-patch that when the javadoc test is being run, do the documentation test at the base of the repository and make sure the -DskipTests flag is passed to Maven.
+This function will tell test-patch that when the javadoc test is being run, do the documentation build at the base of the source repository and make sure the -DskipTests flag is passed to our build tool.
+
+
+
+# Important Variables
+
+There are a handful of extremely important variables that make life easier for personality and plug-in writers:
+
+* BUILD\_NATIVE will be set to true if the system has requested that non-JVM-based code be built (e.g., JNI or other compiled C code). Under Jenkins, this is always true.
+
+* BUILDTOOL specifies which tool is currently being used to drive compilation.  Additionally, many build tools define xyz\_ARGS to pass on to the build tool command line (e.g., MAVEN\_ARGS if maven is in use).  Projects may set this in their personality.  NOTE: today, only one build tool at a time is supported.  This may change in the future.
+
+* CHANGED\_FILES is a list of all files that appear to be added, deleted, or modified in the patch.
+
+* CHANGED\_UNFILTERED\_MODULES is a list of all modules that house all of the CHANGED\_FILES.  Be aware that the root of the source tree is reported as '.'.
+
+* CHANGED\_MODULES reports which modules appear to have source code in them.
+
+* HOW\_TO\_CONTRIBUTE should be a URL that points to a project's on-boarding documentation for new users. Currently, it is used to suggest a review of patch naming guidelines. Since this should be project-specific information, it is useful to set in a project's personality.
+
+* ISSUE\_RE assists test-patch when talking to JIRA.  It helps determine whether the given project is appropriate for the given JIRA issue.
+
+* MODULE and the other MODULE\_\* variables are arrays that track the modules to be operated upon, their status, and so on. These should be treated as read-only by plug-ins.
+
+* PATCH\_BRANCH\_DEFAULT is the name of the branch in the git repo that is considered the master.  This is useful to set in personalities.
+
+* PATCH\_DIR is the name of the temporary directory that houses test-patch artifacts (such as logs and the patch file itself).
 
+* TEST\_PARALLEL is set if parallel unit tests have been requested. Project personalities are responsible for actually enabling or ignoring the request. TEST\_THREADS is the number of threads that have been requested to run in parallel.
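+
+As a sketch of how a personality might consult these variables (the Maven profile shown is illustrative):
+
+```bash
+function personality_modules
+{
+  local repostatus=$1
+  local testtype=$2
+
+  clear_personality_queue
+
+  # illustrative: ask for a native profile only when a native build was requested
+  if [[ ${BUILD_NATIVE} == true ]]; then
+    personality_enqueue_module . -Pnative
+  else
+    personality_enqueue_module .
+  fi
+}
+```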

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21e21b99/dev-support/docs/precommit-architecture.md
----------------------------------------------------------------------
diff --git a/dev-support/docs/precommit-architecture.md b/dev-support/docs/precommit-architecture.md
index c134728..cd527ae 100644
--- a/dev-support/docs/precommit-architecture.md
+++ b/dev-support/docs/precommit-architecture.md
@@ -14,17 +14,17 @@
 
 # Some Philosophy
 
-* Everyone's time is valuable.  The quicker contributors can get feedback and iterate, the more likely their contribution will get checked in.  A committer should be able to focus on the core issues of a contribution rather than details that might be able to be determined automatically.
+* Everyone's time is valuable.  The quicker contributors can get feedback and iterate, the more likely (and the faster) their contribution will get checked in.  A committer should be able to focus on the core issues of a contribution rather than details that can be determined automatically.
 
-* Precommit checks should be fast.  There is no value in testing parts of the source tree that are not immediately impacted by a change.  Unit testing is the target. They are not a replacement for full builds, which is where integration tests should happen.
+* Precommit checks should be fast.  There is no value in testing parts of the source tree that are not immediately impacted by a change.  Unit testing is the target. They are not a replacement for full builds or integration tests.
 
 * Many open source projects have a desire to have this capability.  Why not generalize a solution?
 
 * In many build systems (especially with maven), a modular design has been picked.  Why not leverage that design to make checks faster?
 
-* Projects that use the same language will, with a high degree of certainity, benefit from the same types of checks.
+* Projects that use the same language will, with a high degree of certainty, benefit from the same types of checks.
 
-* Portability matters.
+* Portability matters.  Tooling should be as operating system and language agnostic as possible.
 
 # Phases
 
@@ -32,7 +32,7 @@ test-patch works effectively under several different phases:
 
 ## Setup
 
-This is where test-patch configures and validates the environemnt.  Some things done in this phase:
+This is where test-patch configures and validates the environment.  Some things done in this phase:
 
 * Defaults
 * Parameter handling
@@ -50,12 +50,12 @@ This acts as a verification of all of the setup parts and is the final place to
 
 ## Pre-apply
 
-This is where the 'before' work is handled.  Some things done in this phase:
+This is where the 'before' work is handled.  Some things that typically get checked in this phase:
 
 * The first pass of files and modules that will get patched
-* Validation and information gathering of java, javadoc, site, the mvn repo, findbugs, etc.
+* Validation and information gathering of the source tree pre-patch
 * Author checks
-* check for modified unit tests
+* Check for modified unit tests
 
 ## Patch is Applied
 
@@ -65,15 +65,14 @@ The patch gets applied.  Then a second pass to determine which modules and files
 
 Now that the patch has been applied, many of the same checks performed in the Pre-apply step are done again to build an 'after' picture.
 
-* Validation and information gathering of java, javadoc, site, the mvn repo, findbugs, etc.
-
 ## Post-install
 
 Some tests only work correctly when the repo is up-to-date. So
-mvn install is run to update the local repo and we enter this phase.  Tests performed here:
+mvn install is run to update the local repo and we enter this phase.  Some example tests performed here:
 
-* Verification that maven eclipse integration still works
-* FindBugs
+* javadoc
+* Findbugs
+* Maven eclipse integration still works
 
 ## Unit Tests
 
@@ -81,7 +80,7 @@ Since unit tests are generally the slowest part of the precommit process, they a
 
 ## Reporting
 
-Finally, the results are reported to the screen and, optionally, to JIRA.
+Finally, the results are reported to the screen and, optionally, to JIRA and/or whatever bug system has been configured.
 
 # Test Flow
 
@@ -90,8 +89,8 @@ The basic workflow for many of the sub-items in individual phases are:
 1. print a header, so the end user knows that something is happening
 1. verify if the test is needed.  If so, continue on.  Otherwise, return success and let the next part of the phase execute.
 1. Ask the personality about what modules and what flags should get used
-1. Execute maven in the given modules with the given flags. Log the output and record the time and result code.
-1. Do any extra work as appropriate (diffs, counts, etc) and either accept the status and message given by the maven run or change the vote, message, log file, etc.
+1. Execute maven (or some other build tool) in the given modules with the given flags. Log the output and record the time and result code.
+1. Do any extra work as appropriate (diffs, counts, etc) and either accept the status and message given by the maven run or change the vote, message, log file, etc, based upon this extra work.
 1. Add the outcome(s) to the report generator
 
 As one can see, the modules list is one of the key inputs into what actually gets executed.  As a result, projects must have full flexibility in either adding, modifying, or even removing modules from the test list.  If a personality removes the entire list of modules, then that test should just be ignored.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21e21b99/dev-support/docs/precommit-basic.md
----------------------------------------------------------------------
diff --git a/dev-support/docs/precommit-basic.md b/dev-support/docs/precommit-basic.md
index a830cdb..e68ad07 100644
--- a/dev-support/docs/precommit-basic.md
+++ b/dev-support/docs/precommit-basic.md
@@ -18,25 +18,30 @@ test-patch
 * [Purpose](#Purpose)
 * [Pre-requisites](#Pre-requisites)
 * [Basic Usage](#Basic_Usage)
+* [Build Tool](#Build_Tool)
+* [Providing Patch Files](#Providing_Patch_Files)
+* [Project-Specific Capabilities](#Project-Specific_Capabilities)
+* [MultiJDK](#MultiJDK)
+* [Docker](#Docker)
 
-## Purpose
+# Purpose
 
-As part of Hadoop's commit process, all patches to the source base go through a precommit test that does some (usually) light checking to make sure the proposed change does not break unit tests and/or passes some other prerequisites.  This is meant as a preliminary check for committers so that the basic patch is in a known state.  This check, called test-patch, may also be used by individual developers to verify a patch prior to sending to the Hadoop QA systems.
+As part of Hadoop's commit process, all patches to the source base go through a precommit test that does some (relatively) light checking to make sure the proposed change does not break unit tests and/or passes some other prerequisites such as code formatting guidelines.  This is meant as a preliminary check for committers so that the basic patch is in a known state and for contributors to know if they have followed the project's guidelines.  This check, called test-patch, may also be used by individual developers to verify a patch prior to sending to the Hadoop QA systems.
 
 Other projects have adopted a similar methodology after seeing great success in the Hadoop model.  Some have even gone as far as forking Hadoop's precommit code and modifying it to meet their project's needs.
 
 This is a modification to Hadoop's version of test-patch so that we may bring together all of these forks under a common code base to help the community as a whole.
 
 
-## Pre-requisites
+# Pre-requisites
 
 test-patch has the following requirements:
 
 * Ant- or Maven-based project (and ant/maven installed)
-* git-based project (and git installed)
+* git-based project (and git 1.7.3 or higher installed)
 * bash v3.2 or higher
 * findbugs 3.x installed
-* shellcheck installed
+* shellcheck installed, preferably 0.3.6 or higher
 * pylint installed
 * GNU diff
 * GNU patch
@@ -57,21 +62,21 @@ Optional:
 * Apache JIRA-based issue tracking
 * JIRA cli tools
 
-The locations of these files are (mostly) assumed to be in the file path, but may be overridden via command line options.  For Solaris and Solaris-like operating systems, the default location for the POSIX binaries is in /usr/xpg4/bin.
+The locations of these files are (mostly) assumed to be in the file path, but may be overridden via command line options.  For Solaris and Solaris-like operating systems, the default location for the POSIX binaries is in /usr/xpg4/bin and the default location for the GNU binaries is /usr/gnu/bin.
 
 
-## Basic Usage
+# Basic Usage
 
-This command will execute basic patch testing against a patch file stored in filename:
+This command will execute basic patch testing against a patch file stored in "filename":
 
 ```bash
 $ cd <your repo>
 $ dev-support/test-patch.sh --dirty-workspace --project=projectname <filename>
 ```
 
-The `--dirty-workspace` flag tells test-patch that the repository is not clean and it is ok to continue.  This version command does not run the unit tests.
+The `--dirty-workspace` flag tells test-patch that the repository is not clean and it is ok to continue.  By default, unit tests are not run since they may take a significant amount of time.
 
-To do that, we need to provide the --run-tests command:
+To turn them on, we need to provide the --run-tests option:
 
 
 ```bash
@@ -85,16 +90,34 @@ A typical configuration is to have two repositories.  One with the code you are
 
 ```bash
 $ cd <workrepo>
-$ git diff --no-prefix trunk > /tmp/patchfile
+$ git diff master > /tmp/patchfile
 $ cd ../<testrepo>
 $ <workrepo>/dev-support/test-patch.sh --basedir=<testrepo> --resetrepo /tmp/patchfile
 ```
 
 We used two new options here.  --basedir sets the location of the repository to use for testing.  --resetrepo tells test patch that it can go into **destructive** mode.  Destructive mode will wipe out any changes made to that repository, so use it with care!
 
-After the tests have run, there is a directory that contains all of the test-patch related artifacts.  This is generally referred to as the patchprocess directory.  By default, test-patch tries to make something off of /tmp to contain this content.  Using the `--patchdir` command, one can specify exactly which directory to use.  This is helpful for automated precommit testing so that the Jenkins or other automated workflow system knows where to look to gather up the output.
+After the tests have run, there is a directory that contains all of the test-patch related artifacts.  This is generally referred to as the patchprocess directory.  By default, test-patch tries to make something off of /tmp to contain this content.  Using the `--patch-dir` option, one can specify exactly which directory to use.  This is helpful for automated precommit testing so that Jenkins or another automated workflow system knows where to look to gather up the output.
 
-## Providing Patch Files
+For example:
+
+```bash
+$ test-patch.sh --jenkins --patch-dir=${WORKSPACE}/patchprocess --basedir=${WORKSPACE}/source ${WORKSPACE}/patchfile
+```
+
+... will trigger test-patch to run in fully automated Jenkins mode, using ${WORKSPACE}/patchprocess as its scratch space, ${WORKSPACE}/source as the source repository, and ${WORKSPACE}/patchfile as the name of the patch to test against.
+
+# Build Tool
+
+Out of the box, test-patch is built to use maven.  But what if the project is built using something else, such as ant?
+
+```bash
+$ test-patch.sh (other options) --build-tool=ant
+```
+
+will tell test-patch to use ant instead of maven to drive the project.
+
+# Providing Patch Files
 
 It is a fairly common practice within the Apache community to use Apache's JIRA instance to store potential patches.  As a result, test-patch supports providing just a JIRA issue number.  test-patch will find the *last* attachment, download it, then process it.
 
@@ -106,7 +129,6 @@ $ test-patch.sh (other options) HADOOP-9905
 
 ... will process the patch file associated with this JIRA issue.
 
-
 A new practice is to use a service such as GitHub and its Pull Request (PR) feature.  Luckily, test-patch supports URLs and many services like GitHub provide ways to provide unified diffs via URLs.
 
 For example:
@@ -117,6 +139,50 @@ $ test-patch.sh (other options) https://github.com/apache/flink/pull/773.patch
 
 ... will grab a unified diff of PR #773 and process it.
 
+# Project-specific Capabilities
+
+Due to the extensible nature of the system, test-patch allows projects to define project-specific rules, which we call personalities.  (How to build those rules is covered elsewhere.) There are two ways to specify which personality to use:
+
+## Direct Method
+
+```bash
+$ test-patch.sh (other options) --personality=(filename)
+```
+
+This tells test-patch to use the personality in the given file.
+
+## Project Method
+
+However, test-patch can also detect a personality stored in its "personality" directory based upon the project name:
+
+```bash
+$ test-patch.sh (other options) --project=(project)
+```
+
+# MultiJDK
+
+For many projects, it is useful to test Java code against multiple versions of JDKs at the same time.  test-patch can do this with the --multijdkdirs option:
+
+```bash
+$ test-patch.sh (other options) --multijdkdirs="/j/d/k/1,/j/d/k/2"
+```
+
+Not all Java tests support this mode, but those that do will now run their tests with all of the given versions of Java consecutively (e.g., javac, the Java compilation test).  Tests that do not support MultiJDK mode (e.g., checkstyle, mvn install) will use JAVA\_HOME.
+
+NOTE: JAVA\_HOME is always appended to the list of JDKs in MultiJDK mode.  If JAVA\_HOME is in the list, it will be moved to the end.
+
+# Docker
+
+test-patch also has a mode to utilize Docker:
+
+```bash
+$ test-patch.sh (other options) --docker
+```
+
+This will do some preliminary setup and then re-execute itself inside a Docker container.  For more information on how to provide a custom Dockerfile, see the advanced guide.
+
+
+
 # In Closing
 
 test-patch has many other features and command line options for the basic user.  Many of these are self-explanatory.  To see the list of options, run test-patch.sh without any options or with --help.


[09/10] hadoop git commit: Merge branch 'trunk' into HADOOP-12111

Posted by aw...@apache.org.
Merge branch 'trunk' into HADOOP-12111


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/659deae2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/659deae2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/659deae2

Branch: refs/heads/HADOOP-12111
Commit: 659deae237eca42bd177f5cedb324ab3a22f42db
Parents: e395a3a a890a31
Author: Allen Wittenauer <aw...@apache.org>
Authored: Fri Jul 31 14:44:17 2015 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Fri Jul 31 14:44:17 2015 -0700

----------------------------------------------------------------------
 BUILDING.txt                                    |   4 +-
 dev-support/docker/Dockerfile                   |   8 +
 hadoop-common-project/hadoop-common/CHANGES.txt |   7 +
 hadoop-common-project/hadoop-common/pom.xml     |  33 ++
 .../hadoop-common/src/main/bin/hadoop           |   8 +-
 .../hadoop-common/src/main/bin/hadoop-config.sh |  73 +---
 .../src/main/bin/hadoop-functions.sh            | 369 +++++++++++++++----
 .../hadoop-common/src/main/bin/slaves.sh        |   3 +-
 .../java/org/apache/hadoop/util/RunJar.java     |   3 +-
 .../scripts/hadoop-functions_test_helper.bash   |  56 +++
 .../src/test/scripts/hadoop_add_classpath.bats  | 100 +++++
 .../src/test/scripts/hadoop_add_colonpath.bats  |  96 +++++
 .../scripts/hadoop_add_common_to_classpath.bats |  71 ++++
 .../test/scripts/hadoop_add_javalibpath.bats    |  98 +++++
 .../src/test/scripts/hadoop_add_ldlibpath.bats  |  97 +++++
 .../src/test/scripts/hadoop_add_param.bats      |  49 +++
 .../hadoop_add_to_classpath_userpath.bats       |  98 +++++
 .../src/test/scripts/hadoop_basic_init.bats     |  94 +++++
 .../src/test/scripts/hadoop_bootstrap.bats      |  51 +++
 .../src/test/scripts/hadoop_confdir.bats        |  92 +++++
 .../test/scripts/hadoop_deprecate_envvar.bats   |  32 ++
 .../src/test/scripts/hadoop_finalize.bats       | 206 +++++++++++
 .../scripts/hadoop_finalize_catalina_opts.bats  |  56 +++
 .../test/scripts/hadoop_finalize_classpath.bats |  64 ++++
 .../scripts/hadoop_finalize_hadoop_heap.bats    |  87 +++++
 .../scripts/hadoop_finalize_hadoop_opts.bats    |  52 +++
 .../test/scripts/hadoop_finalize_libpaths.bats  |  30 ++
 .../src/test/scripts/hadoop_java_setup.bats     |  47 +++
 .../src/test/scripts/hadoop_os_tricks.bats      |  34 ++
 .../src/test/scripts/hadoop_rotate_log.bats     |  52 +++
 .../src/test/scripts/hadoop_shellprofile.bats   |  91 +++++
 .../src/test/scripts/hadoop_slaves.bats         |  37 ++
 .../src/test/scripts/hadoop_ssh.bats            |  51 +++
 .../scripts/hadoop_translate_cygwin_path.bats   |  48 +++
 .../test/scripts/hadoop_validate_classname.bats |  26 ++
 .../hadoop-common/src/test/scripts/run-bats.sh  |  43 +++
 .../hadoop-kms/src/main/sbin/kms.sh             |  29 +-
 .../hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh  |  29 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../hadoop-hdfs/src/main/bin/hdfs               |  10 +-
 .../hadoop-hdfs/src/main/bin/start-balancer.sh  |  12 +-
 .../hadoop-hdfs/src/main/bin/stop-balancer.sh   |  10 +-
 .../hdfs/server/namenode/ha/StandbyState.java   |   3 +-
 hadoop-mapreduce-project/CHANGES.txt            |   5 +
 hadoop-mapreduce-project/bin/mapred             |   2 +-
 .../hadoop/mapreduce/v2/app/MRAppMaster.java    |   2 +-
 .../v2/app/job/event/JobStartEvent.java         |   2 +-
 .../mapreduce/v2/app/job/impl/JobImpl.java      |   2 +-
 .../v2/api/records/TestTaskAttemptReport.java   | 131 +++++++
 .../v2/api/records/TestTaskReport.java          | 139 +++++++
 .../mapreduce/v2/app/TestMRAppMaster.java       |  88 ++++-
 .../v2/api/records/TaskAttemptReport.java       |   3 +
 .../mapreduce/v2/api/records/TaskReport.java    |   5 +-
 .../impl/pb/TaskAttemptReportPBImpl.java        |  38 +-
 .../api/records/impl/pb/TaskReportPBImpl.java   |  38 +-
 .../mapreduce/jobhistory/EventWriter.java       |  19 +-
 .../hadoop/mapreduce/v2/hs/CompletedTask.java   |   2 +-
 .../mapreduce/v2/hs/CompletedTaskAttempt.java   |   2 +-
 hadoop-yarn-project/CHANGES.txt                 |   6 +
 .../hadoop-yarn/bin/start-yarn.sh               |   7 +-
 .../hadoop-yarn/bin/stop-yarn.sh                |   7 +-
 hadoop-yarn-project/hadoop-yarn/bin/yarn        |   7 +
 .../resourcemanager/NodesListManager.java       |  28 +-
 .../rmcontainer/RMContainerImpl.java            |  27 +-
 .../resourcemanager/rmnode/RMNodeImpl.java      |   8 +
 .../resourcemanager/TestRMNodeTransitions.java  |  62 +++-
 .../rmapp/TestNodesListManager.java             | 162 ++++++++
 67 files changed, 2988 insertions(+), 266 deletions(-)
----------------------------------------------------------------------



[07/10] hadoop git commit: HADOOP-12249. pull argument parsing into a function (aw)

Posted by aw...@apache.org.
HADOOP-12249. pull argument parsing into a function (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/666cafca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/666cafca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/666cafca

Branch: refs/heads/HADOOP-12111
Commit: 666cafca8d3c928f3470a03ae9dedb27e27f8f0e
Parents: d0e0ba8
Author: Allen Wittenauer <aw...@apache.org>
Authored: Fri Jul 31 14:32:21 2015 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Fri Jul 31 14:32:21 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |   2 +
 .../hadoop-common/src/main/bin/hadoop           |   8 +-
 .../hadoop-common/src/main/bin/hadoop-config.sh |  73 +-----
 .../src/main/bin/hadoop-functions.sh            | 255 ++++++++++++++++---
 .../hadoop-common/src/main/bin/slaves.sh        |   3 +-
 .../hadoop-kms/src/main/sbin/kms.sh             |  29 +--
 .../hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh  |  29 +--
 .../hadoop-hdfs/src/main/bin/hdfs               |  10 +-
 .../hadoop-hdfs/src/main/bin/start-balancer.sh  |  12 +-
 .../hadoop-hdfs/src/main/bin/stop-balancer.sh   |  10 +-
 hadoop-mapreduce-project/bin/mapred             |   2 +-
 .../hadoop-yarn/bin/start-yarn.sh               |   7 +-
 .../hadoop-yarn/bin/stop-yarn.sh                |   7 +-
 hadoop-yarn-project/hadoop-yarn/bin/yarn        |   7 +
 14 files changed, 296 insertions(+), 158 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/666cafca/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 3c7e5c3..8d0795b 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -234,6 +234,8 @@ Trunk (Unreleased)
 
     HADOOP-10979. Auto-entries in hadoop_usage (aw)
 
+    HADOOP-12249. pull argument parsing into a function (aw)
+
   BUG FIXES
 
     HADOOP-11473. test-patch says "-1 overall" even when all checks are +1

http://git-wip-us.apache.org/repos/asf/hadoop/blob/666cafca/hadoop-common-project/hadoop-common/src/main/bin/hadoop
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop b/hadoop-common-project/hadoop-common/src/main/bin/hadoop
index c5444d6..ef67cc5 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop
@@ -17,8 +17,14 @@
 
 MYNAME="${BASH_SOURCE-$0}"
 
-function hadoop_usage()
+function hadoop_usage
 {
+  hadoop_add_option "buildpaths" "attempt to add class files from build tree"
+  hadoop_add_option "hostnames list[,of,host,names]" "hosts to use in slave mode"
+  hadoop_add_option "loglevel level" "set the log4j level for this command"
+  hadoop_add_option "hosts filename" "list of hosts to use in slave mode"
+  hadoop_add_option "slaves" "turn on slave mode"
+
   hadoop_add_subcommand "archive" "create a Hadoop archive"
   hadoop_add_subcommand "checknative" "check native Hadoop and compression libraries availability"
   hadoop_add_subcommand "classpath" "prints the class path needed to get the Hadoop jar and the required libraries"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/666cafca/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
old mode 100644
new mode 100755
index 58b871e..0b52895
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
@@ -53,7 +53,7 @@ if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then
 fi
 
 # get our functions defined for usage later
-if [[ -n "${HADOOP_COMMON_HOME}" ]] && 
+if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
    [[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-functions.sh" ]]; then
   . "${HADOOP_COMMON_HOME}/libexec/hadoop-functions.sh"
 elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh" ]]; then
@@ -93,75 +93,8 @@ hadoop_bootstrap
 # shellcheck disable=SC2034
 HADOOP_USER_PARAMS=("$@")
 
-HADOOP_DAEMON_MODE="default"
-
-while [[ -z "${_hadoop_common_done}" ]]; do
-  case $1 in
-    --buildpaths)
-      # shellcheck disable=SC2034
-      HADOOP_ENABLE_BUILD_PATHS=true
-      shift
-    ;;
-    --config)
-      shift
-      confdir=$1
-      shift
-      if [[ -d "${confdir}" ]]; then
-        # shellcheck disable=SC2034
-        HADOOP_CONF_DIR="${confdir}"
-      elif [[ -z "${confdir}" ]]; then
-        hadoop_error "ERROR: No parameter provided for --config "
-        hadoop_exit_with_usage 1
-      else
-        hadoop_error "ERROR: Cannot find configuration directory \"${confdir}\""
-        hadoop_exit_with_usage 1
-      fi
-    ;;
-    --daemon)
-      shift
-      HADOOP_DAEMON_MODE=$1
-      shift
-      if [[ -z "${HADOOP_DAEMON_MODE}" || \
-        ! "${HADOOP_DAEMON_MODE}" =~ ^st(art|op|atus)$ ]]; then
-        hadoop_error "ERROR: --daemon must be followed by either \"start\", \"stop\", or \"status\"."
-        hadoop_exit_with_usage 1
-      fi
-    ;;
-    --debug)
-      shift
-      # shellcheck disable=SC2034
-      HADOOP_SHELL_SCRIPT_DEBUG=true
-    ;; 
-    --help|-help|-h|help|--h|--\?|-\?|\?)
-      hadoop_exit_with_usage 0
-    ;;
-    --hostnames)
-      shift
-      # shellcheck disable=SC2034
-      HADOOP_SLAVE_NAMES="$1"
-      shift
-    ;;
-    --hosts)
-      shift
-      hadoop_populate_slaves_file "$1"
-      shift
-    ;;
-    --loglevel)
-      shift
-      # shellcheck disable=SC2034
-      HADOOP_LOGLEVEL="$1"
-      shift
-    ;;
-    --slaves)
-      shift
-      # shellcheck disable=SC2034
-      HADOOP_SLAVE_MODE=true
-    ;;
-    *)
-      _hadoop_common_done=true
-    ;;
-  esac
-done
+hadoop_parse_args "$@"
+shift "${HADOOP_PARSE_COUNTER}"
 
 #
 # Setup the base-line environment

http://git-wip-us.apache.org/repos/asf/hadoop/blob/666cafca/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index 6ebbee1..5e2a2e8 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -16,7 +16,8 @@
 
 # we need to declare this globally as an array, which can only
 # be done outside of a function
-declare -a HADOOP_USAGE=()
+declare -a HADOOP_SUBCMD_USAGE
+declare -a HADOOP_OPTION_USAGE
 
 ## @description  Print a message to stderr
 ## @audience     public
@@ -48,53 +49,72 @@ function hadoop_debug
 ## @param        subcommanddesc
 function hadoop_add_subcommand
 {
+  local subcmd=$1
+  local text=$2
+
+  HADOOP_SUBCMD_USAGE[${HADOOP_SUBCMD_USAGE_COUNTER}]="${subcmd}@${text}"
+  ((HADOOP_SUBCMD_USAGE_COUNTER=HADOOP_SUBCMD_USAGE_COUNTER+1))
+}
+
+## @description  Add an option to the usage output
+## @audience     private
+## @stability    evolving
+## @replaceable  no
+## @param        option
+## @param        optiondesc
+function hadoop_add_option
+{
   local option=$1
   local text=$2
 
-  HADOOP_USAGE[${HADOOP_USAGE_COUNTER}]="${option}@${text}"
-  ((HADOOP_USAGE_COUNTER=HADOOP_USAGE_COUNTER+1))
+  HADOOP_OPTION_USAGE[${HADOOP_OPTION_USAGE_COUNTER}]="${option}@${text}"
+  ((HADOOP_OPTION_USAGE_COUNTER=HADOOP_OPTION_USAGE_COUNTER+1))
 }
 
-## @description  generate standard usage output
-## @description  and optionally takes a class
+## @description  Reset the usage information to blank
 ## @audience     private
 ## @stability    evolving
 ## @replaceable  no
-## @param        execname
-## @param        [true|false]
-function hadoop_generate_usage
+function hadoop_reset_usage
 {
-  local cmd=$1
-  local takesclass=$2
-  local i
-  local counter
-  local line
-  local option
-  local giventext
-  local maxoptsize
-  local foldsize=75
+  HADOOP_SUBCMD_USAGE=()
+  HADOOP_OPTION_USAGE=()
+  HADOOP_SUBCMD_USAGE_COUNTER=0
+  HADOOP_OPTION_USAGE_COUNTER=0
+}
+
+## @description  Print a screen-size aware two-column output
+## @audience     private
+## @stability    evolving
+## @replaceable  no
+## @param        array
+function hadoop_generic_columnprinter
+{
+  declare -a input=("$@")
+  declare -i i=0
+  declare -i counter=0
+  declare line
+  declare text
+  declare option
+  declare giventext
+  declare -i maxoptsize
+  declare -i foldsize
   declare -a tmpa
+  declare numcols
 
-  cmd=${cmd##*/}
+  if [[ -n "${COLUMNS}" ]]; then
+    numcols=${COLUMNS}
+  else
+    numcols=$(tput cols 2>/dev/null)
+  fi
 
-  echo "Usage: ${cmd} [OPTIONS] SUBCOMMAND [SUBCOMMAND OPTIONS]"
-  if [[ ${takesclass} = true ]]; then
-    echo " or    ${cmd} [OPTIONS] CLASSNAME [CLASSNAME OPTIONS]"
-    echo "  where CLASSNAME is a user-provided Java class"
+  if [[ -z "${numcols}"
+     || ! "${numcols}" =~ ^[0-9]+$ ]]; then
+    numcols=75
+  else
+    ((numcols=numcols-5))
   fi
-  echo ""
-  echo "  OPTIONS is none or any of:"
-  echo "     --config confdir"
-  echo "     --daemon (start|stop|status)"
-  echo "     --debug"
-  echo "     --hostnames list[,of,host,names]"
-  echo "     --hosts filename"
-  echo "     --loglevel loglevel"
-  echo "     --slaves"
-  echo ""
-  echo "  SUBCOMMAND is one of:"
-
-  counter=0
+
   while read -r line; do
     tmpa[${counter}]=${line}
     ((counter=counter+1))
@@ -102,12 +122,12 @@ function hadoop_generate_usage
     if [[ ${#option} -gt ${maxoptsize} ]]; then
       maxoptsize=${#option}
     fi
-  done < <(for i in "${HADOOP_USAGE[@]}"; do
-    echo "${i}"
+  done < <(for text in "${input[@]}"; do
+    echo "${text}"
   done | sort)
 
   i=0
-  ((foldsize=75-maxoptsize))
+  ((foldsize=numcols-maxoptsize))
 
   until [[ $i -eq ${#tmpa[@]} ]]; do
     option=$(echo "${tmpa[$i]}" | cut -f1 -d'@')
@@ -119,8 +139,63 @@ function hadoop_generate_usage
     done < <(echo "${giventext}"| fold -s -w ${foldsize})
     ((i=i+1))
   done
-  echo ""
-  echo "Most subcommands print help when invoked w/o parameters or with -h."
+}
+
+## @description  generate standard usage output
+## @description  and optionally takes a class
+## @audience     private
+## @stability    evolving
+## @replaceable  no
+## @param        execname
+## @param        true|false
+## @param        [text to use in place of SUBCOMMAND]
+function hadoop_generate_usage
+{
+  local cmd=$1
+  local takesclass=$2
+  local subcmdtext=${3:-"SUBCOMMAND"}
+  local haveoptions
+  local optstring
+  local havesubs
+  local subcmdstring
+
+  cmd=${cmd##*/}
+
+  if [[ -n "${HADOOP_OPTION_USAGE_COUNTER}"
+        && "${HADOOP_OPTION_USAGE_COUNTER}" -gt 0 ]]; then
+    haveoptions=true
+    optstring=" [OPTIONS]"
+  fi
+
+  if [[ -n "${HADOOP_SUBCMD_USAGE_COUNTER}"
+        && "${HADOOP_SUBCMD_USAGE_COUNTER}" -gt 0 ]]; then
+    havesubs=true
+    subcmdstring=" ${subcmdtext} [${subcmdtext} OPTIONS]"
+  fi
+
+  echo "Usage: ${cmd}${optstring}${subcmdstring}"
+  if [[ ${takesclass} = true ]]; then
+    echo " or    ${cmd}${optstring} CLASSNAME [CLASSNAME OPTIONS]"
+    echo "  where CLASSNAME is a user-provided Java class"
+  fi
+
+  if [[ "${haveoptions}" = true ]]; then
+    echo ""
+    echo "  OPTIONS is none or any of:"
+    echo ""
+
+    hadoop_generic_columnprinter "${HADOOP_OPTION_USAGE[@]}"
+  fi
+
+  if [[ "${havesubs}" = true ]]; then
+    echo ""
+    echo "  ${subcmdtext} is one of:"
+    echo ""
+
+    hadoop_generic_columnprinter "${HADOOP_SUBCMD_USAGE[@]}"
+    echo ""
+    echo "${subcmdtext} may print help when invoked w/o parameters or with -h."
+  fi
 }
 
 ## @description  Replace `oldvar` with `newvar` if `oldvar` exists.
@@ -189,7 +264,7 @@ function hadoop_bootstrap
   TOOL_PATH=${TOOL_PATH:-${HADOOP_PREFIX}/share/hadoop/tools/lib/*}
 
   # usage output set to zero
-  HADOOP_USAGE_COUNTER=0
+  hadoop_reset_usage
 
   export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}
 
@@ -1730,3 +1805,101 @@ function hadoop_do_classpath_subcommand
     exit 0
   fi
 }
+
+## @description  generic shell script option parser.  Sets
+## @description  HADOOP_PARSE_COUNTER to the number of arguments
+## @description  the caller should shift
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
+## @param        [parameters, typically "$@"]
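+##
+## A typical caller (as hadoop-config.sh does above) runs:
+##   hadoop_parse_args "$@"
+##   shift "${HADOOP_PARSE_COUNTER}"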
+function hadoop_parse_args
+{
+  HADOOP_DAEMON_MODE="default"
+  HADOOP_PARSE_COUNTER=0
+
+  # not all of the options supported here are supported by all commands
+  # however these are:
+  hadoop_add_option "--config dir" "Hadoop config directory"
+  hadoop_add_option "--debug" "turn on shell script debug mode"
+  hadoop_add_option "--help" "usage information"
+
+  while true; do
+    hadoop_debug "hadoop_parse_args: processing $1"
+    case $1 in
+      --buildpaths)
+        # shellcheck disable=SC2034
+        HADOOP_ENABLE_BUILD_PATHS=true
+        shift
+        ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+1))
+      ;;
+      --config)
+        shift
+        confdir=$1
+        shift
+        ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2))
+        if [[ -d "${confdir}" ]]; then
+          # shellcheck disable=SC2034
+          HADOOP_CONF_DIR="${confdir}"
+        elif [[ -z "${confdir}" ]]; then
+          hadoop_error "ERROR: No parameter provided for --config "
+          hadoop_exit_with_usage 1
+        else
+          hadoop_error "ERROR: Cannot find configuration directory \"${confdir}\""
+          hadoop_exit_with_usage 1
+        fi
+      ;;
+      --daemon)
+        shift
+        HADOOP_DAEMON_MODE=$1
+        shift
+        ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2))
+        if [[ -z "${HADOOP_DAEMON_MODE}" || \
+          ! "${HADOOP_DAEMON_MODE}" =~ ^st(art|op|atus)$ ]]; then
+          hadoop_error "ERROR: --daemon must be followed by either \"start\", \"stop\", or \"status\"."
+          hadoop_exit_with_usage 1
+        fi
+      ;;
+      --debug)
+        shift
+        # shellcheck disable=SC2034
+        HADOOP_SHELL_SCRIPT_DEBUG=true
+        ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+1))
+      ;;
+      --help|-help|-h|help|--h|--\?|-\?|\?)
+        hadoop_exit_with_usage 0
+      ;;
+      --hostnames)
+        shift
+        # shellcheck disable=SC2034
+        HADOOP_SLAVE_NAMES="$1"
+        shift
+        ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2))
+      ;;
+      --hosts)
+        shift
+        hadoop_populate_slaves_file "$1"
+        shift
+        ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2))
+      ;;
+      --loglevel)
+        shift
+        # shellcheck disable=SC2034
+        HADOOP_LOGLEVEL="$1"
+        shift
+        ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2))
+      ;;
+      --slaves)
+        shift
+        # shellcheck disable=SC2034
+        HADOOP_SLAVE_MODE=true
+        ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+1))
+      ;;
+      *)
+        break
+      ;;
+    esac
+  done
+
+  hadoop_debug "hadoop_parse: asking caller to skip ${HADOOP_PARSE_COUNTER}"
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/666cafca/hadoop-common-project/hadoop-common/src/main/bin/slaves.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/slaves.sh b/hadoop-common-project/hadoop-common/src/main/bin/slaves.sh
index a8f0660..2fdf18b 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/slaves.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/slaves.sh
@@ -27,7 +27,8 @@
 #   HADOOP_SSH_OPTS Options passed to ssh when running remote commands.
 ##
 
-function hadoop_usage {
+function hadoop_usage
+{
   echo "Usage: slaves.sh [--config confdir] command..."
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/666cafca/hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh b/hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh
old mode 100644
new mode 100755
index 9228d2d..1191eb9
--- a/hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh
+++ b/hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh
@@ -13,28 +13,27 @@
 #  limitations under the License.
 #
 
-function hadoop_usage()
+MYNAME="${BASH_SOURCE-$0}"
+
+function hadoop_usage
 {
-  echo "Usage: kms.sh [--config confdir] [--debug] --daemon start|status|stop"
-  echo "       kms.sh [--config confdir] [--debug] COMMAND"
-  echo "            where COMMAND is one of:"
-  echo "  run               Start kms in the current window"
-  echo "  run -security     Start in the current window with security manager"
-  echo "  start             Start kms in a separate window"
-  echo "  start -security   Start in a separate window with security manager"
-  echo "  status            Return the LSB compliant status"
-  echo "  stop              Stop kms, waiting up to 5 seconds for the process to end"
-  echo "  stop n            Stop kms, waiting up to n seconds for the process to end"
-  echo "  stop -force       Stop kms, wait up to 5 seconds and then use kill -KILL if still running"
-  echo "  stop n -force     Stop kms, wait up to n seconds and then use kill -KILL if still running"
+  hadoop_add_subcommand "run" "Start kms in the current window"
+  hadoop_add_subcommand "run -security" "Start in the current window with security manager"
+  hadoop_add_subcommand "start" "Start kms in a separate window"
+  hadoop_add_subcommand "start -security" "Start in a separate window with security manager"
+  hadoop_add_subcommand "status" "Return the LSB compliant status"
+  hadoop_add_subcommand "stop" "Stop kms, waiting up to 5 seconds for the process to end"
+  hadoop_add_subcommand "top n" "Stop kms, waiting up to n seconds for the process to end"
+  hadoop_add_subcommand "stop -force" "Stop kms, wait up to 5 seconds and then use kill -KILL if still running"
+  hadoop_add_subcommand "stop n -force" "Stop kms, wait up to n seconds and then use kill -KILL if still running"
+  hadoop_generate_usage "${MYNAME}" false
 }
 
 # let's locate libexec...
 if [[ -n "${HADOOP_PREFIX}" ]]; then
   DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
 else
-  this="${BASH_SOURCE-$0}"
-  bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+  bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
   DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
 fi
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/666cafca/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh
old mode 100644
new mode 100755
index f51a5e6..9b819aa
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh
@@ -13,28 +13,27 @@
 #  limitations under the License.
 #
 
-function hadoop_usage()
+MYNAME="${BASH_SOURCE-$0}"
+
+function hadoop_usage
 {
-  echo "Usage: httpfs.sh [--config confdir] [--debug] --daemon start|status|stop"
-  echo "       httpfs.sh [--config confdir] [--debug] COMMAND"
-  echo "            where COMMAND is one of:"
-  echo "  run               Start httpfs in the current window"
-  echo "  run -security     Start in the current window with security manager"
-  echo "  start             Start httpfs in a separate window"
-  echo "  start -security   Start in a separate window with security manager"
-  echo "  status            Return the LSB compliant status"
-  echo "  stop              Stop httpfs, waiting up to 5 seconds for the process to end"
-  echo "  stop n            Stop httpfs, waiting up to n seconds for the process to end"
-  echo "  stop -force       Stop httpfs, wait up to 5 seconds and then use kill -KILL if still running"
-  echo "  stop n -force     Stop httpfs, wait up to n seconds and then use kill -KILL if still running"
+  hadoop_add_subcommand "run" "Start kms in the current window"
+  hadoop_add_subcommand "run -security" "Start in the current window with security manager"
+  hadoop_add_subcommand "start" "Start kms in a separate window"
+  hadoop_add_subcommand "start -security" "Start in a separate window with security manager"
+  hadoop_add_subcommand "status" "Return the LSB compliant status"
+  hadoop_add_subcommand "stop" "Stop kms, waiting up to 5 seconds for the process to end"
+  hadoop_add_subcommand "top n" "Stop kms, waiting up to n seconds for the process to end"
+  hadoop_add_subcommand "stop -force" "Stop kms, wait up to 5 seconds and then use kill -KILL if still running"
+  hadoop_add_subcommand "stop n -force" "Stop kms, wait up to n seconds and then use kill -KILL if still running"
+  hadoop_generate_usage "${MYNAME}" false
 }
 
 # let's locate libexec...
 if [[ -n "${HADOOP_PREFIX}" ]]; then
   DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
 else
-  this="${BASH_SOURCE-$0}"
-  bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+  bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
   DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
 fi
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/666cafca/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index 23a08be..852b040 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -19,6 +19,13 @@ MYNAME="${BASH_SOURCE-$0}"
 
 function hadoop_usage
 {
+  hadoop_add_option "--buildpaths" "attempt to add class files from build tree"
+  hadoop_add_option "--daemon (start|status|stop)" "operate on a daemon"
+  hadoop_add_option "--hostnames list[,of,host,names]" "hosts to use in slave mode"
+  hadoop_add_option "--loglevel level" "set the log4j level for this command"
+  hadoop_add_option "--hosts filename" "list of hosts to use in slave mode"
+  hadoop_add_option "--slaves" "turn on slave mode"
+
   hadoop_add_subcommand "balancer" "run a cluster balancing utility"
   hadoop_add_subcommand "cacheadmin" "configure the HDFS cache"
   hadoop_add_subcommand "classpath" "prints the class path needed to get the hadoop jar and the required libraries"
@@ -47,8 +54,7 @@ function hadoop_usage
   hadoop_add_subcommand "storagepolicies" "list/get/set block storage policies"
   hadoop_add_subcommand "version" "print the version"
   hadoop_add_subcommand "zkfc" "run the ZK Failover Controller daemon"
-  hadoop_generate_usage "${MYNAME}"
-
+  hadoop_generate_usage "${MYNAME}" false
 }
 
 # let's locate libexec...

http://git-wip-us.apache.org/repos/asf/hadoop/blob/666cafca/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-balancer.sh
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-balancer.sh b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-balancer.sh
index 321f9c9..cbf6170 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-balancer.sh
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-balancer.sh
@@ -15,13 +15,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+MYNAME="${BASH_SOURCE-$0}"
+
 function hadoop_usage
 {
-  echo "Usage: start-balancer.sh [--config confdir]  [-policy <policy>] [-threshold <threshold>]"
+  hadoop_add_option "--buildpaths" "attempt to add class files from build tree"
+  hadoop_add_option "--loglevel level" "set the log4j level for this command"
+
+  hadoop_add_option "-policy <policy>" "set the balancer's policy"
+  hadoop_add_option "-threshold <threshold>" "set the threshold for balancing"
+  hadoop_generate_usage "${MYNAME}" false
 }
 
-this="${BASH_SOURCE-$0}"
-bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
 
 # let's locate libexec...
 if [[ -n "${HADOOP_PREFIX}" ]]; then

http://git-wip-us.apache.org/repos/asf/hadoop/blob/666cafca/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-balancer.sh
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-balancer.sh b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-balancer.sh
index da25d46..268cf90 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-balancer.sh
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-balancer.sh
@@ -15,13 +15,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+MYNAME="${BASH_SOURCE-$0}"
+
 function hadoop_usage
 {
-  echo "Usage: stop-balancer.sh [--config confdir]"
+  hadoop_add_option "--buildpaths" "attempt to add class files from build tree"
+  hadoop_add_option "--loglevel level" "set the log4j level for this command"
+
+  hadoop_generate_usage "${MYNAME}" false
 }
 
-this="${BASH_SOURCE-$0}"
-bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
 
 # let's locate libexec...
 if [[ -n "${HADOOP_PREFIX}" ]]; then

http://git-wip-us.apache.org/repos/asf/hadoop/blob/666cafca/hadoop-mapreduce-project/bin/mapred
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/bin/mapred b/hadoop-mapreduce-project/bin/mapred
index 8c16369..426af80 100755
--- a/hadoop-mapreduce-project/bin/mapred
+++ b/hadoop-mapreduce-project/bin/mapred
@@ -29,7 +29,7 @@ function hadoop_usage
   hadoop_add_subcommand "queue" "get information regarding JobQueues"
   hadoop_add_subcommand "sampler" "sampler"
   hadoop_add_subcommand "version" "print the version"
-  hadoop_generate_usage "${MYNAME}"
+  hadoop_generate_usage "${MYNAME}" true
 }
 
 bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/666cafca/hadoop-yarn-project/hadoop-yarn/bin/start-yarn.sh
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/start-yarn.sh b/hadoop-yarn-project/hadoop-yarn/bin/start-yarn.sh
old mode 100644
new mode 100755
index ac18089..1172c60
--- a/hadoop-yarn-project/hadoop-yarn/bin/start-yarn.sh
+++ b/hadoop-yarn-project/hadoop-yarn/bin/start-yarn.sh
@@ -16,13 +16,14 @@
 # limitations under the License.
 
 
+MYNAME="${BASH_SOURCE-$0}"
+
 function hadoop_usage
 {
-  echo "Usage: start-yarn.sh [--config confdir]"
+  hadoop_generate_usage "${MYNAME}" false
 }
 
-this="${BASH_SOURCE-$0}"
-bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
 
 # let's locate libexec...
 if [[ -n "${HADOOP_PREFIX}" ]]; then

http://git-wip-us.apache.org/repos/asf/hadoop/blob/666cafca/hadoop-yarn-project/hadoop-yarn/bin/stop-yarn.sh
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/stop-yarn.sh b/hadoop-yarn-project/hadoop-yarn/bin/stop-yarn.sh
old mode 100644
new mode 100755
index d85b44e..ffa4cfc
--- a/hadoop-yarn-project/hadoop-yarn/bin/stop-yarn.sh
+++ b/hadoop-yarn-project/hadoop-yarn/bin/stop-yarn.sh
@@ -16,13 +16,14 @@
 # limitations under the License.
 
 
+MYNAME="${BASH_SOURCE-$0}"
+
 function hadoop_usage
 {
-  echo "Usage: stop-yarn.sh [--config confdir]"
+  hadoop_generate_usage "${MYNAME}" false
 }
 
-this="${BASH_SOURCE-$0}"
-bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
 
 # let's locate libexec...
 if [[ -n "${HADOOP_PREFIX}" ]]; then

http://git-wip-us.apache.org/repos/asf/hadoop/blob/666cafca/hadoop-yarn-project/hadoop-yarn/bin/yarn
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn b/hadoop-yarn-project/hadoop-yarn/bin/yarn
index 50607c8..f0bed9b 100755
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn
@@ -19,6 +19,13 @@ MYNAME="${BASH_SOURCE-$0}"
 
 function hadoop_usage
 {
+  hadoop_add_option "--buildpaths" "attempt to add class files from build tree"
+  hadoop_add_option "--daemon (start|status|stop)" "operate on a daemon"
+  hadoop_add_option "--hostnames list[,of,host,names]" "hosts to use in slave mode"
+  hadoop_add_option "--loglevel level" "set the log4j level for this command"
+  hadoop_add_option "--hosts filename" "list of hosts to use in slave mode"
+  hadoop_add_option "--slaves" "turn on slave mode"
+
   hadoop_add_subcommand "application" "prints application(s) report/kill application"
   hadoop_add_subcommand "applicationattempt" "prints applicationattempt(s) report"
   hadoop_add_subcommand "classpath" "prints the class path needed to get the hadoop jar and the required libraries"


[03/10] hadoop git commit: YARN-433. When RM is catching up with node updates then it should not expire acquired containers. Contributed by Xuan Gong

Posted by aw...@apache.org.
YARN-433. When RM is catching up with node updates then it should not expire acquired containers. Contributed by Xuan Gong


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ab80e277
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ab80e277
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ab80e277

Branch: refs/heads/HADOOP-12111
Commit: ab80e277039a586f6d6259b2511ac413e29ea4f8
Parents: 2087eaf
Author: Zhihai Xu <zx...@apache.org>
Authored: Thu Jul 30 21:56:25 2015 -0700
Committer: Zhihai Xu <zx...@apache.org>
Committed: Thu Jul 30 21:57:11 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 +
 .../rmcontainer/RMContainerImpl.java            | 27 +--------
 .../resourcemanager/rmnode/RMNodeImpl.java      |  8 +++
 .../resourcemanager/TestRMNodeTransitions.java  | 62 ++++++++++++++++----
 4 files changed, 63 insertions(+), 37 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab80e277/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f027c29..1da2dbc 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -716,6 +716,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3971. Skip RMNodeLabelsManager#checkRemoveFromClusterNodeLabelsOfQueue 
     on nodelabel recovery. (Bibin A Chundatt via wangda)
 
+    YARN-433. When RM is catching up with node updates then it should not expire
+    acquired containers. (Xuan Gong via zxu)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab80e277/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
index f7d3f56..940f76f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
@@ -99,9 +99,9 @@ public class RMContainerImpl implements RMContainer, Comparable<RMContainer> {
 
     // Transitions from ACQUIRED state
     .addTransition(RMContainerState.ACQUIRED, RMContainerState.RUNNING,
-        RMContainerEventType.LAUNCHED, new LaunchedTransition())
+        RMContainerEventType.LAUNCHED)
     .addTransition(RMContainerState.ACQUIRED, RMContainerState.COMPLETED,
-        RMContainerEventType.FINISHED, new ContainerFinishedAtAcquiredState())
+        RMContainerEventType.FINISHED, new FinishedTransition())
     .addTransition(RMContainerState.ACQUIRED, RMContainerState.RELEASED,
         RMContainerEventType.RELEASED, new KillTransition())
     .addTransition(RMContainerState.ACQUIRED, RMContainerState.EXPIRED,
@@ -486,16 +486,6 @@ public class RMContainerImpl implements RMContainer, Comparable<RMContainer> {
     }
   }
 
-  private static final class LaunchedTransition extends BaseTransition {
-
-    @Override
-    public void transition(RMContainerImpl container, RMContainerEvent event) {
-      // Unregister from containerAllocationExpirer.
-      container.containerAllocationExpirer.unregister(container
-          .getContainerId());
-    }
-  }
-
   private static final class ContainerRescheduledTransition extends
       FinishedTransition {
 
@@ -554,19 +544,6 @@ public class RMContainerImpl implements RMContainer, Comparable<RMContainer> {
     }
   }
 
-  private static final class ContainerFinishedAtAcquiredState extends
-      FinishedTransition {
-    @Override
-    public void transition(RMContainerImpl container, RMContainerEvent event) {
-      // Unregister from containerAllocationExpirer.
-      container.containerAllocationExpirer.unregister(container
-          .getContainerId());
-
-      // Inform AppAttempt
-      super.transition(container, event);
-    }
-  }
-
   private static final class KillTransition extends FinishedTransition {
 
     @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab80e277/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
index 09b9278..f182d02 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsMana
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRunningOnNodeEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeResourceUpdateSchedulerEvent;
@@ -107,6 +108,7 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
   private long lastHealthReportTime;
   private String nodeManagerVersion;
 
+  private final ContainerAllocationExpirer containerAllocationExpirer;
   /* set of containers that have just launched */
   private final Set<ContainerId> launchedContainers =
     new HashSet<ContainerId>();
@@ -265,6 +267,8 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
     this.stateMachine = stateMachineFactory.make(this);
     
     this.nodeUpdateQueue = new ConcurrentLinkedQueue<UpdatedContainerInfo>();
+
+    this.containerAllocationExpirer = context.getContainerAllocationExpirer();
   }
 
   @Override
@@ -953,11 +957,15 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
           // Just launched container. RM knows about it the first time.
           launchedContainers.add(containerId);
           newlyLaunchedContainers.add(remoteContainer);
+          // Unregister from containerAllocationExpirer.
+          containerAllocationExpirer.unregister(containerId);
         }
       } else {
         // A finished container
         launchedContainers.remove(containerId);
         completedContainers.add(remoteContainer);
+        // Unregister from containerAllocationExpirer.
+        containerAllocationExpirer.unregister(containerId);
       }
     }
     if (newlyLaunchedContainers.size() != 0 || completedContainers.size() != 0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab80e277/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
index ece896b..4964c59 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
@@ -31,6 +31,7 @@ import java.util.Collections;
 import java.util.List;
 
 import org.apache.hadoop.util.HostsFileReader;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerState;
@@ -43,6 +44,7 @@ import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.event.InlineDispatcher;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
 import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeCleanAppEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeCleanContainerEvent;
@@ -105,8 +107,9 @@ public class TestRMNodeTransitions {
     InlineDispatcher rmDispatcher = new InlineDispatcher();
     
     rmContext =
-        new RMContextImpl(rmDispatcher, null, null, null,
-            mock(DelegationTokenRenewer.class), null, null, null, null, null);
+        new RMContextImpl(rmDispatcher, mock(ContainerAllocationExpirer.class),
+          null, null, mock(DelegationTokenRenewer.class), null, null, null,
+          null, null);
     NodesListManager nodesListManager = mock(NodesListManager.class);
     HostsFileReader reader = mock(HostsFileReader.class);
     when(nodesListManager.getHostsReader()).thenReturn(reader);
@@ -147,7 +150,8 @@ public class TestRMNodeTransitions {
   public void tearDown() throws Exception {
   }
   
-  private RMNodeStatusEvent getMockRMNodeStatusEvent() {
+  private RMNodeStatusEvent getMockRMNodeStatusEvent(
+      List<ContainerStatus> containerStatus) {
     NodeHeartbeatResponse response = mock(NodeHeartbeatResponse.class);
 
     NodeHealthStatus healthStatus = mock(NodeHealthStatus.class);
@@ -158,6 +162,9 @@ public class TestRMNodeTransitions {
     doReturn(healthStatus).when(event).getNodeHealthStatus();
     doReturn(response).when(event).getLatestResponse();
     doReturn(RMNodeEventType.STATUS_UPDATE).when(event).getType();
+    if (containerStatus != null) {
+      doReturn(containerStatus).when(event).getContainers();
+    }
     return event;
   }
   
@@ -176,7 +183,7 @@ public class TestRMNodeTransitions {
     
     // Now verify that scheduler isn't notified of an expired container
     // by checking number of 'completedContainers' it got in the previous event
-    RMNodeStatusEvent statusEvent = getMockRMNodeStatusEvent();
+    RMNodeStatusEvent statusEvent = getMockRMNodeStatusEvent(null);
     ContainerStatus containerStatus = mock(ContainerStatus.class);
     doReturn(completedContainerId).when(containerStatus).getContainerId();
     doReturn(Collections.singletonList(containerStatus)).
@@ -207,11 +214,11 @@ public class TestRMNodeTransitions {
     ContainerId completedContainerIdFromNode2_2 = BuilderUtils.newContainerId(
         BuilderUtils.newApplicationAttemptId(
             BuilderUtils.newApplicationId(1, 1), 1), 2);
- 
-    RMNodeStatusEvent statusEventFromNode1 = getMockRMNodeStatusEvent();
-    RMNodeStatusEvent statusEventFromNode2_1 = getMockRMNodeStatusEvent();
-    RMNodeStatusEvent statusEventFromNode2_2 = getMockRMNodeStatusEvent();
-    
+
+    RMNodeStatusEvent statusEventFromNode1 = getMockRMNodeStatusEvent(null);
+    RMNodeStatusEvent statusEventFromNode2_1 = getMockRMNodeStatusEvent(null);
+    RMNodeStatusEvent statusEventFromNode2_2 = getMockRMNodeStatusEvent(null);
+
     ContainerStatus containerStatusFromNode1 = mock(ContainerStatus.class);
     ContainerStatus containerStatusFromNode2_1 = mock(ContainerStatus.class);
     ContainerStatus containerStatusFromNode2_2 = mock(ContainerStatus.class);
@@ -263,8 +270,8 @@ public class TestRMNodeTransitions {
         BuilderUtils.newApplicationAttemptId(
             BuilderUtils.newApplicationId(1, 1), 1), 1);
         
-    RMNodeStatusEvent statusEvent1 = getMockRMNodeStatusEvent();
-    RMNodeStatusEvent statusEvent2 = getMockRMNodeStatusEvent();
+    RMNodeStatusEvent statusEvent1 = getMockRMNodeStatusEvent(null);
+    RMNodeStatusEvent statusEvent2 = getMockRMNodeStatusEvent(null);
 
     ContainerStatus containerStatus1 = mock(ContainerStatus.class);
     ContainerStatus containerStatus2 = mock(ContainerStatus.class);
@@ -499,7 +506,7 @@ public class TestRMNodeTransitions {
 
     // Verify status update does not clear containers/apps to cleanup
     // but updating heartbeat response for cleanup does
-    RMNodeStatusEvent statusEvent = getMockRMNodeStatusEvent();
+    RMNodeStatusEvent statusEvent = getMockRMNodeStatusEvent(null);
     node.handle(statusEvent);
     Assert.assertEquals(1, node.getContainersToCleanUp().size());
     Assert.assertEquals(1, node.getAppsToCleanup().size());
@@ -706,4 +713,35 @@ public class TestRMNodeTransitions {
         null, null));
     Assert.assertEquals(nmVersion2, node.getNodeManagerVersion());
   }
+
+  @Test
+  public void testContainerExpire() throws Exception {
+    ContainerAllocationExpirer mockExpirer =
+        mock(ContainerAllocationExpirer.class);
+    ApplicationId appId =
+        ApplicationId.newInstance(System.currentTimeMillis(), 1);
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(appId, 1);
+    ContainerId containerId1 = ContainerId.newContainerId(appAttemptId, 1L);
+    ContainerId containerId2 = ContainerId.newContainerId(appAttemptId, 2L);
+    mockExpirer.register(containerId1);
+    mockExpirer.register(containerId2);
+    verify(mockExpirer).register(containerId1);
+    verify(mockExpirer).register(containerId2);
+    ((RMContextImpl) rmContext).setContainerAllocationExpirer(mockExpirer);
+    RMNodeImpl rmNode = getRunningNode();
+    ContainerStatus status1 =
+        ContainerStatus
+          .newInstance(containerId1, ContainerState.RUNNING, "", 0);
+    ContainerStatus status2 =
+        ContainerStatus.newInstance(containerId2, ContainerState.COMPLETE, "",
+          0);
+    List<ContainerStatus> statusList = new ArrayList<ContainerStatus>();
+    statusList.add(status1);
+    statusList.add(status2);
+    RMNodeStatusEvent statusEvent = getMockRMNodeStatusEvent(statusList);
+    rmNode.handle(statusEvent);
+    verify(mockExpirer).unregister(containerId1);
+    verify(mockExpirer).unregister(containerId2);
+  }
 }
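
Taken together, the hunks above move the point where an acquired container is unregistered from the ContainerAllocationExpirer: instead of happening on RMContainerImpl's LAUNCHED/FINISHED transitions, it now happens when a node heartbeat actually reports the container, so an RM that is still catching up on node updates no longer expires containers the node already knows about. What follows is a minimal, self-contained sketch of that unregister-on-heartbeat idea; SimpleExpirer and onHeartbeatReport are illustrative names, not Hadoop APIs.

import java.util.HashSet;
import java.util.Set;

class ExpirerSketch {

  // Stand-in for ContainerAllocationExpirer: tracks ids until unregistered.
  static class SimpleExpirer {
    private final Set<String> tracked = new HashSet<>();
    void register(String id) { tracked.add(id); }
    void unregister(String id) { tracked.remove(id); }
    boolean isTracked(String id) { return tracked.contains(id); }
  }

  // Mirrors the RMNodeImpl change: whether the heartbeat reports a container
  // as newly launched or as finished, it is unregistered from the expirer,
  // because the node has now proven it knows about the container.
  static void onHeartbeatReport(SimpleExpirer expirer, String containerId) {
    expirer.unregister(containerId);
  }

  public static void main(String[] args) {
    SimpleExpirer expirer = new SimpleExpirer();
    expirer.register("container_1");   // ACQUIRED: expiration clock starts
    expirer.register("container_2");
    onHeartbeatReport(expirer, "container_1"); // reported RUNNING by the node
    onHeartbeatReport(expirer, "container_2"); // reported COMPLETE by the node
    System.out.println(expirer.isTracked("container_1")); // false
    System.out.println(expirer.isTracked("container_2")); // false
  }
}
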


[06/10] hadoop git commit: MAPREDUCE-6394. Speed up Task processing loop in HsTasksBlock#render(). Contributed by Ray Chiang

Posted by aw...@apache.org.
MAPREDUCE-6394. Speed up Task processing loop in HsTasksBlock#render(). Contributed by Ray Chiang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d0e0ba80
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d0e0ba80
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d0e0ba80

Branch: refs/heads/HADOOP-12111
Commit: d0e0ba8010b72f58ddede5303f2b88404263d4bf
Parents: 32e490b
Author: Jason Lowe <jl...@apache.org>
Authored: Fri Jul 31 18:17:54 2015 +0000
Committer: Jason Lowe <jl...@apache.org>
Committed: Fri Jul 31 18:17:54 2015 +0000

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt            |   3 +
 .../v2/api/records/TestTaskAttemptReport.java   | 131 +++++++++++++++++
 .../v2/api/records/TestTaskReport.java          | 139 +++++++++++++++++++
 .../v2/api/records/TaskAttemptReport.java       |   3 +
 .../mapreduce/v2/api/records/TaskReport.java    |   5 +-
 .../impl/pb/TaskAttemptReportPBImpl.java        |  38 ++++-
 .../api/records/impl/pb/TaskReportPBImpl.java   |  38 +++--
 .../hadoop/mapreduce/v2/hs/CompletedTask.java   |   2 +-
 .../mapreduce/v2/hs/CompletedTaskAttempt.java   |   2 +-
 9 files changed, 343 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0e0ba80/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 738dea5..2001c57 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -378,6 +378,9 @@ Release 2.8.0 - UNRELEASED
     MAPREDUCE-6376. Add avro binary support for jhist files (Ray Chiang via
     jlowe)
 
+    MAPREDUCE-6394. Speed up Task processing loop in HsTasksBlock#render()
+    (Ray Chiang via jlowe)
+
   BUG FIXES
 
     MAPREDUCE-6314. TestPipeApplication fails on trunk.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0e0ba80/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/api/records/TestTaskAttemptReport.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/api/records/TestTaskAttemptReport.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/api/records/TestTaskAttemptReport.java
new file mode 100644
index 0000000..cd7f758
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/api/records/TestTaskAttemptReport.java
@@ -0,0 +1,131 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.v2.api.records;
+
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskAttemptReportPBImpl;
+import org.apache.hadoop.mapreduce.v2.app.MockJobs;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos;
+import org.apache.hadoop.yarn.util.Records;
+
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertTrue;
+
+public class TestTaskAttemptReport {
+
+  @Test
+  public void testSetRawCounters() {
+    TaskAttemptReport report = Records.newRecord(TaskAttemptReport.class);
+    org.apache.hadoop.mapreduce.Counters rCounters = MockJobs.newCounters();
+    report.setRawCounters(rCounters);
+    Counters counters = report.getCounters();
+    assertNotEquals(null, counters);
+  }
+
+  @Test
+  public void testBuildImplicitRawCounters() {
+    TaskAttemptReportPBImpl report = new TaskAttemptReportPBImpl();
+    org.apache.hadoop.mapreduce.Counters rCounters = MockJobs.newCounters();
+    report.setRawCounters(rCounters);
+    MRProtos.TaskAttemptReportProto protoVal = report.getProto();
+    Counters counters = report.getCounters();
+    assertTrue(protoVal.hasCounters());
+  }
+
+  @Test
+  public void testCountersOverRawCounters() {
+    TaskAttemptReport report = Records.newRecord(TaskAttemptReport.class);
+    org.apache.hadoop.mapreduce.Counters rCounters = MockJobs.newCounters();
+    Counters altCounters = TypeConverter.toYarn(rCounters);
+    report.setRawCounters(rCounters);
+    report.setCounters(altCounters);
+    Counters counters = report.getCounters();
+    assertNotEquals(null, counters);
+    assertNotEquals(rCounters, altCounters);
+    assertEquals(counters, altCounters);
+  }
+
+  @Test
+  public void testUninitializedCounters() {
+    // Create basic class
+    TaskAttemptReport report = Records.newRecord(TaskAttemptReport.class);
+    // Verify properties initialized to null
+    assertEquals(null, report.getCounters());
+    assertEquals(null, report.getRawCounters());
+  }
+
+  @Test
+  public void testSetRawCountersToNull() {
+    // Create basic class
+    TaskAttemptReport report = Records.newRecord(TaskAttemptReport.class);
+    // Set raw counters to null
+    report.setRawCounters(null);
+    // Verify properties still null
+    assertEquals(null, report.getCounters());
+    assertEquals(null, report.getRawCounters());
+
+  }
+
+  @Test
+  public void testSetCountersToNull() {
+    // Create basic class
+    TaskAttemptReport report = Records.newRecord(TaskAttemptReport.class);
+    // Set raw counters to null
+    report.setCounters(null);
+    // Verify properties still null
+    assertEquals(null, report.getCounters());
+    assertEquals(null, report.getRawCounters());
+  }
+
+  @Test
+  public void testSetNonNullCountersToNull() {
+    // Create basic class
+    TaskAttemptReport report = Records.newRecord(TaskAttemptReport.class);
+    // Set raw counters
+    org.apache.hadoop.mapreduce.Counters rCounters = MockJobs.newCounters();
+    report.setRawCounters(rCounters);
+    // Verify getCounters converts properly from raw to real
+    Counters counters = report.getCounters();
+    assertNotEquals(null, counters);
+    // Clear counters to null and then verify
+    report.setCounters(null);
+    assertEquals(null, report.getCounters());
+    assertEquals(null, report.getRawCounters());
+  }
+
+  @Test
+  public void testSetNonNullRawCountersToNull() {
+    // Create basic class
+    TaskAttemptReport report = Records.newRecord(TaskAttemptReport.class);
+    // Set raw counters
+    org.apache.hadoop.mapreduce.Counters rCounters = MockJobs.newCounters();
+    report.setRawCounters(rCounters);
+    // Verify getCounters converts properly from raw to real
+    Counters counters = report.getCounters();
+    assertNotEquals(null, counters);
+    // Clear counters to null and then verify
+    report.setRawCounters(null);
+    assertEquals(null, report.getCounters());
+    assertEquals(null, report.getRawCounters());
+  }
+}
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0e0ba80/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/api/records/TestTaskReport.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/api/records/TestTaskReport.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/api/records/TestTaskReport.java
new file mode 100644
index 0000000..6801661
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/api/records/TestTaskReport.java
@@ -0,0 +1,139 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.v2.api.records;
+
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskReportPBImpl;
+import org.apache.hadoop.mapreduce.v2.app.MockJobs;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos;
+import org.apache.hadoop.yarn.util.Records;
+
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertTrue;
+
+public class TestTaskReport {
+
+  @Test
+  public void testSetRawCounters() {
+    // Create basic class
+    TaskReport report = Records.newRecord(TaskReport.class);
+    org.apache.hadoop.mapreduce.Counters rCounters = MockJobs.newCounters();
+    // Set raw counters
+    report.setRawCounters(rCounters);
+    // Verify getCounters converts properly from raw to real
+    Counters counters = report.getCounters();
+    assertNotEquals(null, counters);
+  }
+
+  @Test
+  public void testBuildImplicitRawCounters() {
+    // Create basic class
+    TaskReportPBImpl report = new TaskReportPBImpl();
+    org.apache.hadoop.mapreduce.Counters rCounters = MockJobs.newCounters();
+    // Set raw counters
+    report.setRawCounters(rCounters);
+    // Verify getProto method implicitly converts/sets real counters
+    MRProtos.TaskReportProto protoVal = report.getProto();
+    assertTrue(protoVal.hasCounters());
+  }
+
+  @Test
+  public void testCountersOverRawCounters() {
+    // Create basic class
+    TaskReport report = Records.newRecord(TaskReport.class);
+    org.apache.hadoop.mapreduce.Counters rCounters = MockJobs.newCounters();
+    Counters altCounters = TypeConverter.toYarn(rCounters);
+    // Set raw counters
+    report.setRawCounters(rCounters);
+    // Set real counters
+    report.setCounters(altCounters);
+    // Verify real counters has priority over raw
+    Counters counters = report.getCounters();
+    assertNotEquals(null, counters);
+    assertNotEquals(rCounters, altCounters);
+    assertEquals(counters, altCounters);
+  }
+
+  @Test
+  public void testUninitializedCounters() {
+    // Create basic class
+    TaskReport report = Records.newRecord(TaskReport.class);
+    // Verify properties initialized to null
+    assertEquals(null, report.getCounters());
+    assertEquals(null, report.getRawCounters());
+  }
+
+  @Test
+  public void testSetRawCountersToNull() {
+    // Create basic class
+    TaskReport report = Records.newRecord(TaskReport.class);
+    // Set raw counters to null
+    report.setRawCounters(null);
+    // Verify properties still null
+    assertEquals(null, report.getCounters());
+    assertEquals(null, report.getRawCounters());
+
+  }
+
+  @Test
+  public void testSetCountersToNull() {
+    // Create basic class
+    TaskReport report = Records.newRecord(TaskReport.class);
+    // Set raw counters to null
+    report.setCounters(null);
+    // Verify properties still null
+    assertEquals(null, report.getCounters());
+    assertEquals(null, report.getRawCounters());
+  }
+
+  @Test
+  public void testSetNonNullCountersToNull() {
+    // Create basic class
+    TaskReport report = Records.newRecord(TaskReport.class);
+    // Set raw counters
+    org.apache.hadoop.mapreduce.Counters rCounters = MockJobs.newCounters();
+    report.setRawCounters(rCounters);
+    // Verify getCounters converts properly from raw to real
+    Counters counters = report.getCounters();
+    assertNotEquals(null, counters);
+    // Clear counters to null and then verify
+    report.setCounters(null);
+    assertEquals(null, report.getCounters());
+    assertEquals(null, report.getRawCounters());
+  }
+
+  @Test
+  public void testSetNonNullRawCountersToNull() {
+    // Create basic class
+    TaskReport report = Records.newRecord(TaskReport.class);
+    // Set raw counters
+    org.apache.hadoop.mapreduce.Counters rCounters = MockJobs.newCounters();
+    report.setRawCounters(rCounters);
+    // Verify getCounters converts properly from raw to real
+    Counters counters = report.getCounters();
+    assertNotEquals(null, counters);
+    // Clear counters to null and then verify
+    report.setRawCounters(null);
+    assertEquals(null, report.getCounters());
+    assertEquals(null, report.getRawCounters());
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0e0ba80/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptReport.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptReport.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptReport.java
index bc0a4c6..810887b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptReport.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptReport.java
@@ -31,6 +31,7 @@ public interface TaskAttemptReport {
   /** @return the sort/merge finish time. Applicable only for reduce attempts */
   public abstract long getSortFinishTime();
   public abstract Counters getCounters();
+  public abstract org.apache.hadoop.mapreduce.Counters getRawCounters();
   public abstract String getDiagnosticInfo();
   public abstract String getStateString();
   public abstract Phase getPhase();
@@ -45,6 +46,8 @@ public interface TaskAttemptReport {
   public abstract void setStartTime(long startTime);
   public abstract void setFinishTime(long finishTime);
   public abstract void setCounters(Counters counters);
+  public abstract void
+      setRawCounters(org.apache.hadoop.mapreduce.Counters rCounters);
   public abstract void setDiagnosticInfo(String diagnosticInfo);
   public abstract void setStateString(String stateString);
   public abstract void setPhase(Phase phase);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0e0ba80/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskReport.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskReport.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskReport.java
index 1444a53..b7300c6 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskReport.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskReport.java
@@ -28,6 +28,7 @@ public interface TaskReport {
   public abstract long getStartTime();
   public abstract long getFinishTime();
   public abstract Counters getCounters();
+  public abstract org.apache.hadoop.mapreduce.Counters getRawCounters();
   public abstract List<TaskAttemptId> getRunningAttemptsList();
   public abstract TaskAttemptId getRunningAttempt(int index);
   public abstract int getRunningAttemptsCount();
@@ -46,7 +47,9 @@ public interface TaskReport {
   public abstract void setStartTime(long startTime);
   public abstract void setFinishTime(long finishTime);
   public abstract void setCounters(Counters counters);
-  
+  public abstract void
+      setRawCounters(org.apache.hadoop.mapreduce.Counters rCounters);
+
   public abstract void addAllRunningAttempts(List<TaskAttemptId> taskAttempts);
   public abstract void addRunningAttempt(TaskAttemptId taskAttempt);
   public abstract void removeRunningAttempt(int index);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0e0ba80/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskAttemptReportPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskAttemptReportPBImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskAttemptReportPBImpl.java
index 96be84a..4677512 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskAttemptReportPBImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskAttemptReportPBImpl.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.mapreduce.v2.api.records.impl.pb;
 
-
+import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.v2.api.records.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.Phase;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
@@ -42,12 +42,12 @@ public class TaskAttemptReportPBImpl extends ProtoBase<TaskAttemptReportProto> i
   TaskAttemptReportProto proto = TaskAttemptReportProto.getDefaultInstance();
   TaskAttemptReportProto.Builder builder = null;
   boolean viaProto = false;
-  
+
   private TaskAttemptId taskAttemptId = null;
   private Counters counters = null;
+  private org.apache.hadoop.mapreduce.Counters rawCounters = null;
   private ContainerId containerId = null;
-  
-  
+
   public TaskAttemptReportPBImpl() {
     builder = TaskAttemptReportProto.newBuilder();
   }
@@ -68,6 +68,7 @@ public class TaskAttemptReportPBImpl extends ProtoBase<TaskAttemptReportProto> i
     if (this.taskAttemptId != null) {
       builder.setTaskAttemptId(convertToProtoFormat(this.taskAttemptId));
     }
+    convertRawCountersToCounters();
     if (this.counters != null) {
       builder.setCounters(convertToProtoFormat(this.counters));
     }
@@ -90,11 +91,12 @@ public class TaskAttemptReportPBImpl extends ProtoBase<TaskAttemptReportProto> i
     }
     viaProto = false;
   }
-    
-  
+
+
   @Override
   public Counters getCounters() {
     TaskAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
+    convertRawCountersToCounters();
     if (this.counters != null) {
       return this.counters;
     }
@@ -108,10 +110,32 @@ public class TaskAttemptReportPBImpl extends ProtoBase<TaskAttemptReportProto> i
   @Override
   public void setCounters(Counters counters) {
     maybeInitBuilder();
-    if (counters == null) 
+    if (counters == null) {
       builder.clearCounters();
+    }
     this.counters = counters;
+    this.rawCounters = null;
   }
+
+  @Override
+  public org.apache.hadoop.mapreduce.Counters
+        getRawCounters() {
+    return this.rawCounters;
+  }
+
+  @Override
+  public void setRawCounters(org.apache.hadoop.mapreduce.Counters rCounters) {
+    setCounters(null);
+    this.rawCounters = rCounters;
+  }
+
+  private void convertRawCountersToCounters() {
+    if (this.counters == null && this.rawCounters != null) {
+      this.counters = TypeConverter.toYarn(rawCounters);
+      this.rawCounters = null;
+    }
+  }
+
   @Override
   public long getStartTime() {
     TaskAttemptReportProtoOrBuilder p = viaProto ? proto : builder;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0e0ba80/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskReportPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskReportPBImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskReportPBImpl.java
index ba1245c..638bb66 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskReportPBImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskReportPBImpl.java
@@ -23,6 +23,7 @@ import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 
+import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.v2.api.records.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
@@ -37,21 +38,19 @@ import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskStateProto;
 import org.apache.hadoop.mapreduce.v2.util.MRProtoUtils;
 import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
 
-
-    
 public class TaskReportPBImpl extends ProtoBase<TaskReportProto> implements TaskReport {
   TaskReportProto proto = TaskReportProto.getDefaultInstance();
   TaskReportProto.Builder builder = null;
   boolean viaProto = false;
-  
+
   private TaskId taskId = null;
   private Counters counters = null;
+  private org.apache.hadoop.mapreduce.Counters rawCounters = null;
   private List<TaskAttemptId> runningAttempts = null;
   private TaskAttemptId successfulAttemptId = null;
   private List<String> diagnostics = null;
   private String status;
-  
-  
+
   public TaskReportPBImpl() {
     builder = TaskReportProto.newBuilder();
   }
@@ -72,6 +71,7 @@ public class TaskReportPBImpl extends ProtoBase<TaskReportProto> implements Task
     if (this.taskId != null) {
       builder.setTaskId(convertToProtoFormat(this.taskId));
     }
+    convertRawCountersToCounters();
     if (this.counters != null) {
       builder.setCounters(convertToProtoFormat(this.counters));
     }
@@ -100,11 +100,11 @@ public class TaskReportPBImpl extends ProtoBase<TaskReportProto> implements Task
     }
     viaProto = false;
   }
-    
-  
+
   @Override
   public Counters getCounters() {
     TaskReportProtoOrBuilder p = viaProto ? proto : builder;
+    convertRawCountersToCounters();
     if (this.counters != null) {
       return this.counters;
     }
@@ -118,10 +118,32 @@ public class TaskReportPBImpl extends ProtoBase<TaskReportProto> implements Task
   @Override
   public void setCounters(Counters counters) {
     maybeInitBuilder();
-    if (counters == null) 
+    if (counters == null) {
       builder.clearCounters();
+    }
     this.counters = counters;
+    this.rawCounters = null;
   }
+
+  @Override
+  public org.apache.hadoop.mapreduce.Counters
+      getRawCounters() {
+    return this.rawCounters;
+  }
+
+  @Override
+  public void setRawCounters(org.apache.hadoop.mapreduce.Counters rCounters) {
+    setCounters(null);
+    this.rawCounters = rCounters;
+  }
+
+  private void convertRawCountersToCounters() {
+    if (this.counters == null && this.rawCounters != null) {
+      this.counters = TypeConverter.toYarn(rawCounters);
+      this.rawCounters = null;
+    }
+  }
+
   @Override
   public long getStartTime() {
     TaskReportProtoOrBuilder p = viaProto ? proto : builder;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0e0ba80/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTask.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTask.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTask.java
index 8469b27..81fddaf 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTask.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTask.java
@@ -135,7 +135,7 @@ public class CompletedTask implements Task {
     if (counters == null) {
       counters = EMPTY_COUNTERS;
     }
-    report.setCounters(TypeConverter.toYarn(counters));
+    report.setRawCounters(counters);
     if (successfulAttempt != null) {
       report.setSuccessfulAttempt(successfulAttempt);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0e0ba80/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java
index 0aa2e0b..c87d82b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java
@@ -170,7 +170,7 @@ public class CompletedTaskAttempt implements TaskAttempt {
     }
     // report.setPhase(attemptInfo.get); //TODO
     report.setStateString(attemptInfo.getState());
-    report.setCounters(TypeConverter.toYarn(getCounters()));
+    report.setRawCounters(getCounters());
     report.setContainerId(attemptInfo.getContainerId());
     if (attemptInfo.getHostname() == null) {
       report.setNodeManagerHost("UNKNOWN");
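
The thread running through this patch is lazy counter conversion: history-server callers such as CompletedTask and CompletedTaskAttempt now hand the report their raw org.apache.hadoop.mapreduce.Counters via setRawCounters(), and the comparatively expensive TypeConverter.toYarn() call is deferred until getCounters() or getProto() actually needs the converted form, which is what speeds up the HsTasksBlock#render() loop. Below is a minimal sketch of the same pattern under hypothetical types; RawCounters, YarnCounters, and Report stand in for the real MapReduce records.

class LazyCountersSketch {

  // Hypothetical stand-ins for org.apache.hadoop.mapreduce.Counters and the
  // converted YARN-side Counters record.
  static class RawCounters { }
  static class YarnCounters { }

  static class Report {
    private YarnCounters counters;    // converted form, built on demand
    private RawCounters rawCounters;  // cheap to hold; conversion deferred

    void setRawCounters(RawCounters r) {
      this.counters = null;           // invalidate any converted copy
      this.rawCounters = r;
    }

    void setCounters(YarnCounters c) {
      this.counters = c;
      this.rawCounters = null;        // explicit counters win over raw ones
    }

    YarnCounters getCounters() {
      convertRawCountersToCounters(); // convert only when actually asked
      return counters;
    }

    // Stand-in for the TypeConverter.toYarn(...) call: runs at most once,
    // and only if a caller needs the converted form.
    private void convertRawCountersToCounters() {
      if (counters == null && rawCounters != null) {
        counters = new YarnCounters();
        rawCounters = null;
      }
    }
  }

  public static void main(String[] args) {
    Report report = new Report();
    report.setRawCounters(new RawCounters()); // fast path, as in CompletedTask
    System.out.println(report.getCounters() != null); // true, converted lazily
  }
}
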


[05/10] hadoop git commit: YARN-3990. AsyncDispatcher may overloaded with RMAppNodeUpdateEvent when Node is connected/disconnected. Contributed by Bibin A Chundatt

Posted by aw...@apache.org.
YARN-3990. AsyncDispatcher may overloaded with RMAppNodeUpdateEvent when Node is connected/disconnected. Contributed by Bibin A Chundatt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/32e490b6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/32e490b6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/32e490b6

Branch: refs/heads/HADOOP-12111
Commit: 32e490b6c035487e99df30ce80366446fe09bd6c
Parents: 93d50b7
Author: Jason Lowe <jl...@apache.org>
Authored: Fri Jul 31 17:37:24 2015 +0000
Committer: Jason Lowe <jl...@apache.org>
Committed: Fri Jul 31 17:37:24 2015 +0000

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |   3 +
 .../resourcemanager/NodesListManager.java       |  28 ++--
 .../rmapp/TestNodesListManager.java             | 162 +++++++++++++++++++
 3 files changed, 181 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/32e490b6/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 1da2dbc..61b3cce 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -760,6 +760,9 @@ Release 2.7.2 - UNRELEASED
     YARN-3925. ContainerLogsUtils#getContainerLogFile fails to read container
     log files from full disks. (zhihai xu via jlowe)
 
+    YARN-3990. AsyncDispatcher may overloaded with RMAppNodeUpdateEvent when
+    Node is connected/disconnected (Bibin A Chundatt via jlowe)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32e490b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
index 1ad74bf..b9c76fb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
@@ -178,12 +178,14 @@ public class NodesListManager extends AbstractService implements
       LOG.debug(eventNode + " reported unusable");
       unusableRMNodesConcurrentSet.add(eventNode);
       for(RMApp app: rmContext.getRMApps().values()) {
-        this.rmContext
-            .getDispatcher()
-            .getEventHandler()
-            .handle(
-                new RMAppNodeUpdateEvent(app.getApplicationId(), eventNode,
-                    RMAppNodeUpdateType.NODE_UNUSABLE));
+        if (!app.isAppFinalStateStored()) {
+          this.rmContext
+              .getDispatcher()
+              .getEventHandler()
+              .handle(
+                  new RMAppNodeUpdateEvent(app.getApplicationId(), eventNode,
+                      RMAppNodeUpdateType.NODE_UNUSABLE));
+        }
       }
       break;
     case NODE_USABLE:
@@ -192,12 +194,14 @@ public class NodesListManager extends AbstractService implements
         unusableRMNodesConcurrentSet.remove(eventNode);
       }
       for (RMApp app : rmContext.getRMApps().values()) {
-        this.rmContext
-            .getDispatcher()
-            .getEventHandler()
-            .handle(
-                new RMAppNodeUpdateEvent(app.getApplicationId(), eventNode,
-                    RMAppNodeUpdateType.NODE_USABLE));
+        if (!app.isAppFinalStateStored()) {
+          this.rmContext
+              .getDispatcher()
+              .getEventHandler()
+              .handle(
+                  new RMAppNodeUpdateEvent(app.getApplicationId(), eventNode,
+                      RMAppNodeUpdateType.NODE_USABLE));
+        }
       }
       break;
     default:
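
The guard added above means node usability changes are fanned out only to applications that have not yet stored their final state, so a cluster with many finished or killed apps no longer floods the AsyncDispatcher with RMAppNodeUpdateEvents on every node connect or disconnect. Here is a minimal sketch of that fan-out guard; App and EventBus are hypothetical stand-ins for RMApp and the dispatcher's event handler.

import java.util.List;

class NodeUpdateFanOutSketch {

  // Hypothetical stand-in for RMApp; isAppFinalStateStored() becomes a field.
  static class App {
    final String id;
    final boolean finalStateStored;
    App(String id, boolean finalStateStored) {
      this.id = id;
      this.finalStateStored = finalStateStored;
    }
  }

  // Hypothetical stand-in for the dispatcher's event handler.
  interface EventBus {
    void handle(String appId, String update);
  }

  static void fanOut(List<App> apps, String update, EventBus bus) {
    for (App app : apps) {
      if (!app.finalStateStored) {  // the new guard: skip completed apps
        bus.handle(app.id, update);
      }
    }
  }

  public static void main(String[] args) {
    List<App> apps = List.of(new App("app_running", false),
                             new App("app_killed", true));
    fanOut(apps, "NODE_UNUSABLE",
        (id, update) -> System.out.println(id + " <- " + update));
    // Prints only: app_running <- NODE_UNUSABLE
  }
}
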

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32e490b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestNodesListManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestNodesListManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestNodesListManager.java
new file mode 100644
index 0000000..5330976
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestNodesListManager.java
@@ -0,0 +1,162 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.rmapp;
+
+import static org.mockito.Matchers.argThat;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.spy;
+
+import java.util.ArrayList;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.AbstractEvent;
+import org.apache.hadoop.yarn.event.AsyncDispatcher;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.NodesListManager;
+import org.apache.hadoop.yarn.server.resourcemanager.NodesListManagerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.NodesListManagerEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.ArgumentMatcher;
+
+public class TestNodesListManager {
+  // To hold list of application for which event was received
+  ArrayList<ApplicationId> applist = new ArrayList<ApplicationId>();
+
+  @Test(timeout = 300000)
+  public void testNodeUsableEvent() throws Exception {
+    Logger rootLogger = LogManager.getRootLogger();
+    rootLogger.setLevel(Level.DEBUG);
+    final Dispatcher dispatcher = getDispatcher();
+    YarnConfiguration conf = new YarnConfiguration();
+    MockRM rm = new MockRM(conf) {
+      @Override
+      protected Dispatcher createDispatcher() {
+        return dispatcher;
+      }
+    };
+    rm.start();
+    MockNM nm1 = rm.registerNode("h1:1234", 28000);
+    NodesListManager nodesListManager = rm.getNodesListManager();
+    Resource clusterResource = Resource.newInstance(28000, 8);
+    RMNode rmnode = MockNodes.newNodeInfo(1, clusterResource);
+
+    // Create an app and kill it
+    RMApp killrmApp = rm.submitApp(200);
+    rm.killApp(killrmApp.getApplicationId());
+    rm.waitForState(killrmApp.getApplicationId(), RMAppState.KILLED);
+
+    // Create an app and let it finish
+    RMApp finshrmApp = rm.submitApp(2000);
+    nm1.nodeHeartbeat(true);
+    RMAppAttempt attempt = finshrmApp.getCurrentAppAttempt();
+    MockAM am = rm.sendAMLaunched(attempt.getAppAttemptId());
+    am.registerAppAttempt();
+    am.unregisterAppAttempt();
+    nm1.nodeHeartbeat(attempt.getAppAttemptId(), 1, ContainerState.COMPLETE);
+    am.waitForState(RMAppAttemptState.FINISHED);
+
+    // Create a submitted app
+    RMApp subrmApp = rm.submitApp(200);
+
+    // Fire Event for NODE_USABLE
+    nodesListManager.handle(new NodesListManagerEvent(
+        NodesListManagerEventType.NODE_USABLE, rmnode));
+    if (applist.size() > 0) {
+      Assert.assertTrue(
+          "Event based on running app expected " + subrmApp.getApplicationId(),
+          applist.contains(subrmApp.getApplicationId()));
+      Assert.assertFalse(
+          "Event based on finish app not expected "
+              + finshrmApp.getApplicationId(),
+          applist.contains(finshrmApp.getApplicationId()));
+      Assert.assertFalse(
+          "Event based on killed app not expected "
+              + killrmApp.getApplicationId(),
+          applist.contains(killrmApp.getApplicationId()));
+    } else {
+      Assert.fail("Events received should have beeen more than 1");
+    }
+    applist.clear();
+
+    // Fire Event for NODE_UNUSABLE
+    nodesListManager.handle(new NodesListManagerEvent(
+        NodesListManagerEventType.NODE_UNUSABLE, rmnode));
+    if (applist.size() > 0) {
+      Assert.assertTrue(
+          "Event based on running app expected " + subrmApp.getApplicationId(),
+          applist.contains(subrmApp.getApplicationId()));
+      Assert.assertFalse(
+          "Event based on finish app not expected "
+              + finshrmApp.getApplicationId(),
+          applist.contains(finshrmApp.getApplicationId()));
+      Assert.assertFalse(
+          "Event based on killed app not expected "
+              + killrmApp.getApplicationId(),
+          applist.contains(killrmApp.getApplicationId()));
+    } else {
+      Assert.fail("Events received should have beeen more than 1");
+    }
+
+  }
+
+  /*
+   * Create a dispatcher whose event handler records each RMAppNodeUpdateEvent
+   */
+  private Dispatcher getDispatcher() {
+    Dispatcher dispatcher = new AsyncDispatcher() {
+      @SuppressWarnings({ "rawtypes", "unchecked" })
+      @Override
+      public EventHandler getEventHandler() {
+
+        class EventArgMatcher extends ArgumentMatcher<AbstractEvent> {
+          @Override
+          public boolean matches(Object argument) {
+            if (argument instanceof RMAppNodeUpdateEvent) {
+              ApplicationId appid =
+                  ((RMAppNodeUpdateEvent) argument).getApplicationId();
+              applist.add(appid);
+            }
+            return false;
+          }
+        }
+
+        EventHandler handler = spy(super.getEventHandler());
+        doNothing().when(handler).handle(argThat(new EventArgMatcher()));
+        return handler;
+      }
+    };
+    return dispatcher;
+  }
+
+}
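
The getDispatcher() helper above leans on a Mockito trick worth spelling out: the ArgumentMatcher always returns false, so the doNothing() stub never applies and events are still dispatched normally, but Mockito consults the matcher on every call, which lets it record the ApplicationId of each RMAppNodeUpdateEvent as a side effect. A condensed sketch of the trick, assuming Mockito 2+ (where ArgumentMatcher is a functional interface and argThat lives in org.mockito.ArgumentMatchers):

import static org.mockito.ArgumentMatchers.argThat;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.spy;

import java.util.ArrayList;
import java.util.List;

class MatcherSideEffectSketch {

  static class RealHandler {
    public void handle(Object event) {
      System.out.println("dispatched " + event);
    }
  }

  public static void main(String[] args) {
    List<Object> seen = new ArrayList<>();
    RealHandler handler = spy(new RealHandler());
    // The matcher always returns false, so this stub never applies and the
    // real handle() keeps running; but Mockito evaluates the matcher on every
    // call, so it records each event as a side effect.
    doNothing().when(handler).handle(argThat(event -> {
      seen.add(event);
      return false;
    }));
    handler.handle("event-1");          // prints "dispatched event-1"
    System.out.println(seen);           // [event-1]
  }
}
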