Posted to commits@ambari.apache.org by ao...@apache.org on 2014/12/01 21:21:36 UTC

[2/2] ambari git commit: AMBARI-8498. Run HDFS as non-root (aonishuk)

AMBARI-8498. Run HDFS as non-root  (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/716b9b78
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/716b9b78
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/716b9b78

Branch: refs/heads/trunk
Commit: 716b9b783134a0fc8cb770b78277b8c9938111a5
Parents: 3506e4e
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Mon Dec 1 22:21:26 2014 +0200
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Dec 1 22:21:26 2014 +0200

----------------------------------------------------------------------
 .../core/providers/system.py                    |  9 --
 .../python/resource_management/core/shell.py    | 12 ++-
 .../libraries/functions/check_process_status.py | 13 ++-
 .../libraries/providers/hdfs_directory.py       |  3 +-
 .../scripts/shared_initialization.py            | 16 +++-
 .../hooks/before-START/files/checkForFormat.sh  |  6 +-
 .../scripts/shared_initialization.py            | 21 +++--
 .../HDFS/package/files/checkForFormat.sh        |  6 +-
 .../HDFS/package/scripts/hdfs_namenode.py       | 11 ++-
 .../services/HDFS/package/scripts/params.py     |  2 +-
 .../HDFS/package/scripts/service_check.py       | 16 ++--
 .../services/HDFS/package/scripts/utils.py      |  4 +-
 .../scripts/shared_initialization.py            | 16 +++-
 .../hooks/before-START/files/checkForFormat.sh  |  7 +-
 .../scripts/shared_initialization.py            | 22 +++--
 .../HDFS/package/files/checkForFormat.sh        |  6 +-
 .../HDFS/package/scripts/hdfs_namenode.py       | 13 ++-
 .../services/HDFS/package/scripts/namenode.py   |  1 -
 .../services/HDFS/package/scripts/params.py     |  2 +-
 .../HDFS/package/scripts/service_check.py       | 16 ++--
 .../services/HDFS/package/scripts/utils.py      | 23 +++--
 .../python/stacks/1.3.2/HDFS/test_datanode.py   | 26 +++---
 .../python/stacks/1.3.2/HDFS/test_namenode.py   | 48 ++++++-----
 .../stacks/1.3.2/HDFS/test_service_check.py     |  4 +-
 .../python/stacks/1.3.2/HDFS/test_snamenode.py  | 28 +++---
 .../hooks/before-INSTALL/test_before_install.py |  8 +-
 .../hooks/before-START/test_before_start.py     | 16 +++-
 .../python/stacks/2.0.6/HDFS/test_datanode.py   | 58 +++++++------
 .../stacks/2.0.6/HDFS/test_journalnode.py       | 28 +++---
 .../python/stacks/2.0.6/HDFS/test_namenode.py   | 90 ++++++++++++--------
 .../stacks/2.0.6/HDFS/test_service_check.py     |  2 +-
 .../python/stacks/2.0.6/HDFS/test_snamenode.py  | 28 +++---
 .../test/python/stacks/2.0.6/HDFS/test_zkfc.py  | 28 +++---
 .../hooks/before-INSTALL/test_before_install.py |  8 +-
 .../hooks/before-START/test_before_start.py     | 16 +++-
 35 files changed, 364 insertions(+), 249 deletions(-)
----------------------------------------------------------------------
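
The recurring pattern across these files: commands that used to be wrapped in
"su -s /bin/bash - <user> -c '...'" strings become plain commands handed to the
Execute resource with an explicit user, so the user switch is performed by the
framework (via sudo, per the shell.py change below) instead of being baked into
every command string. A minimal sketch of the two styles; the dfsadmin command
and the params module are illustrative stand-ins for the stack scripts:

    from resource_management import *  # Execute, as used by the stack scripts
    import params                      # stack params module (hdfs_user, ...)

    # before: the command string performs the user switch itself
    Execute("su -s /bin/bash - %s -c 'hdfs dfsadmin -safemode get'" % params.hdfs_user)

    # after: the framework switches users; the command stays unprivileged
    Execute("hdfs dfsadmin -safemode get",
            user=params.hdfs_user)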


http://git-wip-us.apache.org/repos/asf/ambari/blob/716b9b78/ambari-common/src/main/python/resource_management/core/providers/system.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/core/providers/system.py b/ambari-common/src/main/python/resource_management/core/providers/system.py
index db58898..e9121a3 100644
--- a/ambari-common/src/main/python/resource_management/core/providers/system.py
+++ b/ambari-common/src/main/python/resource_management/core/providers/system.py
@@ -228,15 +228,6 @@ class ExecuteProvider(Provider):
     Logger.debug("Executing %s" % self.resource)
 
     env = self.resource.environment
-    
-    # append current PATH, to self.resource.environment['PATH'] and self.resource.path
-    if 'PATH' in env:
-      env['PATH'] = os.pathsep.join([os.environ['PATH'], env['PATH']])
-    if self.resource.path:
-      if not 'PATH' in env:
-        env['PATH'] = ''
-      path = os.pathsep.join(self.resource.path) if isinstance(self.resource.path, (list, tuple)) else self.resource.path
-      env['PATH'] = os.pathsep.join([os.environ['PATH'], path])
           
     for i in range (0, self.resource.tries):
       try:

http://git-wip-us.apache.org/repos/asf/ambari/blob/716b9b78/ambari-common/src/main/python/resource_management/core/shell.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/core/shell.py b/ambari-common/src/main/python/resource_management/core/shell.py
index 7dbdc79..d7a1fb4 100644
--- a/ambari-common/src/main/python/resource_management/core/shell.py
+++ b/ambari-common/src/main/python/resource_management/core/shell.py
@@ -67,6 +67,16 @@ def _call(command, logoutput=False, throw_on_failure=True,
     # In case of need to create more complicated commands with sudo use as_sudo(command) function.
     err_msg = Logger.get_protected_text(("String command '%s' cannot be run as sudo. Please supply the command as a tuple of arguments") % (command))
     raise Fail(err_msg)
+  
+  
+  # append current PATH, to env['PATH'] and path
+  if 'PATH' in env:
+    env['PATH'] = os.pathsep.join([os.environ['PATH'], env['PATH']])
+  if path:
+    if not 'PATH' in env:
+      env['PATH'] = ''
+    path = os.pathsep.join(path) if isinstance(path, (list, tuple)) else path
+    env['PATH'] = os.pathsep.join([os.environ['PATH'], path])
 
   # In case we will use sudo, we have to put all the environment inside the command, 
   # since Popen environment gets reset within sudo.
@@ -78,7 +88,7 @@ def _call(command, logoutput=False, throw_on_failure=True,
   if user:
    # Outer environment gets reset within su. That's why we can't use environment passed to Popen.
     su_export_command = "export {0} ; ".format(environment_str) if environment_str else ""
-    subprocess_command = ["/usr/bin/sudo","-Hi","su", "-", user, "-s", "/bin/bash", "-c", su_export_command + bash_run_command]
+    subprocess_command = ["/usr/bin/sudo","-Hi","su", user, "-", "-s", "/bin/bash", "-c", su_export_command + bash_run_command]
   else:
     subprocess_command = ["/bin/bash","--login","-c", bash_run_command]
     
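
Together with the system.py hunk above, PATH merging now happens once in _call,
so it applies to every shell.call/checked_call, not only to Execute resources.
One subtlety worth noting: when a caller supplies both env['PATH'] and path,
the second assignment overwrites the first, so the result is the ambient PATH
joined with path. A self-contained sketch of the combined effect:

    import os

    def merged_env(env, path):
        # mirrors the two branches added to _call above
        if 'PATH' in env:
            env['PATH'] = os.pathsep.join([os.environ['PATH'], env['PATH']])
        if path:
            if 'PATH' not in env:
                env['PATH'] = ''
            joined = os.pathsep.join(path) if isinstance(path, (list, tuple)) else path
            # when both env['PATH'] and path are given, this join wins
            env['PATH'] = os.pathsep.join([os.environ['PATH'], joined])
        return env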

http://git-wip-us.apache.org/repos/asf/ambari/blob/716b9b78/ambari-common/src/main/python/resource_management/libraries/functions/check_process_status.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/check_process_status.py b/ambari-common/src/main/python/resource_management/libraries/functions/check_process_status.py
index 7fdecdc..fd6b900 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/check_process_status.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/check_process_status.py
@@ -22,6 +22,7 @@ Ambari Agent
 
 from resource_management.core.exceptions import ComponentIsNotRunning
 from resource_management.core.logger import Logger
+from resource_management.core import shell
 __all__ = ["check_process_status"]
 
 import os
@@ -43,14 +44,10 @@ def check_process_status(pid_file):
     except:
       Logger.debug("Pid file {0} does not exist".format(pid_file))
       raise ComponentIsNotRunning()
-    try:
-      # Kill will not actually kill the process
-      # From the doc:
-      # If sig is 0, then no signal is sent, but error checking is still
-      # performed; this can be used to check for the existence of a
-      # process ID or process group ID.
-      os.kill(pid, 0)
-    except OSError:
+
+    code, out = shell.call(["ps","-p", str(pid)])
+    
+    if code:
       Logger.debug("Process with pid {0} is not running. Stale pid file"
                 " at {1}".format(pid, pid_file))
       raise ComponentIsNotRunning()
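
The move away from os.kill(pid, 0) matters for a non-root agent: os.kill raises
OSError with errno EPERM when the pid exists but is owned by a user we cannot
signal, and the bare "except OSError" above treated that as a dead process.
"ps -p" needs no signal permission. A standard-library sketch of the difference:

    import errno
    import os
    import subprocess

    def pid_running_kill(pid):
        try:
            os.kill(pid, 0)  # signal 0: existence check, no signal delivered
            return True
        except OSError as e:
            # EPERM: the process exists but belongs to someone else; a bare
            # "except OSError" would misreport it as not running
            return e.errno == errno.EPERM

    def pid_running_ps(pid):
        # permission-free existence check, as in the hunk above
        with open(os.devnull, 'w') as devnull:
            return subprocess.call(['ps', '-p', str(pid)], stdout=devnull) == 0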

http://git-wip-us.apache.org/repos/asf/ambari/blob/716b9b78/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_directory.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_directory.py b/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_directory.py
index e09308d..9f87ccf 100644
--- a/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_directory.py
+++ b/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_directory.py
@@ -104,8 +104,7 @@ class HdfsDirectoryProvider(Provider):
                    chown_cmd=' && '.join(chown_commands)),
             user=hdp_hdfs_user,
             path=bin_dir,
-            not_if=format("sudo -Hsu {hdp_hdfs_user} <<< "
-                          "'export PATH=$PATH:{bin_dir} ; hadoop --config {hdp_conf_dir} fs -ls {dir_list_str}'")
+            not_if=as_user("hadoop --config {hdp_conf_dir} fs -ls {dir_list_str}", hdp_hdfs_user)
     )
 
     directories_list[:] = []
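
as_user centralizes the quoting that each hand-written "sudo -Hsu ... <<< '...'"
string did before. The real helper lives in resource_management.core.shell; a
rough approximation of its behavior (not its exact output), for readers of this
diff:

    import pipes

    def as_user(command, user):
        # wrap a command so it runs in the given user's login shell via sudo
        return "/usr/bin/sudo su {0} -l -s /bin/bash -c {1}".format(
            user, pipes.quote(command))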

http://git-wip-us.apache.org/repos/asf/ambari/blob/716b9b78/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/shared_initialization.py
index 27993cd..cfebedf 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/shared_initialization.py
@@ -30,6 +30,7 @@ def setup_java():
   jdk_curl_target = format("{artifact_dir}/{jdk_name}")
   java_dir = os.path.dirname(params.java_home)
   java_exec = format("{java_home}/bin/java")
+  tmp_java_dir = format("{tmp_dir}/jdk")
 
   if not params.jdk_name:
     return
@@ -45,12 +46,21 @@ def setup_java():
           environment = environment)
 
   if params.jdk_name.endswith(".bin"):
-    install_cmd = format("mkdir -p {java_dir} ; chmod +x {jdk_curl_target}; cd {java_dir} ; echo A | {jdk_curl_target} -noregister > /dev/null 2>&1")
+    chmod_cmd = ("chmod", "+x", jdk_curl_target)
+    install_cmd = format("mkdir -p {tmp_java_dir} && cd {tmp_java_dir} && echo A | {jdk_curl_target} -noregister && sudo cp -r {tmp_java_dir}/* {java_dir}")
   elif params.jdk_name.endswith(".gz"):
-    install_cmd = format("mkdir -p {java_dir} ; cd {java_dir} ; tar -xf {jdk_curl_target} > /dev/null 2>&1")
+    chmod_cmd = ("chmod","a+x", java_dir)
+    install_cmd = format("mkdir -p {tmp_java_dir} && cd {tmp_java_dir} && tar -xf {jdk_curl_target} && sudo cp -r {tmp_java_dir}/* {java_dir}")
+
+  Directory(java_dir
+  )
+  
+  Execute(chmod_cmd,
+          not_if = format("test -e {java_exec}"),
+          sudo = True    
+  )
 
   Execute(install_cmd,
-          path = ["/bin","/usr/bin/"],
           not_if = format("test -e {java_exec}")
   )
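
Splitting the old one-liner follows the rule enforced in shell.py (quoted
above): commands run with sudo must be argument tuples, not shell strings. The
privileged chmod therefore becomes a tuple with sudo=True, while the unpack
runs unprivileged in {tmp_java_dir} and finishes with an explicit "sudo cp"
into {java_dir}. The tuple form also keeps the target path out of any shell:

    # executes as: sudo chmod +x <jdk_curl_target>, with no shell in between
    Execute(('chmod', '+x', jdk_curl_target),
            not_if=format('test -e {java_exec}'),
            sudo=True)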
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/716b9b78/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/files/checkForFormat.sh
index 773510e..c5af7de 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/files/checkForFormat.sh
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/files/checkForFormat.sh
@@ -34,8 +34,8 @@ export list_of_non_empty_dirs=""
 
 mark_file=/var/run/hadoop/hdfs/namenode-formatted
 if [[ -f ${mark_file} ]] ; then
-  rm -f ${mark_file}
-  mkdir -p ${mark_dir}
+  sudo rm -f ${mark_file}
+  sudo mkdir -p ${mark_dir}
 fi
 
 if [[ ! -d $mark_dir ]] ; then
@@ -50,7 +50,7 @@ if [[ ! -d $mark_dir ]] ; then
   done
 
   if [[ $EXIT_CODE == 0 ]] ; then
-    su -s /bin/bash - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
+    sudo su ${hdfs_user} - -s /bin/bash -c "yes Y | hadoop --config ${conf_dir} ${command}"
     (( EXIT_CODE = $EXIT_CODE | $? ))
   else
     echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"

http://git-wip-us.apache.org/repos/asf/ambari/blob/716b9b78/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/shared_initialization.py
index 172abb1..8f8078f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/shared_initialization.py
@@ -166,15 +166,24 @@ def install_snappy():
   so_src_x86 = format("{so_src_dir_x86}/{snappy_so}")
   so_src_x64 = format("{so_src_dir_x64}/{snappy_so}")
   if params.has_namenode:
-    Execute(
-      format("mkdir -p {so_target_dir_x86}; ln -sf {so_src_x86} {so_target_x86}"))
-    Execute(
-      format("mkdir -p {so_target_dir_x64}; ln -sf {so_src_x64} {so_target_x64}"))
+    Directory([so_target_dir_x86, so_target_dir_x64],
+              recursive=True,
+    )    
+    Link(so_target_x86,
+         to=so_src_x86,
+    )
+    Link(so_target_x64,
+         to=so_src_x64,
+    )
 
 
 def create_javahome_symlink():
   if os.path.exists("/usr/jdk/jdk1.6.0_31") and not os.path.exists("/usr/jdk64/jdk1.6.0_31"):
-    Execute("mkdir -p /usr/jdk64/")
-    Execute("ln -s /usr/jdk/jdk1.6.0_31 /usr/jdk64/jdk1.6.0_31")
+    Directory("/usr/jdk64/",
+         recursive=True,
+    )
+    Link("/usr/jdk/jdk1.6.0_31",
+         to="/usr/jdk64/jdk1.6.0_31",
+    )
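
Directory and Link are the resource-based equivalents of the removed mkdir/ln
one-liners, letting the agent apply its own privilege handling and idempotence.
Note the argument order: Link(path, to=target) creates path pointing at target,
matching the old "ln -s target path":

    Directory('/usr/jdk64/',
              recursive=True)           # mkdir -p /usr/jdk64/
    Link('/usr/jdk64/jdk1.6.0_31',      # the symlink being created
         to='/usr/jdk/jdk1.6.0_31')     # the existing JDK it points at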
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/716b9b78/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/files/checkForFormat.sh
index 773510e..c5af7de 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/files/checkForFormat.sh
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/files/checkForFormat.sh
@@ -34,8 +34,8 @@ export list_of_non_empty_dirs=""
 
 mark_file=/var/run/hadoop/hdfs/namenode-formatted
 if [[ -f ${mark_file} ]] ; then
-  rm -f ${mark_file}
-  mkdir -p ${mark_dir}
+  sudo rm -f ${mark_file}
+  sudo mkdir -p ${mark_dir}
 fi
 
 if [[ ! -d $mark_dir ]] ; then
@@ -50,7 +50,7 @@ if [[ ! -d $mark_dir ]] ; then
   done
 
   if [[ $EXIT_CODE == 0 ]] ; then
-    su -s /bin/bash - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
+    sudo su ${hdfs_user} - -s /bin/bash -c "yes Y | hadoop --config ${conf_dir} ${command}"
     (( EXIT_CODE = $EXIT_CODE | $? ))
   else
     echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"

http://git-wip-us.apache.org/repos/asf/ambari/blob/716b9b78/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/hdfs_namenode.py
index e0ff40e..5a023eb 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/hdfs_namenode.py
@@ -37,13 +37,14 @@ def namenode(action=None, do_format=True):
       create_log_dir=True
     )
 
-    namenode_safe_mode_off = format("su -s /bin/bash - {hdfs_user} -c 'hadoop dfsadmin -safemode get' | grep 'Safe mode is OFF'")
+    namenode_safe_mode_off = format("hadoop dfsadmin -safemode get | grep 'Safe mode is OFF'")
     if params.security_enabled:
       Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
               user = params.hdfs_user)
     Execute(namenode_safe_mode_off,
             tries=40,
-            try_sleep=10
+            try_sleep=10,
+            user=params.hdfs_user
     )
     create_hdfs_directories()
 
@@ -99,11 +100,13 @@ def format_namenode(force=None):
            content=StaticFile("checkForFormat.sh"),
            mode=0755)
       Execute(format(
-        "sh {tmp_dir}/checkForFormat.sh {hdfs_user} {hadoop_conf_dir} {mark_dir} "
+        "{tmp_dir}/checkForFormat.sh {hdfs_user} {hadoop_conf_dir} {mark_dir} "
         "{dfs_name_dir}"),
               not_if=format("test -d {mark_dir}"),
               path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin")
-    Execute(format("mkdir -p {mark_dir}"))
+      Directory(mark_dir,
+        recursive = True
+      )
 
 
 def decommission():

http://git-wip-us.apache.org/repos/asf/ambari/blob/716b9b78/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py
index a73d711..ae0a05d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py
@@ -25,7 +25,7 @@ import itertools
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
-ulimit_cmd = "ulimit -c unlimited; "
+ulimit_cmd = "ulimit -c unlimited && "
 
 #security params
 security_enabled = config['configurations']['cluster-env']['security_enabled']
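
Joining with "&&" instead of ";" makes the ulimit prefix short-circuit: with
";" the daemon command runs even if ulimit fails, and only the last command's
status is reported; with "&&" the chain stops at the first failure and the
Execute resource sees a failing exit code. For example:

    # ';'  -> daemon starts regardless; exit code is the daemon's alone
    # '&&' -> daemon only starts if ulimit succeeded
    daemon_cmd = ulimit_cmd + "hadoop-daemon.sh start namenode"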

http://git-wip-us.apache.org/repos/asf/ambari/blob/716b9b78/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/service_check.py
index 9bc681a..be869fd 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/service_check.py
@@ -32,7 +32,7 @@ class HdfsServiceCheck(Script):
     safemode_command = "dfsadmin -safemode get | grep OFF"
 
     create_dir_cmd = format("fs -mkdir {dir} ; hadoop fs -chmod 777 {dir}")
-    test_dir_exists = format("su -s /bin/bash - {smoke_user} -c 'hadoop fs -test -e {dir}'")
+    test_dir_exists = as_user(format("hadoop fs -test -e {dir}"), params.smoke_user)
     cleanup_cmd = format("fs -rm {tmp_file}")
    #cleanup put below to handle retries; if retrying there will be a stale file
     #that needs cleanup; exit code is fn of second command
@@ -40,9 +40,9 @@ class HdfsServiceCheck(Script):
       "{cleanup_cmd}; hadoop fs -put /etc/passwd {tmp_file}")
     test_cmd = format("fs -test -e {tmp_file}")
     if params.security_enabled:
-      Execute(format(
-        "su -s /bin/bash - {smoke_user} -c '{kinit_path_local} -kt {smoke_user_keytab} "
-        "{smoke_user}'"))
+      Execute(format("{kinit_path_local} -kt {smoke_user_keytab} {smoke_user}"),
+              user=params.smoke_user,
+      )
     ExecuteHadoop(safemode_command,
                   user=params.smoke_user,
                   logoutput=True,
@@ -74,20 +74,18 @@ class HdfsServiceCheck(Script):
     )
     if params.has_journalnode_hosts:
       journalnode_port = params.journalnode_port
-      smoke_test_user = params.smoke_user
       checkWebUIFileName = "checkWebUI.py"
       checkWebUIFilePath = format("{tmp_dir}/{checkWebUIFileName}")
       comma_sep_jn_hosts = ",".join(params.journalnode_hosts)
-      checkWebUICmd = format(
-        "su -s /bin/bash - {smoke_test_user} -c 'python {checkWebUIFilePath} -m "
-        "{comma_sep_jn_hosts} -p {journalnode_port}'")
+      checkWebUICmd = format("python {checkWebUIFilePath} -m {comma_sep_jn_hosts} -p {journalnode_port}")
       File(checkWebUIFilePath,
            content=StaticFile(checkWebUIFileName))
 
       Execute(checkWebUICmd,
               logoutput=True,
               try_sleep=3,
-              tries=5
+              tries=5,
+              user=params.smoke_user
       )
 
     if params.has_zkfc_hosts:

http://git-wip-us.apache.org/repos/asf/ambari/blob/716b9b78/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/utils.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/utils.py
index f5f5cde..a5a5ec4 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/utils.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/utils.py
@@ -49,15 +49,17 @@ def service(action=None, name=None, user=None, create_pid_dir=False,
       pid_file = format(
         "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
 
-  daemon_cmd = format("{ulimit_cmd} su -s /bin/bash - {user} -c '{cmd} {action} {name}'")
+  daemon_cmd = format("{ulimit_cmd} {cmd} {action} {name}")
 
   service_is_up = check_process if action == "start" else None
   #remove pid file from dead process
   File(pid_file,
        action="delete",
        not_if=check_process,
+       
   )
   Execute(daemon_cmd,
+          user=user,
           not_if=service_is_up
   )
   if action == "stop":
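
With su gone from daemon_cmd, both the pid-file cleanup's not_if and the daemon
start/stop run through the framework's user handling; the concatenated command
is exactly what the updated tests below assert. Condensed, using the variables
of the surrounding service() function:

    daemon_cmd = format('{ulimit_cmd} {cmd} {action} {name}')
    # e.g. 'ulimit -c unlimited && /usr/lib/hadoop/bin/hadoop-daemon.sh '
    #      '--config /etc/hadoop/conf start namenode'
    Execute(daemon_cmd,
            user=user,              # hdfs normally; root for a secure DataNode
            not_if=service_is_up)   # skip start when the pid is already live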

http://git-wip-us.apache.org/repos/asf/ambari/blob/716b9b78/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py
index b3119ea..3cfb0b7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py
@@ -30,6 +30,7 @@ def setup_java():
   jdk_curl_target = format("{artifact_dir}/{jdk_name}")
   java_dir = os.path.dirname(params.java_home)
   java_exec = format("{java_home}/bin/java")
+  tmp_java_dir = format("{tmp_dir}/jdk")
 
   if not params.jdk_name:
     return
@@ -46,12 +47,21 @@ def setup_java():
           environment = environment)
 
   if params.jdk_name.endswith(".bin"):
-    install_cmd = format("mkdir -p {java_dir} ; chmod +x {jdk_curl_target}; cd {java_dir} ; echo A | {jdk_curl_target} -noregister > /dev/null 2>&1")
+    chmod_cmd = ("chmod", "+x", jdk_curl_target)
+    install_cmd = format("mkdir -p {tmp_java_dir} && cd {tmp_java_dir} && echo A | {jdk_curl_target} -noregister && sudo cp -r {tmp_java_dir}/* {java_dir}")
   elif params.jdk_name.endswith(".gz"):
-    install_cmd = format("mkdir -p {java_dir} ; chmod a+x {java_dir} ; cd {java_dir} ; tar -xf {jdk_curl_target} > /dev/null 2>&1")
+    chmod_cmd = ("chmod","a+x", java_dir)
+    install_cmd = format("mkdir -p {tmp_java_dir} && cd {tmp_java_dir} && tar -xf {jdk_curl_target} && sudo cp -r {tmp_java_dir}/* {java_dir}")
+
+  Directory(java_dir
+  )
+  
+  Execute(chmod_cmd,
+          not_if = format("test -e {java_exec}"),
+          sudo = True    
+  )
 
   Execute(install_cmd,
-          path = ["/bin","/usr/bin/"],
           not_if = format("test -e {java_exec}")
   )
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/716b9b78/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/checkForFormat.sh
index 814cbc3..eea9847 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/checkForFormat.sh
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/checkForFormat.sh
@@ -36,8 +36,8 @@ export list_of_non_empty_dirs=""
 
 mark_file=/var/run/hadoop/hdfs/namenode-formatted
 if [[ -f ${mark_file} ]] ; then
-  rm -f ${mark_file}
-  mkdir -p ${mark_dir}
+  sudo rm -f ${mark_file}
+  sudo mkdir -p ${mark_dir}
 fi
 
 if [[ ! -d $mark_dir ]] ; then
@@ -52,8 +52,7 @@ if [[ ! -d $mark_dir ]] ; then
   done
 
   if [[ $EXIT_CODE == 0 ]] ; then
-    export PATH=$PATH:$bin_dir
-    su -s /bin/bash - ${hdfs_user} -c "yes Y | hdfs --config ${conf_dir} ${command}"
+    sudo su ${hdfs_user} - -s /bin/bash -c "export PATH=$PATH:$bin_dir ; yes Y | hdfs --config ${conf_dir} ${command}"
     (( EXIT_CODE = $EXIT_CODE | $? ))
   else
     echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"

http://git-wip-us.apache.org/repos/asf/ambari/blob/716b9b78/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
index 6fda3f7..655c731 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
@@ -165,14 +165,22 @@ def install_snappy():
   so_src_x86 = format("{so_src_dir_x86}/{snappy_so}")
   so_src_x64 = format("{so_src_dir_x64}/{snappy_so}")
   if params.has_namenode:
-    Execute(
-      format("mkdir -p {so_target_dir_x86}; ln -sf {so_src_x86} {so_target_x86}"))
-    Execute(
-      format("mkdir -p {so_target_dir_x64}; ln -sf {so_src_x64} {so_target_x64}"))
-
+    Directory([so_target_dir_x86, so_target_dir_x64],
+              recursive=True,
+    )    
+    Link(so_target_x86,
+         to=so_src_x86,
+    )
+    Link(so_target_x64,
+         to=so_src_x64,
+    )
 
 def create_javahome_symlink():
   if os.path.exists("/usr/jdk/jdk1.6.0_31") and not os.path.exists("/usr/jdk64/jdk1.6.0_31"):
-    Execute("mkdir -p /usr/jdk64/")
-    Execute("ln -s /usr/jdk/jdk1.6.0_31 /usr/jdk64/jdk1.6.0_31")
+    Directory("/usr/jdk64/",
+         recursive=True,
+    )
+    Link("/usr/jdk/jdk1.6.0_31",
+         to="/usr/jdk64/jdk1.6.0_31",
+    )
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/716b9b78/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkForFormat.sh
index e6fd87d..54405f6 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkForFormat.sh
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkForFormat.sh
@@ -38,8 +38,8 @@ export list_of_non_empty_dirs=""
 
 mark_file=/var/run/hadoop/hdfs/namenode-formatted
 if [[ -f ${mark_file} ]] ; then
-  rm -f ${mark_file}
-  mkdir -p ${mark_dir}
+  sudo rm -f ${mark_file}
+  sudo mkdir -p ${mark_dir}
 fi
 
 if [[ -d $old_mark_dir ]] ; then
@@ -58,7 +58,7 @@ if [[ ! -d $mark_dir ]] ; then
   done
 
   if [[ $EXIT_CODE == 0 ]] ; then
-    su -s /bin/bash - ${hdfs_user} -c "export PATH=$PATH:${bin_dir} ; yes Y | hdfs --config ${conf_dir} ${command}"
+    sudo su ${hdfs_user} - -s /bin/bash -c "export PATH=$PATH:${bin_dir} ; yes Y | hdfs --config ${conf_dir} ${command}"
     (( EXIT_CODE = $EXIT_CODE | $? ))
   else
     echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"

http://git-wip-us.apache.org/repos/asf/ambari/blob/716b9b78/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py
index de7fd58..c773902 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py
@@ -57,11 +57,11 @@ def namenode(action=None, do_format=True):
               user = params.hdfs_user)
 
     if params.dfs_ha_enabled:
-      dfs_check_nn_status_cmd = format("su -s /bin/bash - {hdfs_user} -c 'export PATH=$PATH:{hadoop_bin_dir} ; hdfs --config {hadoop_conf_dir} haadmin -getServiceState {namenode_id} | grep active > /dev/null'")
+      dfs_check_nn_status_cmd = as_user(format("hdfs --config {hadoop_conf_dir} haadmin -getServiceState {namenode_id} | grep active"), params.hdfs_user, env={'PATH':params.hadoop_bin_dir})
     else:
       dfs_check_nn_status_cmd = None
 
-    namenode_safe_mode_off = format("su -s /bin/bash - {hdfs_user} -c 'export PATH=$PATH:{hadoop_bin_dir} ; hdfs --config {hadoop_conf_dir} dfsadmin -safemode get' | grep 'Safe mode is OFF'")
+    namenode_safe_mode_off = format("hadoop dfsadmin -safemode get | grep 'Safe mode is OFF'")
 
     # If HA is enabled and it is in standby, then stay in safemode, otherwise, leave safemode.
     leave_safe_mode = True
@@ -74,13 +74,18 @@ def namenode(action=None, do_format=True):
       # First check if Namenode is not in 'safemode OFF' (equivalent to safemode ON), if so, then leave it
       code, out = shell.call(namenode_safe_mode_off)
       if code != 0:
-        leave_safe_mode_cmd = format("su -s /bin/bash - {hdfs_user} -c 'export PATH=$PATH:{hadoop_bin_dir} ; hdfs --config {hadoop_conf_dir} dfsadmin -safemode leave'")
-        Execute(leave_safe_mode_cmd)
+        leave_safe_mode_cmd = format("hdfs --config {hadoop_conf_dir} dfsadmin -safemode leave")
+        Execute(leave_safe_mode_cmd,
+                user=params.hdfs_user,
+                path=[params.hadoop_bin_dir],
+        )
 
     # Verify if Namenode should be in safemode OFF
     Execute(namenode_safe_mode_off,
             tries=40,
             try_sleep=10,
+            path=[params.hadoop_bin_dir],
+            user=params.hdfs_user,
             only_if=dfs_check_nn_status_cmd #skip when HA not active
     )
     create_hdfs_directories(dfs_check_nn_status_cmd)
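
Here the user switch and PATH move into Execute keyword arguments, while the HA
status probe keeps its PATH through as_user's env argument. Because
dfs_check_nn_status_cmd is None when HA is disabled, only_if is skipped and the
safemode wait always runs; with HA enabled it runs only on the active NameNode.
Condensed, with the names from the hunk above:

    Execute(namenode_safe_mode_off,           # "hadoop dfsadmin -safemode get | grep ..."
            tries=40, try_sleep=10,
            path=[params.hadoop_bin_dir],
            user=params.hdfs_user,
            only_if=dfs_check_nn_status_cmd)  # None when HA is off: run unconditionally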

http://git-wip-us.apache.org/repos/asf/ambari/blob/716b9b78/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
index ee2bb1d..5cc00fc 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
@@ -63,7 +63,6 @@ class NameNode(Script):
     import status_params
 
     env.set_params(status_params)
-    Execute(format("echo '{namenode_pid_file}' >> /1.txt"))
     check_process_status(status_params.namenode_pid_file)
     pass
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/716b9b78/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
index ea4132a..02e6235 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
@@ -78,7 +78,7 @@ hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
 limits_conf_dir = "/etc/security/limits.d"
 
 execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir
-ulimit_cmd = "ulimit -c unlimited; "
+ulimit_cmd = "ulimit -c unlimited && "
 
 #security params
 smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']

http://git-wip-us.apache.org/repos/asf/ambari/blob/716b9b78/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/service_check.py
index 5ef78b7..46e6f53 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/service_check.py
@@ -33,7 +33,7 @@ class HdfsServiceCheck(Script):
 
     create_dir_cmd = format("fs -mkdir {dir}")
     chmod_command = format("fs -chmod 777 {dir}")
-    test_dir_exists = format("su -s /bin/bash - {smoke_user} -c '{hadoop_bin_dir}/hadoop --config {hadoop_conf_dir} fs -test -e {dir}'")
+    test_dir_exists = as_user(format("{hadoop_bin_dir}/hadoop --config {hadoop_conf_dir} fs -test -e {dir}"), params.smoke_user)
     cleanup_cmd = format("fs -rm {tmp_file}")
    #cleanup put below to handle retries; if retrying there will be a stale file
     #that needs cleanup; exit code is fn of second command
@@ -41,9 +41,9 @@ class HdfsServiceCheck(Script):
       "{cleanup_cmd}; hadoop --config {hadoop_conf_dir} fs -put /etc/passwd {tmp_file}")
     test_cmd = format("fs -test -e {tmp_file}")
     if params.security_enabled:
-      Execute(format(
-        "su -s /bin/bash - {smoke_user} -c '{kinit_path_local} -kt {smoke_user_keytab} "
-        "{smoke_user}'"))
+      Execute(format("{kinit_path_local} -kt {smoke_user_keytab} {smoke_user}"),
+        user=params.smoke_user
+      )
     ExecuteHadoop(safemode_command,
                   user=params.smoke_user,
                   logoutput=True,
@@ -87,13 +87,10 @@ class HdfsServiceCheck(Script):
     )
     if params.has_journalnode_hosts:
       journalnode_port = params.journalnode_port
-      smoke_test_user = params.smoke_user
       checkWebUIFileName = "checkWebUI.py"
       checkWebUIFilePath = format("{tmp_dir}/{checkWebUIFileName}")
       comma_sep_jn_hosts = ",".join(params.journalnode_hosts)
-      checkWebUICmd = format(
-        "su -s /bin/bash - {smoke_test_user} -c 'python {checkWebUIFilePath} -m "
-        "{comma_sep_jn_hosts} -p {journalnode_port}'")
+      checkWebUICmd = format("python {checkWebUIFilePath} -m {comma_sep_jn_hosts} -p {journalnode_port}")
       File(checkWebUIFilePath,
            content=StaticFile(checkWebUIFileName),
            mode=0775)
@@ -101,7 +98,8 @@ class HdfsServiceCheck(Script):
       Execute(checkWebUICmd,
               logoutput=True,
               try_sleep=3,
-              tries=5
+              tries=5,
+              user=params.smoke_user
       )
 
     if params.is_namenode_master:

http://git-wip-us.apache.org/repos/asf/ambari/blob/716b9b78/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/utils.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/utils.py
index 9fcc424..f923b3f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/utils.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/utils.py
@@ -62,23 +62,19 @@ def service(action=None, name=None, user=None, create_pid_dir=False,
         # We need special handling for this case to handle the situation
         # when we configure non-root secure DN and then restart it
         # to handle new configs. Otherwise we will not be able to stop
-        # a running instance
+        # a running instance 
         user = "root"
+        
         try:
-          with open(hadoop_secure_dn_pid_file, 'r') as f:
-            pid = f.read()
-          os.kill(int(pid), 0)
-
+          check_process_status(hadoop_secure_dn_pid_file)
+          
           custom_export = {
             'HADOOP_SECURE_DN_USER': params.hdfs_user
           }
           hadoop_env_exports.update(custom_export)
-        except IOError:
-          pass  # Can not open pid file
-        except ValueError:
-          pass  # Pid file content is invalid
-        except OSError:
-          pass  # Process is not running
+          
+        except ComponentIsNotRunning:
+          pass
 
 
   hadoop_env_exports_str = ''
@@ -90,7 +86,7 @@ def service(action=None, name=None, user=None, create_pid_dir=False,
     "{hadoop_bin}/hadoop-daemon.sh")
   cmd = format("{hadoop_daemon} --config {hadoop_conf_dir}")
 
-  daemon_cmd = format("{ulimit_cmd} su -s /bin/bash - {user} -c '{cmd} {action} {name}'")
+  daemon_cmd = format("{ulimit_cmd} {cmd} {action} {name}")
 
   service_is_up = check_process if action == "start" else None
   #remove pid file from dead process
@@ -99,7 +95,8 @@ def service(action=None, name=None, user=None, create_pid_dir=False,
        not_if=check_process,
   )
   Execute(daemon_cmd,
-          not_if=service_is_up
+          not_if=service_is_up,
+          user=user
   )
 
   #After performing the desired action, perform additional tasks.
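
In the first hunk above, the manual pid-file read and os.kill probe for a
secure DataNode collapse into check_process_status, which raises
ComponentIsNotRunning in place of the three IOError/ValueError/OSError cases
and avoids the EPERM pitfall noted for check_process_status.py earlier in this
commit. The intent, condensed (hadoop_secure_dn_pid_file and params as in the
surrounding function):

    user = 'root'  # a secure DataNode must be stopped as root
    try:
        check_process_status(hadoop_secure_dn_pid_file)
        # a live jsvc DataNode: export HADOOP_SECURE_DN_USER so the
        # root-run stop script manages the hdfs-owned process
        hadoop_env_exports.update({'HADOOP_SECURE_DN_USER': params.hdfs_user})
    except ComponentIsNotRunning:
        pass  # nothing running: take the plain start path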

http://git-wip-us.apache.org/repos/asf/ambari/blob/716b9b78/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_datanode.py b/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_datanode.py
index 086fa2b..fadd512 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_datanode.py
+++ b/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_datanode.py
@@ -49,9 +49,10 @@ class TestDatanode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su -s /bin/bash - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode\'',
-                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
-                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited &&  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode',
+        not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
+        user = 'hdfs',
+    )
     self.assertNoMoreResources()
 
   def test_stop_default(self):
@@ -72,9 +73,10 @@ class TestDatanode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su -s /bin/bash - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode\'',
-                              not_if = None,
-                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited &&  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode',
+        not_if = None,
+        user = 'hdfs',
+    )
     self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid',
                               action = ['delete'],
                               )
@@ -108,9 +110,10 @@ class TestDatanode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su -s /bin/bash - root -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode\'',
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited &&  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode',
                               not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
-                              )
+                              user = 'root',
+    )
     self.assertNoMoreResources()
 
   def test_stop_secured(self):
@@ -131,9 +134,10 @@ class TestDatanode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su -s /bin/bash - root -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode\'',
-                              not_if = None,
-                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited &&  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode',
+        not_if = None,
+        user = 'root',
+    )
     self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid',
                               action = ['delete'],
                               )

http://git-wip-us.apache.org/repos/asf/ambari/blob/716b9b78/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_namenode.py b/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_namenode.py
index 91a678d..0507a09 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_namenode.py
+++ b/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_namenode.py
@@ -43,12 +43,13 @@ class TestNamenode(RMFTestCase):
                               content = StaticFile('checkForFormat.sh'),
                               mode = 0755,
                               )
-    self.assertResourceCalled('Execute', 'sh /tmp/checkForFormat.sh hdfs /etc/hadoop/conf /var/run/hadoop/hdfs/namenode/formatted/ /hadoop/hdfs/namenode',
+    self.assertResourceCalled('Execute', '/tmp/checkForFormat.sh hdfs /etc/hadoop/conf /var/run/hadoop/hdfs/namenode/formatted/ /hadoop/hdfs/namenode',
                               path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
                               not_if = 'test -d /var/run/hadoop/hdfs/namenode/formatted/',
                               )
-    self.assertResourceCalled('Execute', 'mkdir -p /var/run/hadoop/hdfs/namenode/formatted/',
-                              )
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs/namenode/formatted/',
+        recursive = True,
+    )
     self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
                               owner = 'hdfs',
                               recursive = True,
@@ -61,13 +62,15 @@ class TestNamenode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su -s /bin/bash - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode\'',
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited &&  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode',
                               not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
-                              )
-    self.assertResourceCalled('Execute', "su -s /bin/bash - hdfs -c 'hadoop dfsadmin -safemode get' | grep 'Safe mode is OFF'",
+                              user = 'hdfs',
+    )
+    self.assertResourceCalled('Execute', "hadoop dfsadmin -safemode get | grep 'Safe mode is OFF'",
                               tries = 40,
+                              user = 'hdfs',
                               try_sleep = 10,
-                              )
+    )
     self.assertResourceCalled('HdfsDirectory', '/tmp',
                               security_enabled = False,
                               keytab = UnknownConfigurationMock(),
@@ -108,9 +111,10 @@ class TestNamenode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su -s /bin/bash - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop namenode\'',
-                              not_if = None,
-                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited &&  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop namenode',
+        not_if = None,
+        user = 'hdfs',
+    )
     self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid',
                               action = ['delete'],
                               )
@@ -136,12 +140,13 @@ class TestNamenode(RMFTestCase):
                               content = StaticFile('checkForFormat.sh'),
                               mode = 0755,
                               )
-    self.assertResourceCalled('Execute', 'sh /tmp/checkForFormat.sh hdfs /etc/hadoop/conf /var/run/hadoop/hdfs/namenode/formatted/ /hadoop/hdfs/namenode',
+    self.assertResourceCalled('Execute', '/tmp/checkForFormat.sh hdfs /etc/hadoop/conf /var/run/hadoop/hdfs/namenode/formatted/ /hadoop/hdfs/namenode',
                               path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
                               not_if = 'test -d /var/run/hadoop/hdfs/namenode/formatted/',
                               )
-    self.assertResourceCalled('Execute', 'mkdir -p /var/run/hadoop/hdfs/namenode/formatted/',
-                              )
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs/namenode/formatted/',
+        recursive = True,
+    )
     self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
                               owner = 'hdfs',
                               recursive = True,
@@ -154,15 +159,17 @@ class TestNamenode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su -s /bin/bash - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode\'',
-                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
-                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited &&  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode',
+        not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
+        user = 'hdfs',
+    )
     self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hdfs.headless.keytab hdfs',
                               user = 'hdfs',
                               )
-    self.assertResourceCalled('Execute', "su -s /bin/bash - hdfs -c 'hadoop dfsadmin -safemode get' | grep 'Safe mode is OFF'",
+    self.assertResourceCalled('Execute', "hadoop dfsadmin -safemode get | grep 'Safe mode is OFF'",
                               tries = 40,
                               try_sleep = 10,
+                              user = 'hdfs'
                               )
     self.assertResourceCalled('HdfsDirectory', '/tmp',
                               security_enabled = True,
@@ -204,9 +211,10 @@ class TestNamenode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su -s /bin/bash - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop namenode\'',
-                              not_if = None,
-                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited &&  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop namenode',
+        not_if = None,
+        user = 'hdfs',
+    )
     self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid',
                               action = ['delete'],
                               )

http://git-wip-us.apache.org/repos/asf/ambari/blob/716b9b78/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_service_check.py b/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_service_check.py
index 353745d..1bddb80 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_service_check.py
+++ b/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_service_check.py
@@ -39,7 +39,7 @@ class TestServiceCheck(RMFTestCase):
                         command="service_check",
                         config_file="secured.json"
     )
-    self.assertResourceCalled('Execute', "su -s /bin/bash - ambari-qa -c '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa'",)
+    self.assertResourceCalled('Execute', "/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa",user='ambari-qa')
     self.assert_service_check()
     self.assertNoMoreResources()
         
@@ -54,7 +54,7 @@ class TestServiceCheck(RMFTestCase):
     self.assertResourceCalled('ExecuteHadoop', 'fs -mkdir /tmp ; hadoop fs -chmod 777 /tmp',
                               conf_dir = '/etc/hadoop/conf',
                               logoutput = True,
-                              not_if = "su -s /bin/bash - ambari-qa -c 'hadoop fs -test -e /tmp'",
+                              not_if = "/usr/bin/sudo -Hi su - ambari-qa -s /bin/bash -c 'export {ENV_PLACEHOLDER} ; hadoop fs -test -e /tmp'",
                               try_sleep = 3,
                               tries = 5,
                               user = 'ambari-qa',

http://git-wip-us.apache.org/repos/asf/ambari/blob/716b9b78/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_snamenode.py b/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_snamenode.py
index 8f65f11..b556967 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_snamenode.py
+++ b/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_snamenode.py
@@ -61,9 +61,10 @@ class TestSNamenode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su -s /bin/bash - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode\'',
-                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid` >/dev/null 2>&1',
-                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited &&  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode',
+        not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid` >/dev/null 2>&1',
+        user = 'hdfs',
+    )
     self.assertNoMoreResources()
 
   def test_stop_default(self):
@@ -84,9 +85,10 @@ class TestSNamenode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su -s /bin/bash - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop secondarynamenode\'',
-                              not_if = None,
-                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited &&  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop secondarynamenode',
+        not_if = None,
+        user = 'hdfs',
+    )
     self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid',
                               action = ['delete'],
                               )
@@ -130,9 +132,10 @@ class TestSNamenode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su -s /bin/bash - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode\'',
-                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid` >/dev/null 2>&1',
-                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited &&  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode',
+        not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid` >/dev/null 2>&1',
+        user = 'hdfs',
+    )
     self.assertNoMoreResources()
 
   def test_stop_secured(self):
@@ -153,9 +156,10 @@ class TestSNamenode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su -s /bin/bash - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop secondarynamenode\'',
-                              not_if = None,
-                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited &&  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop secondarynamenode',
+        not_if = None,
+        user = 'hdfs',
+    )
     self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid',
                               action = ['delete'],
                               )
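
Every hunk in this file follows the same pattern: the hand-built su -s /bin/bash - hdfs -c '...' wrapper is dropped, the daemon command is passed verbatim, and the user switch is delegated to the Execute resource. Roughly, the production call the first assertion mirrors (command and guard copied from the assertion; a sketch, not the script's verbatim source):

    from resource_management import Execute  # resource exercised by these tests

    Execute('ulimit -c unlimited &&  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && '
            '/usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode',
            not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid >/dev/null 2>&1 && '
                   'ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid` >/dev/null 2>&1',
            user='hdfs')  # the user switch is handled by the resource, not a su string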

http://git-wip-us.apache.org/repos/asf/ambari/blob/716b9b78/ambari-server/src/test/python/stacks/1.3.2/hooks/before-INSTALL/test_before_install.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/hooks/before-INSTALL/test_before_install.py b/ambari-server/src/test/python/stacks/1.3.2/hooks/before-INSTALL/test_before_install.py
index 904c469..17a0ed8 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/hooks/before-INSTALL/test_before_install.py
+++ b/ambari-server/src/test/python/stacks/1.3.2/hooks/before-INSTALL/test_before_install.py
@@ -43,9 +43,13 @@ class TestHookBeforeInstall(RMFTestCase):
         path = ['/bin', '/usr/bin/'],
         environment = {'no_proxy': 'c6401.ambari.apache.org'}
     )
-    self.assertResourceCalled('Execute', 'mkdir -p /usr/jdk64 ; cd /usr/jdk64 ; tar -xf /tmp/AMBARI-artifacts//jdk-7u67-linux-x64.tar.gz > /dev/null 2>&1',
+    self.assertResourceCalled('Directory', '/usr/jdk64',)
+    self.assertResourceCalled('Execute', ('chmod', 'a+x', u'/usr/jdk64'),
+        not_if = 'test -e /usr/jdk64/jdk1.7.0_45/bin/java',
+        sudo = True,
+    )
+    self.assertResourceCalled('Execute', 'mkdir -p /tmp/jdk && cd /tmp/jdk && tar -xf /tmp/AMBARI-artifacts//jdk-7u67-linux-x64.tar.gz && sudo cp -r /tmp/jdk/* /usr/jdk64',
         not_if = 'test -e /usr/jdk64/jdk1.7.0_45/bin/java',
-        path = ['/bin', '/usr/bin/'],
     )
     self.assertResourceCalled('Package', 'unzip',)
     self.assertNoMoreResources()
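
The JDK installation is likewise split so that only the copy into the system prefix needs elevation: the tarball is unpacked in /tmp as the agent user, then moved with sudo. A sketch of the resource sequence the new assertions describe (jdk_base, jdk_tarball and java_exists are illustrative names, not the hook's variables):

    from resource_management import Directory, Execute

    jdk_base = '/usr/jdk64'
    jdk_tarball = '/tmp/AMBARI-artifacts//jdk-7u67-linux-x64.tar.gz'
    java_exists = 'test -e /usr/jdk64/jdk1.7.0_45/bin/java'

    Directory(jdk_base)
    Execute(('chmod', 'a+x', jdk_base),  # tuple form: executed without a shell
            not_if=java_exists,
            sudo=True)
    Execute('mkdir -p /tmp/jdk && cd /tmp/jdk && tar -xf %s && sudo cp -r /tmp/jdk/* %s'
            % (jdk_tarball, jdk_base),
            not_if=java_exists)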

http://git-wip-us.apache.org/repos/asf/ambari/blob/716b9b78/ambari-server/src/test/python/stacks/1.3.2/hooks/before-START/test_before_start.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/hooks/before-START/test_before_start.py b/ambari-server/src/test/python/stacks/1.3.2/hooks/before-START/test_before_start.py
index 7fc033e..fc45b8f 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/hooks/before-START/test_before_start.py
+++ b/ambari-server/src/test/python/stacks/1.3.2/hooks/before-START/test_before_start.py
@@ -147,10 +147,18 @@ class TestHookBeforeStart(RMFTestCase):
     self.assertResourceCalled('Execute', '/bin/echo 0 > /selinux/enforce',
                               only_if = 'test -f /selinux/enforce',
                               )
-    self.assertResourceCalled('Execute', 'mkdir -p /usr/lib/hadoop/lib/native/Linux-i386-32; ln -sf /usr/lib/libsnappy.so /usr/lib/hadoop/lib/native/Linux-i386-32/libsnappy.so',
-                              )
-    self.assertResourceCalled('Execute', 'mkdir -p /usr/lib/hadoop/lib/native/Linux-amd64-64; ln -sf /usr/lib64/libsnappy.so /usr/lib/hadoop/lib/native/Linux-amd64-64/libsnappy.so',
-                              )
+    self.assertResourceCalled('Directory', '/usr/lib/hadoop/lib/native/Linux-i386-32',
+        recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/usr/lib/hadoop/lib/native/Linux-amd64-64',
+        recursive = True,
+    )
+    self.assertResourceCalled('Link', '/usr/lib/hadoop/lib/native/Linux-i386-32/libsnappy.so',
+        to = '/usr/lib/libsnappy.so',
+    )
+    self.assertResourceCalled('Link', '/usr/lib/hadoop/lib/native/Linux-amd64-64/libsnappy.so',
+        to = '/usr/lib64/libsnappy.so',
+    )
     self.assertResourceCalled('Directory', '/var/log/hadoop',
                               owner = 'root',
                               group = 'hadoop',
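
The snappy symlinks move from shell one-liners to declarative resources: Directory with recursive=True replaces mkdir -p, and Link with to= replaces ln -sf, keeping both steps idempotent and free of shell quoting. A sketch of one of the two pairs asserted above:

    from resource_management import Directory, Link

    Directory('/usr/lib/hadoop/lib/native/Linux-amd64-64', recursive=True)  # mkdir -p equivalent
    Link('/usr/lib/hadoop/lib/native/Linux-amd64-64/libsnappy.so',
         to='/usr/lib64/libsnappy.so')                                      # ln -sf equivalent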

http://git-wip-us.apache.org/repos/asf/ambari/blob/716b9b78/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
index c53cde0..b4a86d7 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
@@ -57,9 +57,10 @@ class TestDatanode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su -s /bin/bash - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode\'',
-                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
-                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited &&  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode',
+        not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
+        user = 'hdfs',
+    )
     self.assertResourceCalled('Execute',
                               'hdfs dfsadmin -report -live',
                               user='hdfs'
@@ -90,9 +91,10 @@ class TestDatanode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su -s /bin/bash - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode\'',
-                              not_if = None,
-                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited &&  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode',
+        not_if = None,
+        user = 'hdfs',
+    )
     self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid',
                               action = ['delete'],
                               )
@@ -131,9 +133,10 @@ class TestDatanode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su -s /bin/bash - root -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode\'',
-                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
-                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited &&  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode',
+        not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
+        user = 'root',
+    )
     self.assertResourceCalled('Execute',
                               'hdfs dfsadmin -report -live',
                               user='hdfs'
@@ -170,9 +173,10 @@ class TestDatanode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su -s /bin/bash - root -c \'export HADOOP_LIBEXEC_DIR=/usr/hdp/current/hadoop-client/libexec && /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode\'',
-                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
-                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited &&  export HADOOP_LIBEXEC_DIR=/usr/hdp/current/hadoop-client/libexec && /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode',
+        not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
+        user = 'root',
+    )
     self.assertResourceCalled('Execute',
                               'hdfs dfsadmin -report -live',
                               user='hdfs'
@@ -180,7 +184,7 @@ class TestDatanode(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_start_secured_HDP22_non_root_https_only(self):
-    config_file="stacks/2.0.6/configs/secured.json"
+    config_file = "stacks/2.0.6/configs/secured.json"
     with open(config_file, "r") as f:
       secured_json = json.load(f)
 
@@ -212,9 +216,10 @@ class TestDatanode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su -s /bin/bash - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/hdp/current/hadoop-client/libexec && /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode\'',
-                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
-                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited &&  export HADOOP_LIBEXEC_DIR=/usr/hdp/current/hadoop-client/libexec && /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode',
+        not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
+        user = 'hdfs',
+    )
     self.assertResourceCalled('Execute',
                               'hdfs dfsadmin -report -live',
                               user='hdfs'
@@ -245,9 +250,10 @@ class TestDatanode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su -s /bin/bash - root -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode\'',
-                              not_if = None,
-                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited &&  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode',
+        not_if = None,
+        user = 'root',
+    )
     self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid',
                               action = ['delete'],
                               )
@@ -284,9 +290,10 @@ class TestDatanode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su -s /bin/bash - root -c \'export HADOOP_LIBEXEC_DIR=/usr/hdp/current/hadoop-client/libexec && /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode\'',
-                              not_if = None,
-                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited &&  export HADOOP_LIBEXEC_DIR=/usr/hdp/current/hadoop-client/libexec && /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode',
+        not_if = None,
+        user = 'root',
+    )
     self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid',
                               action = ['delete'],
                               )
@@ -325,9 +332,10 @@ class TestDatanode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su -s /bin/bash - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/hdp/current/hadoop-client/libexec && /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode\'',
-                              not_if=None,
-                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited &&  export HADOOP_LIBEXEC_DIR=/usr/hdp/current/hadoop-client/libexec && /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode',
+        not_if = None,
+        user = 'hdfs',
+    )
     self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid',
                               action=['delete'],
                               )
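
These datanode cases spell out the one remaining root dependency: a secured datanode on privileged ports is still launched with user = 'root', while unsecured clusters and the HDP 2.2 https-only (SASL) case run as hdfs. A hedged sketch of the selection the cases imply (function and variable names are assumptions, not the params.py source):

    # Assumed shape of the launch-user decision; names are illustrative.
    def datanode_launch_user(security_enabled, privileged_ports, hdfs_user='hdfs'):
        # Binding the secure datanode to ports below 1024 still requires root;
        # with SASL/https-only (HDP 2.2+) the hdfs user suffices.
        return 'root' if (security_enabled and privileged_ports) else hdfs_user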

http://git-wip-us.apache.org/repos/asf/ambari/blob/716b9b78/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
index 063d09d..0ba9889 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
@@ -56,9 +56,10 @@ class TestJournalnode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su -s /bin/bash - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start journalnode\'',
-                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid` >/dev/null 2>&1',
-                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited &&  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start journalnode',
+        not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid` >/dev/null 2>&1',
+        user = 'hdfs',
+    )
     self.assertResourceCalled('Execute', 'hdfs dfsadmin -report -live',
                               user='hdfs'
                               )
@@ -82,9 +83,10 @@ class TestJournalnode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su -s /bin/bash - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop journalnode\'',
-                              not_if = None,
-                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited &&  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop journalnode',
+        not_if = None,
+        user = 'hdfs',
+    )
     self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid',
                               action = ['delete'],
                               )
@@ -123,9 +125,10 @@ class TestJournalnode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su -s /bin/bash - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start journalnode\'',
-                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid` >/dev/null 2>&1',
-                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited &&  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start journalnode',
+        not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid` >/dev/null 2>&1',
+        user = 'hdfs',
+    )
     self.assertResourceCalled('Execute', 'hdfs dfsadmin -report -live',
                               user='hdfs'
                               )
@@ -149,9 +152,10 @@ class TestJournalnode(RMFTestCase):
                               action = ['delete'],
                               not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid` >/dev/null 2>&1',
                               )
-    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  su -s /bin/bash - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop journalnode\'',
-                              not_if = None,
-                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited &&  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop journalnode',
+        not_if = None,
+        user = 'hdfs',
+    )
     self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid',
                               action = ['delete'],
                               )
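
The stop path is the mirror image in every daemon file: the stop command runs unconditionally (not_if = None) as the service user, and the pid file is then deleted so the next start is not short-circuited by its guard. Roughly (command copied from the assertion above; a sketch, not the script source):

    from resource_management import Execute, File

    daemon_cmd = ('ulimit -c unlimited &&  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && '
                  '/usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop journalnode')
    Execute(daemon_cmd, not_if=None, user='hdfs')  # stop is never guarded
    File('/var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid', action=['delete'])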