You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@bigtop.apache.org by se...@apache.org on 2022/03/26 07:28:26 UTC

[bigtop] branch branch-3.0 updated (ec973f8 -> 14da1ec)

This is an automated email from the ASF dual-hosted git repository.

sekikn pushed a change to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/bigtop.git.


    from ec973f8  BIGTOP-3639: Alluxio  build fails on ppc64le (#861)
     new 444c01d  BIGTOP-3591: Upgrading Bigtop-Mpack services deploying scripts aligned to Ambari-2.7.5 (#824)
     new 14da1ec  BIGTOP-3592: Remove Ambari stack-select tool dependency from stack hooks (#825)

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../src/common/ambari/patch8-stack-hooks.diff      |  23 +
 .../bigtop-ambari-mpack/bgtp-ambari-mpack/pom.xml  |   4 +-
 .../stacks/BGTP/1.0/configuration/cluster-env.xml  |   6 +
 .../BGTP/1.0/hooks/after-INSTALL/scripts/hook.py   |   2 +
 .../BGTP/1.0/hooks/after-INSTALL/scripts/params.py |  58 +-
 .../after-INSTALL/scripts/shared_initialization.py |  82 ++-
 .../hooks/before-ANY/files/changeToSecureUid.sh    |  15 +-
 .../BGTP/1.0/hooks/before-ANY/scripts/hook.py      |   9 +-
 .../BGTP/1.0/hooks/before-ANY/scripts/params.py    | 166 ++++--
 .../before-ANY/scripts/shared_initialization.py    | 113 +++-
 .../BGTP/1.0/hooks/before-INSTALL/scripts/hook.py  |  10 +-
 .../1.0/hooks/before-INSTALL/scripts/params.py     |  48 +-
 .../before-INSTALL/scripts/repo_initialization.py  |  34 +-
 .../BGTP/1.0/hooks/before-RESTART/scripts/hook.py  |   3 +-
 .../scripts/hook.py                                |  26 +-
 .../before-START/files/fast-hdfs-resource.jar      | Bin 19416017 -> 19286899 bytes
 .../before-START/scripts/custom_extensions.py      | 173 ++++++
 .../BGTP/1.0/hooks/before-START/scripts/hook.py    |  12 +-
 .../BGTP/1.0/hooks/before-START/scripts/params.py  | 180 ++++--
 .../hooks/before-START/scripts/rack_awareness.py   |   1 +
 .../before-START/scripts/shared_initialization.py  | 137 ++++-
 .../templates/hadoop-metrics2.properties.j2        |  30 +-
 .../BGTP/1.0/services/YARN/YARN_widgets.json       |  18 +-
 .../stacks/BGTP/1.0/services/YARN/alerts.json      |  58 +-
 .../YARN/configuration-mapred/mapred-env.xml       |   6 +
 .../YARN/configuration-mapred/mapred-site.xml      |   5 +-
 .../YARN/configuration/container-executor.xml      |  38 +-
 .../1.0/services/YARN/configuration/yarn-env.xml   |  11 +
 .../1.0/services/YARN/configuration/yarn-log4j.xml |  27 +-
 .../1.0/services/YARN/configuration/yarn-site.xml  |  27 +-
 .../stacks/BGTP/1.0/services/YARN/kerberos.json    |  21 +-
 .../stacks/BGTP/1.0/services/YARN/metainfo.xml     |  66 ++-
 .../files/validateYarnComponentStatusWindows.py    |   2 +-
 .../package/scripts/application_timeline_server.py |  85 +--
 .../services/YARN/package/scripts/historyserver.py |  83 +--
 .../services/YARN/package/scripts/install_jars.py  |   2 +-
 .../YARN/package/scripts/mapred_service_check.py   |   6 +-
 .../YARN/package/scripts/mapreduce2_client.py      |   9 +-
 .../services/YARN/package/scripts/nodemanager.py   |  76 +--
 .../YARN/package/scripts/nodemanager_upgrade.py    |   3 +-
 .../1.0/services/YARN/package/scripts/params.py    |   3 +-
 .../services/YARN/package/scripts/params_linux.py  | 410 ++++++++-----
 .../YARN/package/scripts/params_windows.py         |  11 +-
 .../YARN/package/scripts/resourcemanager.py        | 109 ++--
 .../1.0/services/YARN/package/scripts/service.py   |  22 +-
 .../services/YARN/package/scripts/service_check.py |  97 ++--
 .../YARN/package/scripts/setup_ranger_yarn.py      |  10 +-
 .../services/YARN/package/scripts/status_params.py |  57 +-
 .../BGTP/1.0/services/YARN/package/scripts/yarn.py | 637 +++++++++++----------
 .../services/YARN/package/scripts/yarn_client.py   |  11 +-
 .../templates/input.config-mapreduce2.json.j2      |  48 ++
 .../{include_hosts_list.j2 => mapred_jaas.conf.j2} |  13 +-
 ...include_hosts_list.j2 => yarn_ats_jaas.conf.j2} |  14 +-
 .../{include_hosts_list.j2 => yarn_jaas.conf.j2}   |  21 +-
 ...{include_hosts_list.j2 => yarn_nm_jaas.conf.j2} |  14 +-
 .../container-executor.cfg.j2                      |   0
 .../BGTP/1.0/services/YARN/role_command_order.json |  21 +
 .../services/YARN/themes-mapred/directories.json   | 137 +++++
 .../BGTP/1.0/services/YARN/themes/directories.json | 177 ++++++
 59 files changed, 2290 insertions(+), 1197 deletions(-)
 create mode 100644 bigtop-packages/src/common/ambari/patch8-stack-hooks.diff
 copy bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/{before-START => before-SET_KEYTAB}/scripts/hook.py (64%)
 copy bigtop-data-generators/bigtop-location-data/src/main/resources/input_data/ACS_12_5YR_S1903/ACS_12_5YR_S1903_with_ann.csv => bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-START/files/fast-hdfs-resource.jar (66%)
 mode change 100755 => 100644
 create mode 100644 bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-START/scripts/custom_extensions.py
 copy settings.gradle => bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/configuration/container-executor.xml (54%)
 create mode 100644 bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/templates/input.config-mapreduce2.json.j2
 copy bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/templates/{include_hosts_list.j2 => mapred_jaas.conf.j2} (74%)
 copy bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/templates/{include_hosts_list.j2 => yarn_ats_jaas.conf.j2} (72%)
 copy bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/templates/{include_hosts_list.j2 => yarn_jaas.conf.j2} (64%)
 copy bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/templates/{include_hosts_list.j2 => yarn_nm_jaas.conf.j2} (73%)
 rename bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/{package/templates => properties}/container-executor.cfg.j2 (100%)
 create mode 100644 bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/role_command_order.json
 create mode 100644 bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/themes-mapred/directories.json
 create mode 100644 bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/themes/directories.json

[bigtop] 02/02: BIGTOP-3592: Remove Ambari stack-select tool dependency from stack hooks (#825)

Posted by se...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

sekikn pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/bigtop.git

commit 14da1ec3a9150e17636a48544be78c093721d04d
Author: Yuqi Gu <yu...@arm.com>
AuthorDate: Thu Nov 4 18:01:46 2021 +0800

    BIGTOP-3592: Remove Ambari stack-select tool dependency from stack hooks (#825)
    
    Change-Id: I8c34031fc8d21a6d908153f21b85e61b55b5a300
    Signed-off-by: Yuqi Gu <yu...@arm.com>
    (cherry picked from commit 1c1eed9deab5aad4c713bc7afac47879b6407829)
---
 .../src/common/ambari/patch8-stack-hooks.diff      | 23 ++++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/bigtop-packages/src/common/ambari/patch8-stack-hooks.diff b/bigtop-packages/src/common/ambari/patch8-stack-hooks.diff
new file mode 100644
index 0000000..ab85aec
--- /dev/null
+++ b/bigtop-packages/src/common/ambari/patch8-stack-hooks.diff
@@ -0,0 +1,23 @@
+diff --git a/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/shared_initialization.py
+index ca3cdc64b1..ef6cc10655 100644
+--- a/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/shared_initialization.py
++++ b/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/shared_initialization.py
+@@ -49,11 +49,6 @@ def setup_stack_symlinks(struct_out_file):
+     Logger.warning("Skipping running stack-selector-tool because this is a sys_prepped host. This may cause symlink pointers not to be created for HDP components installed later on top of an already sys_prepped host")
+     return
+ 
+-  # get the packages which the stack-select tool should be used on
+-  stack_packages = stack_select.get_packages(stack_select.PACKAGE_SCOPE_INSTALL)
+-  if stack_packages is None:
+-    return
+-
+   json_version = load_version(struct_out_file)
+ 
+   if not json_version:
+@@ -145,4 +140,4 @@ def link_configs(struct_out_file):
+     with open(params.conf_select_marker_file, "wb") as fp:
+       pass
+   else:
+-    Logger.info(format("Skipping conf-select stage, since cluster-env/sysprep_skip_conf_select is set and mark file {conf_select_marker_file} exists"))
+\ No newline at end of file
++    Logger.info(format("Skipping conf-select stage, since cluster-env/sysprep_skip_conf_select is set and mark file {conf_select_marker_file} exists"))

[bigtop] 01/02: BIGTOP-3591: Upgrading Bigtop-Mpack services deploying scripts aligned to Ambari-2.7.5 (#824)

Posted by se...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

sekikn pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/bigtop.git

commit 444c01d5cb982ba3f8e7ea62f9570ddea14ea4c4
Author: Yuqi Gu <yu...@arm.com>
AuthorDate: Mon Nov 8 10:04:31 2021 +0800

    BIGTOP-3591: Upgrading Bigtop-Mpack services deploying scripts aligned to Ambari-2.7.5 (#824)
    
    After upgrading Ambari to 2.7.5, it failed to install the services from the Bigtop-Mpack.
    The patch is to upgrade the Bigtop-Mpack services deploying scripts and fix the related issues.
    
    Change-Id: I6fa8803dba46a8f908052eaf933ab9aa348df88c
    Signed-off-by: Yuqi Gu <yu...@arm.com>
    (cherry picked from commit 60394e29f03ffdbdd739a932ffeefae1b98c2122)
---
 .../bigtop-ambari-mpack/bgtp-ambari-mpack/pom.xml  |   4 +-
 .../stacks/BGTP/1.0/configuration/cluster-env.xml  |   6 +
 .../BGTP/1.0/hooks/after-INSTALL/scripts/hook.py   |   2 +
 .../BGTP/1.0/hooks/after-INSTALL/scripts/params.py |  58 +-
 .../after-INSTALL/scripts/shared_initialization.py |  82 ++-
 .../hooks/before-ANY/files/changeToSecureUid.sh    |  15 +-
 .../BGTP/1.0/hooks/before-ANY/scripts/hook.py      |   9 +-
 .../BGTP/1.0/hooks/before-ANY/scripts/params.py    | 166 ++++--
 .../before-ANY/scripts/shared_initialization.py    | 113 +++-
 .../BGTP/1.0/hooks/before-INSTALL/scripts/hook.py  |  10 +-
 .../1.0/hooks/before-INSTALL/scripts/params.py     |  48 +-
 .../before-INSTALL/scripts/repo_initialization.py  |  34 +-
 .../BGTP/1.0/hooks/before-RESTART/scripts/hook.py  |   3 +-
 .../scripts/hook.py                                |  26 +-
 .../before-START/files/fast-hdfs-resource.jar      | Bin 0 -> 19286899 bytes
 .../before-START/scripts/custom_extensions.py      | 173 ++++++
 .../BGTP/1.0/hooks/before-START/scripts/hook.py    |  12 +-
 .../BGTP/1.0/hooks/before-START/scripts/params.py  | 180 ++++--
 .../hooks/before-START/scripts/rack_awareness.py   |   1 +
 .../before-START/scripts/shared_initialization.py  | 137 ++++-
 .../templates/hadoop-metrics2.properties.j2        |  30 +-
 .../BGTP/1.0/services/YARN/YARN_widgets.json       |  18 +-
 .../stacks/BGTP/1.0/services/YARN/alerts.json      |  58 +-
 .../YARN/configuration-mapred/mapred-env.xml       |   6 +
 .../YARN/configuration-mapred/mapred-site.xml      |   5 +-
 .../YARN/configuration/container-executor.xml      |  36 ++
 .../1.0/services/YARN/configuration/yarn-env.xml   |  11 +
 .../1.0/services/YARN/configuration/yarn-log4j.xml |  27 +-
 .../1.0/services/YARN/configuration/yarn-site.xml  |  27 +-
 .../stacks/BGTP/1.0/services/YARN/kerberos.json    |  21 +-
 .../stacks/BGTP/1.0/services/YARN/metainfo.xml     |  66 ++-
 .../files/validateYarnComponentStatusWindows.py    |   2 +-
 .../package/scripts/application_timeline_server.py |  85 +--
 .../services/YARN/package/scripts/historyserver.py |  83 +--
 .../services/YARN/package/scripts/install_jars.py  |   2 +-
 .../YARN/package/scripts/mapred_service_check.py   |   6 +-
 .../YARN/package/scripts/mapreduce2_client.py      |   9 +-
 .../services/YARN/package/scripts/nodemanager.py   |  76 +--
 .../YARN/package/scripts/nodemanager_upgrade.py    |   3 +-
 .../1.0/services/YARN/package/scripts/params.py    |   3 +-
 .../services/YARN/package/scripts/params_linux.py  | 410 ++++++++-----
 .../YARN/package/scripts/params_windows.py         |  11 +-
 .../YARN/package/scripts/resourcemanager.py        | 109 ++--
 .../1.0/services/YARN/package/scripts/service.py   |  22 +-
 .../services/YARN/package/scripts/service_check.py |  97 ++--
 .../YARN/package/scripts/setup_ranger_yarn.py      |  10 +-
 .../services/YARN/package/scripts/status_params.py |  57 +-
 .../BGTP/1.0/services/YARN/package/scripts/yarn.py | 637 +++++++++++----------
 .../services/YARN/package/scripts/yarn_client.py   |  11 +-
 .../templates/input.config-mapreduce2.json.j2      |  48 ++
 .../YARN/package/templates/mapred_jaas.conf.j2     |  28 +
 .../YARN/package/templates/yarn_ats_jaas.conf.j2   |  27 +
 .../YARN/package/templates/yarn_jaas.conf.j2       |  36 ++
 .../YARN/package/templates/yarn_nm_jaas.conf.j2    |  27 +
 .../container-executor.cfg.j2                      |   0
 .../BGTP/1.0/services/YARN/role_command_order.json |  21 +
 .../services/YARN/themes-mapred/directories.json   | 137 +++++
 .../BGTP/1.0/services/YARN/themes/directories.json | 177 ++++++
 58 files changed, 2353 insertions(+), 1165 deletions(-)

diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/pom.xml b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/pom.xml
index 00cff8c..8d354e8 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/pom.xml
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/pom.xml
@@ -23,8 +23,8 @@
   <name>BGTP Ambari Management Pack</name>
   <url>http://ambari.apache.org/</url>
   <properties>
-    <minAmbariVersion>2.5.0.0</minAmbariVersion>
-    <maxAmbariVersion></maxAmbariVersion>
+    <minAmbariVersion>2.7.5.0.0</minAmbariVersion>
+    <maxAmbariVersion>2.7.5.0.0</maxAmbariVersion>
     <buildnumber-maven-plugin-version>1.4</buildnumber-maven-plugin-version>
   </properties>
   <build>
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/configuration/cluster-env.xml b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/configuration/cluster-env.xml
index 7f2b13a..876c7cc 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/configuration/cluster-env.xml
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/configuration/cluster-env.xml
@@ -89,6 +89,12 @@
     <value-attributes>
       <type>user</type>
       <overridable>false</overridable>
+      <user-groups>
+        <property>
+          <type>cluster-env</type>
+          <name>user_group</name>
+        </property>
+      </user-groups>
     </value-attributes>
     <on-ambari-upgrade add="true"/>
   </property>
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/after-INSTALL/scripts/hook.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/after-INSTALL/scripts/hook.py
index 8bae9e6..39546b1 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/after-INSTALL/scripts/hook.py
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/after-INSTALL/scripts/hook.py
@@ -22,6 +22,7 @@ from shared_initialization import link_configs
 from shared_initialization import setup_config
 from shared_initialization import setup_stack_symlinks
 
+
 class AfterInstallHook(Hook):
 
   def hook(self, env):
@@ -33,5 +34,6 @@ class AfterInstallHook(Hook):
 
     link_configs(self.stroutfile)
 
+
 if __name__ == "__main__":
   AfterInstallHook().execute()
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/after-INSTALL/scripts/params.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/after-INSTALL/scripts/params.py
index a46759a..910b02c 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/after-INSTALL/scripts/params.py
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/after-INSTALL/scripts/params.py
@@ -20,37 +20,56 @@ limitations under the License.
 import os
 
 from ambari_commons.constants import AMBARI_SUDO_BINARY
+from ambari_commons.constants import LOGFEEDER_CONF_DIR
 from resource_management.libraries.script import Script
+from resource_management.libraries.script.script import get_config_lock_file
 from resource_management.libraries.functions import default
 from resource_management.libraries.functions import conf_select
 from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import format_jvm_option
-from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.version import format_stack_version, get_major_version
+from resource_management.libraries.functions.format import format
+from string import lower
 
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
-dfs_type = default("/commandParams/dfs_type", "")
+dfs_type = default("/clusterLevelParams/dfs_type", "")
 
-is_parallel_execution_enabled = int(default("/agentConfigParams/agent/parallel_execution", 0)) == 1
+is_parallel_execution_enabled = int(default("/agentLevelParams/agentConfigParams/agent/parallel_execution", 0)) == 1
+host_sys_prepped = default("/ambariLevelParams/host_sys_prepped", False)
 
 sudo = AMBARI_SUDO_BINARY
 
-stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_unformatted = config['clusterLevelParams']['stack_version']
 stack_version_formatted = format_stack_version(stack_version_unformatted)
+major_stack_version = get_major_version(stack_version_formatted)
+
+# service name
+service_name = config['serviceName']
+
+# logsearch configuration
+logsearch_logfeeder_conf = LOGFEEDER_CONF_DIR
+
+agent_cache_dir = config['agentLevelParams']['agentCacheDir']
+service_package_folder = config['serviceLevelParams']['service_package_folder']
+logsearch_service_name = service_name.lower().replace("_", "-")
+logsearch_config_file_name = 'input.config-' + logsearch_service_name + ".json"
+logsearch_config_file_path = agent_cache_dir + "/" + service_package_folder + "/templates/" + logsearch_config_file_name + ".j2"
+logsearch_config_file_exists = os.path.isfile(logsearch_config_file_path)
 
 # default hadoop params
-mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
 hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
-hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
 
-versioned_stack_root = '/usr/bgtp/current'
+mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
+
+versioned_stack_root = '/usr/hdp/current'
 
 #security params
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 
 #java params
-java_home = config['hostLevelParams']['java_home']
+java_home = config['ambariLevelParams']['java_home']
 
 #hadoop params
 hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
@@ -79,13 +98,28 @@ mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefi
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 user_group = config['configurations']['cluster-env']['user_group']
 
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-has_namenode = not len(namenode_host) == 0
+namenode_hosts = default("/clusterHostInfo/namenode_hosts", [])
+hdfs_client_hosts = default("/clusterHostInfo/hdfs_client_hosts", [])
+has_hdfs_clients = len(hdfs_client_hosts) > 0
+has_namenode = len(namenode_hosts) > 0
+has_hdfs = has_hdfs_clients or has_namenode
 
-if has_namenode or dfs_type == 'HCFS':
+if has_hdfs or dfs_type == 'HCFS':
   hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
 
-link_configs_lock_file = os.path.join(tmp_dir, "link_configs_lock_file")
+  mount_table_xml_inclusion_file_full_path = None
+  mount_table_content = None
+  if 'viewfs-mount-table' in config['configurations']:
+    xml_inclusion_file_name = 'viewfs-mount-table.xml'
+    mount_table = config['configurations']['viewfs-mount-table']
+
+    if 'content' in mount_table and mount_table['content'].strip():
+      mount_table_xml_inclusion_file_full_path = os.path.join(hadoop_conf_dir, xml_inclusion_file_name)
+      mount_table_content = mount_table['content']
+
+link_configs_lock_file = get_config_lock_file()
 stack_select_lock_file = os.path.join(tmp_dir, "stack_select_lock_file")
 
 upgrade_suspended = default("/roleParams/upgrade_suspended", False)
+sysprep_skip_conf_select = default("/configurations/cluster-env/sysprep_skip_conf_select", False)
+conf_select_marker_file = format("{tmp_dir}/conf_select_done_marker")
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/after-INSTALL/scripts/shared_initialization.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/after-INSTALL/scripts/shared_initialization.py
index 0ae8466..c9e84c3 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/after-INSTALL/scripts/shared_initialization.py
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/after-INSTALL/scripts/shared_initialization.py
@@ -19,11 +19,14 @@ limitations under the License.
 import os
 
 import ambari_simplejson as json
+from ambari_jinja2 import Environment as JinjaEnvironment
 from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Directory, File
+from resource_management.core.source import InlineTemplate, Template
 from resource_management.libraries.functions import conf_select
 from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.version import compare_versions
 from resource_management.libraries.functions.fcntl_based_process_lock import FcntlBasedProcessLock
 from resource_management.libraries.resources.xml_config import XmlConfig
 from resource_management.libraries.script import Script
@@ -38,6 +41,14 @@ def setup_stack_symlinks(struct_out_file):
   :return:
   """
   import params
+  if params.upgrade_suspended:
+    Logger.warning("Skipping running stack-selector-tool because there is a suspended upgrade")
+    return
+
+  if params.host_sys_prepped:
+    Logger.warning("Skipping running stack-selector-tool because this is a sys_prepped host. This may cause symlink pointers not to be created for HDP components installed later on top of an already sys_prepped host")
+    return
+
   # get the packages which the stack-select tool should be used on
   #stack_packages = stack_select.get_packages(stack_select.PACKAGE_SCOPE_INSTALL)
   #if stack_packages is None:
@@ -54,6 +65,7 @@ def setup_stack_symlinks(struct_out_file):
     for package in stack_packages:
       stack_select.select(package, json_version)
 
+
 def setup_config():
   import params
   stackversion = params.stack_version_unformatted
@@ -65,32 +77,72 @@ def setup_config():
   else:
     Logger.warning("Parameter hadoop_conf_dir is missing or directory does not exist. This is expected if this host does not have any Hadoop components.")
 
-  if is_hadoop_conf_dir_present and (params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS'):
-    # create core-site only if the hadoop config diretory exists
+  if is_hadoop_conf_dir_present and (params.has_hdfs or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS'):
+    # create core-site only if the hadoop config directory exists
     XmlConfig("core-site.xml",
               conf_dir=params.hadoop_conf_dir,
               configurations=params.config['configurations']['core-site'],
-              configuration_attributes=params.config['configuration_attributes']['core-site'],
+              configuration_attributes=params.config['configurationAttributes']['core-site'],
               owner=params.hdfs_user,
               group=params.user_group,
-              only_if=format("ls {hadoop_conf_dir}"))
+              only_if=format("ls {hadoop_conf_dir}"),
+              xml_include_file=params.mount_table_xml_inclusion_file_full_path
+              )
+
+    if params.mount_table_content:
+      File(os.path.join(params.hadoop_conf_dir, params.xml_inclusion_file_name),
+           owner=params.hdfs_user,
+           group=params.user_group,
+           content=params.mount_table_content
+           )
+
+  Directory(params.logsearch_logfeeder_conf,
+            mode=0755,
+            cd_access='a',
+            create_parents=True
+            )
+
+  if params.logsearch_config_file_exists:
+    File(format("{logsearch_logfeeder_conf}/" + params.logsearch_config_file_name),
+         content=Template(params.logsearch_config_file_path,extra_imports=[default])
+         )
+  else:
+    Logger.warning('No logsearch configuration exists at ' + params.logsearch_config_file_path)
 
 
 def load_version(struct_out_file):
   """
   Load version from file.  Made a separate method for testing
   """
-  json_version = None
   try:
-    if os.path.exists(struct_out_file):
-      with open(struct_out_file, 'r') as fp:
-        json_info = json.load(fp)
-        json_version = json_info['version']
-  except:
-    pass
+    with open(struct_out_file, 'r') as fp:
+      json_info = json.load(fp)
+
+    return json_info['version']
+  except (IOError, KeyError, TypeError):
+    return None
 
-  return json_version
-  
 
 def link_configs(struct_out_file):
-  return
+  """
+  Use the conf_select module to link configuration directories correctly.
+  """
+  import params
+
+  json_version = load_version(struct_out_file)
+
+  if not json_version:
+    Logger.info("Could not load 'version' from {0}".format(struct_out_file))
+    return
+
+  if not params.sysprep_skip_conf_select or not os.path.exists(params.conf_select_marker_file):
+    # On parallel command execution this should be executed by a single process at a time.
+    with FcntlBasedProcessLock(params.link_configs_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True):
+      for package_name, directories in conf_select.get_package_dirs().iteritems():
+        conf_select.convert_conf_directories_to_symlinks(package_name, json_version, directories)
+
+    # create a file to mark that conf-selects were already done
+    with open(params.conf_select_marker_file, "wb") as fp:
+      pass
+  else:
+    Logger.info(format("Skipping conf-select stage, since cluster-env/sysprep_skip_conf_select is set and mark file {conf_select_marker_file} exists"))
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-ANY/files/changeToSecureUid.sh b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-ANY/files/changeToSecureUid.sh
index 08542c4..a6b8b77 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-ANY/files/changeToSecureUid.sh
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-ANY/files/changeToSecureUid.sh
@@ -21,6 +21,7 @@
 
 username=$1
 directories=$2
+newUid=$3
 
 function find_available_uid() {
  for ((i=1001; i<=2000; i++))
@@ -34,7 +35,18 @@ function find_available_uid() {
  done
 }
 
-find_available_uid
+if [ -z $2 ]; then
+  test $(id -u ${username} 2>/dev/null)
+  if [ $? -ne 1 ]; then
+   newUid=`id -u ${username}`
+  else
+   find_available_uid
+  fi
+  echo $newUid
+  exit 0
+else
+  find_available_uid
+fi
 
 if [ $newUid -eq 0 ]
 then
@@ -43,7 +55,6 @@ then
 fi
 
 set -e
-
 dir_array=($(echo $directories | sed 's/,/\n/g'))
 old_uid=$(id -u $username)
 sudo_prefix="/var/lib/ambari-agent/ambari-sudo.sh -H -E"
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-ANY/scripts/hook.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-ANY/scripts/hook.py
index c34be0b..25ca3a9 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-ANY/scripts/hook.py
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-ANY/scripts/hook.py
@@ -17,8 +17,10 @@ limitations under the License.
 
 """
 
-from resource_management import *
-from shared_initialization import *
+
+from shared_initialization import setup_users, setup_hadoop_env, setup_java
+from resource_management import Hook
+
 
 class BeforeAnyHook(Hook):
 
@@ -27,10 +29,11 @@ class BeforeAnyHook(Hook):
     env.set_params(params)
 
     setup_users()
-    if params.has_namenode or params.dfs_type == 'HCFS':
+    if params.has_hdfs or params.dfs_type == 'HCFS':
       setup_hadoop_env()
     setup_java()
 
+
 if __name__ == "__main__":
   BeforeAnyHook().execute()
 
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-ANY/scripts/params.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-ANY/scripts/params.py
index 415e90d..1d69dac 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-ANY/scripts/params.py
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-ANY/scripts/params.py
@@ -20,6 +20,7 @@ limitations under the License.
 import collections
 import re
 import os
+import ast
 
 import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
 
@@ -31,35 +32,46 @@ from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import format_jvm_option
 from resource_management.libraries.functions.is_empty import is_empty
 from resource_management.libraries.functions.version import format_stack_version
-from resource_management.libraries.functions.version import compare_versions
 from resource_management.libraries.functions.expect import expect
-from ambari_commons.os_check import OSCheck
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.stack_features import get_stack_feature_version
+from resource_management.libraries.functions.get_architecture import get_architecture
 from ambari_commons.constants import AMBARI_SUDO_BINARY
+from resource_management.libraries.functions.namenode_ha_utils import get_properties_for_all_nameservices, namenode_federation_enabled
 
 
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
-dfs_type = default("/commandParams/dfs_type", "")
+stack_root = Script.get_stack_root()
+
+architecture = get_architecture()
+
+dfs_type = default("/clusterLevelParams/dfs_type", "")
 
 artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
-jdk_name = default("/hostLevelParams/jdk_name", None)
-java_home = config['hostLevelParams']['java_home']
-java_version = expect("/hostLevelParams/java_version", int)
-jdk_location = config['hostLevelParams']['jdk_location']
+jdk_name = default("/ambariLevelParams/jdk_name", None)
+java_home = config['ambariLevelParams']['java_home']
+java_version = expect("/ambariLevelParams/java_version", int)
+jdk_location = config['ambariLevelParams']['jdk_location']
+
+hadoop_custom_extensions_enabled = default("/configurations/core-site/hadoop.custom-extensions.enabled", False)
 
 sudo = AMBARI_SUDO_BINARY
 
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+ambari_server_hostname = config['ambariLevelParams']['ambari_server_host']
 
-stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_unformatted = config['clusterLevelParams']['stack_version']
 stack_version_formatted = format_stack_version(stack_version_unformatted)
 
-restart_type = default("/commandParams/restart_type", "")
+upgrade_type = Script.get_upgrade_type(default("/commandParams/upgrade_type", ""))
 version = default("/commandParams/version", None)
 # Handle upgrade and downgrade
-if (restart_type.lower() == "rolling_upgrade" or restart_type.lower() == "nonrolling_upgrade") and version:
+if (upgrade_type is not None) and version:
   stack_version_formatted = format_stack_version(version)
+ambari_java_home = default("/commandParams/ambari_java_home", None)
+ambari_jdk_name = default("/commandParams/ambari_jdk_name", None)
 
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
@@ -92,24 +104,39 @@ def is_secure_port(port):
   else:
     return False
 
-# hadoop default params
-mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-
 # upgrades would cause these directories to have a version instead of "current"
 # which would cause a lot of problems when writing out hadoop-env.sh; instead
 # force the use of "current" in the hook
 hdfs_user_nofile_limit = default("/configurations/hadoop-env/hdfs_user_nofile_limit", "128000")
 hadoop_home = stack_select.get_hadoop_dir("home")
 hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
+hadoop_lib_home = stack_select.get_hadoop_dir("lib")
 
-hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
-hadoop_secure_dn_user = hdfs_user
 hadoop_dir = "/etc/hadoop"
-versioned_stack_root = '/usr/bgtp/current'
 hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
 datanode_max_locked_memory = config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory']
 is_datanode_max_locked_memory_set = not is_empty(config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory'])
 
+mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
+
+if not security_enabled:
+  hadoop_secure_dn_user = '""'
+else:
+  dfs_dn_port = get_port(dfs_dn_addr)
+  dfs_dn_http_port = get_port(dfs_dn_http_addr)
+  dfs_dn_https_port = get_port(dfs_dn_https_addr)
+  # We try to avoid inability to start datanode as a plain user due to usage of root-owned ports
+  if dfs_http_policy == "HTTPS_ONLY":
+    secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_https_port)
+  elif dfs_http_policy == "HTTP_AND_HTTPS":
+    secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port) or is_secure_port(dfs_dn_https_port)
+  else:   # params.dfs_http_policy == "HTTP_ONLY" or not defined:
+    secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port)
+  if secure_dn_ports_are_in_use:
+    hadoop_secure_dn_user = hdfs_user
+  else:
+    hadoop_secure_dn_user = '""'
+
 #hadoop params
 hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
 hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
@@ -144,27 +171,83 @@ tez_user = config['configurations']['tez-env']["tez_user"]
 oozie_user = config['configurations']['oozie-env']["oozie_user"]
 falcon_user = config['configurations']['falcon-env']["falcon_user"]
 ranger_user = config['configurations']['ranger-env']["ranger_user"]
+zeppelin_user = config['configurations']['zeppelin-env']["zeppelin_user"]
+zeppelin_group = config['configurations']['zeppelin-env']["zeppelin_group"]
 
 user_group = config['configurations']['cluster-env']['user_group']
 
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_hosts", [])
+namenode_hosts = default("/clusterHostInfo/namenode_hosts", [])
+hdfs_client_hosts = default("/clusterHostInfo/hdfs_client_hosts", [])
 hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
 oozie_servers = default("/clusterHostInfo/oozie_server", [])
 falcon_server_hosts = default("/clusterHostInfo/falcon_server_hosts", [])
 ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+zeppelin_master_hosts = default("/clusterHostInfo/zeppelin_master_hosts", [])
 
-has_namenode = not len(namenode_host) == 0
+# get the correct version to use for checking stack features
+version_for_stack_feature_checks = get_stack_feature_version(config)
+
+
+has_namenode = len(namenode_hosts) > 0
+has_hdfs_clients = len(hdfs_client_hosts) > 0
+has_hdfs = has_hdfs_clients or has_namenode
 has_ganglia_server = not len(ganglia_server_hosts) == 0
 has_tez = 'tez-site' in config['configurations']
 has_hbase_masters = not len(hbase_master_hosts) == 0
 has_oozie_server = not len(oozie_servers) == 0
 has_falcon_server_hosts = not len(falcon_server_hosts) == 0
 has_ranger_admin = not len(ranger_admin_hosts) == 0
-
-if has_namenode or dfs_type == 'HCFS':
-  #hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
-  hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+has_zeppelin_master = not len(zeppelin_master_hosts) == 0
+stack_supports_zk_security = check_stack_feature(StackFeature.SECURE_ZOOKEEPER, version_for_stack_feature_checks)
+
+hostname = config['agentLevelParams']['hostname']
+hdfs_site = config['configurations']['hdfs-site']
+
+# HDFS High Availability properties
+dfs_ha_enabled = False
+dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
+if dfs_ha_nameservices is None:
+  dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None)
+
+# on stacks without any filesystem there is no hdfs-site
+dfs_ha_namenode_ids_all_ns = get_properties_for_all_nameservices(hdfs_site, 'dfs.ha.namenodes') if 'hdfs-site' in config['configurations'] else {}
+dfs_ha_automatic_failover_enabled = default("/configurations/hdfs-site/dfs.ha.automatic-failover.enabled", False)
+
+# Values for the current Host
+namenode_id = None
+namenode_rpc = None
+
+dfs_ha_namemodes_ids_list = []
+other_namenode_id = None
+
+for ns, dfs_ha_namenode_ids in dfs_ha_namenode_ids_all_ns.iteritems():
+  found = False
+  if not is_empty(dfs_ha_namenode_ids):
+    dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
+    dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
+    if dfs_ha_namenode_ids_array_len > 1:
+      dfs_ha_enabled = True
+  if dfs_ha_enabled:
+    for nn_id in dfs_ha_namemodes_ids_list:
+      nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{ns}.{nn_id}')]
+      if hostname in nn_host:
+        namenode_id = nn_id
+        namenode_rpc = nn_host
+        found = True
+    # With HA enabled namenode_address is recomputed
+    namenode_address = format('hdfs://{ns}')
+
+    # Calculate the namenode id of the other namenode. This is needed during RU to initiate an HA failover using ZKFC.
+    if namenode_id is not None and len(dfs_ha_namemodes_ids_list) == 2:
+      other_namenode_id = list(set(dfs_ha_namemodes_ids_list) - set([namenode_id]))[0]
+
+  if found:
+    break
+
+if has_hdfs or dfs_type == 'HCFS':
+    hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+    hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure")
 
 hbase_tmp_dir = "/tmp/hbase-hbase"
 
@@ -172,6 +255,7 @@ proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
 ranger_group = config['configurations']['ranger-env']['ranger_group']
 dfs_cluster_administrators_group = config['configurations']['hdfs-site']["dfs.cluster.administrators"]
 
+sysprep_skip_create_users_and_groups = default("/configurations/cluster-env/sysprep_skip_create_users_and_groups", False)
 ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
 fetch_nonlocal_groups = config['configurations']['cluster-env']["fetch_nonlocal_groups"]
 
@@ -179,28 +263,28 @@ smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},
 if has_hbase_masters:
   hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
 #repo params
-repo_info = config['hostLevelParams']['repo_info']
+repo_info = config['hostLevelParams']['repoInfo']
 service_repo_info = default("/hostLevelParams/service_repo_info",None)
 
-user_to_groups_dict = collections.defaultdict(lambda:[user_group])
-user_to_groups_dict[smoke_user] = [proxyuser_group]
-if has_ganglia_server:
-  user_to_groups_dict[gmond_user] = [gmond_user]
-  user_to_groups_dict[gmetad_user] = [gmetad_user]
-if has_tez:
-  user_to_groups_dict[tez_user] = [proxyuser_group]
-if has_oozie_server:
-  user_to_groups_dict[oozie_user] = [proxyuser_group]
-if has_falcon_server_hosts:
-  user_to_groups_dict[falcon_user] = [proxyuser_group]
-if has_ranger_admin:
-  user_to_groups_dict[ranger_user] = [ranger_group]
+user_to_groups_dict = {}
+
+#Append new user-group mapping to the dict
+try:
+  user_group_map = ast.literal_eval(config['clusterLevelParams']['user_groups'])
+  for key in user_group_map.iterkeys():
+    user_to_groups_dict[key] = user_group_map[key]
+except ValueError:
+  print('User Group mapping (user_group) is missing in the hostLevelParams')
 
 user_to_gid_dict = collections.defaultdict(lambda:user_group)
 
-user_list = json.loads(config['hostLevelParams']['user_list'])
-group_list = json.loads(config['hostLevelParams']['group_list'])
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+user_list = json.loads(config['clusterLevelParams']['user_list'])
+group_list = json.loads(config['clusterLevelParams']['group_list'])
+host_sys_prepped = default("/ambariLevelParams/host_sys_prepped", False)
 
 tez_am_view_acls = config['configurations']['tez-site']["tez.am.view-acls"]
 override_uid = str(default("/configurations/cluster-env/override_uid", "true")).lower()
+
+# if NN HA on secure clutser, access Zookeper securely
+if stack_supports_zk_security and dfs_ha_enabled and security_enabled:
+    hadoop_zkfc_opts=format("-Dzookeeper.sasl.client=true -Dzookeeper.sasl.client.username=zookeeper -Djava.security.auth.login.config={hadoop_conf_secure_dir}/hdfs_jaas.conf -Dzookeeper.sasl.clientconfig=Client")
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-ANY/scripts/shared_initialization.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-ANY/scripts/shared_initialization.py
index d3e2b91..ec9497f 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-ANY/scripts/shared_initialization.py
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-ANY/scripts/shared_initialization.py
@@ -24,13 +24,19 @@ import tempfile
 from copy import copy
 from resource_management.libraries.functions.version import compare_versions
 from resource_management import *
+from resource_management.core import shell
 
 def setup_users():
   """
   Creates users before cluster installation
   """
   import params
-  should_create_users_and_groups = not params.host_sys_prepped and not params.ignore_groupsusers_create
+
+  should_create_users_and_groups = False
+  if params.host_sys_prepped:
+    should_create_users_and_groups = not params.sysprep_skip_create_users_and_groups
+  else:
+    should_create_users_and_groups = not params.ignore_groupsusers_create
 
   if should_create_users_and_groups:
     for group in params.group_list:
@@ -39,10 +45,11 @@ def setup_users():
 
     for user in params.user_list:
       User(user,
-          gid = params.user_to_gid_dict[user],
-          groups = params.user_to_groups_dict[user],
-          fetch_nonlocal_groups = params.fetch_nonlocal_groups
-      )
+           uid = get_uid(user) if params.override_uid == "true" else None,
+           gid = params.user_to_gid_dict[user],
+           groups = params.user_to_groups_dict[user],
+           fetch_nonlocal_groups = params.fetch_nonlocal_groups,
+           )
 
     if params.override_uid == "true":
       set_uid(params.smoke_user, params.smoke_user_dirs)
@@ -60,19 +67,17 @@ def setup_users():
                create_parents = True,
                cd_access="a",
     )
-    if not params.host_sys_prepped and params.override_uid == "true":
+
+    if params.override_uid == "true":
       set_uid(params.hbase_user, params.hbase_user_dirs)
     else:
       Logger.info('Skipping setting uid for hbase user as host is sys prepped')
-      pass
 
-  if not params.host_sys_prepped:
-    if params.has_namenode:
-      if should_create_users_and_groups:
-        create_dfs_cluster_admins()
+  if should_create_users_and_groups:
+    if params.has_hdfs:
+      create_dfs_cluster_admins()
     if params.has_tez and params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.3') >= 0:
-      if should_create_users_and_groups:
-        create_tez_am_view_acls()
+      create_tez_am_view_acls()
   else:
     Logger.info('Skipping setting dfs cluster admin and tez view acls as host is sys prepped')
 
@@ -86,7 +91,7 @@ def create_dfs_cluster_admins():
 
   User(params.hdfs_user,
     groups = params.user_to_groups_dict[params.hdfs_user] + groups_list,
-          fetch_nonlocal_groups = params.fetch_nonlocal_groups
+    fetch_nonlocal_groups = params.fetch_nonlocal_groups
   )
 
 def create_tez_am_view_acls():
@@ -103,12 +108,16 @@ def create_users_and_groups(user_and_groups):
 
   import params
 
-  parts = re.split('\s', user_and_groups)
+  parts = re.split('\s+', user_and_groups)
   if len(parts) == 1:
     parts.append("")
 
-  users_list = parts[0].split(",") if parts[0] else []
-  groups_list = parts[1].split(",") if parts[1] else []
+  users_list = parts[0].strip(",").split(",") if parts[0] else []
+  groups_list = parts[1].strip(",").split(",") if parts[1] else []
+
+  # skip creating groups and users if * is provided as value.
+  users_list = filter(lambda x: x != '*' , users_list)
+  groups_list = filter(lambda x: x != '*' , groups_list)
 
   if users_list:
     User(users_list,
@@ -130,14 +139,48 @@ def set_uid(user, user_dirs):
        content=StaticFile("changeToSecureUid.sh"),
        mode=0555)
   ignore_groupsusers_create_str = str(params.ignore_groupsusers_create).lower()
-  Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs}"),
+  uid = get_uid(user, return_existing=True)
+  Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs} {new_uid}", new_uid=0 if uid is None else uid),
           not_if = format("(test $(id -u {user}) -gt 1000) || ({ignore_groupsusers_create_str})"))
 
+def get_uid(user, return_existing=False):
+  """
+  Tries to get UID for username. It will try to find UID in custom properties in *cluster_env* and, if *return_existing=True*,
+  it will try to return UID of existing *user*.
+
+  :param user: username to get UID for
+  :param return_existing: return UID for existing user
+  :return:
+  """
+  import params
+  user_str = str(user) + "_uid"
+  service_env = [ serviceEnv for serviceEnv in params.config['configurations'] if user_str in params.config['configurations'][serviceEnv]]
+
+  if service_env and params.config['configurations'][service_env[0]][user_str]:
+    service_env_str = str(service_env[0])
+    uid = params.config['configurations'][service_env_str][user_str]
+    if len(service_env) > 1:
+      Logger.warning("Multiple values found for %s, using %s"  % (user_str, uid))
+    return uid
+  else:
+    if return_existing:
+      # pick up existing UID or try to find available UID in /etc/passwd, see changeToSecureUid.sh for more info
+      if user == params.smoke_user:
+        return None
+      File(format("{tmp_dir}/changeUid.sh"),
+           content=StaticFile("changeToSecureUid.sh"),
+           mode=0555)
+      code, newUid = shell.call(format("{tmp_dir}/changeUid.sh {user}"))
+      return int(newUid)
+    else:
+      # do not return UID for existing user, used in User resource call to let OS to choose UID for us
+      return None
+
 def setup_hadoop_env():
   import params
   stackversion = params.stack_version_unformatted
   Logger.info("FS Type: {0}".format(params.dfs_type))
-  if params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS':
+  if params.has_hdfs or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS':
     if params.security_enabled:
       tc_owner = "root"
     else:
@@ -162,17 +205,26 @@ def setup_hadoop_env():
 
 def setup_java():
   """
-  Installs jdk using specific params, that comes from ambari-server
+  Install jdk using specific params.
+  Install ambari jdk as well if the stack and ambari jdk are different.
   """
   import params
+  __setup_java(custom_java_home=params.java_home, custom_jdk_name=params.jdk_name)
+  if params.ambari_java_home and params.ambari_java_home != params.java_home:
+    __setup_java(custom_java_home=params.ambari_java_home, custom_jdk_name=params.ambari_jdk_name)
 
-  java_exec = format("{java_home}/bin/java")
+def __setup_java(custom_java_home, custom_jdk_name):
+  """
+  Installs jdk using specific params, that comes from ambari-server
+  """
+  import params
+  java_exec = format("{custom_java_home}/bin/java")
 
   if not os.path.isfile(java_exec):
     if not params.jdk_name: # if custom jdk is used.
       raise Fail(format("Unable to access {java_exec}. Confirm you have copied jdk to this host."))
 
-    jdk_curl_target = format("{tmp_dir}/{jdk_name}")
+    jdk_curl_target = format("{tmp_dir}/{custom_jdk_name}")
     java_dir = os.path.dirname(params.java_home)
 
     Directory(params.artifact_dir,
@@ -180,9 +232,13 @@ def setup_java():
               )
 
     File(jdk_curl_target,
-         content = DownloadSource(format("{jdk_location}/{jdk_name}")),
+         content = DownloadSource(format("{jdk_location}/{custom_jdk_name}")),
          not_if = format("test -f {jdk_curl_target}")
-    )
+         )
+
+    File(jdk_curl_target,
+         mode = 0755,
+         )
 
     tmp_java_dir = tempfile.mkdtemp(prefix="jdk_tmp_", dir=params.tmp_dir)
 
@@ -195,7 +251,7 @@ def setup_java():
         install_cmd = format("cd {tmp_java_dir} && tar -xf {jdk_curl_target} && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
 
       Directory(java_dir
-      )
+                )
 
       Execute(chmod_cmd,
               sudo = True,
@@ -207,10 +263,11 @@ def setup_java():
     finally:
       Directory(tmp_java_dir, action="delete")
 
-    File(format("{java_home}/bin/java"),
+    File(format("{custom_java_home}/bin/java"),
          mode=0755,
          cd_access="a",
          )
     Execute(('chmod', '-R', '755', params.java_home),
-      sudo = True,
-    )
+            sudo = True,
+            )
+
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-INSTALL/scripts/hook.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-INSTALL/scripts/hook.py
index 2f0f524..c470965 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-INSTALL/scripts/hook.py
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-INSTALL/scripts/hook.py
@@ -16,11 +16,10 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
+from resource_management import Hook
+from shared_initialization import install_packages
+from repo_initialization import install_repos
 
-import sys
-from resource_management import *
-from shared_initialization import *
-from repo_initialization import *
 
 class BeforeInstallHook(Hook):
 
@@ -29,9 +28,10 @@ class BeforeInstallHook(Hook):
 
     self.run_custom_hook('before-ANY')
     env.set_params(params)
-
+    
     install_repos()
     install_packages()
 
+
 if __name__ == "__main__":
   BeforeInstallHook().execute()
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-INSTALL/scripts/params.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-INSTALL/scripts/params.py
index 6193c11..0ba8332 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-INSTALL/scripts/params.py
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-INSTALL/scripts/params.py
@@ -28,9 +28,9 @@ config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 sudo = AMBARI_SUDO_BINARY
 
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
-agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
+stack_version_unformatted = config['clusterLevelParams']['stack_version']
+agent_stack_retry_on_unavailability = config['ambariLevelParams']['agent_stack_retry_on_unavailability']
+agent_stack_retry_count = expect("/ambariLevelParams/agent_stack_retry_count", int)
 stack_version_formatted = format_stack_version(stack_version_unformatted)
 
 #users and groups
@@ -50,24 +50,24 @@ repo_rhel_suse =  config['configurations']['cluster-env']['repo_suse_rhel_templa
 repo_ubuntu =  config['configurations']['cluster-env']['repo_ubuntu_template']
 
 #hosts
-hostname = config["hostname"]
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+hostname = config['agentLevelParams']['hostname']
+ambari_server_hostname = config['ambariLevelParams']['ambari_server_host']
+rm_host = default("/clusterHostInfo/resourcemanager_hosts", [])
+slave_hosts = default("/clusterHostInfo/datanode_hosts", [])
 oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_hosts", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_hosts", [])
 hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+hs_host = default("/clusterHostInfo/historyserver_hosts", [])
+jtnode_host = default("/clusterHostInfo/jtnode_hosts", [])
+namenode_hosts = default("/clusterHostInfo/namenode_hosts", [])
+zk_hosts = default("/clusterHostInfo/zookeeper_server_hosts", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_hosts", [])
 storm_server_hosts = default("/clusterHostInfo/nimbus_hosts", [])
 falcon_host =  default('/clusterHostInfo/falcon_server_hosts', [])
 
 has_sqoop_client = 'sqoop-env' in config['configurations']
-has_namenode = not len(namenode_host) == 0
+has_namenode = len(namenode_hosts) > 0
 has_hs = not len(hs_host) == 0
 has_resourcemanager = not len(rm_host) == 0
 has_slaves = not len(slave_hosts) == 0
@@ -81,7 +81,7 @@ has_storm_server = not len(storm_server_hosts) == 0
 has_falcon_server = not len(falcon_host) == 0
 has_tez = 'tez-site' in config['configurations']
 
-is_namenode_master = hostname in namenode_host
+is_namenode_master = hostname in namenode_hosts
 is_jtnode_master = hostname in jtnode_host
 is_rmnode_master = hostname in rm_host
 is_hsnode_master = hostname in hs_host
@@ -96,18 +96,20 @@ hbase_tmp_dir = "/tmp/hbase-hbase"
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 
 #java params
-java_home = config['hostLevelParams']['java_home']
+java_home = config['ambariLevelParams']['java_home']
 artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
-jdk_name = default("/hostLevelParams/jdk_name", None) # None when jdk is already installed by user
-jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
-jce_location = config['hostLevelParams']['jdk_location']
-jdk_location = config['hostLevelParams']['jdk_location']
+jdk_name = default("/ambariLevelParams/jdk_name", None) # None when jdk is already installed by user
+jce_policy_zip = default("/ambariLevelParams/jce_name", None) # None when jdk is already installed by user
+jce_location = config['ambariLevelParams']['jdk_location']
+jdk_location = config['ambariLevelParams']['jdk_location']
 ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+host_sys_prepped = default("/ambariLevelParams/host_sys_prepped", False)
 
 smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
 if has_hbase_masters:
   hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
 #repo params
-repo_info = config['hostLevelParams']['repo_info']
+repo_info = config['hostLevelParams']['repoInfo']
 service_repo_info = default("/hostLevelParams/service_repo_info",None)
+
+repo_file = default("/repositoryFile", None)
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-INSTALL/scripts/repo_initialization.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-INSTALL/scripts/repo_initialization.py
index a35dce7..f6f2a12 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-INSTALL/scripts/repo_initialization.py
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-INSTALL/scripts/repo_initialization.py
@@ -19,19 +19,17 @@ limitations under the License.
 
 from ambari_commons.os_check import OSCheck
 from resource_management.libraries.resources.repository import Repository
+from resource_management.libraries.functions.repository_util import CommandRepository, UBUNTU_REPO_COMPONENTS_POSTFIX
+from resource_management.libraries.script.script import Script
 from resource_management.core.logger import Logger
-import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
+import ambari_simplejson as json
 
-# components_lits = repoName + postfix
-_UBUNTU_REPO_COMPONENTS_POSTFIX = ["main"]
 
-def _alter_repo(action, repo_string, repo_template):
+def _alter_repo(action, repo_dicts, repo_template):
   """
   @param action: "delete" or "create"
-  @param repo_string: e.g. "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]"
+  @param repo_dicts: e.g. "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]"
   """
-  repo_dicts = json.loads(repo_string)
-
   if not isinstance(repo_dicts, list):
     repo_dicts = [repo_dicts]
 
@@ -45,24 +43,34 @@ def _alter_repo(action, repo_string, repo_template):
       repo['baseUrl'] = None
     if not 'mirrorsList' in repo:
       repo['mirrorsList'] = None
-    
-    ubuntu_components = [ repo['repoName'] ] + _UBUNTU_REPO_COMPONENTS_POSTFIX
-    
+
+    ubuntu_components = [ repo['distribution'] if 'distribution' in repo and repo['distribution'] else repo['repoName'] ] \
+                        + [repo['components'].replace(",", " ") if 'components' in repo and repo['components'] else UBUNTU_REPO_COMPONENTS_POSTFIX]
+
     Repository(repo['repoId'],
-               action = action,
+               action = "prepare",
                base_url = repo['baseUrl'],
                mirror_list = repo['mirrorsList'],
                repo_file_name = repo['repoName'],
                repo_template = repo_template,
-               components = ubuntu_components, # ubuntu specific
-    )
+               components = ubuntu_components) # ubuntu specific
+
+  Repository(None, action = "create")
+
 
 def install_repos():
   import params
   if params.host_sys_prepped:
     return
 
+  # use this newer way of specifying repositories, if available
+  if params.repo_file is not None:
+    Script.repository_util.create_repo_files()
+    return
+
   template = params.repo_rhel_suse if OSCheck.is_suse_family() or OSCheck.is_redhat_family() else params.repo_ubuntu
+
   _alter_repo("create", params.repo_info, template)
+
   if params.service_repo_info:
     _alter_repo("create", params.service_repo_info, template)
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-RESTART/scripts/hook.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-RESTART/scripts/hook.py
index 14b9d99..f7f4f1c 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-RESTART/scripts/hook.py
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-RESTART/scripts/hook.py
@@ -16,14 +16,15 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
+from resource_management import Hook
 
-from resource_management import *
 
 class BeforeRestartHook(Hook):
 
   def hook(self, env):
     self.run_custom_hook('before-START')
 
+
 if __name__ == "__main__":
   BeforeRestartHook().execute()
 
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-START/scripts/hook.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-SET_KEYTAB/scripts/hook.py
similarity index 64%
copy from bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-START/scripts/hook.py
copy to bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-SET_KEYTAB/scripts/hook.py
index f21e4b1..289475b 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-START/scripts/hook.py
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-SET_KEYTAB/scripts/hook.py
@@ -16,24 +16,24 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
+from resource_management import Hook
 
-import sys
-from resource_management import *
-from rack_awareness import create_topology_script_and_mapping
-from shared_initialization import setup_hadoop, setup_configs, create_javahome_symlink
 
-class BeforeStartHook(Hook):
+class BeforeSetKeytabHook(Hook):
 
   def hook(self, env):
-    import params
-
+    """
+    This will invoke the before-ANY hook which contains all of the user and group creation logic.
+    Keytab regeneration requires all users are already created, which is usually done by the
+    before-INSTALL hook. However, if the keytab regeneration is executed as part of an upgrade,
+    then the before-INSTALL hook never ran.
+
+    :param env:
+    :return:
+    """
     self.run_custom_hook('before-ANY')
-    env.set_params(params)
 
-    setup_hadoop()
-    setup_configs()
-    create_javahome_symlink()
-    create_topology_script_and_mapping()
 
 if __name__ == "__main__":
-  BeforeStartHook().execute()
+  BeforeSetKeytabHook().execute()
+
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-START/files/fast-hdfs-resource.jar b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-START/files/fast-hdfs-resource.jar
new file mode 100644
index 0000000..b8f633f
Binary files /dev/null and b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-START/files/fast-hdfs-resource.jar differ
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-START/scripts/custom_extensions.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-START/scripts/custom_extensions.py
new file mode 100644
index 0000000..04299ba
--- /dev/null
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-START/scripts/custom_extensions.py
@@ -0,0 +1,173 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management.core.resources import Directory
+from resource_management.core.resources import Execute
+from resource_management.libraries.functions import default
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import format
+
+
+DEFAULT_HADOOP_HDFS_EXTENSION_DIR = "/hdp/ext/{0}/hadoop"
+DEFAULT_HADOOP_HIVE_EXTENSION_DIR = "/hdp/ext/{0}/hive"
+DEFAULT_HADOOP_HBASE_EXTENSION_DIR = "/hdp/ext/{0}/hbase"
+
+def setup_extensions():
+  """
+  The goal of this method is to distribute extensions (for example jar files) from
+  HDFS (/hdp/ext/{major_stack_version}/{service_name}) to all nodes which contain related
+  components of service (YARN, HIVE or HBASE). Extensions should be added to HDFS by
+  user manually.
+  """
+
+  import params
+
+  # Hadoop Custom extensions
+  hadoop_custom_extensions_enabled = default("/configurations/core-site/hadoop.custom-extensions.enabled", False)
+  hadoop_custom_extensions_services = default("/configurations/core-site/hadoop.custom-extensions.services", "")
+  hadoop_custom_extensions_owner = default("/configurations/core-site/hadoop.custom-extensions.owner", params.hdfs_user)
+  hadoop_custom_extensions_hdfs_dir = get_config_formatted_value(default("/configurations/core-site/hadoop.custom-extensions.root",
+                                                 DEFAULT_HADOOP_HDFS_EXTENSION_DIR.format(params.major_stack_version)))
+  hadoop_custom_extensions_services = [ service.strip().upper() for service in hadoop_custom_extensions_services.split(",") ]
+  hadoop_custom_extensions_services.append("YARN")
+
+  hadoop_custom_extensions_local_dir = "{0}/current/ext/hadoop".format(Script.get_stack_root())
+
+  if params.current_service in hadoop_custom_extensions_services:
+    clean_extensions(hadoop_custom_extensions_local_dir)
+    if hadoop_custom_extensions_enabled:
+      download_extensions(hadoop_custom_extensions_owner, params.user_group,
+                          hadoop_custom_extensions_hdfs_dir,
+                          hadoop_custom_extensions_local_dir)
+
+  setup_extensions_hive()
+
+  hbase_custom_extensions_services = []
+  hbase_custom_extensions_services.append("HBASE")
+  if params.current_service in hbase_custom_extensions_services:
+    setup_hbase_extensions()
+
+
+def setup_hbase_extensions():
+  import params
+
+  # HBase Custom extensions
+  hbase_custom_extensions_enabled = default("/configurations/hbase-site/hbase.custom-extensions.enabled", False)
+  hbase_custom_extensions_owner = default("/configurations/hbase-site/hbase.custom-extensions.owner", params.hdfs_user)
+  hbase_custom_extensions_hdfs_dir = get_config_formatted_value(default("/configurations/hbase-site/hbase.custom-extensions.root",
+                                                DEFAULT_HADOOP_HBASE_EXTENSION_DIR.format(params.major_stack_version)))
+  hbase_custom_extensions_local_dir = "{0}/current/ext/hbase".format(Script.get_stack_root())
+
+  impacted_components = ['HBASE_MASTER', 'HBASE_REGIONSERVER', 'PHOENIX_QUERY_SERVER'];
+  role = params.config.get('role','')
+
+  if role in impacted_components:
+    clean_extensions(hbase_custom_extensions_local_dir)
+    if hbase_custom_extensions_enabled:
+      download_extensions(hbase_custom_extensions_owner, params.user_group,
+                          hbase_custom_extensions_hdfs_dir,
+                          hbase_custom_extensions_local_dir)
+
+
+def setup_extensions_hive():
+  import params
+
+  hive_custom_extensions_enabled = default("/configurations/hive-site/hive.custom-extensions.enabled", False)
+  hive_custom_extensions_owner = default("/configurations/hive-site/hive.custom-extensions.owner", params.hdfs_user)
+  hive_custom_extensions_hdfs_dir = DEFAULT_HADOOP_HIVE_EXTENSION_DIR.format(params.major_stack_version)
+
+  hive_custom_extensions_local_dir = "{0}/current/ext/hive".format(Script.get_stack_root())
+
+  impacted_components = ['HIVE_SERVER', 'HIVE_CLIENT'];
+  role = params.config.get('role','')
+
+  # Run copying for HIVE_SERVER and HIVE_CLIENT
+  if params.current_service == 'HIVE' and role in impacted_components:
+    clean_extensions(hive_custom_extensions_local_dir)
+    if hive_custom_extensions_enabled:
+      download_extensions(hive_custom_extensions_owner, params.user_group,
+                          hive_custom_extensions_hdfs_dir,
+                          hive_custom_extensions_local_dir)
+
+def download_extensions(owner_user, owner_group, hdfs_source_dir, local_target_dir):
+  """
+  :param owner_user: user owner of the HDFS directory
+  :param owner_group: group owner of the HDFS directory
+  :param hdfs_source_dir: the HDFS directory from where the files are being pull
+  :param local_target_dir: the location of where to download the files
+  :return: Will return True if successful, otherwise, False.
+  """
+  import params
+
+  if not os.path.isdir(local_target_dir):
+    extensions_tmp_dir=format("{tmp_dir}/custom_extensions")
+    Directory(local_target_dir,
+              owner="root",
+              mode=0755,
+              group="root",
+              create_parents=True)
+
+    params.HdfsResource(hdfs_source_dir,
+                        type="directory",
+                        action="create_on_execute",
+                        owner=owner_user,
+                        group=owner_group,
+                        mode=0755)
+
+    Directory(extensions_tmp_dir,
+              owner=params.hdfs_user,
+              mode=0755,
+              create_parents=True)
+
+    # copy from hdfs to /tmp
+    params.HdfsResource(extensions_tmp_dir,
+                        type="directory",
+                        action="download_on_execute",
+                        source=hdfs_source_dir,
+                        user=params.hdfs_user,
+                        mode=0644,
+                        replace_existing_files=True)
+
+    # Execute command is not quoting correctly.
+    cmd = format("{sudo} mv {extensions_tmp_dir}/* {local_target_dir}")
+    only_if_cmd = "ls -d {extensions_tmp_dir}/*".format(extensions_tmp_dir=extensions_tmp_dir)
+    Execute(cmd, only_if=only_if_cmd)
+
+    only_if_local = 'ls -d "{local_target_dir}"'.format(local_target_dir=local_target_dir)
+    Execute(("chown", "-R", "root:root", local_target_dir),
+            sudo=True,
+            only_if=only_if_local)
+
+    params.HdfsResource(None,action="execute")
+  return True
+
+def clean_extensions(local_dir):
+  """
+  :param local_dir: The local directory where the extensions are stored.
+  :return: Will return True if successful, otherwise, False.
+  """
+  if os.path.isdir(local_dir):
+    Directory(local_dir,
+              action="delete")
+  return True
+
+def get_config_formatted_value(property_value):
+  return format(property_value.replace("{{", "{").replace("}}", "}"))
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-START/scripts/hook.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-START/scripts/hook.py
index f21e4b1..2f68cb1 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-START/scripts/hook.py
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-START/scripts/hook.py
@@ -16,11 +16,11 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
-
-import sys
-from resource_management import *
 from rack_awareness import create_topology_script_and_mapping
-from shared_initialization import setup_hadoop, setup_configs, create_javahome_symlink
+from shared_initialization import setup_hadoop, setup_configs, create_javahome_symlink, setup_unlimited_key_jce_policy, \
+  Hook
+from custom_extensions import setup_extensions
+
 
 class BeforeStartHook(Hook):
 
@@ -34,6 +34,10 @@ class BeforeStartHook(Hook):
     setup_configs()
     create_javahome_symlink()
     create_topology_script_and_mapping()
+    setup_unlimited_key_jce_policy()
+    if params.stack_supports_hadoop_custom_extensions:
+      setup_extensions()
+
 
 if __name__ == "__main__":
   BeforeStartHook().execute()
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-START/scripts/params.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-START/scripts/params.py
index 569f125..faccce3 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-START/scripts/params.py
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-START/scripts/params.py
@@ -24,41 +24,74 @@ from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import default
 from resource_management.libraries.functions import format_jvm_option
 from resource_management.libraries.functions import format
-from resource_management.libraries.functions.version import format_stack_version, compare_versions
+from resource_management.libraries.functions.version import format_stack_version, compare_versions, get_major_version
 from ambari_commons.os_check import OSCheck
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.stack_features import get_stack_feature_version
+from resource_management.libraries.functions import StackFeature
+from ambari_commons.constants import AMBARI_SUDO_BINARY
 
 config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+artifact_dir = tmp_dir + "/AMBARI-artifacts"
 
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+version_for_stack_feature_checks = get_stack_feature_version(config)
+stack_supports_hadoop_custom_extensions = check_stack_feature(StackFeature.HADOOP_CUSTOM_EXTENSIONS, version_for_stack_feature_checks)
 
-stack_version_unformatted = config['hostLevelParams']['stack_version']
+sudo = AMBARI_SUDO_BINARY
+
+# Global flag enabling or disabling the sysprep feature
+host_sys_prepped = default("/ambariLevelParams/host_sys_prepped", False)
+
+# Whether to skip copying fast-hdfs-resource.jar to /var/lib/ambari-agent/lib/
+# This is required if tarballs are going to be copied to HDFS, so set to False
+sysprep_skip_copy_fast_jar_hdfs = host_sys_prepped and default("/configurations/cluster-env/sysprep_skip_copy_fast_jar_hdfs", False)
+
+# Whether to skip setting up the unlimited key JCE policy
+sysprep_skip_setup_jce = host_sys_prepped and default("/configurations/cluster-env/sysprep_skip_setup_jce", False)
+
+stack_version_unformatted = config['clusterLevelParams']['stack_version']
 stack_version_formatted = format_stack_version(stack_version_unformatted)
+major_stack_version = get_major_version(stack_version_formatted)
 
-dfs_type = default("/commandParams/dfs_type", "")
+dfs_type = default("/clusterLevelParams/dfs_type", "")
 hadoop_conf_dir = "/etc/hadoop/conf"
-
 component_list = default("/localComponents", [])
 
-hdfs_tmp_dir = config['configurations']['hadoop-env']['hdfs_tmp_dir']
+hdfs_tmp_dir = default("/configurations/hadoop-env/hdfs_tmp_dir", "/tmp")
 
-# hadoop default params
-mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+hadoop_metrics2_properties_content = None
+if 'hadoop-metrics2.properties' in config['configurations']:
+  hadoop_metrics2_properties_content = config['configurations']['hadoop-metrics2.properties']['content']
 
 hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
 hadoop_lib_home = stack_select.get_hadoop_dir("lib")
 hadoop_bin = stack_select.get_hadoop_dir("sbin")
-hadoop_home = '/usr'
-create_lib_snappy_symlinks = True
 
+mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
+hadoop_home = stack_select.get_hadoop_dir("home")
+create_lib_snappy_symlinks = False
+  
 current_service = config['serviceName']
 
 #security params
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 
+ambari_server_resources_url = default("/ambariLevelParams/jdk_location", None)
+if ambari_server_resources_url is not None and ambari_server_resources_url.endswith('/'):
+  ambari_server_resources_url = ambari_server_resources_url[:-1]
+
+# Unlimited key JCE policy params
+jce_policy_zip = default("/ambariLevelParams/jce_name", None) # None when jdk is already installed by user
+unlimited_key_jce_required = default("/componentLevelParams/unlimited_key_jce_required", False)
+jdk_name = default("/ambariLevelParams/jdk_name", None)
+java_home = default("/ambariLevelParams/java_home", None)
+java_exec = "{0}/bin/java".format(java_home) if java_home is not None else "/bin/java"
+
 #users and groups
 has_hadoop_env = 'hadoop-env' in config['configurations']
 mapred_user = config['configurations']['mapred-env']['mapred_user']
@@ -68,22 +101,32 @@ yarn_user = config['configurations']['yarn-env']['yarn_user']
 user_group = config['configurations']['cluster-env']['user_group']
 
 #hosts
-hostname = config["hostname"]
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+hostname = config['agentLevelParams']['hostname']
+ambari_server_hostname = config['ambariLevelParams']['ambari_server_host']
+rm_host = default("/clusterHostInfo/resourcemanager_hosts", [])
+slave_hosts = default("/clusterHostInfo/datanode_hosts", [])
 oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_hosts", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_hosts", [])
 hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
-
-has_namenode = not len(namenode_host) == 0
+hs_host = default("/clusterHostInfo/historyserver_hosts", [])
+jtnode_host = default("/clusterHostInfo/jtnode_hosts", [])
+namenode_hosts = default("/clusterHostInfo/namenode_hosts", [])
+hdfs_client_hosts = default("/clusterHostInfo/hdfs_client_hosts", [])
+zk_hosts = default("/clusterHostInfo/zookeeper_server_hosts", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_hosts", [])
+cluster_name = config["clusterName"]
+set_instanceId = "false"
+if 'cluster-env' in config['configurations'] and \
+    'metrics_collector_external_hosts' in config['configurations']['cluster-env']:
+  ams_collector_hosts = config['configurations']['cluster-env']['metrics_collector_external_hosts']
+  set_instanceId = "true"
+else:
+  ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
+
+has_namenode = len(namenode_hosts) > 0
+has_hdfs_clients = len(hdfs_client_hosts) > 0
+has_hdfs = has_hdfs_clients or has_namenode
 has_resourcemanager = not len(rm_host) == 0
 has_slaves = not len(slave_hosts) == 0
 has_oozie_server = not len(oozie_servers) == 0
@@ -94,25 +137,23 @@ has_zk_host = not len(zk_hosts) == 0
 has_ganglia_server = not len(ganglia_server_hosts) == 0
 has_metric_collector = not len(ams_collector_hosts) == 0
 
-is_namenode_master = hostname in namenode_host
+is_namenode_master = hostname in namenode_hosts
 is_jtnode_master = hostname in jtnode_host
 is_rmnode_master = hostname in rm_host
 is_hsnode_master = hostname in hs_host
 is_hbase_master = hostname in hbase_master_hosts
 is_slave = hostname in slave_hosts
+
 if has_ganglia_server:
   ganglia_server_host = ganglia_server_hosts[0]
+
+metric_collector_port = None
 if has_metric_collector:
   if 'cluster-env' in config['configurations'] and \
-      'metrics_collector_vip_host' in config['configurations']['cluster-env']:
-    metric_collector_host = config['configurations']['cluster-env']['metrics_collector_vip_host']
+      'metrics_collector_external_port' in config['configurations']['cluster-env']:
+    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_external_port']
   else:
-    metric_collector_host = ams_collector_hosts[0]
-  if 'cluster-env' in config['configurations'] and \
-      'metrics_collector_vip_port' in config['configurations']['cluster-env']:
-    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
-  else:
-    metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "localhost:6188")
+    metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
     if metric_collector_web_address.find(':') != -1:
       metric_collector_port = metric_collector_web_address.split(':')[1]
     else:
@@ -124,11 +165,33 @@ if has_metric_collector:
   metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
   metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
   metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
+  metric_legacy_hadoop_sink = check_stack_feature(StackFeature.AMS_LEGACY_HADOOP_SINK, version_for_stack_feature_checks)
 
   pass
+
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
 metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
 
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
+is_aggregation_https_enabled = False
+if default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
+  host_in_memory_aggregation_protocol = 'https'
+  is_aggregation_https_enabled = True
+else:
+  host_in_memory_aggregation_protocol = 'http'
+
+# Cluster Zookeeper quorum
+zookeeper_quorum = None
+if has_zk_host:
+  if 'zoo.cfg' in config['configurations'] and 'clientPort' in config['configurations']['zoo.cfg']:
+    zookeeper_clientPort = config['configurations']['zoo.cfg']['clientPort']
+  else:
+    zookeeper_clientPort = '2181'
+  zookeeper_quorum = (':' + zookeeper_clientPort + ',').join(config['clusterHostInfo']['zookeeper_server_hosts'])
+  # last port config
+  zookeeper_quorum += ':' + zookeeper_clientPort
+
 #hadoop params
 
 if has_namenode or dfs_type == 'HCFS':
@@ -140,18 +203,8 @@ hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_p
 hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
 hbase_tmp_dir = "/tmp/hbase-hbase"
 #db params
-server_db_name = config['hostLevelParams']['db_name']
-db_driver_filename = config['hostLevelParams']['db_driver_filename']
-oracle_driver_url = config['hostLevelParams']['oracle_jdbc_url']
-mysql_driver_url = config['hostLevelParams']['mysql_jdbc_url']
-ambari_server_resources = config['hostLevelParams']['jdk_location']
-oracle_driver_symlink_url = format("{ambari_server_resources}oracle-jdbc-driver.jar")
-mysql_driver_symlink_url = format("{ambari_server_resources}mysql-jdbc-driver.jar")
-
-ambari_db_rca_url = config['hostLevelParams']['ambari_db_rca_url'][0]
-ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver'][0]
-ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username'][0]
-ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password'][0]
+oracle_driver_symlink_url = format("{ambari_server_resources_url}/oracle-jdbc-driver.jar")
+mysql_driver_symlink_url = format("{ambari_server_resources_url}/mysql-jdbc-driver.jar")
 
 if has_namenode and 'rca_enabled' in config['configurations']['hadoop-env']:
   rca_enabled =  config['configurations']['hadoop-env']['rca_enabled']
@@ -164,7 +217,6 @@ else:
   rca_prefix = rca_disabled_prefix
 
 #hadoop-env.sh
-java_home = config['hostLevelParams']['java_home']
 
 jsvc_path = "/usr/lib/bigtop-utils"
 
@@ -190,6 +242,16 @@ yarn_log_dir_prefix = default("/configurations/yarn-env/yarn_log_dir_prefix","/v
 
 dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
 
+# Hdfs log4j settings
+hadoop_log_max_backup_size = default('configurations/hdfs-log4j/hadoop_log_max_backup_size', 256)
+hadoop_log_number_of_backup_files = default('configurations/hdfs-log4j/hadoop_log_number_of_backup_files', 10)
+hadoop_security_log_max_backup_size = default('configurations/hdfs-log4j/hadoop_security_log_max_backup_size', 256)
+hadoop_security_log_number_of_backup_files = default('configurations/hdfs-log4j/hadoop_security_log_number_of_backup_files', 20)
+
+# Yarn log4j settings
+yarn_rm_summary_log_max_backup_size = default('configurations/yarn-log4j/yarn_rm_summary_log_max_backup_size', 256)
+yarn_rm_summary_log_number_of_backup_files = default('configurations/yarn-log4j/yarn_rm_summary_log_number_of_backup_files', 20)
+
 #log4j.properties
 if (('hdfs-log4j' in config['configurations']) and ('content' in config['configurations']['hdfs-log4j'])):
   log4j_props = config['configurations']['hdfs-log4j']['content']
@@ -203,6 +265,10 @@ command_params = config["commandParams"] if "commandParams" in config else None
 if command_params is not None:
   refresh_topology = bool(command_params["refresh_topology"]) if "refresh_topology" in command_params else False
 
+ambari_java_home = default("/commandParams/ambari_java_home", None)
+ambari_jdk_name = default("/commandParams/ambari_jdk_name", None)
+ambari_jce_name = default("/commandParams/ambari_jce_name", None)
+  
 ambari_libs_dir = "/var/lib/ambari-agent/lib"
 is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled']
 default_fs = config['configurations']['core-site']['fs.defaultFS']
@@ -211,7 +277,7 @@ default_fs = config['configurations']['core-site']['fs.defaultFS']
 all_hosts = default("/clusterHostInfo/all_hosts", [])
 all_racks = default("/clusterHostInfo/all_racks", [])
 all_ipv4_ips = default("/clusterHostInfo/all_ipv4_ips", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+slave_hosts = default("/clusterHostInfo/datanode_hosts", [])
 
 #topology files
 net_topology_script_file_path = "/etc/hadoop/conf/topology_script.py"
@@ -219,16 +285,15 @@ net_topology_script_dir = os.path.dirname(net_topology_script_file_path)
 net_topology_mapping_data_file_name = 'topology_mappings.data'
 net_topology_mapping_data_file_path = os.path.join(net_topology_script_dir, net_topology_mapping_data_file_name)
 
-#Added logic to create /tmp and /user directory for HCFS stack.
+#Added logic to create /tmp and /user directory for HCFS stack.  
 has_core_site = 'core-site' in config['configurations']
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
 kinit_path_local = get_kinit_path()
-stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_unformatted = config['clusterLevelParams']['stack_version']
 stack_version_formatted = format_stack_version(stack_version_unformatted)
 hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
 hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
 hdfs_site = config['configurations']['hdfs-site']
-default_fs = config['configurations']['core-site']['fs.defaultFS']
 smoke_user =  config['configurations']['cluster-env']['smokeuser']
 smoke_hdfs_user_dir = format("/user/{smoke_user}")
 smoke_hdfs_user_mode = 0770
@@ -262,16 +327,21 @@ if dfs_ha_namenode_ids:
 if dfs_ha_enabled:
  for nn_id in dfs_ha_namemodes_ids_list:
    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
-   if hostname in nn_host:
+   if hostname.lower() in nn_host.lower():
      namenode_id = nn_id
      namenode_rpc = nn_host
    pass
  pass
 else:
- namenode_rpc = default('/configurations/hdfs-site/dfs.namenode.rpc-address', None)
-
-if namenode_rpc:
- nn_rpc_client_port = namenode_rpc.split(':')[1].strip()
+  namenode_rpc = default('/configurations/hdfs-site/dfs.namenode.rpc-address', default_fs)
+
+# if HDFS is not installed in the cluster, then don't try to access namenode_rpc
+if has_namenode and namenode_rpc and "core-site" in config['configurations']:
+  port_str = namenode_rpc.split(':')[-1].strip()
+  try:
+    nn_rpc_client_port = int(port_str)
+  except ValueError:
+    nn_rpc_client_port = None
 
 if dfs_ha_enabled:
  dfs_service_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.servicerpc-address.{dfs_ha_nameservices}.{namenode_id}'), None)
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-START/scripts/rack_awareness.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-START/scripts/rack_awareness.py
index 548f051..48158bb 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-START/scripts/rack_awareness.py
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-START/scripts/rack_awareness.py
@@ -30,6 +30,7 @@ def create_topology_mapping():
        content=Template("topology_mappings.data.j2"),
        owner=params.hdfs_user,
        group=params.user_group,
+       mode=0644,
        only_if=format("test -d {net_topology_script_dir}"))
 
 def create_topology_script():
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-START/scripts/shared_initialization.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-START/scripts/shared_initialization.py
index 9a918c1..ce6b869 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-START/scripts/shared_initialization.py
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-START/scripts/shared_initialization.py
@@ -19,6 +19,7 @@ limitations under the License.
 
 import os
 from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
+from resource_management.core.resources.jcepolicyinfo import JcePolicyInfo
 
 from resource_management import *
 
@@ -50,6 +51,11 @@ def setup_hadoop():
               group='root',
               cd_access='a',
       )
+      Directory(format("{hadoop_pid_dir_prefix}/{hdfs_user}"),
+              owner=params.hdfs_user,
+              cd_access='a',
+      )
+
     Directory(params.hadoop_tmp_dir,
               create_parents = True,
               owner=params.hdfs_user,
@@ -60,17 +66,7 @@ def setup_hadoop():
       tc_owner = "root"
     else:
       tc_owner = params.hdfs_user
-
-    # if WebHDFS is not enabled we need this jar to create hadoop folders.
-    if params.host_sys_prepped:
-      print "Skipping copying of fast-hdfs-resource.jar as host is sys prepped"
-    elif params.dfs_type == 'HCFS' or not WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
-      # for source-code of jar goto contrib/fast-hdfs-resource
-      File(format("{ambari_libs_dir}/fast-hdfs-resource.jar"),
-           mode=0644,
-           content=StaticFile("fast-hdfs-resource.jar")
-      )
-
+      
     if os.path.exists(params.hadoop_conf_dir):
       File(os.path.join(params.hadoop_conf_dir, 'commons-logging.properties'),
            owner=tc_owner,
@@ -89,7 +85,7 @@ def setup_hadoop():
              mode=0644,
              group=params.user_group,
              owner=params.hdfs_user,
-             content=params.log4j_props
+             content=InlineTemplate(params.log4j_props)
         )
       elif (os.path.exists(format("{params.hadoop_conf_dir}/log4j.properties"))):
         File(log4j_filename,
@@ -98,14 +94,34 @@ def setup_hadoop():
              owner=params.hdfs_user,
         )
 
-      File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
-           owner=params.hdfs_user,
-           group=params.user_group,
-           content=Template("hadoop-metrics2.properties.j2")
-      )
+    create_microsoft_r_dir()
+
+  if params.has_hdfs or params.dfs_type == 'HCFS':
+    # if WebHDFS is not enabled we need this jar to create hadoop folders and copy tarballs to HDFS.
+    if params.sysprep_skip_copy_fast_jar_hdfs:
+      print "Skipping copying of fast-hdfs-resource.jar as host is sys prepped"
+    elif params.dfs_type == 'HCFS' or not WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.dfs_type):
+      # for source-code of jar goto contrib/fast-hdfs-resource
+      File(format("{ambari_libs_dir}/fast-hdfs-resource.jar"),
+           mode=0644,
+           content=StaticFile("fast-hdfs-resource.jar")
+           )
+    if os.path.exists(params.hadoop_conf_dir):
+      if params.hadoop_metrics2_properties_content:
+        File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
+             owner=params.hdfs_user,
+             group=params.user_group,
+             content=InlineTemplate(params.hadoop_metrics2_properties_content)
+             )
+      else:
+        File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
+             owner=params.hdfs_user,
+             group=params.user_group,
+             content=Template("hadoop-metrics2.properties.j2")
+             )
 
     if params.dfs_type == 'HCFS' and params.has_core_site and 'ECS_CLIENT' in params.component_list:
-       create_dirs()
+      create_dirs()
 
 
 def setup_configs():
@@ -159,3 +175,88 @@ def create_dirs():
                       action="execute"
    )
 
+def create_microsoft_r_dir():
+  import params
+  if 'MICROSOFT_R_NODE_CLIENT' in params.component_list and params.default_fs:
+    directory = '/user/RevoShare'
+    try:
+      params.HdfsResource(directory,
+                          type="directory",
+                          action="create_on_execute",
+                          owner=params.hdfs_user,
+                          mode=0777)
+      params.HdfsResource(None, action="execute")
+    except Exception as exception:
+      Logger.warning("Could not check the existence of {0} on DFS while starting {1}, exception: {2}".format(directory, params.current_service, str(exception)))
+
+def setup_unlimited_key_jce_policy():
+  """
+  Sets up the unlimited key JCE policy if needed. (sets up ambari JCE as well if ambari and the  stack use different JDK)
+  """
+  import params
+  __setup_unlimited_key_jce_policy(custom_java_home=params.java_home, custom_jdk_name=params.jdk_name, custom_jce_name = params.jce_policy_zip)
+  if params.ambari_jce_name and params.ambari_jce_name != params.jce_policy_zip:
+    __setup_unlimited_key_jce_policy(custom_java_home=params.ambari_java_home, custom_jdk_name=params.ambari_jdk_name, custom_jce_name = params.ambari_jce_name)
+
+def __setup_unlimited_key_jce_policy(custom_java_home, custom_jdk_name, custom_jce_name):
+  """
+  Sets up the unlimited key JCE policy if needed.
+
+  The following criteria must be met:
+
+    * The cluster has not been previously prepared (sys preped) - cluster-env/sysprep_skip_setup_jce = False
+    * Ambari is managing the host's JVM - /ambariLevelParams/jdk_name is set
+    * Either security is enabled OR a service requires it - /componentLevelParams/unlimited_key_jce_required = True
+    * The unlimited key JCE policy has not already been installed
+
+  If the conditions are met, the following steps are taken to install the unlimited key JCE policy JARs
+
+    1. The unlimited key JCE policy ZIP file is downloaded from the Ambari server and stored in the
+        Ambari agent's temporary directory
+    2. The existing JCE policy JAR files are deleted
+    3. The downloaded ZIP file is unzipped into the proper JCE policy directory
+
+  :return: None
+  """
+  import params
+
+  if params.sysprep_skip_setup_jce:
+    Logger.info("Skipping unlimited key JCE policy check and setup since the host is sys prepped")
+
+  elif not custom_jdk_name:
+    Logger.info("Skipping unlimited key JCE policy check and setup since the Java VM is not managed by Ambari")
+
+  elif not params.unlimited_key_jce_required:
+    Logger.info("Skipping unlimited key JCE policy check and setup since it is not required")
+
+  else:
+    jcePolicyInfo = JcePolicyInfo(custom_java_home)
+
+    if jcePolicyInfo.is_unlimited_key_jce_policy():
+      Logger.info("The unlimited key JCE policy is required, and appears to have been installed.")
+
+    elif custom_jce_name is None:
+      raise Fail("The unlimited key JCE policy needs to be installed; however the JCE policy zip is not specified.")
+
+    else:
+      Logger.info("The unlimited key JCE policy is required, and needs to be installed.")
+
+      jce_zip_target = format("{artifact_dir}/{custom_jce_name}")
+      jce_zip_source = format("{ambari_server_resources_url}/{custom_jce_name}")
+      java_security_dir = format("{custom_java_home}/jre/lib/security")
+
+      Logger.debug("Downloading the unlimited key JCE policy files from {0} to {1}.".format(jce_zip_source, jce_zip_target))
+      Directory(params.artifact_dir, create_parents=True)
+      File(jce_zip_target, content=DownloadSource(jce_zip_source))
+
+      Logger.debug("Removing existing JCE policy JAR files: {0}.".format(java_security_dir))
+      File(format("{java_security_dir}/US_export_policy.jar"), action="delete")
+      File(format("{java_security_dir}/local_policy.jar"), action="delete")
+
+      Logger.debug("Unzipping the unlimited key JCE policy files from {0} into {1}.".format(jce_zip_target, java_security_dir))
+      extract_cmd = ("unzip", "-o", "-j", "-q", jce_zip_target, "-d", java_security_dir)
+      Execute(extract_cmd,
+              only_if=format("test -e {java_security_dir} && test -f {jce_zip_target}"),
+              path=['/bin/', '/usr/bin'],
+              sudo=True
+              )
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
index fcd9b23..49be9c4 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
@@ -67,26 +67,36 @@ resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
 {% if has_metric_collector %}
 
 *.period={{metrics_collection_period}}
+{% if metric_legacy_hadoop_sink %}
+*.sink.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink-legacy.jar
+{% else %}
 *.sink.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
+{% endif %}
 *.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
 *.sink.timeline.period={{metrics_collection_period}}
 *.sink.timeline.sendInterval={{metrics_report_interval}}000
-*.sink.timeline.slave.host.name = {{hostname}}
+*.sink.timeline.slave.host.name={{hostname}}
+*.sink.timeline.zookeeper.quorum={{zookeeper_quorum}}
+*.sink.timeline.protocol={{metric_collector_protocol}}
+*.sink.timeline.port={{metric_collector_port}}
+*.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+*.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
+{% if is_aggregation_https_enabled %}
+*.sink.timeline.host_in_memory_aggregation_protocol = {{host_in_memory_aggregation_protocol}}
+{% endif %}
 
 # HTTPS properties
 *.sink.timeline.truststore.path = {{metric_truststore_path}}
 *.sink.timeline.truststore.type = {{metric_truststore_type}}
 *.sink.timeline.truststore.password = {{metric_truststore_password}}
 
-datanode.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
-namenode.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
-resourcemanager.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
-nodemanager.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
-jobhistoryserver.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
-journalnode.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
-maptask.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
-reducetask.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
-applicationhistoryserver.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+datanode.sink.timeline.collector.hosts={{ams_collector_hosts}}
+namenode.sink.timeline.collector.hosts={{ams_collector_hosts}}
+resourcemanager.sink.timeline.collector.hosts={{ams_collector_hosts}}
+nodemanager.sink.timeline.collector.hosts={{ams_collector_hosts}}
+jobhistoryserver.sink.timeline.collector.hosts={{ams_collector_hosts}}
+journalnode.sink.timeline.collector.hosts={{ams_collector_hosts}}
+applicationhistoryserver.sink.timeline.collector.hosts={{ams_collector_hosts}}
 
 resourcemanager.sink.timeline.tagsForPrefix.yarn=Queue
 
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/YARN_widgets.json b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/YARN_widgets.json
index 4b76a17..df91f9a 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/YARN_widgets.json
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/YARN_widgets.json
@@ -438,20 +438,20 @@
           "is_visible": false,
           "metrics": [
             {
-              "name": "yarn.NodeManagerMetrics.ContainersFailed._rate",
-              "metric_path": "metrics/yarn/ContainersFailed._rate",
+              "name": "yarn.NodeManagerMetrics.ContainersFailed",
+              "metric_path": "metrics/yarn/ContainersFailed",
               "service_name": "YARN",
               "component_name": "NODEMANAGER"
             },
             {
-              "name": "yarn.NodeManagerMetrics.ContainersCompleted._rate",
-              "metric_path": "metrics/yarn/ContainersCompleted._rate",
+              "name": "yarn.NodeManagerMetrics.ContainersCompleted",
+              "metric_path": "metrics/yarn/ContainersCompleted",
               "service_name": "YARN",
               "component_name": "NODEMANAGER"
             },
             {
-              "name": "yarn.NodeManagerMetrics.ContainersLaunched._rate",
-              "metric_path": "metrics/yarn/ContainersLaunched._rate",
+              "name": "yarn.NodeManagerMetrics.ContainersLaunched",
+              "metric_path": "metrics/yarn/ContainersLaunched",
               "service_name": "YARN",
               "component_name": "NODEMANAGER"
             },
@@ -462,8 +462,8 @@
               "component_name": "NODEMANAGER"
             },
             {
-              "name": "yarn.NodeManagerMetrics.ContainersKilled._rate",
-              "metric_path": "metrics/yarn/ContainersKilled._rate",
+              "name": "yarn.NodeManagerMetrics.ContainersKilled",
+              "metric_path": "metrics/yarn/ContainersKilled",
               "service_name": "YARN",
               "component_name": "NODEMANAGER"
             },
@@ -477,7 +477,7 @@
           "values": [
             {
               "name": "Container Failures",
-              "value": "${(yarn.NodeManagerMetrics.ContainersFailed._rate/(yarn.NodeManagerMetrics.ContainersFailed._rate + yarn.NodeManagerMetrics.ContainersCompleted._rate + yarn.NodeManagerMetrics.ContainersLaunched._rate + yarn.NodeManagerMetrics.ContainersIniting + yarn.NodeManagerMetrics.ContainersKilled._rate + yarn.NodeManagerMetrics.ContainersRunning)) * 100}"
+              "value": "${(yarn.NodeManagerMetrics.ContainersFailed/(yarn.NodeManagerMetrics.ContainersFailed + yarn.NodeManagerMetrics.ContainersCompleted + yarn.NodeManagerMetrics.ContainersLaunched + yarn.NodeManagerMetrics.ContainersIniting + yarn.NodeManagerMetrics.ContainersKilled + yarn.NodeManagerMetrics.ContainersRunning)) * 100}"
             }
           ],
           "properties": {
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/alerts.json b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/alerts.json
index 8561922..eb3cda2 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/alerts.json
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/alerts.json
@@ -15,8 +15,8 @@
             "https": "{{mapred-site/mapreduce.jobhistory.webapp.https.address}}",
             "https_property": "{{mapred-site/mapreduce.jobhistory.http.policy}}",
             "https_property_value": "HTTPS_ONLY",
-            "kerberos_keytab": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file}}",
-            "kerberos_principal": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-principal}}",
+            "kerberos_keytab": "{{cluster-env/smokeuser_keytab}}",
+            "kerberos_principal": "{{cluster-env/smokeuser_principal_name}}",
             "connection_timeout": 5.0
           },
           "reporting": {
@@ -43,8 +43,8 @@
           "type": "METRIC",
           "uri": {
             "http": "{{mapred-site/mapreduce.jobhistory.webapp.address}}",
-            "kerberos_keytab": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file}}",
-            "kerberos_principal": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-principal}}",
+            "kerberos_keytab": "{{cluster-env/smokeuser_keytab}}",
+            "kerberos_principal": "{{cluster-env/smokeuser_principal_name}}",
             "https": "{{mapred-site/mapreduce.jobhistory.webapp.https.address}}",
             "https_property": "{{mapred-site/mapreduce.jobhistory.http.policy}}",
             "https_property_value": "HTTPS_ONLY",
@@ -86,8 +86,8 @@
           "uri": {
             "http": "{{mapred-site/mapreduce.jobhistory.webapp.address}}",
             "https": "{{mapred-site/mapreduce.jobhistory.webapp.https.address}}",
-            "kerberos_keytab": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file}}",
-            "kerberos_principal": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-principal}}",
+            "kerberos_keytab": "{{cluster-env/smokeuser_keytab}}",
+            "kerberos_principal": "{{cluster-env/smokeuser_principal_name}}",
             "https_property": "{{mapred-site/mapreduce.jobhistory.http.policy}}",
             "https_property_value": "HTTPS_ONLY",
             "connection_timeout": 5.0
@@ -114,32 +114,6 @@
             "value": "{0}"
           }
         }
-      },
-      {
-        "name": "mapreduce_history_server_process",
-        "label": "History Server Process",
-        "description": "This host-level alert is triggered if the History Server process cannot be established to be up and listening on the network.",
-        "interval": 1,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "PORT",
-          "uri": "{{mapred-site/mapreduce.jobhistory.webapp.address}}",
-          "default_port": 19888,
-          "reporting": {
-            "ok": {
-              "text": "TCP OK - {0:.3f}s response on port {1}"
-            },
-            "warning": {
-              "text": "TCP OK - {0:.3f}s response on port {1}",
-              "value": 1.5
-            },
-            "critical": {
-              "text": "Connection failed: {0} to {1}:{2}",
-              "value": 5.0
-            }
-          }
-        }
       }
     ]
   },
@@ -188,8 +162,8 @@
             "https_property": "{{yarn-site/yarn.http.policy}}",
             "https_property_value": "HTTPS_ONLY",
             "default_port": 8042,
-            "kerberos_keytab": "{{yarn-site/yarn.nodemanager.webapp.spnego-keytab-file}}",
-            "kerberos_principal": "{{yarn-site/yarn.nodemanager.webapp.spnego-principal}}",
+            "kerberos_keytab": "{{cluster-env/smokeuser_keytab}}",
+            "kerberos_principal": "{{cluster-env/smokeuser_principal_name}}",
             "connection_timeout": 5.0
           },
           "reporting": {
@@ -243,8 +217,8 @@
             "https": "{{yarn-site/yarn.resourcemanager.webapp.https.address}}",
             "https_property": "{{yarn-site/yarn.http.policy}}",
             "https_property_value": "HTTPS_ONLY",
-            "kerberos_keytab": "{{yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file}}",
-            "kerberos_principal": "{{yarn-site/yarn.resourcemanager.webapp.spnego-principal}}",
+            "kerberos_keytab": "{{cluster-env/smokeuser_keytab}}",
+            "kerberos_principal": "{{cluster-env/smokeuser_principal_name}}",
             "connection_timeout": 5.0,
             "high_availability": {
               "alias_key" : "{{yarn-site/yarn.resourcemanager.ha.rm-ids}}",
@@ -277,8 +251,8 @@
           "uri": {
             "http": "{{yarn-site/yarn.resourcemanager.webapp.address}}",
             "https": "{{yarn-site/yarn.resourcemanager.webapp.https.address}}",
-            "kerberos_keytab": "{{yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file}}",
-            "kerberos_principal": "{{yarn-site/yarn.resourcemanager.webapp.spnego-principal}}",
+            "kerberos_keytab": "{{cluster-env/smokeuser_keytab}}",
+            "kerberos_principal": "{{cluster-env/smokeuser_principal_name}}",
             "https_property": "{{yarn-site/yarn.http.policy}}",
             "https_property_value": "HTTPS_ONLY",
             "connection_timeout": 5.0,
@@ -324,8 +298,8 @@
           "uri": {
             "http": "{{yarn-site/yarn.resourcemanager.webapp.address}}",
             "https": "{{yarn-site/yarn.resourcemanager.webapp.https.address}}",
-            "kerberos_keytab": "{{yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file}}",
-            "kerberos_principal": "{{yarn-site/yarn.resourcemanager.webapp.spnego-principal}}",
+            "kerberos_keytab": "{{cluster-env/smokeuser_keytab}}",
+            "kerberos_principal": "{{cluster-env/smokeuser_principal_name}}",
             "https_property": "{{yarn-site/yarn.http.policy}}",
             "https_property_value": "HTTPS_ONLY",
             "connection_timeout": 5.0,
@@ -396,8 +370,8 @@
             "https": "{{yarn-site/yarn.timeline-service.webapp.https.address}}/ws/v1/timeline",
             "https_property": "{{yarn-site/yarn.http.policy}}",
             "https_property_value": "HTTPS_ONLY",
-            "kerberos_keytab": "{{yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab}}",
-            "kerberos_principal": "{{yarn-site/yarn.timeline-service.http-authentication.kerberos.principal}}",
+            "kerberos_keytab": "{{cluster-env/smokeuser_keytab}}",
+            "kerberos_principal": "{{cluster-env/smokeuser_principal_name}}",
             "connection_timeout": 5.0
           },
           "reporting": {
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/configuration-mapred/mapred-env.xml b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/configuration-mapred/mapred-env.xml
index fe6d4b9..9a78249 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/configuration-mapred/mapred-env.xml
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/configuration-mapred/mapred-env.xml
@@ -52,6 +52,12 @@
     <value-attributes>
       <type>user</type>
       <overridable>false</overridable>
+      <user-groups>
+        <property>
+          <type>cluster-env</type>
+          <name>user_group</name>
+        </property>
+      </user-groups>
     </value-attributes>
     <on-ambari-upgrade add="true"/>
   </property>
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/configuration-mapred/mapred-site.xml b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/configuration-mapred/mapred-site.xml
index 434eea0..d78d6ef 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/configuration-mapred/mapred-site.xml
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/configuration-mapred/mapred-site.xml
@@ -235,6 +235,7 @@
   </property>
   <property>
     <name>mapreduce.jobhistory.intermediate-done-dir</name>
+    <display-name>Mapreduce JobHistory Intermediate Done directory</display-name>
     <value>/mr-history/tmp</value>
     <description>
       Directory where history files are written by MapReduce jobs.
@@ -243,6 +244,7 @@
   </property>
   <property>
     <name>mapreduce.jobhistory.done-dir</name>
+    <display-name>Mapreduce JobHistory Done directory</display-name>
     <value>/mr-history/done</value>
     <description>
       Directory where history files are managed by the MR JobHistory Server.
@@ -273,6 +275,7 @@
   </property>
   <property>
     <name>yarn.app.mapreduce.am.staging-dir</name>
+    <display-name>YARN App Mapreduce AM Staging directory</display-name>
     <value>/user</value>
     <description>
       The staging dir used while submitting jobs.
@@ -438,7 +441,7 @@
   </property>
   <property>
     <name>mapreduce.admin.user.env</name>
-    <value>LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/Linux-amd64-64</value>
+    <value>LD_LIBRARY_PATH={{hadoop_lib_home}}/native:{{hadoop_lib_home}}/native/Linux-{{architecture}}-64:./mr-framework/hadoop/lib/native:./mr-framework/hadoop/lib/native/Linux-{{architecture}}-64</value>
     <description>
       Additional execution environment entries for map and reduce task processes.
       This is not an additive property. You must preserve the original value if
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/configuration/container-executor.xml b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/configuration/container-executor.xml
new file mode 100644
index 0000000..e19fe90
--- /dev/null
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/configuration/container-executor.xml
@@ -0,0 +1,36 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <property>
+    <name>content</name>
+    <display-name>container-executor configuration template</display-name>
+    <description>This is the jinja template for container-executor.cfg file</description>
+    <property-type>VALUE_FROM_PROPERTY_FILE</property-type>
+    <value/>
+    <value-attributes>
+	  <type>content</type>
+      <property-file-name>container-executor.cfg.j2</property-file-name>
+      <property-file-type>text</property-file-type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/configuration/yarn-env.xml b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/configuration/yarn-env.xml
index c3bbcb6..52560ac 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/configuration/yarn-env.xml
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/configuration/yarn-env.xml
@@ -52,6 +52,12 @@
     <value-attributes>
       <type>user</type>
       <overridable>false</overridable>
+      <user-groups>
+        <property>
+          <type>cluster-env</type>
+          <name>user_group</name>
+        </property>
+      </user-groups>
     </value-attributes>
     <on-ambari-upgrade add="true"/>
   </property>
@@ -124,6 +130,7 @@
     <description>This is the jinja template for yarn-env.sh file</description>
     <value>
 export HADOOP_YARN_HOME={{hadoop_yarn_home}}
+USER="$(whoami)"
 export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER
 export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER
 export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
@@ -237,6 +244,10 @@ if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
 fi
 YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
 YARN_OPTS="$YARN_OPTS -Djava.io.tmpdir={{hadoop_java_io_tmpdir}}"
+
+{% if rm_security_opts is defined %}
+YARN_OPTS="{{rm_security_opts}} $YARN_OPTS"
+{% endif %}
     </value>
     <value-attributes>
       <type>content</type>
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/configuration/yarn-log4j.xml b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/configuration/yarn-log4j.xml
index 89dd52d..a49ad04 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/configuration/yarn-log4j.xml
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/configuration/yarn-log4j.xml
@@ -19,8 +19,29 @@
  * limitations under the License.
  */
 -->
-<configuration supports_final="false" supports_adding_forbidden="true">
+<configuration supports_final="false" supports_adding_forbidden="false">
   <property>
+    <name>yarn_rm_summary_log_max_backup_size</name>
+    <value>256</value>
+    <description>The maximum size of backup file before the log is rotated</description>
+    <display-name>YARN Log: backup file size</display-name>
+    <value-attributes>
+        <unit>MB</unit>
+    </value-attributes>
+     <on-ambari-upgrade add="false"/>
+   </property>
+   <property>
+      <name>yarn_rm_summary_log_number_of_backup_files</name>
+      <value>20</value>
+      <description>The number of backup files</description>
+      <display-name>YARN Log: # of backup files</display-name>
+    <value-attributes>
+        <type>int</type>
+        <minimum>0</minimum>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
     <name>content</name>
     <display-name>yarn-log4j template</display-name>
     <description>Custom log4j.properties</description>
@@ -54,8 +75,8 @@ yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
 #    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
 log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
 log4j.appender.RMSUMMARY.File=${yarn.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
-log4j.appender.RMSUMMARY.MaxFileSize=256MB
-log4j.appender.RMSUMMARY.MaxBackupIndex=20
+log4j.appender.RMSUMMARY.MaxFileSize={{yarn_rm_summary_log_max_backup_size}}MB
+log4j.appender.RMSUMMARY.MaxBackupIndex={{yarn_rm_summary_log_number_of_backup_files}}
 log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
 log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
 log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/configuration/yarn-site.xml b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/configuration/yarn-site.xml
index f3ea462..7f044b1 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/configuration/yarn-site.xml
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/configuration/yarn-site.xml
@@ -18,6 +18,7 @@
 -->
 <!-- Put site-specific property overrides in this file. -->
 <configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
+  <!-- ResourceManager -->
   <property>
     <name>yarn.resourcemanager.hostname</name>
     <value>localhost</value>
@@ -106,6 +107,25 @@
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
+    <name>yarn.acl.enable</name>
+    <value>false</value>
+    <description> Are acls enabled. </description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>yarn.admin.acl</name>
+    <value/>
+    <description> ACL of who can be admin of the YARN cluster. </description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- NodeManager -->
+  <property>
     <name>yarn.nodemanager.address</name>
     <value>0.0.0.0:45454</value>
     <description>The address of the container manager in the NM.</description>
@@ -182,6 +202,7 @@
   </property>
   <property>
     <name>yarn.nodemanager.log-dirs</name>
+    <display-name>YARN NodeManager Log directories</display-name>
     <value>/hadoop/yarn/log</value>
     <description>
       Where to store container logs. An application's localized log directory
@@ -197,6 +218,7 @@
   </property>
   <property>
     <name>yarn.nodemanager.local-dirs</name>
+    <display-name>YARN NodeManager Local directories</display-name>
     <value>/hadoop/yarn/local</value>
     <description>
       List of directories to store localized files in. An
@@ -239,13 +261,13 @@
     <on-ambari-upgrade add="true"/>
   </property>
   <property>
-    <name>yarn.nodemanager.log.retain-second</name>
+    <name>yarn.nodemanager.log.retain-seconds</name>
     <value>604800</value>
     <description>
       Time in seconds to retain user logs. Only applicable if
       log aggregation is disabled.
     </description>
-    <on-ambari-upgrade add="true"/>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>yarn.log-aggregation-enable</name>
@@ -259,6 +281,7 @@
   </property>
   <property>
     <name>yarn.nodemanager.remote-app-log-dir</name>
+    <display-name>YARN NodeManager Remote App Log directory</display-name>
     <value>/app-logs</value>
     <description>Location to aggregate logs to. </description>
     <property-type>NOT_MANAGED_HDFS_PATH</property-type>
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/kerberos.json b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/kerberos.json
index 647e4fc..8688753 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/kerberos.json
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/kerberos.json
@@ -25,15 +25,16 @@
             "yarn.timeline-service.http-authentication.token.validity": "",
             "yarn.timeline-service.http-authentication.cookie.domain": "",
             "yarn.timeline-service.http-authentication.cookie.path": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
-            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.groups": "",
             "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
-            "yarn.resourcemanager.proxyusers.*.groups": "",
-            "yarn.resourcemanager.proxyusers.*.hosts": "",
-            "yarn.resourcemanager.proxyusers.*.users": "",
+            "yarn.resourcemanager.proxyuser.*.groups": "",
+            "yarn.resourcemanager.proxyuser.*.hosts": "",
+            "yarn.resourcemanager.proxyuser.*.users": "",
             "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
-            "yarn.nodemanager.linux-container-executor.cgroups.mount-path": ""
+            "hadoop.registry.secure" : "true",
+            "hadoop.registry.system.accounts" : "sasl:yarn,sasl:mapred,sasl:hadoop,sasl:hdfs,sasl:rm"
           }
         },
         {
@@ -158,7 +159,7 @@
               }
             },
             {
-              "name": "yarn_app_timelineserver_hdfs",
+              "name": "yarn_app_timeline_server_hdfs",
               "reference": "/HDFS/NAMENODE/hdfs"
             }
           ]
@@ -182,7 +183,7 @@
           "name": "HISTORYSERVER",
           "identities": [
             {
-              "name": "mapreduce2_history_server_hdfs",
+              "name": "mapreduce2_historyserver_hdfs",
               "reference": "/HDFS/NAMENODE/hdfs"
             },
             {
@@ -207,7 +208,7 @@
               }
             },
             {
-              "name": "mapreduce2_history_server_spnego",
+              "name": "mapreduce2_historyserver_spnego",
               "reference": "/spnego",
               "principal": {
                 "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-principal"
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/metainfo.xml b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/metainfo.xml
index 1d4fea9..ffc6ca0 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/metainfo.xml
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/metainfo.xml
@@ -23,29 +23,15 @@
       <name>YARN</name>
       <displayName>YARN</displayName>
       <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
-      <version>2.8.4+bigtop</version>
+      <version>2.1.0.2.0</version>
       <components>
 
-      <component>
-          <name>APP_TIMELINE_SERVER</name>
-          <displayName>App Timeline Server</displayName>
-          <category>MASTER</category>
-          <cardinality>0-1</cardinality>
-          <versionAdvertised>false</versionAdvertised>
-          <reassignAllowed>true</reassignAllowed>
-          <commandScript>
-            <script>scripts/application_timeline_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-        </component>
-
         <component>
           <name>RESOURCEMANAGER</name>
           <displayName>ResourceManager</displayName>
           <category>MASTER</category>
           <cardinality>1</cardinality>
-          <versionAdvertised>false</versionAdvertised>
+          <versionAdvertised>true</versionAdvertised>
           <reassignAllowed>true</reassignAllowed>
           <commandScript>
             <script>scripts/resourcemanager.py</script>
@@ -93,7 +79,7 @@
           <displayName>NodeManager</displayName>
           <category>SLAVE</category>
           <cardinality>1+</cardinality>
-          <versionAdvertised>false</versionAdvertised>
+          <versionAdvertised>true</versionAdvertised>
           <decommissionAllowed>true</decommissionAllowed>
           <commandScript>
             <script>scripts/nodemanager.py</script>
@@ -117,7 +103,7 @@
           <displayName>YARN Client</displayName>
           <category>CLIENT</category>
           <cardinality>1+</cardinality>
-          <versionAdvertised>false</versionAdvertised>
+          <versionAdvertised>true</versionAdvertised>
           <commandScript>
             <script>scripts/yarn_client.py</script>
             <scriptType>PYTHON</scriptType>
@@ -148,7 +134,7 @@
               <type>xml</type>
               <fileName>capacity-scheduler.xml</fileName>
               <dictionaryName>capacity-scheduler</dictionaryName>
-            </configFile> 
+            </configFile>                        
           </configFiles>
         </component>
       </components>
@@ -175,10 +161,11 @@
         <scriptType>PYTHON</scriptType>
         <timeout>300</timeout>
       </commandScript>
-
+      
       <requiredServices>
         <service>HDFS</service>
         <service>MAPREDUCE2</service>
+        <service>ZOOKEEPER</service>
       </requiredServices>
 
       <configuration-dependencies>
@@ -189,7 +176,6 @@
         <config-type>core-site</config-type>
         <config-type>mapred-site</config-type>
         <config-type>yarn-log4j</config-type>
-        <config-type>ams-ssl-client</config-type>
         <config-type>ranger-yarn-plugin-properties</config-type>
         <config-type>ranger-yarn-audit</config-type>
         <config-type>ranger-yarn-policymgr-ssl</config-type>
@@ -197,20 +183,26 @@
       </configuration-dependencies>
       <widgetsFileName>YARN_widgets.json</widgetsFileName>
       <metricsFileName>YARN_metrics.json</metricsFileName>
+      <themes>
+        <theme>
+          <fileName>directories.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
     </service>
 
     <service>
       <name>MAPREDUCE2</name>
       <displayName>MapReduce2</displayName>
       <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
-      <version>2.8.4+bigtop</version>
+      <version>2.1.0.2.0.6.0</version>
       <components>
         <component>
           <name>HISTORYSERVER</name>
           <displayName>History Server</displayName>
           <category>MASTER</category>
           <cardinality>1</cardinality>
-          <versionAdvertised>false</versionAdvertised>
+          <versionAdvertised>true</versionAdvertised>
           <reassignAllowed>true</reassignAllowed>
           <auto-deploy>
             <enabled>true</enabled>
@@ -224,6 +216,20 @@
                 <enabled>true</enabled>
               </auto-deploy>
             </dependency>
+            <dependency>
+              <name>TEZ/TEZ_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>SLIDER/SLIDER</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
           </dependencies>
           <commandScript>
             <script>scripts/historyserver.py</script>
@@ -243,7 +249,7 @@
           <displayName>MapReduce2 Client</displayName>
           <category>CLIENT</category>
           <cardinality>0+</cardinality>
-          <versionAdvertised>false</versionAdvertised>
+          <versionAdvertised>true</versionAdvertised>
           <commandScript>
             <script>scripts/mapreduce2_client.py</script>
             <scriptType>PYTHON</scriptType>
@@ -285,7 +291,7 @@
         <scriptType>PYTHON</scriptType>
         <timeout>300</timeout>
       </commandScript>
-
+      
       <requiredServices>
         <service>YARN</service>
       </requiredServices>
@@ -300,11 +306,19 @@
         <config-type>mapred-env</config-type>
         <config-type>ssl-client</config-type>
         <config-type>ssl-server</config-type>
-        <config-type>ams-ssl-client</config-type>
+        <config-type>yarn-site</config-type>
       </configuration-dependencies>
       <restartRequiredAfterRackChange>true</restartRequiredAfterRackChange>
       <widgetsFileName>MAPREDUCE2_widgets.json</widgetsFileName>
       <metricsFileName>MAPREDUCE2_metrics.json</metricsFileName>
+
+      <themes-dir>themes-mapred</themes-dir>
+      <themes>
+        <theme>
+          <fileName>directories.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
     </service>
   </services>
 </metainfo>
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/files/validateYarnComponentStatusWindows.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/files/validateYarnComponentStatusWindows.py
index 5e2b4d9..218b893 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/files/validateYarnComponentStatusWindows.py
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/files/validateYarnComponentStatusWindows.py
@@ -19,7 +19,7 @@ limitations under the License.
 '''
 
 import optparse
-import subprocess
+from ambari_commons import subprocess32
 import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
 import urllib2
 
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/application_timeline_server.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/application_timeline_server.py
index e3a81cf..2aec6ba 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/application_timeline_server.py
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/application_timeline_server.py
@@ -19,15 +19,18 @@ Ambari Agent
 
 """
 
-from resource_management import *
-from resource_management.libraries.functions import conf_select
+from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.constants import StackFeature
 from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions import check_process_status
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties,\
   FILE_TYPE_XML
 from resource_management.libraries.functions.format import format
+from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Execute
+
 from yarn import yarn
 from service import service
 from ambari_commons import OSConst
@@ -63,84 +66,18 @@ class ApplicationTimelineServerWindows(ApplicationTimelineServer):
 
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class ApplicationTimelineServerDefault(ApplicationTimelineServer):
-  def get_component_name(self):
-    return "hadoop-yarn-timelineserver"
-
   def pre_upgrade_restart(self, env, upgrade_type=None):
     Logger.info("Executing Stack Upgrade pre-restart")
     import params
     env.set_params(params)
 
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      stack_select.select("hadoop-yarn-timelineserver", params.version)
+      stack_select.select_packages(params.version)
 
   def status(self, env):
     import status_params
     env.set_params(status_params)
-    Execute(format("mv {yarn_historyserver_pid_file_old} {yarn_historyserver_pid_file}"),
-            only_if = format("test -e {yarn_historyserver_pid_file_old}", user=status_params.yarn_user))
-    functions.check_process_status(status_params.yarn_historyserver_pid_file)
-
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"yarn.timeline-service.enabled": "true",
-                           "yarn.timeline-service.http-authentication.type": "kerberos",
-                           "yarn.acl.enable": "true"}
-      props_empty_check = ["yarn.timeline-service.principal",
-                           "yarn.timeline-service.keytab",
-                           "yarn.timeline-service.http-authentication.kerberos.principal",
-                           "yarn.timeline-service.http-authentication.kerberos.keytab"]
-
-      props_read_check = ["yarn.timeline-service.keytab",
-                          "yarn.timeline-service.http-authentication.kerberos.keytab"]
-      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
-                                                  props_read_check)
-
-      yarn_expectations ={}
-      yarn_expectations.update(yarn_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'yarn-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, yarn_expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'yarn-site' not in security_params
-               or 'yarn.timeline-service.keytab' not in security_params['yarn-site']
-               or 'yarn.timeline-service.principal' not in security_params['yarn-site']) \
-            or 'yarn.timeline-service.http-authentication.kerberos.keytab' not in security_params['yarn-site'] \
-            or 'yarn.timeline-service.http-authentication.kerberos.principal' not in security_params['yarn-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.timeline-service.keytab'],
-                                security_params['yarn-site']['yarn.timeline-service.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.timeline-service.http-authentication.kerberos.keytab'],
-                                security_params['yarn-site']['yarn.timeline-service.http-authentication.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
+    check_process_status(status_params.yarn_historyserver_pid_file)
 
   def get_log_folder(self):
     import params
@@ -150,5 +87,11 @@ class ApplicationTimelineServerDefault(ApplicationTimelineServer):
     import params
     return params.yarn_user
 
+  def get_pid_files(self):
+    import status_params
+    Execute(format("mv {status_params.yarn_historyserver_pid_file_old} {status_params.yarn_historyserver_pid_file}"),
+            only_if = format("test -e {status_params.yarn_historyserver_pid_file_old}", user=status_params.yarn_user))
+    return [status_params.yarn_historyserver_pid_file]
+
 if __name__ == "__main__":
   ApplicationTimelineServer().execute()
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/historyserver.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/historyserver.py
index f933e91..a93bc17 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/historyserver.py
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/historyserver.py
@@ -21,13 +21,11 @@ Ambari Agent
 
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
-from resource_management.libraries.functions import conf_select
 from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.constants import StackFeature
 from resource_management.libraries.functions.stack_features import check_stack_feature
 from resource_management.libraries.functions.check_process_status import check_process_status
 from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
-from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
   FILE_TYPE_XML
@@ -70,20 +68,17 @@ class HistoryserverWindows(HistoryServer):
 
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class HistoryServerDefault(HistoryServer):
-  def get_component_name(self):
-    return "hadoop-mapreduce-historyserver"
-
   def pre_upgrade_restart(self, env, upgrade_type=None):
     Logger.info("Executing Stack Upgrade pre-restart")
     import params
     env.set_params(params)
 
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      stack_select.select("hadoop-mapreduce-historyserver", params.version)
+      stack_select.select_packages(params.version)
       # MC Hammer said, "Can't touch this"
-      copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
-      copy_to_hdfs("tez", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
-      copy_to_hdfs("slider", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
+      copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
+      copy_to_hdfs("tez", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
+      copy_to_hdfs("slider", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
       params.HdfsResource(None, action="execute")
 
   def start(self, env, upgrade_type=None):
@@ -91,23 +86,23 @@ class HistoryServerDefault(HistoryServer):
     env.set_params(params)
     self.configure(env) # FOR SECURITY
 
-    if params.stack_version_formatted_major and check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS, params.stack_version_formatted_major):
+    if check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS, params.version_for_stack_feature_checks):
       # MC Hammer said, "Can't touch this"
       resource_created = copy_to_hdfs(
         "mapreduce",
         params.user_group,
         params.hdfs_user,
-        host_sys_prepped=params.host_sys_prepped)
+        skip=params.sysprep_skip_copy_tarballs_hdfs)
       resource_created = copy_to_hdfs(
         "tez",
         params.user_group,
         params.hdfs_user,
-        host_sys_prepped=params.host_sys_prepped) or resource_created
+        skip=params.sysprep_skip_copy_tarballs_hdfs) or resource_created
       resource_created = copy_to_hdfs(
         "slider",
         params.user_group,
         params.hdfs_user,
-        host_sys_prepped=params.host_sys_prepped) or resource_created
+        skip=params.sysprep_skip_copy_tarballs_hdfs) or resource_created
       if resource_created:
         params.HdfsResource(None, action="execute")
     else:
@@ -121,62 +116,6 @@ class HistoryServerDefault(HistoryServer):
     env.set_params(status_params)
     check_process_status(status_params.mapred_historyserver_pid_file)
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      expectations = {}
-      expectations.update(build_expectations('mapred-site',
-                                             None,
-                                             [
-                                               'mapreduce.jobhistory.keytab',
-                                               'mapreduce.jobhistory.principal',
-                                               'mapreduce.jobhistory.webapp.spnego-keytab-file',
-                                               'mapreduce.jobhistory.webapp.spnego-principal'
-                                             ],
-                                             None))
-
-      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'mapred-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, expectations)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'mapred-site' not in security_params or
-               'mapreduce.jobhistory.keytab' not in security_params['mapred-site'] or
-               'mapreduce.jobhistory.principal' not in security_params['mapred-site'] or
-               'mapreduce.jobhistory.webapp.spnego-keytab-file' not in security_params['mapred-site'] or
-               'mapreduce.jobhistory.webapp.spnego-principal' not in security_params['mapred-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal not set."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.mapred_user,
-                                security_params['mapred-site']['mapreduce.jobhistory.keytab'],
-                                security_params['mapred-site']['mapreduce.jobhistory.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.mapred_user,
-                                security_params['mapred-site']['mapreduce.jobhistory.webapp.spnego-keytab-file'],
-                                security_params['mapred-site']['mapreduce.jobhistory.webapp.spnego-principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def get_log_folder(self):
     import params
     return params.mapred_log_dir
@@ -185,5 +124,9 @@ class HistoryServerDefault(HistoryServer):
     import params
     return params.mapred_user
 
+  def get_pid_files(self):
+    import status_params
+    return [status_params.mapred_historyserver_pid_file]
+
 if __name__ == "__main__":
   HistoryServer().execute()
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/install_jars.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/install_jars.py
index 44015bf..728a014 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/install_jars.py
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/install_jars.py
@@ -18,7 +18,7 @@ limitations under the License.
 
 """
 
-from resource_management import *
+from resource_management.libraries.functions.format import format
 import os
 import glob
 
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/mapred_service_check.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/mapred_service_check.py
index 5fc498d..6288ac0 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/mapred_service_check.py
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/mapred_service_check.py
@@ -20,7 +20,11 @@ Ambari Agent
 """
 
 import sys
-from resource_management import *
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.execute_hadoop import ExecuteHadoop
+from resource_management.libraries.functions.format import format
+from resource_management.core.resources.system import Execute, File
+from resource_management.core.source import StaticFile
 from ambari_commons import OSConst
 from ambari_commons.os_family_impl import OsFamilyImpl
 from resource_management.core.logger import Logger
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/mapreduce2_client.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/mapreduce2_client.py
index 8de9d56..234e931 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/mapreduce2_client.py
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/mapreduce2_client.py
@@ -24,7 +24,7 @@ import sys
 
 # Local imports
 from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions.constants import StackFeature
 from resource_management.libraries.functions.stack_features import check_stack_feature
 from resource_management.core.exceptions import ClientComponentHasNoStatus
@@ -66,8 +66,6 @@ class MapReduce2Client(Script):
     config_dir = self.get_config_dir_during_stack_upgrade(env, base_dir, conf_select_name)
 
     if config_dir:
-      Logger.info("stack_upgrade_save_new_config(): Calling conf-select on %s using version %s" % (conf_select_name, str(params.version)))
-
       # Because this script was called from ru_execute_tasks.py which already enters an Environment with its own basedir,
       # must change it now so this function can find the Jinja Templates for the service.
       env.config.basedir = base_dir
@@ -81,15 +79,12 @@ class MapReduce2ClientWindows(MapReduce2Client):
 
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class MapReduce2ClientDefault(MapReduce2Client):
-  def get_component_name(self):
-    return "hadoop-client"
-
   def pre_upgrade_restart(self, env, upgrade_type=None):
     import params
     env.set_params(params)
 
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      stack_select.select("hadoop-client", params.version)
+      stack_select.select_packages(params.version)
 
 
 if __name__ == "__main__":
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/nodemanager.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/nodemanager.py
index 4f3eecb..280fc2f 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/nodemanager.py
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/nodemanager.py
@@ -21,15 +21,16 @@ Ambari Agent
 
 import nodemanager_upgrade
 
-from resource_management import *
-from resource_management.libraries.functions import conf_select
+from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.constants import StackFeature
 from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.check_process_status import check_process_status
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
   FILE_TYPE_XML
+from resource_management.core.logger import Logger
 from yarn import yarn
 from service import service
 from ambari_commons import OSConst
@@ -65,16 +66,13 @@ class NodemanagerWindows(Nodemanager):
 
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class NodemanagerDefault(Nodemanager):
-  def get_component_name(self):
-    return "hadoop-yarn-nodemanager"
-
   def pre_upgrade_restart(self, env, upgrade_type=None):
     Logger.info("Executing NodeManager Stack Upgrade pre-restart")
     import params
     env.set_params(params)
 
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      stack_select.select("hadoop-yarn-nodemanager", params.version)
+      stack_select.select_packages(params.version)
 
   def post_upgrade_restart(self, env, upgrade_type=None):
     Logger.info("Executing NodeManager Stack Upgrade post-restart")
@@ -88,66 +86,6 @@ class NodemanagerDefault(Nodemanager):
     env.set_params(status_params)
     check_process_status(status_params.nodemanager_pid_file)
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"yarn.timeline-service.http-authentication.type": "kerberos",
-                           "yarn.acl.enable": "true"}
-      props_empty_check = ["yarn.nodemanager.principal",
-                           "yarn.nodemanager.keytab",
-                           "yarn.nodemanager.webapp.spnego-principal",
-                           "yarn.nodemanager.webapp.spnego-keytab-file"]
-
-      props_read_check = ["yarn.nodemanager.keytab",
-                          "yarn.nodemanager.webapp.spnego-keytab-file"]
-      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
-                                           props_read_check)
-
-      yarn_expectations ={}
-      yarn_expectations.update(yarn_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'yarn-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, yarn_site_props)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'yarn-site' not in security_params
-               or 'yarn.nodemanager.keytab' not in security_params['yarn-site']
-               or 'yarn.nodemanager.principal' not in security_params['yarn-site']) \
-            or 'yarn.nodemanager.webapp.spnego-keytab-file' not in security_params['yarn-site'] \
-            or 'yarn.nodemanager.webapp.spnego-principal' not in security_params['yarn-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.nodemanager.keytab'],
-                                security_params['yarn-site']['yarn.nodemanager.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.nodemanager.webapp.spnego-keytab-file'],
-                                security_params['yarn-site']['yarn.nodemanager.webapp.spnego-principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def get_log_folder(self):
     import params
     return params.yarn_log_dir
@@ -156,5 +94,9 @@ class NodemanagerDefault(Nodemanager):
     import params
     return params.yarn_user
 
+  def get_pid_files(self):
+    import status_params
+    return [status_params.nodemanager_pid_file]
+
 if __name__ == "__main__":
   Nodemanager().execute()
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/nodemanager_upgrade.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/nodemanager_upgrade.py
index 1c886f9..2407598 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/nodemanager_upgrade.py
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/nodemanager_upgrade.py
@@ -17,7 +17,7 @@ limitations under the License.
 
 """
 
-import subprocess
+from ambari_commons import subprocess32
 
 from resource_management.core.logger import Logger
 from resource_management.core.exceptions import Fail
@@ -25,6 +25,7 @@ from resource_management.core.resources.system import Execute
 from resource_management.core import shell
 from resource_management.libraries.functions.decorator import retry
 from resource_management.libraries.functions.show_logs import show_logs
+from resource_management.libraries.functions.format import format
 
 
 def post_upgrade_check():
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/params.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/params.py
index 073e84f..d0ad6f6 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/params.py
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/params.py
@@ -21,11 +21,12 @@ Ambari Agent
 """
 from ambari_commons import OSCheck
 from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.copy_tarball import get_sysprep_skip_copy_tarballs_hdfs
 
 if OSCheck.is_windows_family():
   from params_windows import *
 else:
   from params_linux import *
 
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+sysprep_skip_copy_tarballs_hdfs = get_sysprep_skip_copy_tarballs_hdfs()
 retryAble = default("/commandParams/command_retry_enabled", False)
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/params_linux.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/params_linux.py
index 764dea2..5b4177c 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/params_linux.py
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/params_linux.py
@@ -20,8 +20,11 @@ Ambari Agent
 """
 import os
 
+from resource_management.core import sudo
+from resource_management.core.logger import Logger
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import component_version
 from resource_management.libraries.functions import conf_select
 from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import format
@@ -30,10 +33,12 @@ from resource_management.libraries.functions.stack_features import check_stack_f
 from resource_management.libraries.functions.stack_features import get_stack_feature_version
 from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
-from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.version import format_stack_version, get_major_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries import functions
 from resource_management.libraries.functions import is_empty
+from resource_management.libraries.functions.get_architecture import get_architecture
+from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs, generate_ranger_service_config
 
 import status_params
 
@@ -55,37 +60,87 @@ YARN_SERVER_ROLE_DIRECTORY_MAP = {
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
+architecture = get_architecture()
+
 stack_name = status_params.stack_name
 stack_root = Script.get_stack_root()
 tarball_map = default("/configurations/cluster-env/tarball_map", None)
 
-config_path = os.path.join(stack_root, "current/hadoop-client/conf")
+config_path = stack_select.get_hadoop_dir("conf")
 config_dir = os.path.realpath(config_path)
 
+# get the correct version to use for checking stack features
+version_for_stack_feature_checks = get_stack_feature_version(config)
+
 # This is expected to be of the form #.#.#.#
-stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_unformatted = config['clusterLevelParams']['stack_version']
 stack_version_formatted_major = format_stack_version(stack_version_unformatted)
 stack_version_formatted = functions.get_stack_version('hadoop-yarn-resourcemanager')
+major_stack_version = get_major_version(stack_version_formatted_major)
 
-stack_supports_ru = stack_version_formatted_major and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted_major)
-stack_supports_timeline_state_store = stack_version_formatted_major and check_stack_feature(StackFeature.TIMELINE_STATE_STORE, stack_version_formatted_major)
+stack_supports_ru = check_stack_feature(StackFeature.ROLLING_UPGRADE, version_for_stack_feature_checks)
+stack_supports_timeline_state_store = check_stack_feature(StackFeature.TIMELINE_STATE_STORE, version_for_stack_feature_checks)
 
 # New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade.
 # It cannot be used during the initial Cluser Install because the version is not yet known.
 version = default("/commandParams/version", None)
 
-# get the correct version to use for checking stack features
-version_for_stack_feature_checks = get_stack_feature_version(config)
+def get_spark_version(service_name, component_name, yarn_version):
+  """
+  Attempts to calculate the correct version placeholder value for spark or spark2 based on
+  what is installed in the cluster. If Spark is not installed, then this value will need to be
+  that of YARN so it can still find the correct spark class.
+
+  On cluster installs, we have not yet calculated any versions and all known values could be None.
+  This doesn't affect daemons, but it does affect client-only hosts where they will never receive
+  a start command after install. Therefore, this function will attempt to use stack-select as a
+  last resort to get a valid value.
+
+  ATS needs this since it relies on packages installed by Spark. Some classes, like the shuffle
+  classes, are not provided by spark, but by a dependent RPM to YARN, so they do not use this
+  value.
+  :param service_name:  the service name (SPARK, SPARK2, etc)
+  :param component_name:  the component name (SPARK_CLIENT, etc)
+  :param yarn_version:  the default version of Yarn to use if no spark is installed
+  :return:  a value for the version placeholder in spark classpath properties
+  """
+  # start off seeing if we need to populate a default value for YARN
+  if yarn_version is None:
+    yarn_version = component_version.get_component_repository_version(service_name = "YARN",
+      component_name = "YARN_CLIENT")
+
+  # now try to get the version of spark/spark2, defaulting to the version of YARN
+  spark_classpath_version = component_version.get_component_repository_version(service_name = service_name,
+    component_name = component_name, default_value = yarn_version)
+
+  # even with the default of using YARN's version, on an install this might be None since we haven't
+  # calculated the version of YARN yet - use stack_select as a last ditch effort
+  if spark_classpath_version is None:
+    try:
+      spark_classpath_version = stack_select.get_role_component_current_stack_version()
+    except:
+      Logger.exception("Unable to query for the correct spark version to use when building classpaths")
+
+  return spark_classpath_version
+
+
+# these are used to render the classpath for picking up Spark classes
+# in the event that spark is not installed, then we must default to the version of YARN installed
+# since it will still load classes from its own spark version
+spark_version = get_spark_version("SPARK", "SPARK_CLIENT", version)
+spark2_version = get_spark_version("SPARK2", "SPARK2_CLIENT", version)
 
 stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
 stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
 
-hostname = config['hostname']
+hostname = config['agentLevelParams']['hostname']
 
 # hadoop default parameters
+hadoop_home = status_params.hadoop_home
 hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
 hadoop_bin = stack_select.get_hadoop_dir("sbin")
 hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_lib_home = stack_select.get_hadoop_dir("lib")
 hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
 hadoop_yarn_home = '/usr/lib/hadoop-yarn'
 hadoop_mapred2_jar_location = "/usr/lib/hadoop-mapreduce"
@@ -107,16 +162,37 @@ if stack_supports_ru:
   if command_role in YARN_SERVER_ROLE_DIRECTORY_MAP:
     yarn_role_root = YARN_SERVER_ROLE_DIRECTORY_MAP[command_role]
 
-  hadoop_mapred2_jar_location = format("{stack_root}/current/{mapred_role_root}")
-  mapred_bin = format("{stack_root}/current/{mapred_role_root}/sbin")
-
+  # defaults set to current based on role
+  hadoop_mapr_home = format("{stack_root}/current/{mapred_role_root}")
   hadoop_yarn_home = format("{stack_root}/current/{yarn_role_root}")
-  yarn_bin = format("{stack_root}/current/{yarn_role_root}/sbin")
-  yarn_container_bin = format("{stack_root}/current/{yarn_role_root}/bin")
+
+  # try to render the specific version
+  version = component_version.get_component_repository_version()
+  if version is None:
+    version = default("/commandParams/version", None)
+
+
+  if version is not None:
+    hadoop_mapr_versioned_home = format("{stack_root}/{version}/hadoop-mapreduce")
+    hadoop_yarn_versioned_home = format("{stack_root}/{version}/hadoop-yarn")
+
+    if sudo.path_isdir(hadoop_mapr_versioned_home):
+      hadoop_mapr_home = hadoop_mapr_versioned_home
+
+    if sudo.path_isdir(hadoop_yarn_versioned_home):
+      hadoop_yarn_home = hadoop_yarn_versioned_home
+
+
+  hadoop_mapred2_jar_location = hadoop_mapr_home
+  mapred_bin = format("{hadoop_mapr_home}/sbin")
+
+  yarn_bin = format("{hadoop_yarn_home}/sbin")
+  yarn_container_bin = format("{hadoop_yarn_home}/bin")
+
 
 if stack_supports_timeline_state_store:
   # Timeline Service property that was added timeline_state_store stack feature
-  ats_leveldb_state_store_dir = config['configurations']['yarn-site']['yarn.timeline-service.leveldb-state-store.path']
+  ats_leveldb_state_store_dir = default('/configurations/yarn-site/yarn.timeline-service.leveldb-state-store.path', '/hadoop/yarn/timeline')
 
 # ats 1.5 properties
 entity_groupfs_active_dir = config['configurations']['yarn-site']['yarn.timeline-service.entity-group-fs-store.active-dir']
@@ -140,7 +216,7 @@ ulimit_cmd = "ulimit -c unlimited;"
 mapred_user = status_params.mapred_user
 yarn_user = status_params.yarn_user
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-hdfs_tmp_dir = config['configurations']['hadoop-env']['hdfs_tmp_dir']
+hdfs_tmp_dir = default("/configurations/hadoop-env/hdfs_tmp_dir", "/tmp")
 
 smokeuser = config['configurations']['cluster-env']['smokeuser']
 smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
@@ -159,12 +235,16 @@ container_executor_mode = 06050 if is_linux_container_executor else 02050
 kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
 yarn_http_policy = config['configurations']['yarn-site']['yarn.http.policy']
 yarn_https_on = (yarn_http_policy.upper() == 'HTTPS_ONLY')
-rm_hosts = config['clusterHostInfo']['rm_host']
+rm_hosts = config['clusterHostInfo']['resourcemanager_hosts']
 rm_host = rm_hosts[0]
 rm_port = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'].split(':')[-1]
 rm_https_port = default('/configurations/yarn-site/yarn.resourcemanager.webapp.https.address', ":8090").split(':')[-1]
+# TODO UPGRADE default, update site during upgrade
+rm_nodes_exclude_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
+rm_nodes_exclude_dir = os.path.dirname(rm_nodes_exclude_path)
 
-java64_home = config['hostLevelParams']['java_home']
+java64_home = config['ambariLevelParams']['java_home']
+java_exec = format("{java64_home}/bin/java")
 hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled", False)
 
 yarn_heapsize = config['configurations']['yarn-env']['yarn_heapsize']
@@ -179,6 +259,7 @@ mapred_pid_dir_prefix = status_params.mapred_pid_dir_prefix
 mapred_log_dir_prefix = config['configurations']['mapred-env']['mapred_log_dir_prefix']
 mapred_env_sh_template = config['configurations']['mapred-env']['content']
 yarn_env_sh_template = config['configurations']['yarn-env']['content']
+container_executor_cfg_template = config['configurations']['container-executor']['content']
 yarn_nodemanager_recovery_dir = default('/configurations/yarn-site/yarn.nodemanager.recovery.dir', None)
 service_check_queue_name = default('/configurations/yarn-env/service_check.queue.name', 'default')
 
@@ -190,6 +271,13 @@ else:
   rm_webui_address = format("{rm_host}:{rm_port}")
   rm_webui_https_address = format("{rm_host}:{rm_https_port}")
 
+if security_enabled:
+  tc_mode = 0644
+  tc_owner = "root"
+else:
+  tc_mode = None
+  tc_owner = hdfs_user
+
 nm_webui_address = config['configurations']['yarn-site']['yarn.nodemanager.webapp.address']
 hs_webui_address = config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address']
 nm_address = config['configurations']['yarn-site']['yarn.nodemanager.address']  # still contains 0.0.0.0
@@ -222,15 +310,14 @@ yarn_job_summary_log = format("{yarn_log_dir_prefix}/{yarn_user}/hadoop-mapreduc
 user_group = config['configurations']['cluster-env']['user_group']
 
 #exclude file
-exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
+if 'all_decommissioned_hosts' in config['commandParams']:
+  exclude_hosts = config['commandParams']['all_decommissioned_hosts'].split(",")
+else:
+  exclude_hosts = []
 exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
 rm_nodes_exclude_dir = os.path.dirname(exclude_file_path)
 
-ats_host = set(default("/clusterHostInfo/app_timeline_server_hosts", []))
-has_ats = not len(ats_host) == 0
-
-nm_hosts = default("/clusterHostInfo/nm_hosts", [])
-
+nm_hosts = default("/clusterHostInfo/nodemanager_hosts", [])
 #incude file
 include_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.include-path", None)
 include_hosts = None
@@ -239,35 +326,60 @@ if include_file_path and manage_include_files:
   rm_nodes_include_dir = os.path.dirname(include_file_path)
   include_hosts = list(set(nm_hosts) - set(exclude_hosts))
 
+ats_host = set(default("/clusterHostInfo/app_timeline_server_hosts", []))
+has_ats = not len(ats_host) == 0
+
 # don't using len(nm_hosts) here, because check can take too much time on large clusters
 number_of_nm = 1
 
+hs_host = default("/clusterHostInfo/historyserver_hosts", [])
+has_hs = not len(hs_host) == 0
+
 # default kinit commands
 rm_kinit_cmd = ""
 yarn_timelineservice_kinit_cmd = ""
 nodemanager_kinit_cmd = ""
 
+rm_zk_address = config['configurations']['yarn-site']['yarn.resourcemanager.zk-address']
+rm_zk_znode = config['configurations']['yarn-site']['yarn.resourcemanager.zk-state-store.parent-path']
+rm_zk_store_class = config['configurations']['yarn-site']['yarn.resourcemanager.store.class']
+stack_supports_zk_security = check_stack_feature(StackFeature.SECURE_ZOOKEEPER, version_for_stack_feature_checks)
+rm_zk_failover_znode = default('/configurations/yarn-site/yarn.resourcemanager.ha.automatic-failover.zk-base-path', '/yarn-leader-election')
+hadoop_registry_zk_root = default('/configurations/yarn-site/hadoop.registry.zk.root', '/registry')
+
 if security_enabled:
   rm_principal_name = config['configurations']['yarn-site']['yarn.resourcemanager.principal']
   rm_principal_name = rm_principal_name.replace('_HOST',hostname.lower())
   rm_keytab = config['configurations']['yarn-site']['yarn.resourcemanager.keytab']
   rm_kinit_cmd = format("{kinit_path_local} -kt {rm_keytab} {rm_principal_name};")
+  yarn_jaas_file = os.path.join(config_dir, 'yarn_jaas.conf')
+  if stack_supports_zk_security:
+    zk_principal_name = default("/configurations/zookeeper-env/zookeeper_principal_name", "zookeeper/_HOST@EXAMPLE.COM")
+    zk_principal_user = zk_principal_name.split('/')[0]
+    rm_security_opts = format('-Dzookeeper.sasl.client=true -Dzookeeper.sasl.client.username={zk_principal_user} -Djava.security.auth.login.config={yarn_jaas_file} -Dzookeeper.sasl.clientconfig=Client')
 
   # YARN timeline security options
   if has_ats:
-    _yarn_timelineservice_principal_name = config['configurations']['yarn-site']['yarn.timeline-service.principal']
-    _yarn_timelineservice_principal_name = _yarn_timelineservice_principal_name.replace('_HOST', hostname.lower())
-    _yarn_timelineservice_keytab = config['configurations']['yarn-site']['yarn.timeline-service.keytab']
-    yarn_timelineservice_kinit_cmd = format("{kinit_path_local} -kt {_yarn_timelineservice_keytab} {_yarn_timelineservice_principal_name};")
+    yarn_timelineservice_principal_name = config['configurations']['yarn-site']['yarn.timeline-service.principal']
+    yarn_timelineservice_principal_name = yarn_timelineservice_principal_name.replace('_HOST', hostname.lower())
+    yarn_timelineservice_keytab = config['configurations']['yarn-site']['yarn.timeline-service.keytab']
+    yarn_timelineservice_kinit_cmd = format("{kinit_path_local} -kt {yarn_timelineservice_keytab} {yarn_timelineservice_principal_name};")
+    yarn_ats_jaas_file = os.path.join(config_dir, 'yarn_ats_jaas.conf')
 
   if 'yarn.nodemanager.principal' in config['configurations']['yarn-site']:
-    _nodemanager_principal_name = default('/configurations/yarn-site/yarn.nodemanager.principal', None)
-    if _nodemanager_principal_name:
-      _nodemanager_principal_name = _nodemanager_principal_name.replace('_HOST', hostname.lower())
+    nodemanager_principal_name = default('/configurations/yarn-site/yarn.nodemanager.principal', None)
+    if nodemanager_principal_name:
+      nodemanager_principal_name = nodemanager_principal_name.replace('_HOST', hostname.lower())
 
-    _nodemanager_keytab = config['configurations']['yarn-site']['yarn.nodemanager.keytab']
-    nodemanager_kinit_cmd = format("{kinit_path_local} -kt {_nodemanager_keytab} {_nodemanager_principal_name};")
+    nodemanager_keytab = config['configurations']['yarn-site']['yarn.nodemanager.keytab']
+    nodemanager_kinit_cmd = format("{kinit_path_local} -kt {nodemanager_keytab} {nodemanager_principal_name};")
+    yarn_nm_jaas_file = os.path.join(config_dir, 'yarn_nm_jaas.conf')
 
+  if has_hs:
+    mapred_jhs_principal_name = config['configurations']['mapred-site']['mapreduce.jobhistory.principal']
+    mapred_jhs_principal_name = mapred_jhs_principal_name.replace('_HOST', hostname.lower())
+    mapred_jhs_keytab = config['configurations']['mapred-site']['mapreduce.jobhistory.keytab']
+    mapred_jaas_file = os.path.join(config_dir, 'mapred_jaas.conf')
 
 yarn_log_aggregation_enabled = config['configurations']['yarn-site']['yarn.log-aggregation-enable']
 yarn_nm_app_log_dir =  config['configurations']['yarn-site']['yarn.nodemanager.remote-app-log-dir']
@@ -276,11 +388,20 @@ mapreduce_jobhistory_done_dir = config['configurations']['mapred-site']['mapredu
 jobhistory_heapsize = default("/configurations/mapred-env/jobhistory_heapsize", "900")
 jhs_leveldb_state_store_dir = default('/configurations/mapred-site/mapreduce.jobhistory.recovery.store.leveldb.path', "/hadoop/mapreduce/jhs")
 
+# Tez-related properties
+tez_user = config['configurations']['tez-env']['tez_user']
+
+# Tez jars
+tez_local_api_jars = '/usr/lib/tez/tez*.jar'
+tez_local_lib_jars = '/usr/lib/tez/lib/*.jar'
+app_dir_files = {tez_local_api_jars:None}
+
+# Tez libraries
+tez_lib_uris = default("/configurations/tez-site/tez.lib.uris", None)
+
 #for create_hdfs_directory
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-
-
 hdfs_site = config['configurations']['hdfs-site']
 default_fs = config['configurations']['core-site']['fs.defaultFS']
 is_webhdfs_enabled = hdfs_site['dfs.webhdfs.enabled']
@@ -288,7 +409,7 @@ is_webhdfs_enabled = hdfs_site['dfs.webhdfs.enabled']
 # Path to file that contains list of HDFS resources to be skipped during processing
 hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore"
 
-dfs_type = default("/commandParams/dfs_type", "")
+dfs_type = default("/clusterLevelParams/dfs_type", "")
 
 
 import functools
@@ -325,20 +446,12 @@ node_label_enable = config['configurations']['yarn-site']['yarn.node-labels.enab
 
 cgroups_dir = "/cgroups_test/cpu"
 
-# ***********************  RANGER PLUGIN CHANGES ***********************
-# ranger host
-ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
-has_ranger_admin = not len(ranger_admin_hosts) == 0
-xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
 # hostname of the active HDFS HA Namenode (only used when HA is enabled)
-dfs_ha_namenode_active = default("/configurations/hadoop-env/dfs_ha_initial_namenode_active", None)
+dfs_ha_namenode_active = default("/configurations/cluster-env/dfs_ha_initial_namenode_active", None)
 if dfs_ha_namenode_active is not None:
   namenode_hostname = dfs_ha_namenode_active
 else:
-  namenode_hostname = config['clusterHostInfo']['namenode_host'][0]
-
-ranger_admin_log_dir = default("/configurations/ranger-env/ranger_admin_log_dir","/var/log/ranger/admin")
+  namenode_hostname = config['clusterHostInfo']['namenode_hosts'][0]
 
 scheme = 'http' if not yarn_https_on else 'https'
 yarn_rm_address = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'] if not yarn_https_on else config['configurations']['yarn-site']['yarn.resourcemanager.webapp.https.address']
@@ -361,103 +474,124 @@ if rm_ha_enabled:
     rm_webapp_address = config['configurations']['yarn-site'][rm_webapp_address_property]
     rm_webapp_addresses_list.append(rm_webapp_address)
 
-#ranger yarn properties
-if has_ranger_admin:
-  is_supported_yarn_ranger = config['configurations']['yarn-env']['is_supported_yarn_ranger']
-
-  if is_supported_yarn_ranger:
-    enable_ranger_yarn = (config['configurations']['ranger-yarn-plugin-properties']['ranger-yarn-plugin-enabled'].lower() == 'yes')
-    policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
-    if 'admin-properties' in config['configurations'] and 'policymgr_external_url' in config['configurations']['admin-properties'] and policymgr_mgr_url.endswith('/'):
-      policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
-    xa_audit_db_flavor = (config['configurations']['admin-properties']['DB_FLAVOR']).lower()
-    xa_audit_db_name = default('/configurations/admin-properties/audit_db_name', 'ranger_audits')
-    xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
-    xa_audit_db_password = ''
-    if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db:
-      xa_audit_db_password = unicode(config['configurations']['admin-properties']['audit_db_password'])
-    xa_db_host = config['configurations']['admin-properties']['db_host']
-    repo_name = str(config['clusterName']) + '_yarn'
-
-    ranger_env = config['configurations']['ranger-env']
-    ranger_plugin_properties = config['configurations']['ranger-yarn-plugin-properties']
-    policy_user = config['configurations']['ranger-yarn-plugin-properties']['policy_user']
-    yarn_rest_url = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address']  
-
-    ranger_plugin_config = {
-      'username' : config['configurations']['ranger-yarn-plugin-properties']['REPOSITORY_CONFIG_USERNAME'],
-      'password' : unicode(config['configurations']['ranger-yarn-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']),
-      'yarn.url' : format('{scheme}://{yarn_rest_url}'),
-      'commonNameForCertificate' : config['configurations']['ranger-yarn-plugin-properties']['common.name.for.certificate']
-    }
-
-    yarn_ranger_plugin_repo = {
-      'isEnabled': 'true',
-      'configs': ranger_plugin_config,
-      'description': 'yarn repo',
-      'name': repo_name,
-      'repositoryType': 'yarn',
-      'type': 'yarn',
-      'assetType': '1'
-    }
-
-    if stack_supports_ranger_kerberos:
-      ranger_plugin_config['ambari.service.check.user'] = policy_user
-      ranger_plugin_config['hadoop.security.authentication'] = 'kerberos' if security_enabled else 'simple'
-
-    if stack_supports_ranger_kerberos and security_enabled:
-      ranger_plugin_config['policy.download.auth.users'] = yarn_user
-      ranger_plugin_config['tag.download.auth.users'] = yarn_user
-
-    #For curl command in ranger plugin to get db connector
-    jdk_location = config['hostLevelParams']['jdk_location']
-    java_share_dir = '/usr/share/java'
-    previous_jdbc_jar_name = None
-    if stack_supports_ranger_audit_db:
-      if xa_audit_db_flavor and xa_audit_db_flavor == 'mysql':
-        jdbc_jar_name = default("/hostLevelParams/custom_mysql_jdbc_name", None)
-        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
-        audit_jdbc_url = format('jdbc:mysql://{xa_db_host}/{xa_audit_db_name}')
-        jdbc_driver = "com.mysql.jdbc.Driver"
-      elif xa_audit_db_flavor and xa_audit_db_flavor == 'oracle':
-        jdbc_jar_name = default("/hostLevelParams/custom_oracle_jdbc_name", None)
-        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
-        colon_count = xa_db_host.count(':')
-        if colon_count == 2 or colon_count == 0:
-          audit_jdbc_url = format('jdbc:oracle:thin:@{xa_db_host}')
-        else:
-          audit_jdbc_url = format('jdbc:oracle:thin:@//{xa_db_host}')
-        jdbc_driver = "oracle.jdbc.OracleDriver"
-      elif xa_audit_db_flavor and xa_audit_db_flavor == 'postgres':
-        jdbc_jar_name = default("/hostLevelParams/custom_postgres_jdbc_name", None)
-        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
-        audit_jdbc_url = format('jdbc:postgresql://{xa_db_host}/{xa_audit_db_name}')
-        jdbc_driver = "org.postgresql.Driver"
-      elif xa_audit_db_flavor and xa_audit_db_flavor == 'mssql':
-        jdbc_jar_name = default("/hostLevelParams/custom_mssql_jdbc_name", None)
-        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
-        audit_jdbc_url = format('jdbc:sqlserver://{xa_db_host};databaseName={xa_audit_db_name}')
-        jdbc_driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
-      elif xa_audit_db_flavor and xa_audit_db_flavor == 'sqla':
-        jdbc_jar_name = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
-        previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
-        audit_jdbc_url = format('jdbc:sqlanywhere:database={xa_audit_db_name};host={xa_db_host}')
-        jdbc_driver = "sap.jdbc4.sqlanywhere.IDriver"
+# for curl command in ranger plugin to get db connector
+jdk_location = config['ambariLevelParams']['jdk_location']
+
+# ranger yarn plugin section start
+
+# ranger host
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+has_ranger_admin = not len(ranger_admin_hosts) == 0
+
+# ranger support xml_configuration flag, instead of depending on ranger xml_configurations_supported/ranger-env, using stack feature
+xml_configurations_supported = check_stack_feature(StackFeature.RANGER_XML_CONFIGURATION, version_for_stack_feature_checks)
+
+# ambari-server hostname
+ambari_server_hostname = config['ambariLevelParams']['ambari_server_host']
+
+# ranger yarn plugin enabled property
+enable_ranger_yarn = default("/configurations/ranger-yarn-plugin-properties/ranger-yarn-plugin-enabled", "No")
+enable_ranger_yarn = True if enable_ranger_yarn.lower() == 'yes' else False
+
+# ranger yarn-plugin supported flag, instead of using is_supported_yarn_ranger/yarn-env, using stack feature
+is_supported_yarn_ranger = check_stack_feature(StackFeature.YARN_RANGER_PLUGIN_SUPPORT, version_for_stack_feature_checks)
+
+# get ranger yarn properties if enable_ranger_yarn is True
+if enable_ranger_yarn and is_supported_yarn_ranger:
+  # get ranger policy url
+  policymgr_mgr_url = config['configurations']['ranger-yarn-security']['ranger.plugin.yarn.policy.rest.url']
+
+  if not is_empty(policymgr_mgr_url) and policymgr_mgr_url.endswith('/'):
+    policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
+
+  # ranger audit db user
+  xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
+
+  xa_audit_db_password = ''
+  if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db and has_ranger_admin:
+    xa_audit_db_password = config['configurations']['admin-properties']['audit_db_password']
+
+  # ranger yarn service/repository name
+  repo_name = str(config['clusterName']) + '_yarn'
+  repo_name_value = config['configurations']['ranger-yarn-security']['ranger.plugin.yarn.service.name']
+  if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
+    repo_name = repo_name_value
+
+  # ranger-env config
+  ranger_env = config['configurations']['ranger-env']
+
+  # create ranger-env config having external ranger credential properties
+  if not has_ranger_admin and enable_ranger_yarn:
+    external_admin_username = default('/configurations/ranger-yarn-plugin-properties/external_admin_username', 'admin')
+    external_admin_password = default('/configurations/ranger-yarn-plugin-properties/external_admin_password', 'admin')
+    external_ranger_admin_username = default('/configurations/ranger-yarn-plugin-properties/external_ranger_admin_username', 'amb_ranger_admin')
+    external_ranger_admin_password = default('/configurations/ranger-yarn-plugin-properties/external_ranger_admin_password', 'amb_ranger_admin')
+    ranger_env = {}
+    ranger_env['admin_username'] = external_admin_username
+    ranger_env['admin_password'] = external_admin_password
+    ranger_env['ranger_admin_username'] = external_ranger_admin_username
+    ranger_env['ranger_admin_password'] = external_ranger_admin_password
+
+  ranger_plugin_properties = config['configurations']['ranger-yarn-plugin-properties']
+  policy_user = config['configurations']['ranger-yarn-plugin-properties']['policy_user']
+  yarn_rest_url = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address']
+
+  ranger_plugin_config = {
+    'username' : config['configurations']['ranger-yarn-plugin-properties']['REPOSITORY_CONFIG_USERNAME'],
+    'password' : unicode(config['configurations']['ranger-yarn-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']),
+    'yarn.url' : format('{scheme}://{yarn_rest_url}'),
+    'commonNameForCertificate' : config['configurations']['ranger-yarn-plugin-properties']['common.name.for.certificate']
+  }
+
+  yarn_ranger_plugin_repo = {
+    'isEnabled': 'true',
+    'configs': ranger_plugin_config,
+    'description': 'yarn repo',
+    'name': repo_name,
+    'repositoryType': 'yarn',
+    'type': 'yarn',
+    'assetType': '1'
+  }
+
+  custom_ranger_service_config = generate_ranger_service_config(ranger_plugin_properties)
+  if len(custom_ranger_service_config) > 0:
+    ranger_plugin_config.update(custom_ranger_service_config)
+
+  if stack_supports_ranger_kerberos:
+    ranger_plugin_config['ambari.service.check.user'] = policy_user
+    ranger_plugin_config['hadoop.security.authentication'] = 'kerberos' if security_enabled else 'simple'
+
+  if stack_supports_ranger_kerberos and security_enabled:
+    ranger_plugin_config['policy.download.auth.users'] = yarn_user
+    ranger_plugin_config['tag.download.auth.users'] = yarn_user
+
+  downloaded_custom_connector = None
+  previous_jdbc_jar_name = None
+  driver_curl_source = None
+  driver_curl_target = None
+  previous_jdbc_jar = None
+
+  if has_ranger_admin and stack_supports_ranger_audit_db:
+    xa_audit_db_flavor = config['configurations']['admin-properties']['DB_FLAVOR']
+    jdbc_jar_name, previous_jdbc_jar_name, audit_jdbc_url, jdbc_driver = get_audit_configs(config)
 
     downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
     driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
     driver_curl_target = format("{hadoop_yarn_home}/lib/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
     previous_jdbc_jar = format("{hadoop_yarn_home}/lib/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
 
+  xa_audit_db_is_enabled = False
+  if xml_configurations_supported and stack_supports_ranger_audit_db:
+    xa_audit_db_is_enabled = config['configurations']['ranger-yarn-audit']['xasecure.audit.destination.db']
+
+  xa_audit_hdfs_is_enabled = config['configurations']['ranger-yarn-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else False
+  ssl_keystore_password = config['configurations']['ranger-yarn-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password'] if xml_configurations_supported else None
+  ssl_truststore_password = config['configurations']['ranger-yarn-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password'] if xml_configurations_supported else None
+  credential_file = format('/etc/ranger/{repo_name}/cred.jceks')
+
+  # for SQLA explicitly disable audit to DB for Ranger
+  if has_ranger_admin and stack_supports_ranger_audit_db and xa_audit_db_flavor == 'sqla':
     xa_audit_db_is_enabled = False
-    ranger_audit_solr_urls = config['configurations']['ranger-admin-site']['ranger.audit.solr.urls']
-    if xml_configurations_supported and stack_supports_ranger_audit_db:
-      xa_audit_db_is_enabled = config['configurations']['ranger-yarn-audit']['xasecure.audit.destination.db']
-    xa_audit_hdfs_is_enabled = config['configurations']['ranger-yarn-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else None
-    ssl_keystore_password = unicode(config['configurations']['ranger-yarn-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']) if xml_configurations_supported else None
-    ssl_truststore_password = unicode(config['configurations']['ranger-yarn-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']) if xml_configurations_supported else None
-    credential_file = format('/etc/ranger/{repo_name}/cred.jceks') if xml_configurations_supported else None
-
-    #For SQLA explicitly disable audit to DB for Ranger
-    if xa_audit_db_flavor == 'sqla':
-      xa_audit_db_is_enabled = False
+
+# need this to capture cluster name from where ranger yarn plugin is enabled
+cluster_name = config['clusterName']
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/params_windows.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/params_windows.py
index 680ee47..1e6d3cf 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/params_windows.py
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/params_windows.py
@@ -19,8 +19,11 @@ Ambari Agent
 
 """
 
-from resource_management import *
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.format import format
 from resource_management.libraries import functions
+from resource_management.libraries.functions import is_empty
 import os
 from status_params import *
 
@@ -41,13 +44,13 @@ _authentication = config['configurations']['core-site']['hadoop.security.authent
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
 smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
 kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-rm_host = config['clusterHostInfo']['rm_host'][0]
+rm_host = config['clusterHostInfo']['resourcemanager_hosts'][0]
 rm_port = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'].split(':')[-1]
 rm_https_port = "8090"
 rm_webui_address = format("{rm_host}:{rm_port}")
 rm_webui_https_address = format("{rm_host}:{rm_https_port}")
 
-hs_host = config['clusterHostInfo']['hs_host'][0]
+hs_host = config['clusterHostInfo']['historyserver_hosts'][0]
 hs_port = config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address'].split(':')[-1]
 hs_webui_address = format("{hs_host}:{hs_port}")
 
@@ -56,7 +59,6 @@ hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
 
 exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
 exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path","/etc/hadoop/conf/yarn.exclude")
-update_files_only = default("/commandParams/update_files_only",False)
 
 nm_hosts = default("/clusterHostInfo/nm_hosts", [])
 #include file
@@ -65,3 +67,4 @@ include_hosts = None
 manage_include_files = default("/configurations/yarn-site/manage.include.files", False)
 if include_file_path and manage_include_files:
   include_hosts = list(set(nm_hosts) - set(exclude_hosts))
+update_files_only = default("/commandParams/update_files_only", False)
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/resourcemanager.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/resourcemanager.py
index 12c279a..99ad69f 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/resourcemanager.py
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/resourcemanager.py
@@ -20,7 +20,6 @@ Ambari Agent
 """
 
 from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import conf_select
 from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import StackFeature
 from resource_management.libraries.functions.stack_features import check_stack_feature
@@ -38,7 +37,8 @@ from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
 from resource_management.libraries.providers.hdfs_resource import HdfsResourceProvider
 from resource_management import is_empty
 from resource_management import shell
-
+from resource_management.core.resources.zkmigrator import ZkMigrator
+from resource_management.libraries.functions import namenode_ha_utils
 
 from yarn import yarn
 from service import service
@@ -93,10 +93,10 @@ class ResourcemanagerWindows(Resourcemanager):
 
     if params.include_hosts:
       File(params.include_file_path,
-           content=Template("include_hosts_list.j2"),
-           owner=yarn_user,
-           mode="f"
-           )
+         content=Template("include_hosts_list.j2"),
+         owner=yarn_user,
+         mode="f"
+    )
 
     if params.update_files_only == False:
       Execute(yarn_refresh_cmd, user=yarn_user)
@@ -105,23 +105,20 @@ class ResourcemanagerWindows(Resourcemanager):
 
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class ResourcemanagerDefault(Resourcemanager):
-  def get_component_name(self):
-    return "hadoop-yarn-resourcemanager"
-
   def pre_upgrade_restart(self, env, upgrade_type=None):
     Logger.info("Executing Stack Upgrade post-restart")
     import params
     env.set_params(params)
 
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      stack_select.select("hadoop-yarn-resourcemanager", params.version)
+      stack_select.select_packages(params.version)
 
   def start(self, env, upgrade_type=None):
     import params
 
     env.set_params(params)
     self.configure(env) # FOR SECURITY
-    if params.has_ranger_admin and params.is_supported_yarn_ranger:
+    if params.enable_ranger_yarn and params.is_supported_yarn_ranger:
       setup_ranger_yarn() #Ranger Yarn Plugin related calls
 
     # wait for active-dir and done-dir to be created by ATS if needed
@@ -138,66 +135,6 @@ class ResourcemanagerDefault(Resourcemanager):
     check_process_status(status_params.resourcemanager_pid_file)
     pass
 
-  def security_status(self, env):
-    import status_params
-    env.set_params(status_params)
-    if status_params.security_enabled:
-      props_value_check = {"yarn.timeline-service.http-authentication.type": "kerberos",
-                           "yarn.acl.enable": "true"}
-      props_empty_check = ["yarn.resourcemanager.principal",
-                           "yarn.resourcemanager.keytab",
-                           "yarn.resourcemanager.webapp.spnego-principal",
-                           "yarn.resourcemanager.webapp.spnego-keytab-file"]
-
-      props_read_check = ["yarn.resourcemanager.keytab",
-                          "yarn.resourcemanager.webapp.spnego-keytab-file"]
-      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
-                                           props_read_check)
-
-      yarn_expectations ={}
-      yarn_expectations.update(yarn_site_props)
-
-      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'yarn-site.xml': FILE_TYPE_XML})
-      result_issues = validate_security_config_properties(security_params, yarn_site_props)
-      if not result_issues: # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ( 'yarn-site' not in security_params
-               or 'yarn.resourcemanager.keytab' not in security_params['yarn-site']
-               or 'yarn.resourcemanager.principal' not in security_params['yarn-site']) \
-            or 'yarn.resourcemanager.webapp.spnego-keytab-file' not in security_params['yarn-site'] \
-            or 'yarn.resourcemanager.webapp.spnego-principal' not in security_params['yarn-site']:
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.resourcemanager.keytab'],
-                                security_params['yarn-site']['yarn.resourcemanager.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.yarn_user,
-                                security_params['yarn-site']['yarn.resourcemanager.webapp.spnego-keytab-file'],
-                                security_params['yarn-site']['yarn.resourcemanager.webapp.spnego-principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
   def refreshqueues(self, env):
     import params
 
@@ -239,8 +176,23 @@ class ResourcemanagerDefault(Resourcemanager):
       pass
     pass
 
-
-
+  def disable_security(self, env):
+    import params
+    if not params.stack_supports_zk_security:
+      Logger.info("Stack doesn't support zookeeper security")
+      return
+    if not params.rm_zk_address:
+      Logger.info("No zookeeper connection string. Skipping reverting ACL")
+      return
+    zkmigrator = ZkMigrator(
+      params.rm_zk_address, \
+      params.java_exec, \
+      params.java64_home, \
+      params.yarn_jaas_file, \
+      params.yarn_user)
+    zkmigrator.set_acls(params.rm_zk_znode, 'world:anyone:crdwa')
+    zkmigrator.set_acls(params.hadoop_registry_zk_root, 'world:anyone:crdwa')
+    zkmigrator.delete_node(params.rm_zk_failover_znode)
 
   def wait_for_dfs_directories_created(self, *dirs):
     import params
@@ -275,9 +227,12 @@ class ResourcemanagerDefault(Resourcemanager):
 
       dir_exists = None
 
-      if WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
+      nameservices = namenode_ha_utils.get_nameservices(params.hdfs_site)
+      nameservice = None if not nameservices else nameservices[-1]
+      
+      if WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.dfs_type):
         # check with webhdfs is much faster than executing hdfs dfs -test
-        util = WebHDFSUtil(params.hdfs_site, params.hdfs_user, params.security_enabled)
+        util = WebHDFSUtil(params.hdfs_site, nameservice, params.hdfs_user, params.security_enabled)
         list_status = util.run_command(dir_path, 'GETFILESTATUS', method='GET', ignore_status_codes=['404'], assertable_result=False)
         dir_exists = ('FileStatus' in list_status)
       else:
@@ -297,6 +252,10 @@ class ResourcemanagerDefault(Resourcemanager):
   def get_user(self):
     import params
     return params.yarn_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.resourcemanager_pid_file]
   
 if __name__ == "__main__":
   Resourcemanager().execute()
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/service.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/service.py
index b1179b9..d684080 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/service.py
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/service.py
@@ -19,11 +19,14 @@ Ambari Agent
 
 """
 
-from resource_management import *
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from ambari_commons import OSConst
-from resource_management.core.shell import as_user
+from resource_management.core.shell import as_user, as_sudo
 from resource_management.libraries.functions.show_logs import show_logs
+from resource_management.libraries.functions.format import format
+from resource_management.core.resources.system import Execute, File
+from resource_management.core.signal_utils import TerminateStrategy
+from resource_management.core.logger import Logger
 
 @OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
 def service(componentName, action='start', serviceName='yarn'):
@@ -41,6 +44,9 @@ def service(componentName, action='start', serviceName='yarn'):
   import params
 
   if serviceName == 'mapreduce' and componentName == 'historyserver':
+    if not params.hdfs_tmp_dir or params.hdfs_tmp_dir == None or params.hdfs_tmp_dir.lower() == 'null':
+      Logger.error("WARNING: HDFS tmp dir property (hdfs_tmp_dir) is empty or invalid. Ambari will change permissions for the folder on regular basis.")
+
     delete_pid_file = True
     daemon = format("{mapred_bin}/mr-jobhistory-daemon.sh")
     pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-{componentName}.pid")
@@ -58,8 +64,9 @@ def service(componentName, action='start', serviceName='yarn'):
   cmd = format("export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {daemon} --config {hadoop_conf_dir}")
 
   if action == 'start':
+
     daemon_cmd = format("{ulimit_cmd} {cmd} start {componentName}")
-    check_process = as_user(format("ls {pid_file} && ps -p `cat {pid_file}`"), user=usr)
+    check_process = as_sudo(["test", "-f", pid_file]) + " && " + as_sudo(["pgrep", "-F", pid_file])
 
     # Remove the pid file if its corresponding process is not running.
     File(pid_file, action = "delete", not_if = check_process)
@@ -102,4 +109,11 @@ def service(componentName, action='start', serviceName='yarn'):
   elif action == 'refreshQueues':
     rm_kinit_cmd = params.rm_kinit_cmd
     refresh_cmd = format("{rm_kinit_cmd} export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {yarn_container_bin}/yarn rmadmin -refreshQueues")
-    Execute(refresh_cmd, user=usr)
+
+    Execute(refresh_cmd,
+            user = usr,
+            timeout = 20, # when Yarn is not started command hangs forever and should be killed
+            tries = 5,
+            try_sleep = 5,
+            timeout_kill_strategy = TerminateStrategy.KILL_PROCESS_GROUP, # the process cannot be simply killed by 'kill -15', so kill pg group instead.
+    )
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/service_check.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/service_check.py
index daa8e7e..24baa0e 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/service_check.py
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/service_check.py
@@ -19,19 +19,23 @@ Ambari Agent
 
 """
 
-from resource_management import *
 import sys
 import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
 import re
-import subprocess
+from ambari_commons import subprocess32
 from ambari_commons import os_utils
 from ambari_commons import OSConst
 from ambari_commons.os_family_impl import OsFamilyImpl
-from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions.stack_features import check_stack_feature
 from resource_management.libraries.functions.get_user_call_output import get_user_call_output
 from resource_management.core.exceptions import Fail
 from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Execute, File
+from resource_management.core.source import StaticFile
+from resource_management.core import shell
 
 CURL_CONNECTION_TIMEOUT = '5'
 
@@ -99,7 +103,6 @@ class ServiceCheckDefault(ServiceCheck):
       path_to_distributed_shell_jar = format("{stack_root}/current/hadoop-yarn-client/hadoop-yarn-applications-distributedshell.jar")
     else:
       path_to_distributed_shell_jar = "/usr/lib/hadoop-yarn/hadoop-yarn-applications-distributedshell*.jar"
-
     yarn_distrubuted_shell_check_params = ["yarn org.apache.hadoop.yarn.applications.distributedshell.Client",
                                            "-shell_command", "ls", "-num_containers", "{number_of_nm}",
                                            "-jar", "{path_to_distributed_shell_jar}", "-timeout", "300000",
@@ -112,48 +115,70 @@ class ServiceCheckDefault(ServiceCheck):
     else:
       smoke_cmd = yarn_distrubuted_shell_check_cmd
 
-    return_code, out = shell.checked_call(smoke_cmd,
-                                          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-                                          user=params.smokeuser,
-                                          )
+    #return_code, out = shell.checked_call(smoke_cmd,
+    #                                      path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+    #                                      user=params.smokeuser,
+    #                                      )
 
-    m = re.search("appTrackingUrl=(.*),\s", out)
-    app_url = m.group(1)
+    #m = re.search("appTrackingUrl=(.*),\s", out)
+    #app_url = m.group(1)
 
-    splitted_app_url = str(app_url).split('/')
+    #splitted_app_url = str(app_url).split('/')
 
-    for item in splitted_app_url:
-      if "application" in item:
-        application_name = item
+    #for item in splitted_app_url:
+    #  if "application" in item:
+    #    application_name = item
 
-    for rm_webapp_address in params.rm_webapp_addresses_list:
-      info_app_url = params.scheme + "://" + rm_webapp_address + "/ws/v1/cluster/apps/" + application_name
+    # Find out the active RM from RM list
+    # Raise an exception if the active rm cannot be determined
+    #active_rm_webapp_address = self.get_active_rm_webapp_address()
+    #Logger.info("Active Resource Manager web app address is : " + active_rm_webapp_address);
 
-      get_app_info_cmd = "curl --negotiate -u : -ksL --connect-timeout " + CURL_CONNECTION_TIMEOUT + " " + info_app_url
+    # Verify job state from active resource manager via rest api
+    #info_app_url = params.scheme + "://" + active_rm_webapp_address + "/ws/v1/cluster/apps/" + application_name
+    #get_app_info_cmd = "curl --negotiate -u : -ks --location-trusted --connect-timeout " + CURL_CONNECTION_TIMEOUT + " " + info_app_url
 
-      return_code, stdout, _ = get_user_call_output(get_app_info_cmd,
-                                            user=params.smokeuser,
-                                            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-                                            )
-      
-      # Handle HDP<2.2.8.1 where RM doesn't do automatic redirection from standby to active
-      if stdout.startswith("This is standby RM. Redirecting to the current active RM:"):
-        Logger.info(format("Skipped checking of {rm_webapp_address} since returned '{stdout}'"))
-        continue
+    #return_code, stdout, _ = get_user_call_output(get_app_info_cmd,
+    #                                              user=params.smokeuser,
+    #                                              path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+    #                                              )
 
-      try:
-        json_response = json.loads(stdout)
-      except Exception as e:
-        raise Fail(format("Response from YARN API was not a valid JSON. Response: {stdout}"))
-      
-      if json_response is None or 'app' not in json_response or \
-              'state' not in json_response['app'] or 'finalStatus' not in json_response['app']:
-        raise Fail("Application " + app_url + " returns invalid data.")
+    #try:
+    #  json_response = json.loads(stdout)
+    #except Exception as e:
+    #  raise Fail(format("Response from YARN API was not a valid JSON. Response: {stdout}"))
 
-      if json_response['app']['state'] != "FINISHED" or json_response['app']['finalStatus'] != "SUCCEEDED":
-        raise Fail("Application " + app_url + " state/status is not valid. Should be FINISHED/SUCCEEDED.")
+    #if json_response is None or 'app' not in json_response or \
+    #        'state' not in json_response['app'] or 'finalStatus' not in json_response['app']:
+    #  raise Fail("Application " + app_url + " returns invalid data.")
 
+    #if json_response['app']['state'] != "FINISHED" or json_response['app']['finalStatus'] != "SUCCEEDED":
+    #  raise Fail("Application " + app_url + " state/status is not valid. Should be FINISHED/SUCCEEDED.")
 
+  def get_active_rm_webapp_address(self):
+    import params
+    active_rm_webapp_address = None
+    rm_webapp_addresses = params.rm_webapp_addresses_list
+    if rm_webapp_addresses is not None and len(rm_webapp_addresses) > 0:
+      for rm_webapp_address in rm_webapp_addresses:
+        rm_state_url = params.scheme + "://" + rm_webapp_address + "/ws/v1/cluster/info"
+        get_cluster_info_cmd = "curl --negotiate -u : -ks --location-trusted --connect-timeout " + CURL_CONNECTION_TIMEOUT + " " + rm_state_url
+        try:
+          return_code, stdout, _ = get_user_call_output(get_cluster_info_cmd,
+                                                        user=params.smokeuser,
+                                                        path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+                                                        )
+          json_response = json.loads(stdout)
+          if json_response is not None and 'clusterInfo' in json_response \
+            and json_response['clusterInfo']['haState'] == "ACTIVE":
+              active_rm_webapp_address = rm_webapp_address
+              break
+        except Exception as e:
+          Logger.warning(format("Cluster info is not available from calling {get_cluster_info_cmd}"))
+
+    if active_rm_webapp_address is None:
+      raise Fail('Resource Manager state is not available. Failed to determine the active Resource Manager web application address from {0}'.format(','.join(rm_webapp_addresses)));
+    return active_rm_webapp_address
 
 if __name__ == "__main__":
   ServiceCheck().execute()
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/setup_ranger_yarn.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/setup_ranger_yarn.py
index 6ea7f82..8fc6482 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/setup_ranger_yarn.py
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/setup_ranger_yarn.py
@@ -19,7 +19,7 @@ from resource_management.core.logger import Logger
 def setup_ranger_yarn():
   import params
 
-  if params.has_ranger_admin:
+  if params.enable_ranger_yarn:
 
     from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin
 
@@ -55,9 +55,9 @@ def setup_ranger_yarn():
                         params.policy_user, params.policymgr_mgr_url,
                         params.enable_ranger_yarn, conf_dict=params.hadoop_conf_dir,
                         component_user=params.yarn_user, component_group=params.user_group, cache_service_list=['yarn'],
-                        plugin_audit_properties=params.config['configurations']['ranger-yarn-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-yarn-audit'],
-                        plugin_security_properties=params.config['configurations']['ranger-yarn-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-yarn-security'],
-                        plugin_policymgr_ssl_properties=params.config['configurations']['ranger-yarn-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-yarn-policymgr-ssl'],
+                        plugin_audit_properties=params.config['configurations']['ranger-yarn-audit'], plugin_audit_attributes=params.config['configurationAttributes']['ranger-yarn-audit'],
+                        plugin_security_properties=params.config['configurations']['ranger-yarn-security'], plugin_security_attributes=params.config['configurationAttributes']['ranger-yarn-security'],
+                        plugin_policymgr_ssl_properties=params.config['configurations']['ranger-yarn-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configurationAttributes']['ranger-yarn-policymgr-ssl'],
                         component_list=['hadoop-yarn-resourcemanager'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
                         credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password, 
                         ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
@@ -68,4 +68,4 @@ def setup_ranger_yarn():
                         component_user_keytab=params.rm_keytab if params.security_enabled else None
       )
   else:
-    Logger.info('Ranger admin not installed')
+    Logger.info('Ranger Yarn plugin is not enabled')
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/status_params.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/status_params.py
index c2e9d92..b69c531 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/status_params.py
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/status_params.py
@@ -19,43 +19,32 @@ limitations under the License.
 """
 from resource_management.libraries.script.script import Script
 from resource_management.libraries import functions
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions.default import default
-from ambari_commons import OSCheck
 
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
-if OSCheck.is_windows_family():
-  resourcemanager_win_service_name = 'resourcemanager'
-  nodemanager_win_service_name = 'nodemanager'
-  historyserver_win_service_name = 'historyserver'
-  timelineserver_win_service_name = 'timelineserver'
-
-  service_map = {
-    'resourcemanager' : resourcemanager_win_service_name,
-    'nodemanager' : nodemanager_win_service_name,
-    'historyserver' : historyserver_win_service_name,
-    'timelineserver' : timelineserver_win_service_name
-  }
-else:
-  mapred_user = config['configurations']['mapred-env']['mapred_user']
-  yarn_user = config['configurations']['yarn-env']['yarn_user']
-  yarn_pid_dir_prefix = config['configurations']['yarn-env']['yarn_pid_dir_prefix']
-  mapred_pid_dir_prefix = config['configurations']['mapred-env']['mapred_pid_dir_prefix']
-  yarn_pid_dir = format("{yarn_pid_dir_prefix}/{yarn_user}")
-  mapred_pid_dir = format("{mapred_pid_dir_prefix}/{mapred_user}")
-
-  resourcemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-resourcemanager.pid")
-  nodemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-nodemanager.pid")
-  yarn_historyserver_pid_file_old = format("{yarn_pid_dir}/yarn-{yarn_user}-historyserver.pid")
-  yarn_historyserver_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-timelineserver.pid")  # *-historyserver.pid is deprecated
-  mapred_historyserver_pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-historyserver.pid")
-
-  hadoop_conf_dir = functions.conf_select.get_hadoop_conf_dir()
-
-  hostname = config['hostname']
-  kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-  security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-stack_name = default("/hostLevelParams/stack_name", None)
\ No newline at end of file
+mapred_user = config['configurations']['mapred-env']['mapred_user']
+yarn_user = config['configurations']['yarn-env']['yarn_user']
+yarn_pid_dir_prefix = config['configurations']['yarn-env']['yarn_pid_dir_prefix']
+mapred_pid_dir_prefix = config['configurations']['mapred-env']['mapred_pid_dir_prefix']
+yarn_pid_dir = format("{yarn_pid_dir_prefix}/{yarn_user}")
+mapred_pid_dir = format("{mapred_pid_dir_prefix}/{mapred_user}")
+
+resourcemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-resourcemanager.pid")
+nodemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-nodemanager.pid")
+yarn_historyserver_pid_file_old = format("{yarn_pid_dir}/yarn-{yarn_user}-historyserver.pid")
+yarn_historyserver_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-timelineserver.pid")  # *-historyserver.pid is deprecated
+mapred_historyserver_pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-historyserver.pid")
+
+hadoop_home = stack_select.get_hadoop_dir("home")
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+
+hostname = config['agentLevelParams']['hostname']
+kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+stack_name = default("/clusterLevelParams/stack_name", None)
\ No newline at end of file
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/yarn.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/yarn.py
index 147fb38..6145525 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/yarn.py
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/yarn.py
@@ -27,74 +27,17 @@ from resource_management.libraries.script.script import Script
 from resource_management.core.resources.service import ServiceConfig
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.libraries.functions.lzo_utils import install_lzo_if_needed
 from resource_management.core.resources.system import Directory
 from resource_management.core.resources.system import File
 from resource_management.libraries.resources.xml_config import XmlConfig
-from resource_management.core.source import InlineTemplate
-from resource_management.core.source import Template
+from resource_management.core.source import InlineTemplate, Template
 from resource_management.core.logger import Logger
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from ambari_commons import OSConst
 
 from resource_management.libraries.functions.mounted_dirs_helper import handle_mounted_dirs
 
-# Local Imports
-
-
-@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
-def yarn(name = None):
-  import params
-  XmlConfig("mapred-site.xml",
-            conf_dir=params.config_dir,
-            configurations=params.config['configurations']['mapred-site'],
-            owner=params.yarn_user,
-            mode='f'
-  )
-  XmlConfig("yarn-site.xml",
-            conf_dir=params.config_dir,
-            configurations=params.config['configurations']['yarn-site'],
-            owner=params.yarn_user,
-            mode='f',
-            configuration_attributes=params.config['configuration_attributes']['yarn-site']
-  )
-  XmlConfig("capacity-scheduler.xml",
-            conf_dir=params.config_dir,
-            configurations=params.config['configurations']['capacity-scheduler'],
-            owner=params.yarn_user,
-            mode='f'
-  )
-
-  if params.service_map.has_key(name):
-    service_name = params.service_map[name]
-
-    ServiceConfig(service_name,
-                  action="change_user",
-                  username = params.yarn_user,
-                  password = Script.get_password(params.yarn_user))
-
-def create_log_dir(dir_name):
-  import params
-  Directory(dir_name,
-            create_parents = True,
-            cd_access="a",
-            mode=0775,
-            owner=params.yarn_user,
-            group=params.user_group,
-            ignore_failures=True,
-  )
-  
-def create_local_dir(dir_name):
-  import params
-  Directory(dir_name,
-            create_parents = True,
-            cd_access="a",
-            mode=0755,
-            owner=params.yarn_user,
-            group=params.user_group,
-            ignore_failures=True,
-            recursive_mode_flags = {'f': 'a+rw', 'd': 'a+rwx'},
-  )
-
 @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
 def yarn(name=None, config_dir=None):
   """
@@ -103,141 +46,54 @@ def yarn(name=None, config_dir=None):
   """
   import params
 
+  install_lzo_if_needed()
+
   if config_dir is None:
     config_dir = params.hadoop_conf_dir
 
-  if name == "historyserver":
-    if params.yarn_log_aggregation_enabled:
-      params.HdfsResource(params.yarn_nm_app_log_dir,
-                           action="create_on_execute",
-                           type="directory",
-                           owner=params.yarn_user,
-                           group=params.user_group,
-                           mode=0777,
-                           recursive_chmod=True
-      )
-
-    # create the /tmp folder with proper permissions if it doesn't exist yet
-    if params.entity_file_history_directory.startswith('/tmp'):
-        params.HdfsResource(params.hdfs_tmp_dir,
-                            action="create_on_execute",
-                            type="directory",
-                            owner=params.hdfs_user,
-                            mode=0777,
-        )
-
-    params.HdfsResource(params.entity_file_history_directory,
-                           action="create_on_execute",
-                           type="directory",
-                           owner=params.yarn_user,
-                           group=params.user_group
-    )
-    params.HdfsResource("/mapred",
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.mapred_user
-    )
-    params.HdfsResource("/mapred/system",
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.hdfs_user
-    )
-    params.HdfsResource(params.mapreduce_jobhistory_done_dir,
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.mapred_user,
-                         group=params.user_group,
-                         change_permissions_for_parents=True,
-                         mode=0777
-    )
-    params.HdfsResource(None, action="execute")
-    Directory(params.jhs_leveldb_state_store_dir,
-              owner=params.mapred_user,
-              group=params.user_group,
-              create_parents = True,
-              cd_access="a",
-              recursive_ownership = True,
-              )
-
-  #<editor-fold desc="Node Manager Section">
-  if name == "nodemanager":
-
-    # First start after enabling/disabling security
-    if params.toggle_nm_security:
-      Directory(params.nm_local_dirs_list + params.nm_log_dirs_list,
-                action='delete'
-      )
-
-      # If yarn.nodemanager.recovery.dir exists, remove this dir
-      if params.yarn_nodemanager_recovery_dir:
-        Directory(InlineTemplate(params.yarn_nodemanager_recovery_dir).get_content(),
-                  action='delete'
-        )
-
-      # Setting NM marker file
-      if params.security_enabled:
-        Directory(params.nm_security_marker_dir)
-        File(params.nm_security_marker,
-             content="Marker file to track first start after enabling/disabling security. "
-                     "During first start yarn local, log dirs are removed and recreated"
-             )
-      elif not params.security_enabled:
-        File(params.nm_security_marker, action="delete")
-
-
-    if not params.security_enabled or params.toggle_nm_security:
-      # handle_mounted_dirs ensures that we don't create dirs which are temporary unavailable (unmounted), and intended to reside on a different mount.
-      nm_log_dir_to_mount_file_content = handle_mounted_dirs(create_log_dir, params.nm_log_dirs, params.nm_log_dir_to_mount_file, params)
-      # create a history file used by handle_mounted_dirs
-      File(params.nm_log_dir_to_mount_file,
-           owner=params.hdfs_user,
-           group=params.user_group,
-           mode=0644,
-           content=nm_log_dir_to_mount_file_content
-      )
-      nm_local_dir_to_mount_file_content = handle_mounted_dirs(create_local_dir, params.nm_local_dirs, params.nm_local_dir_to_mount_file, params)
-      File(params.nm_local_dir_to_mount_file,
-           owner=params.hdfs_user,
-           group=params.user_group,
-           mode=0644,
-           content=nm_local_dir_to_mount_file_content
-      )
-  #</editor-fold>
-
   if params.yarn_nodemanager_recovery_dir:
     Directory(InlineTemplate(params.yarn_nodemanager_recovery_dir).get_content(),
               owner=params.yarn_user,
               group=params.user_group,
-              create_parents = True,
+              create_parents=True,
               mode=0755,
-              cd_access = 'a',
+              cd_access='a',
     )
 
   Directory([params.yarn_pid_dir_prefix, params.yarn_pid_dir, params.yarn_log_dir],
             owner=params.yarn_user,
             group=params.user_group,
-            create_parents = True,
-            cd_access = 'a',
+            create_parents=True,
+            cd_access='a',
   )
-
   Directory([params.mapred_pid_dir_prefix, params.mapred_pid_dir, params.mapred_log_dir_prefix, params.mapred_log_dir],
             owner=params.mapred_user,
             group=params.user_group,
-            create_parents = True,
-            cd_access = 'a',
+            create_parents=True,
+            cd_access='a',
   )
   Directory([params.yarn_log_dir_prefix],
             owner=params.yarn_user,
             group=params.user_group,
-            create_parents = True,
+            create_parents=True,
             ignore_failures=True,
-            cd_access = 'a',
+            cd_access='a',
   )
 
+  # Some of these function calls depend on the directories above being created first.
+  if name == 'resourcemanager':
+    setup_resourcemanager()
+  elif name == 'nodemanager':
+    setup_nodemanager()
+  elif name == 'apptimelineserver':
+    setup_ats()
+  elif name == 'historyserver':
+    setup_historyserver()
+  
   XmlConfig("core-site.xml",
             conf_dir=config_dir,
             configurations=params.config['configurations']['core-site'],
-            configuration_attributes=params.config['configuration_attributes']['core-site'],
+            configuration_attributes=params.config['configurationAttributes']['core-site'],
             owner=params.hdfs_user,
             group=params.user_group,
             mode=0644
@@ -246,20 +102,19 @@ def yarn(name=None, config_dir=None):
   # During RU, Core Masters and Slaves need hdfs-site.xml
   # TODO, instead of specifying individual configs, which is susceptible to breaking when new configs are added,
   # RU should rely on all available in <stack-root>/<version>/hadoop/conf
-  if 'hdfs-site' in params.config['configurations']:
-    XmlConfig("hdfs-site.xml",
-              conf_dir=config_dir,
-              configurations=params.config['configurations']['hdfs-site'],
-              configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
-              owner=params.hdfs_user,
-              group=params.user_group,
-              mode=0644
-    )
+  XmlConfig("hdfs-site.xml",
+            conf_dir=config_dir,
+            configurations=params.config['configurations']['hdfs-site'],
+            configuration_attributes=params.config['configurationAttributes']['hdfs-site'],
+            owner=params.hdfs_user,
+            group=params.user_group,
+            mode=0644
+  )
 
   XmlConfig("mapred-site.xml",
             conf_dir=config_dir,
             configurations=params.config['configurations']['mapred-site'],
-            configuration_attributes=params.config['configuration_attributes']['mapred-site'],
+            configuration_attributes=params.config['configurationAttributes']['mapred-site'],
             owner=params.yarn_user,
             group=params.user_group,
             mode=0644
@@ -268,7 +123,7 @@ def yarn(name=None, config_dir=None):
   XmlConfig("yarn-site.xml",
             conf_dir=config_dir,
             configurations=params.config['configurations']['yarn-site'],
-            configuration_attributes=params.config['configuration_attributes']['yarn-site'],
+            configuration_attributes=params.config['configurationAttributes']['yarn-site'],
             owner=params.yarn_user,
             group=params.user_group,
             mode=0644
@@ -277,103 +132,12 @@ def yarn(name=None, config_dir=None):
   XmlConfig("capacity-scheduler.xml",
             conf_dir=config_dir,
             configurations=params.config['configurations']['capacity-scheduler'],
-            configuration_attributes=params.config['configuration_attributes']['capacity-scheduler'],
+            configuration_attributes=params.config['configurationAttributes']['capacity-scheduler'],
             owner=params.yarn_user,
             group=params.user_group,
             mode=0644
   )
 
-  if name == 'resourcemanager':
-    Directory(params.rm_nodes_exclude_dir,
-         mode=0755,
-         create_parents=True,
-         cd_access='a',
-    )
-    File(params.exclude_file_path,
-         content=Template("exclude_hosts_list.j2"),
-         owner=params.yarn_user,
-         group=params.user_group
-    )
-    if params.include_hosts:
-      Directory(params.rm_nodes_include_dir,
-        mode=0755,
-        create_parents=True,
-        cd_access='a',
-      )
-      File(params.include_file_path,
-        content=Template("include_hosts_list.j2"),
-        owner=params.yarn_user,
-        group=params.user_group
-      )
-    File(params.yarn_job_summary_log,
-       owner=params.yarn_user,
-       group=params.user_group
-    )
-    if not is_empty(params.node_label_enable) and params.node_label_enable or is_empty(params.node_label_enable) and params.node_labels_dir:
-      params.HdfsResource(params.node_labels_dir,
-                           type="directory",
-                           action="create_on_execute",
-                           change_permissions_for_parents=True,
-                           owner=params.yarn_user,
-                           group=params.user_group,
-                           mode=0700
-      )
-      params.HdfsResource(None, action="execute")
-
-
-  elif name == 'apptimelineserver':
-    Directory(params.ats_leveldb_dir,
-       owner=params.yarn_user,
-       group=params.user_group,
-       create_parents = True,
-       cd_access="a",
-    )
-
-    # if stack support application timeline-service state store property (timeline_state_store stack feature)
-    if params.stack_supports_timeline_state_store:
-      Directory(params.ats_leveldb_state_store_dir,
-       owner=params.yarn_user,
-       group=params.user_group,
-       create_parents = True,
-       cd_access="a",
-      )
-    # app timeline server 1.5 directories
-    if not is_empty(params.entity_groupfs_store_dir):
-      parent_path = os.path.dirname(params.entity_groupfs_store_dir)
-      params.HdfsResource(parent_path,
-                          type="directory",
-                          action="create_on_execute",
-                          change_permissions_for_parents=True,
-                          owner=params.yarn_user,
-                          group=params.user_group,
-                          mode=0755
-                          )
-      params.HdfsResource(params.entity_groupfs_store_dir,
-                          type="directory",
-                          action="create_on_execute",
-                          owner=params.yarn_user,
-                          group=params.user_group,
-                          mode=params.entity_groupfs_store_dir_mode
-                          )
-    if not is_empty(params.entity_groupfs_active_dir):
-      parent_path = os.path.dirname(params.entity_groupfs_active_dir)
-      params.HdfsResource(parent_path,
-                          type="directory",
-                          action="create_on_execute",
-                          change_permissions_for_parents=True,
-                          owner=params.yarn_user,
-                          group=params.user_group,
-                          mode=0755
-                          )
-      params.HdfsResource(params.entity_groupfs_active_dir,
-                          type="directory",
-                          action="create_on_execute",
-                          owner=params.yarn_user,
-                          group=params.user_group,
-                          mode=params.entity_groupfs_active_dir_mode
-                          )
-    params.HdfsResource(None, action="execute")
-
   File(format("{limits_conf_dir}/yarn.conf"),
        mode=0644,
        content=Template('yarn.conf.j2')
@@ -391,8 +155,7 @@ def yarn(name=None, config_dir=None):
        content=InlineTemplate(params.yarn_env_sh_template)
   )
 
-  container_executor = format("{yarn_container_bin}/container-executor")
-  File(container_executor,
+  File(format("{yarn_container_bin}/container-executor"),
       group=params.yarn_executor_container_group,
       mode=params.container_executor_mode
   )
@@ -400,7 +163,7 @@ def yarn(name=None, config_dir=None):
   File(os.path.join(config_dir, "container-executor.cfg"),
       group=params.user_group,
       mode=0644,
-      content=Template('container-executor.cfg.j2')
+      content=InlineTemplate(params.container_executor_cfg_template)
   )
 
   Directory(params.cgroups_dir,
@@ -409,15 +172,8 @@ def yarn(name=None, config_dir=None):
             mode=0755,
             cd_access="a")
 
-  if params.security_enabled:
-    tc_mode = 0644
-    tc_owner = "root"
-  else:
-    tc_mode = None
-    tc_owner = params.hdfs_user
-
   File(os.path.join(config_dir, "mapred-env.sh"),
-       owner=tc_owner,
+       owner=params.tc_owner,
        mode=0755,
        content=InlineTemplate(params.mapred_env_sh_template)
   )
@@ -429,40 +185,61 @@ def yarn(name=None, config_dir=None):
          mode=06050
     )
     File(os.path.join(config_dir, 'taskcontroller.cfg'),
-         owner = tc_owner,
-         mode = tc_mode,
+         owner = params.tc_owner,
+         mode = params.tc_mode,
          group = params.mapred_tt_group,
          content=Template("taskcontroller.cfg.j2")
     )
+    File(os.path.join(config_dir, 'yarn_jaas.conf'),
+         owner=params.yarn_user,
+         group=params.user_group,
+         content=Template("yarn_jaas.conf.j2")
+    )
+    if params.has_ats:
+      File(os.path.join(config_dir, 'yarn_ats_jaas.conf'),
+           owner=params.yarn_user,
+           group=params.user_group,
+           content=Template("yarn_ats_jaas.conf.j2")
+      )
+    File(os.path.join(config_dir, 'yarn_nm_jaas.conf'),
+         owner=params.yarn_user,
+         group=params.user_group,
+         content=Template("yarn_nm_jaas.conf.j2")
+    )
+    if params.has_hs:
+      File(os.path.join(config_dir, 'mapred_jaas.conf'),
+           owner=params.mapred_user,
+           group=params.user_group,
+           content=Template("mapred_jaas.conf.j2")
+      )
   else:
     File(os.path.join(config_dir, 'taskcontroller.cfg'),
-         owner=tc_owner,
+         owner=params.tc_owner,
          content=Template("taskcontroller.cfg.j2")
     )
 
-  if "mapred-site" in params.config['configurations']:
-    XmlConfig("mapred-site.xml",
-              conf_dir=config_dir,
-              configurations=params.config['configurations']['mapred-site'],
-              configuration_attributes=params.config['configuration_attributes']['mapred-site'],
-              owner=params.mapred_user,
-              group=params.user_group
-    )
+  XmlConfig("mapred-site.xml",
+            conf_dir=config_dir,
+            configurations=params.config['configurations']['mapred-site'],
+            configuration_attributes=params.config['configurationAttributes']['mapred-site'],
+            owner=params.mapred_user,
+            group=params.user_group
+  )
+
+  XmlConfig("capacity-scheduler.xml",
+            conf_dir=config_dir,
+            configurations=params.config['configurations'][
+              'capacity-scheduler'],
+            configuration_attributes=params.config['configurationAttributes']['capacity-scheduler'],
+            owner=params.hdfs_user,
+            group=params.user_group
+  )
 
-  if "capacity-scheduler" in params.config['configurations']:
-    XmlConfig("capacity-scheduler.xml",
-              conf_dir=config_dir,
-              configurations=params.config['configurations'][
-                'capacity-scheduler'],
-              configuration_attributes=params.config['configuration_attributes']['capacity-scheduler'],
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
   if "ssl-client" in params.config['configurations']:
     XmlConfig("ssl-client.xml",
               conf_dir=config_dir,
               configurations=params.config['configurations']['ssl-client'],
-              configuration_attributes=params.config['configuration_attributes']['ssl-client'],
+              configuration_attributes=params.config['configurationAttributes']['ssl-client'],
               owner=params.hdfs_user,
               group=params.user_group
     )
@@ -477,7 +254,7 @@ def yarn(name=None, config_dir=None):
     XmlConfig("ssl-client.xml",
               conf_dir=params.hadoop_conf_secure_dir,
               configurations=params.config['configurations']['ssl-client'],
-              configuration_attributes=params.config['configuration_attributes']['ssl-client'],
+              configuration_attributes=params.config['configurationAttributes']['ssl-client'],
               owner=params.hdfs_user,
               group=params.user_group
     )
@@ -486,7 +263,7 @@ def yarn(name=None, config_dir=None):
     XmlConfig("ssl-server.xml",
               conf_dir=config_dir,
               configurations=params.config['configurations']['ssl-server'],
-              configuration_attributes=params.config['configuration_attributes']['ssl-server'],
+              configuration_attributes=params.config['configurationAttributes']['ssl-server'],
               owner=params.hdfs_user,
               group=params.user_group
     )
@@ -509,3 +286,255 @@ def yarn(name=None, config_dir=None):
          owner=params.mapred_user,
          group=params.user_group
     )
+
+def setup_historyserver():
+  import params
+
+  if params.yarn_log_aggregation_enabled:
+    params.HdfsResource(params.yarn_nm_app_log_dir,
+                         action="create_on_execute",
+                         type="directory",
+                         owner=params.yarn_user,
+                         group=params.user_group,
+                         mode=01777,
+                         recursive_chmod=True
+    )
+
+  # create the /tmp folder with proper permissions if it doesn't exist yet
+  if params.entity_file_history_directory.startswith('/tmp'):
+      params.HdfsResource(params.hdfs_tmp_dir,
+                          action="create_on_execute",
+                          type="directory",
+                          owner=params.hdfs_user,
+                          mode=0777,
+      )
+
+  params.HdfsResource(params.entity_file_history_directory,
+                         action="create_on_execute",
+                         type="directory",
+                         owner=params.yarn_user,
+                         group=params.user_group
+  )
+  params.HdfsResource("/mapred",
+                       type="directory",
+                       action="create_on_execute",
+                       owner=params.mapred_user
+  )
+  params.HdfsResource("/mapred/system",
+                       type="directory",
+                       action="create_on_execute",
+                       owner=params.hdfs_user
+  )
+  params.HdfsResource(params.mapreduce_jobhistory_done_dir,
+                       type="directory",
+                       action="create_on_execute",
+                       owner=params.mapred_user,
+                       group=params.user_group,
+                       change_permissions_for_parents=True,
+                       mode=0777
+  )
+  params.HdfsResource(None, action="execute")
+  Directory(params.jhs_leveldb_state_store_dir,
+            owner=params.mapred_user,
+            group=params.user_group,
+            create_parents = True,
+            cd_access="a",
+            recursive_ownership = True,
+            )
+
+def setup_nodemanager():
+  import params
+
+  # First start after enabling/disabling security
+  if params.toggle_nm_security:
+    Directory(params.nm_local_dirs_list + params.nm_log_dirs_list,
+              action='delete'
+    )
+
+    # If yarn.nodemanager.recovery.dir exists, remove this dir
+    if params.yarn_nodemanager_recovery_dir:
+      Directory(InlineTemplate(params.yarn_nodemanager_recovery_dir).get_content(),
+                action='delete'
+      )
+
+    # Setting NM marker file
+    if params.security_enabled:
+      Directory(params.nm_security_marker_dir)
+      File(params.nm_security_marker,
+           content="Marker file to track first start after enabling/disabling security. "
+                   "During first start yarn local, log dirs are removed and recreated"
+           )
+    elif not params.security_enabled:
+      File(params.nm_security_marker, action="delete")
+
+  if not params.security_enabled or params.toggle_nm_security:
+    # handle_mounted_dirs ensures that we don't create dirs which are temporary unavailable (unmounted), and intended to reside on a different mount.
+    nm_log_dir_to_mount_file_content = handle_mounted_dirs(create_log_dir, params.nm_log_dirs, params.nm_log_dir_to_mount_file, params)
+    # create a history file used by handle_mounted_dirs
+    File(params.nm_log_dir_to_mount_file,
+         owner=params.hdfs_user,
+         group=params.user_group,
+         mode=0644,
+         content=nm_log_dir_to_mount_file_content
+    )
+    nm_local_dir_to_mount_file_content = handle_mounted_dirs(create_local_dir, params.nm_local_dirs, params.nm_local_dir_to_mount_file, params)
+    File(params.nm_local_dir_to_mount_file,
+         owner=params.hdfs_user,
+         group=params.user_group,
+         mode=0644,
+         content=nm_local_dir_to_mount_file_content
+    )
+
+def setup_resourcemanager():
+  import params
+
+  Directory(params.rm_nodes_exclude_dir,
+       mode=0755,
+       create_parents=True,
+       cd_access='a',
+  )
+  File(params.exclude_file_path,
+       content=Template("exclude_hosts_list.j2"),
+       owner=params.yarn_user,
+       group=params.user_group
+  )
+  if params.include_hosts:
+    Directory(params.rm_nodes_include_dir,
+      mode=0755,
+      create_parents=True,
+      cd_access='a',
+      )
+    File(params.include_file_path,
+      content=Template("include_hosts_list.j2"),
+      owner=params.yarn_user,
+      group=params.user_group
+    )
+  File(params.yarn_job_summary_log,
+     owner=params.yarn_user,
+     group=params.user_group
+  )
+  if not is_empty(params.node_label_enable) and params.node_label_enable or is_empty(params.node_label_enable) and params.node_labels_dir:
+    params.HdfsResource(params.node_labels_dir,
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.yarn_user,
+                         group=params.user_group,
+                         mode=0700
+    )
+    params.HdfsResource(None, action="execute")
+
+def setup_ats():
+  import params
+
+  Directory(params.ats_leveldb_dir,
+     owner=params.yarn_user,
+     group=params.user_group,
+     create_parents = True,
+     cd_access="a",
+  )
+
+  # if stack support application timeline-service state store property (timeline_state_store stack feature)
+  if params.stack_supports_timeline_state_store:
+    Directory(params.ats_leveldb_state_store_dir,
+     owner=params.yarn_user,
+     group=params.user_group,
+     create_parents = True,
+     cd_access="a",
+    )
+  # app timeline server 1.5 directories
+  if not is_empty(params.entity_groupfs_store_dir):
+    parent_path = os.path.dirname(os.path.abspath(params.entity_groupfs_store_dir))
+    params.HdfsResource(parent_path,
+                        type="directory",
+                        action="create_on_execute",
+                        change_permissions_for_parents=True,
+                        owner=params.yarn_user,
+                        group=params.user_group,
+                        mode=0755
+                        )
+    params.HdfsResource(params.entity_groupfs_store_dir,
+                        type="directory",
+                        action="create_on_execute",
+                        owner=params.yarn_user,
+                        group=params.user_group,
+                        mode=params.entity_groupfs_store_dir_mode
+                        )
+  if not is_empty(params.entity_groupfs_active_dir):
+    parent_path = os.path.dirname(os.path.abspath(params.entity_groupfs_active_dir))
+    params.HdfsResource(parent_path,
+                        type="directory",
+                        action="create_on_execute",
+                        change_permissions_for_parents=True,
+                        owner=params.yarn_user,
+                        group=params.user_group,
+                        mode=0755
+                        )
+    params.HdfsResource(params.entity_groupfs_active_dir,
+                        type="directory",
+                        action="create_on_execute",
+                        owner=params.yarn_user,
+                        group=params.user_group,
+                        mode=params.entity_groupfs_active_dir_mode
+                        )
+  params.HdfsResource(None, action="execute")
+
+def create_log_dir(dir_name):
+  import params
+  Directory(dir_name,
+            create_parents = True,
+            cd_access="a",
+            mode=0775,
+            owner=params.yarn_user,
+            group=params.user_group,
+            ignore_failures=True,
+  )
+
+
+def create_local_dir(dir_name):
+  import params
+
+  directory_args = {}
+
+  if params.toggle_nm_security:
+    directory_args["recursive_mode_flags"] = {'f': 'a+rw', 'd': 'a+rwx'}
+
+  Directory(dir_name,
+            create_parents=True,
+            cd_access="a",
+            mode=0755,
+            owner=params.yarn_user,
+            group=params.user_group,
+            ignore_failures=True,
+            **directory_args
+            )
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def yarn(name = None):
+  import params
+  XmlConfig("mapred-site.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['mapred-site'],
+            owner=params.yarn_user,
+            mode='f'
+  )
+  XmlConfig("yarn-site.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['yarn-site'],
+            owner=params.yarn_user,
+            mode='f',
+            configuration_attributes=params.config['configurationAttributes']['yarn-site']
+  )
+  XmlConfig("capacity-scheduler.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['capacity-scheduler'],
+            owner=params.yarn_user,
+            mode='f'
+  )
+
+  if name in params.service_map:
+    service_name = params.service_map[name]
+
+    ServiceConfig(service_name,
+                  action="change_user",
+                  username = params.yarn_user,
+                  password = Script.get_password(params.yarn_user))
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/yarn_client.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/yarn_client.py
index 5cd2e69..b6a89b4 100644
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/yarn_client.py
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/scripts/yarn_client.py
@@ -20,11 +20,11 @@ Ambari Agent
 """
 
 import sys
-from resource_management import *
-from resource_management.libraries.functions import conf_select
+from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.constants import StackFeature
 from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.core.exceptions import ClientComponentHasNoStatus
 from yarn import yarn
 from ambari_commons import OSConst
 from ambari_commons.os_family_impl import OsFamilyImpl
@@ -51,15 +51,12 @@ class YarnClientWindows(YarnClient):
 
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class YarnClientDefault(YarnClient):
-  def get_component_name(self):
-    return "hadoop-client"
-
   def pre_upgrade_restart(self, env, upgrade_type=None):
     import params
     env.set_params(params)
 
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      stack_select.select("hadoop-client", params.version)
+      stack_select.select_packages(params.version)
 
 
 if __name__ == "__main__":
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/templates/input.config-mapreduce2.json.j2 b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/templates/input.config-mapreduce2.json.j2
new file mode 100644
index 0000000..1bab72b
--- /dev/null
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/templates/input.config-mapreduce2.json.j2
@@ -0,0 +1,48 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+ {
+  "input":[
+    {
+      "type":"mapred_historyserver",
+      "rowtype":"service",
+      "path":"{{default('/configurations/mapred-env/mapred_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/mapred-env/mapred_user', 'mapred')}}/mapred-{{default('configurations/mapred-env/mapred_user', 'mapred')}}-historyserver*.log"
+    }
+   ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "mapred_historyserver"
+          ]
+         }
+       },
+      "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n",
+      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
+      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\(%{JAVAFILE:file}:%{JAVAMETHOD:method}\\(%{INT:line_number}\\)\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+         }
+       }
+     }
+   ]
+ }
\ No newline at end of file
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/templates/mapred_jaas.conf.j2 b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/templates/mapred_jaas.conf.j2
new file mode 100644
index 0000000..67f4bcb
--- /dev/null
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/templates/mapred_jaas.conf.j2
@@ -0,0 +1,28 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+com.sun.security.jgss.krb5.initiate {
+  com.sun.security.auth.module.Krb5LoginModule required
+  renewTGT=false
+  doNotPrompt=true
+  useKeyTab=true
+  keyTab="{{mapred_jhs_keytab}}"
+  principal="{{mapred_jhs_principal_name}}"
+  storeKey=true
+  useTicketCache=false;
+};
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/templates/yarn_ats_jaas.conf.j2 b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/templates/yarn_ats_jaas.conf.j2
new file mode 100644
index 0000000..55308e8
--- /dev/null
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/templates/yarn_ats_jaas.conf.j2
@@ -0,0 +1,27 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+com.sun.security.jgss.krb5.initiate {
+    com.sun.security.auth.module.Krb5LoginModule required
+    renewTGT=false
+    doNotPrompt=true
+    useKeyTab=true
+    keyTab="{{yarn_timelineservice_keytab}}"
+    principal="{{yarn_timelineservice_principal_name}}"
+    storeKey=true
+    useTicketCache=false;
+};
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/templates/yarn_jaas.conf.j2 b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/templates/yarn_jaas.conf.j2
new file mode 100644
index 0000000..99f0a1b
--- /dev/null
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/templates/yarn_jaas.conf.j2
@@ -0,0 +1,36 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+  com.sun.security.auth.module.Krb5LoginModule required
+  useKeyTab=true
+  storeKey=true
+  useTicketCache=false
+  keyTab="{{rm_keytab}}"
+  principal="{{rm_principal_name}}";
+};
+com.sun.security.jgss.krb5.initiate {
+  com.sun.security.auth.module.Krb5LoginModule required
+  renewTGT=false
+  doNotPrompt=true
+  useKeyTab=true
+  keyTab="{{rm_keytab}}"
+  principal="{{rm_principal_name}}"
+  storeKey=true
+  useTicketCache=false;
+};
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/templates/yarn_nm_jaas.conf.j2 b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/templates/yarn_nm_jaas.conf.j2
new file mode 100644
index 0000000..b501c82
--- /dev/null
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/templates/yarn_nm_jaas.conf.j2
@@ -0,0 +1,27 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+com.sun.security.jgss.krb5.initiate {
+    com.sun.security.auth.module.Krb5LoginModule required
+    renewTGT=false
+    doNotPrompt=true
+    useKeyTab=true
+    keyTab="{{nodemanager_keytab}}"
+    principal="{{nodemanager_principal_name}}"
+    storeKey=true
+    useTicketCache=false;
+};
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/templates/container-executor.cfg.j2 b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/properties/container-executor.cfg.j2
similarity index 100%
rename from bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/package/templates/container-executor.cfg.j2
rename to bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/properties/container-executor.cfg.j2
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/role_command_order.json b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/role_command_order.json
new file mode 100644
index 0000000..9ab3b48
--- /dev/null
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/role_command_order.json
@@ -0,0 +1,21 @@
+{
+  "general_deps" : {
+    "_comment" : "dependencies for YARN",
+    "RESOURCEMANAGER-START": ["NAMENODE-START", "DATANODE-START","RANGER_USERSYNC-START"],
+    "RESOURCEMANAGER_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START"],
+    "RESOURCEMANAGER-STOP": ["HIVE_SERVER_INTERACTIVE-STOP", "SPARK_THRIFTSERVER-STOP", "SPARK2_THRIFTSERVER-STOP"],
+    "NODEMANAGER-START": ["NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START"],
+    "RESOURCEMANAGER-RESTART": ["NAMENODE-RESTART"],
+    "NODEMANAGER-RESTART": ["NAMENODE-RESTART"],
+    "NODEMANAGER-STOP": ["HIVE_SERVER_INTERACTIVE-STOP"],
+    "HISTORYSERVER-START": ["NAMENODE-START", "DATANODE-START"],
+    "HISTORYSERVER-RESTART": ["NAMENODE-RESTART"],
+    "YARN_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+    "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"]
+
+  },
+  "_comment" : "Dependencies that are used in ResourceManager HA cluster",
+  "resourcemanager_optional_ha" : {
+    "RESOURCEMANAGER-START": ["ZOOKEEPER_SERVER-START"]
+  }
+}
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/themes-mapred/directories.json b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/themes-mapred/directories.json
new file mode 100644
index 0000000..02ffdcb
--- /dev/null
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/themes-mapred/directories.json
@@ -0,0 +1,137 @@
+{
+  "name": "directories",
+  "description": "Directories theme for MAPREDUCE2 service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "directories",
+        "tabs": [
+          {
+            "name": "directories",
+            "display-name": "Directories",
+            "layout": {
+              "tab-columns": "1",
+              "tab-rows": "4",
+              "sections": [
+                {
+                  "name": "subsection-data-dirs",
+                  "display-name": "DATA DIRS",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-data-dirs",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "subsection-log-dirs",
+                  "display-name": "LOG DIRS",
+                  "row-index": "1",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-log-dirs",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "subsection-pid-dirs",
+                  "display-name": "PID DIRS",
+                  "row-index": "2",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-pid-dirs",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "mapred-site/mapreduce.jobhistory.done-dir",
+          "subsection-name": "subsection-data-dirs"
+        },
+        {
+          "config": "mapred-site/mapreduce.jobhistory.intermediate-done-dir",
+          "subsection-name": "subsection-data-dirs"
+        },
+        {
+          "config": "mapred-site/yarn.app.mapreduce.am.staging-dir",
+          "subsection-name": "subsection-data-dirs"
+        },
+        {
+          "config": "mapred-env/mapred_pid_dir_prefix",
+          "subsection-name": "subsection-pid-dirs"
+        },
+        {
+          "config": "mapred-env/mapred_log_dir_prefix",
+          "subsection-name": "subsection-log-dirs"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "mapred-site/mapreduce.jobhistory.done-dir",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "mapred-site/mapreduce.jobhistory.intermediate-done-dir",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "mapred-site/yarn.app.mapreduce.am.staging-dir",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "mapred-env/mapred_pid_dir_prefix",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "mapred-env/mapred_log_dir_prefix",
+        "widget": {
+          "type": "text-field"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/themes/directories.json b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/themes/directories.json
new file mode 100644
index 0000000..629e82e
--- /dev/null
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/YARN/themes/directories.json
@@ -0,0 +1,177 @@
+{
+  "name": "directories",
+  "description": "Directories theme for YARN service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "directories",
+        "tabs": [
+          {
+            "name": "directories",
+            "display-name": "Directories",
+            "layout": {
+              "tab-columns": "1",
+              "tab-rows": "4",
+              "sections": [
+                {
+                  "name": "subsection-data-dirs",
+                  "display-name": "DATA DIRS",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-data-dirs",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "subsection-log-dirs",
+                  "display-name": "LOG DIRS",
+                  "row-index": "1",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-log-dirs",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "subsection-pid-dirs",
+                  "display-name": "PID DIRS",
+                  "row-index": "2",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-pid-dirs",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "yarn-site/yarn.nodemanager.local-dirs",
+          "subsection-name": "subsection-data-dirs"
+        },
+        {
+          "config": "yarn-site/yarn.nodemanager.log-dirs",
+          "subsection-name": "subsection-log-dirs"
+        },
+        {
+          "config": "yarn-site/yarn.nodemanager.remote-app-log-dir",
+          "subsection-name": "subsection-log-dirs"
+        },
+        {
+          "config": "yarn-env/yarn_pid_dir_prefix",
+          "subsection-name": "subsection-pid-dirs"
+        },
+        {
+          "config": "yarn-env/yarn_log_dir_prefix",
+          "subsection-name": "subsection-log-dirs"
+        },
+        {
+          "config": "yarn-site/yarn.timeline-service.entity-group-fs-store.active-dir",
+          "subsection-name": "subsection-data-dirs"
+        },
+        {
+          "config": "yarn-site/yarn.node-labels.fs-store.root-dir",
+          "subsection-name": "subsection-data-dirs"
+        },
+        {
+          "config": "yarn-site/yarn.nodemanager.recovery.dir",
+          "subsection-name": "subsection-data-dirs"
+        },
+        {
+          "config": "yarn-site/yarn.timeline-service.entity-group-fs-store.done-dir",
+          "subsection-name": "subsection-data-dirs"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "yarn-site/yarn.nodemanager.local-dirs",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "yarn-site/yarn.nodemanager.log-dirs",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "yarn-site/yarn.nodemanager.remote-app-log-dir",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "yarn-env/yarn_pid_dir_prefix",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "yarn-env/yarn_log_dir_prefix",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "yarn-site/yarn.timeline-service.entity-group-fs-store.active-dir",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "yarn-site/yarn.node-labels.fs-store.root-dir",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "yarn-site/yarn.nodemanager.recovery.dir",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "yarn-site/yarn.timeline-service.entity-group-fs-store.done-dir",
+        "widget": {
+          "type": "text-field"
+        }
+      }
+    ]
+  }
+}