Posted to commits@ambari.apache.org by jl...@apache.org on 2016/03/21 19:45:28 UTC

[3/3] ambari git commit: AMBARI-15420: Refactor resource_management library (jluniya)

AMBARI-15420: Refactor resource_management library (jluniya)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0f3bf8aa
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0f3bf8aa
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0f3bf8aa

Branch: refs/heads/trunk
Commit: 0f3bf8aa8b126a50af778d8504c73a5c8f24b334
Parents: aa8409f
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Mon Mar 21 11:45:14 2016 -0700
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Mon Mar 21 11:45:14 2016 -0700

----------------------------------------------------------------------
 .../ambari_agent/HostCheckReportFileHandler.py  |   2 +-
 .../python/resource_management/core/utils.py    |   8 +-
 .../libraries/functions/conf_select.py          | 175 +++++++++--------
 .../libraries/functions/copy_tarball.py         | 186 ++++++++++++-------
 .../libraries/functions/default.py              |   2 +
 .../dynamic_variable_interpretation.py          |  10 +-
 .../libraries/functions/get_stack_version.py    |  17 +-
 .../libraries/functions/install_windows_msi.py  |  34 ++--
 .../libraries/functions/repo_version_history.py |   4 +-
 .../libraries/functions/setup_ranger_plugin.py  |   8 +-
 .../functions/setup_ranger_plugin_xml.py        |  14 +-
 .../libraries/functions/stack_select.py         |  94 +++++-----
 .../libraries/functions/stack_tools.py          |  85 +++++++++
 .../libraries/functions/version_select_util.py  |   9 +-
 .../libraries/script/script.py                  |  23 ++-
 .../server/api/services/AmbariMetaInfo.java     |   1 +
 .../server/stack/ConfigurationDirectory.java    |  44 ++++-
 .../ambari/server/stack/ServiceModule.java      |   2 +-
 .../server/stack/StackDefinitionDirectory.java  |   9 +-
 .../apache/ambari/server/stack/StackModule.java |   2 +-
 .../ambari/server/state/PropertyInfo.java       |   3 +-
 .../server/state/ValueAttributesInfo.java       |  32 ++++
 .../apache/ambari/server/utils/JsonUtils.java   |  48 +++++
 .../package/scripts/accumulo_script.py          |   6 +-
 .../package/scripts/status_params.py            |   2 +-
 .../0.1.0.2.3/package/scripts/atlas_client.py   |   2 +-
 .../0.5.0.2.1/package/scripts/status_params.py  |   2 +-
 .../0.96.0.2.0/package/scripts/status_params.py |   2 +-
 .../0.12.0.2.0/package/scripts/hcat_client.py   |   6 +-
 .../package/scripts/hive_server_upgrade.py      |   4 +-
 .../0.12.0.2.0/package/scripts/params_linux.py  |   6 +-
 .../0.12.0.2.0/package/scripts/status_params.py |   2 +-
 .../0.5.0.2.2/package/scripts/knox_gateway.py   |   2 +-
 .../OOZIE/4.0.0.2.0/package/scripts/oozie.py    |   2 +-
 .../4.0.0.2.0/package/scripts/oozie_server.py   |   8 +-
 .../package/scripts/oozie_server_upgrade.py     |  10 +-
 .../4.0.0.2.0/package/scripts/status_params.py  |   2 +-
 .../RANGER/0.4.0/package/scripts/params.py      |   2 +-
 .../SPARK/1.2.0.2.2/package/scripts/params.py   |   6 +-
 .../1.4.4.2.0/package/scripts/params_linux.py   |   2 +-
 .../0.9.1.2.1/package/scripts/status_params.py  |   2 +-
 .../0.4.0.2.1/package/scripts/params_linux.py   |   2 +-
 .../0.4.0.2.1/package/scripts/params_windows.py |   2 +-
 .../2.1.0.2.0/package/scripts/params_linux.py   |   3 +-
 .../YARN/2.1.0.2.0/package/scripts/yarn.py      |   2 +-
 .../3.4.5.2.0/package/scripts/status_params.py  |   2 +-
 .../custom_actions/scripts/install_packages.py  |  12 +-
 .../custom_actions/scripts/remove_bits.py       |   2 +-
 .../custom_actions/scripts/ru_set_all.py        |  16 +-
 .../HDP/2.0.6/configuration/cluster-env.xml     |  29 +++
 .../scripts/shared_initialization.py            |   4 +-
 .../scripts/shared_initialization.py            |   7 +-
 .../HDP/2.0.6/properties/stack_tools.json       |   4 +
 .../HDP/2.0.6/properties/tarball_map.json       |  34 ++++
 .../ambari/server/stack/ServiceModuleTest.java  |   3 +-
 .../ambari/server/utils/TestJsonUtils.java      |  48 +++++
 .../python/custom_actions/test_ru_set_all.py    |   2 +-
 .../python/host_scripts/TestAlertDiskSpace.py   |   4 +-
 .../stacks/2.0.6/HDFS/test_hdfs_client.py       |   2 +-
 .../stacks/2.0.6/YARN/test_mapreduce2_client.py |   2 +-
 .../stacks/2.0.6/YARN/test_yarn_client.py       |   2 +-
 .../hooks/after-INSTALL/test_after_install.py   | 185 +++++++++---------
 .../python/stacks/2.1/TEZ/test_tez_client.py    |   4 +-
 .../src/main/resources/view.log4j.properties    |  18 ++
 .../src/main/resources/view.log4j.properties    |  18 ++
 .../src/main/resources/view.log4j.properties    |  17 ++
 .../src/main/resources/view.log4j.properties    |  18 ++
 .../src/main/resources/view.log4j.properties    |  18 ++
 .../src/main/resources/view.log4j.properties    |  18 ++
 .../src/main/resources/view.log4j.properties    |  18 ++
 .../src/main/resources/view.log4j.properties    |  18 ++
 71 files changed, 992 insertions(+), 402 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/0f3bf8aa/ambari-agent/src/main/python/ambari_agent/HostCheckReportFileHandler.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/HostCheckReportFileHandler.py b/ambari-agent/src/main/python/ambari_agent/HostCheckReportFileHandler.py
index ee7db0a..940b597d 100644
--- a/ambari-agent/src/main/python/ambari_agent/HostCheckReportFileHandler.py
+++ b/ambari-agent/src/main/python/ambari_agent/HostCheckReportFileHandler.py
@@ -89,7 +89,7 @@ class HostCheckReportFileHandler:
 
   def _stack_list_directory(self):
     """
-    Return filtered list of /usr/hdp directory allowed to be removed
+    Return filtered list of <stack-root> directory allowed to be removed
     :rtype list
     """
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/0f3bf8aa/ambari-common/src/main/python/resource_management/core/utils.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/core/utils.py b/ambari-common/src/main/python/resource_management/core/utils.py
index 71d0008..c39e9b5 100644
--- a/ambari-common/src/main/python/resource_management/core/utils.py
+++ b/ambari-common/src/main/python/resource_management/core/utils.py
@@ -25,6 +25,7 @@ import sys
 import cStringIO
 from functools import wraps
 from resource_management.core.exceptions import Fail
+from itertools import chain, repeat, islice
 
 PASSWORDS_HIDE_STRING = "[PROTECTED]"
 
@@ -148,4 +149,9 @@ def lazy_property(undecorated):
       return v
 
   return decorated
-  
\ No newline at end of file
+
+def pad_infinite(iterable, padding=None):
+  return chain(iterable, repeat(padding))
+
+def pad(iterable, size, padding=None):
+  return islice(pad_infinite(iterable, padding), size)
\ No newline at end of file

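The new pad helpers simply extend an iterable with a padding value: pad_infinite() chains an endless stream of the padding after the iterable, and pad() slices that stream down to a fixed length. A minimal usage sketch (not part of the commit; the sample values are illustrative only):

from itertools import chain, repeat, islice

def pad_infinite(iterable, padding=None):
  return chain(iterable, repeat(padding))

def pad(iterable, size, padding=None):
  return islice(pad_infinite(iterable, padding), size)

# Padding a short version tuple out to four components:
print(list(pad([2, 3], 4, 0)))   # [2, 3, 0, 0]
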
http://git-wip-us.apache.org/repos/asf/ambari/blob/0f3bf8aa/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py b/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
index 770595f..d4e88e7 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
@@ -18,8 +18,9 @@ limitations under the License.
 
 """
 
-__all__ = ["select", "create", "get_hadoop_conf_dir", "get_hadoop_dir"]
+__all__ = ["select", "create", "get_hadoop_conf_dir", "get_hadoop_dir", "get_package_dirs"]
 
+import copy
 import os
 import version
 import stack_select
@@ -33,166 +34,169 @@ from resource_management.core.resources.system import Directory
 from resource_management.core.resources.system import Execute
 from resource_management.core.resources.system import Link
 from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import stack_tools
 from resource_management.core.exceptions import Fail
 from resource_management.libraries.functions.version import compare_versions, format_stack_version
 from resource_management.core.shell import as_sudo
 
+STACK_ROOT_PATTERN = "{{ stack_root }}"
 
-PACKAGE_DIRS = {
+_PACKAGE_DIRS = {
   "accumulo": [
     {
       "conf_dir": "/etc/accumulo/conf",
-      "current_dir": "/usr/hdp/current/accumulo-client/conf"
+      "current_dir": "{0}/current/accumulo-client/conf".format(STACK_ROOT_PATTERN)
     }
   ],
   "falcon": [
     {
       "conf_dir": "/etc/falcon/conf",
-      "current_dir": "/usr/hdp/current/falcon-client/conf"
+      "current_dir": "{0}/current/falcon-client/conf".format(STACK_ROOT_PATTERN)
     }
   ],
   "hadoop": [
     {
       "conf_dir": "/etc/hadoop/conf",
-      "current_dir": "/usr/hdp/current/hadoop-client/conf"
+      "current_dir": "{0}/current/hadoop-client/conf".format(STACK_ROOT_PATTERN)
     }
   ],
   "hbase": [
     {
       "conf_dir": "/etc/hbase/conf",
-      "current_dir": "/usr/hdp/current/hbase-client/conf"
+      "current_dir": "{0}/current/hbase-client/conf".format(STACK_ROOT_PATTERN)
     }
   ],
   "hive": [
     {
       "conf_dir": "/etc/hive/conf",
-      "current_dir": "/usr/hdp/current/hive-client/conf"
+      "current_dir": "{0}/current/hive-client/conf".format(STACK_ROOT_PATTERN)
     }
   ],
   "hive2": [
     {
       "conf_dir": "/etc/hive2/conf",
-      "current_dir": "/usr/hdp/current/hive-server2-hive2/conf"
+      "current_dir": "{0}/current/hive-server2-hive2/conf".format(STACK_ROOT_PATTERN)
     }
   ],
   "kafka": [
     {
       "conf_dir": "/etc/kafka/conf",
-      "current_dir": "/usr/hdp/current/kafka-broker/conf"
+      "current_dir": "{0}/current/kafka-broker/conf".format(STACK_ROOT_PATTERN)
     }
   ],
   "knox": [
     {
       "conf_dir": "/etc/knox/conf",
-      "current_dir": "/usr/hdp/current/knox-server/conf"
+      "current_dir": "{0}/current/knox-server/conf".format(STACK_ROOT_PATTERN)
     }
   ],
   "mahout": [
     {
       "conf_dir": "/etc/mahout/conf",
-      "current_dir": "/usr/hdp/current/mahout-client/conf"
+      "current_dir": "{0}/current/mahout-client/conf".format(STACK_ROOT_PATTERN)
     }
   ],
   "oozie": [
     {
       "conf_dir": "/etc/oozie/conf",
-      "current_dir": "/usr/hdp/current/oozie-client/conf"
+      "current_dir": "{0}/current/oozie-client/conf".format(STACK_ROOT_PATTERN)
     }
   ],
   "phoenix": [
     {
       "conf_dir": "/etc/phoenix/conf",
-      "current_dir": "/usr/hdp/current/phoenix-client/conf"
+      "current_dir": "{0}/current/phoenix-client/conf".format(STACK_ROOT_PATTERN)
     }
   ],
   "ranger-admin": [
     {
       "conf_dir": "/etc/ranger/admin/conf",
-      "current_dir": "/usr/hdp/current/ranger-admin/conf"
+      "current_dir": "{0}/current/ranger-admin/conf".format(STACK_ROOT_PATTERN)
     }
   ],
   "ranger-kms": [
     {
       "conf_dir": "/etc/ranger/kms/conf",
-      "current_dir": "/usr/hdp/current/ranger-kms/conf"
+      "current_dir": "{0}/current/ranger-kms/conf".format(STACK_ROOT_PATTERN)
     }
   ],
   "ranger-usersync": [
     {
       "conf_dir": "/etc/ranger/usersync/conf",
-      "current_dir": "/usr/hdp/current/ranger-usersync/conf"
+      "current_dir": "{0}/current/ranger-usersync/conf".format(STACK_ROOT_PATTERN)
     }
   ],
   "slider": [
     {
       "conf_dir": "/etc/slider/conf",
-      "current_dir": "/usr/hdp/current/slider-client/conf"
+      "current_dir": "{0}/current/slider-client/conf".format(STACK_ROOT_PATTERN)
     }
   ],
   "spark": [
     {
       "conf_dir": "/etc/spark/conf",
-      "current_dir": "/usr/hdp/current/spark-client/conf"
+      "current_dir": "{0}/current/spark-client/conf".format(STACK_ROOT_PATTERN)
     }
   ],
   "sqoop": [
     {
       "conf_dir": "/etc/sqoop/conf",
-      "current_dir": "/usr/hdp/current/sqoop-client/conf"
+      "current_dir": "{0}/current/sqoop-client/conf".format(STACK_ROOT_PATTERN)
     }
   ],
   "storm": [
     {
       "conf_dir": "/etc/storm/conf",
-      "current_dir": "/usr/hdp/current/storm-client/conf"
+      "current_dir": "{0}/current/storm-client/conf".format(STACK_ROOT_PATTERN)
     }
   ],
   "tez": [
     {
       "conf_dir": "/etc/tez/conf",
-      "current_dir": "/usr/hdp/current/tez-client/conf"
+      "current_dir": "{0}/current/tez-client/conf".format(STACK_ROOT_PATTERN)
     }
   ],
   "zookeeper": [
     {
       "conf_dir": "/etc/zookeeper/conf",
-      "current_dir": "/usr/hdp/current/zookeeper-client/conf"
+      "current_dir": "{0}/current/zookeeper-client/conf".format(STACK_ROOT_PATTERN)
     }
   ],
   "pig": [
     {
       "conf_dir": "/etc/pig/conf",
-      "current_dir": "/usr/hdp/current/pig-client/conf"
+      "current_dir": "{0}/current/pig-client/conf".format(STACK_ROOT_PATTERN)
     }
   ],
   "flume": [
     {
       "conf_dir": "/etc/flume/conf",
-      "current_dir": "/usr/hdp/current/flume-server/conf"
+      "current_dir": "{0}/current/flume-server/conf".format(STACK_ROOT_PATTERN)
     }
   ],
   "storm-slider-client": [
     {
       "conf_dir": "/etc/storm-slider-client/conf",
-      "current_dir": "/usr/hdp/current/storm-slider-client/conf"
+      "current_dir": "{0}/current/storm-slider-client/conf".format(STACK_ROOT_PATTERN)
     }
   ],
   "hive-hcatalog": [
     {
       "conf_dir": "/etc/hive-webhcat/conf",
       "prefix": "/etc/hive-webhcat",
-      "current_dir": "/usr/hdp/current/hive-webhcat/etc/webhcat"
+      "current_dir": "{0}/current/hive-webhcat/etc/webhcat".format(STACK_ROOT_PATTERN)
     },
     {
       "conf_dir": "/etc/hive-hcatalog/conf",
       "prefix": "/etc/hive-hcatalog",
-      "current_dir": "/usr/hdp/current/hive-webhcat/etc/hcatalog"
+      "current_dir": "{0}/current/hive-webhcat/etc/hcatalog".format(STACK_ROOT_PATTERN)
     }
   ]
 }
 
-def get_cmd(command, package, version):
-  return ('ambari-python-wrap','/usr/bin/conf-select', command, '--package', package, '--stack-version', version, '--conf-version', '0')
+def _get_cmd(command, package, version):
+  conf_selector_path = stack_tools.get_stack_tool_path(stack_tools.CONF_SELECTOR_NAME)
+  return ('ambari-python-wrap', conf_selector_path, command, '--package', package, '--stack-version', version, '--conf-version', '0')
 
 def _valid(stack_name, package, ver):
   if stack_name != "HDP":
@@ -203,12 +207,25 @@ def _valid(stack_name, package, ver):
 
   return True
 
+def get_package_dirs():
+  """
+  Get package dir mappings
+  :return:
+  """
+  stack_root = Script.get_stack_root()
+  package_dirs = copy.deepcopy(_PACKAGE_DIRS)
+  for package_name, directories in package_dirs.iteritems():
+    for dir in directories:
+      current_dir = dir['current_dir']
+      current_dir = current_dir.replace(STACK_ROOT_PATTERN, stack_root)
+      dir['current_dir'] = current_dir
+  return package_dirs
 
 def create(stack_name, package, version, dry_run = False):
   """
   Creates a config version for the specified package
   :param stack_name: the name of the stack
-  :param package: the name of the package, as-used by conf-select
+  :param package: the name of the package, as-used by <conf-selector-tool>
   :param version: the version number to create
   :param dry_run: False to create the versioned config directory, True to only return what would be created
   :return List of directories created
@@ -220,11 +237,11 @@ def create(stack_name, package, version, dry_run = False):
 
   command = "dry-run-create" if dry_run else "create-conf-dir"
 
-  code, stdout, stderr = shell.call(get_cmd(command, package, version), logoutput=False, quiet=False, sudo=True, stderr = subprocess.PIPE)
+  code, stdout, stderr = shell.call(_get_cmd(command, package, version), logoutput=False, quiet=False, sudo=True, stderr = subprocess.PIPE)
 
-  # conf-select can set more than one directory
+  # <conf-selector-tool> can set more than one directory
   # per package, so return that list, especially for dry_run
-  # > conf-select dry-run-create --package hive-hcatalog --stack-version 2.4.0.0-169 0
+  # > <conf-selector-tool> dry-run-create --package hive-hcatalog --stack-version 2.4.0.0-169 0
   # /etc/hive-webhcat/2.4.0.0-169/0
   # /etc/hive-hcatalog/2.4.0.0-169/0
   created_directories = []
@@ -248,7 +265,7 @@ def select(stack_name, package, version, try_create=True, ignore_errors=False):
   """
   Selects a config version for the specified package.
   :param stack_name: the name of the stack
-  :param package: the name of the package, as-used by conf-select
+  :param package: the name of the package, as-used by <conf-selector-tool>
   :param version: the version number to create
   :param try_create: optional argument to attempt to create the directory before setting it
   :param ignore_errors: optional argument to ignore any error and simply log a warning
@@ -260,15 +277,16 @@ def select(stack_name, package, version, try_create=True, ignore_errors=False):
     if try_create:
       create(stack_name, package, version)
 
-    shell.checked_call(get_cmd("set-conf-dir", package, version), logoutput=False, quiet=False, sudo=True)
+    shell.checked_call(_get_cmd("set-conf-dir", package, version), logoutput=False, quiet=False, sudo=True)
 
     # for consistency sake, we must ensure that the /etc/<component>/conf symlink exists and
-    # points to /usr/hdp/current/<component>/conf - this is because some people still prefer to
-    # use /etc/<component>/conf even though /usr/hdp is the "future"
-    if package in PACKAGE_DIRS:
+    # points to <stack-root>/current/<component>/conf - this is because some people still prefer to
+    # use /etc/<component>/conf even though <stack-root> is the "future"
+    package_dirs = get_package_dirs()
+    if package in package_dirs:
       Logger.info("Ensuring that {0} has the correct symlink structure".format(package))
 
-      directory_list = PACKAGE_DIRS[package]
+      directory_list = package_dirs[package]
       for directory_structure in directory_list:
         conf_dir = directory_structure["conf_dir"]
         current_dir = directory_structure["current_dir"]
@@ -299,30 +317,31 @@ def get_hadoop_conf_dir(force_latest_on_upgrade=False):
   """
   Gets the shared hadoop conf directory using:
   1.  Start with /etc/hadoop/conf
-  2.  When the stack is greater than HDP-2.2, use /usr/hdp/current/hadoop-client/conf
+  2.  When the stack is greater than HDP-2.2, use <stack-root>/current/hadoop-client/conf
   3.  Only when doing a RU and HDP-2.3 or higher, use the value as computed
-      by conf-select.  This is in the form /usr/hdp/VERSION/hadoop/conf to make sure
+      by <conf-selector-tool>.  This is in the form <stack-root>/VERSION/hadoop/conf to make sure
       the configs are written in the correct place. However, if the component itself has
       not yet been upgraded, it should use the hadoop configs from the prior version.
-      This will perform an hdp-select status to determine which version to use.
+      This will perform an <stack-selector-tool> status to determine which version to use.
   :param force_latest_on_upgrade:  if True, then force the returned path to always
-  be that of the upgrade target version, even if hdp-select has not been called. This
+  be that of the upgrade target version, even if <stack-selector-tool> has not been called. This
   is primarily used by hooks like before-ANY to ensure that hadoop environment
   configurations are written to the correct location since they are written out
-  before the hdp-select/conf-select would have been called.
+  before the <stack-selector-tool>/<conf-selector-tool> would have been called.
   """
   hadoop_conf_dir = "/etc/hadoop/conf"
   stack_name = None
+  stack_root = Script.get_stack_root()
   version = None
   allow_setting_conf_select_symlink = False
 
   if not Script.in_stack_upgrade():
     # During normal operation, the HDP stack must be 2.3 or higher
     if Script.is_stack_greater_or_equal("2.2"):
-      hadoop_conf_dir = "/usr/hdp/current/hadoop-client/conf"
+      hadoop_conf_dir = os.path.join(stack_root, "current", "hadoop-client", "conf")
 
     if Script.is_stack_greater_or_equal("2.3"):
-      hadoop_conf_dir = "/usr/hdp/current/hadoop-client/conf"
+      hadoop_conf_dir = os.path.join(stack_root, "current", "hadoop-client", "conf")
       stack_name = default("/hostLevelParams/stack_name", None)
       version = default("/commandParams/version", None)
 
@@ -337,13 +356,13 @@ def get_hadoop_conf_dir(force_latest_on_upgrade=False):
 
     Type__|_Source_|_Target_|_Direction_____________|_Comment_____________________________________________________________
     Normal|        | 2.2    |                       | Use /etc/hadoop/conf
-    Normal|        | 2.3    |                       | Use /etc/hadoop/conf, which should be a symlink to /usr/hdp/current/hadoop-client/conf
-    EU    | 2.1    | 2.3    | Upgrade               | Use versioned /usr/hdp/current/hadoop-client/conf
+    Normal|        | 2.3    |                       | Use /etc/hadoop/conf, which should be a symlink to <stack-root>/current/hadoop-client/conf
+    EU    | 2.1    | 2.3    | Upgrade               | Use versioned <stack-root>/current/hadoop-client/conf
           |        |        | No Downgrade Allowed  | Invalid
-    EU/RU | 2.2    | 2.2.*  | Any                   | Use /usr/hdp/current/hadoop-client/conf
-    EU/RU | 2.2    | 2.3    | Upgrade               | Use /usr/hdp/$version/hadoop/conf, which should be a symlink destination
-          |        |        | Downgrade             | Use /usr/hdp/current/hadoop-client/conf
-    EU/RU | 2.3    | 2.3.*  | Any                   | Use /usr/hdp/$version/hadoop/conf, which should be a symlink destination
+    EU/RU | 2.2    | 2.2.*  | Any                   | Use <stack-root>/current/hadoop-client/conf
+    EU/RU | 2.2    | 2.3    | Upgrade               | Use <stack-root>/$version/hadoop/conf, which should be a symlink destination
+          |        |        | Downgrade             | Use <stack-root>/current/hadoop-client/conf
+    EU/RU | 2.3    | 2.3.*  | Any                   | Use <stack-root>/$version/hadoop/conf, which should be a symlink destination
     '''
 
     # The method "is_stack_greater_or_equal" uses "stack_version" which is the desired stack, e.g., 2.2 or 2.3
@@ -351,7 +370,7 @@ def get_hadoop_conf_dir(force_latest_on_upgrade=False):
     # In an RU Downgrade from HDP 2.3 to 2.2, the first thing we do is 
     # rm /etc/[component]/conf and then mv /etc/[component]/conf.backup /etc/[component]/conf
     if Script.is_stack_greater_or_equal("2.2"):
-      hadoop_conf_dir = "/usr/hdp/current/hadoop-client/conf"
+      hadoop_conf_dir = os.path.join(stack_root, "current", "hadoop-client", "conf")
 
       # This contains the "version", including the build number, that is actually used during a stack upgrade and
       # is the version upgrading/downgrading to.
@@ -366,16 +385,18 @@ def get_hadoop_conf_dir(force_latest_on_upgrade=False):
       Logger.info("In the middle of a stack upgrade/downgrade for Stack {0} and destination version {1}, determining which hadoop conf dir to use.".format(stack_name, version))
       # This is the version either upgrading or downgrading to.
       if compare_versions(format_stack_version(version), "2.3.0.0") >= 0:
-        # Determine if hdp-select has been run and if not, then use the current
+        # Determine if <stack-selector-tool> has been run and if not, then use the current
         # hdp version until this component is upgraded.
         if not force_latest_on_upgrade:
           current_stack_version = stack_select.get_role_component_current_stack_version()
           if current_stack_version is not None and version != current_stack_version:
             version = current_stack_version
-            Logger.info("hdp-select has not yet been called to update the symlink for this component, keep using version {0}".format(current_stack_version))
+            stack_selector_name = stack_tools.get_stack_tool_name(stack_tools.STACK_SELECTOR_NAME)
+            Logger.info("{0} has not yet been called to update the symlink for this component, "
+                        "keep using version {1}".format(stack_selector_name, current_stack_version))
 
-        # Only change the hadoop_conf_dir path, don't conf-select this older version
-        hadoop_conf_dir = "/usr/hdp/{0}/hadoop/conf".format(version)
+        # Only change the hadoop_conf_dir path, don't <conf-selector-tool> this older version
+        hadoop_conf_dir = os.path.join(stack_root, version, "hadoop", "conf")
         Logger.info("Hadoop conf dir: {0}".format(hadoop_conf_dir))
 
         allow_setting_conf_select_symlink = True
@@ -385,10 +406,12 @@ def get_hadoop_conf_dir(force_latest_on_upgrade=False):
     # upgrading stack to version 2.3.0.0 or higher (which may be upgrade or downgrade), then consider setting the
     # symlink for /etc/hadoop/conf.
     # If a host does not have any HDFS or YARN components (e.g., only ZK), then it will not contain /etc/hadoop/conf
-    # Therefore, any calls to conf-select will fail.
+    # Therefore, any calls to <conf-selector-tool> will fail.
     # For that reason, if the hadoop conf directory exists, then make sure it is set.
     if os.path.exists(hadoop_conf_dir):
-      Logger.info("The hadoop conf dir {0} exists, will call conf-select on it for version {1}".format(hadoop_conf_dir, version))
+      conf_selector_name = stack_tools.get_stack_tool_name(stack_tools.CONF_SELECTOR_NAME)
+      Logger.info("The hadoop conf dir {0} exists, will call {1} on it for version {2}".format(
+              hadoop_conf_dir, conf_selector_name, version))
       select(stack_name, "hadoop", version)
 
   Logger.info("Using hadoop conf dir: {0}".format(hadoop_conf_dir))
@@ -403,18 +426,19 @@ def convert_conf_directories_to_symlinks(package, version, dirs, skip_existing_l
   - Creates a /etc/<component>/conf.backup directory
   - Copies all configs from /etc/<component>/conf to conf.backup
   - Removes /etc/<component>/conf
-  - Creates /etc/<component>/<version>/0 via conf-select
-  - /usr/hdp/current/<component>-client/conf -> /etc/<component>/<version>/0 via conf-select
+  - Creates /etc/<component>/<version>/0 via <conf-selector-tool>
+  - <stack-root>/current/<component>-client/conf -> /etc/<component>/<version>/0 via <conf-selector-tool>
   - Links /etc/<component>/conf to <something> depending on function paramter
-  -- /etc/<component>/conf -> /usr/hdp/current/[component]-client/conf (usually)
+  -- /etc/<component>/conf -> <stack-root>/current/[component]-client/conf (usually)
   -- /etc/<component>/conf -> /etc/<component>/conf.backup (only when supporting < HDP 2.3)
 
   :param package: the package to create symlinks for (zookeeper, falcon, etc)
-  :param version: the version number to use with conf-select (2.3.0.0-1234)
-  :param dirs: the directories associated with the package (from PACKAGE_DIRS)
+  :param version: the version number to use with <conf-selector-tool> (2.3.0.0-1234)
+  :param dirs: the directories associated with the package (from get_package_dirs())
   :param skip_existing_links: True to not do any work if already a symlink
   :param link_to: link to "current" or "backup"
   """
+  stack_name = Script.get_stack_name()
   bad_dirs = []
   for dir_def in dirs:
     if not os.path.exists(dir_def['conf_dir']):
@@ -450,7 +474,7 @@ def convert_conf_directories_to_symlinks(package, version, dirs, skip_existing_l
   # we're already in the HDP stack
   # Create the versioned /etc/[component]/[version]/0 folder.
   # The component must be installed on the host.
-  versioned_confs = create("HDP", package, version, dry_run = True)
+  versioned_confs = create(stack_name, package, version, dry_run = True)
 
   Logger.info("Package {0} will have new conf directories: {1}".format(package, ", ".join(versioned_confs)))
 
@@ -460,7 +484,7 @@ def convert_conf_directories_to_symlinks(package, version, dirs, skip_existing_l
       need_dirs.append(d)
 
   if len(need_dirs) > 0:
-    create("HDP", package, version)
+    create(stack_name, package, version)
 
     # find the matching definition and back it up (not the most efficient way) ONLY if there is more than one directory
     if len(dirs) > 1:
@@ -478,9 +502,9 @@ def convert_conf_directories_to_symlinks(package, version, dirs, skip_existing_l
         only_if = format("ls -d {old_conf}/*"))
 
 
-  # /usr/hdp/current/[component] is already set to to the correct version, e.g., /usr/hdp/[version]/[component]
+  # <stack-root>/current/[component] is already set to the correct version, e.g., <stack-root>/[version]/[component]
   
-  select("HDP", package, version, ignore_errors = True)
+  select(stack_name, package, version, ignore_errors = True)
 
   # Symlink /etc/[component]/conf to /etc/[component]/conf.backup
   try:
@@ -494,7 +518,7 @@ def convert_conf_directories_to_symlinks(package, version, dirs, skip_existing_l
         Directory(new_symlink, action="delete")
 
       if link_to in ["current", "backup"]:
-        # link /etc/[component]/conf -> /usr/hdp/current/[component]-client/conf
+        # link /etc/[component]/conf -> <stack-root>/current/[component]-client/conf
         if link_to == "backup":
           Link(new_symlink, to = backup_dir)
         else:
@@ -508,7 +532,7 @@ def convert_conf_directories_to_symlinks(package, version, dirs, skip_existing_l
 def _seed_new_configuration_directories(package, created_directories):
   """
   Copies any files from the "current" configuration directory to the directories which were
-  newly created with conf-select. This function helps ensure that files which are not tracked
+  newly created with <conf-selector-tool>. This function helps ensure that files which are not tracked
   by Ambari will be available after performing a stack upgrade. Although old configurations
   will be copied as well, they will be overwritten when the components are writing out their
   configs after upgrade during their restart.
@@ -516,18 +540,19 @@ def _seed_new_configuration_directories(package, created_directories):
   This function will catch all errors, logging them, but not raising an exception. This is to
   prevent problems here from stopping and otherwise healthy upgrade.
 
-  :param package: the conf-select package name
-  :param created_directories: a list of directories that conf-select said it created
+  :param package: the <conf-selector-tool> package name
+  :param created_directories: a list of directories that <conf-selector-tool> said it created
   :return: None
   """
-  if package not in PACKAGE_DIRS:
+  package_dirs = get_package_dirs()
+  if package not in package_dirs:
     Logger.warning("Unable to seed newly created configuration directories for {0} because it is an unknown component".format(package))
     return
 
   # seed the directories with any existing configurations
   # this allows files which are not tracked by Ambari to be available after an upgrade
   Logger.info("Seeding versioned configuration directories for {0}".format(package))
-  expected_directories = PACKAGE_DIRS[package]
+  expected_directories = package_dirs[package]
 
   try:
     # if the expected directories don't match those created, we can't seed them
@@ -539,7 +564,7 @@ def _seed_new_configuration_directories(package, created_directories):
 
     # short circuit for a simple 1:1 mapping
     if len(expected_directories) == 1:
-      # /usr/hdp/current/component/conf
+      # <stack-root>/current/component/conf
       # the current directory is the source of the seeded configurations;
       source_seed_directory = expected_directories[0]["current_dir"]
       target_seed_directory = created_directories[0]

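get_package_dirs() replaces the old module-level PACKAGE_DIRS constant: the table now stores a "{{ stack_root }}" placeholder and the function substitutes the cluster's configured stack root at call time. A simplified, self-contained sketch of that substitution, assuming a stack root of "/usr/hdp" (the real value comes from Script.get_stack_root()):

import copy

STACK_ROOT_PATTERN = "{{ stack_root }}"
_PACKAGE_DIRS = {
  "hadoop": [
    {
      "conf_dir": "/etc/hadoop/conf",
      "current_dir": "{0}/current/hadoop-client/conf".format(STACK_ROOT_PATTERN)
    }
  ]
}

def get_package_dirs(stack_root="/usr/hdp"):
  # Deep-copy the template so the placeholder table itself is never mutated.
  package_dirs = copy.deepcopy(_PACKAGE_DIRS)
  for package_name, directories in package_dirs.items():
    for directory in directories:
      directory["current_dir"] = directory["current_dir"].replace(STACK_ROOT_PATTERN, stack_root)
  return package_dirs

print(get_package_dirs()["hadoop"][0]["current_dir"])   # /usr/hdp/current/hadoop-client/conf
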
http://git-wip-us.apache.org/repos/asf/ambari/blob/0f3bf8aa/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py b/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
index 647b8b6..f07b76f 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
@@ -24,46 +24,130 @@ import os
 import uuid
 import tempfile
 import re
+import json
 
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions.default import default
 from resource_management.core import shell
 from resource_management.core.logger import Logger
+from resource_management.libraries.functions import stack_tools
 
+STACK_NAME_PATTERN = "{{ stack_name }}"
+STACK_ROOT_PATTERN = "{{ stack_root }}"
 STACK_VERSION_PATTERN = "{{ stack_version }}"
 
-TARBALL_MAP = {
-  "HDP": {
-    "slider":      ("/usr/hdp/{0}/slider/lib/slider.tar.gz".format(STACK_VERSION_PATTERN),
-                  "/hdp/apps/{0}/slider/slider.tar.gz".format(STACK_VERSION_PATTERN)),    
-    "tez":       ("/usr/hdp/{0}/tez/lib/tez.tar.gz".format(STACK_VERSION_PATTERN),
-                  "/hdp/apps/{0}/tez/tez.tar.gz".format(STACK_VERSION_PATTERN)),
+_DEFAULT_TARBALL_MAP = {
+  "slider": ("{0}/{1}/slider/lib/slider.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
+             "/{0}/apps/{1}/slider/slider.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
+  "tez": ("{0}/{1}/tez/lib/tez.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
+          "/{0}/apps/{1}/tez/tez.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
+  "hive": ("{0}/{1}/hive/hive.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
+           "/{0}/apps/{1}/hive/hive.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
+  "pig": ("{0}/{1}/pig/pig.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
+          "/{0}/apps/{1}/pig/pig.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
+  "hadoop_streaming": ("{0}/{1}/hadoop-mapreduce/hadoop-streaming.jar".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
+                       "/{0}/apps/{1}/mapreduce/hadoop-streaming.jar".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
+  "sqoop": ("{0}/{1}/sqoop/sqoop.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
+            "/{0}/apps/{1}/sqoop/sqoop.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
+  "mapreduce": ("{0}/{1}/hadoop/mapreduce.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
+                "/{0}/apps/{1}/mapreduce/mapreduce.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
+  "spark": ("{0}/{1}/spark/lib/spark-{2}-assembly.jar".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN, STACK_NAME_PATTERN),
+            "/{0}/apps/{1}/spark/spark-{0}-assembly.jar".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN))
+}
 
-    "hive":      ("/usr/hdp/{0}/hive/hive.tar.gz".format(STACK_VERSION_PATTERN),
-                  "/hdp/apps/{0}/hive/hive.tar.gz".format(STACK_VERSION_PATTERN)),
+def _get_tarball_map():
+  """
+  Get the stack-specific tarball source and destination mappings
+  :return: tarball_map
+  """
+  tarball_map_config = default("/configurations/cluster-env/tarball_map", None)
 
-    "pig":       ("/usr/hdp/{0}/pig/pig.tar.gz".format(STACK_VERSION_PATTERN),
-                  "/hdp/apps/{0}/pig/pig.tar.gz".format(STACK_VERSION_PATTERN)),
+  tarball_map = _DEFAULT_TARBALL_MAP
+  if tarball_map_config:
+    tarball_map = json.loads(tarball_map_config)
 
-    "hadoop_streaming": ("/usr/hdp/{0}/hadoop-mapreduce/hadoop-streaming.jar".format(STACK_VERSION_PATTERN),
-                         "/hdp/apps/{0}/mapreduce/hadoop-streaming.jar".format(STACK_VERSION_PATTERN)),
+  return tarball_map
 
-    "sqoop":     ("/usr/hdp/{0}/sqoop/sqoop.tar.gz".format(STACK_VERSION_PATTERN),
-                  "/hdp/apps/{0}/sqoop/sqoop.tar.gz".format(STACK_VERSION_PATTERN)),
+def _get_tarball_paths(name, use_upgrading_version_during_uprade=True, custom_source_file=None, custom_dest_file=None):
 
-    "mapreduce": ("/usr/hdp/{0}/hadoop/mapreduce.tar.gz".format(STACK_VERSION_PATTERN),
-                  "/hdp/apps/{0}/mapreduce/mapreduce.tar.gz".format(STACK_VERSION_PATTERN)),
+  stack_name = Script.get_stack_name()
+  if not stack_name:
+    Logger.error("Cannot copy {0} tarball to HDFS because stack name could be be determined.".format(
+            str(name)))
+    return (False, None, None)
 
-    "spark": ("/usr/hdp/{0}/spark/lib/spark-hdp-assembly.jar".format(STACK_VERSION_PATTERN),
-                  "/hdp/apps/{0}/spark/spark-hdp-assembly.jar".format(STACK_VERSION_PATTERN))
-  }
-}
+  stack_version = _get_current_version(use_upgrading_version_during_uprade)
+  if not stack_version:
+    Logger.error("Cannot copy {0} tarball to HDFS because stack version could be be determined.".format(
+            str(name)))
+    return (False, None, None)
+
+  stack_root = Script.get_stack_root()
+  if not stack_root:
+    Logger.error("Cannot copy {0} tarball to HDFS because stack root could be be determined.".format(
+          str(name)))
+    return (False, None, None)
+
+  tarball_map = _get_tarball_map()
+  if not tarball_map:
+    Logger.error("Cannot copy {0} tarball to HDFS because tarball map could not be determined.".format(
+            str(name), str(stack_name)))
+
+  if name is None or name.lower() not in tarball_map:
+    Logger.error("Cannot copy tarball to HDFS because {0} is not supported in stack {1} for this operation.".format(
+            str(name), str(stack_name)))
+    return (False, None, None)
+
+  (source_file, dest_file) = tarball_map[name.lower()]
+
+  if custom_source_file is not None:
+    source_file = custom_source_file
+
+  if custom_dest_file is not None:
+    dest_file = custom_dest_file
+
+  source_file = source_file.replace(STACK_NAME_PATTERN, stack_name.lower())
+  dest_file = dest_file.replace(STACK_NAME_PATTERN, stack_name.lower())
+
+  source_file = source_file.replace(STACK_ROOT_PATTERN, stack_root.lower())
+  dest_file = dest_file.replace(STACK_ROOT_PATTERN, stack_root.lower())
+
+  source_file = source_file.replace(STACK_VERSION_PATTERN, stack_version)
+  dest_file = dest_file.replace(STACK_VERSION_PATTERN, stack_version)
+  return (True, source_file, dest_file)
+
+def _get_current_version(use_upgrading_version_during_uprade=True):
+  upgrade_direction = default("/commandParams/upgrade_direction", None)
+  is_stack_upgrade = upgrade_direction is not None
+  current_version = default("/hostLevelParams/current_version", None)
+  Logger.info("Default version is {0}".format(current_version))
+  if is_stack_upgrade:
+    if use_upgrading_version_during_uprade:
+      # This is the version going to. In the case of a downgrade, it is the lower version.
+      current_version = default("/commandParams/version", None)
+      Logger.info("Because this is a Stack Upgrade, will use version {0}".format(current_version))
+    else:
+      Logger.info("This is a Stack Upgrade, but keep the version unchanged.")
+  else:
+    if current_version is None:
+      # During normal operation, the first installation of services won't yet know about the version, so must rely
+      # on <stack-selector> to get it.
+      stack_version = _get_single_version_from_stack_select()
+      if stack_version:
+        Logger.info("Will use stack version {0}".format(stack_version))
+        current_version = stack_version
 
+  if current_version is None:
+    message_suffix = "during stack %s" % str(upgrade_direction) if is_stack_upgrade else ""
+    Logger.warning("Cannot copy tarball because unable to determine current version {0}.".format(message_suffix))
+    return False
+
+  return current_version
 
 def _get_single_version_from_stack_select():
   """
-  Call "hdp-select versions" and return the version string if only one version is available.
+  Call "<stack-selector> versions" and return the version string if only one version is available.
   :return: Returns a version string if successful, and None otherwise.
   """
   # Ubuntu returns: "stdin: is not a tty", as subprocess output, so must use a temporary file to store the output.
@@ -73,7 +157,8 @@ def _get_single_version_from_stack_select():
   stack_version = None
 
   out = None
-  get_stack_versions_cmd = "/usr/bin/hdp-select versions > {0}".format(tmp_file)
+  stack_selector_path = stack_tools.get_stack_tool_path(stack_tools.STACK_SELECTOR_NAME)
+  get_stack_versions_cmd = "{0} versions > {1}".format(stack_selector_path, tmp_file)
   try:
     code, stdoutdata = shell.call(get_stack_versions_cmd, logoutput=True)
     with open(tmp_file, 'r+') as file:
@@ -88,7 +173,7 @@ def _get_single_version_from_stack_select():
       Logger.logger.exception("Could not remove file {0}. Error: {1}".format(str(tmp_file), str(e)))
 
   if code != 0 or out is None or out == "":
-    Logger.error("Could not verify HDP version by calling '{0}'. Return Code: {1}, Output: {2}.".format(get_stack_versions_cmd, str(code), str(out)))
+    Logger.error("Could not verify stack version by calling '{0}'. Return Code: {1}, Output: {2}.".format(get_stack_versions_cmd, str(code), str(out)))
     return None
 
   matches = re.findall(r"([\d\.]+\-\d+)", out)
@@ -96,7 +181,7 @@ def _get_single_version_from_stack_select():
   if matches and len(matches) == 1:
     stack_version = matches[0]
   elif matches and len(matches) > 1:
-    Logger.error("Found multiple matches for HDP version, cannot identify the correct one from: {0}".format(", ".join(matches)))
+    Logger.error("Found multiple matches for stack version, cannot identify the correct one from: {0}".format(", ".join(matches)))
 
   return stack_version
 
@@ -115,59 +200,24 @@ def copy_to_hdfs(name, user_group, owner, file_mode=0444, custom_source_file=Non
   :return: Will return True if successful, otherwise, False.
   """
   import params
-
-  if params.stack_name is None or params.stack_name.upper() not in TARBALL_MAP:
-    Logger.error("Cannot copy {0} tarball to HDFS because stack {1} does not support this operation.".format(str(name), str(params.stack_name)))
-    return False
-
-  if name is None or name.lower() not in TARBALL_MAP[params.stack_name.upper()]:
-    Logger.warning("Cannot copy tarball to HDFS because {0} is not supported in stack {1} for this operation.".format(str(name), str(params.stack_name)))
-    return False
-
   Logger.info("Called copy_to_hdfs tarball: {0}".format(name))
-  (source_file, dest_file) = TARBALL_MAP[params.stack_name.upper()][name.lower()]
-
-  if custom_source_file is not None:
-    source_file = custom_source_file
+  (success, source_file, dest_file) = _get_tarball_paths(
+          name, use_upgrading_version_during_uprade, custom_source_file, custom_dest_file)
 
-  if custom_dest_file is not None:
-    dest_file = custom_dest_file
+  if not success:
+    return False
 
   if host_sys_prepped:
-    Logger.info("Skipping copying {0} to {1} for {2} as its a sys_prepped host.".format(str(source_file), str(dest_file), str(name)))
+    Logger.info("Skipping copying {0} to {1} for {2} as its a sys_prepped host.".format(
+            str(source_file), str(dest_file), str(name)))
     return True
 
-  upgrade_direction = default("/commandParams/upgrade_direction", None)
-  is_stack_upgrade = upgrade_direction is not None
-  current_version = default("/hostLevelParams/current_version", None)
-  Logger.info("Default version is {0}".format(current_version))
-  if is_stack_upgrade:
-    if use_upgrading_version_during_uprade:
-      # This is the version going to. In the case of a downgrade, it is the lower version.
-      current_version = default("/commandParams/version", None)
-      Logger.info("Because this is a Stack Upgrade, will use version {0}".format(current_version))
-    else:
-      Logger.info("This is a Stack Upgrade, but keep the version unchanged.")
-  else:
-    if current_version is None:
-      # During normal operation, the first installation of services won't yet know about the version, so must rely
-      # on hdp-select to get it.
-      stack_version = _get_single_version_from_stack_select()
-      if stack_version:
-        Logger.info("Will use stack version {0}".format(stack_version))
-        current_version = stack_version
-
-  if current_version is None:
-    message_suffix = "during rolling %s" % str(upgrade_direction) if is_stack_upgrade else ""
-    Logger.warning("Cannot copy {0} tarball because unable to determine current version {1}.".format(name, message_suffix))
-    return False
-
-  source_file = source_file.replace(STACK_VERSION_PATTERN, current_version)
-  dest_file = dest_file.replace(STACK_VERSION_PATTERN, current_version)
   Logger.info("Source file: {0} , Dest file in HDFS: {1}".format(source_file, dest_file))
 
   if not os.path.exists(source_file):
-    Logger.warning("WARNING. Cannot copy {0} tarball because file does not exist: {1} . It is possible that this component is not installed on this host.".format(str(name), str(source_file)))
+    Logger.warning("WARNING. Cannot copy {0} tarball because file does not exist: {1} . "
+                   "It is possible that this component is not installed on this host.".format(
+            str(name), str(source_file)))
     return False
 
   # Because CopyFromLocal does not guarantee synchronization, it's possible for two processes to first attempt to

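_get_tarball_paths() now builds the source and destination locations from a map keyed only by tarball name; the "{{ stack_root }}", "{{ stack_name }}" and "{{ stack_version }}" placeholders are resolved against the running cluster instead of being hard-coded to /usr/hdp and /hdp. A rough sketch of the resolution step, using made-up example values for the stack name, root and version:

STACK_NAME_PATTERN = "{{ stack_name }}"
STACK_ROOT_PATTERN = "{{ stack_root }}"
STACK_VERSION_PATTERN = "{{ stack_version }}"

# Entry shaped like the tez entry in the tarball map: (source on local disk, destination in HDFS).
source_file = "{0}/{1}/tez/lib/tez.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN)
dest_file = "/{0}/apps/{1}/tez/tez.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)

for pattern, value in [(STACK_NAME_PATTERN, "hdp"),
                       (STACK_ROOT_PATTERN, "/usr/hdp"),
                       (STACK_VERSION_PATTERN, "2.4.0.0-169")]:
  source_file = source_file.replace(pattern, value)
  dest_file = dest_file.replace(pattern, value)

print(source_file)   # /usr/hdp/2.4.0.0-169/tez/lib/tez.tar.gz
print(dest_file)     # /hdp/apps/2.4.0.0-169/tez/tez.tar.gz
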
http://git-wip-us.apache.org/repos/asf/ambari/blob/0f3bf8aa/ambari-common/src/main/python/resource_management/libraries/functions/default.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/default.py b/ambari-common/src/main/python/resource_management/libraries/functions/default.py
index 23383dc..061bc52 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/default.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/default.py
@@ -29,6 +29,8 @@ def default(name, default_value):
   subdicts = filter(None, name.split('/'))
 
   curr_dict = Script.get_config()
+  if not curr_dict:
+    return default_value
   for x in subdicts:
     if x in curr_dict:
       curr_dict = curr_dict[x]

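The added guard makes default() return the fallback value when Script.get_config() yields nothing (for example, before any command JSON has been loaded), instead of failing in the lookup loop. A simplified, self-contained sketch of the same logic with the config passed in explicitly rather than read from Script:

def default(name, default_value, config=None):
  subdicts = [part for part in name.split('/') if part]
  curr_dict = config
  if not curr_dict:
    # New behavior: no config at all means fall back immediately.
    return default_value
  for x in subdicts:
    if x in curr_dict:
      curr_dict = curr_dict[x]
    else:
      return default_value
  return curr_dict

print(default("/commandParams/version", None, None))   # None
print(default("/commandParams/version", None,
              {"commandParams": {"version": "2.4.0.0-169"}}))   # 2.4.0.0-169
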
http://git-wip-us.apache.org/repos/asf/ambari/blob/0f3bf8aa/ambari-common/src/main/python/resource_management/libraries/functions/dynamic_variable_interpretation.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/dynamic_variable_interpretation.py b/ambari-common/src/main/python/resource_management/libraries/functions/dynamic_variable_interpretation.py
index a20b03c..ca8fe19 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/dynamic_variable_interpretation.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/dynamic_variable_interpretation.py
@@ -28,6 +28,7 @@ from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.resources.copy_from_local import CopyFromLocal
 from resource_management.libraries.resources.execute_hadoop import ExecuteHadoop
+from resource_management.libraries.functions import stack_tools
 from resource_management.core.resources.system import Execute
 from resource_management.core.exceptions import Fail
 from resource_management.core.logger import Logger
@@ -54,7 +55,7 @@ def _get_tar_source_and_dest_folder(tarball_prefix):
   :return: Returns a tuple of (x, y) after verifying the properties
   """
   component_tar_source_file = default("/configurations/cluster-env/%s%s" % (tarball_prefix.lower(), TAR_SOURCE_SUFFIX), None)
-  # E.g., /usr/hdp/current/hadoop-client/tez-{{ stack_version_formatted }}.tar.gz
+  # E.g., <stack-root>/current/hadoop-client/tez-{{ stack_version_formatted }}.tar.gz
 
   component_tar_destination_folder = default("/configurations/cluster-env/%s%s" % (tarball_prefix.lower(), TAR_DESTINATION_FOLDER_SUFFIX), None)
   # E.g., hdfs:///hdp/apps/{{ stack_version_formatted }}/mapreduce/
@@ -173,13 +174,14 @@ def copy_tarballs_to_hdfs(tarball_prefix, stack_select_component_name, component
   # Ubuntu returns: "stdin: is not a tty", as subprocess output.
   tmpfile = tempfile.NamedTemporaryFile()
   out = None
+  (stack_selector_name, stack_selector_path, stack_selector_package) = stack_tools.get_stack_tool(stack_tools.STACK_SELECTOR_NAME)
   with open(tmpfile.name, 'r+') as file:
-    get_stack_version_cmd = '/usr/bin/hdp-select status %s > %s' % (stack_select_component_name, tmpfile.name)
+    get_stack_version_cmd = '%s status %s > %s' % (stack_selector_path, stack_select_component_name, tmpfile.name)
     code, stdoutdata = shell.call(get_stack_version_cmd)
     out = file.read()
   pass
   if code != 0 or out is None:
-    Logger.warning("Could not verify HDP version by calling '%s'. Return Code: %s, Output: %s." %
+    Logger.warning("Could not verify stack version by calling '%s'. Return Code: %s, Output: %s." %
                    (get_stack_version_cmd, str(code), str(out)))
     return 1
 
@@ -187,7 +189,7 @@ def copy_tarballs_to_hdfs(tarball_prefix, stack_select_component_name, component
   stack_version = matches[0] if matches and len(matches) > 0 else None
 
   if not stack_version:
-    Logger.error("Could not parse HDP version from output of hdp-select: %s" % str(out))
+    Logger.error("Could not parse stack version from output of %s: %s" % (stack_selector_name, str(out)))
     return 1
 
   file_name = os.path.basename(component_tar_source_file)

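copy_tarballs_to_hdfs() now asks stack_tools for the selector tool instead of hard-coding /usr/bin/hdp-select, and the installed version is still parsed out of the selector's status output with the same regular expression. A small illustration of that parsing step (the sample output line is made up):

import re

out = "hadoop-client - 2.4.0.0-169"
matches = re.findall(r"([\d\.]+\-\d+)", out)
stack_version = matches[0] if matches and len(matches) > 0 else None
print(stack_version)   # 2.4.0.0-169
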
http://git-wip-us.apache.org/repos/asf/ambari/blob/0f3bf8aa/ambari-common/src/main/python/resource_management/libraries/functions/get_stack_version.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/get_stack_version.py b/ambari-common/src/main/python/resource_management/libraries/functions/get_stack_version.py
index f2e6567..6095a80 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/get_stack_version.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/get_stack_version.py
@@ -30,8 +30,8 @@ from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from resource_management.core.logger import Logger
 from resource_management.core.exceptions import Fail
 from resource_management.core import shell
+from resource_management.libraries.functions import stack_tools
 
-STACK_SELECT_BINARY = "/usr/bin/hdp-select"
 
 @OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
 def get_stack_version(package_name):
@@ -64,17 +64,20 @@ def get_stack_version(package_name):
   """
   @param package_name, name of the package, from which, function will try to get stack version
   """
-  
-  if not os.path.exists(STACK_SELECT_BINARY):
-    Logger.info('Skipping get_stack_version since " + STACK_SELECT_BINARY + " is not yet available')
+
+  stack_selector_path = stack_tools.get_stack_tool_name(stack_tools.STACK_SELECTOR_NAME)
+
+  if not os.path.exists(stack_selector_path):
+    Logger.info('Skipping get_stack_version since " + stack_selector_tool + " is not yet available')
     return None # lazy fail
   
   try:
-    command = 'ambari-python-wrap {STACK_SELECT_BINARY} status {package_name}'.format(STACK_SELECT_BINARY=STACK_SELECT_BINARY, package_name=package_name)
+    command = 'ambari-python-wrap {stack_selector_path} status {package_name}'.format(
+            stack_selector_path=stack_selector_path, package_name=package_name)
     return_code, stack_output = shell.call(command, timeout=20)
   except Exception, e:
     Logger.error(str(e))
-    raise Fail('Unable to execute " + STACK_SELECT_BINARY + " command to retrieve the version.')
+    raise Fail('Unable to execute " + stack_selector_path + " command to retrieve the version.')
 
   if return_code != 0:
     raise Fail(
@@ -85,7 +88,7 @@ def get_stack_version(package_name):
   match = re.match('[0-9]+.[0-9]+.[0-9]+.[0-9]+-[0-9]+', stack_version)
 
   if match is None:
-    Logger.info('Failed to get extracted version with ' + STACK_SELECT_BINARY)
+    Logger.info('Failed to get extracted version with ' + stack_selector_path)
     return None # lazy fail
 
   return stack_version

http://git-wip-us.apache.org/repos/asf/ambari/blob/0f3bf8aa/ambari-common/src/main/python/resource_management/libraries/functions/install_windows_msi.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/install_windows_msi.py b/ambari-common/src/main/python/resource_management/libraries/functions/install_windows_msi.py
index f1cd9cb..96d8661 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/install_windows_msi.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/install_windows_msi.py
@@ -38,24 +38,24 @@ import urlparse
 __all__ = ['install_windows_msi']
 
 msi_save_dir = None
-hdp_log_dir = "c:\\hadoop\\logs"
-hdp_data_dir = "c:\\hadoopDefaultData"
+log_dir = "c:\\hadoop\\logs"
+data_dir = "c:\\hadoopDefaultData"
 local_host = socket.getfqdn()
 db_flavor = "DERBY"
 hdp_22 = """#Namenode Data directory
-HDFS_NAMENODE_DATA_DIR={hdp_data_dir}\\hdpdatann
+HDFS_NAMENODE_DATA_DIR={data_dir}\\hdpdatann
 
 #Datanode Data directory
-HDFS_DATANODE_DATA_DIR={hdp_data_dir}\\hdpdatadn
+HDFS_DATANODE_DATA_DIR={data_dir}\\hdpdatadn
 
 IS_SLIDER=yes
 IS_PHOENIX=no
 """
 cluster_properties = """#Log directory
-HDP_LOG_DIR={hdp_log_dir}
+HDP_LOG_DIR={log_dir}
 
 #Data directory
-HDP_DATA_DIR={hdp_data_dir}
+HDP_DATA_DIR={data_dir}
 
 {hdp_22_specific_props}
 
@@ -93,8 +93,8 @@ OOZIE_DB_USERNAME=oozie
 OOZIE_DB_PASSWORD=oozie
 """
 
-INSTALL_MSI_CMD = 'cmd /C start /wait msiexec /qn /i  {hdp_msi_path} /lv {hdp_log_path} MSIUSEREALADMINDETECTION=1 ' \
-                  'HDP_LAYOUT={hdp_layout_path} DESTROY_DATA=yes HDP_USER={hadoop_user} HDP_USER_PASSWORD={hadoop_password_arg} HDP=yes ' \
+INSTALL_MSI_CMD = 'cmd /C start /wait msiexec /qn /i  {msi_path} /lv {log_path} MSIUSEREALADMINDETECTION=1 ' \
+                  'HDP_LAYOUT={layout_path} DESTROY_DATA=yes HDP_USER={hadoop_user} HDP_USER_PASSWORD={hadoop_password_arg} HDP=yes ' \
                   'KNOX=yes KNOX_MASTER_SECRET="AmbariHDP2Windows" FALCON=yes STORM=yes HBase=yes STORM=yes FLUME=yes SLIDER=yes PHOENIX=no RANGER=no'
 CREATE_SERVICE_SCRIPT = os.path.abspath("sbin\createservice.ps1")
 CREATE_SERVICE_CMD = 'cmd /C powershell -ExecutionPolicy Bypass -File "{script}" -username {username} -password "{password}" -servicename ' \
@@ -176,33 +176,33 @@ def install_windows_msi(url_base, save_dir, save_files, hadoop_user, hadoop_pass
     stack_version_formatted = format_stack_version(stack_version)
     hdp_22_specific_props = ''
     if stack_version_formatted != "" and compare_versions(stack_version_formatted, '2.2') >= 0:
-      hdp_22_specific_props = hdp_22.format(hdp_data_dir=hdp_data_dir)
+      hdp_22_specific_props = hdp_22.format(data_dir=data_dir)
 
     # MSIs cannot be larger than 2GB. HDPWIN 2.3 needed split in order to accommodate this limitation
-    hdp_msi_file = ''
+    msi_file = ''
     for save_file in save_files:
       if save_file.lower().endswith(".msi"):
-        hdp_msi_file = save_file
+        msi_file = save_file
       file_url = urlparse.urljoin(url_base, save_file)
       try:
         download_file(file_url, os.path.join(msi_save_dir, save_file))
       except:
         raise Fail("Failed to download {url}".format(url=file_url))
 
-    File(os.path.join(msi_save_dir, "properties.txt"), content=cluster_properties.format(hdp_log_dir=hdp_log_dir,
-                                                                                         hdp_data_dir=hdp_data_dir,
+    File(os.path.join(msi_save_dir, "properties.txt"), content=cluster_properties.format(log_dir=log_dir,
+                                                                                         data_dir=data_dir,
                                                                                          local_host=local_host,
                                                                                          db_flavor=db_flavor,
                                                                                          hdp_22_specific_props=hdp_22_specific_props))
 
     # install msi
-    hdp_msi_path = os_utils.quote_path(os.path.join(save_dir, hdp_msi_file))
-    hdp_log_path = os_utils.quote_path(os.path.join(save_dir, hdp_msi_file[:-3] + "log"))
-    hdp_layout_path = os_utils.quote_path(os.path.join(save_dir, "properties.txt"))
+    msi_path = os_utils.quote_path(os.path.join(save_dir, msi_file))
+    log_path = os_utils.quote_path(os.path.join(save_dir, msi_file[:-3] + "log"))
+    layout_path = os_utils.quote_path(os.path.join(save_dir, "properties.txt"))
     hadoop_password_arg = os_utils.quote_path(hadoop_password)
 
     Execute(
-      INSTALL_MSI_CMD.format(hdp_msi_path=hdp_msi_path, hdp_log_path=hdp_log_path, hdp_layout_path=hdp_layout_path,
+      INSTALL_MSI_CMD.format(msi_path=msi_path, log_path=log_path, layout_path=layout_path,
                              hadoop_user=hadoop_user, hadoop_password_arg=hadoop_password_arg))
     reload_windows_env()
     # create additional services manually due to hdp.msi limitaitons
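
A quick illustration of why the placeholder rename above has to be mirrored at every .format() call site: str.format raises KeyError when a placeholder has no matching keyword argument. The template below is an abbreviated, illustrative copy of cluster_properties, not the real one:

    cluster_properties = """#Log directory
    HDP_LOG_DIR={log_dir}

    #Data directory
    HDP_DATA_DIR={data_dir}
    """

    # Works: the keyword names match the (renamed) placeholders.
    print(cluster_properties.format(log_dir="c:\\hadoop\\logs",
                                    data_dir="c:\\hadoopDefaultData"))

    # Raises KeyError('log_dir'): the caller still passes the old hdp_* names.
    try:
        cluster_properties.format(hdp_log_dir="c:\\hadoop\\logs",
                                  hdp_data_dir="c:\\hadoopDefaultData")
    except KeyError as e:
        print("missing placeholder value: %s" % e)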

http://git-wip-us.apache.org/repos/asf/ambari/blob/0f3bf8aa/ambari-common/src/main/python/resource_management/libraries/functions/repo_version_history.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/repo_version_history.py b/ambari-common/src/main/python/resource_management/libraries/functions/repo_version_history.py
index d585dea..4273e9b 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/repo_version_history.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/repo_version_history.py
@@ -24,7 +24,7 @@ from resource_management.core.logger import Logger
 """
 Repository version file is used while executing install_packages.py
 That allows us to get actual installed version even during reinstalling existing
- repository version (when hdp-select output does not change before and after
+ repository version (when <stack-selector-tool> output does not change before and after
  installing packages).
 
 Format:
@@ -65,7 +65,7 @@ def write_actual_version_to_history_file(repository_version, actual_version):
   Save the tuple of repository_version,actual_version to the repo version history file if the repository_version
   doesn't already exist
   :param repository_version: normalized repo version (without build number) as received from the server
-  :param actual_version: Repo version with the build number, as determined using hdp-select
+  :param actual_version: Repo version with the build number, as determined using <stack-selector-tool>
   :returns Return True if appended the values to the file, otherwise, return False.
   """
   wrote_value = False
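
A hedged sketch of the append-if-absent behaviour this file implements; the history file path below is illustrative, not the agent's real location:

    import os

    HISTORY_FILE = "/tmp/repo_version_history.csv"   # illustrative path only

    def write_actual_version(repository_version, actual_version):
        # Skip the write if a mapping for this repository_version already exists.
        if os.path.isfile(HISTORY_FILE):
            with open(HISTORY_FILE) as f:
                for line in f:
                    if line.strip().startswith(repository_version + ","):
                        return False
        with open(HISTORY_FILE, "a") as f:
            f.write("%s,%s\n" % (repository_version, actual_version))
        return True

    print(write_actual_version("2.3.0.0", "2.3.0.0-1234"))   # True, then False on repeat calls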

http://git-wip-us.apache.org/repos/asf/ambari/blob/0f3bf8aa/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin.py b/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin.py
index 4d9d8a4..3eb8591 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin.py
@@ -30,6 +30,7 @@ from resource_management.core.source import DownloadSource
 from resource_management.libraries.resources import ModifyPropertiesFile
 from resource_management.core.exceptions import Fail
 from resource_management.libraries.functions.ranger_functions_v2 import RangeradminV2
+from resource_management.libraries.script.script import Script
 
 def setup_ranger_plugin(component_select_name, service_name,
                         downloaded_custom_connector, driver_curl_source,
@@ -50,8 +51,9 @@ def setup_ranger_plugin(component_select_name, service_name,
 
   File(driver_curl_target, mode=0644)
 
+  stack_root = Script.get_stack_root()
   stack_version = get_stack_version(component_select_name)
-  file_path = format('/usr/hdp/{stack_version}/ranger-{service_name}-plugin/install.properties')
+  file_path = format('{stack_root}/{stack_version}/ranger-{service_name}-plugin/install.properties')
   
   if not os.path.isfile(file_path):
     raise Fail(format('Ranger {service_name} plugin install.properties file does not exist at {file_path}'))
@@ -79,7 +81,9 @@ def setup_ranger_plugin(component_select_name, service_name,
   else:
     cmd = (format('disable-{service_name}-plugin.sh'),)
     
-  cmd_env = {'JAVA_HOME': java_home, 'PWD': format('/usr/hdp/{stack_version}/ranger-{service_name}-plugin'), 'PATH': format('/usr/hdp/{stack_version}/ranger-{service_name}-plugin')}
+  cmd_env = {'JAVA_HOME': java_home,
+             'PWD': format('{stack_root}/{stack_version}/ranger-{service_name}-plugin'),
+             'PATH': format('{stack_root}/{stack_version}/ranger-{service_name}-plugin')}
   
   Execute(cmd, 
         environment=cmd_env, 
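
For context, the path the rewritten format() call composes, sketched here with plain string formatting and illustrative values (Script.get_stack_root() and get_stack_version() supply the real ones at runtime):

    stack_root = "/usr/hdp"              # illustrative Script.get_stack_root() value on HDP
    stack_version = "2.3.0.0-1234"       # illustrative get_stack_version(...) value
    service_name = "hdfs"

    file_path = "{0}/{1}/ranger-{2}-plugin/install.properties".format(
        stack_root, stack_version, service_name)
    print(file_path)   # /usr/hdp/2.3.0.0-1234/ranger-hdfs-plugin/install.properties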

http://git-wip-us.apache.org/repos/asf/ambari/blob/0f3bf8aa/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py b/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
index 2ccc0c6..1046d62 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
@@ -31,6 +31,7 @@ from resource_management.core.logger import Logger
 from resource_management.core.source import DownloadSource, InlineTemplate
 from resource_management.libraries.functions.ranger_functions_v2 import RangeradminV2
 from resource_management.core.utils import PasswordString
+from resource_management.libraries.script.script import Script
 
 def setup_ranger_plugin(component_select_name, service_name,
                         component_downloaded_custom_connector, component_driver_curl_source,
@@ -149,20 +150,21 @@ def setup_ranger_plugin(component_select_name, service_name,
 
 def setup_ranger_plugin_jar_symblink(stack_version, service_name, component_list):
 
-  jar_files = os.listdir(format('/usr/hdp/{stack_version}/ranger-{service_name}-plugin/lib'))
+  stack_root = Script.get_stack_root()
+  jar_files = os.listdir(format('{stack_root}/{stack_version}/ranger-{service_name}-plugin/lib'))
 
   for jar_file in jar_files:
     for component in component_list:
-      Execute(('ln','-sf',format('/usr/hdp/{stack_version}/ranger-{service_name}-plugin/lib/{jar_file}'),format('/usr/hdp/current/{component}/lib/{jar_file}')),
-      not_if=format('ls /usr/hdp/current/{component}/lib/{jar_file}'),
-      only_if=format('ls /usr/hdp/{stack_version}/ranger-{service_name}-plugin/lib/{jar_file}'),
+      Execute(('ln','-sf',format('{stack_root}/{stack_version}/ranger-{service_name}-plugin/lib/{jar_file}'),format('{stack_root}/current/{component}/lib/{jar_file}')),
+      not_if=format('ls {stack_root}/current/{component}/lib/{jar_file}'),
+      only_if=format('ls {stack_root}/{stack_version}/ranger-{service_name}-plugin/lib/{jar_file}'),
       sudo=True)
 
 def setup_ranger_plugin_keystore(service_name, audit_db_is_enabled, stack_version, credential_file, xa_audit_db_password,
                                 ssl_truststore_password, ssl_keystore_password, component_user, component_group, java_home):
 
-  cred_lib_path = format('/usr/hdp/{stack_version}/ranger-{service_name}-plugin/install/lib/*')
-  cred_setup_prefix = (format('/usr/hdp/{stack_version}/ranger-{service_name}-plugin/ranger_credential_helper.py'), '-l', cred_lib_path)
+  stack_root = Script.get_stack_root()
+  cred_lib_path = format('{stack_root}/{stack_version}/ranger-{service_name}-plugin/install/lib/*')
+  cred_setup_prefix = (format('{stack_root}/{stack_version}/ranger-{service_name}-plugin/ranger_credential_helper.py'), '-l', cred_lib_path)
 
   if audit_db_is_enabled:
     cred_setup = cred_setup_prefix + ('-f', credential_file, '-k', 'auditDBCred', '-v', PasswordString(xa_audit_db_password), '-c', '1')
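
A plain-Python sketch of the symlink fan-out in setup_ranger_plugin_jar_symblink above; the real code shells out to "ln -sf" via Execute with sudo, and all paths below are illustrative:

    import os

    stack_root = "/usr/hdp"                       # illustrative Script.get_stack_root() value
    stack_version = "2.3.0.0-1234"
    service_name = "hdfs"
    component_list = ["hadoop-hdfs-namenode", "hadoop-hdfs-datanode"]

    plugin_lib = "%s/%s/ranger-%s-plugin/lib" % (stack_root, stack_version, service_name)
    if os.path.isdir(plugin_lib):
        for jar_file in os.listdir(plugin_lib):
            for component in component_list:
                link = "%s/current/%s/lib/%s" % (stack_root, component, jar_file)
                if not os.path.lexists(link):
                    os.symlink(os.path.join(plugin_lib, jar_file), link)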

http://git-wip-us.apache.org/repos/asf/ambari/blob/0f3bf8aa/ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py
index 4a8eeb9..02cd7ca 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py
@@ -28,17 +28,14 @@ from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions.get_stack_version import get_stack_version
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import stack_tools
 from resource_management.core.shell import call
 from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.version_select_util import get_versions_from_stack_root
 
-STACK_SELECT = '/usr/bin/hdp-select'
-STACK_SELECT_PREFIX = ('ambari-python-wrap', STACK_SELECT)
+STACK_SELECT_PREFIX = 'ambari-python-wrap'
 
-# hdp-select set oozie-server 2.2.0.0-1234
-TEMPLATE = STACK_SELECT_PREFIX + ('set',)
-
-# a mapping of Ambari server role to hdp-select component name for all
+# a mapping of Ambari server role to <stack-selector-tool> component name for all
 # non-clients
 SERVER_ROLE_DIRECTORY_MAP = {
   'ACCUMULO_MASTER' : 'accumulo-master',
@@ -75,12 +72,12 @@ SERVER_ROLE_DIRECTORY_MAP = {
   'RESOURCEMANAGER' : 'hadoop-yarn-resourcemanager',
   'ZOOKEEPER_SERVER' : 'zookeeper-server',
 
-  # ZKFC is tied to NN since it doesn't have its own componnet in hdp-select and there is
+  # ZKFC is tied to NN since it doesn't have its own component in <stack-selector-tool> and there is
   # a requirement that the ZKFC is installed on each NN
   'ZKFC' : 'hadoop-hdfs-namenode'
 }
 
-# mapping of service check to hdp-select component
+# mapping of service check to <stack-selector-tool> component
 SERVICE_CHECK_DIRECTORY_MAP = {
   "HDFS_SERVICE_CHECK" : "hadoop-client",
   "TEZ_SERVICE_CHECK" : "hadoop-client",
@@ -90,13 +87,13 @@ SERVICE_CHECK_DIRECTORY_MAP = {
   "MAHOUT_SERVICE_CHECK" : "mahout-client"
 }
 
-# /usr/hdp/current/hadoop-client/[bin|sbin|libexec|lib]
-# /usr/hdp/2.3.0.0-1234/hadoop/[bin|sbin|libexec|lib]
-HADOOP_DIR_TEMPLATE = "/usr/hdp/{0}/{1}/{2}"
+# <stack-root>/current/hadoop-client/[bin|sbin|libexec|lib]
+# <stack-root>/2.3.0.0-1234/hadoop/[bin|sbin|libexec|lib]
+HADOOP_DIR_TEMPLATE = "{0}/{1}/{2}/{3}"
 
-# /usr/hdp/current/hadoop-client
-# /usr/hdp/2.3.0.0-1234/hadoop
-HADOOP_HOME_DIR_TEMPLATE = "/usr/hdp/{0}/{1}"
+# <stack-root>/current/hadoop-client
+# <stack-root>/2.3.0.0-1234/hadoop
+HADOOP_HOME_DIR_TEMPLATE = "{0}/{1}/{2}"
 
 HADOOP_DIR_DEFAULTS = {
   "home": "/usr/lib/hadoop",
@@ -108,38 +105,41 @@ HADOOP_DIR_DEFAULTS = {
 
 def select_all(version_to_select):
   """
-  Executes hdp-select on every component for the specified version. If the value passed in is a
+  Executes <stack-selector-tool> on every component for the specified version. If the value passed in is a
   stack version such as "2.3", then this will find the latest installed version which
   could be "2.3.0.0-9999". If a version is specified instead, such as 2.3.0.0-1234, it will use
   that exact version.
-  :param version_to_select: the version to hdp-select on, such as "2.3" or "2.3.0.0-1234"
+  :param version_to_select: the version to <stack-selector-tool> on, such as "2.3" or "2.3.0.0-1234"
   """
+  stack_root = Script.get_stack_root()
+  (stack_selector_name, stack_selector_path, stack_selector_package) = stack_tools.get_stack_tool(stack_tools.STACK_SELECTOR_NAME)
   # it's an error, but it shouldn't really stop anything from working
   if version_to_select is None:
-    Logger.error("Unable to execute hdp-select after installing because there was no version specified")
+    Logger.error(format("Unable to execute {stack_selector_name} after installing because there was no version specified"))
     return
 
-  Logger.info("Executing hdp-select set all on {0}".format(version_to_select))
+  Logger.info("Executing {0} set all on {1}".format(stack_selector_name, version_to_select))
 
-  command = format('{sudo} /usr/bin/hdp-select set all `ambari-python-wrap /usr/bin/hdp-select versions | grep ^{version_to_select} | tail -1`')
-  only_if_command = format('ls -d /usr/hdp/{version_to_select}*')
+  command = format('{sudo} {stack_selector_path} set all `ambari-python-wrap {stack_selector_path} versions | grep ^{version_to_select} | tail -1`')
+  only_if_command = format('ls -d {stack_root}/{version_to_select}*')
   Execute(command, only_if = only_if_command)
 
 
 def select(component, version):
   """
-  Executes hdp-select on the specific component and version. Some global
+  Executes <stack-selector-tool> on the specific component and version. Some global
   variables that are imported via params/status_params/params_linux will need
-  to be recalcuated after the hdp-select. However, python does not re-import
+  to be recalculated after the <stack-selector-tool>. However, Python does not re-import
   existing modules. The only way to ensure that the configuration variables are
   recalculated is to call reload(...) on each module that has global parameters.
-  After invoking hdp-select, this function will also reload params, status_params,
+  After invoking <stack-selector-tool>, this function will also reload params, status_params,
   and params_linux.
-  :param component: the hdp-select component, such as oozie-server. If "all", then all components
+  :param component: the <stack-selector-tool> component, such as oozie-server. If "all", then all components
   will be updated.
   :param version: the version to set the component to, such as 2.2.0.0-1234
   """
-  command = TEMPLATE + (component, version)
+  stack_selector_path = stack_tools.get_stack_tool_path(stack_tools.STACK_SELECTOR_NAME)
+  command = (STACK_SELECT_PREFIX, stack_selector_path, "set", component, version)
   Execute(command, sudo=True)
 
   # don't trust the ordering of modules:
@@ -163,6 +163,7 @@ def get_role_component_current_stack_version():
   stack_select_component = None
   role = default("/role", "")
   role_command =  default("/roleCommand", "")
+  stack_selector_name = stack_tools.get_stack_tool_name(stack_tools.STACK_SELECTOR_NAME)
 
   if role in SERVER_ROLE_DIRECTORY_MAP:
     stack_select_component = SERVER_ROLE_DIRECTORY_MAP[role]
@@ -175,8 +176,8 @@ def get_role_component_current_stack_version():
   current_stack_version = get_stack_version(stack_select_component)
 
   if current_stack_version is None:
-    Logger.warning("Unable to determine hdp-select version for {0}".format(
-      stack_select_component))
+    Logger.warning("Unable to determine {0} version for {1}".format(
+      stack_selector_name, stack_select_component))
   else:
     Logger.info("{0} is currently at version {1}".format(
       stack_select_component, current_stack_version))
@@ -188,14 +189,15 @@ def get_hadoop_dir(target, force_latest_on_upgrade=False):
   """
   Return the hadoop shared directory in the following override order
   1. Use default for 2.1 and lower
-  2. If 2.2 and higher, use /usr/hdp/current/hadoop-client/{target}
-  3. If 2.2 and higher AND for an upgrade, use /usr/hdp/<version>/hadoop/{target}.
-  However, if the upgrade has not yet invoked hdp-select, return the current
+  2. If 2.2 and higher, use <stack-root>/current/hadoop-client/{target}
+  3. If 2.2 and higher AND for an upgrade, use <stack-root>/<version>/hadoop/{target}.
+  However, if the upgrade has not yet invoked <stack-selector-tool>, return the current
   version of the component.
   :target: the target directory
   :force_latest_on_upgrade: if True, then this will return the "current" directory
-  without the HDP version built into the path, such as /usr/hdp/current/hadoop-client
+  without the stack version built into the path, such as <stack-root>/current/hadoop-client
   """
+  stack_root = Script.get_stack_root()
 
   if not target in HADOOP_DIR_DEFAULTS:
     raise Fail("Target {0} not defined".format(target))
@@ -205,9 +207,9 @@ def get_hadoop_dir(target, force_latest_on_upgrade=False):
   if Script.is_stack_greater_or_equal("2.2"):
     # home uses a different template
     if target == "home":
-      hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format("current", "hadoop-client")
+      hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_root, "current", "hadoop-client")
     else:
-      hadoop_dir = HADOOP_DIR_TEMPLATE.format("current", "hadoop-client", target)
+      hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_root, "current", "hadoop-client", target)
 
     # if we are not forcing "current" for HDP 2.2, then attempt to determine
     # if the exact version needs to be returned in the directory
@@ -217,7 +219,7 @@ def get_hadoop_dir(target, force_latest_on_upgrade=False):
       if stack_info is not None:
         stack_version = stack_info[1]
 
-        # determine if hdp-select has been run and if not, then use the current
+        # determine if <stack-selector-tool> has been run and if not, then use the current
         # hdp version until this component is upgraded
         current_stack_version = get_role_component_current_stack_version()
         if current_stack_version is not None and stack_version != current_stack_version:
@@ -225,20 +227,21 @@ def get_hadoop_dir(target, force_latest_on_upgrade=False):
 
         if target == "home":
           # home uses a different template
-          hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_version, "hadoop")
+          hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_root, stack_version, "hadoop")
         else:
-          hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_version, "hadoop", target)
+          hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_root, stack_version, "hadoop", target)
 
   return hadoop_dir
 
 def get_hadoop_dir_for_stack_version(target, stack_version):
   """
   Return the hadoop shared directory for the provided stack version. This is necessary
-  when folder paths of downgrade-source stack-version are needed after hdp-select. 
+  when folder paths of downgrade-source stack-version are needed after <stack-selector-tool>.
   :target: the target directory
   :stack_version: stack version to get hadoop dir for
   """
 
+  stack_root = Script.get_stack_root()
   if not target in HADOOP_DIR_DEFAULTS:
     raise Fail("Target {0} not defined".format(target))
 
@@ -248,9 +251,9 @@ def get_hadoop_dir_for_stack_version(target, stack_version):
   if Script.is_stack_greater_or_equal_to(formatted_stack_version, "2.2"):
     # home uses a different template
     if target == "home":
-      hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_version, "hadoop")
+      hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_root, stack_version, "hadoop")
     else:
-      hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_version, "hadoop", target)
+      hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_root, stack_version, "hadoop", target)
 
   return hadoop_dir
 
@@ -275,12 +278,13 @@ def _get_upgrade_stack():
 def get_stack_versions(stack_root):
   """
   Gets list of stack versions installed on the host.
-  Be default a call to hdp-select versions is made to get the list of installed stack versions.
+  By default, a call to <stack-selector-tool> versions is made to get the list of installed stack versions.
   As a fallback list of installed versions is collected from stack version directories in stack install root.
   :param stack_root: Stack install root
   :return: Returns list of installed stack versions.
   """
-  code, out = call(STACK_SELECT_PREFIX + ('versions',))
+  stack_selector_path = stack_tools.get_stack_tool_path(stack_tools.STACK_SELECTOR_NAME)
+  code, out = call((STACK_SELECT_PREFIX, stack_selector_path, 'versions'))
   versions = []
   if 0 == code:
     for line in out.splitlines():
@@ -291,17 +295,19 @@ def get_stack_versions(stack_root):
 
 def get_stack_version_before_install(component_name):
   """
-  Works in the similar way to 'hdp-select status component', 
+  Works in a similar way to '<stack-selector-tool> status component',
   but also works for not yet installed packages.
   
   Note: won't work if doing initial install.
   """
-  component_dir = HADOOP_HOME_DIR_TEMPLATE.format("current", component_name)
+  stack_root = Script.get_stack_root()
+  component_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_root, "current", component_name)
+  stack_selector_name = stack_tools.get_stack_tool_name(stack_tools.STACK_SELECTOR_NAME)
   if os.path.islink(component_dir):
     stack_version = os.path.basename(os.path.dirname(os.readlink(component_dir)))
     match = re.match('[0-9]+.[0-9]+.[0-9]+.[0-9]+-[0-9]+', stack_version)
     if match is None:
-      Logger.info('Failed to get extracted version with hdp-select in method get_stack_version_before_install')
+      Logger.info('Failed to get extracted version with {0} in method get_stack_version_before_install'.format(stack_selector_name))
       return None # lazy fail
     return stack_version
   else:
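
Two small, self-contained illustrations of the new command and path assembly (the selector path and stack root shown are the stock HDP defaults; a cluster can override both through cluster-env):

    STACK_SELECT_PREFIX = 'ambari-python-wrap'
    stack_selector_path = "/usr/bin/hdp-select"   # default when cluster-env has no override
    stack_root = "/usr/hdp"                       # illustrative Script.get_stack_root() value

    # The tuple select() now builds and hands to Execute(..., sudo=True):
    command = (STACK_SELECT_PREFIX, stack_selector_path, "set", "oozie-server", "2.2.0.0-1234")
    print(" ".join(command))
    # ambari-python-wrap /usr/bin/hdp-select set oozie-server 2.2.0.0-1234

    # The hadoop dir templates now take the stack root as their first argument:
    HADOOP_DIR_TEMPLATE = "{0}/{1}/{2}/{3}"
    print(HADOOP_DIR_TEMPLATE.format(stack_root, "current", "hadoop-client", "bin"))
    # /usr/hdp/current/hadoop-client/bin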

http://git-wip-us.apache.org/repos/asf/ambari/blob/0f3bf8aa/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
new file mode 100644
index 0000000..4cd7c72
--- /dev/null
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+__all__ = ["get_stack_tool", "get_stack_tool_name", "get_stack_tool_path",
+           "get_stack_tool_package", "STACK_SELECTOR_NAME", "CONF_SELECTOR_NAME"]
+
+from resource_management.core.logger import Logger
+from resource_management.core.utils import pad
+import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
+
+STACK_SELECTOR_NAME = "stack_selector"
+CONF_SELECTOR_NAME = "conf_selector"
+
+# Format
+# SELECTOR_NAME : ( "tool-name", "tool-path", "tool-package" )
+_DEFAULT_STACK_TOOLS = {
+  STACK_SELECTOR_NAME: ("hdp-select", "/usr/bin/hdp-select", "hdp-select"),
+  CONF_SELECTOR_NAME: ("conf-select", "/usr/bin/conf-select", "conf-select")
+}
+
+def get_stack_tool(name):
+  """
+  Given a tool selector name, get the stack-specific tool name, tool path and tool package
+  :param name: tool selector name
+  :return: tool_name, tool_path, tool_package
+  """
+  from resource_management.libraries.functions.default import default
+  stack_tools_config = default("/configurations/cluster-env/stack_tools", None)
+  stack_tools = _DEFAULT_STACK_TOOLS
+  if stack_tools_config:
+    stack_tools = json.loads(stack_tools_config)
+
+  if name is None or name.lower() not in stack_tools:
+    Logger.warning("Cannot find config for {0} stack tool in {1}".format(str(name), str(stack_tools)))
+    return (None, None, None)
+
+  tool_config = stack_tools[name.lower()]
+
+  # Return a fixed-length (tool_name, tool_path, tool_package) tuple
+  return tuple(pad(tool_config[:3], 3))
+
+def get_stack_tool_name(name):
+  """
+  Given a tool selector name, get the stack-specific tool name
+  :param name: tool selector name
+  :return: tool_name
+  """
+  (tool_name, tool_path, tool_package) = get_stack_tool(name)
+  return tool_name
+
+
+def get_stack_tool_path(name):
+  """
+  Given a tool selector name, get the stack-specific tool path
+  :param name: tool selector name
+  :return: tool_path
+  """
+  (tool_name, tool_path, tool_package) = get_stack_tool(name)
+  return tool_path
+
+
+def get_stack_tool_package(name):
+  """
+  Given a tool selector name, get the stack-specific tool package
+  :param name: tool selector name
+  :return: tool_package
+  """
+  (tool_name, tool_path, tool_package) = get_stack_tool(name)
+  return tool_package
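
Inside a command, callers use stack_tools.get_stack_tool(stack_tools.STACK_SELECTOR_NAME); the snippet below only sketches the shape of the cluster-env "stack_tools" payload the function parses, with values mirroring the built-in defaults:

    import json

    stack_tools_config = '{"stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"], ' \
                         '"conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]}'

    stack_tools = json.loads(stack_tools_config)
    tool_name, tool_path, tool_package = tuple(stack_tools["stack_selector"][:3])
    print("%s %s %s" % (tool_name, tool_path, tool_package))
    # hdp-select /usr/bin/hdp-select hdp-select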

http://git-wip-us.apache.org/repos/asf/ambari/blob/0f3bf8aa/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py b/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py
index 95c5cba..05e6eb6 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py
@@ -25,6 +25,8 @@ import tempfile
 
 from resource_management.core.logger import Logger
 from resource_management.core import shell
+from resource_management.libraries.functions import stack_tools
+import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
 
 
 def get_component_version(stack_name, component_name):
@@ -48,10 +50,11 @@ def get_component_version(stack_name, component_name):
     tmpfile = tempfile.NamedTemporaryFile()
 
     get_stack_comp_version_cmd = ""
+    (stack_selector_name, stack_selector_path, stack_selector_package) = stack_tools.get_stack_tool(stack_tools.STACK_SELECTOR_NAME)
     try:
       # This is necessary because Ubuntu returns "stdin: is not a tty", see AMBARI-8088
       with open(tmpfile.name, 'r') as file:
-        get_stack_comp_version_cmd = '/usr/bin/hdp-select status %s > %s' % (component_name, tmpfile.name)
+        get_stack_comp_version_cmd = '%s status %s > %s' % (stack_selector_path, component_name, tmpfile.name)
         code, stdoutdata = shell.call(get_stack_comp_version_cmd)
         out = file.read()
 
@@ -62,7 +65,7 @@ def get_component_version(stack_name, component_name):
       matches = re.findall(r"([\d\.]+\-\d+)", out)
       version = matches[0] if matches and len(matches) > 0 else None
     except Exception, e:
-      Logger.error("Could not determine HDP version for component %s by calling '%s'. Return Code: %s, Output: %s." %
+      Logger.error("Could not determine stack version for component %s by calling '%s'. Return Code: %s, Output: %s." %
                    (component_name, get_stack_comp_version_cmd, str(code), str(out)))
   elif stack_name == "HDPWIN":
     pass
@@ -80,7 +83,7 @@ def get_component_version(stack_name, component_name):
 
 def get_versions_from_stack_root(stack_root):
   """
-  Given a stack install root (/usr/hdp), returns a list of stack versions currently installed.
+  Given a stack install root, returns a list of stack versions currently installed.
   The list of installed stack versions is determined purely based on the stack version directories
   found in the stack install root.
   Because each stack name may have different logic, the input is a generic dictionary.
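
The regex above is what extracts the build-qualified version from the selector's "status" output; a minimal standalone check with an illustrative output line:

    import re

    out = "hadoop-client - 2.3.0.0-1234"
    matches = re.findall(r"([\d\.]+\-\d+)", out)
    version = matches[0] if matches and len(matches) > 0 else None
    print(version)   # 2.3.0.0-1234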

http://git-wip-us.apache.org/repos/asf/ambari/blob/0f3bf8aa/ambari-common/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index 1b0116e..4cbf2d7 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -43,6 +43,7 @@ from resource_management.core.resources.packaging import Package
 from resource_management.libraries.functions.version_select_util import get_component_version
 from resource_management.libraries.functions.version import compare_versions
 from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions import stack_tools
 from resource_management.libraries.functions.constants import Direction
 from resource_management.libraries.functions import packages_analyzer
 from resource_management.libraries.script.config_dictionary import ConfigDictionary, UnknownConfiguration
@@ -70,7 +71,6 @@ USAGE = """Usage: {0} <COMMAND> <JSON_CONFIG> <BASEDIR> <STROUTPUT> <LOGGING_LEV
 """
 
 _PASSWORD_MAP = {"/configurations/cluster-env/hadoop.user.name":"/configurations/cluster-env/hadoop.user.password"}
-DISTRO_SELECT_PACKAGE_NAME = "hdp-select"
 STACK_VERSION_PLACEHOLDER = "${stack_version}"
 
 def get_path_from_configuration(name, configuration):
@@ -98,6 +98,7 @@ class Script(object):
   3 path to service metadata dir (Directory "package" inside service directory)
   4 path to file with structured command output (file will be created)
   """
+  config = None
   stack_version_from_distro_select = None
   structuredOut = {}
   command_data_file = ""
@@ -267,9 +268,11 @@ class Script(object):
       from resource_management.libraries.functions import stack_select
       Script.stack_version_from_distro_select = stack_select.get_stack_version_before_install(component_name)
       
-    # if hdp-select has not yet been done (situations like first install), we can use hdp-select version itself.
+    # If <stack-selector-tool> has not yet been done (situations like first install),
+    # we can use <stack-selector-tool> version itself.
     if not Script.stack_version_from_distro_select:
-      Script.stack_version_from_distro_select = packages_analyzer.getInstalledPackageVersion(DISTRO_SELECT_PACKAGE_NAME)
+      Script.stack_version_from_distro_select = packages_analyzer.getInstalledPackageVersion(
+              stack_tools.get_stack_tool_package(stack_tools.STACK_SELECTOR_NAME))
       
     return Script.stack_version_from_distro_select
   
@@ -323,7 +326,7 @@ class Script(object):
   @staticmethod
   def get_component_from_role(role_directory_map, default_role):
     """
-    Gets the /usr/hdp/current/<component> component given an Ambari role,
+    Gets the <stack-root>/current/<component> component given an Ambari role,
     such as DATANODE or HBASE_MASTER.
     :return:  the component name, such as hbase-master
     """
@@ -342,7 +345,17 @@ class Script(object):
     :return: a stack name or None
     """
     from resource_management.libraries.functions.default import default
-    return default("/hostLevelParams/stack_name", None)
+    return default("/hostLevelParams/stack_name", "HDP")
+
+  @staticmethod
+  def get_stack_root():
+    """
+    Get the stack-specific install root directory
+    :return: stack_root
+    """
+    from resource_management.libraries.functions.default import default
+    stack_name = Script.get_stack_name()
+    return default("/configurations/cluster-env/stack_root", "/usr/{0}".format(stack_name.lower()))
 
   @staticmethod
   def get_stack_version():
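
A minimal sketch of the fallback logic in the new get_stack_root() above: when cluster-env does not define stack_root, the root is derived from the lower-cased stack name (values below are illustrative):

    def fallback_stack_root(stack_name, configured_root=None):
        # Mirror of the default(...) fallback: prefer the configured root, else derive it.
        return configured_root or "/usr/{0}".format(stack_name.lower())

    print(fallback_stack_root("HDP"))                      # /usr/hdp
    print(fallback_stack_root("HDP", "/opt/custom/hdp"))   # /opt/custom/hdp
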