Posted to commits@ambari.apache.org by jo...@apache.org on 2017/07/08 04:06:44 UTC

[9/9] ambari git commit: AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)

AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f33a250c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f33a250c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f33a250c

Branch: refs/heads/trunk
Commit: f33a250c0e7624b6cbc0a11ffce12506eaa95d9a
Parents: a795f38
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Fri Jul 7 14:36:05 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Fri Jul 7 23:00:23 2017 -0400

----------------------------------------------------------------------
 .../libraries/functions/stack_features.py       |   13 +
 .../libraries/functions/stack_tools.py          |   39 +
 .../libraries/script/script.py                  |   19 +-
 .../server/api/query/JpaPredicateVisitor.java   |    8 +-
 .../controller/ActionExecutionContext.java      |   26 +
 .../controller/AmbariActionExecutionHelper.java |   26 +-
 .../BlueprintConfigurationProcessor.java        |   59 +-
 .../ClusterStackVersionResourceProvider.java    |  163 ++-
 .../ambari/server/state/ConfigHelper.java       |   32 +
 .../ambari/server/topology/AmbariContext.java   |   18 +
 .../server/upgrade/UpgradeCatalog252.java       |   61 +
 .../package/alerts/alert_hive_metastore.py      |   11 +-
 .../package/alerts/alert_llap_app_status.py     |   12 +-
 .../package/alerts/alert_check_oozie_server.py  |    8 +-
 .../resources/host_scripts/alert_disk_space.py  |   10 +-
 .../host_scripts/alert_version_select.py        |   16 +-
 .../HDP/2.0.6/configuration/cluster-env.xml     |   16 +-
 .../HDP/2.0.6/properties/stack_features.json    |  852 +++++------
 .../HDP/2.0.6/properties/stack_tools.json       |   16 +-
 .../PERF/1.0/configuration/cluster-env.xml      |   16 +-
 .../PERF/1.0/properties/stack_features.json     |   38 +-
 .../stacks/PERF/1.0/properties/stack_tools.json |   16 +-
 .../BlueprintConfigurationProcessorTest.java    |   41 +-
 ...ClusterStackVersionResourceProviderTest.java |    4 +-
 .../ClusterConfigurationRequestTest.java        |   60 +-
 .../common-services/configs/hawq_default.json   |    6 +-
 .../python/host_scripts/TestAlertDiskSpace.py   |   16 +-
 .../2.5/configs/ranger-admin-default.json       |  990 ++++++-------
 .../2.5/configs/ranger-admin-secured.json       | 1108 +++++++--------
 .../stacks/2.5/configs/ranger-kms-default.json  | 1158 +++++++--------
 .../stacks/2.5/configs/ranger-kms-secured.json  | 1320 +++++++++---------
 .../2.6/configs/ranger-admin-default.json       |  953 +++++++------
 .../2.6/configs/ranger-admin-secured.json       | 1066 +++++++-------
 .../src/test/python/stacks/utils/RMFTestCase.py |    8 +-
 34 files changed, 4353 insertions(+), 3852 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
index cbd32e7..576c138 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
@@ -43,6 +43,12 @@ def check_stack_feature(stack_feature, stack_version):
 
   from resource_management.libraries.functions.default import default
   from resource_management.libraries.functions.version import compare_versions
+
+  stack_name = default("/hostLevelParams/stack_name", None)
+  if stack_name is None:
+    Logger.warning("Cannot find the stack name in the command. Stack features cannot be loaded")
+    return False
+
   stack_features_config = default("/configurations/cluster-env/stack_features", None)
 
   if not stack_version:
@@ -51,6 +57,13 @@ def check_stack_feature(stack_feature, stack_version):
 
   if stack_features_config:
     data = json.loads(stack_features_config)
+
+    if stack_name not in data:
+      Logger.warning("Cannot find stack features for the stack named {0}".format(stack_name))
+      return False
+
+    data = data[stack_name]
+
     for feature in data["stack_features"]:
       if feature["name"] == stack_feature:
         if "min_version" in feature:

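For reference, a minimal sketch of the keyed format this change expects: the feature list in cluster-env/stack_features is now nested under the stack name. The entries below mirror the HDP stack_features.json shown later in this diff, and the lookup paraphrases the new guards in check_stack_feature (logging omitted):

import json

stack_features_config = json.dumps({
    "HDP": {
        "stack_features": [
            {"name": "rolling_upgrade", "min_version": "2.2.0.0"},
            {"name": "snappy", "min_version": "2.0.0.0", "max_version": "2.2.0.0"},
        ]
    }
})

def supports(stack_name, feature_name):
    data = json.loads(stack_features_config)
    if stack_name not in data:
        # mirrors the new guard: unknown stack name -> feature unsupported
        return False
    return any(f["name"] == feature_name
               for f in data[stack_name]["stack_features"])

print(supports("HDP", "rolling_upgrade"))   # True
print(supports("PERF", "rolling_upgrade"))  # False (stack not in the JSON)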
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
index 02ae62d..420ae11 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
@@ -39,15 +39,33 @@ def get_stack_tool(name):
   :return: tool_name, tool_path, tool_package
   """
   from resource_management.libraries.functions.default import default
+
+  stack_name = default("/hostLevelParams/stack_name", None)
+  if stack_name is None:
+    Logger.warning("Cannot find the stack name in the command. Stack tools cannot be loaded")
+    return (None, None, None)
+
   stack_tools = None
   stack_tools_config = default("/configurations/cluster-env/stack_tools", None)
   if stack_tools_config:
     stack_tools = json.loads(stack_tools_config)
 
+  if stack_tools is None:
+    Logger.warning("The stack tools could not be found in cluster-env")
+    return (None, None, None)
+
+  if stack_name not in stack_tools:
+    Logger.warning("Cannot find stack tools for the stack named {0}".format(stack_name))
+    return (None, None, None)
+
+  # load the stack tools keyed by the stack name
+  stack_tools = stack_tools[stack_name]
+
   if not stack_tools or not name or name.lower() not in stack_tools:
     Logger.warning("Cannot find config for {0} stack tool in {1}".format(str(name), str(stack_tools)))
     return (None, None, None)
 
+
   tool_config = stack_tools[name.lower()]
 
  # Return fixed length (tool_name, tool_path, tool_package) tuple
@@ -81,3 +99,24 @@ def get_stack_tool_package(name):
   """
   (tool_name, tool_path, tool_package) = get_stack_tool(name)
   return tool_package
+
+
+def get_stack_root(stack_name, stack_root_json):
+  """
+  Get the stack-specific install root directory from the raw, JSON-escaped properties.
+  :param stack_name:
+  :param stack_root_json:
+  :return: stack_root
+  """
+  from resource_management.libraries.functions.default import default
+
+  if stack_root_json is None:
+    return "/usr/{0}".format(stack_name.lower())
+
+  stack_root = json.loads(stack_root_json)
+
+  if stack_name not in stack_root:
+    Logger.warning("Cannot determine stack root for stack named {0}".format(stack_name))
+    return "/usr/{0}".format(stack_name.lower())
+
+  return stack_root[stack_name]

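Similarly, the stack_tools JSON is now keyed by stack name. A hedged sketch of the lookup get_stack_tool performs after this change (the selector values are taken from the HDP stack_tools.json later in this diff; warning logging and padding details are omitted):

import json

stack_tools_json = json.dumps({
    "HDP": {
        "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"],
        "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"],
    }
})

def get_stack_tool(stack_name, name):
    stack_tools = json.loads(stack_tools_json)
    if stack_name not in stack_tools or name.lower() not in stack_tools[stack_name]:
        return (None, None, None)
    tool_config = stack_tools[stack_name][name.lower()]
    # fixed-length (tool_name, tool_path, tool_package) tuple
    return tuple(tool_config[:3])

print(get_stack_tool("HDP", "stack_selector"))   # ('hdp-select', '/usr/bin/hdp-select', 'hdp-select')
print(get_stack_tool("PERF", "stack_selector"))  # (None, None, None)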
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-common/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index 2c56a13..2b374c5 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -597,7 +597,11 @@ class Script(object):
     :return: a stack name or None
     """
     from resource_management.libraries.functions.default import default
-    return default("/hostLevelParams/stack_name", "HDP")
+    stack_name = default("/hostLevelParams/stack_name", None)
+    if stack_name is None:
+      stack_name = default("/configurations/cluster-env/stack_name", "HDP")
+
+    return stack_name
 
   @staticmethod
   def get_stack_root():
@@ -607,7 +611,18 @@ class Script(object):
     """
     from resource_management.libraries.functions.default import default
     stack_name = Script.get_stack_name()
-    return default("/configurations/cluster-env/stack_root", "/usr/{0}".format(stack_name.lower()))
+    stack_root_json = default("/configurations/cluster-env/stack_root", None)
+
+    if stack_root_json is None:
+      return "/usr/{0}".format(stack_name.lower())
+
+    stack_root = json.loads(stack_root_json)
+
+    if stack_name not in stack_root:
+      Logger.warning("Cannot determine stack root for stack named {0}".format(stack_name))
+      return "/usr/{0}".format(stack_name.lower())
+
+    return stack_root[stack_name]
 
   @staticmethod
   def get_stack_version():

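A standalone paraphrase of the new Script.get_stack_root fallback behavior, with the warning logging omitted (the PERF value in the last call is illustrative only):

import json

def get_stack_root(stack_name, stack_root_json):
    # parse the keyed JSON; fall back to /usr/<stack_name> when the JSON
    # is missing or does not contain the requested stack name
    if stack_root_json is None:
        return "/usr/{0}".format(stack_name.lower())
    stack_root = json.loads(stack_root_json)
    return stack_root.get(stack_name, "/usr/{0}".format(stack_name.lower()))

print(get_stack_root("HDP", '{"HDP": "/usr/hdp"}'))   # /usr/hdp
print(get_stack_root("HDP", None))                    # /usr/hdp (fallback)
print(get_stack_root("PERF", '{"HDP": "/usr/hdp"}'))  # /usr/perf (fallback)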
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaPredicateVisitor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaPredicateVisitor.java b/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaPredicateVisitor.java
index 984dc3b..84e9dd9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaPredicateVisitor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/query/JpaPredicateVisitor.java
@@ -63,11 +63,6 @@ public abstract class JpaPredicateVisitor<T> implements PredicateVisitor {
   final private CriteriaQuery<T> m_query;
 
   /**
-   * The entity class that the root of the query is built from.
-   */
-  final private Class<T> m_entityClass;
-
-  /**
    * The last calculated predicate.
    */
   private javax.persistence.criteria.Predicate m_lastPredicate = null;
@@ -92,7 +87,6 @@ public abstract class JpaPredicateVisitor<T> implements PredicateVisitor {
   public JpaPredicateVisitor(EntityManager entityManager, Class<T> entityClass) {
     m_entityManager = entityManager;
     m_builder = m_entityManager.getCriteriaBuilder();
-    m_entityClass = entityClass;
     m_query = m_builder.createQuery(entityClass);
     m_root = m_query.from(entityClass);
   }
@@ -178,7 +172,7 @@ public abstract class JpaPredicateVisitor<T> implements PredicateVisitor {
     }
 
     String operator = predicate.getOperator();
-    Comparable<?> value = predicate.getValue();
+    Comparable value = predicate.getValue();
 
     // convert string to enum for proper JPA comparisons
     if (lastSingularAttribute != null) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
index 42a95c0..34d6db9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
@@ -27,6 +27,7 @@ import org.apache.ambari.server.actionmanager.TargetHostType;
 import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.controller.internal.RequestOperationLevel;
 import org.apache.ambari.server.controller.internal.RequestResourceFilter;
+import org.apache.ambari.server.state.StackId;
 
 /**
  * The context required to create tasks and stages for a custom action
@@ -43,6 +44,7 @@ public class ActionExecutionContext {
   private String expectedComponentName;
   private boolean hostsInMaintenanceModeExcluded = true;
   private boolean allowRetry = false;
+  private StackId stackId;
 
   private List<ExecutionCommandVisitor> m_visitors = new ArrayList<>();
 
@@ -173,6 +175,30 @@ public class ActionExecutionContext {
   }
 
   /**
+   * Gets the stack to use for generating stack-associated values for a command.
+   * In some cases the cluster's stack is not the correct one to use, such as
+   * when distributing a repository.
+   *
+   * @return the stackId the stack to use when generating stack-specific content
+   *         for the command.
+   */
+  public StackId getStackId() {
+    return stackId;
+  }
+
+  /**
+   * Sets the stack to use for generating stack-associated values for a command.
+   * In some cases the cluster's stack is not the correct one to use, such as
+   * when distributing a repository.
+   *
+   * @param stackId
+   *          the stackId to use for stack-based properties on the command.
+   */
+  public void setStackId(StackId stackId) {
+    this.stackId = stackId;
+  }
+
+  /**
    * Adds a command visitor that will be invoked after a command is created.  Provides access
    * to the command.
    *

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
index 8f522b0..391daa9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
@@ -465,7 +465,10 @@ public class AmbariActionExecutionHelper {
 
       if (StringUtils.isNotBlank(serviceName)) {
         Service service = cluster.getService(serviceName);
-        addRepoInfoToHostLevelParams(service.getDesiredRepositoryVersion(), hostLevelParams, hostName);
+        addRepoInfoToHostLevelParams(actionContext, service.getDesiredRepositoryVersion(),
+            hostLevelParams, hostName);
+      } else {
+        addRepoInfoToHostLevelParams(actionContext, null, hostLevelParams, hostName);
       }
 
 
@@ -529,9 +532,19 @@ public class AmbariActionExecutionHelper {
   *
   * */
 
-  private void addRepoInfoToHostLevelParams(RepositoryVersionEntity repositoryVersion,
-      Map<String, String> hostLevelParams, String hostName) throws AmbariException {
+  private void addRepoInfoToHostLevelParams(ActionExecutionContext actionContext,
+      RepositoryVersionEntity repositoryVersion, Map<String, String> hostLevelParams,
+      String hostName) throws AmbariException {
+
+    // if the repo is null, see if any values from the context should go on the
+    // host params and then return
     if (null == repositoryVersion) {
+      if (null != actionContext.getStackId()) {
+        StackId stackId = actionContext.getStackId();
+        hostLevelParams.put(STACK_NAME, stackId.getStackName());
+        hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
+      }
+
       return;
     }
 
@@ -557,7 +570,10 @@ public class AmbariActionExecutionHelper {
 
     hostLevelParams.put(REPO_INFO, rootJsonObject.toString());
 
-    hostLevelParams.put(STACK_NAME, repositoryVersion.getStackName());
-    hostLevelParams.put(STACK_VERSION, repositoryVersion.getStackVersion());
+    // set the host level params if not already set by whoever is creating this command
+    if (!hostLevelParams.containsKey(STACK_NAME) || !hostLevelParams.containsKey(STACK_VERSION)) {
+      hostLevelParams.put(STACK_NAME, repositoryVersion.getStackName());
+      hostLevelParams.put(STACK_VERSION, repositoryVersion.getStackVersion());
+    }
   }
 }

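These host-level params are what the agent-side Python above reads back through default("/hostLevelParams/stack_name", None). A rough sketch of the command shape produced when only the action context carries a stack (all values hypothetical):

# Hypothetical execution command as serialized for the agent; only the
# shape of hostLevelParams matters here.
command = {
    "hostLevelParams": {
        "stack_name": "HDP",
        "stack_version": "2.6",
    }
}

# agent-side equivalent of default("/hostLevelParams/stack_name", None)
stack_name = command.get("hostLevelParams", {}).get("stack_name")
print(stack_name)  # HDP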
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
index e93b2f7..37284be 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
@@ -36,7 +36,9 @@ import java.util.regex.Pattern;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.PropertyDependencyInfo;
+import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.ValueAttributesInfo;
 import org.apache.ambari.server.topology.AdvisedConfiguration;
 import org.apache.ambari.server.topology.Blueprint;
@@ -356,7 +358,7 @@ public class BlueprintConfigurationProcessor {
             final String originalValue = typeMap.get(propertyName);
             final String updatedValue =
               updater.updateForClusterCreate(propertyName, originalValue, clusterProps, clusterTopology);
-            
+
             if(updatedValue == null ) {
               continue;
             }
@@ -419,6 +421,7 @@ public class BlueprintConfigurationProcessor {
     }
 
     // Explicitly set any properties that are required but not currently provided in the stack definition.
+    setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
     setRetryConfiguration(clusterConfig, configTypesUpdated);
     setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
     addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
@@ -531,7 +534,7 @@ public class BlueprintConfigurationProcessor {
     try {
       String clusterName = clusterTopology.getAmbariContext().getClusterName(clusterTopology.getClusterId());
       Cluster cluster = clusterTopology.getAmbariContext().getController().getClusters().getCluster(clusterName);
-      authToLocalPerClusterMap = new HashMap<Long, Set<String>>();
+      authToLocalPerClusterMap = new HashMap<>();
       authToLocalPerClusterMap.put(Long.valueOf(clusterTopology.getClusterId()), clusterTopology.getAmbariContext().getController().getKerberosHelper().getKerberosDescriptor(cluster).getAllAuthToLocalProperties());
       } catch (AmbariException e) {
         LOG.error("Error while getting authToLocal properties. ", e);
@@ -2186,8 +2189,9 @@ public class BlueprintConfigurationProcessor {
       StringBuilder sb = new StringBuilder();
 
       Matcher m = REGEX_IN_BRACKETS.matcher(origValue);
-      if (m.matches())
+      if (m.matches()) {
         origValue = m.group("INNER");
+      }
 
       if (origValue != null) {
         sb.append("[");
@@ -2195,8 +2199,9 @@ public class BlueprintConfigurationProcessor {
         for (String value : origValue.split(",")) {
 
           m = REGEX_IN_QUOTES.matcher(value);
-          if (m.matches())
+          if (m.matches()) {
             value = m.group("INNER");
+          }
 
           if (!isFirst) {
             sb.append(",");
@@ -2230,6 +2235,7 @@ public class BlueprintConfigurationProcessor {
    */
   private static class OriginalValuePropertyUpdater implements PropertyUpdater {
 
+    @Override
     public String updateForClusterCreate(String propertyName,
                                          String origValue,
                                          Map<String, Map<String, String>> properties,
@@ -2950,6 +2956,49 @@ public class BlueprintConfigurationProcessor {
 
 
   /**
+   * Sets the read-only properties for stack features & tools, overriding
+   * anything provided in the blueprint.
+   *
+   * @param configuration
+   *          the configuration to update with values from the stack.
+   * @param configTypesUpdated
+   *          the list of configuration types updated (cluster-env will be added
+   *          to this).
+   * @throws ConfigurationTopologyException
+   */
+  private void setStackToolsAndFeatures(Configuration configuration, Set<String> configTypesUpdated)
+      throws ConfigurationTopologyException {
+    ConfigHelper configHelper = clusterTopology.getAmbariContext().getConfigHelper();
+    Stack stack = clusterTopology.getBlueprint().getStack();
+    String stackName = stack.getName();
+    String stackVersion = stack.getVersion();
+
+    StackId stackId = new StackId(stackName, stackVersion);
+
+    Set<String> properties = Sets.newHashSet(ConfigHelper.CLUSTER_ENV_STACK_NAME_PROPERTY,
+        ConfigHelper.CLUSTER_ENV_STACK_ROOT_PROPERTY, ConfigHelper.CLUSTER_ENV_STACK_TOOLS_PROPERTY,
+        ConfigHelper.CLUSTER_ENV_STACK_FEATURES_PROPERTY);
+
+    try {
+      Map<String, Map<String, String>> defaultStackProperties = configHelper.getDefaultStackProperties(stackId);
+      Map<String,String> clusterEnvDefaultProperties = defaultStackProperties.get(CLUSTER_ENV_CONFIG_TYPE_NAME);
+
+      for( String property : properties ){
+        if (defaultStackProperties.containsKey(property)) {
+          configuration.setProperty(CLUSTER_ENV_CONFIG_TYPE_NAME, property,
+              clusterEnvDefaultProperties.get(property));
+
+          // make sure to include the configuration type as being updated
+          configTypesUpdated.add(CLUSTER_ENV_CONFIG_TYPE_NAME);
+        }
+      }
+    } catch( AmbariException ambariException ){
+      throw new ConfigurationTopologyException("Unable to retrieve the stack tools and features",
+          ambariException);
+    }
+  }
+
+  /**
    * Ensure that the specified property exists.
    * If not, set a default value.
    *
@@ -3099,7 +3148,7 @@ public class BlueprintConfigurationProcessor {
 
     @Override
     public boolean isPropertyIncluded(String propertyName, String propertyValue, String configType, ClusterTopology topology) {
-      return !(this.propertyConfigType.equals(configType) &&
+      return !(propertyConfigType.equals(configType) &&
              this.propertyName.equals(propertyName));
     }
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
index 93c02be..c4fce8a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
@@ -67,11 +67,13 @@ import org.apache.ambari.server.security.authorization.RoleAuthorization;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.ComponentInfo;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.RepositoryType;
 import org.apache.ambari.server.state.RepositoryVersionState;
-import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.ServiceOsSpecific;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.repository.VersionDefinitionXml;
 import org.apache.ambari.server.state.stack.upgrade.RepositoryVersionHelper;
@@ -83,6 +85,7 @@ import org.apache.commons.lang.math.NumberUtils;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
+import com.google.gson.Gson;
 import com.google.inject.Inject;
 import com.google.inject.Provider;
 import com.google.inject.persist.Transactional;
@@ -171,12 +174,20 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
   @Inject
   private static RepositoryVersionHelper repoVersionHelper;
 
-
+  @Inject
+  private static Gson gson;
 
   @Inject
   private static Provider<Clusters> clusters;
 
   /**
+   * Used for updating the existing stack tools with those of the stack being
+   * distributed.
+   */
+  @Inject
+  private static Provider<ConfigHelper> configHelperProvider;
+
+  /**
    * Constructor.
    */
   public ClusterStackVersionResourceProvider(
@@ -287,8 +298,6 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
 
     String clName;
     final String desiredRepoVersion;
-    String stackName;
-    String stackVersion;
 
     Map<String, Object> propertyMap = iterator.next();
 
@@ -327,30 +336,30 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
           cluster.getClusterName(), entity.getDirection().getText(false)));
     }
 
-    Set<StackId> stackIds = new HashSet<>();
-    if (propertyMap.containsKey(CLUSTER_STACK_VERSION_STACK_PROPERTY_ID) &&
-            propertyMap.containsKey(CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID)) {
-      stackName = (String) propertyMap.get(CLUSTER_STACK_VERSION_STACK_PROPERTY_ID);
-      stackVersion = (String) propertyMap.get(CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID);
-      StackId stackId = new StackId(stackName, stackVersion);
-      if (! ami.isSupportedStack(stackName, stackVersion)) {
-        throw new NoSuchParentResourceException(String.format("Stack %s is not supported",
-                stackId));
-      }
-      stackIds.add(stackId);
-    } else { // Using stack that is current for cluster
-      for (Service service : cluster.getServices().values()) {
-        stackIds.add(service.getDesiredStackId());
-      }
+    String stackName = (String) propertyMap.get(CLUSTER_STACK_VERSION_STACK_PROPERTY_ID);
+    String stackVersion = (String) propertyMap.get(CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID);
+    if (StringUtils.isBlank(stackName) || StringUtils.isBlank(stackVersion)) {
+      String message = String.format(
+          "Both the %s and %s properties are required when distributing a new stack",
+          CLUSTER_STACK_VERSION_STACK_PROPERTY_ID, CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID);
+
+      throw new SystemException(message);
     }
 
-    if (stackIds.size() > 1) {
-      throw new SystemException("Could not determine stack to add out of " + StringUtils.join(stackIds, ','));
+    StackId stackId = new StackId(stackName, stackVersion);
+
+    if (!ami.isSupportedStack(stackName, stackVersion)) {
+      throw new NoSuchParentResourceException(String.format("Stack %s is not supported", stackId));
     }
 
-    StackId stackId = stackIds.iterator().next();
-    stackName = stackId.getStackName();
-    stackVersion = stackId.getStackVersion();
+    // bootstrap the stack tools if necessary for the stack which is being
+    // distributed
+    try {
+      bootstrapStackTools(stackId, cluster);
+    } catch (AmbariException ambariException) {
+      throw new SystemException("Unable to modify stack tools for new stack being distributed",
+          ambariException);
+    }
 
     RepositoryVersionEntity repoVersionEntity = repositoryVersionDAO.findByStackAndVersion(
         stackId, desiredRepoVersion);
@@ -580,6 +589,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
     }
 
     // determine packages for all services that are installed on host
+    List<ServiceOsSpecific.Package> packages = new ArrayList<>();
     Set<String> servicesOnHost = new HashSet<>();
     List<ServiceComponentHost> components = cluster.getServiceComponentHosts(host.getHostName());
     for (ServiceComponentHost component : components) {
@@ -600,16 +610,15 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
     RequestResourceFilter filter = new RequestResourceFilter(null, null,
             Collections.singletonList(host.getHostName()));
 
-    ActionExecutionContext actionContext = new ActionExecutionContext(
-            cluster.getClusterName(), INSTALL_PACKAGES_ACTION,
-            Collections.singletonList(filter),
-            roleParams);
+    ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
+        INSTALL_PACKAGES_ACTION, Collections.singletonList(filter), roleParams);
+
+    actionContext.setStackId(stackId);
     actionContext.setTimeout(Short.valueOf(configuration.getDefaultAgentTaskTimeout(true)));
 
     repoVersionHelper.addCommandRepository(actionContext, osFamily, repoVersion, repoInfo);
 
     return actionContext;
-
   }
 
 
@@ -698,4 +707,100 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
   }
 
 
+  /**
+   * Ensures that the stack tools and stack features are set on
+   * {@link ConfigHelper#CLUSTER_ENV} for the stack of the repository being
+   * distributed. This step ensures that the new repository can be distributed
+   * with the correct tools.
+   * <p/>
+   * If the cluster's current stack name matches that of the new stack or the
+   * new stack's tools are already added in the configuration, then this method
+   * will not change anything.
+   *
+   * @param stackId
+   *          the stack of the repository being distributed (not {@code null}).
+   * @param cluster
+   *          the cluster the new stack/repo is being distributed for (not
+   *          {@code null}).
+   * @throws AmbariException
+   */
+  private void bootstrapStackTools(StackId stackId, Cluster cluster) throws AmbariException {
+    // if the stack name is the same as the cluster's current stack name, then
+    // there's no work to do
+    if (StringUtils.equals(stackId.getStackName(),
+        cluster.getCurrentStackVersion().getStackName())) {
+      return;
+    }
+
+    ConfigHelper configHelper = configHelperProvider.get();
+
+    // get the stack tools/features for the stack being distributed
+    Map<String, Map<String, String>> defaultStackConfigurationsByType = configHelper.getDefaultStackProperties(stackId);
+
+    Map<String, String> clusterEnvDefaults = defaultStackConfigurationsByType.get(
+        ConfigHelper.CLUSTER_ENV);
+
+    Config clusterEnv = cluster.getDesiredConfigByType(ConfigHelper.CLUSTER_ENV);
+    Map<String, String> clusterEnvProperties = clusterEnv.getProperties();
+
+    // the 3 properties we need to check and update
+    Set<String> properties = Sets.newHashSet(ConfigHelper.CLUSTER_ENV_STACK_ROOT_PROPERTY,
+        ConfigHelper.CLUSTER_ENV_STACK_TOOLS_PROPERTY,
+        ConfigHelper.CLUSTER_ENV_STACK_FEATURES_PROPERTY);
+
+    // any updates are stored here and merged into the existing config type
+    Map<String, String> updatedProperties = new HashMap<>();
+
+    for (String property : properties) {
+      // determine if the property exists in the stack being distributed (it
+      // kind of has to, but we'll be safe if it's not found)
+      String newStackDefaultJson = clusterEnvDefaults.get(property);
+      if (StringUtils.isBlank(newStackDefaultJson)) {
+        continue;
+      }
+
+      String existingPropertyJson = clusterEnvProperties.get(property);
+
+      // if the stack tools/features property doesn't exist, then just set the
+      // one from the new stack
+      if (StringUtils.isBlank(existingPropertyJson)) {
+        updatedProperties.put(property, newStackDefaultJson);
+        continue;
+      }
+
+      // now is the hard part - we need to check to see if the new stack tools
+      // exists alongside the current tools and if it doesn't, then add the new
+      // tools in
+      final Map<String, Object> existingJson;
+      final Map<String, ?> newStackJsonAsObject;
+      if (StringUtils.equals(property, ConfigHelper.CLUSTER_ENV_STACK_ROOT_PROPERTY)) {
+        existingJson = gson.<Map<String, Object>> fromJson(existingPropertyJson, Map.class);
+        newStackJsonAsObject = gson.<Map<String, String>> fromJson(newStackDefaultJson, Map.class);
+      } else {
+        existingJson = gson.<Map<String, Object>> fromJson(existingPropertyJson,
+            Map.class);
+
+        newStackJsonAsObject = gson.<Map<String, Map<Object, Object>>> fromJson(newStackDefaultJson,
+            Map.class);
+      }
+
+      if (existingJson.keySet().contains(stackId.getStackName())) {
+        continue;
+      }
+
+      existingJson.put(stackId.getStackName(), newStackJsonAsObject.get(stackId.getStackName()));
+
+      String newJson = gson.toJson(existingJson);
+      updatedProperties.put(property, newJson);
+    }
+
+    if (!updatedProperties.isEmpty()) {
+      AmbariManagementController amc = getManagementController();
+      String serviceNote = String.format(
+          "Adding stack tools for %s while distributing a new repository", stackId.toString());
+
+      configHelper.updateConfigType(cluster, stackId, amc, clusterEnv.getType(), updatedProperties,
+          null, amc.getAuthName(), serviceNote);
+    }
+  }
 }

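The merge that bootstrapStackTools performs is easiest to see on the JSON values themselves. A minimal Python sketch under assumed values (the OTHER stack name and distro-select paths are purely illustrative):

import json

# existing cluster-env/stack_tools, keyed by the cluster's current stack
existing = json.loads('{"HDP": {"stack_selector": '
                      '["hdp-select", "/usr/bin/hdp-select", "hdp-select"]}}')

# defaults for the stack being distributed (illustrative values)
new_defaults = json.loads('{"OTHER": {"stack_selector": '
                          '["distro-select", "/usr/bin/distro-select", "distro-select"]}}')

new_stack = "OTHER"
if new_stack not in existing:
    # same decision as bootstrapStackTools: add the new stack's entry
    # alongside the current one rather than replacing the whole value
    existing[new_stack] = new_defaults[new_stack]

print(json.dumps(existing))  # HDP and OTHER entries now co-exist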
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
index 9f75bf9..a3a676d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
@@ -88,8 +88,10 @@ public class ConfigHelper {
   public static final String CLUSTER_ENV_RETRY_COMMANDS = "commands_to_retry";
   public static final String CLUSTER_ENV_RETRY_MAX_TIME_IN_SEC = "command_retry_max_time_in_sec";
   public static final String COMMAND_RETRY_MAX_TIME_IN_SEC_DEFAULT = "600";
+  public static final String CLUSTER_ENV_STACK_NAME_PROPERTY = "stack_name";
   public static final String CLUSTER_ENV_STACK_FEATURES_PROPERTY = "stack_features";
   public static final String CLUSTER_ENV_STACK_TOOLS_PROPERTY = "stack_tools";
+  public static final String CLUSTER_ENV_STACK_ROOT_PROPERTY = "stack_root";
 
   public static final String HTTP_ONLY = "HTTP_ONLY";
   public static final String HTTPS_ONLY = "HTTPS_ONLY";
@@ -1148,6 +1150,36 @@ public class ConfigHelper {
    *
    * @param stack
    *          the stack to pull stack-values from (not {@code null})
+   * @return a mapping of configuration type to map of key/value pairs for the
+   *         default configurations.
+   * @throws AmbariException
+   */
+  public Map<String, Map<String, String>> getDefaultStackProperties(StackId stack)
+      throws AmbariException {
+    Map<String, Map<String, String>> defaultPropertiesByType = new HashMap<>();
+
+    // populate the stack (non-service related) properties
+    Set<org.apache.ambari.server.state.PropertyInfo> stackConfigurationProperties = ambariMetaInfo.getStackProperties(
+        stack.getStackName(), stack.getStackVersion());
+
+    for (PropertyInfo stackDefaultProperty : stackConfigurationProperties) {
+      String type = ConfigHelper.fileNameToConfigType(stackDefaultProperty.getFilename());
+
+      if (!defaultPropertiesByType.containsKey(type)) {
+        defaultPropertiesByType.put(type, new HashMap<String, String>());
+      }
+
+      defaultPropertiesByType.get(type).put(stackDefaultProperty.getName(),
+          stackDefaultProperty.getValue());
+    }
+
+    return defaultPropertiesByType;
+  }
+
+  /**
+   *
+   * @param stack
+   *          the stack to pull stack-values from (not {@code null})
    * @param serviceName
    *          the service name {@code null}).
    * @return a mapping of configuration type to map of key/value pairs for the

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
index 0467b9b..9b64edc 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
@@ -69,6 +69,7 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigFactory;
+import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.SecurityType;
@@ -80,6 +81,7 @@ import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Sets;
+import com.google.inject.Provider;
 
 
 /**
@@ -100,6 +102,12 @@ public class AmbariContext {
   @Inject
   ConfigFactory configFactory;
 
+  /**
+   * Used for getting configuration property values from stack and services.
+   */
+  @Inject
+  private Provider<ConfigHelper> configHelper;
+
   private static AmbariManagementController controller;
   private static ClusterController clusterController;
   //todo: task id's.  Use existing mechanism for getting next task id sequence
@@ -674,6 +682,16 @@ public class AmbariContext {
     return String.format("%s:%s", bpName, hostGroupName);
   }
 
+  /**
+   * Gets an instance of {@link ConfigHelper} for classes which are not
+   * dependency injected.
+   *
+   * @return a {@link ConfigHelper} instance.
+   */
+  public ConfigHelper getConfigHelper() {
+    return configHelper.get();
+  }
+
   private synchronized HostResourceProvider getHostResourceProvider() {
     if (hostResourceProvider == null) {
       hostResourceProvider = (HostResourceProvider)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
index 74f8f35..fa3aea3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
@@ -18,10 +18,20 @@
 package org.apache.ambari.server.upgrade;
 
 import java.sql.SQLException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigHelper;
+import org.apache.ambari.server.state.PropertyInfo;
+import org.apache.hadoop.metrics2.sink.relocated.commons.lang.StringUtils;
 
+import com.google.common.collect.Sets;
 import com.google.inject.Inject;
 import com.google.inject.Injector;
 
@@ -33,6 +43,8 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
   static final String CLUSTERCONFIG_TABLE = "clusterconfig";
   static final String SERVICE_DELETED_COLUMN = "service_deleted";
 
+  private static final String CLUSTER_ENV = "cluster-env";
+
   /**
    * Constructor.
    *
@@ -79,6 +91,7 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
    */
   @Override
   protected void executeDMLUpdates() throws AmbariException, SQLException {
+    resetStackToolsAndFeatures();
   }
 
   /**
@@ -91,4 +104,52 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
     dbAccessor.addColumn(CLUSTERCONFIG_TABLE,
         new DBColumnInfo(SERVICE_DELETED_COLUMN, Short.class, null, 0, false));
   }
+
+  /**
+   * Resets the following properties in {@code cluster-env} to their new
+   * defaults:
+   * <ul>
+   * <li>stack_root
+   * <li>stack_tools
+   * <li>stack_features
+   * <ul>
+   *
+   * @throws AmbariException
+   */
+  private void resetStackToolsAndFeatures() throws AmbariException {
+    Set<String> propertiesToReset = Sets.newHashSet("stack_tools", "stack_features", "stack_root");
+
+    Clusters clusters = injector.getInstance(Clusters.class);
+    ConfigHelper configHelper = injector.getInstance(ConfigHelper.class);
+
+    Map<String, Cluster> clusterMap = clusters.getClusters();
+    for (Cluster cluster : clusterMap.values()) {
+      Config clusterEnv = cluster.getDesiredConfigByType(CLUSTER_ENV);
+      if (null == clusterEnv) {
+        continue;
+      }
+
+      Map<String, String> newStackProperties = new HashMap<>();
+      Set<PropertyInfo> stackProperties = configHelper.getStackProperties(cluster);
+      if (null == stackProperties) {
+        continue;
+      }
+
+      for (PropertyInfo propertyInfo : stackProperties) {
+        String fileName = propertyInfo.getFilename();
+        if (StringUtils.isEmpty(fileName)) {
+          continue;
+        }
+
+        if (StringUtils.equals(ConfigHelper.fileNameToConfigType(fileName), CLUSTER_ENV)) {
+          String stackPropertyName = propertyInfo.getName();
+          if (propertiesToReset.contains(stackPropertyName)) {
+            newStackProperties.put(stackPropertyName, propertyInfo.getValue());
+          }
+        }
+      }
+
+      updateConfigurationPropertiesForCluster(cluster, CLUSTER_ENV, newStackProperties, true, false);
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py
index 32df7d3..5b4fd68 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py
@@ -27,6 +27,7 @@ import logging
 from resource_management.core import global_lock
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions import stack_tools
 from resource_management.core.resources import Execute
 from resource_management.core.signal_utils import TerminateStrategy
 from ambari_commons.os_check import OSConst
@@ -56,6 +57,7 @@ SMOKEUSER_PRINCIPAL_DEFAULT = 'ambari-qa@EXAMPLE.COM'
 SMOKEUSER_SCRIPT_PARAM_KEY = 'default.smoke.user'
 SMOKEUSER_DEFAULT = 'ambari-qa'
 
+STACK_NAME = '{{cluster-env/stack_name}}'
 STACK_ROOT = '{{cluster-env/stack_root}}'
 
 HIVE_CONF_DIR_LEGACY = '/etc/hive/conf.server'
@@ -78,7 +80,7 @@ def get_tokens():
   """
   return (SECURITY_ENABLED_KEY,SMOKEUSER_KEYTAB_KEY,SMOKEUSER_PRINCIPAL_KEY,
     HIVE_METASTORE_URIS_KEY, SMOKEUSER_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY,
-    STACK_ROOT)
+    STACK_NAME, STACK_ROOT)
 
 @OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
 def get_tokens():
@@ -175,9 +177,10 @@ def execute(configurations={}, parameters={}, host_name=None):
     bin_dir = HIVE_BIN_DIR_LEGACY
 
 
-    if STACK_ROOT in configurations:
-      hive_conf_dir = configurations[STACK_ROOT] + format("/current/hive-metastore/conf")
-      hive_bin_dir = configurations[STACK_ROOT] + format("/current/hive-metastore/bin")
+    if STACK_NAME in configurations and STACK_ROOT in configurations:
+      stack_root = stack_tools.get_stack_root(configurations[STACK_NAME], configurations[STACK_ROOT])
+      hive_conf_dir = stack_root + format("/current/hive-metastore/conf")
+      hive_bin_dir = stack_root + format("/current/hive-metastore/bin")
 
       if os.path.exists(hive_conf_dir):
         conf_dir = hive_conf_dir

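This is the alert-side pattern repeated through the remaining alert scripts in this diff: resolve the root from the stack_name/stack_root token pair instead of using the raw stack_root string. A self-contained sketch (the get_stack_root stand-in approximates the library function added above):

import json

STACK_NAME = '{{cluster-env/stack_name}}'
STACK_ROOT = '{{cluster-env/stack_root}}'

# hypothetical alert configurations; stack_root is now JSON keyed by stack name
configurations = {
    STACK_NAME: 'HDP',
    STACK_ROOT: '{"HDP": "/usr/hdp"}',
}

def get_stack_root(stack_name, stack_root_json):
    # stand-in for resource_management.libraries.functions.stack_tools.get_stack_root
    roots = json.loads(stack_root_json)
    return roots.get(stack_name, '/usr/{0}'.format(stack_name.lower()))

if STACK_NAME in configurations and STACK_ROOT in configurations:
    stack_root = get_stack_root(configurations[STACK_NAME], configurations[STACK_ROOT])
    print(stack_root + '/current/hive-metastore/conf')  # /usr/hdp/current/hive-metastore/conf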
http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py
index 98d1899..e46c896 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py
@@ -26,7 +26,7 @@ import subprocess
 
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions import get_kinit_path
-from ambari_commons.os_check import OSConst
+from resource_management.libraries.functions import stack_tools
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from resource_management.core import shell
 from resource_management.core.resources import Execute
@@ -58,6 +58,7 @@ HIVE_AUTHENTICATION_DEFAULT = 'NOSASL'
 HIVE_USER_KEY = '{{hive-env/hive_user}}'
 HIVE_USER_DEFAULT = 'default.smoke.user'
 
+STACK_NAME = '{{cluster-env/stack_name}}'
 STACK_ROOT = '{{cluster-env/stack_root}}'
 STACK_ROOT_DEFAULT = Script.get_stack_root()
 
@@ -88,7 +89,7 @@ def get_tokens():
   to build the dictionary passed into execute
   """
   return (SECURITY_ENABLED_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY, HIVE_PRINCIPAL_KEY, HIVE_PRINCIPAL_KEYTAB_KEY,
-          HIVE_USER_KEY, STACK_ROOT, LLAP_APP_NAME_KEY)
+          HIVE_USER_KEY, STACK_NAME, STACK_ROOT, LLAP_APP_NAME_KEY)
 
 
 @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
@@ -159,8 +160,11 @@ def execute(configurations={}, parameters={}, host_name=None):
 
 
     start_time = time.time()
-    if STACK_ROOT in configurations:
-      llap_status_cmd = configurations[STACK_ROOT] + format("/current/hive-server2-hive2/bin/hive --service llapstatus --name {llap_app_name}  --findAppTimeout {LLAP_APP_STATUS_CMD_TIMEOUT}")
+    if STACK_NAME in configurations and STACK_ROOT in configurations:
+      stack_root = stack_tools.get_stack_root(configurations[STACK_NAME],
+        configurations[STACK_ROOT])
+
+      llap_status_cmd = stack_root + format("/current/hive-server2-hive2/bin/hive --service llapstatus --name {llap_app_name}  --findAppTimeout {LLAP_APP_STATUS_CMD_TIMEOUT}")
     else:
       llap_status_cmd = STACK_ROOT_DEFAULT + format("/current/hive-server2-hive2/bin/hive --service llapstatus --name {llap_app_name} --findAppTimeout {LLAP_APP_STATUS_CMD_TIMEOUT}")
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py
index 0e9fe74..54eef18 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py
@@ -26,6 +26,7 @@ from resource_management.core.resources import Execute
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.functions import get_klist_path
+from resource_management.libraries.functions import stack_tools
 from ambari_commons.os_check import OSConst, OSCheck
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from urlparse import urlparse
@@ -66,6 +67,7 @@ USER_PRINCIPAL_DEFAULT = 'oozie@EXAMPLE.COM'
 # default user
 USER_DEFAULT = 'oozie'
 
+STACK_NAME_KEY = '{{cluster-env/stack_name}}'
 STACK_ROOT_KEY = '{{cluster-env/stack_root}}'
 STACK_ROOT_DEFAULT = '/usr/hdp'
 
@@ -86,7 +88,7 @@ def get_tokens():
   to build the dictionary passed into execute
   """
   return (OOZIE_URL_KEY, USER_PRINCIPAL_KEY, SECURITY_ENABLED, USER_KEYTAB_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY,
-          USER_KEY, OOZIE_HTTPS_PORT, OOZIE_ENV_CONTENT, STACK_ROOT_KEY)
+          USER_KEY, OOZIE_HTTPS_PORT, OOZIE_ENV_CONTENT, STACK_NAME_KEY, STACK_ROOT_KEY)
 
 @OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
 def get_check_command(oozie_url, host_name, configurations):
@@ -158,8 +160,8 @@ def get_check_command(oozie_url, host_name, configurations, parameters, only_kin
 
   # Configure stack root
   stack_root = STACK_ROOT_DEFAULT
-  if STACK_ROOT_KEY in configurations:
-    stack_root = configurations[STACK_ROOT_KEY].lower()
+  if STACK_NAME_KEY in configurations and STACK_ROOT_KEY in configurations:
+    stack_root = stack_tools.get_stack_root(configurations[STACK_NAME_KEY], configurations[STACK_ROOT_KEY]).lower()
 
   # oozie configuration directory using a symlink
   oozie_config_directory = OOZIE_CONF_DIR.replace(STACK_ROOT_PATTERN, stack_root)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/host_scripts/alert_disk_space.py b/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
index 4c5834f..f3c6406 100644
--- a/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
+++ b/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
@@ -23,6 +23,7 @@ import os
 import platform
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from ambari_commons import OSConst
+from resource_management.libraries.functions import stack_tools
 
 DiskInfo = collections.namedtuple('DiskInfo', 'total used free path')
 
@@ -36,6 +37,7 @@ MIN_FREE_SPACE_DEFAULT = 5000000000L
 PERCENT_USED_WARNING_DEFAULT = 50
 PERCENT_USED_CRITICAL_DEFAULT = 80
 
+STACK_NAME = '{{cluster-env/stack_name}}'
 STACK_ROOT = '{{cluster-env/stack_root}}'
 
 def get_tokens():
@@ -43,7 +45,7 @@ def get_tokens():
   Returns a tuple of tokens in the format {{site/property}} that will be used
   to build the dictionary passed into execute
   """
-  return (STACK_ROOT, )
+  return (STACK_NAME, STACK_ROOT)
 
 
 @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
@@ -64,10 +66,10 @@ def execute(configurations={}, parameters={}, host_name=None):
   if configurations is None:
     return (('UNKNOWN', ['There were no configurations supplied to the script.']))
 
-  if not STACK_ROOT in configurations:
-    return (('STACK_ROOT', ['cluster-env/stack_root is not specified']))
+  if not STACK_NAME in configurations or not STACK_ROOT in configurations:
+    return (('STACK_ROOT', ['cluster-env/stack_name and cluster-env/stack_root are required']))
 
-  path = configurations[STACK_ROOT]
+  path = stack_tools.get_stack_root(configurations[STACK_NAME], configurations[STACK_ROOT])
 
   try:
     disk_usage = _get_disk_usage(path)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/host_scripts/alert_version_select.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/host_scripts/alert_version_select.py b/ambari-server/src/main/resources/host_scripts/alert_version_select.py
index 0ce79e7..f54ccad 100644
--- a/ambari-server/src/main/resources/host_scripts/alert_version_select.py
+++ b/ambari-server/src/main/resources/host_scripts/alert_version_select.py
@@ -31,6 +31,7 @@ RESULT_STATE_WARNING = 'WARNING'
 RESULT_STATE_CRITICAL = 'CRITICAL'
 RESULT_STATE_UNKNOWN = 'UNKNOWN'
 
+STACK_NAME = '{{cluster-env/stack_name}}'
 STACK_TOOLS = '{{cluster-env/stack_tools}}'
 
 
@@ -42,7 +43,7 @@ def get_tokens():
   Returns a tuple of tokens in the format {{site/property}} that will be used
   to build the dictionary passed into execute
   """
-  return (STACK_TOOLS,)
+  return (STACK_NAME, STACK_TOOLS)
 
 
 def execute(configurations={}, parameters={}, host_name=None):
@@ -65,8 +66,10 @@ def execute(configurations={}, parameters={}, host_name=None):
     if STACK_TOOLS not in configurations:
       return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(STACK_TOOLS)])
 
+    stack_name = Script.get_stack_name()
+
     # Of the form,
-    # { "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"], "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"] }
+    # { "HDP" : { "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"], "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"] } }
     stack_tools_str = configurations[STACK_TOOLS]
 
     if stack_tools_str is None:
@@ -75,6 +78,7 @@ def execute(configurations={}, parameters={}, host_name=None):
     distro_select = "unknown-distro-select"
     try:
       stack_tools = json.loads(stack_tools_str)
+      stack_tools = stack_tools[stack_name]
       distro_select = stack_tools["stack_selector"][0]
     except:
       pass
@@ -87,18 +91,18 @@ def execute(configurations={}, parameters={}, host_name=None):
       (code, out, versions) = unsafe_get_stack_versions()
 
       if code == 0:
-        msg.append("Ok. {0}".format(distro_select))
+        msg.append("{0} ".format(distro_select))
         if versions is not None and type(versions) is list and len(versions) > 0:
-          msg.append("Versions: {0}".format(", ".join(versions)))
+          msg.append("reported the following versions: {0}".format(", ".join(versions)))
         return (RESULT_STATE_OK, ["\n".join(msg)])
       else:
-        msg.append("Failed, check dir {0} for unexpected contents.".format(stack_root_dir))
+        msg.append("{0} could not properly read {1}. Check this directory for unexpected contents.".format(distro_select, stack_root_dir))
         if out is not None:
           msg.append(out)
 
         return (RESULT_STATE_CRITICAL, ["\n".join(msg)])
     else:
-      msg.append("Ok. No stack root {0} to check.".format(stack_root_dir))
+      msg.append("No stack root {0} to check.".format(stack_root_dir))
       return (RESULT_STATE_OK, ["\n".join(msg)])
   except Exception, e:
     return (RESULT_STATE_CRITICAL, [e.message])

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
index f7d5de5..e6ec285 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
@@ -220,6 +220,18 @@ gpgcheck=0</value>
     </value-attributes>
     <on-ambari-upgrade add="true"/>
   </property>
+  <!-- Define stack_name property in the base stack. DO NOT override this property for each stack version -->
+  <property>
+    <name>stack_name</name>
+    <value>HDP</value>
+    <description>The name of the stack.</description>
+    <value-attributes>
+      <read-only>true</read-only>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
   <!-- Define stack_tools property in the base stack. DO NOT override this property for each stack version -->
   <property>
     <name>stack_tools</name>
@@ -252,8 +264,8 @@ gpgcheck=0</value>
   </property>
   <property>
     <name>stack_root</name>
-    <value>/usr/hdp</value>
-    <description>Stack root folder</description>
+    <value>{"HDP":"/usr/hdp"}</value>
+    <description>JSON which defines the stack root by stack name</description>
     <value-attributes>
       <read-only>true</read-only>
       <overridable>false</overridable>
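
Note for consumers of stack_root: the value is no longer a bare path, so
code reading it is expected to parse JSON and index by stack name. A
minimal sketch against the HDP value above:

  import json

  stack_root_json = '{"HDP":"/usr/hdp"}'  # new stack_root format
  stack_root = json.loads(stack_root_json)["HDP"]
  assert stack_root == "/usr/hdp"  # same path the old plain value carried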

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
index 878645b..31cf0c8 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
@@ -1,427 +1,429 @@
 {
-  "stack_features": [
-    {
-      "name": "snappy",
-      "description": "Snappy compressor/decompressor support",
-      "min_version": "2.0.0.0",
-      "max_version": "2.2.0.0"
-    },
-    {
-      "name": "lzo",
-      "description": "LZO libraries support",
-      "min_version": "2.2.1.0"
-    },
-    {
-      "name": "express_upgrade",
-      "description": "Express upgrade support",
-      "min_version": "2.1.0.0"
-    },
-    {
-      "name": "rolling_upgrade",
-      "description": "Rolling upgrade support",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "kafka_acl_migration_support",
-      "description": "ACL migration support",
-      "min_version": "2.3.4.0"
-    },
-    {
-      "name": "secure_zookeeper",
-      "description": "Protect ZNodes with SASL acl in secure clusters",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "config_versioning",
-      "description": "Configurable versions support",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "datanode_non_root",
-      "description": "DataNode running as non-root support (AMBARI-7615)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "remove_ranger_hdfs_plugin_env",
-      "description": "HDFS removes Ranger env files (AMBARI-14299)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "ranger",
-      "description": "Ranger Service support",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "ranger_tagsync_component",
-      "description": "Ranger Tagsync component support (AMBARI-14383)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "phoenix",
-      "description": "Phoenix Service support",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "nfs",
-      "description": "NFS support",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "tez_for_spark",
-      "description": "Tez dependency for Spark",
-      "min_version": "2.2.0.0",
-      "max_version": "2.3.0.0"
-    },
-    {
-      "name": "timeline_state_store",
-      "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "copy_tarball_to_hdfs",
-      "description": "Copy tarball to HDFS support (AMBARI-12113)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "spark_16plus",
-      "description": "Spark 1.6+",
-      "min_version": "2.4.0.0"
-    },
-    {
-      "name": "spark_thriftserver",
-      "description": "Spark Thrift Server",
-      "min_version": "2.3.2.0"
-    },
-    {
-      "name": "storm_kerberos",
-      "description": "Storm Kerberos support (AMBARI-7570)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "storm_ams",
-      "description": "Storm AMS integration (AMBARI-10710)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "create_kafka_broker_id",
-      "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
-      "min_version": "2.2.0.0",
-      "max_version": "2.3.0.0"
-    },
-    {
-      "name": "kafka_listeners",
-      "description": "Kafka listeners (AMBARI-10984)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "kafka_kerberos",
-      "description": "Kafka Kerberos support (AMBARI-10984)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "pig_on_tez",
-      "description": "Pig on Tez support (AMBARI-7863)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "ranger_usersync_non_root",
-      "description": "Ranger Usersync as non-root user (AMBARI-10416)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "ranger_audit_db_support",
-      "description": "Ranger Audit to DB support",
-      "min_version": "2.2.0.0",
-      "max_version": "2.4.99.99"
-    },
-    {
-      "name": "accumulo_kerberos_user_auth",
-      "description": "Accumulo Kerberos User Auth (AMBARI-10163)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "knox_versioned_data_dir",
-      "description": "Use versioned data dir for Knox (AMBARI-13164)",
-      "min_version": "2.3.2.0"
-    },
-    {
-      "name": "knox_sso_topology",
-      "description": "Knox SSO Topology support (AMBARI-13975)",
-      "min_version": "2.3.8.0"
-    },
-    {
-      "name": "atlas_rolling_upgrade",
-      "description": "Rolling upgrade support for Atlas",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "oozie_admin_user",
-      "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "oozie_create_hive_tez_configs",
-      "description": "Oozie create configs for Ambari Hive and Tez deployments (AMBARI-8074)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "oozie_setup_shared_lib",
-      "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "oozie_host_kerberos",
-      "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
-      "min_version": "2.0.0.0"
-    },
-    {
-      "name": "falcon_extensions",
-      "description": "Falcon Extension",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "hive_metastore_upgrade_schema",
-      "description": "Hive metastore upgrade schema support (AMBARI-11176)",
-      "min_version": "2.3.0.0"
-     },
-    {
-      "name": "hive_server_interactive",
-      "description": "Hive server interactive support (AMBARI-15573)",
-      "min_version": "2.5.0.0"
-     },
-    {
-      "name": "hive_webhcat_specific_configs",
-      "description": "Hive webhcat specific configurations support (AMBARI-12364)",
-      "min_version": "2.3.0.0"
-     },
-    {
-      "name": "hive_purge_table",
-      "description": "Hive purge table support (AMBARI-12260)",
-      "min_version": "2.3.0.0"
-     },
-    {
-      "name": "hive_server2_kerberized_env",
-      "description": "Hive server2 working on kerberized environment (AMBARI-13749)",
-      "min_version": "2.2.3.0",
-      "max_version": "2.2.5.0"
-     },
-    {
-      "name": "hive_env_heapsize",
-      "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "ranger_kms_hsm_support",
-      "description": "Ranger KMS HSM support (AMBARI-15752)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_log4j_support",
-      "description": "Ranger supporting log-4j properties (AMBARI-15681)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_kerberos_support",
-      "description": "Ranger Kerberos support",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "hive_metastore_site_support",
-      "description": "Hive Metastore site support",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_usersync_password_jceks",
-      "description": "Saving Ranger Usersync credentials in jceks",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_install_infra_client",
-      "description": "Ambari Infra Service support",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "falcon_atlas_support_2_3",
-      "description": "Falcon Atlas integration support for 2.3 stack",
-      "min_version": "2.3.99.0",
-      "max_version": "2.4.0.0"
-    },
-    {
-      "name": "falcon_atlas_support",
-      "description": "Falcon Atlas integration",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "hbase_home_directory",
-      "description": "Hbase home directory in HDFS needed for HBASE backup",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "spark_livy",
-      "description": "Livy as slave component of spark",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "spark_livy2",
-      "description": "Livy as slave component of spark",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "atlas_ranger_plugin_support",
-      "description": "Atlas Ranger plugin support",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "atlas_conf_dir_in_path",
-      "description": "Prepend the Atlas conf dir (/etc/atlas/conf) to the classpath of Storm and Falcon",
-      "min_version": "2.3.0.0",
-      "max_version": "2.4.99.99"
-    },
-    {
-      "name": "atlas_upgrade_support",
-      "description": "Atlas supports express and rolling upgrades",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "atlas_hook_support",
-      "description": "Atlas support for hooks in Hive, Storm, Falcon, and Sqoop",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_pid_support",
-      "description": "Ranger Service support pid generation AMBARI-16756",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_kms_pid_support",
-      "description": "Ranger KMS Service support pid generation",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_admin_password_change",
-      "description": "Allow ranger admin credentials to be specified during cluster creation (AMBARI-17000)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_setup_db_on_start",
-      "description": "Allows setup of ranger db and java patches to be called multiple times on each START",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "storm_metrics_apache_classes",
-      "description": "Metrics sink for Storm that uses Apache class names",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "spark_java_opts_support",
-      "description": "Allow Spark to generate java-opts file",
-      "min_version": "2.2.0.0",
-      "max_version": "2.4.0.0"
-    },
-    {
-      "name": "atlas_hbase_setup",
-      "description": "Use script to create Atlas tables in Hbase and set permissions for Atlas user.",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_hive_plugin_jdbc_url",
-      "description": "Handle Ranger hive repo config jdbc url change for stack 2.5 (AMBARI-18386)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "zkfc_version_advertised",
-      "description": "ZKFC advertise version",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "phoenix_core_hdfs_site_required",
-      "description": "HDFS and CORE site required for Phoenix",
-      "max_version": "2.5.9.9"
-    },
-    {
-      "name": "ranger_tagsync_ssl_xml_support",
-      "description": "Ranger Tagsync ssl xml support.",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "ranger_xml_configuration",
-      "description": "Ranger code base support xml configurations",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "kafka_ranger_plugin_support",
-      "description": "Ambari stack changes for Ranger Kafka Plugin (AMBARI-11299)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "yarn_ranger_plugin_support",
-      "description": "Implement Stack changes for Ranger Yarn Plugin integration (AMBARI-10866)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "ranger_solr_config_support",
-      "description": "Showing Ranger solrconfig.xml on UI",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "hive_interactive_atlas_hook_required",
-      "description": "Registering Atlas Hook for Hive Interactive.",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "core_site_for_ranger_plugins",
-      "description": "Adding core-site.xml in when Ranger plugin is enabled for Storm, Kafka, and Knox.",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "atlas_install_hook_package_support",
-      "description": "Stop installing packages from 2.6",
-      "max_version": "2.5.9.9"
-    },
-    {
-      "name": "atlas_hdfs_site_on_namenode_ha",
-      "description": "Need to create hdfs-site under atlas-conf dir when Namenode-HA is enabled.",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "hive_interactive_ga",
-      "description": "Hive Interactive GA support",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "secure_ranger_ssl_password",
-      "description": "Securing Ranger Admin and Usersync SSL and Trustore related passwords in jceks",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "ranger_kms_ssl",
-      "description": "Ranger KMS SSL properties in ambari stack",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "nifi_encrypt_config",
-      "description": "Encrypt sensitive properties written to nifi property file",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "toolkit_config_update",
-      "description": "Support separate input and output for toolkit configuration",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "admin_toolkit_support",
-      "description": "Supports the nifi admin toolkit",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "tls_toolkit_san",
-      "description": "Support subject alternative name flag",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "nifi_jaas_conf_create",
-      "description": "Create NIFI jaas configuration when kerberos is enabled",
-      "min_version": "2.6.0.0"
-    }
-  ]
+  "HDP": {
+    "stack_features": [
+      {
+        "name": "snappy",
+        "description": "Snappy compressor/decompressor support",
+        "min_version": "2.0.0.0",
+        "max_version": "2.2.0.0"
+      },
+      {
+        "name": "lzo",
+        "description": "LZO libraries support",
+        "min_version": "2.2.1.0"
+      },
+      {
+        "name": "express_upgrade",
+        "description": "Express upgrade support",
+        "min_version": "2.1.0.0"
+      },
+      {
+        "name": "rolling_upgrade",
+        "description": "Rolling upgrade support",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "kafka_acl_migration_support",
+        "description": "ACL migration support",
+        "min_version": "2.3.4.0"
+      },
+      {
+        "name": "secure_zookeeper",
+        "description": "Protect ZNodes with SASL acl in secure clusters",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "config_versioning",
+        "description": "Configurable versions support",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "datanode_non_root",
+        "description": "DataNode running as non-root support (AMBARI-7615)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "remove_ranger_hdfs_plugin_env",
+        "description": "HDFS removes Ranger env files (AMBARI-14299)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "ranger",
+        "description": "Ranger Service support",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "ranger_tagsync_component",
+        "description": "Ranger Tagsync component support (AMBARI-14383)",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "phoenix",
+        "description": "Phoenix Service support",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "nfs",
+        "description": "NFS support",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "tez_for_spark",
+        "description": "Tez dependency for Spark",
+        "min_version": "2.2.0.0",
+        "max_version": "2.3.0.0"
+      },
+      {
+        "name": "timeline_state_store",
+        "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "copy_tarball_to_hdfs",
+        "description": "Copy tarball to HDFS support (AMBARI-12113)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "spark_16plus",
+        "description": "Spark 1.6+",
+        "min_version": "2.4.0.0"
+      },
+      {
+        "name": "spark_thriftserver",
+        "description": "Spark Thrift Server",
+        "min_version": "2.3.2.0"
+      },
+      {
+        "name": "storm_kerberos",
+        "description": "Storm Kerberos support (AMBARI-7570)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "storm_ams",
+        "description": "Storm AMS integration (AMBARI-10710)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "create_kafka_broker_id",
+        "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
+        "min_version": "2.2.0.0",
+        "max_version": "2.3.0.0"
+      },
+      {
+        "name": "kafka_listeners",
+        "description": "Kafka listeners (AMBARI-10984)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "kafka_kerberos",
+        "description": "Kafka Kerberos support (AMBARI-10984)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "pig_on_tez",
+        "description": "Pig on Tez support (AMBARI-7863)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "ranger_usersync_non_root",
+        "description": "Ranger Usersync as non-root user (AMBARI-10416)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "ranger_audit_db_support",
+        "description": "Ranger Audit to DB support",
+        "min_version": "2.2.0.0",
+        "max_version": "2.4.99.99"
+      },
+      {
+        "name": "accumulo_kerberos_user_auth",
+        "description": "Accumulo Kerberos User Auth (AMBARI-10163)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "knox_versioned_data_dir",
+        "description": "Use versioned data dir for Knox (AMBARI-13164)",
+        "min_version": "2.3.2.0"
+      },
+      {
+        "name": "knox_sso_topology",
+        "description": "Knox SSO Topology support (AMBARI-13975)",
+        "min_version": "2.3.8.0"
+      },
+      {
+        "name": "atlas_rolling_upgrade",
+        "description": "Rolling upgrade support for Atlas",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "oozie_admin_user",
+        "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "oozie_create_hive_tez_configs",
+        "description": "Oozie create configs for Ambari Hive and Tez deployments (AMBARI-8074)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "oozie_setup_shared_lib",
+        "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "oozie_host_kerberos",
+        "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
+        "min_version": "2.0.0.0"
+      },
+      {
+        "name": "falcon_extensions",
+        "description": "Falcon Extension",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "hive_metastore_upgrade_schema",
+        "description": "Hive metastore upgrade schema support (AMBARI-11176)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "hive_server_interactive",
+        "description": "Hive server interactive support (AMBARI-15573)",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "hive_webhcat_specific_configs",
+        "description": "Hive webhcat specific configurations support (AMBARI-12364)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "hive_purge_table",
+        "description": "Hive purge table support (AMBARI-12260)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "hive_server2_kerberized_env",
+        "description": "Hive server2 working on kerberized environment (AMBARI-13749)",
+        "min_version": "2.2.3.0",
+        "max_version": "2.2.5.0"
+      },
+      {
+        "name": "hive_env_heapsize",
+        "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
+        "min_version": "2.2.0.0"
+      },
+      {
+        "name": "ranger_kms_hsm_support",
+        "description": "Ranger KMS HSM support (AMBARI-15752)",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_log4j_support",
+        "description": "Ranger supporting log-4j properties (AMBARI-15681)",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_kerberos_support",
+        "description": "Ranger Kerberos support",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "hive_metastore_site_support",
+        "description": "Hive Metastore site support",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_usersync_password_jceks",
+        "description": "Saving Ranger Usersync credentials in jceks",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_install_infra_client",
+        "description": "Ambari Infra Service support",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "falcon_atlas_support_2_3",
+        "description": "Falcon Atlas integration support for 2.3 stack",
+        "min_version": "2.3.99.0",
+        "max_version": "2.4.0.0"
+      },
+      {
+        "name": "falcon_atlas_support",
+        "description": "Falcon Atlas integration",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "hbase_home_directory",
+        "description": "Hbase home directory in HDFS needed for HBASE backup",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "spark_livy",
+        "description": "Livy as slave component of spark",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "spark_livy2",
+        "description": "Livy as slave component of spark",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "atlas_ranger_plugin_support",
+        "description": "Atlas Ranger plugin support",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "atlas_conf_dir_in_path",
+        "description": "Prepend the Atlas conf dir (/etc/atlas/conf) to the classpath of Storm and Falcon",
+        "min_version": "2.3.0.0",
+        "max_version": "2.4.99.99"
+      },
+      {
+        "name": "atlas_upgrade_support",
+        "description": "Atlas supports express and rolling upgrades",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "atlas_hook_support",
+        "description": "Atlas support for hooks in Hive, Storm, Falcon, and Sqoop",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_pid_support",
+        "description": "Ranger Service support pid generation AMBARI-16756",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_kms_pid_support",
+        "description": "Ranger KMS Service support pid generation",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_admin_password_change",
+        "description": "Allow ranger admin credentials to be specified during cluster creation (AMBARI-17000)",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_setup_db_on_start",
+        "description": "Allows setup of ranger db and java patches to be called multiple times on each START",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "storm_metrics_apache_classes",
+        "description": "Metrics sink for Storm that uses Apache class names",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "spark_java_opts_support",
+        "description": "Allow Spark to generate java-opts file",
+        "min_version": "2.2.0.0",
+        "max_version": "2.4.0.0"
+      },
+      {
+        "name": "atlas_hbase_setup",
+        "description": "Use script to create Atlas tables in Hbase and set permissions for Atlas user.",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "ranger_hive_plugin_jdbc_url",
+        "description": "Handle Ranger hive repo config jdbc url change for stack 2.5 (AMBARI-18386)",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "zkfc_version_advertised",
+        "description": "ZKFC advertise version",
+        "min_version": "2.5.0.0"
+      },
+      {
+        "name": "phoenix_core_hdfs_site_required",
+        "description": "HDFS and CORE site required for Phoenix",
+        "max_version": "2.5.9.9"
+      },
+      {
+        "name": "ranger_tagsync_ssl_xml_support",
+        "description": "Ranger Tagsync ssl xml support.",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "ranger_xml_configuration",
+        "description": "Ranger code base support xml configurations",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "kafka_ranger_plugin_support",
+        "description": "Ambari stack changes for Ranger Kafka Plugin (AMBARI-11299)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "yarn_ranger_plugin_support",
+        "description": "Implement Stack changes for Ranger Yarn Plugin integration (AMBARI-10866)",
+        "min_version": "2.3.0.0"
+      },
+      {
+        "name": "ranger_solr_config_support",
+        "description": "Showing Ranger solrconfig.xml on UI",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "hive_interactive_atlas_hook_required",
+        "description": "Registering Atlas Hook for Hive Interactive.",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "core_site_for_ranger_plugins",
+        "description": "Adding core-site.xml in when Ranger plugin is enabled for Storm, Kafka, and Knox.",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "atlas_install_hook_package_support",
+        "description": "Stop installing packages from 2.6",
+        "max_version": "2.5.9.9"
+      },
+      {
+        "name": "atlas_hdfs_site_on_namenode_ha",
+        "description": "Need to create hdfs-site under atlas-conf dir when Namenode-HA is enabled.",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "hive_interactive_ga",
+        "description": "Hive Interactive GA support",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "secure_ranger_ssl_password",
+        "description": "Securing Ranger Admin and Usersync SSL and Trustore related passwords in jceks",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "ranger_kms_ssl",
+        "description": "Ranger KMS SSL properties in ambari stack",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "nifi_encrypt_config",
+        "description": "Encrypt sensitive properties written to nifi property file",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "toolkit_config_update",
+        "description": "Support separate input and output for toolkit configuration",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "admin_toolkit_support",
+        "description": "Supports the nifi admin toolkit",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "tls_toolkit_san",
+        "description": "Support subject alternative name flag",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "nifi_jaas_conf_create",
+        "description": "Create NIFI jaas configuration when kerberos is enabled",
+        "min_version": "2.6.0.0"
+      }
+    ]
+  }
 }
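
The feature entries themselves are unchanged; only the top-level nesting
under the stack name is new. A sketch of how a lookup against the nested
layout could work (this helper is illustrative, not Ambari's actual
stack_features.py API; the tuple conversion stands in for a real version
comparator, and the upper bound is treated as exclusive):

  import json

  def _v(version):
      # "2.3.0.0" -> (2, 3, 0, 0) so versions compare numerically
      return tuple(int(part) for part in version.split("."))

  def is_feature_supported(features_json, stack_name, feature_name, version):
      features = json.loads(features_json)[stack_name]["stack_features"]
      for feature in features:
          if feature["name"] != feature_name:
              continue
          if "min_version" in feature and _v(version) < _v(feature["min_version"]):
              return False
          if "max_version" in feature and _v(version) >= _v(feature["max_version"]):
              return False
          return True
      return False

For example, is_feature_supported(json_str, "HDP", "rolling_upgrade",
"2.4.0.0") returns True for the data above, while a feature name absent
from the list returns False.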

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json
index d1aab4b..c515d57 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json
@@ -1,4 +1,14 @@
 {
-  "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"],
-  "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]
-}
\ No newline at end of file
+  "HDP": {
+    "stack_selector": [
+      "hdp-select",
+      "/usr/bin/hdp-select",
+      "hdp-select"
+    ],
+    "conf_selector": [
+      "conf-select",
+      "/usr/bin/conf-select",
+      "conf-select"
+    ]
+  }
+}
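
Keying stack_tools.json by stack name is the point of this change:
selector definitions for more than one stack can now co-exist in a single
merged structure. A hypothetical illustration (the second stack and its
selector values are invented for this example):

  combined = {
      "HDP": {"stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"]},
      "ACME": {"stack_selector": ["acme-select", "/usr/bin/acme-select", "acme-select"]},  # hypothetical
  }
  for name in sorted(combined):
      print("%s -> %s" % (name, combined[name]["stack_selector"][1]))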

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
index 7df00ee..f19ac52 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
@@ -20,6 +20,18 @@
  */
 -->
 <configuration>
+  <!-- Define stack_name property in the base stack. DO NOT override this property for each stack version -->
+  <property>
+    <name>stack_name</name>
+    <value>PERF</value>
+    <description>The name of the stack.</description>
+    <value-attributes>
+      <read-only>true</read-only>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
 
   <!-- Define stack_tools property in the base stack. DO NOT override this property for each stack version -->
   <property>
@@ -55,8 +67,8 @@
 
   <property>
     <name>stack_root</name>
-    <value>/usr/perf</value>
-    <description>Stack root folder</description>
+    <value>{"PERF":"/usr/perf"}</value>
+    <description>JSON which defines the stack root by stack name</description>
     <value-attributes>
       <read-only>true</read-only>
       <overridable>false</overridable>

http://git-wip-us.apache.org/repos/asf/ambari/blob/f33a250c/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_features.json b/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_features.json
index e9e0ed2..839e8e6 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_features.json
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/properties/stack_features.json
@@ -1,19 +1,21 @@
 {
-  "stack_features": [
-    {
-      "name": "rolling_upgrade",
-      "description": "Rolling upgrade support",
-      "min_version": "1.0.0.0"
-    },
-    {
-      "name": "secure_zookeeper",
-      "description": "Protect ZNodes with SASL acl in secure clusters",
-      "min_version": "2.6.0.0"
-    },
-    {
-      "name": "config_versioning",
-      "description": "Configurable versions support",
-      "min_version": "1.0.0.0"
-    }
-  ]
-}
+  "PERF": {
+    "stack_features": [
+      {
+        "name": "rolling_upgrade",
+        "description": "Rolling upgrade support",
+        "min_version": "1.0.0.0"
+      },
+      {
+        "name": "secure_zookeeper",
+        "description": "Protect ZNodes with SASL acl in secure clusters",
+        "min_version": "2.6.0.0"
+      },
+      {
+        "name": "config_versioning",
+        "description": "Configurable versions support",
+        "min_version": "1.0.0.0"
+      }
+    ]
+  }
+}
\ No newline at end of file