Posted to commits@ambari.apache.org by jo...@apache.org on 2017/07/08 00:43:34 UTC

[10/10] ambari git commit: AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)

AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6a3bfd5d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6a3bfd5d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6a3bfd5d

Branch: refs/heads/branch-feature-AMBARI-21348
Commit: 6a3bfd5d03b5c6752f9b84c7c6ad530372622090
Parents: b6c5d78
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Fri Jul 7 14:36:05 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Fri Jul 7 20:40:56 2017 -0400

----------------------------------------------------------------------
 .../libraries/functions/stack_features.py       |   13 +
 .../libraries/functions/stack_tools.py          |   39 +
 .../libraries/script/script.py                  |   19 +-
 .../actionmanager/ExecutionCommandWrapper.java  |   20 +-
 .../controller/ActionExecutionContext.java      |   26 +
 .../controller/AmbariActionExecutionHelper.java |   21 +-
 .../BlueprintConfigurationProcessor.java        |  232 +--
 .../ClusterStackVersionResourceProvider.java    |  180 ++-
 .../internal/UpgradeResourceProvider.java       |   50 +-
 .../ambari/server/state/ConfigHelper.java       |   47 +-
 .../ambari/server/topology/AmbariContext.java   |   36 +-
 .../server/upgrade/UpgradeCatalog252.java       |   63 +-
 .../package/alerts/alert_hive_metastore.py      |   11 +-
 .../package/alerts/alert_llap_app_status.py     |   12 +-
 .../package/alerts/alert_check_oozie_server.py  |    8 +-
 .../resources/host_scripts/alert_disk_space.py  |   10 +-
 .../host_scripts/alert_version_select.py        |   16 +-
 .../4.0/configuration/cluster-env.xml           |   19 +-
 .../4.0/properties/stack_features.json          |  422 +++---
 .../BigInsights/4.0/properties/stack_tools.json |   14 +-
 .../HDP/2.0.6/configuration/cluster-env.xml     |   16 +-
 .../HDP/2.0.6/properties/stack_features.json    |  852 +++++------
 .../HDP/2.0.6/properties/stack_tools.json       |   16 +-
 .../PERF/1.0/configuration/cluster-env.xml      |   16 +-
 .../PERF/1.0/properties/stack_features.json     |   38 +-
 .../stacks/PERF/1.0/properties/stack_tools.json |   16 +-
 .../BlueprintConfigurationProcessorTest.java    |   38 +-
 ...ClusterStackVersionResourceProviderTest.java |  276 ++--
 .../ClusterConfigurationRequestTest.java        |  113 +-
 .../common-services/configs/hawq_default.json   |    6 +-
 .../python/host_scripts/TestAlertDiskSpace.py   |   16 +-
 .../2.5/configs/ranger-admin-default.json       |  990 ++++++-------
 .../2.5/configs/ranger-admin-secured.json       | 1108 +++++++--------
 .../stacks/2.5/configs/ranger-kms-default.json  | 1158 +++++++--------
 .../stacks/2.5/configs/ranger-kms-secured.json  | 1320 +++++++++---------
 .../2.6/configs/ranger-admin-default.json       |  953 +++++++------
 .../2.6/configs/ranger-admin-secured.json       | 1066 +++++++-------
 .../src/test/python/stacks/utils/RMFTestCase.py |    8 +-
 38 files changed, 4923 insertions(+), 4341 deletions(-)
----------------------------------------------------------------------
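
The heart of the change: the stack_tools, stack_features, and stack_root
properties in cluster-env become JSON objects keyed by stack name, so that
definitions for more than one stack (HDP, BigInsights, PERF) can co-exist in
a single cluster configuration. As an illustrative sketch only (the
authoritative entries live in each stack's stack_tools.json; the selector
names shown here are assumptions), the new keyed shape looks like:

    {
      "HDP": {
        "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"],
        "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]
      },
      "BigInsights": {
        "stack_selector": ["iop-select", "/usr/bin/iop-select", "iop-select"],
        "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]
      }
    }

Every consumer changed below follows the same pattern: read the stack name
off the command, index into the keyed JSON, and fall back with a logged
warning when no entry exists for that stack.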


http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
index 2b3df5f..7811e26 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
@@ -42,6 +42,12 @@ def check_stack_feature(stack_feature, stack_version):
 
   from resource_management.libraries.functions.default import default
   from resource_management.libraries.functions.version import compare_versions
+
+  stack_name = default("/hostLevelParams/stack_name", None)
+  if stack_name is None:
+    Logger.warning("Cannot find the stack name in the command. Stack features cannot be loaded")
+    return False
+
   stack_features_config = default("/configurations/cluster-env/stack_features", None)
 
   if not stack_version:
@@ -50,6 +56,13 @@ def check_stack_feature(stack_feature, stack_version):
 
   if stack_features_config:
     data = json.loads(stack_features_config)
+
+    if stack_name not in data:
+      Logger.warning("Cannot find stack features for the stack named {0}".format(stack_name))
+      return False
+
+    data = data[stack_name]
+
     for feature in data["stack_features"]:
       if feature["name"] == stack_feature:
         if "min_version" in feature:

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
index 02ae62d..420ae11 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
@@ -39,15 +39,33 @@ def get_stack_tool(name):
   :return: tool_name, tool_path, tool_package
   """
   from resource_management.libraries.functions.default import default
+
+  stack_name = default("/hostLevelParams/stack_name", None)
+  if stack_name is None:
+    Logger.warning("Cannot find the stack name in the command. Stack tools cannot be loaded")
+    return (None, None, None)
+
   stack_tools = None
   stack_tools_config = default("/configurations/cluster-env/stack_tools", None)
   if stack_tools_config:
     stack_tools = json.loads(stack_tools_config)
 
+  if stack_tools is None:
+    Logger.warning("The stack tools could not be found in cluster-env")
+    return (None, None, None)
+
+  if stack_name not in stack_tools:
+    Logger.warning("Cannot find stack tools for the stack named {0}".format(stack_name))
+    return (None, None, None)
+
+  # load the stack tools keyed by the stack name
+  stack_tools = stack_tools[stack_name]
+
   if not stack_tools or not name or name.lower() not in stack_tools:
     Logger.warning("Cannot find config for {0} stack tool in {1}".format(str(name), str(stack_tools)))
     return (None, None, None)
 
+
   tool_config = stack_tools[name.lower()]
 
   # Return fixed length (tool_name, tool_path, tool_package) tuple
@@ -81,3 +99,24 @@ def get_stack_tool_package(name):
   """
   (tool_name, tool_path, tool_package) = get_stack_tool(name)
   return tool_package
+
+
+def get_stack_root(stack_name, stack_root_json):
+  """
+  Get the stack-specific install root directory from the raw, JSON-escaped properties.
+  :param stack_name:
+  :param stack_root_json:
+  :return: stack_root
+  """
+  from resource_management.libraries.functions.default import default
+
+  if stack_root_json is None:
+    return "/usr/{0}".format(stack_name.lower())
+
+  stack_root = json.loads(stack_root_json)
+
+  if stack_name not in stack_root:
+    Logger.warning("Cannot determine stack root for stack named {0}".format(stack_name))
+    return "/usr/{0}".format(stack_name.lower())
+
+  return stack_root[stack_name]
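
The fallback chain of get_stack_root(), exercised in isolation (the warning
is omitted here and the values are illustrative):

    import json

    def stack_root_for(stack_name, stack_root_json):
        # Default to /usr/<stack-name-lowercased> when the keyed JSON is
        # absent or has no entry for this stack.
        if stack_root_json is None:
            return "/usr/{0}".format(stack_name.lower())
        stack_root = json.loads(stack_root_json)
        return stack_root.get(stack_name, "/usr/{0}".format(stack_name.lower()))

    print(stack_root_for("HDP", None))                    # /usr/hdp
    print(stack_root_for("HDP", '{"HDP": "/usr/hdp"}'))   # /usr/hdp
    print(stack_root_for("PERF", '{"HDP": "/usr/hdp"}'))  # /usr/perf (fallback)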

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-common/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index 04928de..0df6900 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -553,7 +553,11 @@ class Script(object):
     :return: a stack name or None
     """
     from resource_management.libraries.functions.default import default
-    return default("/hostLevelParams/stack_name", "HDP")
+    stack_name = default("/hostLevelParams/stack_name", None)
+    if stack_name is None:
+      stack_name = default("/configurations/cluster-env/stack_name", "HDP")
+
+    return stack_name
 
   @staticmethod
   def get_stack_root():
@@ -563,7 +567,18 @@ class Script(object):
     """
     from resource_management.libraries.functions.default import default
     stack_name = Script.get_stack_name()
-    return default("/configurations/cluster-env/stack_root", "/usr/{0}".format(stack_name.lower()))
+    stack_root_json = default("/configurations/cluster-env/stack_root", None)
+
+    if stack_root_json is None:
+      return "/usr/{0}".format(stack_name.lower())
+
+    stack_root = json.loads(stack_root_json)
+
+    if stack_name not in stack_root:
+      Logger.warning("Cannot determine stack root for stack named {0}".format(stack_name))
+      return "/usr/{0}".format(stack_name.lower())
+
+    return stack_root[stack_name]
 
   @staticmethod
   def get_stack_version():
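
Script.get_stack_name() now consults two sources before defaulting. A sketch
with plain dict lookups standing in for the default("/path", ...) helper:

    def get_stack_name(command_json):
        # Prefer the stack name sent on the command itself, then the one
        # persisted in cluster-env, then the historical "HDP" default.
        host_level = command_json.get("hostLevelParams", {})
        cluster_env = command_json.get("configurations", {}).get("cluster-env", {})
        return host_level.get("stack_name") or cluster_env.get("stack_name", "HDP")

    print(get_stack_name({"hostLevelParams": {"stack_name": "BigInsights"}}))  # BigInsights
    print(get_stack_name({"configurations": {"cluster-env": {"stack_name": "HDP"}}}))  # HDP
    print(get_stack_name({}))  # HDP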

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
index fc66f53..28946e7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
@@ -204,19 +204,25 @@ public class ExecutionCommandWrapper {
             effectiveClusterVersion.getRepositoryVersion().getVersion());
       }
 
-      // add the stack and common-services folders to the command
+      // add the stack and common-services folders to the command, but only if
+      // they aren't already set - they may have been given specific values
+      // ahead of time
       StackId stackId = cluster.getDesiredStackVersion();
       StackInfo stackInfo = ambariMetaInfo.getStack(stackId.getStackName(),
           stackId.getStackVersion());
 
-      commandParams.put(HOOKS_FOLDER, stackInfo.getStackHooksFolder());
+      if (!commandParams.containsKey(HOOKS_FOLDER)) {
+        commandParams.put(HOOKS_FOLDER, stackInfo.getStackHooksFolder());
+      }
 
-      String serviceName = executionCommand.getServiceName();
-      if (!StringUtils.isEmpty(serviceName)) {
-        ServiceInfo serviceInfo = ambariMetaInfo.getService(stackId.getStackName(),
-            stackId.getStackVersion(), serviceName);
+      if (!commandParams.containsKey(SERVICE_PACKAGE_FOLDER)) {
+        String serviceName = executionCommand.getServiceName();
+        if (!StringUtils.isEmpty(serviceName)) {
+          ServiceInfo serviceInfo = ambariMetaInfo.getService(stackId.getStackName(),
+              stackId.getStackVersion(), serviceName);
 
-        commandParams.put(SERVICE_PACKAGE_FOLDER, serviceInfo.getServicePackageFolder());
+          commandParams.put(SERVICE_PACKAGE_FOLDER, serviceInfo.getServicePackageFolder());
+        }
       }
     } catch (ClusterNotFoundException cnfe) {
       // it's possible that there are commands without clusters; in such cases,
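
The guard is the familiar put-if-absent pattern. In Python terms (a sketch;
the hooks_folder/service_package_folder keys are assumed to be the values
behind the HOOKS_FOLDER and SERVICE_PACKAGE_FOLDER constants):

    def fill_folder_defaults(command_params, hooks_folder, service_package_folder=None):
        # Only fill in what the command's creator did not already set.
        command_params.setdefault("hooks_folder", hooks_folder)
        if service_package_folder is not None:
            command_params.setdefault("service_package_folder", service_package_folder)
        return command_params

    # A pre-set value survives; the missing one is filled in:
    print(fill_folder_defaults({"hooks_folder": "/custom/hooks"},
                               "stacks/HDP/2.0.6/hooks",
                               "common-services/HDFS/2.1.0.2.0/package"))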

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
index 3681eda..c361094 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
@@ -25,6 +25,7 @@ import java.util.Map;
 import org.apache.ambari.server.actionmanager.TargetHostType;
 import org.apache.ambari.server.controller.internal.RequestOperationLevel;
 import org.apache.ambari.server.controller.internal.RequestResourceFilter;
+import org.apache.ambari.server.state.StackId;
 
 /**
  * The context required to create tasks and stages for a custom action
@@ -41,6 +42,7 @@ public class ActionExecutionContext {
   private String expectedComponentName;
   private boolean hostsInMaintenanceModeExcluded = true;
   private boolean allowRetry = false;
+  private StackId stackId;
 
   /**
    * {@code true} if slave/client component failures should be automatically
@@ -168,6 +170,30 @@ public class ActionExecutionContext {
     this.autoSkipFailures = autoSkipFailures;
   }
 
+  /**
+   * Gets the stack to use for generating stack-associated values for a command.
+   * In some cases the cluster's stack is not the correct one to use, such as
+   * when distributing a repository.
+   *
+   * @return the stack to use when generating stack-specific content
+   *         for the command.
+   */
+  public StackId getStackId() {
+    return stackId;
+  }
+
+  /**
+   * Sets the stack to use for generating stack-associated values for a command.
+   * In some cases the cluster's stack is not the correct one to use, such as
+   * when distributing a repository.
+   *
+   * @param stackId
+   *          the stackId to use for stack-based properties on the command.
+   */
+  public void setStackId(StackId stackId) {
+    this.stackId = stackId;
+  }
+
   @Override
   public String toString() {
     return "ActionExecutionContext{" +

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
index d556b60..f75fb41 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
@@ -454,10 +454,12 @@ public class AmbariActionExecutionHelper {
       for (Map.Entry<String, String> dbConnectorName : configs.getDatabaseConnectorNames().entrySet()) {
         hostLevelParams.put(dbConnectorName.getKey(), dbConnectorName.getValue());
       }
+
       for (Map.Entry<String, String> previousDBConnectorName : configs.getPreviousDatabaseConnectorNames().entrySet()) {
         hostLevelParams.put(previousDBConnectorName.getKey(), previousDBConnectorName.getValue());
       }
-      addRepoInfoToHostLevelParams(cluster, hostLevelParams, hostName);
+
+      addRepoInfoToHostLevelParams(cluster, actionContext, hostLevelParams, hostName);
 
       Map<String, String> roleParams = execCmd.getRoleParams();
       if (roleParams == null) {
@@ -517,7 +519,8 @@ public class AmbariActionExecutionHelper {
   *
   * */
 
-  private void addRepoInfoToHostLevelParams(Cluster cluster, Map<String, String> hostLevelParams, String hostName) throws AmbariException {
+  private void addRepoInfoToHostLevelParams(Cluster cluster, ActionExecutionContext actionContext,
+      Map<String, String> hostLevelParams, String hostName) throws AmbariException {
     if (null == cluster) {
       return;
     }
@@ -526,6 +529,7 @@ public class AmbariActionExecutionHelper {
     JsonArray repositories = new JsonArray();
     ClusterVersionEntity clusterVersionEntity = clusterVersionDAO.findByClusterAndStateCurrent(
         cluster.getClusterName());
+
     if (clusterVersionEntity != null && clusterVersionEntity.getRepositoryVersion() != null) {
       String hostOsFamily = clusters.getHost(hostName).getOsFamily();
       for (OperatingSystemEntity operatingSystemEntity : clusterVersionEntity.getRepositoryVersion().getOperatingSystems()) {
@@ -547,8 +551,15 @@ public class AmbariActionExecutionHelper {
 
     hostLevelParams.put(REPO_INFO, rootJsonObject.toString());
 
-    StackId stackId = cluster.getCurrentStackVersion();
-    hostLevelParams.put(STACK_NAME, stackId.getStackName());
-    hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
+    // set the host level params if not already set by whoever is creating this command
+    if (!hostLevelParams.containsKey(STACK_NAME) || !hostLevelParams.containsKey(STACK_VERSION)) {
+      // see if the action context has a stack ID set to use, otherwise use the
+      // cluster's current stack ID
+      StackId stackId = actionContext.getStackId() != null ? actionContext.getStackId()
+          : cluster.getCurrentStackVersion();
+
+      hostLevelParams.put(STACK_NAME, stackId.getStackName());
+      hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
+    }
   }
 }
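
The precedence introduced in addRepoInfoToHostLevelParams(), sketched in
Python (the stack_name/stack_version keys are assumed to match the
STACK_NAME/STACK_VERSION constants; stacks are modeled as (name, version)
tuples):

    def resolve_stack(host_level_params, context_stack, cluster_stack):
        # Params pre-set by the command's creator win; then a stack pinned on
        # the action context (e.g. the repo being distributed); finally the
        # cluster's current stack.
        if "stack_name" in host_level_params and "stack_version" in host_level_params:
            return host_level_params["stack_name"], host_level_params["stack_version"]
        return context_stack if context_stack is not None else cluster_stack

    print(resolve_stack({}, ("HDP", "2.6"), ("HDP", "2.5")))  # ('HDP', '2.6')
    print(resolve_stack({}, None, ("HDP", "2.5")))            # ('HDP', '2.5')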

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
index 061cdf7..50cea9e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
@@ -34,7 +34,10 @@ import java.util.Set;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.PropertyDependencyInfo;
+import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.ValueAttributesInfo;
 import org.apache.ambari.server.topology.AdvisedConfiguration;
 import org.apache.ambari.server.topology.Blueprint;
@@ -84,31 +87,31 @@ public class BlueprintConfigurationProcessor {
    * Single host topology updaters
    */
   protected static Map<String, Map<String, PropertyUpdater>> singleHostTopologyUpdaters =
-      new HashMap<String, Map<String, PropertyUpdater>>();
+      new HashMap<>();
 
   /**
    * Multi host topology updaters
    */
   private static Map<String, Map<String, PropertyUpdater>> multiHostTopologyUpdaters =
-      new HashMap<String, Map<String, PropertyUpdater>>();
+      new HashMap<>();
 
   /**
    * Database host topology updaters
    */
   private static Map<String, Map<String, PropertyUpdater>> dbHostTopologyUpdaters =
-      new HashMap<String, Map<String, PropertyUpdater>>();
+      new HashMap<>();
 
   /**
    * Updaters for properties which need 'm' appended
    */
   private static Map<String, Map<String, PropertyUpdater>> mPropertyUpdaters =
-      new HashMap<String, Map<String, PropertyUpdater>>();
+      new HashMap<>();
 
   /**
    * Non topology related updaters
    */
   private static Map<String, Map<String, PropertyUpdater>> nonTopologyUpdaters =
-      new HashMap<String, Map<String, PropertyUpdater>>();
+      new HashMap<>();
 
   /**
    * Updaters that preserve the original property value, functions
@@ -117,13 +120,13 @@ public class BlueprintConfigurationProcessor {
    * cluster creation
    */
   private Map<String, Map<String, PropertyUpdater>> removePropertyUpdaters =
-    new HashMap<String, Map<String, PropertyUpdater>>();
+    new HashMap<>();
 
   /**
    * Collection of all updaters
    */
   private static Collection<Map<String, Map<String, PropertyUpdater>>> allUpdaters =
-      new ArrayList<Map<String, Map<String, PropertyUpdater>>>();
+      new ArrayList<>();
 
   /**
    * Compiled regex for hostgroup token.
@@ -152,7 +155,7 @@ public class BlueprintConfigurationProcessor {
    *   expected hostname information is not found.
    */
   private static Set<String> configPropertiesWithHASupport =
-    new HashSet<String>(Arrays.asList("fs.defaultFS", "hbase.rootdir", "instance.volumes", "policymgr_external_url", "xasecure.audit.destination.hdfs.dir"));
+    new HashSet<>(Arrays.asList("fs.defaultFS", "hbase.rootdir", "instance.volumes", "policymgr_external_url", "xasecure.audit.destination.hdfs.dir"));
 
   /**
    * Statically-defined list of filters to apply on property exports.
@@ -233,8 +236,8 @@ public class BlueprintConfigurationProcessor {
       singleHostTopologyUpdaters.put("oozie-env", oozieEnvUpdaters);
       singleHostTopologyUpdaters.put("oozie-site", oozieSiteUpdaters);
     } else {
-      Map<String, PropertyUpdater> oozieEnvOriginalValueMap = new HashMap<String, PropertyUpdater>();
-      Map<String, PropertyUpdater> oozieSiteOriginalValueMap = new HashMap<String, PropertyUpdater>();
+      Map<String, PropertyUpdater> oozieEnvOriginalValueMap = new HashMap<>();
+      Map<String, PropertyUpdater> oozieSiteOriginalValueMap = new HashMap<>();
       // register updaters for Oozie properties that may point to an external DB
       oozieEnvOriginalValueMap.put("oozie_existing_mysql_host", new OriginalValuePropertyUpdater());
       oozieEnvOriginalValueMap.put("oozie_existing_oracle_host", new OriginalValuePropertyUpdater());
@@ -247,7 +250,7 @@ public class BlueprintConfigurationProcessor {
       removePropertyUpdaters.put("oozie-site", oozieSiteOriginalValueMap);
     }
 
-    Map<String, PropertyUpdater> hiveEnvOriginalValueMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> hiveEnvOriginalValueMap = new HashMap<>();
     // register updaters for Hive properties that may point to an external DB
     hiveEnvOriginalValueMap.put("hive_existing_oracle_host", new OriginalValuePropertyUpdater());
     hiveEnvOriginalValueMap.put("hive_existing_mssql_server_2_host", new OriginalValuePropertyUpdater());
@@ -283,7 +286,7 @@ public class BlueprintConfigurationProcessor {
   }
 
   public Collection<String> getRequiredHostGroups() {
-    Collection<String> requiredHostGroups = new HashSet<String>();
+    Collection<String> requiredHostGroups = new HashSet<>();
 
     for (Map<String, Map<String, PropertyUpdater>> updaterMap : createCollectionOfUpdaters()) {
       for (Map.Entry<String, Map<String, PropertyUpdater>> entry : updaterMap.entrySet()) {
@@ -322,7 +325,7 @@ public class BlueprintConfigurationProcessor {
    * @return Set of config type names that were updated by this update call
    */
   public Set<String> doUpdateForClusterCreate() throws ConfigurationTopologyException {
-      Set<String> configTypesUpdated = new HashSet<String>();
+      Set<String> configTypesUpdated = new HashSet<>();
     Configuration clusterConfig = clusterTopology.getConfiguration();
     Map<String, HostGroupInfo> groupInfoMap = clusterTopology.getHostGroupInfo();
 
@@ -350,7 +353,7 @@ public class BlueprintConfigurationProcessor {
             final String originalValue = typeMap.get(propertyName);
             final String updatedValue =
               updater.updateForClusterCreate(propertyName, originalValue, clusterProps, clusterTopology);
-            
+
             if(updatedValue == null ) {
               continue;
             }
@@ -413,6 +416,7 @@ public class BlueprintConfigurationProcessor {
     }
 
     // Explicitly set any properties that are required but not currently provided in the stack definition.
+    setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
     setRetryConfiguration(clusterConfig, configTypesUpdated);
     setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
     addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
@@ -485,7 +489,7 @@ public class BlueprintConfigurationProcessor {
       doOozieServerHAUpdate();
     }
 
-    Collection<Configuration> allConfigs = new ArrayList<Configuration>();
+    Collection<Configuration> allConfigs = new ArrayList<>();
     allConfigs.add(clusterTopology.getConfiguration());
     for (HostGroupInfo groupInfo : clusterTopology.getHostGroupInfo().values()) {
       Configuration hgConfiguration = groupInfo.getConfiguration();
@@ -705,7 +709,7 @@ public class BlueprintConfigurationProcessor {
    */
   private Collection<Map<String, Map<String, PropertyUpdater>>> addNameNodeHAUpdaters(Collection<Map<String, Map<String, PropertyUpdater>>> updaters) {
     Collection<Map<String, Map<String, PropertyUpdater>>> highAvailabilityUpdaters =
-      new LinkedList<Map<String, Map<String, PropertyUpdater>>>();
+      new LinkedList<>();
 
     // always add the statically-defined list of updaters to the list to use
     // in processing cluster configuration
@@ -732,7 +736,7 @@ public class BlueprintConfigurationProcessor {
    */
   private Collection<Map<String, Map<String, PropertyUpdater>>> addYarnResourceManagerHAUpdaters(Collection<Map<String, Map<String, PropertyUpdater>>> updaters) {
     Collection<Map<String, Map<String, PropertyUpdater>>> highAvailabilityUpdaters =
-      new LinkedList<Map<String, Map<String, PropertyUpdater>>>();
+      new LinkedList<>();
 
     // always add the statically-defined list of updaters to the list to use
     // in processing cluster configuration
@@ -758,7 +762,7 @@ public class BlueprintConfigurationProcessor {
    */
   private Collection<Map<String, Map<String, PropertyUpdater>>> addOozieServerHAUpdaters(Collection<Map<String, Map<String, PropertyUpdater>>> updaters) {
     Collection<Map<String, Map<String, PropertyUpdater>>> highAvailabilityUpdaters =
-      new LinkedList<Map<String, Map<String, PropertyUpdater>>>();
+      new LinkedList<>();
 
     // always add the statically-defined list of updaters to the list to use
     // in processing cluster configuration
@@ -857,8 +861,8 @@ public class BlueprintConfigurationProcessor {
    * @return a Map of registered PropertyUpdaters for handling HA properties in hdfs-site
    */
   private Map<String, Map<String, PropertyUpdater>> createMapOfNameNodeHAUpdaters() {
-    Map<String, Map<String, PropertyUpdater>> highAvailabilityUpdaters = new HashMap<String, Map<String, PropertyUpdater>>();
-    Map<String, PropertyUpdater> hdfsSiteUpdatersForAvailability = new HashMap<String, PropertyUpdater>();
+    Map<String, Map<String, PropertyUpdater>> highAvailabilityUpdaters = new HashMap<>();
+    Map<String, PropertyUpdater> hdfsSiteUpdatersForAvailability = new HashMap<>();
     highAvailabilityUpdaters.put("hdfs-site", hdfsSiteUpdatersForAvailability);
 
     //todo: Do we need to call this for HG configurations?
@@ -888,8 +892,8 @@ public class BlueprintConfigurationProcessor {
    * @return a Map of registered PropertyUpdaters for handling HA properties in yarn-site
    */
   private Map<String, Map<String, PropertyUpdater>> createMapOfYarnResourceManagerHAUpdaters() {
-    Map<String, Map<String, PropertyUpdater>> highAvailabilityUpdaters = new HashMap<String, Map<String, PropertyUpdater>>();
-    Map<String, PropertyUpdater> yarnSiteUpdatersForAvailability = new HashMap<String, PropertyUpdater>();
+    Map<String, Map<String, PropertyUpdater>> highAvailabilityUpdaters = new HashMap<>();
+    Map<String, PropertyUpdater> yarnSiteUpdatersForAvailability = new HashMap<>();
     highAvailabilityUpdaters.put("yarn-site", yarnSiteUpdatersForAvailability);
 
     Map<String, String> yarnSiteConfig = clusterTopology.getConfiguration().getFullProperties().get("yarn-site");
@@ -915,8 +919,8 @@ public class BlueprintConfigurationProcessor {
    * @return a Map of registered PropertyUpdaters for handling HA properties in oozie-site
    */
   private Map<String, Map<String, PropertyUpdater>> createMapOfOozieServerHAUpdaters() {
-    Map<String, Map<String, PropertyUpdater>> highAvailabilityUpdaters = new HashMap<String, Map<String, PropertyUpdater>>();
-    Map<String, PropertyUpdater> oozieSiteUpdatersForAvailability = new HashMap<String, PropertyUpdater>();
+    Map<String, Map<String, PropertyUpdater>> highAvailabilityUpdaters = new HashMap<>();
+    Map<String, PropertyUpdater> oozieSiteUpdatersForAvailability = new HashMap<>();
     highAvailabilityUpdaters.put("oozie-site", oozieSiteUpdatersForAvailability);
 
     // register a multi-host property updater for this Oozie property.
@@ -1190,7 +1194,7 @@ public class BlueprintConfigurationProcessor {
                   groupInfo.getHostGroupName() + "%");
             }
           }
-          Collection<String> addedGroups = new HashSet<String>();
+          Collection<String> addedGroups = new HashSet<>();
           String[] toks = propValue.split(",");
           boolean inBrackets = propValue.startsWith("[");
 
@@ -1232,7 +1236,7 @@ public class BlueprintConfigurationProcessor {
   //todo: replace this with parseHostGroupToken which would return a hostgroup or null
   private static Collection<String> getHostStrings(String val, ClusterTopology topology) {
 
-    Collection<String> hosts = new LinkedHashSet<String>();
+    Collection<String> hosts = new LinkedHashSet<>();
     Matcher m = HOSTGROUP_PORT_REGEX.matcher(val);
     while (m.find()) {
       String groupName = m.group(1);
@@ -1264,7 +1268,7 @@ public class BlueprintConfigurationProcessor {
    *         elements in this property
    */
   private static String[] splitAndTrimStrings(String propertyName) {
-    List<String> namesWithoutWhitespace = new LinkedList<String>();
+    List<String> namesWithoutWhitespace = new LinkedList<>();
     for (String service : propertyName.split(",")) {
       namesWithoutWhitespace.add(service.trim());
     }
@@ -1496,7 +1500,7 @@ public class BlueprintConfigurationProcessor {
         Collection<String> matchingGroups = topology.getHostGroupsForComponent(component);
         int matchingGroupCount = matchingGroups.size();
         if (matchingGroupCount != 0) {
-          return new HashSet<String>(matchingGroups);
+          return new HashSet<>(matchingGroups);
         } else {
           Cardinality cardinality = topology.getBlueprint().getStack().getCardinality(component);
           // if no matching host groups are found for a component whose configuration
@@ -1908,7 +1912,7 @@ public class BlueprintConfigurationProcessor {
      * @return list of hosts that have the given components
      */
     private Collection<String> getHostStringsFromLocalhost(String origValue, ClusterTopology topology) {
-      Set<String> hostStrings = new HashSet<String>();
+      Set<String> hostStrings = new HashSet<>();
       if(origValue.contains("localhost")) {
         Matcher localhostMatcher = LOCALHOST_PORT_REGEX.matcher(origValue);
         String port = null;
@@ -1950,7 +1954,7 @@ public class BlueprintConfigurationProcessor {
     private String removePorts(Collection<String> hostStrings) {
       String port = null;
       if(!usePortForEachHost && !hostStrings.isEmpty()) {
-        Set<String> temp = new HashSet<String>();
+        Set<String> temp = new HashSet<>();
 
         // extract port
         Iterator<String> i = hostStrings.iterator();
@@ -1985,7 +1989,7 @@ public class BlueprintConfigurationProcessor {
                                                     Map<String, Map<String, String>> properties,
                                                     ClusterTopology topology) {
 
-      Collection<String> requiredHostGroups = new HashSet<String>();
+      Collection<String> requiredHostGroups = new HashSet<>();
 
       // add all host groups specified in host group tokens
       Matcher m = HOSTGROUP_PORT_REGEX.matcher(origValue);
@@ -2158,8 +2162,9 @@ public class BlueprintConfigurationProcessor {
       StringBuilder sb = new StringBuilder();
 
       Matcher m = REGEX_IN_BRACKETS.matcher(origValue);
-      if (m.matches())
+      if (m.matches()) {
         origValue = m.group("INNER");
+      }
 
       if (origValue != null) {
         sb.append("[");
@@ -2167,8 +2172,9 @@ public class BlueprintConfigurationProcessor {
         for (String value : origValue.split(",")) {
 
           m = REGEX_IN_QUOTES.matcher(value);
-          if (m.matches())
+          if (m.matches()) {
             value = m.group("INNER");
+          }
 
           if (!isFirst) {
             sb.append(",");
@@ -2202,6 +2208,7 @@ public class BlueprintConfigurationProcessor {
    */
   private static class OriginalValuePropertyUpdater implements PropertyUpdater {
 
+    @Override
     public String updateForClusterCreate(String propertyName,
                                          String origValue,
                                          Map<String, Map<String, String>> properties,
@@ -2232,7 +2239,7 @@ public class BlueprintConfigurationProcessor {
   private static class TempletonHivePropertyUpdater implements PropertyUpdater {
 
     private Map<String, PropertyUpdater> mapOfKeysToUpdaters =
-      new HashMap<String, PropertyUpdater>();
+      new HashMap<>();
 
     TempletonHivePropertyUpdater() {
       // the only known property that requires hostname substitution is hive.metastore.uris,
@@ -2297,7 +2304,7 @@ public class BlueprintConfigurationProcessor {
         return Collections.emptySet();
       }
 
-      Collection<String> requiredGroups = new HashSet<String>();
+      Collection<String> requiredGroups = new HashSet<>();
       // split out the key/value pairs
       String[] keyValuePairs = origValue.split(",");
       for (String keyValuePair : keyValuePairs) {
@@ -2344,57 +2351,57 @@ public class BlueprintConfigurationProcessor {
     allUpdaters.add(mPropertyUpdaters);
     allUpdaters.add(nonTopologyUpdaters);
 
-    Map<String, PropertyUpdater> amsSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> hdfsSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> mapredSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> coreSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> hbaseSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> yarnSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> hiveSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> hiveSiteNonTopologyMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> oozieSiteOriginalValueMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> oozieSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> stormSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> stormSiteNonTopologyMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> accumuloSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> falconStartupPropertiesMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> kafkaBrokerMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> kafkaBrokerNonTopologyMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> atlasPropsMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> mapredEnvMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> mHadoopEnvMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> shHadoopEnvMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> hbaseEnvMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> hiveEnvMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> hiveInteractiveEnvMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> hiveInteractiveSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> oozieEnvMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> oozieEnvHeapSizeMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> multiWebhcatSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> multiHbaseSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> multiStormSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> multiCoreSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> multiHdfsSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> multiHiveSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> multiKafkaBrokerMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> multiSliderClientMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> multiYarnSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> multiOozieSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> multiAccumuloSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> multiRangerKmsSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> dbHiveSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> rangerAdminPropsMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> rangerEnvPropsMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> rangerYarnAuditPropsMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> rangerHdfsAuditPropsMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> rangerHbaseAuditPropsMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> rangerHiveAuditPropsMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> rangerKnoxAuditPropsMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> rangerKafkaAuditPropsMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> rangerStormAuditPropsMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> rangerAtlasAuditPropsMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> hawqSiteMap = new HashMap<String, PropertyUpdater>();
-    Map<String, PropertyUpdater> zookeeperEnvMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> amsSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> hdfsSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> mapredSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> coreSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> hbaseSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> yarnSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> hiveSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> hiveSiteNonTopologyMap = new HashMap<>();
+    Map<String, PropertyUpdater> oozieSiteOriginalValueMap = new HashMap<>();
+    Map<String, PropertyUpdater> oozieSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> stormSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> stormSiteNonTopologyMap = new HashMap<>();
+    Map<String, PropertyUpdater> accumuloSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> falconStartupPropertiesMap = new HashMap<>();
+    Map<String, PropertyUpdater> kafkaBrokerMap = new HashMap<>();
+    Map<String, PropertyUpdater> kafkaBrokerNonTopologyMap = new HashMap<>();
+    Map<String, PropertyUpdater> atlasPropsMap = new HashMap<>();
+    Map<String, PropertyUpdater> mapredEnvMap = new HashMap<>();
+    Map<String, PropertyUpdater> mHadoopEnvMap = new HashMap<>();
+    Map<String, PropertyUpdater> shHadoopEnvMap = new HashMap<>();
+    Map<String, PropertyUpdater> hbaseEnvMap = new HashMap<>();
+    Map<String, PropertyUpdater> hiveEnvMap = new HashMap<>();
+    Map<String, PropertyUpdater> hiveInteractiveEnvMap = new HashMap<>();
+    Map<String, PropertyUpdater> hiveInteractiveSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> oozieEnvMap = new HashMap<>();
+    Map<String, PropertyUpdater> oozieEnvHeapSizeMap = new HashMap<>();
+    Map<String, PropertyUpdater> multiWebhcatSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> multiHbaseSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> multiStormSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> multiCoreSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> multiHdfsSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> multiHiveSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> multiKafkaBrokerMap = new HashMap<>();
+    Map<String, PropertyUpdater> multiSliderClientMap = new HashMap<>();
+    Map<String, PropertyUpdater> multiYarnSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> multiOozieSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> multiAccumuloSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> multiRangerKmsSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> dbHiveSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> rangerAdminPropsMap = new HashMap<>();
+    Map<String, PropertyUpdater> rangerEnvPropsMap = new HashMap<>();
+    Map<String, PropertyUpdater> rangerYarnAuditPropsMap = new HashMap<>();
+    Map<String, PropertyUpdater> rangerHdfsAuditPropsMap = new HashMap<>();
+    Map<String, PropertyUpdater> rangerHbaseAuditPropsMap = new HashMap<>();
+    Map<String, PropertyUpdater> rangerHiveAuditPropsMap = new HashMap<>();
+    Map<String, PropertyUpdater> rangerKnoxAuditPropsMap = new HashMap<>();
+    Map<String, PropertyUpdater> rangerKafkaAuditPropsMap = new HashMap<>();
+    Map<String, PropertyUpdater> rangerStormAuditPropsMap = new HashMap<>();
+    Map<String, PropertyUpdater> rangerAtlasAuditPropsMap = new HashMap<>();
+    Map<String, PropertyUpdater> hawqSiteMap = new HashMap<>();
+    Map<String, PropertyUpdater> zookeeperEnvMap = new HashMap<>();
 
     singleHostTopologyUpdaters.put("ams-site", amsSiteMap);
     singleHostTopologyUpdaters.put("hdfs-site", hdfsSiteMap);
@@ -2533,7 +2540,7 @@ public class BlueprintConfigurationProcessor {
         String atlasHookClass = "org.apache.atlas.hive.hook.HiveHook";
         String[] hiveHooks = origValue.split(",");
 
-        List<String> hiveHooksClean = new ArrayList<String>();
+        List<String> hiveHooksClean = new ArrayList<>();
         for(String hiveHook : hiveHooks) {
           if (!StringUtils.isBlank(hiveHook.trim())) {
             hiveHooksClean.add(hiveHook.trim());
@@ -2786,7 +2793,7 @@ public class BlueprintConfigurationProcessor {
 
   private Collection<String> setupHDFSProxyUsers(Configuration configuration, Set<String> configTypesUpdated) {
     // AMBARI-5206
-    final Map<String , String> userProps = new HashMap<String , String>();
+    final Map<String , String> userProps = new HashMap<>();
 
     Collection<String> services = clusterTopology.getBlueprint().getServices();
     if (services.contains("HDFS")) {
@@ -2925,6 +2932,49 @@ public class BlueprintConfigurationProcessor {
 
 
   /**
+   * Sets the read-only properties for stack features & tools, overriding
+   * anything provided in the blueprint.
+   *
+   * @param configuration
+   *          the configuration to update with values from the stack.
+   * @param configTypesUpdated
+   *          the list of configuration types updated (cluster-env will be added
+   *          to this).
+   * @throws ConfigurationTopologyException
+   */
+  private void setStackToolsAndFeatures(Configuration configuration, Set<String> configTypesUpdated)
+      throws ConfigurationTopologyException {
+    ConfigHelper configHelper = clusterTopology.getAmbariContext().getConfigHelper();
+    Stack stack = clusterTopology.getBlueprint().getStack();
+    String stackName = stack.getName();
+    String stackVersion = stack.getVersion();
+
+    StackId stackId = new StackId(stackName, stackVersion);
+
+    Set<String> properties = Sets.newHashSet(ConfigHelper.CLUSTER_ENV_STACK_NAME_PROPERTY,
+        ConfigHelper.CLUSTER_ENV_STACK_ROOT_PROPERTY, ConfigHelper.CLUSTER_ENV_STACK_TOOLS_PROPERTY,
+        ConfigHelper.CLUSTER_ENV_STACK_FEATURES_PROPERTY);
+
+    try {
+      Map<String, Map<String, String>> defaultStackProperties = configHelper.getDefaultStackProperties(stackId);
+      Map<String, String> clusterEnvDefaultProperties = defaultStackProperties.get(CLUSTER_ENV_CONFIG_TYPE_NAME);
+
+      for (String property : properties) {
+        if (clusterEnvDefaultProperties.containsKey(property)) {
+          configuration.setProperty(CLUSTER_ENV_CONFIG_TYPE_NAME, property,
+              clusterEnvDefaultProperties.get(property));
+
+          // make sure to include the configuration type as being updated
+          configTypesUpdated.add(CLUSTER_ENV_CONFIG_TYPE_NAME);
+        }
+      }
+    } catch (AmbariException ambariException) {
+      throw new ConfigurationTopologyException("Unable to retrieve the stack tools and features",
+          ambariException);
+    }
+  }
+
+  /**
    * Ensure that the specified property exists.
    * If not, set a default value.
    *
@@ -3045,7 +3095,7 @@ public class BlueprintConfigurationProcessor {
 
     @Override
     public boolean isPropertyIncluded(String propertyName, String propertyValue, String configType, ClusterTopology topology) {
-      return !(this.propertyConfigType.equals(configType) &&
+      return !(propertyConfigType.equals(configType) &&
              this.propertyName.equals(propertyName));
     }
   }
@@ -3201,7 +3251,7 @@ public class BlueprintConfigurationProcessor {
      * namenode.
      */
     private final Set<String> setOfHDFSPropertyNamesNonHA =
-      Collections.unmodifiableSet( new HashSet<String>(Arrays.asList("dfs.namenode.http-address", "dfs.namenode.https-address", "dfs.namenode.rpc-address")));
+      Collections.unmodifiableSet( new HashSet<>(Arrays.asList("dfs.namenode.http-address", "dfs.namenode.https-address", "dfs.namenode.rpc-address")));
 
 
     /**
@@ -3271,7 +3321,7 @@ public class BlueprintConfigurationProcessor {
      * Set of HAWQ Property names that are only valid in a HA scenario.
      */
     private final Set<String> setOfHawqPropertyNamesNonHA =
-            Collections.unmodifiableSet( new HashSet<String>(Arrays.asList(HAWQ_SITE_HAWQ_STANDBY_ADDRESS_HOST)));
+            Collections.unmodifiableSet( new HashSet<>(Arrays.asList(HAWQ_SITE_HAWQ_STANDBY_ADDRESS_HOST)));
 
 
     /**
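
setStackToolsAndFeatures() above boils down to forcing a handful of read-only
keys back to the stack's own defaults, regardless of what the blueprint
supplied. A sketch (the key names are assumed to be the values behind the
CLUSTER_ENV_STACK_* constants):

    CLUSTER_ENV = "cluster-env"
    READ_ONLY_KEYS = {"stack_name", "stack_root", "stack_tools", "stack_features"}

    def apply_stack_defaults(cluster_env_config, stack_defaults, config_types_updated):
        # Overwrite the read-only keys with the stack defaults and record
        # cluster-env as an updated configuration type.
        for key in READ_ONLY_KEYS & set(stack_defaults):
            cluster_env_config[key] = stack_defaults[key]
            config_types_updated.add(CLUSTER_ENV)
        return cluster_env_config

    updated = set()
    print(apply_stack_defaults({"stack_tools": "from-blueprint"},
                               {"stack_tools": '{"HDP": {}}'}, updated))
    # {'stack_tools': '{"HDP": {}}'} (blueprint value overridden)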

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
index 64e0b14..9ea6083 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
@@ -76,6 +76,8 @@ import org.apache.ambari.server.serveraction.upgrades.FinalizeUpgradeAction;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.ComponentInfo;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.MaintenanceState;
 import org.apache.ambari.server.state.RepositoryVersionState;
@@ -195,6 +197,13 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
   private static HostComponentStateDAO hostComponentStateDAO;
 
   /**
+   * Used for updating the existing stack tools with those of the stack being
+   * distributed.
+   */
+  @Inject
+  private static Provider<ConfigHelper> configHelperProvider;
+
+  /**
    * We have to include such a hack here, because if we
    * make finalizeUpgradeAction field static and request injection
   * for it, there will be a circular dependency error
@@ -216,11 +225,11 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
   @Override
   public Set<Resource> getResourcesAuthorized(Request request, Predicate predicate) throws
       SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-    final Set<Resource> resources = new HashSet<Resource>();
+    final Set<Resource> resources = new HashSet<>();
     final Set<String> requestedIds = getRequestPropertyIds(request, predicate);
     final Set<Map<String, Object>> propertyMaps = getPropertyMaps(predicate);
 
-    List<ClusterVersionEntity> requestedEntities = new ArrayList<ClusterVersionEntity>();
+    List<ClusterVersionEntity> requestedEntities = new ArrayList<>();
     for (Map<String, Object> propertyMap: propertyMaps) {
       final String clusterName = propertyMap.get(CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID).toString();
       final Long id;
@@ -244,7 +253,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
     for (ClusterVersionEntity entity: requestedEntities) {
       final Resource resource = new ResourceImpl(Resource.Type.ClusterStackVersion);
 
-      final Map<String, List<String>> hostStates = new HashMap<String, List<String>>();
+      final Map<String, List<String>> hostStates = new HashMap<>();
       for (RepositoryVersionState state: RepositoryVersionState.values()) {
         hostStates.put(state.name(), new ArrayList<String>());
       }
@@ -295,12 +304,10 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
 
     String clName;
     final String desiredRepoVersion;
-    String stackName;
-    String stackVersion;
 
     Map<String, Object> propertyMap = iterator.next();
 
-    Set<String> requiredProperties = new HashSet<String>();
+    Set<String> requiredProperties = new HashSet<>();
     requiredProperties.add(CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID);
     requiredProperties.add(CLUSTER_STACK_VERSION_REPOSITORY_VERSION_PROPERTY_ID);
     requiredProperties.add(CLUSTER_STACK_VERSION_STACK_PROPERTY_ID);
@@ -335,19 +342,29 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
           cluster.getClusterName(), entity.getDirection().getText(false)));
     }
 
-    final StackId stackId;
-    if (propertyMap.containsKey(CLUSTER_STACK_VERSION_STACK_PROPERTY_ID) &&
-            propertyMap.containsKey(CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID)) {
-      stackName = (String) propertyMap.get(CLUSTER_STACK_VERSION_STACK_PROPERTY_ID);
-      stackVersion = (String) propertyMap.get(CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID);
-      stackId = new StackId(stackName, stackVersion);
-      if (! ami.isSupportedStack(stackName, stackVersion)) {
-        throw new NoSuchParentResourceException(String.format("Stack %s is not supported",
-                stackId));
-      }
-    } else { // Using stack that is current for cluster
-      StackId currentStackVersion = cluster.getCurrentStackVersion();
-      stackId = currentStackVersion;
+    String stackName = (String) propertyMap.get(CLUSTER_STACK_VERSION_STACK_PROPERTY_ID);
+    String stackVersion = (String) propertyMap.get(CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID);
+    if (StringUtils.isBlank(stackName) || StringUtils.isBlank(stackVersion)) {
+      String message = String.format(
+          "Both the %s and %s properties are required when distributing a new stack",
+          CLUSTER_STACK_VERSION_STACK_PROPERTY_ID, CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID);
+
+      throw new SystemException(message);
+    }
+
+    final StackId stackId = new StackId(stackName, stackVersion);
+
+    if (!ami.isSupportedStack(stackName, stackVersion)) {
+      throw new NoSuchParentResourceException(String.format("Stack %s is not supported", stackId));
+    }
+
+    // bootstrap the stack tools if necessary for the stack which is being
+    // distributed
+    try {
+      bootstrapStackTools(stackId, cluster);
+    } catch (AmbariException ambariException) {
+      throw new SystemException("Unable to modify stack tools for new stack being distributed",
+          ambariException);
     }
 
     RepositoryVersionEntity repoVersionEnt = repositoryVersionDAO.findByStackAndVersion(
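
With this change, a distribute request must always name the stack explicitly;
an illustrative payload (property names follow the CLUSTER_STACK_VERSION_*
ids referenced above, and the versions shown are assumptions):

    {
      "ClusterStackVersions/repository_version": "2.6.0.0-1234",
      "ClusterStackVersions/stack": "HDP",
      "ClusterStackVersions/version": "2.6"
    }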
@@ -491,7 +508,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
 
     // build the list of OS repos
     List<OperatingSystemEntity> operatingSystems = repoVersionEnt.getOperatingSystems();
-    Map<String, List<RepositoryEntity>> perOsRepos = new HashMap<String, List<RepositoryEntity>>();
+    Map<String, List<RepositoryEntity>> perOsRepos = new HashMap<>();
     for (OperatingSystemEntity operatingSystem : operatingSystems) {
 
       if (operatingSystem.isAmbariManagedRepos()) {
@@ -504,7 +521,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
     RequestStageContainer req = createRequest();
 
     Iterator<Host> hostIterator = hosts.iterator();
-    Map<String, String> hostLevelParams = new HashMap<String, String>();
+    Map<String, String> hostLevelParams = new HashMap<>();
     hostLevelParams.put(JDK_LOCATION, getManagementController().getJdkResourceUrl());
     String hostParamsJson = StageUtils.getGson().toJson(hostLevelParams);
 
@@ -538,7 +555,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
 
     boolean hasStage = false;
 
-    ArrayList<Stage> stages = new ArrayList<Stage>(batchCount);
+    ArrayList<Stage> stages = new ArrayList<>(batchCount);
     for (int batchId = 1; batchId <= batchCount; batchId++) {
       // Create next stage
       String stageName;
@@ -618,8 +635,8 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
 
 
     // determine packages for all services that are installed on host
-    List<ServiceOsSpecific.Package> packages = new ArrayList<ServiceOsSpecific.Package>();
-    Set<String> servicesOnHost = new HashSet<String>();
+    List<ServiceOsSpecific.Package> packages = new ArrayList<>();
+    Set<String> servicesOnHost = new HashSet<>();
     List<ServiceComponentHost> components = cluster.getServiceComponentHosts(host.getHostName());
     for (ServiceComponentHost component : components) {
       if (repoServices.isEmpty() || repoServices.contains(component.getServiceName())) {
@@ -670,7 +687,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
     final String packageList = gson.toJson(packages);
     final String repoList = gson.toJson(repoInfo);
 
-    Map<String, String> params = new HashMap<String, String>();
+    Map<String, String> params = new HashMap<>();
     params.put("stack_id", stackId.getStackId());
     params.put("repository_version", repoVersion.getVersion());
     params.put("base_urls", repoList);
@@ -689,19 +706,17 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
       params.put(KeyNames.PACKAGE_VERSION, xml.getPackageVersion(osFamily));
     }
 
-
     // add host to this stage
     RequestResourceFilter filter = new RequestResourceFilter(null, null,
             Collections.singletonList(host.getHostName()));
 
-    ActionExecutionContext actionContext = new ActionExecutionContext(
-            cluster.getClusterName(), INSTALL_PACKAGES_ACTION,
-            Collections.singletonList(filter),
-            params);
+    ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
+        INSTALL_PACKAGES_ACTION, Collections.singletonList(filter), params);
+
+    actionContext.setStackId(stackId);
     actionContext.setTimeout(Short.valueOf(configuration.getDefaultAgentTaskTimeout(true)));
 
     return actionContext;
-
   }
 
 
@@ -787,7 +802,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
       }
       Map<String, Object> propertyMap = iterator.next();
 
-      Set<String> requiredProperties = new HashSet<String>();
+      Set<String> requiredProperties = new HashSet<>();
       requiredProperties.add(CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID);
       requiredProperties.add(CLUSTER_STACK_VERSION_REPOSITORY_VERSION_PROPERTY_ID);
       requiredProperties.add(CLUSTER_STACK_VERSION_STATE_PROPERTY_ID);
@@ -826,7 +841,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
       }
 
       if (!force) {
-        Map<String, String> args = new HashMap<String, String>();
+        Map<String, String> args = new HashMap<>();
         if (newStateStr.equals(RepositoryVersionState.CURRENT.toString())) {
           // Finalize upgrade workflow
           args.put(FinalizeUpgradeAction.UPGRADE_DIRECTION_KEY, "upgrade");
@@ -841,7 +856,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
 
         // Get a host name to populate the hostrolecommand table's hostEntity.
         String defaultHostName;
-        ArrayList<Host> hosts = new ArrayList<Host>(cluster.getHosts());
+        ArrayList<Host> hosts = new ArrayList<>(cluster.getHosts());
         if (!hosts.isEmpty()) {
           Collections.sort(hosts);
           defaultHostName = hosts.get(0).getHostName();
@@ -976,4 +991,101 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
   }
 
 
+  /**
+   * Ensures that the stack tools and stack features are set on
+   * {@link ConfigHelper#CLUSTER_ENV} for the stack of the repository being
+   * distributed, so that the new repository can be distributed with the
+   * correct tools.
+   * <p/>
+   * If the cluster's current stack name matches that of the new stack, or the
+   * new stack's tools already exist in the configuration, then this method
+   * makes no changes.
+   *
+   * @param stackId
+   *          the stack of the repository being distributed (not {@code null}).
+   * @param cluster
+   *          the cluster the new stack/repo is being distributed for (not
+   *          {@code null}).
+   * @throws AmbariException
+   */
+  private void bootstrapStackTools(StackId stackId, Cluster cluster) throws AmbariException {
+    // if the stack name is the same as the cluster's current stack name, then
+    // there's no work to do
+    if (StringUtils.equals(stackId.getStackName(),
+        cluster.getCurrentStackVersion().getStackName())) {
+      return;
+    }
+
+    ConfigHelper configHelper = configHelperProvider.get();
+
+    // get the stack tools/features for the stack being distributed
+    Map<String, Map<String, String>> defaultStackConfigurationsByType = configHelper.getDefaultProperties(
+        stackId, cluster);
+
+    Map<String, String> clusterEnvDefaults = defaultStackConfigurationsByType.get(
+        ConfigHelper.CLUSTER_ENV);
+
+    Config clusterEnv = cluster.getDesiredConfigByType(ConfigHelper.CLUSTER_ENV);
+    Map<String, String> clusterEnvProperties = clusterEnv.getProperties();
+
+    // the 3 properties we need to check and update
+    Set<String> properties = Sets.newHashSet(ConfigHelper.CLUSTER_ENV_STACK_ROOT_PROPERTY,
+        ConfigHelper.CLUSTER_ENV_STACK_TOOLS_PROPERTY,
+        ConfigHelper.CLUSTER_ENV_STACK_FEATURES_PROPERTY);
+
+    // any updates are stored here and merged into the existing config type
+    Map<String, String> updatedProperties = new HashMap<>();
+
+    for (String property : properties) {
+      // determine if the property exists in the stack being distributed (it
+      // should, but guard against it being missing)
+      String newStackDefaultJson = clusterEnvDefaults.get(property);
+      if (StringUtils.isBlank(newStackDefaultJson)) {
+        continue;
+      }
+
+      String existingPropertyJson = clusterEnvProperties.get(property);
+
+      // if the stack tools/features property doesn't exist, then just set the
+      // one from the new stack
+      if (StringUtils.isBlank(existingPropertyJson)) {
+        updatedProperties.put(property, newStackDefaultJson);
+        continue;
+      }
+
+      // now comes the hard part - we need to check whether the new stack's
+      // tools already exist alongside the current tools and, if they don't,
+      // add them in
+      final Map<String, Object> existingJson;
+      final Map<String, ?> newStackJsonAsObject;
+      if (StringUtils.equals(property, ConfigHelper.CLUSTER_ENV_STACK_ROOT_PROPERTY)) {
+        existingJson = gson.<Map<String, Object>> fromJson(existingPropertyJson, Map.class);
+        newStackJsonAsObject = gson.<Map<String, String>> fromJson(newStackDefaultJson, Map.class);
+      } else {
+        existingJson = gson.<Map<String, Object>> fromJson(existingPropertyJson, Map.class);
+        newStackJsonAsObject = gson.<Map<String, Map<Object, Object>>> fromJson(
+            newStackDefaultJson, Map.class);
+      }
+
+      if (existingJson.containsKey(stackId.getStackName())) {
+        continue;
+      }
+
+      existingJson.put(stackId.getStackName(), newStackJsonAsObject.get(stackId.getStackName()));
+
+      String newJson = gson.toJson(existingJson);
+      updatedProperties.put(property, newJson);
+    }
+
+    if (!updatedProperties.isEmpty()) {
+      AmbariManagementController amc = getManagementController();
+      String serviceNote = String.format(
+          "Adding stack tools for %s while distributing a new repository", stackId.toString());
+
+      configHelper.updateConfigType(cluster, amc, clusterEnv.getType(), updatedProperties, null,
+          amc.getAuthName(), serviceNote);
+    }
+  }
 }
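
A minimal Python sketch of the merge that bootstrapStackTools performs on the
JSON-valued cluster-env properties, assuming each property is a JSON map keyed
by stack name at the top level (the property values shown here are
hypothetical; the server-side code above does the same with Gson):

  import json

  # Hypothetical existing cluster value and new-stack default for stack_root.
  existing = json.loads('{"HDP": "/usr/hdp"}')
  new_stack_defaults = json.loads('{"BigInsights": "/usr/iop"}')
  new_stack_name = "BigInsights"

  # Mirror of the Java logic: graft in the new stack's entry only if it is
  # absent, leaving the current stack's entry untouched.
  if new_stack_name not in existing:
      existing[new_stack_name] = new_stack_defaults[new_stack_name]

  print(json.dumps(existing))
  # {"HDP": "/usr/hdp", "BigInsights": "/usr/iop"}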

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
index 0dacb56..1130026 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
@@ -205,6 +205,12 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
    */
   protected static final String UPGRADE_HOST_ORDERED_HOSTS = "Upgrade/host_order";
 
+  /**
+   * The role that will be used when creating host role commands (HRCs) for
+   * the type {@link StageWrapper.Type#UPGRADE_TASKS}.
+   */
+  protected static final String EXECUTE_TASK_ROLE = "ru_execute_tasks";
+
   /*
    * Lifted from RequestResourceProvider
    */
@@ -1327,6 +1333,32 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     }
   }
 
+  /**
+   * Creates an action stage using the {@link #EXECUTE_TASK_ROLE} custom action
+   * to execute some Python command.
+   *
+   * @param context
+   *          the upgrade context.
+   * @param request
+   *          the request object to add the stage to.
+   * @param effectiveStackId
+   *          the stack ID to use when generating content for the command. For
+   *          some upgrade types, this may change during the course of the
+   *          upgrade orchestration; an express upgrade, for example, changes
+   *          it after stopping all services.
+   * @param entity
+   *          the upgrade entity to set the stage information on
+   * @param wrapper
+   *          the stage wrapper containing information to generate the stage.
+   * @param skippable
+   *          {@code true} to mark the stage as being skippable if a failure
+   *          occurs.
+   * @param supportsAutoSkipOnFailure
+   *          {@code true} to automatically skip on a failure.
+   * @param allowRetry
+   *          {@code true} to be able to retry the failed stage.
+   * @throws AmbariException
+   */
   private void makeActionStage(UpgradeContext context, RequestStageContainer request,
       StackId effectiveStackId, UpgradeItemEntity entity, StageWrapper wrapper, boolean skippable,
       boolean supportsAutoSkipOnFailure, boolean allowRetry) throws AmbariException {
@@ -1356,21 +1388,22 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
         && wrapper.getTasks().get(0).getService() != null) {
 
       AmbariMetaInfo ambariMetaInfo = s_metaProvider.get();
-      StackId stackId = context.getTargetStackId();
 
-      StackInfo stackInfo = ambariMetaInfo.getStack(stackId.getStackName(),
-          stackId.getStackVersion());
+      StackInfo stackInfo = ambariMetaInfo.getStack(effectiveStackId.getStackName(),
+          effectiveStackId.getStackVersion());
 
       String serviceName = wrapper.getTasks().get(0).getService();
-      ServiceInfo serviceInfo = ambariMetaInfo.getService(stackId.getStackName(),
-          stackId.getStackVersion(), serviceName);
+      ServiceInfo serviceInfo = ambariMetaInfo.getService(effectiveStackId.getStackName(),
+          effectiveStackId.getStackVersion(), serviceName);
 
       params.put(SERVICE_PACKAGE_FOLDER, serviceInfo.getServicePackageFolder());
       params.put(HOOKS_FOLDER, stackInfo.getStackHooksFolder());
     }
 
     ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
-        "ru_execute_tasks", Collections.singletonList(filter), params);
+        EXECUTE_TASK_ROLE, Collections.singletonList(filter), params);
+
+    actionContext.setStackId(effectiveStackId);
 
     // hosts in maintenance mode are excluded from the upgrade
     actionContext.setMaintenanceModeHostExcluded(true);
@@ -1464,6 +1497,9 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
 
     ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
         function, filters, commandParams);
+
+    actionContext.setStackId(effectiveStackId);
+
     actionContext.setTimeout(wrapper.getMaxTimeout(s_configuration));
     actionContext.setRetryAllowed(allowRetry);
     actionContext.setAutoSkipFailures(context.isComponentFailureAutoSkipped());
@@ -1523,6 +1559,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
         "SERVICE_CHECK", filters, commandParams);
 
+    actionContext.setStackId(effectiveStackId);
     actionContext.setTimeout(wrapper.getMaxTimeout(s_configuration));
     actionContext.setRetryAllowed(allowRetry);
     actionContext.setAutoSkipFailures(context.isServiceCheckFailureAutoSkipped());
@@ -1665,6 +1702,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
         Role.AMBARI_SERVER_ACTION.toString(), Collections.<RequestResourceFilter> emptyList(),
         commandParams);
 
+    actionContext.setStackId(effectiveStackId);
     actionContext.setTimeout(Short.valueOf((short) -1));
     actionContext.setRetryAllowed(allowRetry);
     actionContext.setAutoSkipFailures(context.isComponentFailureAutoSkipped());
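
As the javadoc for makeActionStage notes, the stack used for command
generation can shift mid-orchestration. A toy Python illustration of that
behavior, with hypothetical names (the real selection logic lives in the
upgrade context, not in a helper like this):

  def effective_stack_id(source_stack, target_stack, services_stopped):
      # For an express upgrade, stages created after all services are
      # stopped render commands against the new stack; stages created
      # before that point use the old one.
      return target_stack if services_stopped else source_stack

  assert effective_stack_id("HDP-2.5", "HDP-2.6", False) == "HDP-2.5"
  assert effective_stack_id("HDP-2.5", "HDP-2.6", True) == "HDP-2.6"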

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
index 13114dd..ab8026c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
@@ -31,7 +31,6 @@ import java.util.Set;
 import java.util.TreeMap;
 import java.util.concurrent.TimeUnit;
 
-import com.google.common.base.Objects;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
@@ -46,6 +45,7 @@ import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.base.Objects;
 import com.google.common.cache.Cache;
 import com.google.common.cache.CacheBuilder;
 import com.google.common.collect.Maps;
@@ -89,8 +89,10 @@ public class ConfigHelper {
   public static final String CLUSTER_ENV_RETRY_COMMANDS = "commands_to_retry";
   public static final String CLUSTER_ENV_RETRY_MAX_TIME_IN_SEC = "command_retry_max_time_in_sec";
   public static final String COMMAND_RETRY_MAX_TIME_IN_SEC_DEFAULT = "600";
+  public static final String CLUSTER_ENV_STACK_NAME_PROPERTY = "stack_name";
   public static final String CLUSTER_ENV_STACK_FEATURES_PROPERTY = "stack_features";
   public static final String CLUSTER_ENV_STACK_TOOLS_PROPERTY = "stack_tools";
+  public static final String CLUSTER_ENV_STACK_ROOT_PROPERTY = "stack_root";
 
   public static final String HTTP_ONLY = "HTTP_ONLY";
   public static final String HTTPS_ONLY = "HTTPS_ONLY";
@@ -652,7 +654,7 @@ public class ConfigHelper {
     }
 
     for (Service service : cluster.getServices().values()) {
-      Set<PropertyInfo> serviceProperties = new HashSet<PropertyInfo>(servicesMap.get(service.getName()).getProperties());
+      Set<PropertyInfo> serviceProperties = new HashSet<>(servicesMap.get(service.getName()).getProperties());
       for (PropertyInfo serviceProperty : serviceProperties) {
         if (serviceProperty.getPropertyTypes().contains(propertyType)) {
           String stackPropertyConfigType = fileNameToConfigType(serviceProperty.getFilename());
@@ -907,13 +909,16 @@ public class ConfigHelper {
     return properties;
   }
 
-  public Set<PropertyInfo> getStackProperties(Cluster cluster) throws AmbariException {
-    StackId stackId = cluster.getCurrentStackVersion();
+  public Set<PropertyInfo> getStackProperties(StackId stackId) throws AmbariException {
     StackInfo stack = ambariMetaInfo.getStack(stackId.getStackName(), stackId.getStackVersion());
-
     return ambariMetaInfo.getStackProperties(stack.getName(), stack.getVersion());
   }
 
+  public Set<PropertyInfo> getStackProperties(Cluster cluster) throws AmbariException {
+    StackId stackId = cluster.getCurrentStackVersion();
+    return getStackProperties(stackId);
+  }
+
   /**
    * A helper method to create a new {@link Config} for a given configuration
    * type and updates to the current values, if any. This method will perform the following tasks:
@@ -1128,6 +1133,38 @@ public class ConfigHelper {
   }
 
   /**
+   * Gets the default properties from the specified stack when a cluster is
+   * first installed.
+   *
+   * @param stack
+   *          the stack to pull stack-values from (not {@code null})
+   * @return a mapping of configuration type to map of key/value pairs for the
+   *         default configurations.
+   * @throws AmbariException
+   */
+  public Map<String, Map<String, String>> getDefaultStackProperties(StackId stack)
+      throws AmbariException {
+    Map<String, Map<String, String>> defaultPropertiesByType = new HashMap<>();
+
+    // populate the stack (non-service related) properties
+    Set<PropertyInfo> stackConfigurationProperties = ambariMetaInfo.getStackProperties(
+        stack.getStackName(), stack.getStackVersion());
+
+    for (PropertyInfo stackDefaultProperty : stackConfigurationProperties) {
+      String type = ConfigHelper.fileNameToConfigType(stackDefaultProperty.getFilename());
+
+      if (!defaultPropertiesByType.containsKey(type)) {
+        defaultPropertiesByType.put(type, new HashMap<String, String>());
+      }
+
+      defaultPropertiesByType.get(type).put(stackDefaultProperty.getName(),
+          stackDefaultProperty.getValue());
+    }
+
+    return defaultPropertiesByType;
+  }
+
+  /**
    * Gets the default properties from the specified stack and services when a
    * cluster is first installed.
    *
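
The shape of getDefaultStackProperties' return value, rendered as a Python
dict for clarity (the property values are hypothetical), together with a
sketch of the filename-to-config-type grouping it relies on:

  import os

  # config type -> property name -> stack default
  defaults_by_type = {
      "cluster-env": {
          "stack_name": "HDP",
          "stack_root": '{"HDP": "/usr/hdp"}',
      },
  }

  def file_name_to_config_type(filename):
      # Sketch of ConfigHelper.fileNameToConfigType: strip the directory
      # and the ".xml" extension to get the config type.
      return os.path.basename(filename)[:-len(".xml")]

  assert file_name_to_config_type("configuration/cluster-env.xml") == "cluster-env"
  assert "stack_root" in defaults_by_type["cluster-env"]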

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
index 106d7c8..a2c0b9b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
@@ -69,6 +69,7 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigFactory;
+import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.SecurityType;
@@ -79,6 +80,7 @@ import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Sets;
+import com.google.inject.Provider;
 
 
 /**
@@ -99,6 +101,12 @@ public class AmbariContext {
   @Inject
   ConfigFactory configFactory;
 
+  /**
+   * Used for getting configuration property values from stack and services.
+   */
+  @Inject
+  private Provider<ConfigHelper> configHelper;
+
   private static AmbariManagementController controller;
   private static ClusterController clusterController;
   //todo: task id's.  Use existing mechanism for getting next task id sequence
@@ -205,8 +213,8 @@ public class AmbariContext {
     } catch (AmbariException e) {
       throw new RuntimeException("Failed to persist service and component resources: " + e, e);
     }
-    Set<ServiceRequest> serviceRequests = new HashSet<ServiceRequest>();
-    Set<ServiceComponentRequest> componentRequests = new HashSet<ServiceComponentRequest>();
+    Set<ServiceRequest> serviceRequests = new HashSet<>();
+    Set<ServiceComponentRequest> componentRequests = new HashSet<>();
     for (String service : services) {
       String credentialStoreEnabled = topology.getBlueprint().getCredentialStoreEnabled(service);
       serviceRequests.add(new ServiceRequest(clusterName, service, null, credentialStoreEnabled));
@@ -223,13 +231,13 @@ public class AmbariContext {
     }
     // set all services state to INSTALLED->STARTED
     // this is required so the user can start failed services at the service level
-    Map<String, Object> installProps = new HashMap<String, Object>();
+    Map<String, Object> installProps = new HashMap<>();
     installProps.put(ServiceResourceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID, "INSTALLED");
     installProps.put(ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
-    Map<String, Object> startProps = new HashMap<String, Object>();
+    Map<String, Object> startProps = new HashMap<>();
     startProps.put(ServiceResourceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID, "STARTED");
     startProps.put(ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
-    Predicate predicate = new EqualsPredicate<String>(
+    Predicate predicate = new EqualsPredicate<>(
         ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
     try {
       getServiceResourceProvider().updateResources(
@@ -262,7 +270,7 @@ public class AmbariContext {
     }
     String clusterName = cluster.getClusterName();
 
-    Map<String, Object> properties = new HashMap<String, Object>();
+    Map<String, Object> properties = new HashMap<>();
     properties.put(HostResourceProvider.HOST_CLUSTER_NAME_PROPERTY_ID, clusterName);
     properties.put(HostResourceProvider.HOST_NAME_PROPERTY_ID, hostName);
     properties.put(HostResourceProvider.HOST_RACK_INFO_PROPERTY_ID, host.getRackInfo());
@@ -275,7 +283,7 @@ public class AmbariContext {
           hostName, e.toString()), e);
     }
 
-    final Set<ServiceComponentHostRequest> requests = new HashSet<ServiceComponentHostRequest>();
+    final Set<ServiceComponentHostRequest> requests = new HashSet<>();
 
     for (Map.Entry<String, Collection<String>> entry : components.entrySet()) {
       String service = entry.getKey();
@@ -589,7 +597,7 @@ public class AmbariContext {
    * and the hosts associated with the host group are assigned to the config group.
    */
   private void createConfigGroupsAndRegisterHost(ClusterTopology topology, String groupName) throws AmbariException {
-    Map<String, Map<String, Config>> groupConfigs = new HashMap<String, Map<String, Config>>();
+    Map<String, Map<String, Config>> groupConfigs = new HashMap<>();
     Stack stack = topology.getBlueprint().getStack();
 
     // get the host-group config with cluster creation template overrides
@@ -608,7 +616,7 @@ public class AmbariContext {
       //todo: attributes
       Map<String, Config> serviceConfigs = groupConfigs.get(service);
       if (serviceConfigs == null) {
-        serviceConfigs = new HashMap<String, Config>();
+        serviceConfigs = new HashMap<>();
         groupConfigs.put(service, serviceConfigs);
       }
       serviceConfigs.put(type, config);
@@ -669,6 +677,16 @@ public class AmbariContext {
     return String.format("%s:%s", bpName, hostGroupName);
   }
 
+  /**
+   * Gets an instance of {@link ConfigHelper} for classes which are not
+   * dependency-injected.
+   *
+   * @return a {@link ConfigHelper} instance.
+   */
+  public ConfigHelper getConfigHelper() {
+    return configHelper.get();
+  }
+
   private synchronized HostResourceProvider getHostResourceProvider() {
     if (hostResourceProvider == null) {
       hostResourceProvider = (HostResourceProvider)
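
For readers unfamiliar with Guice, a rough Python analogue of the Provider
indirection added above (class names are stand-ins): the helper is not
constructed until get() is called, which keeps AmbariContext usable from code
paths that are not dependency-injected.

  class Provider:
      """Defers construction until get() is called."""
      def __init__(self, factory):
          self._factory = factory

      def get(self):
          return self._factory()

  class ConfigHelper:
      pass

  config_helper = Provider(ConfigHelper)
  helper = config_helper.get()  # obtained on demand, not at injection time
  assert isinstance(helper, ConfigHelper)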

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
index 0ab8180..e7764b8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
@@ -18,10 +18,20 @@
 package org.apache.ambari.server.upgrade;
 
 import java.sql.SQLException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
-
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigHelper;
+import org.apache.ambari.server.state.PropertyInfo;
+import org.apache.commons.lang.StringUtils;
+
+import com.google.common.collect.Sets;
 import com.google.inject.Inject;
 import com.google.inject.Injector;
 
@@ -42,6 +52,8 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
   private static final String UPGRADE_ITEM_TABLE = "upgrade_item";
   private static final String UPGRADE_ID_COLUMN = "upgrade_id";
 
+  private static final String CLUSTER_ENV = "cluster-env";
+
   /**
    * Constructor.
    *
@@ -89,6 +101,7 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
    */
   @Override
   protected void executeDMLUpdates() throws AmbariException, SQLException {
+    resetStackToolsAndFeatures();
   }
 
   /**
@@ -135,4 +148,52 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
     dbAccessor.addFKConstraint(UPGRADE_TABLE, "FK_upgrade_to_repo_id",
         UPGRADE_TABLE_FROM_REPO_COLUMN, "repo_version", "repo_version_id", false);
   }
+
+  /**
+   * Resets the following properties in {@code cluster-env} to their new
+   * defaults:
+   * <ul>
+   * <li>stack_root
+   * <li>stack_tools
+   * <li>stack_features
+   * </ul>
+   *
+   * @throws AmbariException
+   */
+  private void resetStackToolsAndFeatures() throws AmbariException {
+    Set<String> propertiesToReset = Sets.newHashSet("stack_tools", "stack_features", "stack_root");
+
+    Clusters clusters = injector.getInstance(Clusters.class);
+    ConfigHelper configHelper = injector.getInstance(ConfigHelper.class);
+
+    Map<String, Cluster> clusterMap = clusters.getClusters();
+    for (Cluster cluster : clusterMap.values()) {
+      Config clusterEnv = cluster.getDesiredConfigByType(CLUSTER_ENV);
+      if (null == clusterEnv) {
+        continue;
+      }
+
+      Map<String, String> newStackProperties = new HashMap<>();
+      Set<PropertyInfo> stackProperties = configHelper.getStackProperties(cluster);
+      if (null == stackProperties) {
+        continue;
+      }
+
+      for (PropertyInfo propertyInfo : stackProperties) {
+        String fileName = propertyInfo.getFilename();
+        if (StringUtils.isEmpty(fileName)) {
+          continue;
+        }
+
+        if (StringUtils.equals(ConfigHelper.fileNameToConfigType(fileName), CLUSTER_ENV)) {
+          String stackPropertyName = propertyInfo.getName();
+          if (propertiesToReset.contains(stackPropertyName)) {
+            newStackProperties.put(stackPropertyName, propertyInfo.getValue());
+          }
+        }
+      }
+
+      updateConfigurationPropertiesForCluster(cluster, CLUSTER_ENV, newStackProperties, true, false);
+    }
+  }
 }
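
The filtering step in resetStackToolsAndFeatures, restated as a small Python
sketch (the tuple-based input is a hypothetical flattening of Ambari's
PropertyInfo objects):

  PROPERTIES_TO_RESET = {"stack_tools", "stack_features", "stack_root"}

  def collect_resets(stack_properties):
      # stack_properties: iterable of (config_type, name, default) tuples.
      # Keep only the three cluster-env properties being reset, mapped to
      # their new stack defaults.
      return {name: default
              for config_type, name, default in stack_properties
              if config_type == "cluster-env" and name in PROPERTIES_TO_RESET}

  assert collect_resets([
      ("cluster-env", "stack_root", '{"HDP": "/usr/hdp"}'),
      ("cluster-env", "recovery_enabled", "true"),
  ]) == {"stack_root": '{"HDP": "/usr/hdp"}'}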

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py
index 32df7d3..5b4fd68 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py
@@ -27,6 +27,7 @@ import logging
 from resource_management.core import global_lock
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions import stack_tools
 from resource_management.core.resources import Execute
 from resource_management.core.signal_utils import TerminateStrategy
 from ambari_commons.os_check import OSConst
@@ -56,6 +57,7 @@ SMOKEUSER_PRINCIPAL_DEFAULT = 'ambari-qa@EXAMPLE.COM'
 SMOKEUSER_SCRIPT_PARAM_KEY = 'default.smoke.user'
 SMOKEUSER_DEFAULT = 'ambari-qa'
 
+STACK_NAME = '{{cluster-env/stack_name}}'
 STACK_ROOT = '{{cluster-env/stack_root}}'
 
 HIVE_CONF_DIR_LEGACY = '/etc/hive/conf.server'
@@ -78,7 +80,7 @@ def get_tokens():
   """
   return (SECURITY_ENABLED_KEY,SMOKEUSER_KEYTAB_KEY,SMOKEUSER_PRINCIPAL_KEY,
     HIVE_METASTORE_URIS_KEY, SMOKEUSER_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY,
-    STACK_ROOT)
+    STACK_NAME, STACK_ROOT)
 
 @OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
 def get_tokens():
@@ -175,9 +177,10 @@ def execute(configurations={}, parameters={}, host_name=None):
     bin_dir = HIVE_BIN_DIR_LEGACY
 
 
-    if STACK_ROOT in configurations:
-      hive_conf_dir = configurations[STACK_ROOT] + format("/current/hive-metastore/conf")
-      hive_bin_dir = configurations[STACK_ROOT] + format("/current/hive-metastore/bin")
+    if STACK_NAME in configurations and STACK_ROOT in configurations:
+      stack_root = stack_tools.get_stack_root(configurations[STACK_NAME], configurations[STACK_ROOT])
+      hive_conf_dir = stack_root + format("/current/hive-metastore/conf")
+      hive_bin_dir = stack_root + format("/current/hive-metastore/bin")
 
       if os.path.exists(hive_conf_dir):
         conf_dir = hive_conf_dir
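
A minimal sketch of what the stack_tools.get_stack_root call above is expected
to do, assuming stack_root now carries a JSON map keyed by stack name (the
actual implementation lives in
resource_management/libraries/functions/stack_tools.py, and the same pattern
applies to the LLAP alert below):

  import json

  def get_stack_root(stack_name, stack_root_json):
      # Resolve this stack's root from the JSON map, e.g.
      # '{"HDP": "/usr/hdp"}' -> "/usr/hdp" for stack name "HDP".
      return json.loads(stack_root_json)[stack_name]

  assert get_stack_root("HDP", '{"HDP": "/usr/hdp"}') == "/usr/hdp"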

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a3bfd5d/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py
index 98d1899..e46c896 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py
@@ -26,7 +26,7 @@ import subprocess
 
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions import get_kinit_path
-from ambari_commons.os_check import OSConst
+from resource_management.libraries.functions import stack_tools
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from resource_management.core import shell
 from resource_management.core.resources import Execute
@@ -58,6 +58,7 @@ HIVE_AUTHENTICATION_DEFAULT = 'NOSASL'
 HIVE_USER_KEY = '{{hive-env/hive_user}}'
 HIVE_USER_DEFAULT = 'default.smoke.user'
 
+STACK_NAME = '{{cluster-env/stack_name}}'
 STACK_ROOT = '{{cluster-env/stack_root}}'
 STACK_ROOT_DEFAULT = Script.get_stack_root()
 
@@ -88,7 +89,7 @@ def get_tokens():
   to build the dictionary passed into execute
   """
   return (SECURITY_ENABLED_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY, HIVE_PRINCIPAL_KEY, HIVE_PRINCIPAL_KEYTAB_KEY,
-          HIVE_USER_KEY, STACK_ROOT, LLAP_APP_NAME_KEY)
+          HIVE_USER_KEY, STACK_NAME, STACK_ROOT, LLAP_APP_NAME_KEY)
 
 
 @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
@@ -159,8 +160,11 @@ def execute(configurations={}, parameters={}, host_name=None):
 
 
     start_time = time.time()
-    if STACK_ROOT in configurations:
-      llap_status_cmd = configurations[STACK_ROOT] + format("/current/hive-server2-hive2/bin/hive --service llapstatus --name {llap_app_name}  --findAppTimeout {LLAP_APP_STATUS_CMD_TIMEOUT}")
+    if STACK_NAME in configurations and STACK_ROOT in configurations:
+      stack_root = stack_tools.get_stack_root(configurations[STACK_NAME],
+        configurations[STACK_ROOT])
+
+      llap_status_cmd = stack_root + format("/current/hive-server2-hive2/bin/hive --service llapstatus --name {llap_app_name}  --findAppTimeout {LLAP_APP_STATUS_CMD_TIMEOUT}")
     else:
       llap_status_cmd = STACK_ROOT_DEFAULT + format("/current/hive-server2-hive2/bin/hive --service llapstatus --name {llap_app_name} --findAppTimeout {LLAP_APP_STATUS_CMD_TIMEOUT}")