Posted to commits@ambari.apache.org by jo...@apache.org on 2017/07/11 16:25:51 UTC

[18/50] [abbrv] ambari git commit: Revert "AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)"

Revert "AMBARI-21430 - Allow Multiple Versions of Stack Tools to Co-Exist (jonathanhurley)"

This reverts commit db5501028767b779cf100e640146910a5cfdc60a.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a0aca4f1
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a0aca4f1
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a0aca4f1

Branch: refs/heads/branch-2.5
Commit: a0aca4f1cb45343297f63c0ef4c02d8e74afa8d0
Parents: db55010
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Fri Jul 7 14:35:06 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Fri Jul 7 14:35:06 2017 -0400

----------------------------------------------------------------------
 .../libraries/functions/stack_features.py       |  13 -
 .../libraries/functions/stack_tools.py          |  39 -
 .../libraries/script/script.py                  |  19 +-
 .../ClusterStackVersionResourceProvider.java    | 157 +---
 .../ambari/server/state/ConfigHelper.java       |   5 +-
 .../server/upgrade/UpgradeCatalog252.java       |  63 +-
 .../package/alerts/alert_hive_metastore.py      |  11 +-
 .../package/alerts/alert_llap_app_status.py     |  12 +-
 .../package/alerts/alert_check_oozie_server.py  |   8 +-
 .../resources/host_scripts/alert_disk_space.py  |  10 +-
 .../host_scripts/alert_version_select.py        |   8 +-
 .../4.0/configuration/cluster-env.xml           |  19 +-
 .../4.0/properties/stack_features.json          | 422 +++++----
 .../BigInsights/4.0/properties/stack_tools.json |  14 +-
 .../HDP/2.0.6/configuration/cluster-env.xml     |  16 +-
 .../HDP/2.0.6/properties/stack_features.json    | 852 +++++++++----------
 .../HDP/2.0.6/properties/stack_tools.json       |  16 +-
 .../PERF/1.0/configuration/cluster-env.xml      |  16 +-
 .../PERF/1.0/properties/stack_features.json     |  38 +-
 .../stacks/PERF/1.0/properties/stack_tools.json |  16 +-
 20 files changed, 718 insertions(+), 1036 deletions(-)
----------------------------------------------------------------------
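
For orientation, a minimal sketch (not part of the commit) of the two cluster-env property shapes this revert switches between: AMBARI-21430 keyed the stack_tools/stack_features/stack_root JSON by stack name, and this revert restores the flat, single-stack form. The values below are taken from the HDP defaults in the diff.

import json

# Keyed form introduced by AMBARI-21430 (removed by this revert):
keyed = json.loads('{"HDP": {"stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"]}}')
print(keyed["HDP"]["stack_selector"][0])   # needs a stack name to index

# Flat form restored by this revert:
flat = json.loads('{"stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"]}')
print(flat["stack_selector"][0])           # hdp-select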


http://git-wip-us.apache.org/repos/asf/ambari/blob/a0aca4f1/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
index 7811e26..2b3df5f 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
@@ -42,12 +42,6 @@ def check_stack_feature(stack_feature, stack_version):
 
   from resource_management.libraries.functions.default import default
   from resource_management.libraries.functions.version import compare_versions
-
-  stack_name = default("/hostLevelParams/stack_name", None)
-  if stack_name is None:
-    Logger.warning("Cannot find the stack name in the command. Stack features cannot be loaded")
-    return False
-
   stack_features_config = default("/configurations/cluster-env/stack_features", None)
 
   if not stack_version:
@@ -56,13 +50,6 @@ def check_stack_feature(stack_feature, stack_version):
 
   if stack_features_config:
     data = json.loads(stack_features_config)
-
-    if stack_name not in data:
-      Logger.warning("Cannot find stack features for the stack named {0}".format(stack_name))
-      return False
-
-    data = data[stack_name]
-
     for feature in data["stack_features"]:
       if feature["name"] == stack_feature:
         if "min_version" in feature:

http://git-wip-us.apache.org/repos/asf/ambari/blob/a0aca4f1/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
index 420ae11..02ae62d 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
@@ -39,33 +39,15 @@ def get_stack_tool(name):
   :return: tool_name, tool_path, tool_package
   """
   from resource_management.libraries.functions.default import default
-
-  stack_name = default("/hostLevelParams/stack_name", None)
-  if stack_name is None:
-    Logger.warning("Cannot find the stack name in the command. Stack tools cannot be loaded")
-    return (None, None, None)
-
   stack_tools = None
   stack_tools_config = default("/configurations/cluster-env/stack_tools", None)
   if stack_tools_config:
     stack_tools = json.loads(stack_tools_config)
 
-  if stack_tools is None:
-    Logger.warning("The stack tools could not be found in cluster-env")
-    return (None, None, None)
-
-  if stack_name not in stack_tools:
-    Logger.warning("Cannot find stack tools for the stack named {0}".format(stack_name))
-    return (None, None, None)
-
-  # load the stack tooks keyed by the stack name
-  stack_tools = stack_tools[stack_name]
-
   if not stack_tools or not name or name.lower() not in stack_tools:
     Logger.warning("Cannot find config for {0} stack tool in {1}".format(str(name), str(stack_tools)))
     return (None, None, None)
 
-
   tool_config = stack_tools[name.lower()]
 
   # Return fixed length (tool_name, tool_path tool_package) tuple
@@ -99,24 +81,3 @@ def get_stack_tool_package(name):
   """
   (tool_name, tool_path, tool_package) = get_stack_tool(name)
   return tool_package
-
-
-def get_stack_root(stack_name, stack_root_json):
-  """
-  Get the stack-specific install root directory from the raw, JSON-escaped properties.
-  :param stack_name:
-  :param stack_root_json:
-  :return: stack_root
-  """
-  from resource_management.libraries.functions.default import default
-
-  if stack_root_json is None:
-    return "/usr/{0}".format(stack_name.lower())
-
-  stack_root = json.loads(stack_root_json)
-
-  if stack_name not in stack_root:
-    Logger.warning("Cannot determine stack root for stack named {0}".format(stack_name))
-    return "/usr/{0}".format(stack_name.lower())
-
-  return stack_root[stack_name]
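
A minimal sketch of the restored get_stack_tool() above (Logger dependency and warning paths omitted): tools are indexed directly by lower-cased name, with no stack-name level above them, and get_stack_root() is gone entirely.

import json

stack_tools_config = '{"stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"]}'

def get_stack_tool(name):
    stack_tools = json.loads(stack_tools_config)
    if not name or name.lower() not in stack_tools:
        return (None, None, None)
    tool_config = stack_tools[name.lower()]
    # pad/truncate to a fixed (tool_name, tool_path, tool_package) tuple
    return tuple((list(tool_config) + [None, None, None])[:3])

print(get_stack_tool("stack_selector"))  # ('hdp-select', '/usr/bin/hdp-select', 'hdp-select')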

http://git-wip-us.apache.org/repos/asf/ambari/blob/a0aca4f1/ambari-common/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index 0df6900..04928de 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -553,11 +553,7 @@ class Script(object):
     :return: a stack name or None
     """
     from resource_management.libraries.functions.default import default
-    stack_name = default("/hostLevelParams/stack_name", None)
-    if stack_name is None:
-      stack_name = default("/configurations/cluster-env/stack_name", "HDP")
-
-    return stack_name
+    return default("/hostLevelParams/stack_name", "HDP")
 
   @staticmethod
   def get_stack_root():
@@ -567,18 +563,7 @@ class Script(object):
     """
     from resource_management.libraries.functions.default import default
     stack_name = Script.get_stack_name()
-    stack_root_json = default("/configurations/cluster-env/stack_root", None)
-
-    if stack_root_json is None:
-      return "/usr/{0}".format(stack_name.lower())
-
-    stack_root = json.loads(stack_root_json)
-
-    if stack_name not in stack_root:
-      Logger.warning("Cannot determine stack root for stack named {0}".format(stack_name))
-      return "/usr/{0}".format(stack_name.lower())
-
-    return stack_root[stack_name]
+    return default("/configurations/cluster-env/stack_root", "/usr/{0}".format(stack_name.lower()))
 
   @staticmethod
   def get_stack_version():
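
A hedged sketch of the two reverted accessors; the command dict here is a stand-in for the agent command structure that default() reads its paths from. Post-revert, stack_root is a plain path in cluster-env with a /usr/<stackname> fallback.

def get_stack_name(command):
    return command.get("hostLevelParams", {}).get("stack_name", "HDP")

def get_stack_root(command):
    stack_name = get_stack_name(command)
    fallback = "/usr/{0}".format(stack_name.lower())
    return command.get("configurations", {}).get("cluster-env", {}).get("stack_root", fallback)

print(get_stack_root({}))  # /usr/hdp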

http://git-wip-us.apache.org/repos/asf/ambari/blob/a0aca4f1/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
index 0300672..64e0b14 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
@@ -76,8 +76,6 @@ import org.apache.ambari.server.serveraction.upgrades.FinalizeUpgradeAction;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.ComponentInfo;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.MaintenanceState;
 import org.apache.ambari.server.state.RepositoryVersionState;
@@ -197,13 +195,6 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
   private static HostComponentStateDAO hostComponentStateDAO;
 
   /**
-   * Used for updating the existing stack tools with those of the stack being
-   * distributed.
-   */
-  @Inject
-  private static ConfigHelper configHelper;
-
-  /**
    * We have to include such a hack here, because if we
    * make finalizeUpgradeAction field static and request injection
    * for it, there will be a circle dependency error
@@ -225,11 +216,11 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
   @Override
   public Set<Resource> getResourcesAuthorized(Request request, Predicate predicate) throws
       SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
-    final Set<Resource> resources = new HashSet<>();
+    final Set<Resource> resources = new HashSet<Resource>();
     final Set<String> requestedIds = getRequestPropertyIds(request, predicate);
     final Set<Map<String, Object>> propertyMaps = getPropertyMaps(predicate);
 
-    List<ClusterVersionEntity> requestedEntities = new ArrayList<>();
+    List<ClusterVersionEntity> requestedEntities = new ArrayList<ClusterVersionEntity>();
     for (Map<String, Object> propertyMap: propertyMaps) {
       final String clusterName = propertyMap.get(CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID).toString();
       final Long id;
@@ -253,7 +244,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
     for (ClusterVersionEntity entity: requestedEntities) {
       final Resource resource = new ResourceImpl(Resource.Type.ClusterStackVersion);
 
-      final Map<String, List<String>> hostStates = new HashMap<>();
+      final Map<String, List<String>> hostStates = new HashMap<String, List<String>>();
       for (RepositoryVersionState state: RepositoryVersionState.values()) {
         hostStates.put(state.name(), new ArrayList<String>());
       }
@@ -304,10 +295,12 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
 
     String clName;
     final String desiredRepoVersion;
+    String stackName;
+    String stackVersion;
 
     Map<String, Object> propertyMap = iterator.next();
 
-    Set<String> requiredProperties = new HashSet<>();
+    Set<String> requiredProperties = new HashSet<String>();
     requiredProperties.add(CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID);
     requiredProperties.add(CLUSTER_STACK_VERSION_REPOSITORY_VERSION_PROPERTY_ID);
     requiredProperties.add(CLUSTER_STACK_VERSION_STACK_PROPERTY_ID);
@@ -342,29 +335,19 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
           cluster.getClusterName(), entity.getDirection().getText(false)));
     }
 
-    String stackName = (String) propertyMap.get(CLUSTER_STACK_VERSION_STACK_PROPERTY_ID);
-    String stackVersion = (String) propertyMap.get(CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID);
-    if (StringUtils.isBlank(stackName) || StringUtils.isBlank(stackVersion)) {
-      String message = String.format(
-          "Both the %s and %s properties are required when distributing a new stack",
-          CLUSTER_STACK_VERSION_STACK_PROPERTY_ID, CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID);
-
-      throw new SystemException(message);
-    }
-
-    final StackId stackId = new StackId(stackName, stackVersion);
-
-    if (!ami.isSupportedStack(stackName, stackVersion)) {
-      throw new NoSuchParentResourceException(String.format("Stack %s is not supported", stackId));
-    }
-
-    // bootstrap the stack tools if necessary for the stack which is being
-    // distributed
-    try {
-      bootstrapStackTools(stackId, cluster);
-    } catch (AmbariException ambariException) {
-      throw new SystemException("Unable to modify stack tools for new stack being distributed",
-          ambariException);
+    final StackId stackId;
+    if (propertyMap.containsKey(CLUSTER_STACK_VERSION_STACK_PROPERTY_ID) &&
+            propertyMap.containsKey(CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID)) {
+      stackName = (String) propertyMap.get(CLUSTER_STACK_VERSION_STACK_PROPERTY_ID);
+      stackVersion = (String) propertyMap.get(CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID);
+      stackId = new StackId(stackName, stackVersion);
+      if (! ami.isSupportedStack(stackName, stackVersion)) {
+        throw new NoSuchParentResourceException(String.format("Stack %s is not supported",
+                stackId));
+      }
+    } else { // Using stack that is current for cluster
+      StackId currentStackVersion = cluster.getCurrentStackVersion();
+      stackId = currentStackVersion;
     }
 
     RepositoryVersionEntity repoVersionEnt = repositoryVersionDAO.findByStackAndVersion(
@@ -508,7 +491,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
 
     // build the list of OS repos
     List<OperatingSystemEntity> operatingSystems = repoVersionEnt.getOperatingSystems();
-    Map<String, List<RepositoryEntity>> perOsRepos = new HashMap<>();
+    Map<String, List<RepositoryEntity>> perOsRepos = new HashMap<String, List<RepositoryEntity>>();
     for (OperatingSystemEntity operatingSystem : operatingSystems) {
 
       if (operatingSystem.isAmbariManagedRepos()) {
@@ -521,7 +504,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
     RequestStageContainer req = createRequest();
 
     Iterator<Host> hostIterator = hosts.iterator();
-    Map<String, String> hostLevelParams = new HashMap<>();
+    Map<String, String> hostLevelParams = new HashMap<String, String>();
     hostLevelParams.put(JDK_LOCATION, getManagementController().getJdkResourceUrl());
     String hostParamsJson = StageUtils.getGson().toJson(hostLevelParams);
 
@@ -555,7 +538,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
 
     boolean hasStage = false;
 
-    ArrayList<Stage> stages = new ArrayList<>(batchCount);
+    ArrayList<Stage> stages = new ArrayList<Stage>(batchCount);
     for (int batchId = 1; batchId <= batchCount; batchId++) {
       // Create next stage
       String stageName;
@@ -635,8 +618,8 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
 
 
     // determine packages for all services that are installed on host
-    List<ServiceOsSpecific.Package> packages = new ArrayList<>();
-    Set<String> servicesOnHost = new HashSet<>();
+    List<ServiceOsSpecific.Package> packages = new ArrayList<ServiceOsSpecific.Package>();
+    Set<String> servicesOnHost = new HashSet<String>();
     List<ServiceComponentHost> components = cluster.getServiceComponentHosts(host.getHostName());
     for (ServiceComponentHost component : components) {
       if (repoServices.isEmpty() || repoServices.contains(component.getServiceName())) {
@@ -687,7 +670,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
     final String packageList = gson.toJson(packages);
     final String repoList = gson.toJson(repoInfo);
 
-    Map<String, String> params = new HashMap<>();
+    Map<String, String> params = new HashMap<String, String>();
     params.put("stack_id", stackId.getStackId());
     params.put("repository_version", repoVersion.getVersion());
     params.put("base_urls", repoList);
@@ -804,7 +787,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
       }
       Map<String, Object> propertyMap = iterator.next();
 
-      Set<String> requiredProperties = new HashSet<>();
+      Set<String> requiredProperties = new HashSet<String>();
       requiredProperties.add(CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID);
       requiredProperties.add(CLUSTER_STACK_VERSION_REPOSITORY_VERSION_PROPERTY_ID);
       requiredProperties.add(CLUSTER_STACK_VERSION_STATE_PROPERTY_ID);
@@ -843,7 +826,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
       }
 
       if (!force) {
-        Map<String, String> args = new HashMap<>();
+        Map<String, String> args = new HashMap<String, String>();
         if (newStateStr.equals(RepositoryVersionState.CURRENT.toString())) {
           // Finalize upgrade workflow
           args.put(FinalizeUpgradeAction.UPGRADE_DIRECTION_KEY, "upgrade");
@@ -858,7 +841,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
 
         // Get a host name to populate the hostrolecommand table's hostEntity.
         String defaultHostName;
-        ArrayList<Host> hosts = new ArrayList<>(cluster.getHosts());
+        ArrayList<Host> hosts = new ArrayList<Host>(cluster.getHosts());
         if (!hosts.isEmpty()) {
           Collections.sort(hosts);
           defaultHostName = hosts.get(0).getHostName();
@@ -993,88 +976,4 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
   }
 
 
-  /**
-   * Ensures that the stack tools and stack features are set on
-   * {@link ConfigHelper#CLUSTER_ENV} for the stack of the repository being
-   * distributed.
-   * <p/>
-   * This step ensures that the new repository can be distributed with the
-   * correct tools.
-   *
-   * @param stackId
-   * @param cluster
-   * @throws AmbariException
-   */
-  private void bootstrapStackTools(StackId stackId, Cluster cluster) throws AmbariException {
-    // if the stack name is the same as the cluster's current stack name, then
-    // there's no work to do
-    if (StringUtils.equals(stackId.getStackName(),
-        cluster.getCurrentStackVersion().getStackName())) {
-      return;
-    }
-
-    // get the stack tools/features for the stack being distributed
-    Map<String, Map<String, String>> defaultStackConfigurationsByType = configHelper.getDefaultProperties(
-        stackId, cluster);
-
-    Map<String, String> clusterEnvDefaults = defaultStackConfigurationsByType.get(
-        ConfigHelper.CLUSTER_ENV);
-
-    Config clusterEnv = cluster.getDesiredConfigByType(ConfigHelper.CLUSTER_ENV);
-    Map<String, String> clusterEnvProperties = clusterEnv.getProperties();
-
-    // the 3 properties we need to check and update
-    Set<String> properties = Sets.newHashSet(ConfigHelper.CLUSTER_ENV_STACK_ROOT_PROPERTY,
-        ConfigHelper.CLUSTER_ENV_STACK_TOOLS_PROPERTY,
-        ConfigHelper.CLUSTER_ENV_STACK_FEATURES_PROPERTY);
-
-    // any updates are stored here and merged into the existing config type
-    Map<String, String> updatedProperties = new HashMap<>();
-
-    for (String property : properties) {
-      // determine if the property exists in the stack being distributed (it
-      // kind of has to, but we'll be safe if it's not found)
-      String newStackDefaultJson = clusterEnvDefaults.get(property);
-      if (StringUtils.isBlank(newStackDefaultJson)) {
-        continue;
-      }
-
-      String existingPropertyJson = clusterEnvProperties.get(property);
-
-      // if the stack tools/features property doesn't exist, then just set the
-      // one from the new stack
-      if (StringUtils.isBlank(existingPropertyJson)) {
-        updatedProperties.put(property, newStackDefaultJson);
-        continue;
-      }
-
-      // now is the hard part - we need to check to see if the new stack tools
-      // exists alongside the current tools and if it doesn't, then add the new
-      // tools in
-      Map<String, Map<Object, Object>> existingJson = gson.<Map<String, Map<Object, Object>>> fromJson(
-          existingPropertyJson, Map.class);
-
-      if (existingJson.keySet().contains(stackId.getStackName())) {
-        continue;
-      }
-
-      Map<String, Map<Object, Object>> newStackJsonAsObject = gson.<Map<String, Map<Object, Object>>> fromJson(
-          newStackDefaultJson, Map.class);
-
-      existingJson.put(stackId.getStackName(), newStackJsonAsObject.get(stackId.getStackName()));
-
-      String newJson = gson.toJson(existingJson);
-      updatedProperties.put(property, newJson);
-    }
-
-    if (!updatedProperties.isEmpty()) {
-      AmbariManagementController amc = getManagementController();
-      String serviceNote = String.format(
-          "Adding stack tools for %s while distributing a new repository", stackId.toString());
-
-      configHelper.updateConfigType(cluster, amc, clusterEnv.getType(), updatedProperties, null,
-          amc.getAuthName(), serviceNote);
-    }
-
-  }
 }
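
The removed bootstrapStackTools() merged the incoming stack's entry into the keyed cluster-env JSON before distributing a repository. A hedged Python rendering of that merge step (the Java used Gson; the shape mirrors the removed code):

import json

def merge_stack_entry(existing_json, new_stack_default_json, stack_name):
    existing = json.loads(existing_json)
    if stack_name in existing:
        return None  # already present; nothing to update
    new_defaults = json.loads(new_stack_default_json)
    existing[stack_name] = new_defaults[stack_name]
    return json.dumps(existing)

print(merge_stack_entry(
    '{"HDP": {"stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"]}}',
    '{"BigInsights": {"stack_selector": ["iop-select", "/usr/bin/iop-select", "iop-select"]}}',
    "BigInsights"))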

http://git-wip-us.apache.org/repos/asf/ambari/blob/a0aca4f1/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
index 646e90c..13114dd 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
@@ -31,6 +31,7 @@ import java.util.Set;
 import java.util.TreeMap;
 import java.util.concurrent.TimeUnit;
 
+import com.google.common.base.Objects;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
@@ -45,7 +46,6 @@ import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.base.Objects;
 import com.google.common.cache.Cache;
 import com.google.common.cache.CacheBuilder;
 import com.google.common.collect.Maps;
@@ -91,7 +91,6 @@ public class ConfigHelper {
   public static final String COMMAND_RETRY_MAX_TIME_IN_SEC_DEFAULT = "600";
   public static final String CLUSTER_ENV_STACK_FEATURES_PROPERTY = "stack_features";
   public static final String CLUSTER_ENV_STACK_TOOLS_PROPERTY = "stack_tools";
-  public static final String CLUSTER_ENV_STACK_ROOT_PROPERTY = "stack_root";
 
   public static final String HTTP_ONLY = "HTTP_ONLY";
   public static final String HTTPS_ONLY = "HTTPS_ONLY";
@@ -653,7 +652,7 @@ public class ConfigHelper {
     }
 
     for (Service service : cluster.getServices().values()) {
-      Set<PropertyInfo> serviceProperties = new HashSet<>(servicesMap.get(service.getName()).getProperties());
+      Set<PropertyInfo> serviceProperties = new HashSet<PropertyInfo>(servicesMap.get(service.getName()).getProperties());
       for (PropertyInfo serviceProperty : serviceProperties) {
         if (serviceProperty.getPropertyTypes().contains(propertyType)) {
           String stackPropertyConfigType = fileNameToConfigType(serviceProperty.getFilename());

http://git-wip-us.apache.org/repos/asf/ambari/blob/a0aca4f1/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
index e7764b8..0ab8180 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
@@ -18,20 +18,10 @@
 package org.apache.ambari.server.upgrade;
 
 import java.sql.SQLException;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.state.PropertyInfo;
-import org.apache.hadoop.metrics2.sink.relocated.commons.lang.StringUtils;
-
-import com.google.common.collect.Sets;
+
 import com.google.inject.Inject;
 import com.google.inject.Injector;
 
@@ -52,8 +42,6 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
   private static final String UPGRADE_ITEM_TABLE = "upgrade_item";
   private static final String UPGRADE_ID_COLUMN = "upgrade_id";
 
-  private static final String CLUSTER_ENV = "cluster-env";
-
   /**
    * Constructor.
    *
@@ -101,7 +89,6 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
    */
   @Override
   protected void executeDMLUpdates() throws AmbariException, SQLException {
-    resetStackToolsAndFeatures();
   }
 
   /**
@@ -148,52 +135,4 @@ public class UpgradeCatalog252 extends AbstractUpgradeCatalog {
     dbAccessor.addFKConstraint(UPGRADE_TABLE, "FK_upgrade_to_repo_id",
         UPGRADE_TABLE_FROM_REPO_COLUMN, "repo_version", "repo_version_id", false);
   }
-
-  /**
-   * Resets the following properties in {@code cluster-env} to their new
-   * defaults:
-   * <ul>
-   * <li>stack_root
-   * <li>stack_tools
-   * <li>stack_features
-   * <ul>
-   *
-   * @throws AmbariException
-   */
-  private void resetStackToolsAndFeatures() throws AmbariException {
-    Set<String> propertiesToReset = Sets.newHashSet("stack_tools", "stack_features", "stack_root");
-
-    Clusters clusters = injector.getInstance(Clusters.class);
-    ConfigHelper configHelper = injector.getInstance(ConfigHelper.class);
-
-    Map<String, Cluster> clusterMap = clusters.getClusters();
-    for (Cluster cluster : clusterMap.values()) {
-      Config clusterEnv = cluster.getDesiredConfigByType(CLUSTER_ENV);
-      if (null == clusterEnv) {
-        continue;
-      }
-
-      Map<String, String> newStackProperties = new HashMap<>();
-      Set<PropertyInfo> stackProperties = configHelper.getStackProperties(cluster);
-      if (null == stackProperties) {
-        continue;
-      }
-
-      for (PropertyInfo propertyInfo : stackProperties) {
-        String fileName = propertyInfo.getFilename();
-        if (StringUtils.isEmpty(fileName)) {
-          continue;
-        }
-
-        if (StringUtils.equals(ConfigHelper.fileNameToConfigType(fileName), CLUSTER_ENV)) {
-          String stackPropertyName = propertyInfo.getName();
-          if (propertiesToReset.contains(stackPropertyName)) {
-            newStackProperties.put(stackPropertyName, propertyInfo.getValue());
-          }
-        }
-      }
-
-      updateConfigurationPropertiesForCluster(cluster, CLUSTER_ENV, newStackProperties, true, false);
-    }
-  }
 }
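
The removed resetStackToolsAndFeatures() pushed the stack's cluster-env defaults for three properties back over each cluster's config. A hedged sketch of its selection step, with (config_type, name, value) tuples standing in for PropertyInfo:

PROPERTIES_TO_RESET = {"stack_tools", "stack_features", "stack_root"}

def collect_resets(stack_properties):
    return {name: value
            for (config_type, name, value) in stack_properties
            if config_type == "cluster-env" and name in PROPERTIES_TO_RESET}

print(collect_resets([
    ("cluster-env", "stack_root", '{"HDP":"/usr/hdp"}'),
    ("hdfs-site", "dfs.replication", "3"),
]))  # {'stack_root': '{"HDP":"/usr/hdp"}'}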

http://git-wip-us.apache.org/repos/asf/ambari/blob/a0aca4f1/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py
index 5b4fd68..32df7d3 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_hive_metastore.py
@@ -27,7 +27,6 @@ import logging
 from resource_management.core import global_lock
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions import get_kinit_path
-from resource_management.libraries.functions import stack_tools
 from resource_management.core.resources import Execute
 from resource_management.core.signal_utils import TerminateStrategy
 from ambari_commons.os_check import OSConst
@@ -57,7 +56,6 @@ SMOKEUSER_PRINCIPAL_DEFAULT = 'ambari-qa@EXAMPLE.COM'
 SMOKEUSER_SCRIPT_PARAM_KEY = 'default.smoke.user'
 SMOKEUSER_DEFAULT = 'ambari-qa'
 
-STACK_NAME = '{{cluster-env/stack_name}}'
 STACK_ROOT = '{{cluster-env/stack_root}}'
 
 HIVE_CONF_DIR_LEGACY = '/etc/hive/conf.server'
@@ -80,7 +78,7 @@ def get_tokens():
   """
   return (SECURITY_ENABLED_KEY,SMOKEUSER_KEYTAB_KEY,SMOKEUSER_PRINCIPAL_KEY,
     HIVE_METASTORE_URIS_KEY, SMOKEUSER_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY,
-    STACK_NAME, STACK_ROOT)
+    STACK_ROOT)
 
 @OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
 def get_tokens():
@@ -177,10 +175,9 @@ def execute(configurations={}, parameters={}, host_name=None):
     bin_dir = HIVE_BIN_DIR_LEGACY
 
 
-    if STACK_NAME in configurations and STACK_ROOT in configurations:
-      stack_root = stack_tools.get_stack_root(configurations[STACK_NAME], configurations[STACK_ROOT])
-      hive_conf_dir = stack_root + format("/current/hive-metastore/conf")
-      hive_bin_dir = stack_root + format("/current/hive-metastore/bin")
+    if STACK_ROOT in configurations:
+      hive_conf_dir = configurations[STACK_ROOT] + format("/current/hive-metastore/conf")
+      hive_bin_dir = configurations[STACK_ROOT] + format("/current/hive-metastore/bin")
 
       if os.path.exists(hive_conf_dir):
         conf_dir = hive_conf_dir
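
The same pattern recurs in the LLAP, Oozie, disk-space, and version-select alerts below: the {{cluster-env/stack_root}} token is once again used directly as a path, with no stack_tools.get_stack_root() indirection. A hedged stand-alone sketch (the real script additionally checks that the directories exist):

STACK_ROOT = '{{cluster-env/stack_root}}'

def resolve_hive_dirs(configurations,
                      conf_default='/etc/hive/conf.server',
                      bin_default='/usr/lib/hive/bin'):
    conf_dir, bin_dir = conf_default, bin_default
    if STACK_ROOT in configurations:
        conf_dir = configurations[STACK_ROOT] + "/current/hive-metastore/conf"
        bin_dir = configurations[STACK_ROOT] + "/current/hive-metastore/bin"
    return conf_dir, bin_dir

print(resolve_hive_dirs({STACK_ROOT: "/usr/hdp"}))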

http://git-wip-us.apache.org/repos/asf/ambari/blob/a0aca4f1/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py
index e46c896..98d1899 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_llap_app_status.py
@@ -26,7 +26,7 @@ import subprocess
 
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions import get_kinit_path
-from resource_management.libraries.functions import stack_tools
+from ambari_commons.os_check import OSConst
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from resource_management.core import shell
 from resource_management.core.resources import Execute
@@ -58,7 +58,6 @@ HIVE_AUTHENTICATION_DEFAULT = 'NOSASL'
 HIVE_USER_KEY = '{{hive-env/hive_user}}'
 HIVE_USER_DEFAULT = 'default.smoke.user'
 
-STACK_NAME = '{{cluster-env/stack_name}}'
 STACK_ROOT = '{{cluster-env/stack_root}}'
 STACK_ROOT_DEFAULT = Script.get_stack_root()
 
@@ -89,7 +88,7 @@ def get_tokens():
   to build the dictionary passed into execute
   """
   return (SECURITY_ENABLED_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY, HIVE_PRINCIPAL_KEY, HIVE_PRINCIPAL_KEYTAB_KEY,
-          HIVE_USER_KEY, STACK_NAME, STACK_ROOT, LLAP_APP_NAME_KEY)
+          HIVE_USER_KEY, STACK_ROOT, LLAP_APP_NAME_KEY)
 
 
 @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
@@ -160,11 +159,8 @@ def execute(configurations={}, parameters={}, host_name=None):
 
 
     start_time = time.time()
-    if STACK_NAME in configurations and STACK_ROOT in configurations:
-      stack_root = stack_tools.get_stack_root(configurations[STACK_NAME],
-        configurations[STACK_ROOT])
-
-      llap_status_cmd = stack_root + format("/current/hive-server2-hive2/bin/hive --service llapstatus --name {llap_app_name}  --findAppTimeout {LLAP_APP_STATUS_CMD_TIMEOUT}")
+    if STACK_ROOT in configurations:
+      llap_status_cmd = configurations[STACK_ROOT] + format("/current/hive-server2-hive2/bin/hive --service llapstatus --name {llap_app_name}  --findAppTimeout {LLAP_APP_STATUS_CMD_TIMEOUT}")
     else:
       llap_status_cmd = STACK_ROOT_DEFAULT + format("/current/hive-server2-hive2/bin/hive --service llapstatus --name {llap_app_name} --findAppTimeout {LLAP_APP_STATUS_CMD_TIMEOUT}")
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a0aca4f1/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py
index 54eef18..0e9fe74 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py
@@ -26,7 +26,6 @@ from resource_management.core.resources import Execute
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.functions import get_klist_path
-from resource_management.libraries.functions import stack_tools
 from ambari_commons.os_check import OSConst, OSCheck
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from urlparse import urlparse
@@ -67,7 +66,6 @@ USER_PRINCIPAL_DEFAULT = 'oozie@EXAMPLE.COM'
 # default user
 USER_DEFAULT = 'oozie'
 
-STACK_NAME_KEY = '{{cluster-env/stack_name}}'
 STACK_ROOT_KEY = '{{cluster-env/stack_root}}'
 STACK_ROOT_DEFAULT = '/usr/hdp'
 
@@ -88,7 +86,7 @@ def get_tokens():
   to build the dictionary passed into execute
   """
   return (OOZIE_URL_KEY, USER_PRINCIPAL_KEY, SECURITY_ENABLED, USER_KEYTAB_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY,
-          USER_KEY, OOZIE_HTTPS_PORT, OOZIE_ENV_CONTENT, STACK_NAME_KEY, STACK_ROOT_KEY)
+          USER_KEY, OOZIE_HTTPS_PORT, OOZIE_ENV_CONTENT, STACK_ROOT_KEY)
 
 @OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
 def get_check_command(oozie_url, host_name, configurations):
@@ -160,8 +158,8 @@ def get_check_command(oozie_url, host_name, configurations, parameters, only_kin
 
   # Configure stack root
   stack_root = STACK_ROOT_DEFAULT
-  if STACK_NAME_KEY in configurations and STACK_ROOT_KEY in configurations:
-    stack_root = stack_tools.get_stack_root(configurations[STACK_NAME_KEY], configurations[STACK_ROOT_KEY]).lower()
+  if STACK_ROOT_KEY in configurations:
+    stack_root = configurations[STACK_ROOT_KEY].lower()
 
   # oozie configuration directory using a symlink
   oozie_config_directory = OOZIE_CONF_DIR.replace(STACK_ROOT_PATTERN, stack_root)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a0aca4f1/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/host_scripts/alert_disk_space.py b/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
index f3c6406..4c5834f 100644
--- a/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
+++ b/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
@@ -23,7 +23,6 @@ import os
 import platform
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from ambari_commons import OSConst
-from resource_management.libraries.functions import stack_tools
 
 DiskInfo = collections.namedtuple('DiskInfo', 'total used free path')
 
@@ -37,7 +36,6 @@ MIN_FREE_SPACE_DEFAULT = 5000000000L
 PERCENT_USED_WARNING_DEFAULT = 50
 PERCENT_USED_CRITICAL_DEFAULT = 80
 
-STACK_NAME = '{{cluster-env/stack_name}}'
 STACK_ROOT = '{{cluster-env/stack_root}}'
 
 def get_tokens():
@@ -45,7 +43,7 @@ def get_tokens():
   Returns a tuple of tokens in the format {{site/property}} that will be used
   to build the dictionary passed into execute
   """
-  return (STACK_NAME, STACK_ROOT)
+  return (STACK_ROOT, )
 
 
 @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
@@ -66,10 +64,10 @@ def execute(configurations={}, parameters={}, host_name=None):
   if configurations is None:
     return (('UNKNOWN', ['There were no configurations supplied to the script.']))
 
-  if not STACK_NAME in configurations or not STACK_ROOT in configurations:
-    return (('STACK_ROOT', ['cluster-env/stack_name and cluster-env/stack_root are required']))
+  if not STACK_ROOT in configurations:
+    return (('STACK_ROOT', ['cluster-env/stack_root is not specified']))
 
-  path = stack_tools.get_stack_root(configurations[STACK_NAME], configurations[STACK_ROOT])
+  path = configurations[STACK_ROOT]
 
   try:
     disk_usage = _get_disk_usage(path)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a0aca4f1/ambari-server/src/main/resources/host_scripts/alert_version_select.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/host_scripts/alert_version_select.py b/ambari-server/src/main/resources/host_scripts/alert_version_select.py
index 49dee5b..f8755c9 100644
--- a/ambari-server/src/main/resources/host_scripts/alert_version_select.py
+++ b/ambari-server/src/main/resources/host_scripts/alert_version_select.py
@@ -31,7 +31,6 @@ RESULT_STATE_WARNING = 'WARNING'
 RESULT_STATE_CRITICAL = 'CRITICAL'
 RESULT_STATE_UNKNOWN = 'UNKNOWN'
 
-STACK_NAME = '{{cluster-env/stack_name}}'
 STACK_TOOLS = '{{cluster-env/stack_tools}}'
 
 
@@ -43,7 +42,7 @@ def get_tokens():
   Returns a tuple of tokens in the format {{site/property}} that will be used
   to build the dictionary passed into execute
   """
-  return (STACK_NAME, STACK_TOOLS)
+  return (STACK_TOOLS,)
 
 
 def execute(configurations={}, parameters={}, host_name=None):
@@ -66,10 +65,8 @@ def execute(configurations={}, parameters={}, host_name=None):
     if STACK_TOOLS not in configurations:
       return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(STACK_TOOLS)])
 
-    stack_name = Script.get_stack_name()
-
     # Of the form,
-    # { "HDP" : { "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"], "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"] } }
+    # { "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"], "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"] }
     stack_tools_str = configurations[STACK_TOOLS]
 
     if stack_tools_str is None:
@@ -78,7 +75,6 @@ def execute(configurations={}, parameters={}, host_name=None):
     distro_select = "unknown-distro-select"
     try:
       stack_tools = json.loads(stack_tools_str)
-      stack_tools = stack_tools[stack_name]
       distro_select = stack_tools["stack_selector"][0]
     except:
       pass

http://git-wip-us.apache.org/repos/asf/ambari/blob/a0aca4f1/ambari-server/src/main/resources/stacks/BigInsights/4.0/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/configuration/cluster-env.xml
index 2d11ef3..93c7948 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/configuration/cluster-env.xml
@@ -234,20 +234,7 @@ gpgcheck=0</value>
     </value-attributes>
     <on-ambari-upgrade add="true"/>
   </property>
-
-  <!-- Define stack_name property in the base stack. DO NOT override this property for each stack version -->
-  <property>
-    <name>stack_name</name>
-    <value>BigInsights</value>
-    <description>The name of the stack.</description>
-    <value-attributes>
-      <read-only>true</read-only>
-      <overridable>false</overridable>
-      <visible>false</visible>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
+  
   <!-- Define stack_tools property in the base stack. DO NOT override this property for each stack version -->
   <property>
     <name>stack_tools</name>
@@ -280,8 +267,8 @@ gpgcheck=0</value>
   </property>
   <property>
     <name>stack_root</name>
-    <value>{"BigInsights":"/usr/iop"}</value>
-    <description>JSON which defines the stack root by stack name</description>  
+    <value>/usr/iop</value>
+    <description>Stack root folder</description>
     <value-attributes>
       <read-only>true</read-only>
       <overridable>false</overridable>

http://git-wip-us.apache.org/repos/asf/ambari/blob/a0aca4f1/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_features.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_features.json
index a6672e4..4627e73 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_features.json
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_features.json
@@ -1,214 +1,212 @@
 {
-  "BigInsights": {
-    "stack_features": [
-      {
-        "name": "snappy",
-        "description": "Snappy compressor/decompressor support",
-        "max_version": "4.0.0.0"
-      },
-      {
-        "name": "lzo",
-        "description": "LZO libraries support",
-        "min_version": "4.1.0.0"
-      },
-      {
-        "name": "express_upgrade",
-        "description": "Express upgrade support",
-        "min_version": "4.2.0.0"
-      },
-      {
-        "name": "rolling_upgrade",
-        "description": "Rolling upgrade support",
-        "min_version": "4.1.0.0"
-      },
-      {
-        "name": "config_versioning",
-        "description": "Configurable versions support",
-        "min_version": "4.1.0.0"
-      },
-      {
-        "name": "datanode_non_root",
-        "description": "DataNode running as non-root support (AMBARI-7615)",
-        "min_version": "4.1.0.0"
-      },
-      {
-        "name": "remove_ranger_hdfs_plugin_env",
-        "description": "HDFS removes Ranger env files (AMBARI-14299)",
-        "min_version": "4.2.0.0"
-      },
-      {
-        "name": "ranger",
-        "description": "Ranger Service support",
-        "min_version": "4.2.0.0"
-      },
-      {
-        "name": "ranger_tagsync_component",
-        "description": "Ranger Tagsync component support (AMBARI-14383)",
-        "min_version": "4.2.5.0"
-      },
-      {
-        "name": "phoenix",
-        "description": "Phoenix Service support",
-        "min_version": "4.2.0.0"
-      },
-      {
-        "name": "nfs",
-        "description": "NFS support",
-        "min_version": "4.1.0.0"
-      },
-      {
-        "name": "timeline_state_store",
-        "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
-        "min_version": "4.0.0.0"
-      },
-      {
-        "name": "copy_tarball_to_hdfs",
-        "description": "Copy tarball to HDFS support (AMBARI-12113)",
-        "min_version": "4.1.0.0"
-      },
-      {
-        "name": "spark_16plus",
-        "description": "Spark 1.6+",
-        "min_version": "4.2.0.0"
-      },
-      {
-        "name": "spark_thriftserver",
-        "description": "Spark Thrift Server",
-        "min_version": "4.2.0.0"
-      },
-      {
-        "name": "create_kafka_broker_id",
-        "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
-        "min_version": "4.0.0.0",
-        "max_version": "4.2.0.0"
-      },
-      {
-        "name": "kafka_listeners",
-        "description": "Kafka listeners (AMBARI-10984)",
-        "min_version": "4.2.0.0"
-      },
-      {
-        "name": "kafka_kerberos",
-        "description": "Kafka Kerberos support (AMBARI-10984)",
-        "min_version": "4.1.0.0"
-      },
-      {
-        "name": "ranger_usersync_non_root",
-        "description": "Ranger Usersync as non-root user (AMBARI-10416)",
-        "min_version": "4.1.0.0"
-      },
-      {
-        "name": "ranger_audit_db_support",
-        "description": "Ranger Audit to DB support",
-        "min_version": "4.2.0.0",
-        "max_version": "4.2.0.0"
-      },
-      {
-        "name": "knox_versioned_data_dir",
-        "description": "Use versioned data dir for Knox (AMBARI-13164)",
-        "min_version": "4.2.0.0"
-      },
-      {
-        "name": "knox_sso_topology",
-        "description": "Knox SSO Topology support (AMBARI-13975)",
-        "min_version": "4.2.0.0"
-      },
-      {
-        "name": "oozie_admin_user",
-        "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
-        "min_version": "4.0.0.0"
-      },
-      {
-        "name": "oozie_setup_shared_lib",
-        "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)",
-        "min_version": "4.0.0.0"
-      },
-      {
-        "name": "oozie_host_kerberos",
-        "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
-        "min_version": "4.0.0.0",
-        "max_version": "4.1.0.0"
-      },
-      {
-        "name": "hive_metastore_upgrade_schema",
-        "description": "Hive metastore upgrade schema support (AMBARI-11176)",
-        "min_version": "4.1.0.0"
-      },
-      {
-        "name": "hive_server_interactive",
-        "description": "Hive server interactive support (AMBARI-15573)",
-        "min_version": "4.4.0.0"
-      },
-      {
-        "name": "hive_webhcat_specific_configs",
-        "description": "Hive webhcat specific configurations support (AMBARI-12364)",
-        "min_version": "4.1.0.0"
-      },
-      {
-        "name": "hive_purge_table",
-        "description": "Hive purge table support (AMBARI-12260)",
-        "min_version": "4.1.0.0"
-      },
-      {
-        "name": "hive_server2_kerberized_env",
-        "description": "Hive server2 working on kerberized environment (AMBARI-13749)",
-        "max_version": "4.1.0.0"
-      },
-      {
-        "name": "hive_env_heapsize",
-        "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
-        "min_version": "4.0.0.0"
-      },
-      {
-        "name": "ranger_kms_hsm_support",
-        "description": "Ranger KMS HSM support (AMBARI-15752)",
-        "min_version": "4.2.5.0"
-      },
-      {
-        "name": "ranger_log4j_support",
-        "description": "Ranger supporting log-4j properties (AMBARI-15681)",
-        "min_version": "4.2.5.0"
-      },
-      {
-        "name": "ranger_kerberos_support",
-        "description": "Ranger Kerberos support",
-        "min_version": "4.2.5.0"
-      },
-      {
-        "name": "hive_metastore_site_support",
-        "description": "Hive Metastore site support",
-        "min_version": "4.2.5.0"
-      },
-      {
-        "name": "ranger_usersync_password_jceks",
-        "description": "Saving Ranger Usersync credentials in jceks",
-        "min_version": "4.2.5.0"
-      },
-      {
-        "name": "ranger_install_logsearch_client",
-        "description": "LogSearch Service support",
-        "min_version": "4.2.5.0"
-      },
-      {
-        "name": "hbase_home_directory",
-        "description": "Hbase home directory in HDFS needed for HBASE backup",
-        "min_version": "4.2.5.0"
-      },
-      {
-        "name": "spark_livy",
-        "description": "Livy as slave component of spark",
-        "min_version": "4.4.0.0"
-      },
-      {
-        "name": "ranger_pid_support",
-        "description": "Ranger Service support pid generation AMBARI-16756",
-        "min_version": "4.2.5.0"
-      },
-      {
-        "name": "ranger_kms_pid_support",
-        "description": "Ranger KMS Service support pid generation",
-        "min_version": "4.2.5.0"
-      }
-    ]
-  }
+  "stack_features": [
+    {
+      "name": "snappy",
+      "description": "Snappy compressor/decompressor support",
+      "max_version": "4.0.0.0"
+    },
+    {
+      "name": "lzo",
+      "description": "LZO libraries support",
+      "min_version": "4.1.0.0"
+    },
+    {
+      "name": "express_upgrade",
+      "description": "Express upgrade support",
+      "min_version": "4.2.0.0"
+    },
+    {
+      "name": "rolling_upgrade",
+      "description": "Rolling upgrade support",
+      "min_version": "4.1.0.0"
+    },
+    {
+      "name": "config_versioning",
+      "description": "Configurable versions support",
+      "min_version": "4.1.0.0"
+    },
+    {
+      "name": "datanode_non_root",
+      "description": "DataNode running as non-root support (AMBARI-7615)",
+      "min_version": "4.1.0.0"
+    },
+    {
+      "name": "remove_ranger_hdfs_plugin_env",
+      "description": "HDFS removes Ranger env files (AMBARI-14299)",
+      "min_version": "4.2.0.0"
+    },
+    {
+      "name": "ranger",
+      "description": "Ranger Service support",
+      "min_version": "4.2.0.0"
+    },
+    {
+      "name": "ranger_tagsync_component",
+      "description": "Ranger Tagsync component support (AMBARI-14383)",
+      "min_version": "4.2.5.0"
+    },
+    {
+      "name": "phoenix",
+      "description": "Phoenix Service support",
+      "min_version": "4.2.0.0"
+    },
+    {
+      "name": "nfs",
+      "description": "NFS support",
+      "min_version": "4.1.0.0"
+    },
+    {
+      "name": "timeline_state_store",
+      "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
+      "min_version": "4.0.0.0"
+    },
+    {
+      "name": "copy_tarball_to_hdfs",
+      "description": "Copy tarball to HDFS support (AMBARI-12113)",
+      "min_version": "4.1.0.0"
+    },
+    {
+      "name": "spark_16plus",
+      "description": "Spark 1.6+",
+      "min_version": "4.2.0.0"
+    },
+    {
+      "name": "spark_thriftserver",
+      "description": "Spark Thrift Server",
+      "min_version": "4.2.0.0"
+    },
+    {
+      "name": "create_kafka_broker_id",
+      "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
+      "min_version": "4.0.0.0",
+      "max_version": "4.2.0.0"
+    },
+    {
+      "name": "kafka_listeners",
+      "description": "Kafka listeners (AMBARI-10984)",
+      "min_version": "4.2.0.0"
+    },
+    {
+      "name": "kafka_kerberos",
+      "description": "Kafka Kerberos support (AMBARI-10984)",
+      "min_version": "4.1.0.0"
+    },
+    {
+      "name": "ranger_usersync_non_root",
+      "description": "Ranger Usersync as non-root user (AMBARI-10416)",
+      "min_version": "4.1.0.0"
+    },
+    {
+      "name": "ranger_audit_db_support",
+      "description": "Ranger Audit to DB support",
+      "min_version": "4.2.0.0",
+      "max_version": "4.2.0.0"
+    },
+    {
+      "name": "knox_versioned_data_dir",
+      "description": "Use versioned data dir for Knox (AMBARI-13164)",
+      "min_version": "4.2.0.0"
+    },
+    {
+      "name": "knox_sso_topology",
+      "description": "Knox SSO Topology support (AMBARI-13975)",
+      "min_version": "4.2.0.0"
+    },
+    {
+      "name": "oozie_admin_user",
+      "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
+      "min_version": "4.0.0.0"
+    },
+    {
+      "name": "oozie_setup_shared_lib",
+      "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)",
+      "min_version": "4.0.0.0"
+    },
+    {
+      "name": "oozie_host_kerberos",
+      "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
+      "min_version": "4.0.0.0",
+      "max_version": "4.1.0.0"
+    },
+    {
+      "name": "hive_metastore_upgrade_schema",
+      "description": "Hive metastore upgrade schema support (AMBARI-11176)",
+      "min_version": "4.1.0.0"
+     },
+    {
+      "name": "hive_server_interactive",
+      "description": "Hive server interactive support (AMBARI-15573)",
+      "min_version": "4.4.0.0"
+     },
+    {
+      "name": "hive_webhcat_specific_configs",
+      "description": "Hive webhcat specific configurations support (AMBARI-12364)",
+      "min_version": "4.1.0.0"
+     },
+    {
+      "name": "hive_purge_table",
+      "description": "Hive purge table support (AMBARI-12260)",
+      "min_version": "4.1.0.0"
+     },
+    {
+      "name": "hive_server2_kerberized_env",
+      "description": "Hive server2 working on kerberized environment (AMBARI-13749)",
+      "max_version": "4.1.0.0"
+     },
+    {
+      "name": "hive_env_heapsize",
+      "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
+      "min_version": "4.0.0.0"
+    },
+    {
+      "name": "ranger_kms_hsm_support",
+      "description": "Ranger KMS HSM support (AMBARI-15752)",
+      "min_version": "4.2.5.0"
+    },
+    {
+      "name": "ranger_log4j_support",
+      "description": "Ranger supporting log-4j properties (AMBARI-15681)",
+      "min_version": "4.2.5.0"
+    },
+    {
+      "name": "ranger_kerberos_support",
+      "description": "Ranger Kerberos support",
+      "min_version": "4.2.5.0"
+    },
+    {
+      "name": "hive_metastore_site_support",
+      "description": "Hive Metastore site support",
+      "min_version": "4.2.5.0"
+    },
+    {
+      "name": "ranger_usersync_password_jceks",
+      "description": "Saving Ranger Usersync credentials in jceks",
+      "min_version": "4.2.5.0"
+    },
+    {
+      "name": "ranger_install_logsearch_client",
+      "description": "LogSearch Service support",
+      "min_version": "4.2.5.0"
+    },
+    {
+      "name": "hbase_home_directory",
+      "description": "Hbase home directory in HDFS needed for HBASE backup",
+      "min_version": "4.2.5.0"
+    },
+    {
+      "name": "spark_livy",
+      "description": "Livy as slave component of spark",
+      "min_version": "4.4.0.0"
+    },
+    {
+      "name": "ranger_pid_support",
+      "description": "Ranger Service support pid generation AMBARI-16756",
+      "min_version": "4.2.5.0"
+    },
+    {
+      "name": "ranger_kms_pid_support",
+      "description": "Ranger KMS Service support pid generation",
+      "min_version": "4.2.5.0"
+    }
+  ]
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/a0aca4f1/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_tools.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_tools.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_tools.json
index 92c9349..fdbbdf9 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_tools.json
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_tools.json
@@ -1,14 +1,4 @@
 {
-  "BigInsights": {
-    "stack_selector": [
-      "iop-select",
-      "/usr/bin/iop-select",
-      "iop-select"
-    ],
-    "conf_selector": [
-      "conf-select",
-      "/usr/bin/conf-select",
-      "conf-select"
-    ]
-  }
+  "stack_selector": ["iop-select", "/usr/bin/iop-select", "iop-select"],
+  "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/a0aca4f1/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
index c6b091d..a79e904 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
@@ -220,18 +220,6 @@ gpgcheck=0</value>
     </value-attributes>
     <on-ambari-upgrade add="true"/>
   </property>
-  <!-- Define stack_name property in the base stack. DO NOT override this property for each stack version -->
-  <property>
-    <name>stack_name</name>
-    <value>HDP</value>
-    <description>The name of the stack.</description>
-    <value-attributes>
-      <read-only>true</read-only>
-      <overridable>false</overridable>
-      <visible>false</visible>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
   <!-- Define stack_tools property in the base stack. DO NOT override this property for each stack version -->
   <property>
     <name>stack_tools</name>
@@ -264,8 +252,8 @@ gpgcheck=0</value>
   </property>
   <property>
     <name>stack_root</name>
-    <value>{"HDP":"/usr/hdp"}</value>
-    <description>JSON which defines the stack root by stack name</description>
+    <value>/usr/hdp</value>
+    <description>Stack root folder</description>
     <value-attributes>
       <read-only>true</read-only>
       <overridable>false</overridable>
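
The stack_root hunk is the same shape of change: the reverted format stored a JSON map keyed by stack name, while the restored value is a plain path. A hedged sketch (a hypothetical helper, not Ambari's API) that tolerates either form:

    import json

    def resolve_stack_root(stack_root_value, stack_name="HDP"):
        # Hypothetical helper, not part of Ambari: accept both the JSON
        # map being removed and the plain path being restored.
        try:
            parsed = json.loads(stack_root_value)
            if isinstance(parsed, dict):
                return parsed[stack_name]  # e.g. {"HDP": "/usr/hdp"}
        except ValueError:
            pass  # not JSON, so it is already a plain path
        return stack_root_value            # e.g. "/usr/hdp"

    assert resolve_stack_root('{"HDP": "/usr/hdp"}') == "/usr/hdp"
    assert resolve_stack_root("/usr/hdp") == "/usr/hdp"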

http://git-wip-us.apache.org/repos/asf/ambari/blob/a0aca4f1/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
index 31cf0c8..878645b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_features.json
@@ -1,429 +1,427 @@
 {
-  "HDP": {
-    "stack_features": [
-      {
-        "name": "snappy",
-        "description": "Snappy compressor/decompressor support",
-        "min_version": "2.0.0.0",
-        "max_version": "2.2.0.0"
-      },
-      {
-        "name": "lzo",
-        "description": "LZO libraries support",
-        "min_version": "2.2.1.0"
-      },
-      {
-        "name": "express_upgrade",
-        "description": "Express upgrade support",
-        "min_version": "2.1.0.0"
-      },
-      {
-        "name": "rolling_upgrade",
-        "description": "Rolling upgrade support",
-        "min_version": "2.2.0.0"
-      },
-      {
-        "name": "kafka_acl_migration_support",
-        "description": "ACL migration support",
-        "min_version": "2.3.4.0"
-      },
-      {
-        "name": "secure_zookeeper",
-        "description": "Protect ZNodes with SASL acl in secure clusters",
-        "min_version": "2.6.0.0"
-      },
-      {
-        "name": "config_versioning",
-        "description": "Configurable versions support",
-        "min_version": "2.3.0.0"
-      },
-      {
-        "name": "datanode_non_root",
-        "description": "DataNode running as non-root support (AMBARI-7615)",
-        "min_version": "2.2.0.0"
-      },
-      {
-        "name": "remove_ranger_hdfs_plugin_env",
-        "description": "HDFS removes Ranger env files (AMBARI-14299)",
-        "min_version": "2.3.0.0"
-      },
-      {
-        "name": "ranger",
-        "description": "Ranger Service support",
-        "min_version": "2.2.0.0"
-      },
-      {
-        "name": "ranger_tagsync_component",
-        "description": "Ranger Tagsync component support (AMBARI-14383)",
-        "min_version": "2.5.0.0"
-      },
-      {
-        "name": "phoenix",
-        "description": "Phoenix Service support",
-        "min_version": "2.3.0.0"
-      },
-      {
-        "name": "nfs",
-        "description": "NFS support",
-        "min_version": "2.3.0.0"
-      },
-      {
-        "name": "tez_for_spark",
-        "description": "Tez dependency for Spark",
-        "min_version": "2.2.0.0",
-        "max_version": "2.3.0.0"
-      },
-      {
-        "name": "timeline_state_store",
-        "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
-        "min_version": "2.2.0.0"
-      },
-      {
-        "name": "copy_tarball_to_hdfs",
-        "description": "Copy tarball to HDFS support (AMBARI-12113)",
-        "min_version": "2.2.0.0"
-      },
-      {
-        "name": "spark_16plus",
-        "description": "Spark 1.6+",
-        "min_version": "2.4.0.0"
-      },
-      {
-        "name": "spark_thriftserver",
-        "description": "Spark Thrift Server",
-        "min_version": "2.3.2.0"
-      },
-      {
-        "name": "storm_kerberos",
-        "description": "Storm Kerberos support (AMBARI-7570)",
-        "min_version": "2.2.0.0"
-      },
-      {
-        "name": "storm_ams",
-        "description": "Storm AMS integration (AMBARI-10710)",
-        "min_version": "2.2.0.0"
-      },
-      {
-        "name": "create_kafka_broker_id",
-        "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
-        "min_version": "2.2.0.0",
-        "max_version": "2.3.0.0"
-      },
-      {
-        "name": "kafka_listeners",
-        "description": "Kafka listeners (AMBARI-10984)",
-        "min_version": "2.3.0.0"
-      },
-      {
-        "name": "kafka_kerberos",
-        "description": "Kafka Kerberos support (AMBARI-10984)",
-        "min_version": "2.3.0.0"
-      },
-      {
-        "name": "pig_on_tez",
-        "description": "Pig on Tez support (AMBARI-7863)",
-        "min_version": "2.2.0.0"
-      },
-      {
-        "name": "ranger_usersync_non_root",
-        "description": "Ranger Usersync as non-root user (AMBARI-10416)",
-        "min_version": "2.3.0.0"
-      },
-      {
-        "name": "ranger_audit_db_support",
-        "description": "Ranger Audit to DB support",
-        "min_version": "2.2.0.0",
-        "max_version": "2.4.99.99"
-      },
-      {
-        "name": "accumulo_kerberos_user_auth",
-        "description": "Accumulo Kerberos User Auth (AMBARI-10163)",
-        "min_version": "2.3.0.0"
-      },
-      {
-        "name": "knox_versioned_data_dir",
-        "description": "Use versioned data dir for Knox (AMBARI-13164)",
-        "min_version": "2.3.2.0"
-      },
-      {
-        "name": "knox_sso_topology",
-        "description": "Knox SSO Topology support (AMBARI-13975)",
-        "min_version": "2.3.8.0"
-      },
-      {
-        "name": "atlas_rolling_upgrade",
-        "description": "Rolling upgrade support for Atlas",
-        "min_version": "2.3.0.0"
-      },
-      {
-        "name": "oozie_admin_user",
-        "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
-        "min_version": "2.2.0.0"
-      },
-      {
-        "name": "oozie_create_hive_tez_configs",
-        "description": "Oozie create configs for Ambari Hive and Tez deployments (AMBARI-8074)",
-        "min_version": "2.2.0.0"
-      },
-      {
-        "name": "oozie_setup_shared_lib",
-        "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)",
-        "min_version": "2.2.0.0"
-      },
-      {
-        "name": "oozie_host_kerberos",
-        "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
-        "min_version": "2.0.0.0"
-      },
-      {
-        "name": "falcon_extensions",
-        "description": "Falcon Extension",
-        "min_version": "2.5.0.0"
-      },
-      {
-        "name": "hive_metastore_upgrade_schema",
-        "description": "Hive metastore upgrade schema support (AMBARI-11176)",
-        "min_version": "2.3.0.0"
-      },
-      {
-        "name": "hive_server_interactive",
-        "description": "Hive server interactive support (AMBARI-15573)",
-        "min_version": "2.5.0.0"
-      },
-      {
-        "name": "hive_webhcat_specific_configs",
-        "description": "Hive webhcat specific configurations support (AMBARI-12364)",
-        "min_version": "2.3.0.0"
-      },
-      {
-        "name": "hive_purge_table",
-        "description": "Hive purge table support (AMBARI-12260)",
-        "min_version": "2.3.0.0"
-      },
-      {
-        "name": "hive_server2_kerberized_env",
-        "description": "Hive server2 working on kerberized environment (AMBARI-13749)",
-        "min_version": "2.2.3.0",
-        "max_version": "2.2.5.0"
-      },
-      {
-        "name": "hive_env_heapsize",
-        "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
-        "min_version": "2.2.0.0"
-      },
-      {
-        "name": "ranger_kms_hsm_support",
-        "description": "Ranger KMS HSM support (AMBARI-15752)",
-        "min_version": "2.5.0.0"
-      },
-      {
-        "name": "ranger_log4j_support",
-        "description": "Ranger supporting log-4j properties (AMBARI-15681)",
-        "min_version": "2.5.0.0"
-      },
-      {
-        "name": "ranger_kerberos_support",
-        "description": "Ranger Kerberos support",
-        "min_version": "2.5.0.0"
-      },
-      {
-        "name": "hive_metastore_site_support",
-        "description": "Hive Metastore site support",
-        "min_version": "2.5.0.0"
-      },
-      {
-        "name": "ranger_usersync_password_jceks",
-        "description": "Saving Ranger Usersync credentials in jceks",
-        "min_version": "2.5.0.0"
-      },
-      {
-        "name": "ranger_install_infra_client",
-        "description": "Ambari Infra Service support",
-        "min_version": "2.5.0.0"
-      },
-      {
-        "name": "falcon_atlas_support_2_3",
-        "description": "Falcon Atlas integration support for 2.3 stack",
-        "min_version": "2.3.99.0",
-        "max_version": "2.4.0.0"
-      },
-      {
-        "name": "falcon_atlas_support",
-        "description": "Falcon Atlas integration",
-        "min_version": "2.5.0.0"
-      },
-      {
-        "name": "hbase_home_directory",
-        "description": "Hbase home directory in HDFS needed for HBASE backup",
-        "min_version": "2.5.0.0"
-      },
-      {
-        "name": "spark_livy",
-        "description": "Livy as slave component of spark",
-        "min_version": "2.5.0.0"
-      },
-      {
-        "name": "spark_livy2",
-        "description": "Livy as slave component of spark",
-        "min_version": "2.6.0.0"
-      },
-      {
-        "name": "atlas_ranger_plugin_support",
-        "description": "Atlas Ranger plugin support",
-        "min_version": "2.5.0.0"
-      },
-      {
-        "name": "atlas_conf_dir_in_path",
-        "description": "Prepend the Atlas conf dir (/etc/atlas/conf) to the classpath of Storm and Falcon",
-        "min_version": "2.3.0.0",
-        "max_version": "2.4.99.99"
-      },
-      {
-        "name": "atlas_upgrade_support",
-        "description": "Atlas supports express and rolling upgrades",
-        "min_version": "2.5.0.0"
-      },
-      {
-        "name": "atlas_hook_support",
-        "description": "Atlas support for hooks in Hive, Storm, Falcon, and Sqoop",
-        "min_version": "2.5.0.0"
-      },
-      {
-        "name": "ranger_pid_support",
-        "description": "Ranger Service support pid generation AMBARI-16756",
-        "min_version": "2.5.0.0"
-      },
-      {
-        "name": "ranger_kms_pid_support",
-        "description": "Ranger KMS Service support pid generation",
-        "min_version": "2.5.0.0"
-      },
-      {
-        "name": "ranger_admin_password_change",
-        "description": "Allow ranger admin credentials to be specified during cluster creation (AMBARI-17000)",
-        "min_version": "2.5.0.0"
-      },
-      {
-        "name": "ranger_setup_db_on_start",
-        "description": "Allows setup of ranger db and java patches to be called multiple times on each START",
-        "min_version": "2.6.0.0"
-      },
-      {
-        "name": "storm_metrics_apache_classes",
-        "description": "Metrics sink for Storm that uses Apache class names",
-        "min_version": "2.5.0.0"
-      },
-      {
-        "name": "spark_java_opts_support",
-        "description": "Allow Spark to generate java-opts file",
-        "min_version": "2.2.0.0",
-        "max_version": "2.4.0.0"
-      },
-      {
-        "name": "atlas_hbase_setup",
-        "description": "Use script to create Atlas tables in Hbase and set permissions for Atlas user.",
-        "min_version": "2.5.0.0"
-      },
-      {
-        "name": "ranger_hive_plugin_jdbc_url",
-        "description": "Handle Ranger hive repo config jdbc url change for stack 2.5 (AMBARI-18386)",
-        "min_version": "2.5.0.0"
-      },
-      {
-        "name": "zkfc_version_advertised",
-        "description": "ZKFC advertise version",
-        "min_version": "2.5.0.0"
-      },
-      {
-        "name": "phoenix_core_hdfs_site_required",
-        "description": "HDFS and CORE site required for Phoenix",
-        "max_version": "2.5.9.9"
-      },
-      {
-        "name": "ranger_tagsync_ssl_xml_support",
-        "description": "Ranger Tagsync ssl xml support.",
-        "min_version": "2.6.0.0"
-      },
-      {
-        "name": "ranger_xml_configuration",
-        "description": "Ranger code base support xml configurations",
-        "min_version": "2.3.0.0"
-      },
-      {
-        "name": "kafka_ranger_plugin_support",
-        "description": "Ambari stack changes for Ranger Kafka Plugin (AMBARI-11299)",
-        "min_version": "2.3.0.0"
-      },
-      {
-        "name": "yarn_ranger_plugin_support",
-        "description": "Implement Stack changes for Ranger Yarn Plugin integration (AMBARI-10866)",
-        "min_version": "2.3.0.0"
-      },
-      {
-        "name": "ranger_solr_config_support",
-        "description": "Showing Ranger solrconfig.xml on UI",
-        "min_version": "2.6.0.0"
-      },
-      {
-        "name": "hive_interactive_atlas_hook_required",
-        "description": "Registering Atlas Hook for Hive Interactive.",
-        "min_version": "2.6.0.0"
-      },
-      {
-        "name": "core_site_for_ranger_plugins",
-        "description": "Adding core-site.xml in when Ranger plugin is enabled for Storm, Kafka, and Knox.",
-        "min_version": "2.6.0.0"
-      },
-      {
-        "name": "atlas_install_hook_package_support",
-        "description": "Stop installing packages from 2.6",
-        "max_version": "2.5.9.9"
-      },
-      {
-        "name": "atlas_hdfs_site_on_namenode_ha",
-        "description": "Need to create hdfs-site under atlas-conf dir when Namenode-HA is enabled.",
-        "min_version": "2.6.0.0"
-      },
-      {
-        "name": "hive_interactive_ga",
-        "description": "Hive Interactive GA support",
-        "min_version": "2.6.0.0"
-      },
-      {
-        "name": "secure_ranger_ssl_password",
-        "description": "Securing Ranger Admin and Usersync SSL and Trustore related passwords in jceks",
-        "min_version": "2.6.0.0"
-      },
-      {
-        "name": "ranger_kms_ssl",
-        "description": "Ranger KMS SSL properties in ambari stack",
-        "min_version": "2.6.0.0"
-      },
-      {
-        "name": "nifi_encrypt_config",
-        "description": "Encrypt sensitive properties written to nifi property file",
-        "min_version": "2.6.0.0"
-      },
-      {
-        "name": "toolkit_config_update",
-        "description": "Support separate input and output for toolkit configuration",
-        "min_version": "2.6.0.0"
-      },
-      {
-        "name": "admin_toolkit_support",
-        "description": "Supports the nifi admin toolkit",
-        "min_version": "2.6.0.0"
-      },
-      {
-        "name": "tls_toolkit_san",
-        "description": "Support subject alternative name flag",
-        "min_version": "2.6.0.0"
-      },
-      {
-        "name": "nifi_jaas_conf_create",
-        "description": "Create NIFI jaas configuration when kerberos is enabled",
-        "min_version": "2.6.0.0"
-      }
-    ]
-  }
+  "stack_features": [
+    {
+      "name": "snappy",
+      "description": "Snappy compressor/decompressor support",
+      "min_version": "2.0.0.0",
+      "max_version": "2.2.0.0"
+    },
+    {
+      "name": "lzo",
+      "description": "LZO libraries support",
+      "min_version": "2.2.1.0"
+    },
+    {
+      "name": "express_upgrade",
+      "description": "Express upgrade support",
+      "min_version": "2.1.0.0"
+    },
+    {
+      "name": "rolling_upgrade",
+      "description": "Rolling upgrade support",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "kafka_acl_migration_support",
+      "description": "ACL migration support",
+      "min_version": "2.3.4.0"
+    },
+    {
+      "name": "secure_zookeeper",
+      "description": "Protect ZNodes with SASL acl in secure clusters",
+      "min_version": "2.6.0.0"
+    },
+    {
+      "name": "config_versioning",
+      "description": "Configurable versions support",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "datanode_non_root",
+      "description": "DataNode running as non-root support (AMBARI-7615)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "remove_ranger_hdfs_plugin_env",
+      "description": "HDFS removes Ranger env files (AMBARI-14299)",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "ranger",
+      "description": "Ranger Service support",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "ranger_tagsync_component",
+      "description": "Ranger Tagsync component support (AMBARI-14383)",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "phoenix",
+      "description": "Phoenix Service support",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "nfs",
+      "description": "NFS support",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "tez_for_spark",
+      "description": "Tez dependency for Spark",
+      "min_version": "2.2.0.0",
+      "max_version": "2.3.0.0"
+    },
+    {
+      "name": "timeline_state_store",
+      "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "copy_tarball_to_hdfs",
+      "description": "Copy tarball to HDFS support (AMBARI-12113)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "spark_16plus",
+      "description": "Spark 1.6+",
+      "min_version": "2.4.0.0"
+    },
+    {
+      "name": "spark_thriftserver",
+      "description": "Spark Thrift Server",
+      "min_version": "2.3.2.0"
+    },
+    {
+      "name": "storm_kerberos",
+      "description": "Storm Kerberos support (AMBARI-7570)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "storm_ams",
+      "description": "Storm AMS integration (AMBARI-10710)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "create_kafka_broker_id",
+      "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
+      "min_version": "2.2.0.0",
+      "max_version": "2.3.0.0"
+    },
+    {
+      "name": "kafka_listeners",
+      "description": "Kafka listeners (AMBARI-10984)",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "kafka_kerberos",
+      "description": "Kafka Kerberos support (AMBARI-10984)",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "pig_on_tez",
+      "description": "Pig on Tez support (AMBARI-7863)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "ranger_usersync_non_root",
+      "description": "Ranger Usersync as non-root user (AMBARI-10416)",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "ranger_audit_db_support",
+      "description": "Ranger Audit to DB support",
+      "min_version": "2.2.0.0",
+      "max_version": "2.4.99.99"
+    },
+    {
+      "name": "accumulo_kerberos_user_auth",
+      "description": "Accumulo Kerberos User Auth (AMBARI-10163)",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "knox_versioned_data_dir",
+      "description": "Use versioned data dir for Knox (AMBARI-13164)",
+      "min_version": "2.3.2.0"
+    },
+    {
+      "name": "knox_sso_topology",
+      "description": "Knox SSO Topology support (AMBARI-13975)",
+      "min_version": "2.3.8.0"
+    },
+    {
+      "name": "atlas_rolling_upgrade",
+      "description": "Rolling upgrade support for Atlas",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "oozie_admin_user",
+      "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "oozie_create_hive_tez_configs",
+      "description": "Oozie create configs for Ambari Hive and Tez deployments (AMBARI-8074)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "oozie_setup_shared_lib",
+      "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "oozie_host_kerberos",
+      "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
+      "min_version": "2.0.0.0"
+    },
+    {
+      "name": "falcon_extensions",
+      "description": "Falcon Extension",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "hive_metastore_upgrade_schema",
+      "description": "Hive metastore upgrade schema support (AMBARI-11176)",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "hive_server_interactive",
+      "description": "Hive Server Interactive support (AMBARI-15573)",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "hive_webhcat_specific_configs",
+      "description": "Hive WebHCat-specific configuration support (AMBARI-12364)",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "hive_purge_table",
+      "description": "Hive purge table support (AMBARI-12260)",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "hive_server2_kerberized_env",
+      "description": "Hive Server2 working in a kerberized environment (AMBARI-13749)",
+      "min_version": "2.2.3.0",
+      "max_version": "2.2.5.0"
+    },
+    {
+      "name": "hive_env_heapsize",
+      "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
+      "min_version": "2.2.0.0"
+    },
+    {
+      "name": "ranger_kms_hsm_support",
+      "description": "Ranger KMS HSM support (AMBARI-15752)",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "ranger_log4j_support",
+      "description": "Ranger supporting log-4j properties (AMBARI-15681)",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "ranger_kerberos_support",
+      "description": "Ranger Kerberos support",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "hive_metastore_site_support",
+      "description": "Hive Metastore site support",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "ranger_usersync_password_jceks",
+      "description": "Saving Ranger Usersync credentials in jceks",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "ranger_install_infra_client",
+      "description": "Ambari Infra Service support",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "falcon_atlas_support_2_3",
+      "description": "Falcon Atlas integration support for 2.3 stack",
+      "min_version": "2.3.99.0",
+      "max_version": "2.4.0.0"
+    },
+    {
+      "name": "falcon_atlas_support",
+      "description": "Falcon Atlas integration",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "hbase_home_directory",
+      "description": "Hbase home directory in HDFS needed for HBASE backup",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "spark_livy",
+      "description": "Livy as slave component of spark",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "spark_livy2",
+      "description": "Livy as slave component of spark",
+      "min_version": "2.6.0.0"
+    },
+    {
+      "name": "atlas_ranger_plugin_support",
+      "description": "Atlas Ranger plugin support",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "atlas_conf_dir_in_path",
+      "description": "Prepend the Atlas conf dir (/etc/atlas/conf) to the classpath of Storm and Falcon",
+      "min_version": "2.3.0.0",
+      "max_version": "2.4.99.99"
+    },
+    {
+      "name": "atlas_upgrade_support",
+      "description": "Atlas supports express and rolling upgrades",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "atlas_hook_support",
+      "description": "Atlas support for hooks in Hive, Storm, Falcon, and Sqoop",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "ranger_pid_support",
+      "description": "Ranger Service support pid generation AMBARI-16756",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "ranger_kms_pid_support",
+      "description": "Ranger KMS Service support pid generation",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "ranger_admin_password_change",
+      "description": "Allow ranger admin credentials to be specified during cluster creation (AMBARI-17000)",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "ranger_setup_db_on_start",
+      "description": "Allows setup of ranger db and java patches to be called multiple times on each START",
+      "min_version": "2.6.0.0"
+    },
+    {
+      "name": "storm_metrics_apache_classes",
+      "description": "Metrics sink for Storm that uses Apache class names",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "spark_java_opts_support",
+      "description": "Allow Spark to generate java-opts file",
+      "min_version": "2.2.0.0",
+      "max_version": "2.4.0.0"
+    },
+    {
+      "name": "atlas_hbase_setup",
+      "description": "Use script to create Atlas tables in Hbase and set permissions for Atlas user.",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "ranger_hive_plugin_jdbc_url",
+      "description": "Handle Ranger hive repo config jdbc url change for stack 2.5 (AMBARI-18386)",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "zkfc_version_advertised",
+      "description": "ZKFC advertise version",
+      "min_version": "2.5.0.0"
+    },
+    {
+      "name": "phoenix_core_hdfs_site_required",
+      "description": "HDFS and CORE site required for Phoenix",
+      "max_version": "2.5.9.9"
+    },
+    {
+      "name": "ranger_tagsync_ssl_xml_support",
+      "description": "Ranger Tagsync ssl xml support.",
+      "min_version": "2.6.0.0"
+    },
+    {
+      "name": "ranger_xml_configuration",
+      "description": "Ranger code base support xml configurations",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "kafka_ranger_plugin_support",
+      "description": "Ambari stack changes for Ranger Kafka Plugin (AMBARI-11299)",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "yarn_ranger_plugin_support",
+      "description": "Implement Stack changes for Ranger Yarn Plugin integration (AMBARI-10866)",
+      "min_version": "2.3.0.0"
+    },
+    {
+      "name": "ranger_solr_config_support",
+      "description": "Showing Ranger solrconfig.xml on UI",
+      "min_version": "2.6.0.0"
+    },
+    {
+      "name": "hive_interactive_atlas_hook_required",
+      "description": "Registering Atlas Hook for Hive Interactive.",
+      "min_version": "2.6.0.0"
+    },
+    {
+      "name": "core_site_for_ranger_plugins",
+      "description": "Adding core-site.xml in when Ranger plugin is enabled for Storm, Kafka, and Knox.",
+      "min_version": "2.6.0.0"
+    },
+    {
+      "name": "atlas_install_hook_package_support",
+      "description": "Stop installing packages from 2.6",
+      "max_version": "2.5.9.9"
+    },
+    {
+      "name": "atlas_hdfs_site_on_namenode_ha",
+      "description": "Need to create hdfs-site under atlas-conf dir when Namenode-HA is enabled.",
+      "min_version": "2.6.0.0"
+    },
+    {
+      "name": "hive_interactive_ga",
+      "description": "Hive Interactive GA support",
+      "min_version": "2.6.0.0"
+    },
+    {
+      "name": "secure_ranger_ssl_password",
+      "description": "Securing Ranger Admin and Usersync SSL and Trustore related passwords in jceks",
+      "min_version": "2.6.0.0"
+    },
+    {
+      "name": "ranger_kms_ssl",
+      "description": "Ranger KMS SSL properties in ambari stack",
+      "min_version": "2.6.0.0"
+    },
+    {
+      "name": "nifi_encrypt_config",
+      "description": "Encrypt sensitive properties written to nifi property file",
+      "min_version": "2.6.0.0"
+    },
+    {
+      "name": "toolkit_config_update",
+      "description": "Support separate input and output for toolkit configuration",
+      "min_version": "2.6.0.0"
+    },
+    {
+      "name": "admin_toolkit_support",
+      "description": "Supports the nifi admin toolkit",
+      "min_version": "2.6.0.0"
+    },
+    {
+      "name": "tls_toolkit_san",
+      "description": "Support subject alternative name flag",
+      "min_version": "2.6.0.0"
+    },
+    {
+      "name": "nifi_jaas_conf_create",
+      "description": "Create NIFI jaas configuration when kerberos is enabled",
+      "min_version": "2.6.0.0"
+    }
+  ]
 }
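
The restored stack_features list is evaluated as version windows: a feature applies when the stack version is at or above min_version and, where max_version is present, below it. A minimal sketch of that check (assumed semantics, not Ambari's actual check_stack_feature):

    import json

    FEATURES = json.loads('''{"stack_features": [
        {"name": "rolling_upgrade", "min_version": "2.2.0.0"},
        {"name": "tez_for_spark", "min_version": "2.2.0.0",
         "max_version": "2.3.0.0"}]}''')

    def _v(version):
        # Compare dotted versions numerically, e.g. "2.10.0.0" > "2.9.0.0".
        return tuple(int(p) for p in version.split("."))

    def supports(feature_name, stack_version):
        for feature in FEATURES["stack_features"]:
            if feature["name"] != feature_name:
                continue
            if "min_version" in feature and _v(stack_version) < _v(feature["min_version"]):
                return False
            if "max_version" in feature and _v(stack_version) >= _v(feature["max_version"]):
                return False
            return True
        return False  # unknown feature names are treated as unsupported

    print(supports("rolling_upgrade", "2.6.0.0"))  # True
    print(supports("tez_for_spark", "2.4.0.0"))    # False (past max_version)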

http://git-wip-us.apache.org/repos/asf/ambari/blob/a0aca4f1/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json
index c515d57..d1aab4b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/properties/stack_tools.json
@@ -1,14 +1,4 @@
 {
-  "HDP": {
-    "stack_selector": [
-      "hdp-select",
-      "/usr/bin/hdp-select",
-      "hdp-select"
-    ],
-    "conf_selector": [
-      "conf-select",
-      "/usr/bin/conf-select",
-      "conf-select"
-    ]
-  }
-}
+  "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"],
+  "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]
+}
\ No newline at end of file
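
The HDP stack_tools hunk mirrors the BigInsights one above; the same flat-lookup sketch applies, with hdp-select in place of iop-select.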

http://git-wip-us.apache.org/repos/asf/ambari/blob/a0aca4f1/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
index f19ac52..7df00ee 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/configuration/cluster-env.xml
@@ -20,18 +20,6 @@
  */
 -->
 <configuration>
-  <!-- Define stack_name property in the base stack. DO NOT override this property for each stack version -->
-  <property>
-    <name>stack_name</name>
-    <value>PERF</value>
-    <description>The name of the stack.</description>
-    <value-attributes>
-      <read-only>true</read-only>
-      <overridable>false</overridable>
-      <visible>false</visible>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
 
   <!-- Define stack_tools property in the base stack. DO NOT override this property for each stack version -->
   <property>
@@ -67,8 +55,8 @@
 
   <property>
     <name>stack_root</name>
-    <value>{"PERF":"/usr/perf"}</value>
-    <description>JSON which defines the stack root by stack name</description>  
+    <value>/usr/perf</value>
+    <description>Stack root folder</description>
     <value-attributes>
       <read-only>true</read-only>
       <overridable>false</overridable>