Posted to commits@ambari.apache.org by jo...@apache.org on 2017/05/23 21:35:22 UTC

[3/3] ambari git commit: AMBARI-21078 - Merging Configurations On Service/Patch Upgrades Should Create New Configurations Only For Included Services (jonathanhurley)

AMBARI-21078 - Merging Configurations On Service/Patch Upgrades Should Create New Configurations Only For Included Services (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c4148d80
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c4148d80
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c4148d80

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: c4148d805c4145d545712bbce6127e7518a7b7ce
Parents: a45f542
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Fri May 19 15:14:15 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Tue May 23 17:35:11 2017 -0400

----------------------------------------------------------------------
 .../controller/AmbariManagementController.java  |   2 +-
 .../AmbariManagementControllerImpl.java         |   4 +-
 .../internal/ConfigGroupResourceProvider.java   |   4 +-
 .../internal/UpgradeResourceProvider.java       | 287 +------------------
 .../ambari/server/orm/dao/ServiceConfigDAO.java |  18 +-
 .../apache/ambari/server/orm/dao/StackDAO.java  |  14 +
 .../orm/entities/ServiceConfigEntity.java       |  24 +-
 .../upgrades/ComponentVersionCheckAction.java   |   5 +-
 .../upgrades/FinalizeUpgradeAction.java         |  18 +-
 .../upgrades/UpdateDesiredStackAction.java      |   7 +-
 .../ambari/server/stack/MasterHostResolver.java |  16 +-
 .../org/apache/ambari/server/state/Cluster.java |  16 +-
 .../ambari/server/state/ConfigFactory.java      |   1 +
 .../ambari/server/state/ConfigHelper.java       |  56 ++--
 .../apache/ambari/server/state/ConfigImpl.java  |  13 +-
 .../ambari/server/state/UpgradeContext.java     |  68 ++++-
 .../ambari/server/state/UpgradeHelper.java      | 286 ++++++++++++++++--
 .../server/state/cluster/ClusterImpl.java       | 108 ++++---
 .../server/state/configgroup/ConfigGroup.java   |   2 +-
 .../state/configgroup/ConfigGroupFactory.java   |   5 +-
 .../state/configgroup/ConfigGroupImpl.java      |  35 ++-
 .../state/stack/upgrade/UpgradeScope.java       |   9 -
 .../RequiredConfigPropertiesValidator.java      |   3 +-
 .../server/upgrade/AbstractUpgradeCatalog.java  |   3 +-
 .../TestActionSchedulerThreading.java           |  35 ++-
 .../ambari/server/agent/AgentResourceTest.java  |   2 +
 .../AmbariManagementControllerTest.java         |  16 +-
 .../ConfigGroupResourceProviderTest.java        |   5 +-
 .../StackUpgradeConfigurationMergeTest.java     |  12 +-
 .../internal/UpgradeResourceProviderTest.java   | 115 ++++++--
 .../server/orm/dao/ServiceConfigDAOTest.java    |  11 +-
 .../ComponentVersionCheckActionTest.java        | 107 ++++---
 .../upgrades/UpgradeActionTest.java             | 230 +++------------
 .../ambari/server/state/ConfigGroupTest.java    |   2 +-
 .../ambari/server/state/ConfigHelperTest.java   |   2 +-
 .../ambari/server/state/UpgradeHelperTest.java  | 107 +++----
 .../server/state/cluster/ClusterTest.java       | 142 ++++++---
 .../svccomphost/ServiceComponentHostTest.java   |   6 +-
 .../upgrade/AbstractUpgradeCatalogTest.java     |   6 +-
 .../server/upgrade/UpgradeCatalog210Test.java   |   4 +-
 .../server/upgrade/UpgradeCatalog211Test.java   |   2 +-
 .../server/upgrade/UpgradeCatalog220Test.java   |   4 +-
 .../server/upgrade/UpgradeCatalog221Test.java   |   4 +-
 .../server/upgrade/UpgradeCatalog222Test.java   |   4 +-
 .../server/upgrade/UpgradeCatalog240Test.java   |  36 +--
 .../server/upgrade/UpgradeCatalog250Test.java   |  34 +--
 .../server/upgrade/UpgradeCatalog300Test.java   |   6 +-
 47 files changed, 1030 insertions(+), 866 deletions(-)
----------------------------------------------------------------------

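The heart of this change is that configuration merging during a service or patch upgrade now only touches config types owned by the services that are actually part of the upgrade. A minimal self-contained sketch of that scoping rule, using illustrative names (serviceToConfigTypes, servicesInUpgrade) rather than anything taken from the patch:

    // Keep only the config types owned by services taking part in the upgrade.
    // Assumes java.util.{HashSet, Map, Set} are imported.
    static Set<String> configTypesToMerge(Map<String, Set<String>> serviceToConfigTypes,
        Set<String> servicesInUpgrade) {
      Set<String> included = new HashSet<>();
      for (String serviceName : servicesInUpgrade) {
        Set<String> configTypes = serviceToConfigTypes.get(serviceName);
        if (configTypes != null) {
          included.addAll(configTypes);
        }
      }
      return included;
    }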

http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
index fe01a0d..807bded 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
@@ -115,7 +115,7 @@ public interface AmbariManagementController {
    * TODO move this method to Cluster? doesn't seem to be on its place
    * @return config created
    */
-  Config createConfig(StackId stackId, Cluster cluster, String type, Map<String, String> properties,
+  Config createConfig(Cluster cluster, StackId stackId, String type, Map<String, String> properties,
                       String versionTag, Map<String, Map<String, String>> propertiesAttributes);
 
   /**

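The only change to this interface method is the parameter order: the cluster now comes before the stack that the configuration is created for. A hedged usage sketch, assuming controller, cluster and stackId are available from the caller's context:

    // Create a new core-site configuration tied to an explicit stack.
    Map<String, String> properties = new HashMap<>();
    properties.put("hadoop.security.authentication", "kerberos");

    Config coreSite = controller.createConfig(cluster, stackId, "core-site", properties,
        "version-" + System.currentTimeMillis(),
        Collections.<String, Map<String, String>>emptyMap());
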
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index faa9c54..3a5a4e6 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -930,7 +930,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
       stackId = cluster.getDesiredStackVersion();
     }
 
-    Config config = createConfig(stackId, cluster, request.getType(), requestProperties,
+    Config config = createConfig(cluster, stackId, request.getType(), requestProperties,
       request.getVersionTag(), propertiesAttributes);
 
     LOG.info(MessageFormat.format("Creating configuration with tag ''{0}'' to cluster ''{1}''  for configuration type {2}",
@@ -942,7 +942,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
   }
 
   @Override
-  public Config createConfig(StackId stackId, Cluster cluster, String type, Map<String, String> properties,
+  public Config createConfig(Cluster cluster, StackId stackId, String type, Map<String, String> properties,
                              String versionTag, Map<String, Map<String, String>> propertiesAttributes) {
 
     Config config = configFactory.createNew(stackId, cluster, type, versionTag, properties,

http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java
index cf6b717..71f2be4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java
@@ -579,13 +579,11 @@ public class ConfigGroupResourceProvider extends
 
       verifyConfigs(request.getConfigs(), cluster.getClusterName());
 
-      ConfigGroup configGroup = configGroupFactory.createNew(cluster,
+      ConfigGroup configGroup = configGroupFactory.createNew(cluster, serviceName,
         request.getGroupName(),
         request.getTag(), request.getDescription(),
         request.getConfigs(), hosts);
 
-      configGroup.setServiceName(serviceName);
-
       cluster.addConfigGroup(configGroup);
       if (serviceName != null) {
         cluster.createServiceConfigVersion(serviceName, getManagementController().getAuthName(),

http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
index 115a043..de2386a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
@@ -27,7 +27,6 @@ import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -65,13 +64,11 @@ import org.apache.ambari.server.controller.spi.SystemException;
 import org.apache.ambari.server.controller.spi.UnsupportedPropertyException;
 import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
 import org.apache.ambari.server.orm.dao.HostRoleCommandStatusSummaryDTO;
-import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
 import org.apache.ambari.server.orm.dao.RequestDAO;
 import org.apache.ambari.server.orm.dao.UpgradeDAO;
 import org.apache.ambari.server.orm.entities.HostRoleCommandEntity;
 import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.orm.entities.RequestEntity;
-import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.orm.entities.UpgradeEntity;
 import org.apache.ambari.server.orm.entities.UpgradeGroupEntity;
 import org.apache.ambari.server.orm.entities.UpgradeHistoryEntity;
@@ -82,9 +79,7 @@ import org.apache.ambari.server.security.authorization.ResourceType;
 import org.apache.ambari.server.security.authorization.RoleAuthorization;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.ServiceInfo;
@@ -98,7 +93,6 @@ import org.apache.ambari.server.state.stack.ConfigUpgradePack;
 import org.apache.ambari.server.state.stack.UpgradePack;
 import org.apache.ambari.server.state.stack.upgrade.ConfigureTask;
 import org.apache.ambari.server.state.stack.upgrade.Direction;
-import org.apache.ambari.server.state.stack.upgrade.Grouping;
 import org.apache.ambari.server.state.stack.upgrade.ManualTask;
 import org.apache.ambari.server.state.stack.upgrade.ServerSideActionTask;
 import org.apache.ambari.server.state.stack.upgrade.StageWrapper;
@@ -208,9 +202,6 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
   private static Provider<AmbariMetaInfo> s_metaProvider = null;
 
   @Inject
-  private static RepositoryVersionDAO s_repoVersionDAO = null;
-
-  @Inject
   private static Provider<RequestFactory> s_requestFactory;
 
   @Inject
@@ -275,9 +266,6 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     PROPERTY_IDS.add(REQUEST_STATUS_PROPERTY_ID);
     PROPERTY_IDS.add(REQUEST_TYPE_ID);
 
-    PROPERTY_IDS.add("Upgrade/from_version");
-    PROPERTY_IDS.add("Upgrade/to_version");
-
     // keys
     KEY_PROPERTY_IDS.put(Resource.Type.Upgrade, UPGRADE_REQUEST_ID);
     KEY_PROPERTY_IDS.put(Resource.Type.Cluster, UPGRADE_CLUSTER_NAME);
@@ -688,16 +676,11 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     at the appropriate moment during the orchestration.
     **/
     if (pack.getType() == UpgradeType.ROLLING) {
-      // Desired configs must be set before creating stages because the config tag
-      // names are read and set on the command for filling in later
-      applyStackAndProcessConfigurations(upgradeContext);
-
-      // move component desired version and upgrade state
-      s_upgradeHelper.putComponentsToUpgradingState(upgradeContext);
+      s_upgradeHelper.updateDesiredRepositoriesAndConfigs(upgradeContext);
     }
 
     @Experimental(feature = ExperimentalFeature.PATCH_UPGRADES, comment = "This is wrong")
-    StackId configurationPackSourceStackId = upgradeContext.getRepositoryVersion().getStackId();
+    StackId configurationPackSourceStackId = upgradeContext.getSourceVersions().values().iterator().next().getStackId();
 
     // resolve or build a proper config upgrade pack - always start out with the config pack
     // for the current stack and merge into that
@@ -801,272 +784,6 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     return upgradeEntity;
   }
 
-  /**
-   * Handles the creation or resetting of configurations based on whether an
-   * upgrade or downgrade is occurring. This method will not do anything when
-   * the target stack version is the same as the cluster's current stack version
-   * since, by definition, no new configurations are automatically created when
-   * upgrading with the same stack (ie HDP 2.2.0.0 -> HDP 2.2.1.0).
-   * <p/>
-   * When upgrading or downgrade between stacks (HDP 2.2.0.0 -> HDP 2.3.0.0)
-   * then this will perform the following:
-   * <ul>
-   * <li>Upgrade: Create new configurations that are a merge between the current
-   * stack and the desired stack. If a value has changed between stacks, then
-   * the target stack value should be taken unless the cluster's value differs
-   * from the old stack. This can occur if a property has been customized after
-   * installation.</li>
-   * <li>Downgrade: Reset the latest configurations from the cluster's original
-   * stack. The new configurations that were created on upgrade must be left
-   * intact until all components have been reverted, otherwise heartbeats will
-   * fail due to missing configurations.</li>
-   * </ul>
-   *
-   * @param upgradeContext  the upgrade context (not {@code null}).
-   * @throws AmbariException
-   */
-  public void applyStackAndProcessConfigurations(UpgradeContext upgradeContext)
-    throws AmbariException {
-
-    Cluster cluster = upgradeContext.getCluster();
-    Direction direction = upgradeContext.getDirection();
-    UpgradePack upgradePack = upgradeContext.getUpgradePack();
-    String stackName = upgradeContext.getRepositoryVersion().getStackId().getStackName();
-    String version = upgradeContext.getRepositoryVersion().getStackId().getStackVersion();
-    String userName = getManagementController().getAuthName();
-
-    RepositoryVersionEntity targetRve = s_repoVersionDAO.findByStackNameAndVersion(stackName, version);
-    if (null == targetRve) {
-      LOG.info("Could not find version entity for {}; not setting new configs", version);
-      return;
-    }
-
-    if (null == userName) {
-      userName = getManagementController().getAuthName();
-    }
-
-    // if the current and target stacks are the same (ie HDP 2.2.0.0 -> 2.2.1.0)
-    // then we should never do anything with configs on either upgrade or
-    // downgrade; however if we are going across stacks, we have to do the stack
-    // checks differently depending on whether this is an upgrade or downgrade
-    StackEntity targetStack = targetRve.getStack();
-    StackId currentStackId = cluster.getCurrentStackVersion();
-    StackId desiredStackId = cluster.getDesiredStackVersion();
-    StackId targetStackId = new StackId(targetStack);
-    // Only change configs if moving to a different stack.
-    switch (direction) {
-      case UPGRADE:
-        if (currentStackId.equals(targetStackId)) {
-          return;
-        }
-        break;
-      case DOWNGRADE:
-        if (desiredStackId.equals(targetStackId)) {
-          return;
-        }
-        break;
-    }
-
-    Map<String, Map<String, String>> newConfigurationsByType = null;
-    ConfigHelper configHelper = getManagementController().getConfigHelper();
-
-    if (direction == Direction.UPGRADE) {
-      // populate a map of default configurations for the old stack (this is
-      // used when determining if a property has been customized and should be
-      // overriden with the new stack value)
-      Map<String, Map<String, String>> oldStackDefaultConfigurationsByType = configHelper.getDefaultProperties(
-          currentStackId, cluster, true);
-
-      // populate a map with default configurations from the new stack
-      newConfigurationsByType = configHelper.getDefaultProperties(targetStackId, cluster, true);
-
-      // We want to skip updating config-types of services that are not in the upgrade pack.
-      // Care should be taken as some config-types could be in services that are in and out
-      // of the upgrade pack. We should never ignore config-types of services in upgrade pack.
-      Set<String> skipConfigTypes = new HashSet<>();
-      Set<String> upgradePackServices = new HashSet<>();
-      Set<String> upgradePackConfigTypes = new HashSet<>();
-      AmbariMetaInfo ambariMetaInfo = s_metaProvider.get();
-
-      // ensure that we get the service info from the target stack
-      // (since it could include new configuration types for a service)
-      Map<String, ServiceInfo> stackServicesMap = ambariMetaInfo.getServices(
-          targetStack.getStackName(), targetStack.getStackVersion());
-
-      for (Grouping group : upgradePack.getGroups(direction)) {
-        for (UpgradePack.OrderService service : group.services) {
-          if (service.serviceName == null || upgradePackServices.contains(service.serviceName)) {
-            // No need to re-process service that has already been looked at
-            continue;
-          }
-
-          upgradePackServices.add(service.serviceName);
-          ServiceInfo serviceInfo = stackServicesMap.get(service.serviceName);
-          if (serviceInfo == null) {
-            continue;
-          }
-
-          // add every configuration type for all services defined in the
-          // upgrade pack
-          Set<String> serviceConfigTypes = serviceInfo.getConfigTypeAttributes().keySet();
-          for (String serviceConfigType : serviceConfigTypes) {
-            if (!upgradePackConfigTypes.contains(serviceConfigType)) {
-              upgradePackConfigTypes.add(serviceConfigType);
-            }
-          }
-        }
-      }
-
-      // build a set of configurations that should not be merged since their
-      // services are not installed
-      Set<String> servicesNotInUpgradePack = new HashSet<>(stackServicesMap.keySet());
-      servicesNotInUpgradePack.removeAll(upgradePackServices);
-      for (String serviceNotInUpgradePack : servicesNotInUpgradePack) {
-        ServiceInfo serviceInfo = stackServicesMap.get(serviceNotInUpgradePack);
-        Set<String> configTypesOfServiceNotInUpgradePack = serviceInfo.getConfigTypeAttributes().keySet();
-        for (String configType : configTypesOfServiceNotInUpgradePack) {
-          if (!upgradePackConfigTypes.contains(configType) && !skipConfigTypes.contains(configType)) {
-            skipConfigTypes.add(configType);
-          }
-        }
-      }
-
-      // remove any configurations from the target stack that are not used
-      // because the services are not installed
-      Iterator<String> iterator = newConfigurationsByType.keySet().iterator();
-      while (iterator.hasNext()) {
-        String configType = iterator.next();
-        if (skipConfigTypes.contains(configType)) {
-          LOG.info("Stack Upgrade: Removing configs for config-type {}", configType);
-          iterator.remove();
-        }
-      }
-
-      // now that the map has been populated with the default configurations
-      // from the stack/service, overlay the existing configurations on top
-      Map<String, DesiredConfig> existingDesiredConfigurationsByType = cluster.getDesiredConfigs();
-      for (Map.Entry<String, DesiredConfig> existingEntry : existingDesiredConfigurationsByType.entrySet()) {
-        String configurationType = existingEntry.getKey();
-        if(skipConfigTypes.contains(configurationType)) {
-          LOG.info("Stack Upgrade: Skipping config-type {} as upgrade-pack contains no updates to its service", configurationType);
-          continue;
-        }
-
-        // NPE sanity, although shouldn't even happen since we are iterating
-        // over the desired configs to start with
-        Config currentClusterConfig = cluster.getDesiredConfigByType(configurationType);
-        if (null == currentClusterConfig) {
-          continue;
-        }
-
-        // get current stack default configurations on install
-        Map<String, String> configurationTypeDefaultConfigurations = oldStackDefaultConfigurationsByType.get(
-            configurationType);
-
-        // NPE sanity for current stack defaults
-        if (null == configurationTypeDefaultConfigurations) {
-          configurationTypeDefaultConfigurations = Collections.emptyMap();
-        }
-
-        // get the existing configurations
-        Map<String, String> existingConfigurations = currentClusterConfig.getProperties();
-
-        // if the new stack configurations don't have the type, then simply add
-        // all of the existing in
-        Map<String, String> newDefaultConfigurations = newConfigurationsByType.get(
-            configurationType);
-
-        if (null == newDefaultConfigurations) {
-          newConfigurationsByType.put(configurationType, existingConfigurations);
-          continue;
-        } else {
-          // TODO, should we remove existing configs whose value is NULL even though they don't have a value in the new stack?
-
-          // Remove any configs in the new stack whose value is NULL, unless they currently exist and the value is not NULL.
-          Iterator<Map.Entry<String, String>> iter = newDefaultConfigurations.entrySet().iterator();
-          while (iter.hasNext()) {
-            Map.Entry<String, String> entry = iter.next();
-            if (entry.getValue() == null) {
-              iter.remove();
-            }
-          }
-        }
-
-        // for every existing configuration, see if an entry exists; if it does
-        // not exist, then put it in the map, otherwise we'll have to compare
-        // the existing value to the original stack value to see if its been
-        // customized
-        for (Map.Entry<String, String> existingConfigurationEntry : existingConfigurations.entrySet()) {
-          String existingConfigurationKey = existingConfigurationEntry.getKey();
-          String existingConfigurationValue = existingConfigurationEntry.getValue();
-
-          // if there is already an entry, we now have to try to determine if
-          // the value was customized after stack installation
-          if (newDefaultConfigurations.containsKey(existingConfigurationKey)) {
-            String newDefaultConfigurationValue = newDefaultConfigurations.get(
-                existingConfigurationKey);
-
-            if (!StringUtils.equals(existingConfigurationValue, newDefaultConfigurationValue)) {
-              // the new default is different from the existing cluster value;
-              // only override the default value if the existing value differs
-              // from the original stack
-              String oldDefaultValue = configurationTypeDefaultConfigurations.get(
-                  existingConfigurationKey);
-
-              if (!StringUtils.equals(existingConfigurationValue, oldDefaultValue)) {
-                // at this point, we've determined that there is a difference
-                // between default values between stacks, but the value was
-                // also customized, so keep the customized value
-                newDefaultConfigurations.put(existingConfigurationKey, existingConfigurationValue);
-              }
-            }
-          } else {
-            // there is no entry in the map, so add the existing key/value pair
-            newDefaultConfigurations.put(existingConfigurationKey, existingConfigurationValue);
-          }
-        }
-
-        /*
-        for every new configuration which does not exist in the existing
-        configurations, see if it was present in the current stack
-
-        stack 2.x has foo-site/property (on-ambari-upgrade is false)
-        stack 2.y has foo-site/property
-        the current cluster (on 2.x) does not have it
-
-        In this case, we should NOT add it back as clearly stack advisor has removed it
-        */
-        Iterator<Map.Entry<String, String>> newDefaultConfigurationsIterator = newDefaultConfigurations.entrySet().iterator();
-        while( newDefaultConfigurationsIterator.hasNext() ){
-          Map.Entry<String, String> newConfigurationEntry = newDefaultConfigurationsIterator.next();
-          String newConfigurationPropertyName = newConfigurationEntry.getKey();
-          if (configurationTypeDefaultConfigurations.containsKey(newConfigurationPropertyName)
-              && !existingConfigurations.containsKey(newConfigurationPropertyName)) {
-            LOG.info(
-                "The property {}/{} exists in both {} and {} but is not part of the current set of configurations and will therefore not be included in the configuration merge",
-                configurationType, newConfigurationPropertyName, currentStackId, targetStackId);
-
-            // remove the property so it doesn't get merged in
-            newDefaultConfigurationsIterator.remove();
-          }
-        }
-      }
-    } else {
-      // downgrade
-      cluster.applyLatestConfigurations(cluster.getCurrentStackVersion());
-    }
-
-    // !!! update the stack
-    cluster.setDesiredStackVersion(
-        new StackId(targetStack.getStackName(), targetStack.getStackVersion()));
-
-    // !!! configs must be created after setting the stack version
-    if (null != newConfigurationsByType) {
-      configHelper.createConfigTypes(cluster, getManagementController(), newConfigurationsByType,
-          userName, "Configuration created for Upgrade");
-    }
-  }
-
   private RequestStageContainer createRequest(UpgradeContext upgradeContext) {
     ActionManager actionManager = getManagementController().getActionManager();
 

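The deleted applyStackAndProcessConfigurations method carried the cross-stack merge rule that now lives in UpgradeHelper. Reduced to its essence, as described in the removed javadoc, the rule for a single config type can be written as a small pure function; this is a simplified sketch under assumed names, not the replacement code:

    // Start from the target stack's defaults and carry over any cluster value that is
    // missing from the new stack or was customized away from the old stack's default.
    // (The real logic also drops properties the cluster deliberately removed; that
    // detail is omitted here.) Assumes java.util.{HashMap, Map, Objects} are imported.
    static Map<String, String> mergeOnUpgrade(Map<String, String> oldStackDefaults,
        Map<String, String> newStackDefaults, Map<String, String> clusterValues) {
      Map<String, String> merged = new HashMap<>(newStackDefaults);
      for (Map.Entry<String, String> entry : clusterValues.entrySet()) {
        String key = entry.getKey();
        String clusterValue = entry.getValue();
        boolean customized = !Objects.equals(clusterValue, oldStackDefaults.get(key));
        if (!merged.containsKey(key) || customized) {
          merged.put(key, clusterValue);
        }
      }
      return merged;
    }
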
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceConfigDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceConfigDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceConfigDAO.java
index 49ad682..72666e5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceConfigDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceConfigDAO.java
@@ -65,7 +65,7 @@ public class ServiceConfigDAO {
             "WHERE scv.serviceName=?1 AND scv.version=?2", ServiceConfigEntity.class);
     return daoUtils.selectOne(query, serviceName, version);
   }
-  
+
   @RequiresSession
   public List<ServiceConfigEntity> findByService(Long clusterId, String serviceName) {
     TypedQuery<ServiceConfigEntity> query = entityManagerProvider.get().
@@ -145,29 +145,31 @@ public class ServiceConfigDAO {
   }
 
   /**
-   * Get all service configurations for the specified cluster and stack. This
-   * will return different versions of the same configuration (HDFS v1 and v2)
-   * if they exist.
+   * Get service configurations for the specified cluster and stack. This will
+   * return different versions of the same configuration (HDFS v1 and v2) if
+   * they exist.
    *
    * @param clusterId
    *          the cluster (not {@code null}).
    * @param stackId
    *          the stack (not {@code null}).
+   * @param serviceName
    * @return all service configurations for the cluster and stack.
    */
   @RequiresSession
-  public List<ServiceConfigEntity> getAllServiceConfigsForClusterAndStack(Long clusterId,
-      StackId stackId) {
+  public List<ServiceConfigEntity> getServiceConfigsForServiceAndStack(Long clusterId,
+      StackId stackId, String serviceName) {
 
     StackEntity stackEntity = stackDAO.find(stackId.getStackName(),
         stackId.getStackVersion());
 
     TypedQuery<ServiceConfigEntity> query = entityManagerProvider.get().createNamedQuery(
-        "ServiceConfigEntity.findAllServiceConfigsByStack",
+        "ServiceConfigEntity.findServiceConfigsByStack",
         ServiceConfigEntity.class);
 
     query.setParameter("clusterId", clusterId);
     query.setParameter("stack", stackEntity);
+    query.setParameter("serviceName", serviceName);
 
     return daoUtils.selectList(query);
   }
@@ -266,7 +268,7 @@ public class ServiceConfigDAO {
 
   @Transactional
   public void removeHostFromServiceConfigs(final Long hostId) {
-    List<ServiceConfigEntity> allServiceConfigs = this.findAll();
+    List<ServiceConfigEntity> allServiceConfigs = findAll();
     for (ServiceConfigEntity serviceConfigEntity : allServiceConfigs) {
       List<Long> hostIds = serviceConfigEntity.getHostIds();
       if (hostIds != null && hostIds.contains(hostId)) {

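The renamed query narrows the old per-stack lookup to a single service. A hedged usage sketch, assuming serviceConfigDAO is an injected instance and sourceStackId is the stack of interest:

    // All persisted HDFS service-config versions created against the source stack.
    List<ServiceConfigEntity> hdfsConfigs = serviceConfigDAO.getServiceConfigsForServiceAndStack(
        cluster.getClusterId(), sourceStackId, "HDFS");
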
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/StackDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/StackDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/StackDAO.java
index 1385990..c0c7792 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/StackDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/StackDAO.java
@@ -25,6 +25,7 @@ import javax.persistence.TypedQuery;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.orm.RequiresSession;
 import org.apache.ambari.server.orm.entities.StackEntity;
+import org.apache.ambari.server.state.StackId;
 
 import com.google.inject.Inject;
 import com.google.inject.Provider;
@@ -94,6 +95,19 @@ public class StackDAO {
   }
 
   /**
+   * Gets the stack that matches the specified stack ID by name and version.
+   *
+   * @param stackId
+   *          the stack ID to find (not {@code null}).
+   * @return the stack matching the specified name and version or {@code null}
+   *         if none.
+   */
+  @RequiresSession
+  public StackEntity find(StackId stackId) {
+    return find(stackId.getStackName(), stackId.getStackVersion());
+  }
+
+  /**
    * Persists a new stack instance.
    *
    * @param stack

http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceConfigEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceConfigEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceConfigEntity.java
index a7ee0f6..b1409ed 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceConfigEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceConfigEntity.java
@@ -46,12 +46,24 @@ import javax.persistence.TableGenerator;
   , initialValue = 1
 )
 @NamedQueries({
-    @NamedQuery(name = "ServiceConfigEntity.findAll", query = "SELECT serviceConfig FROM ServiceConfigEntity serviceConfig WHERE serviceConfig.clusterId=:clusterId ORDER BY serviceConfig.version DESC"),
-    @NamedQuery(name = "ServiceConfigEntity.findNextServiceConfigVersion", query = "SELECT COALESCE(MAX(serviceConfig.version), 0) + 1 AS nextVersion FROM ServiceConfigEntity serviceConfig WHERE serviceConfig.serviceName=:serviceName AND serviceConfig.clusterId=:clusterId"),
-    @NamedQuery(name = "ServiceConfigEntity.findAllServiceConfigsByStack", query = "SELECT serviceConfig FROM ServiceConfigEntity serviceConfig WHERE serviceConfig.clusterId=:clusterId AND serviceConfig.stack=:stack"),
-    @NamedQuery(name = "ServiceConfigEntity.findLatestServiceConfigsByStack", query = "SELECT serviceConfig FROM ServiceConfigEntity serviceConfig WHERE serviceConfig.clusterId = :clusterId AND (serviceConfig.groupId = null OR serviceConfig.groupId IN (SELECT cg.groupId from ConfigGroupEntity cg)) AND serviceConfig.version = (SELECT MAX(serviceConfig2.version) FROM ServiceConfigEntity serviceConfig2 WHERE serviceConfig2.clusterId= :clusterId AND serviceConfig2.stack = :stack AND serviceConfig2.serviceName = serviceConfig.serviceName)"),
-    @NamedQuery(name = "ServiceConfigEntity.findLatestServiceConfigsByService", query = "SELECT scv FROM ServiceConfigEntity scv WHERE scv.clusterId = :clusterId AND scv.serviceName = :serviceName AND (scv.groupId = null OR scv.groupId IN (SELECT cg.groupId from ConfigGroupEntity cg)) AND scv.version = (SELECT MAX(scv2.version) FROM ServiceConfigEntity scv2 WHERE (scv2.serviceName = :serviceName AND scv2.clusterId = :clusterId) AND (scv2.groupId = scv.groupId OR (scv2.groupId IS NULL AND scv.groupId IS NULL)))"),
-    @NamedQuery(name = "ServiceConfigEntity.findLatestServiceConfigsByCluster", query = "SELECT scv FROM ServiceConfigEntity scv WHERE scv.clusterId = :clusterId AND scv.serviceConfigId IN (SELECT MAX(scv1.serviceConfigId) FROM ServiceConfigEntity scv1 WHERE (scv1.clusterId = :clusterId) AND (scv1.groupId IS NULL) GROUP BY scv1.serviceName)")})
+    @NamedQuery(
+        name = "ServiceConfigEntity.findAll",
+        query = "SELECT serviceConfig FROM ServiceConfigEntity serviceConfig WHERE serviceConfig.clusterId=:clusterId ORDER BY serviceConfig.version DESC"),
+    @NamedQuery(
+        name = "ServiceConfigEntity.findNextServiceConfigVersion",
+        query = "SELECT COALESCE(MAX(serviceConfig.version), 0) + 1 AS nextVersion FROM ServiceConfigEntity serviceConfig WHERE serviceConfig.serviceName=:serviceName AND serviceConfig.clusterId=:clusterId"),
+    @NamedQuery(
+        name = "ServiceConfigEntity.findServiceConfigsByStack",
+        query = "SELECT serviceConfig FROM ServiceConfigEntity serviceConfig WHERE serviceConfig.clusterId=:clusterId AND serviceConfig.stack=:stack AND serviceConfig.serviceName=:serviceName"),
+    @NamedQuery(
+        name = "ServiceConfigEntity.findLatestServiceConfigsByStack",
+        query = "SELECT serviceConfig FROM ServiceConfigEntity serviceConfig WHERE serviceConfig.clusterId = :clusterId AND (serviceConfig.groupId = null OR serviceConfig.groupId IN (SELECT cg.groupId from ConfigGroupEntity cg)) AND serviceConfig.version = (SELECT MAX(serviceConfig2.version) FROM ServiceConfigEntity serviceConfig2 WHERE serviceConfig2.clusterId= :clusterId AND serviceConfig2.stack = :stack AND serviceConfig2.serviceName = serviceConfig.serviceName)"),
+    @NamedQuery(
+        name = "ServiceConfigEntity.findLatestServiceConfigsByService",
+        query = "SELECT scv FROM ServiceConfigEntity scv WHERE scv.clusterId = :clusterId AND scv.serviceName = :serviceName AND (scv.groupId = null OR scv.groupId IN (SELECT cg.groupId from ConfigGroupEntity cg)) AND scv.version = (SELECT MAX(scv2.version) FROM ServiceConfigEntity scv2 WHERE (scv2.serviceName = :serviceName AND scv2.clusterId = :clusterId) AND (scv2.groupId = scv.groupId OR (scv2.groupId IS NULL AND scv.groupId IS NULL)))"),
+    @NamedQuery(
+        name = "ServiceConfigEntity.findLatestServiceConfigsByCluster",
+        query = "SELECT scv FROM ServiceConfigEntity scv WHERE scv.clusterId = :clusterId AND scv.serviceConfigId IN (SELECT MAX(scv1.serviceConfigId) FROM ServiceConfigEntity scv1 WHERE (scv1.clusterId = :clusterId) AND (scv1.groupId IS NULL) GROUP BY scv1.serviceName)") })
 public class ServiceConfigEntity {
   @Id
   @Column(name = "service_config_id")

http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckAction.java
index dc7bc10..1d0cc76 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckAction.java
@@ -46,8 +46,6 @@ public class ComponentVersionCheckAction extends FinalizeUpgradeAction {
   public CommandReport execute(ConcurrentMap<String, Object> requestSharedDataContext)
       throws AmbariException, InterruptedException {
 
-    Map<String, String> commandParams = getExecutionCommand().getCommandParams();
-
     String clusterName = getExecutionCommand().getClusterName();
 
     Cluster cluster = m_clusters.getCluster(clusterName);
@@ -59,8 +57,7 @@ public class ComponentVersionCheckAction extends FinalizeUpgradeAction {
     StringBuilder errSB = new StringBuilder();
 
     if (errors.isEmpty()) {
-      outSB.append("No version mismatches found for components");
-      errSB.append("No errors found for components");
+      outSB.append("All service components are reporting the correct version.");
       return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", outSB.toString(), errSB.toString());
     } else {
       String structuredOut = getErrors(outSB, errSB, errors);

http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java
index 55ec84b..6e79e84 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java
@@ -226,6 +226,7 @@ public class FinalizeUpgradeAction extends AbstractUpgradeServerAction {
       Cluster cluster = upgradeContext.getCluster();
       RepositoryVersionEntity downgradeFromRepositoryVersion = upgradeContext.getRepositoryVersion();
       String downgradeFromVersion = downgradeFromRepositoryVersion.getVersion();
+      Set<String> servicesInUpgrade = upgradeContext.getSupportedServices();
 
       String message;
 
@@ -234,7 +235,6 @@ public class FinalizeUpgradeAction extends AbstractUpgradeServerAction {
             "Finalizing the downgrade from {0} for all cluster services.",
             downgradeFromVersion);
       } else {
-        Set<String> servicesInUpgrade = upgradeContext.getSupportedServices();
         message = MessageFormat.format(
             "Finalizing the downgrade from {0} for the following services: {1}",
             downgradeFromVersion, StringUtils.join(servicesInUpgrade, ','));
@@ -291,6 +291,22 @@ public class FinalizeUpgradeAction extends AbstractUpgradeServerAction {
         }
       }
 
+      // remove any configurations for services which crossed a stack boundary
+      for( String serviceName : servicesInUpgrade ){
+        RepositoryVersionEntity sourceRepositoryVersion = upgradeContext.getSourceRepositoryVersion(serviceName);
+        RepositoryVersionEntity targetRepositoryVersion = upgradeContext.getTargetRepositoryVersion(serviceName);
+        StackId sourceStackId = sourceRepositoryVersion.getStackId();
+        StackId targetStackId = targetRepositoryVersion.getStackId();
+        // only work with configurations when crossing stacks
+        if (!sourceStackId.equals(targetStackId)) {
+          outSB.append(
+              String.format("Removing %s configurations for %s", sourceStackId,
+                  serviceName)).append(System.lineSeparator());
+
+          cluster.removeConfigurations(sourceStackId, serviceName);
+        }
+      }
+
       // ensure that when downgrading, we set the desired back to the
       // original value
       versionEventPublisher.publish(new StackUpgradeFinishEvent(cluster));

http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpdateDesiredStackAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpdateDesiredStackAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpdateDesiredStackAction.java
index 2eec581..84ca326 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpdateDesiredStackAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpdateDesiredStackAction.java
@@ -29,8 +29,6 @@ import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
 import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.controller.AmbariServer;
-import org.apache.ambari.server.controller.internal.UpgradeResourceProvider;
 import org.apache.ambari.server.orm.dao.HostVersionDAO;
 import org.apache.ambari.server.orm.entities.HostVersionEntity;
 import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
@@ -159,9 +157,8 @@ public class UpdateDesiredStackAction extends AbstractUpgradeServerAction {
         }
       }
 
-      UpgradeResourceProvider upgradeResourceProvider = new UpgradeResourceProvider(AmbariServer.getController());
-      upgradeResourceProvider.applyStackAndProcessConfigurations(upgradeContext);
-      m_upgradeHelper.putComponentsToUpgradingState(upgradeContext);
+      // move repositories to the right version and create/revert configs
+      m_upgradeHelper.updateDesiredRepositoriesAndConfigs(upgradeContext);
 
       // a downgrade must force host versions back to INSTALLED for the
       // repository which failed to be upgraded.

http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/stack/MasterHostResolver.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/MasterHostResolver.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/MasterHostResolver.java
index 3f1d859..466b695 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/MasterHostResolver.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/MasterHostResolver.java
@@ -38,6 +38,7 @@ import org.apache.ambari.server.state.MaintenanceState;
 import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.UpgradeContext;
+import org.apache.ambari.server.state.UpgradeState;
 import org.apache.ambari.server.state.stack.upgrade.Direction;
 import org.apache.ambari.server.utils.HTTPUtils;
 import org.apache.ambari.server.utils.HostAndPort;
@@ -80,10 +81,10 @@ public class MasterHostResolver {
    * @param upgradeContext
    *          the upgrade context
    */
-  public MasterHostResolver(ConfigHelper configHelper, UpgradeContext upgradeContext) {
+  public MasterHostResolver(Cluster cluster, ConfigHelper configHelper, UpgradeContext upgradeContext) {
     m_configHelper = configHelper;
     m_upgradeContext = upgradeContext;
-    m_cluster = upgradeContext.getCluster();
+    m_cluster = cluster;
   }
 
   /**
@@ -209,11 +210,20 @@ public class MasterHostResolver {
           continue;
         }
 
-        if(m_upgradeContext.getDirection() == Direction.UPGRADE){
+        if (sch.getUpgradeState() == UpgradeState.FAILED) {
           upgradeHosts.add(hostName);
           continue;
         }
 
+        if(m_upgradeContext.getDirection() == Direction.UPGRADE){
+          RepositoryVersionEntity targetRepositoryVersion = m_upgradeContext.getRepositoryVersion();
+          if (!StringUtils.equals(targetRepositoryVersion.getVersion(), sch.getVersion())) {
+            upgradeHosts.add(hostName);
+          }
+
+          continue;
+        }
+
         // it's a downgrade ...
         RepositoryVersionEntity downgradeToRepositoryVersion = m_upgradeContext.getTargetRepositoryVersion(service);
         String downgradeToVersion = downgradeToRepositoryVersion.getVersion();

http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
index 4d943f4..f72ab4f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
@@ -544,9 +544,8 @@ public interface Cluster {
   Map<String, Object> getSessionAttributes();
 
   /**
-   * Makes the most recent configurations in the specified stack the current set
-   * of configurations. This method will first ensure that the cluster's current
-   * stack matches that of the configuration stack specified.
+   * Makes the most recent configurations for the specified stack current. This
+   * will only modify configurations for the given service.
    * <p/>
    * When completed, all other configurations for any other stack will remain,
    * but will not be marked as selected.
@@ -554,18 +553,21 @@ public interface Cluster {
    * @param stackId
    *          the stack to use when finding the latest configurations (not
    *          {@code null}).
+   * @param serviceName
+   *          the service to modify configurations for (not {@code null}).
    */
-  void applyLatestConfigurations(StackId stackId);
+  void applyLatestConfigurations(StackId stackId, String serviceName);
 
   /**
-   * Removes all cluster configurations and service configurations that belong
-   * to the specified stack.
+   * Removes all configurations for the specified service and stack.
    *
    * @param stackId
    *          the stack to use when finding the configurations to remove (not
    *          {@code null}).
+   * @param serviceName
+   *          the service to remove configurations for (not {@code null}).
    */
-  void removeConfigurations(StackId stackId);
+  void removeConfigurations(StackId stackId, String serviceName);
 
   /**
    * Returns whether this cluster was provisioned by a Blueprint or not.

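Both methods are now scoped to a single service rather than the whole cluster. A conceptual sketch of how they compose on a downgrade, using assumed names (previousStackId, abandonedStackId, servicesInUpgrade) rather than the patch's actual finalize logic:

    for (String serviceName : servicesInUpgrade) {
      // re-select the previous stack's most recent configurations for this service ...
      cluster.applyLatestConfigurations(previousStackId, serviceName);
      // ... and drop the configurations that were created for the abandoned stack
      cluster.removeConfigurations(abandonedStackId, serviceName);
    }
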
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigFactory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigFactory.java
index 78f10cd..475c274 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigFactory.java
@@ -34,6 +34,7 @@ public interface ConfigFactory {
    * Creates a new {@link Config} object using provided values.
    *
    * @param cluster
+   * @param stackId
    * @param type
    * @param tag
    * @param map

http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
index 96c2dd0..66c9e21 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
@@ -1029,7 +1029,8 @@ public class ConfigHelper {
                                String serviceVersionNote) throws AmbariException {
 
     // create the configuration history entry
-    Config baseConfig = createConfig(cluster, controller, configType, FIRST_VERSION_TAG, properties,
+    Config baseConfig = createConfig(cluster, controller, cluster.getDesiredStackVersion(),
+        configType, FIRST_VERSION_TAG, properties,
         propertyAttributes);
 
     if (baseConfig != null) {
@@ -1070,13 +1071,14 @@ public class ConfigHelper {
    * Create configurations and assign them for services.
    * @param cluster               the cluster
    * @param controller            the controller
+   * @param stackId               the stack to create the new properties for
    * @param batchProperties       the type->config map batch of properties
    * @param authenticatedUserName the user that initiated the change
    * @param serviceVersionNote    the service version note
    * @throws AmbariException
    */
   public void createConfigTypes(Cluster cluster,
-      AmbariManagementController controller,
+      AmbariManagementController controller, StackId stackId,
       Map<String, Map<String, String>> batchProperties, String authenticatedUserName,
       String serviceVersionNote) throws AmbariException {
 
@@ -1086,8 +1088,8 @@ public class ConfigHelper {
       String type = entry.getKey();
       Map<String, String> properties = entry.getValue();
 
-      Config baseConfig = createConfig(cluster, controller, type, FIRST_VERSION_TAG, properties,
-        Collections.<String, Map<String,String>>emptyMap());
+      Config baseConfig = createConfig(cluster, controller, stackId, type, FIRST_VERSION_TAG,
+          properties, Collections.<String, Map<String, String>> emptyMap());
 
       if (null != baseConfig) {
         try {
@@ -1122,6 +1124,8 @@ public class ConfigHelper {
    * @param controller
    *          the controller which actually creates the configuration (not
    *          {@code null}).
+   * @param stackId
+   *          the stack to create the new properties for
    * @param type
    *          the new configuration type (not {@code null}).
    * @param tag
@@ -1134,8 +1138,8 @@ public class ConfigHelper {
    * @return
    * @throws AmbariException
    */
-  Config createConfig(Cluster cluster, AmbariManagementController controller, String type,
-      String tag, Map<String, String> properties,
+  Config createConfig(Cluster cluster, AmbariManagementController controller, StackId stackId,
+      String type, String tag, Map<String, String> properties,
       Map<String, Map<String, String>> propertyAttributes) throws AmbariException {
 
     // if the configuration is not new, then create a timestamp tag
@@ -1158,24 +1162,22 @@ public class ConfigHelper {
       }
     }
 
-    return controller.createConfig(cluster.getDesiredStackVersion(), cluster, type, properties, tag, propertyAttributes);
+    return controller.createConfig(cluster, stackId, type, properties, tag, propertyAttributes);
   }
 
   /**
-   * Gets the default properties from the specified stack and services when a
-   * cluster is first installed.
+   * Gets the default properties for the specified service. These properties
+   * represent those which would be used when a service is first installed.
    *
    * @param stack
    *          the stack to pull stack-values from (not {@code null})
-   * @param cluster
-   *          the cluster to use when determining which services default
-   *          configurations to include (not {@code null}).
-   * @param onStackUpgradeFilter if true skip {@code <on-stack-upgrade merge="false"/>} properties
+   * @param serviceName
+   *          the service name (not {@code null}).
    * @return a mapping of configuration type to map of key/value pairs for the
    *         default configurations.
    * @throws AmbariException
    */
-  public Map<String, Map<String, String>> getDefaultProperties(StackId stack, Cluster cluster, boolean onStackUpgradeFilter)
+  public Map<String, Map<String, String>> getDefaultProperties(StackId stack, String serviceName)
       throws AmbariException {
     Map<String, Map<String, String>> defaultPropertiesByType = new HashMap<>();
 
@@ -1189,28 +1191,26 @@ public class ConfigHelper {
       if (!defaultPropertiesByType.containsKey(type)) {
         defaultPropertiesByType.put(type, new HashMap<String, String>());
       }
-      if (!onStackUpgradeFilter || stackDefaultProperty.getPropertyStackUpgradeBehavior().isMerge()) {
+      if (stackDefaultProperty.getPropertyStackUpgradeBehavior().isMerge()) {
         defaultPropertiesByType.get(type).put(stackDefaultProperty.getName(),
             stackDefaultProperty.getValue());
       }
     }
 
     // for every installed service, populate the default service properties
-    for (String serviceName : cluster.getServices().keySet()) {
-      Set<org.apache.ambari.server.state.PropertyInfo> serviceConfigurationProperties = ambariMetaInfo.getServiceProperties(
-          stack.getStackName(), stack.getStackVersion(), serviceName);
+    Set<org.apache.ambari.server.state.PropertyInfo> serviceConfigurationProperties = ambariMetaInfo.getServiceProperties(
+        stack.getStackName(), stack.getStackVersion(), serviceName);
 
-      // !!! use new stack as the basis
-      for (PropertyInfo serviceDefaultProperty : serviceConfigurationProperties) {
-        String type = ConfigHelper.fileNameToConfigType(serviceDefaultProperty.getFilename());
+    // !!! use new stack as the basis
+    for (PropertyInfo serviceDefaultProperty : serviceConfigurationProperties) {
+      String type = ConfigHelper.fileNameToConfigType(serviceDefaultProperty.getFilename());
 
-        if (!defaultPropertiesByType.containsKey(type)) {
-          defaultPropertiesByType.put(type, new HashMap<String, String>());
-        }
-        if (!onStackUpgradeFilter || serviceDefaultProperty.getPropertyStackUpgradeBehavior().isMerge()) {
-          defaultPropertiesByType.get(type).put(serviceDefaultProperty.getName(),
-              serviceDefaultProperty.getValue());
-        }
+      if (!defaultPropertiesByType.containsKey(type)) {
+        defaultPropertiesByType.put(type, new HashMap<String, String>());
+      }
+      if (serviceDefaultProperty.getPropertyStackUpgradeBehavior().isMerge()) {
+        defaultPropertiesByType.get(type).put(serviceDefaultProperty.getName(),
+            serviceDefaultProperty.getValue());
       }
     }
 

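With these changes, default properties are resolved per service and new configurations are created against an explicit stack rather than the cluster's desired stack. A hedged sketch chaining the two calls (configHelper, controller, cluster, targetStackId and the user name are assumptions; both calls throw AmbariException):

    // Defaults for a single service on the target stack, persisted as new config types.
    Map<String, Map<String, String>> hdfsDefaults =
        configHelper.getDefaultProperties(targetStackId, "HDFS");

    configHelper.createConfigTypes(cluster, controller, targetStackId, hdfsDefaults,
        "admin", "Configuration created for Upgrade");
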
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
index 0adf1bd..2ee1b26 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
@@ -32,8 +32,10 @@ import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
 import org.apache.ambari.server.logging.LockFactory;
 import org.apache.ambari.server.orm.dao.ClusterDAO;
 import org.apache.ambari.server.orm.dao.ServiceConfigDAO;
+import org.apache.ambari.server.orm.dao.StackDAO;
 import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
 import org.apache.ambari.server.orm.entities.ClusterEntity;
+import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -99,10 +101,11 @@ public class ConfigImpl implements Config {
   ConfigImpl(@Assisted Cluster cluster, @Assisted("type") String type,
       @Assisted("tag") @Nullable String tag,
       @Assisted Map<String, String> properties,
-      @Assisted @Nullable Map<String, Map<String, String>> propertiesAttributes, ClusterDAO clusterDAO,
+      @Assisted @Nullable Map<String, Map<String, String>> propertiesAttributes,
+      ClusterDAO clusterDAO, StackDAO stackDAO,
       Gson gson, AmbariEventPublisher eventPublisher, LockFactory lockFactory) {
     this(cluster.getDesiredStackVersion(), cluster, type, tag, properties, propertiesAttributes,
-        clusterDAO, gson, eventPublisher, lockFactory);
+        clusterDAO, stackDAO, gson, eventPublisher, lockFactory);
   }
 
 
@@ -110,7 +113,8 @@ public class ConfigImpl implements Config {
   ConfigImpl(@Assisted @Nullable StackId stackId, @Assisted Cluster cluster, @Assisted("type") String type,
       @Assisted("tag") @Nullable String tag,
       @Assisted Map<String, String> properties,
-      @Assisted @Nullable Map<String, Map<String, String>> propertiesAttributes, ClusterDAO clusterDAO,
+      @Assisted @Nullable Map<String, Map<String, String>> propertiesAttributes,
+      ClusterDAO clusterDAO, StackDAO stackDAO,
       Gson gson, AmbariEventPublisher eventPublisher, LockFactory lockFactory) {
 
     propertyLock = lockFactory.newReadWriteLock(PROPERTY_LOCK_LABEL);
@@ -133,6 +137,7 @@ public class ConfigImpl implements Config {
     this.tag = tag;
 
     ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());
+    StackEntity stackEntity = stackDAO.find(stackId.getStackName(), stackId.getStackVersion());
 
     ClusterConfigEntity entity = new ClusterConfigEntity();
     entity.setClusterEntity(clusterEntity);
@@ -141,7 +146,7 @@ public class ConfigImpl implements Config {
     entity.setVersion(version);
     entity.setTag(this.tag);
     entity.setTimestamp(System.currentTimeMillis());
-    entity.setStack(clusterEntity.getDesiredStack());
+    entity.setStack(stackEntity);
     entity.setData(gson.toJson(properties));
 
     if (null != propertiesAttributes) {

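A short, hedged illustration of the ConfigImpl change above: configurations created during a
service or patch upgrade are now persisted against the stack id handed to the constructor
instead of the cluster's current desired stack. The factory call below assumes ConfigFactory
exposes an overload mirroring the @Assisted constructor shown in the diff; the method name
createNew and the parameter order are assumptions made only for illustration:

    // Sketch only -- configFactory, cluster, properties and propertyAttributes are assumed
    // to already exist; the stack id and tag are hypothetical.
    StackId targetStackId = new StackId("HDP-2.5");
    Config newConfig = configFactory.createNew(targetStackId, cluster, "zoo.cfg", "version2",
        properties, propertyAttributes);
    // the backing ClusterConfigEntity is stored against targetStackId rather than
    // clusterEntity.getDesiredStack(), so merged configs are attributed to the new stack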
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
index 5c29fb5..f07bd37 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
@@ -186,6 +186,14 @@ public class UpgradeContext {
   private final Map<String, RepositoryVersionEntity> m_targetRepositoryMap = new HashMap<>();
 
   /**
+   * A mapping of service to source (from) repository. On an upgrade, this will
+   * be the current desired repository of every service. When downgrading, this
+   * will be the same for all services and will represent the value returned
+   * from {@link #getRepositoryVersion()}.
+   */
+  private final Map<String, RepositoryVersionEntity> m_sourceRepositoryMap = new HashMap<>();
+
+  /**
    * Used by some {@link Grouping}s to generate commands. It is exposed here
    * mainly for injection purposes since the XML is not created by Guice.
    */
@@ -303,8 +311,10 @@ public class UpgradeContext {
         }
 
         // populate the target repository map for all services in the upgrade
-        for (String service : m_services) {
-          m_targetRepositoryMap.put(service, m_repositoryVersion);
+        for (String serviceName : m_services) {
+          Service service = cluster.getService(serviceName);
+          m_sourceRepositoryMap.put(serviceName, service.getDesiredRepositoryVersion());
+          m_targetRepositoryMap.put(serviceName, m_repositoryVersion);
         }
 
         break;
@@ -315,9 +325,10 @@ public class UpgradeContext {
 
         m_repositoryVersion = upgrade.getRepositoryVersion();
 
-        // populate the target repository map for all services in the upgrade
+        // populate the repository maps for all services in the upgrade
         for (UpgradeHistoryEntity history : upgrade.getHistory()) {
           m_services.add(history.getServiceName());
+          m_sourceRepositoryMap.put(history.getServiceName(), m_repositoryVersion);
           m_targetRepositoryMap.put(history.getServiceName(), history.getFromReposistoryVersion());
         }
 
@@ -376,7 +387,7 @@ public class UpgradeContext {
     m_autoSkipServiceCheckFailures = skipServiceCheckFailures;
     m_autoSkipManualVerification = skipManualVerification;
 
-    m_resolver = new MasterHostResolver(configHelper, this);
+    m_resolver = new MasterHostResolver(m_cluster, configHelper, this);
   }
 
   /**
@@ -405,7 +416,9 @@ public class UpgradeContext {
     List<UpgradeHistoryEntity> allHistory = upgradeEntity.getHistory();
     for (UpgradeHistoryEntity history : allHistory) {
       String serviceName = history.getServiceName();
+      RepositoryVersionEntity sourceRepositoryVersion = history.getFromReposistoryVersion();
       RepositoryVersionEntity targetRepositoryVersion = history.getTargetRepositoryVersion();
+      m_sourceRepositoryMap.put(serviceName, sourceRepositoryVersion);
       m_targetRepositoryMap.put(serviceName, targetRepositoryVersion);
       m_services.add(serviceName);
     }
@@ -416,7 +429,7 @@ public class UpgradeContext {
     Map<String, UpgradePack> packs = m_metaInfo.getUpgradePacks(stackId.getStackName(), stackId.getStackVersion());
     m_upgradePack = packs.get(upgradePackage);
 
-    m_resolver = new MasterHostResolver(configHelper, this);
+    m_resolver = new MasterHostResolver(m_cluster, configHelper, this);
   }
 
   /**
@@ -448,6 +461,50 @@ public class UpgradeContext {
   }
 
   /**
+   * Gets the versions that services are considered to be "coming from".
+   * <p/>
+   * With a {@link Direction#UPGRADE}, these values represent each service's
+   * desired repository. However, {@link Direction#DOWNGRADE} will use the same
+   * value for all services, which is the version that the downgrade is coming
+   * from.
+   *
+   * @return the source versions for the upgrade, keyed by service name
+   */
+  public Map<String, RepositoryVersionEntity> getSourceVersions() {
+    return new HashMap<>(m_sourceRepositoryMap);
+  }
+
+  /**
+   * Gets the version that the service is considered to be "coming from".
+   * <p/>
+   * With a {@link Direction#UPGRADE}, this value represents the service's
+   * desired repository. However, {@link Direction#DOWNGRADE} will use the same
+   * value for all services, which is the version that the downgrade is coming
+   * from.
+   *
+   * @return the source repository for the upgrade
+   */
+  public RepositoryVersionEntity getSourceRepositoryVersion(String serviceName) {
+    return m_sourceRepositoryMap.get(serviceName);
+  }
+
+  /**
+   * Gets the version that the service is considered to be "coming from".
+   * <p/>
+   * With a {@link Direction#UPGRADE}, this value represents the service's
+   * desired repository. However, {@link Direction#DOWNGRADE} will use the same
+   * value for all services, which is the version that the downgrade is coming
+   * from.
+   *
+   * @return the source version for the upgrade
+   * @see #getSourceRepositoryVersion(String)
+   */
+  public String getSourceVersion(String serviceName) {
+    RepositoryVersionEntity serviceSourceVersion = m_sourceRepositoryMap.get(serviceName);
+    return serviceSourceVersion.getVersion();
+  }
+
+  /**
    * Gets the version being upgraded to or downgraded to for all services
    * participating. This is the version that the service will be on if the
    * upgrade or downgrade succeeds.
@@ -487,6 +544,7 @@ public class UpgradeContext {
    * the original repository that the service was on.
    *
    * @return the target version for the upgrade
+   * @see #getTargetRepositoryVersion(String)
    */
   public String getTargetVersion(String serviceName) {
     RepositoryVersionEntity serviceTargetVersion = m_targetRepositoryMap.get(serviceName);

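As a quick illustration of the new source/target accessors added above, the following sketch
(assuming an UpgradeContext already constructed for an active upgrade) mirrors how
UpgradeHelper later decides whether a service crosses stacks and therefore needs its
configurations merged or reverted:

    // Sketch only -- upgradeContext is an existing UpgradeContext for the running upgrade.
    for (String serviceName : upgradeContext.getSupportedServices()) {
      RepositoryVersionEntity source = upgradeContext.getSourceRepositoryVersion(serviceName);
      RepositoryVersionEntity target = upgradeContext.getTargetRepositoryVersion(serviceName);

      // configurations are only merged/reverted when the service crosses stacks
      boolean crossesStacks = !source.getStackId().equals(target.getStackId());

      System.out.printf("%s: %s -> %s (config merge required: %s)%n", serviceName,
          upgradeContext.getSourceVersion(serviceName),
          upgradeContext.getTargetVersion(serviceName), crossesStacks);
    }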
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
index 0f39e60..b228988 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
@@ -22,6 +22,7 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.Iterator;
 import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
@@ -33,6 +34,7 @@ import org.apache.ambari.annotations.Experimental;
 import org.apache.ambari.annotations.ExperimentalFeature;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.controller.internal.TaskResourceProvider;
 import org.apache.ambari.server.controller.predicate.AndPredicate;
 import org.apache.ambari.server.controller.spi.ClusterController;
@@ -49,7 +51,10 @@ import org.apache.ambari.server.controller.utilities.PredicateBuilder;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
 import org.apache.ambari.server.events.listeners.upgrade.StackVersionListener;
 import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
+import org.apache.ambari.server.orm.dao.ServiceConfigDAO;
+import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
 import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
+import org.apache.ambari.server.orm.entities.ServiceConfigEntity;
 import org.apache.ambari.server.stack.HostsType;
 import org.apache.ambari.server.stack.MasterHostResolver;
 import org.apache.ambari.server.state.stack.UpgradePack;
@@ -177,25 +182,46 @@ public class UpgradeHelper {
    * {@link StageWrapperBuilder} has finished building out all of the stages.
    */
   @Inject
-  private Provider<ConfigHelper> m_configHelper;
+  private Provider<ConfigHelper> m_configHelperProvider;
 
   @Inject
-  private Provider<AmbariMetaInfo> m_ambariMetaInfo;
+  private Provider<AmbariMetaInfo> m_ambariMetaInfoProvider;
 
   @Inject
-  private Provider<Clusters> clusters;
+  private Provider<Clusters> m_clusters;
 
   @Inject
-  private Provider<RepositoryVersionDAO> s_repoVersionDAO;
+  private Provider<RepositoryVersionDAO> m_repoVersionProvider;
 
   /**
-   * Get right Upgrade Pack, depends on stack, direction and upgrade type information
-   * @param clusterName The name of the cluster
-   * @param upgradeFromVersion Current stack version
-   * @param upgradeToVersion Target stack version
-   * @param direction {@code Direction} of the upgrade
-   * @param upgradeType The {@code UpgradeType}
-   * @param preferredUpgradePackName For unit test, need to prefer an upgrade pack since multiple matches can be found.
+   * Used to update the configuration properties.
+   */
+  @Inject
+  private Provider<AmbariManagementController> m_controllerProvider;
+
+  /**
+   * Used to get configurations by service name.
+   */
+  @Inject
+  private ServiceConfigDAO m_serviceConfigDAO;
+
+  /**
+   * Gets the correct upgrade pack based on the stack, direction, and upgrade
+   * type information.
+   *
+   * @param clusterName
+   *          The name of the cluster
+   * @param upgradeFromVersion
+   *          Current stack version
+   * @param upgradeToVersion
+   *          Target stack version
+   * @param direction
+   *          {@code Direction} of the upgrade
+   * @param upgradeType
+   *          The {@code UpgradeType}
+   * @param preferredUpgradePackName
+   *          For unit tests, the upgrade pack to prefer when multiple
+   *          matches are found.
    * @return {@code UpgradeType} object
    * @throws AmbariException
    */
@@ -203,7 +229,7 @@ public class UpgradeHelper {
     Direction direction, UpgradeType upgradeType, String preferredUpgradePackName) throws AmbariException {
 
     // Find upgrade packs based on current stack. This is where to upgrade from
-    Cluster cluster = clusters.get().getCluster(clusterName);
+    Cluster cluster = m_clusters.get().getCluster(clusterName);
     StackId stack =  cluster.getCurrentStackVersion();
 
     String repoVersion = upgradeToVersion;
@@ -213,13 +239,14 @@ public class UpgradeHelper {
       repoVersion = upgradeFromVersion;
     }
 
-    RepositoryVersionEntity versionEntity = s_repoVersionDAO.get().findByStackNameAndVersion(stack.getStackName(), repoVersion);
+    RepositoryVersionEntity versionEntity = m_repoVersionProvider.get().findByStackNameAndVersion(
+        stack.getStackName(), repoVersion);
 
     if (versionEntity == null) {
       throw new AmbariException(String.format("Repository version %s was not found", repoVersion));
     }
 
-    Map<String, UpgradePack> packs = m_ambariMetaInfo.get().getUpgradePacks(stack.getStackName(), stack.getStackVersion());
+    Map<String, UpgradePack> packs = m_ambariMetaInfoProvider.get().getUpgradePacks(stack.getStackName(), stack.getStackVersion());
     UpgradePack pack = null;
 
     if (StringUtils.isNotEmpty(preferredUpgradePackName) && packs.containsKey(preferredUpgradePackName)) {
@@ -595,7 +622,7 @@ public class UpgradeHelper {
           value = ctx.getDirection().getText(p == Placeholder.DIRECTION_TEXT_PROPER);
           break;
         default:
-          value = m_configHelper.get().getPlaceholderValueFromDesiredConfigurations(
+          value = m_configHelperProvider.get().getPlaceholderValueFromDesiredConfigurations(
               cluster, token);
           break;
       }
@@ -701,7 +728,7 @@ public class UpgradeHelper {
   private void setDisplayNames(UpgradeContext context, String service, String component) {
     StackId stackId = context.getCluster().getDesiredStackVersion();
     try {
-      ServiceInfo serviceInfo = m_ambariMetaInfo.get().getService(stackId.getStackName(),
+      ServiceInfo serviceInfo = m_ambariMetaInfoProvider.get().getService(stackId.getStackName(),
           stackId.getStackVersion(), service);
       context.setServiceDisplay(service, serviceInfo.getDisplayName());
 
@@ -714,6 +741,32 @@ public class UpgradeHelper {
   }
 
   /**
+   * Updates the various repositories and configurations for services
+   * participating in the upgrade or downgrade. The following actions are
+   * performed in order:
+   * <ul>
+   * <li>The desired repository for every service and component is changed
+   * <li>The {@link UpgradeState} of every component host is moved to either
+   * {@link UpgradeState#IN_PROGRESS} or {@link UpgradeState#NONE}.
+   * <li>In the case of an upgrade, new configurations and service
+   * configurations are created if necessary. In the case of a downgrade, any
+   * configurations created by the upgrade are reverted.
+   * </ul>
+   *
+   * @param upgradeContext
+   *          the upgrade context holding all relevant upgrade information (not
+   *          {@code null}).
+   * @throws AmbariException
+   */
+  @Transactional
+  @Experimental(feature = ExperimentalFeature.PATCH_UPGRADES)
+  public void updateDesiredRepositoriesAndConfigs(UpgradeContext upgradeContext)
+      throws AmbariException {
+    setDesiredRepositories(upgradeContext);
+    processConfigurationsIfRequired(upgradeContext);
+  }
+
+  /**
    * Transitions all affected components to {@link UpgradeState#IN_PROGRESS}.
    * Transition is performed only for components that advertise their version.
    * Additionally sets the service component desired version to the specified
@@ -726,10 +779,8 @@ public class UpgradeHelper {
    * @param upgradeContext
    *          the upgrade context (not {@code null}).
    */
-  @Transactional
   @Experimental(feature = ExperimentalFeature.PATCH_UPGRADES)
-  public void putComponentsToUpgradingState(UpgradeContext upgradeContext) throws AmbariException {
-
+  private void setDesiredRepositories(UpgradeContext upgradeContext) throws AmbariException {
     Cluster cluster = upgradeContext.getCluster();
     Set<String> services = upgradeContext.getSupportedServices();
 
@@ -743,7 +794,7 @@ public class UpgradeHelper {
       for (ServiceComponent serviceComponent : components) {
         boolean versionAdvertised = false;
         try {
-          ComponentInfo ci = m_ambariMetaInfo.get().getComponent(targetStack.getStackName(),
+          ComponentInfo ci = m_ambariMetaInfoProvider.get().getComponent(targetStack.getStackName(),
               targetStack.getStackVersion(), serviceComponent.getServiceName(),
               serviceComponent.getName());
 
@@ -777,4 +828,199 @@ public class UpgradeHelper {
       }
     }
   }
+
+  /**
+   * Handles the creation or resetting of configurations based on whether an
+   * upgrade or downgrade is occurring. This method will not do anything when
+   * the service is not crossing major stack versions, since, by definition, no
+   * new configurations are automatically created when upgrading with the same
+   * stack (ie HDP 2.2.0.0 -> HDP 2.2.1.0).
+   * <p/>
+   * When upgrading or downgrade between stacks (HDP 2.2.0.0 -> HDP 2.3.0.0)
+   * then this will perform the following:
+   * <ul>
+   * <li>Upgrade: Create new configurations that are a merge between the source
+   * stack and the target stack. If a value has changed between stacks, then the
+   * target stack value should be taken unless the cluster's value differs from
+   * the old stack. This can occur if a property has been customized after
+   * installation.</li>
+   * <li>Downgrade: Reset the latest configurations from the service's original
+   * stack. The new configurations that were created on upgrade must be left
+   * intact until all components have been reverted, otherwise heartbeats will
+   * fail due to missing configurations.</li>
+   * </ul>
+   *
+   * @param upgradeContext
+   *          the upgrade context (not {@code null}).
+   * @throws AmbariException
+   */
+  private void processConfigurationsIfRequired(UpgradeContext upgradeContext)
+      throws AmbariException {
+
+    AmbariManagementController controller = m_controllerProvider.get();
+
+    Cluster cluster = upgradeContext.getCluster();
+    Direction direction = upgradeContext.getDirection();
+    String userName = controller.getAuthName();
+    Set<String> servicesInUpgrade = upgradeContext.getSupportedServices();
+
+    // merge or revert configurations for any service that needs it
+    for( String serviceName : servicesInUpgrade ){
+      RepositoryVersionEntity sourceRepositoryVersion = upgradeContext.getSourceRepositoryVersion(serviceName);
+      RepositoryVersionEntity targetRepositoryVersion = upgradeContext.getTargetRepositoryVersion(serviceName);
+      StackId sourceStackId = sourceRepositoryVersion.getStackId();
+      StackId targetStackId = targetRepositoryVersion.getStackId();
+
+      // only work with configurations when crossing stacks
+      if (sourceStackId.equals(targetStackId)) {
+        RepositoryVersionEntity associatedRepositoryVersion = upgradeContext.getRepositoryVersion();
+        LOG.info(
+            "The {} {} {} will not change stack configurations for {} since the source and target are both {}",
+            direction.getText(false), direction.getPreposition(),
+            associatedRepositoryVersion.getVersion(), serviceName, targetStackId);
+
+        continue;
+      }
+
+      ConfigHelper configHelper = m_configHelperProvider.get();
+
+      // downgrade is easy - just remove the new and make the old current
+      if (direction == Direction.DOWNGRADE) {
+        cluster.applyLatestConfigurations(targetStackId, serviceName);
+        continue;
+      }
+
+      // upgrade is a bit harder - we have to merge new stack configurations in
+
+      // populate a map of default configurations for the service on the old
+      // stack (this is used when determining if a property has been
+      // customized and should be overridden with the new stack value)
+      Map<String, Map<String, String>> oldServiceDefaultConfigsByType = configHelper.getDefaultProperties(
+          sourceStackId, serviceName);
+
+      // populate a map with default configurations from the new stack
+      Map<String, Map<String, String>> newServiceDefaultConfigsByType = configHelper.getDefaultProperties(
+          targetStackId, serviceName);
+
+      // find the current, existing configurations for the service
+      List<Config> existingServiceConfigs = new ArrayList<>();
+      List<ServiceConfigEntity> latestServiceConfigs = m_serviceConfigDAO.getLastServiceConfigsForService(
+          cluster.getClusterId(), serviceName);
+
+      for (ServiceConfigEntity serviceConfig : latestServiceConfigs) {
+        List<ClusterConfigEntity> existingConfigurations = serviceConfig.getClusterConfigEntities();
+        for (ClusterConfigEntity currentServiceConfig : existingConfigurations) {
+          String configurationType = currentServiceConfig.getType();
+          Config currentClusterConfigForService = cluster.getDesiredConfigByType(configurationType);
+          existingServiceConfigs.add(currentClusterConfigForService);
+        }
+      }
+
+      // now that we have found the old, new, and existing configs, overlay the
+      // existing on top of the new
+      for (Config existingServiceConfig : existingServiceConfigs) {
+        String configurationType = existingServiceConfig.getType();
+
+        // get current stack default configurations on install
+        Map<String, String> oldServiceDefaultConfigs = oldServiceDefaultConfigsByType.get(
+            configurationType);
+
+        // NPE sanity for current stack defaults
+        if (null == oldServiceDefaultConfigs) {
+          oldServiceDefaultConfigs = Collections.emptyMap();
+        }
+
+        // get the existing configurations
+        Map<String, String> existingConfigurations = existingServiceConfig.getProperties();
+
+        // get the new configurations
+        Map<String, String> newDefaultConfigurations = newServiceDefaultConfigsByType.get(
+            configurationType);
+
+        // if the new stack configurations don't have the type, then simply add
+        // all of the existing properties in
+        if (null == newDefaultConfigurations) {
+          newServiceDefaultConfigsByType.put(configurationType, existingConfigurations);
+          continue;
+        } else {
+          // Remove any new stack defaults whose value is NULL; existing non-NULL
+          // cluster values for those properties are re-added in the loop below.
+          Iterator<Map.Entry<String, String>> iter = newDefaultConfigurations.entrySet().iterator();
+          while (iter.hasNext()) {
+            Map.Entry<String, String> entry = iter.next();
+            if (entry.getValue() == null) {
+              iter.remove();
+            }
+          }
+        }
+
+        // process every existing configuration property for this configuration
+        // type
+        for (Map.Entry<String, String> existingConfigurationEntry : existingConfigurations.entrySet()) {
+          String existingConfigurationKey = existingConfigurationEntry.getKey();
+          String existingConfigurationValue = existingConfigurationEntry.getValue();
+
+          // if there is already an entry, we now have to try to determine if
+          // the value was customized after stack installation
+          if (newDefaultConfigurations.containsKey(existingConfigurationKey)) {
+            String newDefaultConfigurationValue = newDefaultConfigurations.get(
+                existingConfigurationKey);
+
+            if (!StringUtils.equals(existingConfigurationValue, newDefaultConfigurationValue)) {
+              // the new default is different from the existing cluster value;
+              // only override the default value if the existing value differs
+              // from the original stack
+              String oldDefaultValue = oldServiceDefaultConfigs.get(existingConfigurationKey);
+
+              if (!StringUtils.equals(existingConfigurationValue, oldDefaultValue)) {
+                // at this point, we've determined that there is a
+                // difference
+                // between default values between stacks, but the value was
+                // also customized, so keep the customized value
+                newDefaultConfigurations.put(existingConfigurationKey, existingConfigurationValue);
+              }
+            }
+          } else {
+            // there is no entry in the map, so add the existing key/value
+            // pair
+            newDefaultConfigurations.put(existingConfigurationKey, existingConfigurationValue);
+          }
+        }
+
+        /*
+        for every new configuration which does not exist in the existing
+        configurations, see if it was present in the source (old) stack, e.g.:
+
+        stack 2.x has foo-site/property (on-ambari-upgrade is false)
+        stack 2.y has foo-site/property
+        the current cluster (on 2.x) does not have it
+
+        In this case, we should NOT add it back as clearly stack advisor has removed it
+        */
+        Iterator<Map.Entry<String, String>> newDefaultConfigurationsIterator = newDefaultConfigurations.entrySet().iterator();
+        while (newDefaultConfigurationsIterator.hasNext()) {
+          Map.Entry<String, String> newConfigurationEntry = newDefaultConfigurationsIterator.next();
+          String newConfigurationPropertyName = newConfigurationEntry.getKey();
+          if (oldServiceDefaultConfigs.containsKey(newConfigurationPropertyName)
+              && !existingConfigurations.containsKey(newConfigurationPropertyName)) {
+            LOG.info(
+                "The property {}/{} exists in both {} and {} but is not part of the current set of configurations and will therefore not be included in the configuration merge",
+                configurationType, newConfigurationPropertyName, sourceStackId, targetStackId);
+
+            // remove the property so it doesn't get merged in
+            newDefaultConfigurationsIterator.remove();
+          }
+        }
+      }
+
+      if (null != newServiceDefaultConfigsByType) {
+        Set<String> configTypes = newServiceDefaultConfigsByType.keySet();
+        LOG.info("The upgrade will create the following configurations for stack {}: {}",
+            targetStackId, StringUtils.join(configTypes, ','));
+
+        configHelper.createConfigTypes(cluster, controller, targetStackId,
+            newServiceDefaultConfigsByType, userName, "Configuration created for Upgrade");
+      }
+    }
+  }
 }
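To summarize the merge performed by updateDesiredRepositoriesAndConfigs() ->
processConfigurationsIfRequired() above, here is a condensed, hedged restatement of the
per-property rule; mergeProperty() is a hypothetical helper used only for illustration and is
not part of the patch. It takes the cluster's current value plus the defaults from the source
and target stacks and returns the value the merge keeps:

    // Sketch only -- uses org.apache.commons.lang.StringUtils, already imported by UpgradeHelper.
    static String mergeProperty(String existingValue, String oldStackDefault, String newStackDefault) {
      if (newStackDefault == null) {
        // the target stack does not define the property; carry the cluster value forward
        return existingValue;
      }
      if (StringUtils.equals(existingValue, newStackDefault)) {
        // the cluster value and the new default already agree
        return newStackDefault;
      }
      // the values differ: take the new stack default unless the cluster value was
      // customized away from the old stack default after installation
      return StringUtils.equals(existingValue, oldStackDefault) ? newStackDefault : existingValue;
    }

Properties that exist only in the source stack defaults and are absent from the cluster
(presumably removed by the stack advisor) never reach this rule; the final iterator pass in
processConfigurationsIfRequired() strips them from the merged set first.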