Posted to commits@ambari.apache.org by jo...@apache.org on 2017/07/19 14:20:06 UTC

[04/13] ambari git commit: AMBARI-21450 - Fixing Unit Test Compilation Issues From trunk Merge (jonathanhurley)

http://git-wip-us.apache.org/repos/asf/ambari/blob/56362fd6/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
index 037e47b..6907f32 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -18,13 +18,11 @@
 package org.apache.ambari.server.serveraction.upgrades;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
 import java.lang.reflect.Field;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -42,28 +40,26 @@ import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
 import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.controller.AmbariCustomCommandExecutionHelper;
 import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.controller.AmbariServer;
 import org.apache.ambari.server.controller.ServiceConfigVersionResponse;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.orm.OrmTestHelper;
-import org.apache.ambari.server.orm.dao.ClusterVersionDAO;
+import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
 import org.apache.ambari.server.orm.dao.HostDAO;
 import org.apache.ambari.server.orm.dao.HostVersionDAO;
 import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
 import org.apache.ambari.server.orm.dao.RequestDAO;
-import org.apache.ambari.server.orm.dao.ServiceComponentDesiredStateDAO;
 import org.apache.ambari.server.orm.dao.StackDAO;
 import org.apache.ambari.server.orm.dao.UpgradeDAO;
-import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
+import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
 import org.apache.ambari.server.orm.entities.HostVersionEntity;
 import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.orm.entities.RequestEntity;
-import org.apache.ambari.server.orm.entities.ServiceComponentHistoryEntity;
 import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.orm.entities.UpgradeEntity;
+import org.apache.ambari.server.orm.entities.UpgradeHistoryEntity;
 import org.apache.ambari.server.serveraction.ServerAction;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
@@ -79,10 +75,11 @@ import org.apache.ambari.server.state.ServiceComponentHostFactory;
 import org.apache.ambari.server.state.ServiceFactory;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.State;
+import org.apache.ambari.server.state.UpgradeState;
 import org.apache.ambari.server.state.stack.UpgradePack;
-import org.apache.ambari.server.state.stack.upgrade.Direction;
 import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
 import org.apache.ambari.server.utils.EventBusSynchronizer;
+import org.apache.commons.lang.StringUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -103,11 +100,12 @@ public class UpgradeActionTest {
   private static final String HDP_2_1_1_1 = "2.1.1.1-2";
 
   private static final String HDP_2_2_0_1 = "2.2.0.1-3";
-  private static final String HDP_2_2_0_2 = "2.2.0.2-4";
 
   private static final StackId HDP_21_STACK = new StackId("HDP-2.1.1");
   private static final StackId HDP_22_STACK = new StackId("HDP-2.2.0");
 
+  private RepositoryVersionEntity sourceRepositoryVersion;
+
   private Injector m_injector;
 
   private AmbariManagementController amc;
@@ -118,8 +116,6 @@ public class UpgradeActionTest {
   @Inject
   private Clusters clusters;
   @Inject
-  private ClusterVersionDAO clusterVersionDAO;
-  @Inject
   private HostVersionDAO hostVersionDAO;
   @Inject
   private HostDAO hostDAO;
@@ -136,8 +132,6 @@ public class UpgradeActionTest {
   @Inject
   private UpgradeDAO upgradeDAO;
   @Inject
-  private ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO;
-  @Inject
   private StackDAO stackDAO;
   @Inject
   private AmbariMetaInfo ambariMetaInfo;
@@ -146,10 +140,12 @@ public class UpgradeActionTest {
   @Inject
   private ConfigFactory configFactory;
 
+  @Inject
+  private HostComponentStateDAO hostComponentStateDAO;
+
   private RepositoryVersionEntity repositoryVersion2110;
   private RepositoryVersionEntity repositoryVersion2111;
   private RepositoryVersionEntity repositoryVersion2201;
-  private RepositoryVersionEntity repositoryVersion2202;
 
   @Before
   public void setup() throws Exception {
@@ -169,7 +165,6 @@ public class UpgradeActionTest {
     repositoryVersion2110 = m_helper.getOrCreateRepositoryVersion(HDP_21_STACK, HDP_2_1_1_0);
     repositoryVersion2111 = m_helper.getOrCreateRepositoryVersion(HDP_21_STACK, HDP_2_1_1_1);
     repositoryVersion2201 = m_helper.getOrCreateRepositoryVersion(HDP_22_STACK, HDP_2_2_0_1);
-    repositoryVersion2202 = m_helper.getOrCreateRepositoryVersion(HDP_22_STACK, HDP_2_2_0_2);
   }
 
   @After
@@ -178,12 +173,11 @@ public class UpgradeActionTest {
     H2DatabaseCleaner.clearDatabase(m_injector.getProvider(EntityManager.class).get());
   }
 
-  private void makeDowngradeCluster(StackId sourceStack, String sourceRepo, StackId targetStack, String targetRepo) throws Exception {
+  private void makeDowngradeCluster(RepositoryVersionEntity sourceRepoVersion,
+      RepositoryVersionEntity targetRepoVersion) throws Exception {
     String hostName = "h1";
 
-    clusters.addCluster(clusterName, sourceStack);
-
-    Cluster c = clusters.getCluster(clusterName);
+    clusters.addCluster(clusterName, sourceRepoVersion.getStackId());
 
     // add a host component
     clusters.addHost(hostName);
@@ -195,97 +189,18 @@ public class UpgradeActionTest {
     hostAttributes.put("os_release_version", "6");
     host.setHostAttributes(hostAttributes);
 
-    // Create the starting repo version
-    c.createClusterVersion(sourceStack, sourceRepo, "admin", RepositoryVersionState.INSTALLING);
-    c.transitionClusterVersion(sourceStack, sourceRepo, RepositoryVersionState.CURRENT);
-
-    // Start upgrading the newer repo
-    c.createClusterVersion(targetStack, targetRepo, "admin", RepositoryVersionState.INSTALLING);
-    c.transitionClusterVersion(targetStack, targetRepo, RepositoryVersionState.INSTALLED);
-
-    c.mapHostVersions(Collections.singleton(hostName), c.getCurrentClusterVersion(),
-            RepositoryVersionState.CURRENT);
-
     HostVersionEntity entity = new HostVersionEntity();
     entity.setHostEntity(hostDAO.findByName(hostName));
-    entity.setRepositoryVersion(repoVersionDAO.findByStackAndVersion(targetStack, targetRepo));
+    entity.setRepositoryVersion(targetRepoVersion);
     entity.setState(RepositoryVersionState.INSTALLING);
     hostVersionDAO.create(entity);
   }
 
-  private void makeTwoUpgradesWhereLastDidNotComplete(
-      RepositoryVersionEntity sourceRepositoryVersion, RepositoryVersionEntity midRepositoryVersion,
-      RepositoryVersionEntity targetRepositoryVersion) throws Exception {
-    StackId sourceStack = sourceRepositoryVersion.getStackId();
-    StackId midStack = midRepositoryVersion.getStackId();
-    StackId targetStack = targetRepositoryVersion.getStackId();
-
-    String hostName = "h1";
-
-    clusters.addCluster(clusterName, sourceStack);
-
-    Cluster c = clusters.getCluster(clusterName);
-
-    // add a host component
-    clusters.addHost(hostName);
-
-    Host host = clusters.getHost(hostName);
-
-    Map<String, String> hostAttributes = new HashMap<>();
-    hostAttributes.put("os_family", "redhat");
-    hostAttributes.put("os_release_version", "6");
-    host.setHostAttributes(hostAttributes);
-
-    // Create the starting repo version
-    c.createClusterVersion(sourceStack, sourceRepositoryVersion.getVersion(), "admin", RepositoryVersionState.INSTALLING);
-    c.transitionClusterVersion(sourceStack, sourceRepositoryVersion.getVersion(), RepositoryVersionState.CURRENT);
-
-    // Start upgrading the mid repo
-    c.setDesiredStackVersion(midStack);
-    c.createClusterVersion(midStack, midRepositoryVersion.getVersion(), "admin", RepositoryVersionState.INSTALLING);
-    c.transitionClusterVersion(midStack, midRepositoryVersion.getVersion(), RepositoryVersionState.INSTALLED);
-    c.transitionClusterVersion(midStack, midRepositoryVersion.getVersion(), RepositoryVersionState.CURRENT);
-
-    // Set original version as INSTALLED
-    c.transitionClusterVersion(sourceStack, sourceRepositoryVersion.getVersion(), RepositoryVersionState.INSTALLED);
-
-    // Notice that we have not yet changed the cluster current stack to the mid stack to simulate
-    // the user skipping this step.
-    c.setDesiredStackVersion(targetStack);
-    c.createClusterVersion(targetStack, targetRepositoryVersion.getVersion(), "admin", RepositoryVersionState.INSTALLING);
-    c.transitionClusterVersion(targetStack, targetRepositoryVersion.getVersion(), RepositoryVersionState.INSTALLED);
-
-    // Create a host version for the starting repo in INSTALLED
-    HostVersionEntity entitySource = new HostVersionEntity();
-    entitySource.setHostEntity(hostDAO.findByName(hostName));
-    entitySource.setRepositoryVersion(repoVersionDAO.findByStackAndVersion(sourceStack, sourceRepositoryVersion.getVersion()));
-    entitySource.setState(RepositoryVersionState.INSTALL_FAILED);
-    hostVersionDAO.create(entitySource);
-
-    // Create a host version for the mid repo in CURRENT
-    c.mapHostVersions(Collections.singleton(hostName), c.getCurrentClusterVersion(),
-            RepositoryVersionState.CURRENT);
-
-    // Create a host version for the target repo in UPGRADED
-    HostVersionEntity entityTarget = new HostVersionEntity();
-    entityTarget.setHostEntity(hostDAO.findByName(hostName));
-    entityTarget.setRepositoryVersion(repoVersionDAO.findByStackAndVersion(targetStack, targetRepositoryVersion.getVersion()));
-    entityTarget.setState(RepositoryVersionState.INSTALLED);
-    hostVersionDAO.create(entityTarget);
-
-    createUpgrade(c, "", Direction.DOWNGRADE, midRepositoryVersion, targetRepositoryVersion);
-  }
-
-  private void createUpgradeClusterAndSourceRepo(StackId sourceStack, String sourceRepo,
-                                                 String hostName) throws Exception {
+  private Cluster createUpgradeCluster(
+      RepositoryVersionEntity sourceRepoVersion, String hostName) throws Exception {
 
-    clusters.addCluster(clusterName, sourceStack);
-
-    StackEntity stackEntitySource = stackDAO.find(sourceStack.getStackName(), sourceStack.getStackVersion());
-    assertNotNull(stackEntitySource);
-
-    Cluster c = clusters.getCluster(clusterName);
-    c.setDesiredStackVersion(sourceStack);
+    clusters.addCluster(clusterName, sourceRepoVersion.getStackId());
+    Cluster cluster = clusters.getCluster(clusterName);
 
     // add a host component
     clusters.addHost(hostName);
@@ -299,39 +214,30 @@ public class UpgradeActionTest {
 
     // without this, HostEntity will not have a relation to ClusterEntity
     clusters.mapHostToCluster(hostName, clusterName);
-    c.createClusterVersion(sourceStack, sourceRepo, "admin", RepositoryVersionState.INSTALLING);
-    c.transitionClusterVersion(sourceStack, sourceRepo, RepositoryVersionState.CURRENT);
 
-  }
+    HostVersionEntity entity = new HostVersionEntity(hostDAO.findByName(hostName),
+        sourceRepoVersion, RepositoryVersionState.INSTALLED);
 
-  private void createUpgradeClusterTargetRepo(StackId targetStack, String targetRepo,
-                                              String hostName) throws AmbariException {
-    Cluster c = clusters.getCluster(clusterName);
-    StackEntity stackEntityTarget = stackDAO.find(targetStack.getStackName(), targetStack.getStackVersion());
-    assertNotNull(stackEntityTarget);
+    hostVersionDAO.create(entity);
 
-    // Start upgrading the newer repo
-    c.createClusterVersion(targetStack, targetRepo, "admin", RepositoryVersionState.INSTALLING);
-    c.transitionClusterVersion(targetStack, targetRepo, RepositoryVersionState.INSTALLED);
-    c.setCurrentStackVersion(targetStack);
+    return cluster;
+  }
 
-    c.mapHostVersions(Collections.singleton(hostName), c.getCurrentClusterVersion(),
-            RepositoryVersionState.CURRENT);
+  private void createHostVersions(RepositoryVersionEntity targetRepoVersion,
+      String hostName) throws AmbariException {
+    Cluster c = clusters.getCluster(clusterName);
 
     // create a single host with the UPGRADED HostVersionEntity
     HostDAO hostDAO = m_injector.getInstance(HostDAO.class);
 
-    RepositoryVersionEntity repositoryVersionEntity = repoVersionDAO.findByStackAndVersion(
-            targetStack, targetRepo);
-
     HostVersionEntity entity = new HostVersionEntity(hostDAO.findByName(hostName),
-            repositoryVersionEntity, RepositoryVersionState.INSTALLED);
+        targetRepoVersion, RepositoryVersionState.INSTALLED);
 
     hostVersionDAO.create(entity);
 
     // verify the UPGRADED host versions were created successfully
-    List<HostVersionEntity> hostVersions = hostVersionDAO.findByClusterStackAndVersion(clusterName,
-            targetStack, targetRepo);
+    List<HostVersionEntity> hostVersions = hostVersionDAO.findHostVersionByClusterAndRepository(
+        c.getClusterId(), targetRepoVersion);
 
     assertEquals(1, hostVersions.size());
     assertEquals(RepositoryVersionState.INSTALLED, hostVersions.get(0).getState());
@@ -362,21 +268,16 @@ public class UpgradeActionTest {
     clusters.mapHostToCluster(hostName, clusterName);
 
     // Create the starting repo version
-    c.createClusterVersion(sourceStack, sourceRepo, "admin", RepositoryVersionState.INSTALLING);
-    c.transitionClusterVersion(sourceStack, sourceRepo, RepositoryVersionState.CURRENT);
+    sourceRepositoryVersion = m_helper.getOrCreateRepositoryVersion(sourceStack, sourceRepo);
   }
 
   private void makeCrossStackUpgradeTargetRepo(StackId targetStack, String targetRepo, String hostName) throws Exception{
     StackEntity stackEntityTarget = stackDAO.find(targetStack.getStackName(), targetStack.getStackVersion());
     assertNotNull(stackEntityTarget);
-    Cluster c = clusters.getCluster(clusterName);
 
-    // Start upgrading the newer repo
-    c.createClusterVersion(targetStack, targetRepo, "admin", RepositoryVersionState.INSTALLING);
-    c.transitionClusterVersion(targetStack, targetRepo, RepositoryVersionState.INSTALLED);
+    m_helper.getOrCreateRepositoryVersion(new StackId(stackEntityTarget), targetRepo);
 
-    c.mapHostVersions(Collections.singleton(hostName), c.getCurrentClusterVersion(),
-        RepositoryVersionState.CURRENT);
+    // Start upgrading the newer repo
 
     HostDAO hostDAO = m_injector.getInstance(HostDAO.class);
 
@@ -397,10 +298,6 @@ public class UpgradeActionTest {
     StackId sourceStack = HDP_21_STACK;
     StackId targetStack = HDP_22_STACK;
     String sourceRepo = HDP_2_1_1_0;
-    String targetRepo = HDP_2_2_0_1;
-    RepositoryVersionEntity sourceRepositoryVersion = repositoryVersion2110;
-    RepositoryVersionEntity targetRepositoryVersion = repositoryVersion2201;
-
     String hostName = "h1";
 
     // Must be a NON_ROLLING upgrade that jumps stacks in order for it to apply config changes.
@@ -415,24 +312,22 @@ public class UpgradeActionTest {
     Cluster cluster = clusters.getCluster(clusterName);
 
     // Install ZK and HDFS with some components
-    Service zk = installService(cluster, "ZOOKEEPER");
+    Service zk = installService(cluster, "ZOOKEEPER", repositoryVersion2110);
     addServiceComponent(cluster, zk, "ZOOKEEPER_SERVER");
     addServiceComponent(cluster, zk, "ZOOKEEPER_CLIENT");
     createNewServiceComponentHost(cluster, "ZOOKEEPER", "ZOOKEEPER_SERVER", "h1");
     createNewServiceComponentHost(cluster, "ZOOKEEPER", "ZOOKEEPER_CLIENT", "h1");
 
-    Service hdfs = installService(cluster, "HDFS");
+    Service hdfs = installService(cluster, "HDFS", repositoryVersion2110);
     addServiceComponent(cluster, hdfs, "NAMENODE");
     addServiceComponent(cluster, hdfs, "DATANODE");
     createNewServiceComponentHost(cluster, "HDFS", "NAMENODE", "h1");
     createNewServiceComponentHost(cluster, "HDFS", "DATANODE", "h1");
 
-    makeCrossStackUpgradeTargetRepo(targetStack, targetRepo, hostName);
-    createUpgrade(cluster, upgradePackName, Direction.UPGRADE, sourceRepositoryVersion,
-        targetRepositoryVersion);
+    makeCrossStackUpgradeTargetRepo(targetStack, repositoryVersion2201.getVersion(), hostName);
+    createUpgrade(cluster, repositoryVersion2201);
 
-    RepositoryVersionEntity targetRve = repoVersionDAO.findByStackNameAndVersion("HDP", targetRepo);
-    Assert.assertNotNull(targetRve);
+    Assert.assertNotNull(repositoryVersion2201);
 
     // Create some configs
     createConfigs(cluster);
@@ -440,9 +335,6 @@ public class UpgradeActionTest {
     Assert.assertFalse(configs.isEmpty());
 
     Map<String, String> commandParams = new HashMap<>();
-    commandParams.put(UpdateDesiredStackAction.COMMAND_PARAM_VERSION, targetRepo);
-    commandParams.put(UpdateDesiredStackAction.COMMAND_PARAM_UPGRADE_PACK, upgradePackName);
-
     ExecutionCommand executionCommand = new ExecutionCommand();
     executionCommand.setCommandParams(commandParams);
     Map<String, String> roleParams = new HashMap<>();
@@ -465,6 +357,7 @@ public class UpgradeActionTest {
 
     CommandReport report = action.execute(null);
     assertNotNull(report);
+
     assertEquals(HostRoleStatus.COMPLETED.name(), report.getStatus());
 
     List<ServiceConfigVersionResponse> configVersionsAfter = cluster.getServiceConfigVersions();
@@ -475,17 +368,11 @@ public class UpgradeActionTest {
 
   @Test
   public void testFinalizeDowngrade() throws Exception {
-    StackId sourceStack = HDP_21_STACK;
-    StackId targetStack = HDP_21_STACK;
-    String sourceRepo = HDP_2_1_1_0;
-    String targetRepo = HDP_2_1_1_1;
-    RepositoryVersionEntity sourceRepositoryVersion = repositoryVersion2111;
-    RepositoryVersionEntity targetRepositoryVersion = repositoryVersion2110;
+    makeDowngradeCluster(repositoryVersion2110, repositoryVersion2111);
 
-    makeDowngradeCluster(sourceStack, sourceRepo, targetStack, targetRepo);
     Cluster cluster = clusters.getCluster(clusterName);
-    createUpgrade(cluster, "", Direction.DOWNGRADE, sourceRepositoryVersion,
-        targetRepositoryVersion);
+
+    createUpgrade(cluster, repositoryVersion2111);
 
     Map<String, String> commandParams = new HashMap<>();
     ExecutionCommand executionCommand = new ExecutionCommand();
@@ -503,82 +390,27 @@ public class UpgradeActionTest {
     assertEquals(HostRoleStatus.COMPLETED.name(), report.getStatus());
 
     for (HostVersionEntity entity : hostVersionDAO.findByClusterAndHost(clusterName, "h1")) {
-      if (entity.getRepositoryVersion().getVersion().equals(sourceRepo)) {
+      if (StringUtils.equals(entity.getRepositoryVersion().getVersion(), repositoryVersion2110.getVersion())) {
         assertEquals(RepositoryVersionState.CURRENT, entity.getState());
-      } else if (entity.getRepositoryVersion().getVersion().equals(targetRepo)) {
-        assertEquals(RepositoryVersionState.INSTALLED, entity.getState());
-      }
-    }
-
-    for (ClusterVersionEntity entity : clusterVersionDAO.findByCluster(clusterName)) {
-      if (entity.getRepositoryVersion().getVersion().equals(sourceRepo)) {
-        assertEquals(RepositoryVersionState.CURRENT, entity.getState());
-      } else if (entity.getRepositoryVersion().getVersion().equals(targetRepo)) {
+      } else {
         assertEquals(RepositoryVersionState.INSTALLED, entity.getState());
       }
     }
   }
 
-  /**
-   * Test a case in which a customer performs an upgrade from HDP 2.1 to 2.2 (e.g., 2.2.0.0), but skips the step to
-   * finalize, which calls "Save DB State". Therefore, the cluster's current stack is still on HDP 2.1.
-   * They can still modify the database manually to mark HDP 2.2 as CURRENT in the cluster_version and then begin
-   * another upgrade to 2.2.0.2 and then downgrade.
-   * In the downgrade, the original stack is still 2.1 but the stack for the version marked as CURRENT is 2.2; this
-   * mismatch means that the downgrade should not delete configs and will report a warning.
-   * @throws Exception
-   */
-  @Test
-  public void testFinalizeDowngradeWhenDidNotFinalizePreviousUpgrade() throws Exception {
-    RepositoryVersionEntity sourceRepositoryVersion = repositoryVersion2110;
-    RepositoryVersionEntity midRepositoryVersion = repositoryVersion2201;
-    RepositoryVersionEntity targetRepositoryVersion = repositoryVersion2202;
-
-    makeTwoUpgradesWhereLastDidNotComplete(sourceRepositoryVersion, midRepositoryVersion, targetRepositoryVersion);
-
-    Map<String, String> commandParams = new HashMap<>();
-    ExecutionCommand executionCommand = new ExecutionCommand();
-    executionCommand.setCommandParams(commandParams);
-    executionCommand.setClusterName(clusterName);
-
-    HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null);
-    hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand));
-
-    finalizeUpgradeAction.setExecutionCommand(executionCommand);
-    finalizeUpgradeAction.setHostRoleCommand(hostRoleCommand);
-
-    CommandReport report = finalizeUpgradeAction.execute(null);
-    assertNotNull(report);
-    assertEquals(HostRoleStatus.FAILED.name(), report.getStatus());
-    assertTrue(report.getStdErr().contains(FinalizeUpgradeAction.PREVIOUS_UPGRADE_NOT_COMPLETED_MSG));
-  }
-
   @Test
   public void testFinalizeUpgrade() throws Exception {
-    StackId sourceStack = HDP_21_STACK;
-    StackId targetStack = HDP_21_STACK;
-    String sourceRepo = HDP_2_1_1_0;
-    String targetRepo = HDP_2_1_1_1;
-    RepositoryVersionEntity sourceRepositoryVersion = repositoryVersion2110;
-    RepositoryVersionEntity targetRepositoryVersion = repositoryVersion2111;
-
     String hostName = "h1";
 
-    createUpgradeClusterAndSourceRepo(sourceStack, sourceRepo, hostName);
-    createUpgradeClusterTargetRepo(targetStack, targetRepo, hostName);
+    createUpgradeCluster(repositoryVersion2110, hostName);
+    createHostVersions(repositoryVersion2111, hostName);
 
-    // Verify the repo before calling Finalize
-    AmbariCustomCommandExecutionHelper helper = m_injector.getInstance(AmbariCustomCommandExecutionHelper.class);
-    Host host = clusters.getHost("h1");
     Cluster cluster = clusters.getCluster(clusterName);
 
-    createUpgrade(cluster, sourceRepositoryVersion, targetRepositoryVersion);
+    createUpgrade(cluster, repositoryVersion2111);
 
     // Finalize the upgrade
     Map<String, String> commandParams = new HashMap<>();
-    commandParams.put(FinalizeUpgradeAction.UPGRADE_DIRECTION_KEY, "upgrade");
-    commandParams.put(FinalizeUpgradeAction.VERSION_KEY, targetRepo);
-
     ExecutionCommand executionCommand = new ExecutionCommand();
     executionCommand.setCommandParams(commandParams);
     executionCommand.setClusterName(clusterName);
@@ -589,9 +421,29 @@ public class UpgradeActionTest {
     finalizeUpgradeAction.setExecutionCommand(executionCommand);
     finalizeUpgradeAction.setHostRoleCommand(hostRoleCommand);
 
+    // this should fail since the host versions have not moved to current
     CommandReport report = finalizeUpgradeAction.execute(null);
-    assertNotNull(report);
+    assertEquals(HostRoleStatus.FAILED.name(), report.getStatus());
+
+    List<HostVersionEntity> hostVersions = hostVersionDAO.findHostVersionByClusterAndRepository(
+        cluster.getClusterId(), repositoryVersion2111);
+
+    for (HostVersionEntity hostVersion : hostVersions) {
+      hostVersion.setState(RepositoryVersionState.CURRENT);
+    }
+
+    report = finalizeUpgradeAction.execute(null);
     assertEquals(HostRoleStatus.COMPLETED.name(), report.getStatus());
+
+    hostVersions = hostVersionDAO.findHostVersionByClusterAndRepository(cluster.getClusterId(),
+        repositoryVersion2111);
+
+    for (HostVersionEntity hostVersion : hostVersions) {
+      Collection<HostComponentStateEntity> hostComponentStates = hostComponentStateDAO.findByHost(hostVersion.getHostName());
+      for (HostComponentStateEntity hostComponentStateEntity: hostComponentStates) {
+        assertEquals(UpgradeState.NONE, hostComponentStateEntity.getUpgradeState());
+      }
+    }
   }
 
   /**
@@ -602,16 +454,10 @@ public class UpgradeActionTest {
    */
   @Test
   public void testFinalizeWithHostsAlreadyCurrent() throws Exception {
-    StackId sourceStack = HDP_21_STACK;
-    StackId targetStack = HDP_21_STACK;
-    String sourceRepo = HDP_2_1_1_0;
-    String targetRepo = HDP_2_1_1_1;
-    RepositoryVersionEntity sourceRepositoryVersion = repositoryVersion2110;
-    RepositoryVersionEntity targetRepositoryVersion = repositoryVersion2111;
     String hostName = "h1";
 
-    createUpgradeClusterAndSourceRepo(sourceStack, sourceRepo, hostName);
-    createUpgradeClusterTargetRepo(targetStack, targetRepo, hostName);
+    createUpgradeCluster(repositoryVersion2110, hostName);
+    createHostVersions(repositoryVersion2111, hostName);
 
     // move the old version from CURRENT to INSTALLED and the new version from
     // UPGRADED to CURRENT - this will simulate what happens when a host is
@@ -628,63 +474,18 @@ public class UpgradeActionTest {
     }
 
     // Verify the repo before calling Finalize
-    AmbariCustomCommandExecutionHelper helper = m_injector.getInstance(AmbariCustomCommandExecutionHelper.class);
-    Host host = clusters.getHost("h1");
     Cluster cluster = clusters.getCluster(clusterName);
-    createUpgrade(cluster, sourceRepositoryVersion, targetRepositoryVersion);
-
-    // Finalize the upgrade
-    Map<String, String> commandParams = new HashMap<>();
-    commandParams.put(FinalizeUpgradeAction.UPGRADE_DIRECTION_KEY, "upgrade");
-    commandParams.put(FinalizeUpgradeAction.VERSION_KEY, targetRepo);
-
-    ExecutionCommand executionCommand = new ExecutionCommand();
-    executionCommand.setCommandParams(commandParams);
-    executionCommand.setClusterName(clusterName);
 
-    HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null);
-    hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand));
-
-    finalizeUpgradeAction.setExecutionCommand(executionCommand);
-    finalizeUpgradeAction.setHostRoleCommand(hostRoleCommand);
-
-    CommandReport report = finalizeUpgradeAction.execute(null);
-    assertNotNull(report);
-    assertEquals(HostRoleStatus.COMPLETED.name(), report.getStatus());
-  }
-
-  @Test
-  public void testFinalizeUpgradeAcrossStacks() throws Exception {
-    StackId sourceStack = HDP_21_STACK;
-    StackId targetStack = HDP_22_STACK;
-    String sourceRepo = HDP_2_1_1_0;
-    String targetRepo = HDP_2_2_0_1;
-    RepositoryVersionEntity sourceRepositoryVersion = repositoryVersion2110;
-    RepositoryVersionEntity targetRepositoryVersion = repositoryVersion2201;
-
-    String hostName = "h1";
-
-    makeCrossStackUpgradeClusterAndSourceRepo(sourceStack, sourceRepo, hostName);
-    makeCrossStackUpgradeTargetRepo(targetStack, targetRepo, hostName);
-
-    Cluster cluster = clusters.getCluster(clusterName);
-    createUpgrade(cluster, sourceRepositoryVersion, targetRepositoryVersion);
-
-    // setup the cluster for the upgrade across stacks
-    cluster.setCurrentStackVersion(sourceStack);
-    cluster.setDesiredStackVersion(targetStack);
+    createUpgrade(cluster, repositoryVersion2111);
 
+    // Finalize the upgrade
     Map<String, String> commandParams = new HashMap<>();
-    commandParams.put(FinalizeUpgradeAction.UPGRADE_DIRECTION_KEY, "upgrade");
-    commandParams.put(FinalizeUpgradeAction.VERSION_KEY, targetRepo);
-    commandParams.put(FinalizeUpgradeAction.TARGET_STACK_KEY, targetStack.getStackId());
 
     ExecutionCommand executionCommand = new ExecutionCommand();
     executionCommand.setCommandParams(commandParams);
     executionCommand.setClusterName(clusterName);
 
     HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null);
-
     hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand));
 
     finalizeUpgradeAction.setExecutionCommand(executionCommand);
@@ -693,263 +494,66 @@ public class UpgradeActionTest {
     CommandReport report = finalizeUpgradeAction.execute(null);
     assertNotNull(report);
     assertEquals(HostRoleStatus.COMPLETED.name(), report.getStatus());
-
-    StackId currentStackId = cluster.getCurrentStackVersion();
-    StackId desiredStackId = cluster.getDesiredStackVersion();
-
-    // verify current/desired stacks are updated to the new stack
-    assertEquals(desiredStackId, currentStackId);
-    assertEquals(targetStack, currentStackId);
-    assertEquals(targetStack, desiredStackId);
   }
 
   /**
-   * Tests some of the action items are completed when finalizing downgrade
-   * across stacks (HDP 2.2 -> HDP 2.3).
+   * Tests that all host versions are correct after upgrade. This test will
+   * ensure that the prior CURRENT versions are moved to INSTALLED while not
+   * touching any others.
    *
    * @throws Exception
    */
   @Test
-  public void testFinalizeDowngradeAcrossStacks() throws Exception {
-    StackId sourceStack = HDP_21_STACK;
-    StackId targetStack = HDP_22_STACK;
-    String sourceRepo = HDP_2_1_1_0;
-    String targetRepo = HDP_2_2_0_1;
-    RepositoryVersionEntity sourceRepositoryVersion = repositoryVersion2110;
-    RepositoryVersionEntity targetRepositoryVersion = repositoryVersion2201;
-
+  public void testHostVersionsAfterUpgrade() throws Exception {
     String hostName = "h1";
+    Cluster cluster = createUpgradeCluster(repositoryVersion2110, hostName);
+    createHostVersions(repositoryVersion2111, hostName);
+    createHostVersions(repositoryVersion2201, hostName);
 
-    makeCrossStackUpgradeClusterAndSourceRepo(sourceStack, sourceRepo, hostName);
-    Cluster cluster = clusters.getCluster(clusterName);
-
-    // install HDFS with some components
-    Service service = installService(cluster, "HDFS");
-    addServiceComponent(cluster, service, "NAMENODE");
-    addServiceComponent(cluster, service, "DATANODE");
-    createNewServiceComponentHost(cluster, "HDFS", "NAMENODE", "h1");
-    createNewServiceComponentHost(cluster, "HDFS", "DATANODE", "h1");
-
-    makeCrossStackUpgradeTargetRepo(targetStack, targetRepo, hostName);
-
-    // create some configs
-    createConfigs(cluster);
-
-    // setup the cluster for the upgrade across stacks
-    cluster.setCurrentStackVersion(sourceStack);
-    cluster.setDesiredStackVersion(targetStack);
-
-    // now that the desired version is set, we can create some new configs in
-    // the new stack version
-    createConfigs(cluster);
-
-    // verify we have configs in both HDP stacks
-    cluster = clusters.getCluster(clusterName);
-    Collection<Config> configs = cluster.getAllConfigs();
-    assertEquals(8, configs.size());
-
-    createUpgrade(cluster, "", Direction.DOWNGRADE, targetRepositoryVersion,
-        sourceRepositoryVersion);
-
-    ExecutionCommand executionCommand = new ExecutionCommand();
-    executionCommand.setClusterName(clusterName);
-
-    HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null);
+    // Install ZK with some components
+    Service zk = installService(cluster, "ZOOKEEPER", repositoryVersion2110);
+    addServiceComponent(cluster, zk, "ZOOKEEPER_SERVER");
+    addServiceComponent(cluster, zk, "ZOOKEEPER_CLIENT");
+    createNewServiceComponentHost(cluster, "ZOOKEEPER", "ZOOKEEPER_SERVER", hostName);
+    createNewServiceComponentHost(cluster, "ZOOKEEPER", "ZOOKEEPER_CLIENT", hostName);
 
-    hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand));
+    List<HostVersionEntity> hostVersions = hostVersionDAO.findAll();
+    assertEquals(3, hostVersions.size());
 
-    HostVersionDAO dao = m_injector.getInstance(HostVersionDAO.class);
+    // repo 2110 - CURRENT (upgrading from)
+    // repo 2111 - CURRENT (all hosts reported in during upgrade)
+    // repo 2201 - NOT_REQUIRED (different stack)
+    for (HostVersionEntity hostVersion : hostVersions) {
+      RepositoryVersionEntity hostRepoVersion = hostVersion.getRepositoryVersion();
+      if (repositoryVersion2110.equals(hostRepoVersion)) {
+        hostVersion.setState(RepositoryVersionState.CURRENT);
+      } else if (repositoryVersion2111.equals(hostRepoVersion)) {
+        hostVersion.setState(RepositoryVersionState.CURRENT);
+      } else {
+        hostVersion.setState(RepositoryVersionState.NOT_REQUIRED);
+      }
 
-    List<HostVersionEntity> hosts = dao.findByClusterStackAndVersion(clusterName, targetStack, targetRepo);
-    assertFalse(hosts.isEmpty());
-    for (HostVersionEntity hve : hosts) {
-      assertTrue(hve.getState() == RepositoryVersionState.INSTALLED);
+      hostVersionDAO.merge(hostVersion);
     }
 
-    finalizeUpgradeAction.setExecutionCommand(executionCommand);
-    finalizeUpgradeAction.setHostRoleCommand(hostRoleCommand);
-
-    CommandReport report = finalizeUpgradeAction.execute(null);
-    assertNotNull(report);
-    assertEquals(HostRoleStatus.COMPLETED.name(), report.getStatus());
-
-    StackId currentStackId = cluster.getCurrentStackVersion();
-    StackId desiredStackId = cluster.getDesiredStackVersion();
-
-    // verify current/desired stacks are back to normal
-    assertEquals(desiredStackId, currentStackId);
-    assertEquals(sourceStack, currentStackId);
-    assertEquals(sourceStack, desiredStackId);
-
-    // verify we have configs in only 1 stack
-    cluster = clusters.getCluster(clusterName);
-    configs = cluster.getAllConfigs();
-    assertEquals(4, configs.size());
+    // upgrade to 2111
+    createUpgrade(cluster, repositoryVersion2111);
 
-    hosts = dao.findByClusterStackAndVersion(clusterName, targetStack, targetRepo);
-    assertFalse(hosts.isEmpty());
-    for (HostVersionEntity hve : hosts) {
-      assertTrue(hve.getState() == RepositoryVersionState.INSTALLED);
+    // push all services to the correct repo version for finalize
+    Map<String, Service> services = cluster.getServices();
+    assertTrue(services.size() > 0);
+    for (Service service : services.values()) {
+      service.setDesiredRepositoryVersion(repositoryVersion2111);
     }
-  }
-
-  /**
-   * Tests that finalization can occur when the cluster state is
-   * {@link RepositoryVersionState#UPGRADING} if all of the hosts and components
-   * are reporting correct versions and states.
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testFinalizeUpgradeWithClusterStateInconsistencies() throws Exception {
-    StackId sourceStack = HDP_21_STACK;
-    StackId targetStack = HDP_22_STACK;
-    String sourceRepo = HDP_2_1_1_0;
-    String targetRepo = HDP_2_2_0_1;
-    RepositoryVersionEntity sourceRepositoryVersion = repositoryVersion2110;
-    RepositoryVersionEntity targetRepositoryVersion = repositoryVersion2201;
-
-    String hostName = "h1";
-
-    makeCrossStackUpgradeClusterAndSourceRepo(sourceStack, sourceRepo, hostName);
-
-    Cluster cluster = clusters.getCluster(clusterName);
-
-    Service service = installService(cluster, "HDFS");
-    addServiceComponent(cluster, service, "NAMENODE");
-    addServiceComponent(cluster, service, "DATANODE");
-    createNewServiceComponentHost(cluster, "HDFS", "NAMENODE", "h1");
-    createNewServiceComponentHost(cluster, "HDFS", "DATANODE", "h1");
-
-    makeCrossStackUpgradeTargetRepo(targetStack, targetRepo, hostName);
-    createUpgrade(cluster, sourceRepositoryVersion, targetRepositoryVersion);
-
-    // create some configs
-    createConfigs(cluster);
-
-    // setup the cluster for the upgrade across stacks
-    cluster.setCurrentStackVersion(sourceStack);
-    cluster.setDesiredStackVersion(targetStack);
-
-    // set the SCH versions to the new stack so that the finalize action is
-    // happy
-    cluster.getServiceComponentHosts("HDFS", "NAMENODE").get(0).setVersion(targetRepo);
-    cluster.getServiceComponentHosts("HDFS", "DATANODE").get(0).setVersion(targetRepo);
-
-    // inject an unhappy path where the cluster repo version is still UPGRADING
-    // even though all of the hosts are UPGRADED
-    ClusterVersionEntity upgradingClusterVersion = clusterVersionDAO.findByClusterAndStackAndVersion(
-            clusterName, HDP_22_STACK, targetRepo);
-
-    upgradingClusterVersion.setState(RepositoryVersionState.INSTALLING);
-    upgradingClusterVersion = clusterVersionDAO.merge(upgradingClusterVersion);
 
-    // verify the conditions for the test are met properly
-    upgradingClusterVersion = clusterVersionDAO.findByClusterAndStackAndVersion(clusterName, HDP_22_STACK, targetRepo);
-    List<HostVersionEntity> hostVersions = hostVersionDAO.findByClusterStackAndVersion(clusterName, HDP_22_STACK, targetRepo);
-
-    assertEquals(RepositoryVersionState.INSTALLING, upgradingClusterVersion.getState());
-    assertTrue(hostVersions.size() > 0);
-    for (HostVersionEntity hostVersion : hostVersions) {
-      assertEquals(RepositoryVersionState.INSTALLED, hostVersion.getState());
+    // push all components to the correct version
+    List<HostComponentStateEntity> hostComponentStates = hostComponentStateDAO.findByHost(hostName);
+    for (HostComponentStateEntity hostComponentState : hostComponentStates) {
+      hostComponentState.setVersion(repositoryVersion2111.getVersion());
+      hostComponentStateDAO.merge(hostComponentState);
     }
 
-    // now finalize and ensure we can transition from UPGRADING to UPGRADED
-    // automatically before CURRENT
-    Map<String, String> commandParams = new HashMap<>();
-    commandParams.put(FinalizeUpgradeAction.UPGRADE_DIRECTION_KEY, "upgrade");
-    commandParams.put(FinalizeUpgradeAction.VERSION_KEY, targetRepo);
-    commandParams.put(FinalizeUpgradeAction.TARGET_STACK_KEY, targetStack.getStackId());
-
-    ExecutionCommand executionCommand = new ExecutionCommand();
-    executionCommand.setCommandParams(commandParams);
-    executionCommand.setClusterName(clusterName);
-
-    HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null);
-
-    hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand));
-
-    finalizeUpgradeAction.setExecutionCommand(executionCommand);
-    finalizeUpgradeAction.setHostRoleCommand(hostRoleCommand);
-
-    CommandReport report = finalizeUpgradeAction.execute(null);
-    assertNotNull(report);
-    assertEquals(HostRoleStatus.COMPLETED.name(), report.getStatus());
-
-    StackId currentStackId = cluster.getCurrentStackVersion();
-    StackId desiredStackId = cluster.getDesiredStackVersion();
-
-    // verify current/desired stacks are updated to the new stack
-    assertEquals(desiredStackId, currentStackId);
-    assertEquals(targetStack, currentStackId);
-    assertEquals(targetStack, desiredStackId);
-  }
-
-  @Test
-  public void testUpgradeHistory() throws Exception {
-    StackId sourceStack = HDP_21_STACK;
-    StackId targetStack = HDP_21_STACK;
-    String sourceRepo = repositoryVersion2110.getVersion();
-    String targetRepo = repositoryVersion2111.getVersion();
-    String hostName = "h1";
-
-    createUpgradeClusterAndSourceRepo(sourceStack, sourceRepo, hostName);
-
-    // Verify the repo before calling Finalize
-    AmbariCustomCommandExecutionHelper helper = m_injector.getInstance(AmbariCustomCommandExecutionHelper.class);
-    Host host = clusters.getHost("h1");
-    Cluster cluster = clusters.getCluster(clusterName);
-
-    // install HDFS with some components
-    Service service = installService(cluster, "HDFS");
-    addServiceComponent(cluster, service, "NAMENODE");
-    addServiceComponent(cluster, service, "DATANODE");
-    ServiceComponentHost nnSCH = createNewServiceComponentHost(cluster, "HDFS", "NAMENODE", "h1");
-    ServiceComponentHost dnSCH = createNewServiceComponentHost(cluster, "HDFS", "DATANODE", "h1");
-
-    createUpgradeClusterTargetRepo(targetStack, targetRepo, hostName);
-
-    // fake their upgrade
-    nnSCH.setStackVersion(nnSCH.getDesiredStackVersion());
-    nnSCH.setVersion(targetRepo);
-    dnSCH.setStackVersion(nnSCH.getDesiredStackVersion());
-    dnSCH.setVersion(targetRepo);
-
-    // create some entities for the finalize action to work with for patch
-    // history
-    RequestEntity requestEntity = new RequestEntity();
-    requestEntity.setClusterId(cluster.getClusterId());
-    requestEntity.setRequestId(1L);
-    requestEntity.setStartTime(System.currentTimeMillis());
-    requestEntity.setCreateTime(System.currentTimeMillis());
-    requestDAO.create(requestEntity);
-
-    UpgradeEntity upgradeEntity = new UpgradeEntity();
-    upgradeEntity.setId(1L);
-    upgradeEntity.setClusterId(cluster.getClusterId());
-    upgradeEntity.setRequestEntity(requestEntity);
-    upgradeEntity.setUpgradePackage("");
-    upgradeEntity.setFromRepositoryVersion(repositoryVersion2110);
-    upgradeEntity.setToRepositoryVersion(repositoryVersion2111);
-    upgradeEntity.setUpgradeType(UpgradeType.NON_ROLLING);
-    upgradeDAO.create(upgradeEntity);
-
-    cluster.setUpgradeEntity(upgradeEntity);
-
-    // verify that no history exist exists yet
-    List<ServiceComponentHistoryEntity> historyEntites = serviceComponentDesiredStateDAO.findHistory(
-            cluster.getClusterId(), nnSCH.getServiceName(),
-            nnSCH.getServiceComponentName());
-
-    assertEquals(0, historyEntites.size());
-
-    // Finalize the upgrade, passing in the request ID so that history is
-    // created
     Map<String, String> commandParams = new HashMap<>();
-    commandParams.put(FinalizeUpgradeAction.REQUEST_ID, String.valueOf(requestEntity.getRequestId()));
-    commandParams.put(FinalizeUpgradeAction.UPGRADE_DIRECTION_KEY, "upgrade");
-    commandParams.put(FinalizeUpgradeAction.VERSION_KEY, targetRepo);
-
     ExecutionCommand executionCommand = new ExecutionCommand();
     executionCommand.setCommandParams(commandParams);
     executionCommand.setClusterName(clusterName);
@@ -960,22 +564,27 @@ public class UpgradeActionTest {
     finalizeUpgradeAction.setExecutionCommand(executionCommand);
     finalizeUpgradeAction.setHostRoleCommand(hostRoleCommand);
 
+    // finalize
     CommandReport report = finalizeUpgradeAction.execute(null);
     assertNotNull(report);
     assertEquals(HostRoleStatus.COMPLETED.name(), report.getStatus());
 
-    // ensure that history now exists
-    historyEntites = serviceComponentDesiredStateDAO.findHistory(cluster.getClusterId(),
-            nnSCH.getServiceName(), nnSCH.getServiceComponentName());
-
-    assertEquals(1, historyEntites.size());
+    for (HostVersionEntity hostVersion : hostVersions) {
+      RepositoryVersionEntity hostRepoVersion = hostVersion.getRepositoryVersion();
+      if (repositoryVersion2110.equals(hostRepoVersion)) {
+        assertEquals(RepositoryVersionState.INSTALLED, hostVersion.getState());
+      } else if (repositoryVersion2111.equals(hostRepoVersion)) {
+        assertEquals(RepositoryVersionState.CURRENT, hostVersion.getState());
+      } else {
+        assertEquals(RepositoryVersionState.NOT_REQUIRED, hostVersion.getState());
+      }
+    }
   }
 
-
   private ServiceComponentHost createNewServiceComponentHost(Cluster cluster, String svc,
                                                              String svcComponent, String hostName) throws AmbariException {
     Assert.assertNotNull(cluster.getConfigGroups());
-    Service s = installService(cluster, svc);
+    Service s = installService(cluster, svc, sourceRepositoryVersion);
     ServiceComponent sc = addServiceComponent(cluster, s, svcComponent);
 
     ServiceComponentHost sch = serviceComponentHostFactory.createNew(sc, hostName);
@@ -983,18 +592,17 @@ public class UpgradeActionTest {
     sc.addServiceComponentHost(sch);
     sch.setDesiredState(State.INSTALLED);
     sch.setState(State.INSTALLED);
-    sch.setDesiredStackVersion(cluster.getDesiredStackVersion());
-    sch.setStackVersion(cluster.getCurrentStackVersion());
     return sch;
   }
 
-  private Service installService(Cluster cluster, String serviceName) throws AmbariException {
+  private Service installService(Cluster cluster, String serviceName,
+      RepositoryVersionEntity repositoryVersionEntity) throws AmbariException {
     Service service = null;
 
     try {
       service = cluster.getService(serviceName);
     } catch (ServiceNotFoundException e) {
-      service = serviceFactory.createNew(cluster, serviceName);
+      service = serviceFactory.createNew(cluster, serviceName, repositoryVersionEntity);
       cluster.addService(service);
     }
 
@@ -1002,7 +610,7 @@ public class UpgradeActionTest {
   }
 
   private ServiceComponent addServiceComponent(Cluster cluster, Service service,
-                                               String componentName) throws AmbariException {
+      String componentName) throws AmbariException {
     ServiceComponent serviceComponent = null;
     try {
       serviceComponent = service.getServiceComponent(componentName);
@@ -1043,19 +651,8 @@ public class UpgradeActionTest {
   /**
    * Creates an upgrade and associates it with the cluster.
    */
-  private UpgradeEntity createUpgrade(Cluster cluster,
-      RepositoryVersionEntity sourceRepositoryVersion,
-      RepositoryVersionEntity targetRepositoryVersion) throws Exception {
-    return createUpgrade(cluster, "", Direction.UPGRADE, sourceRepositoryVersion,
-        targetRepositoryVersion);
-  }
-
-  /**
-   * Creates an upgrade and associates it with the cluster.
-   */
-  private UpgradeEntity createUpgrade(Cluster cluster, String upgradePackName, Direction direction,
-      RepositoryVersionEntity sourceRepositoryVersion,
-      RepositoryVersionEntity targetRepositoryVersion) throws Exception {
+  private UpgradeEntity createUpgrade(Cluster cluster, RepositoryVersionEntity repositoryVersion)
+      throws Exception {
 
     // create some entities for the finalize action to work with for patch
     // history
@@ -1070,14 +667,27 @@ public class UpgradeActionTest {
     upgradeEntity.setId(1L);
     upgradeEntity.setClusterId(cluster.getClusterId());
     upgradeEntity.setRequestEntity(requestEntity);
-    upgradeEntity.setUpgradePackage(upgradePackName);
-    upgradeEntity.setFromRepositoryVersion(sourceRepositoryVersion);
-    upgradeEntity.setToRepositoryVersion(targetRepositoryVersion);
+    upgradeEntity.setUpgradePackage("");
+    upgradeEntity.setRepositoryVersion(repositoryVersion);
     upgradeEntity.setUpgradeType(UpgradeType.NON_ROLLING);
-    upgradeEntity.setDirection(direction);
+
+    Map<String, Service> services = cluster.getServices();
+    for (String serviceName : services.keySet()) {
+      Service service = services.get(serviceName);
+      Map<String, ServiceComponent> components = service.getServiceComponents();
+      for (String componentName : components.keySet()) {
+        UpgradeHistoryEntity history = new UpgradeHistoryEntity();
+        history.setUpgrade(upgradeEntity);
+        history.setServiceName(serviceName);
+        history.setComponentName(componentName);
+        history.setFromRepositoryVersion(service.getDesiredRepositoryVersion());
+        history.setTargetRepositoryVersion(repositoryVersion);
+        upgradeEntity.addHistory(history);
+      }
+    }
 
     upgradeDAO.create(upgradeEntity);
     cluster.setUpgradeEntity(upgradeEntity);
     return upgradeEntity;
   }
-}
+}
\ No newline at end of file
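For context, the reworked test helpers above are composed roughly as follows. This is an
illustrative sketch, not part of the commit; every identifier is taken from the diff, and the
surrounding test fixture (injected DAOs and repository version fields of UpgradeActionTest)
is assumed to be set up as shown above.

    String hostName = "h1";

    // the cluster is created against the source repository version's stack
    Cluster cluster = createUpgradeCluster(repositoryVersion2110, hostName);

    // host versions for the target repository are registered separately
    createHostVersions(repositoryVersion2111, hostName);

    // services now take an explicit desired repository version
    Service zk = installService(cluster, "ZOOKEEPER", repositoryVersion2110);
    addServiceComponent(cluster, zk, "ZOOKEEPER_SERVER");
    createNewServiceComponentHost(cluster, "ZOOKEEPER", "ZOOKEEPER_SERVER", hostName);

    // the upgrade is created from the target repository version alone; per-component
    // UpgradeHistoryEntity records are built inside createUpgrade()
    createUpgrade(cluster, repositoryVersion2111);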

http://git-wip-us.apache.org/repos/asf/ambari/blob/56362fd6/ambari-server/src/test/java/org/apache/ambari/server/state/CheckHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/CheckHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/CheckHelperTest.java
index bfb1db4..215cbd6 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/CheckHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/CheckHelperTest.java
@@ -22,8 +22,6 @@ import java.util.ArrayList;
 import java.util.LinkedList;
 import java.util.List;
 
-import junit.framework.Assert;
-
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.ClusterNotFoundException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
@@ -32,7 +30,6 @@ import org.apache.ambari.server.checks.CheckDescription;
 import org.apache.ambari.server.checks.ServicesUpCheck;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.PrereqCheckRequest;
-import org.apache.ambari.server.orm.dao.ClusterVersionDAO;
 import org.apache.ambari.server.orm.dao.HostVersionDAO;
 import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
 import org.apache.ambari.server.orm.dao.UpgradeDAO;
@@ -51,6 +48,7 @@ import com.google.inject.Guice;
 import com.google.inject.Injector;
 import com.google.inject.util.Providers;
 
+import junit.framework.Assert;
 
 /**
  * Tests the {@link CheckHelper} class
@@ -69,7 +67,7 @@ public class CheckHelperTest {
   public void testPreUpgradeCheck() throws Exception {
     final CheckHelper helper = new CheckHelper();
     Configuration configuration = EasyMock.createNiceMock(Configuration.class);
-    List<AbstractCheckDescriptor> updateChecksRegistry = new ArrayList<AbstractCheckDescriptor>();
+    List<AbstractCheckDescriptor> updateChecksRegistry = new ArrayList<>();
     AbstractCheckDescriptor descriptor = EasyMock.createNiceMock(AbstractCheckDescriptor.class);
 
     EasyMock.expect(configuration.isUpgradePrecheckBypass()).andReturn(false);
@@ -90,7 +88,7 @@ public class CheckHelperTest {
   public void testPreUpgradeCheckNotApplicable() throws Exception {
     final CheckHelper helper = new CheckHelper();
     Configuration configuration = EasyMock.createNiceMock(Configuration.class);
-    List<AbstractCheckDescriptor> updateChecksRegistry = new ArrayList<AbstractCheckDescriptor>();
+    List<AbstractCheckDescriptor> updateChecksRegistry = new ArrayList<>();
     AbstractCheckDescriptor descriptor = EasyMock.createNiceMock(AbstractCheckDescriptor.class);
     EasyMock.expect(configuration.isUpgradePrecheckBypass()).andReturn(false);
     EasyMock.expect(descriptor.isApplicable(EasyMock.<PrereqCheckRequest> anyObject())).andReturn(false);
@@ -106,7 +104,7 @@ public class CheckHelperTest {
   @Test
   public void testPreUpgradeCheckThrowsException() throws Exception {
     final CheckHelper helper = new CheckHelper();
-    List<AbstractCheckDescriptor> updateChecksRegistry = new ArrayList<AbstractCheckDescriptor>();
+    List<AbstractCheckDescriptor> updateChecksRegistry = new ArrayList<>();
     Configuration configuration = EasyMock.createNiceMock(Configuration.class);
     AbstractCheckDescriptor descriptor = EasyMock.createNiceMock(AbstractCheckDescriptor.class);
 
@@ -131,7 +129,7 @@ public class CheckHelperTest {
     // a PrerequisiteCheck object whose status is FAIL.
     final CheckHelperMock helper =  new CheckHelperMock();
     Configuration configuration = EasyMock.createNiceMock(Configuration.class);
-    List<AbstractCheckDescriptor> updateChecksRegistry = new ArrayList<AbstractCheckDescriptor>();
+    List<AbstractCheckDescriptor> updateChecksRegistry = new ArrayList<>();
 
     PrereqCheckRequest checkRequest = EasyMock.createNiceMock(PrereqCheckRequest.class);
     EasyMock.expect(configuration.isUpgradePrecheckBypass()).andReturn(true);
@@ -166,7 +164,6 @@ public class CheckHelperTest {
       @Override
       protected void configure() {
         bind(Clusters.class).toInstance(clusters);
-        bind(ClusterVersionDAO.class).toProvider(Providers.<ClusterVersionDAO>of(null));
         bind(HostVersionDAO.class).toProvider(Providers.<HostVersionDAO>of(null));
         bind(UpgradeDAO.class).toProvider(Providers.<UpgradeDAO>of(null));
         bind(RepositoryVersionDAO.class).toProvider(Providers.<RepositoryVersionDAO>of(null));
@@ -178,7 +175,7 @@ public class CheckHelperTest {
     });
 
     final CheckHelper helper = injector.getInstance(CheckHelper.class);
-    List<AbstractCheckDescriptor> updateChecksRegistry = new ArrayList<AbstractCheckDescriptor>();
+    List<AbstractCheckDescriptor> updateChecksRegistry = new ArrayList<>();
 
     EasyMock.expect(configuration.isUpgradePrecheckBypass()).andReturn(false);
 

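With the ClusterVersionDAO binding removed, the Guice test module in CheckHelperTest looks
roughly like this. This is an illustrative sketch, not part of the commit; identifiers are
taken from the diff and the module's other bindings are elided.

    final Injector injector = Guice.createInjector(new AbstractModule() {
      @Override
      protected void configure() {
        bind(Clusters.class).toInstance(clusters);
        // ClusterVersionDAO is no longer bound; the remaining DAOs are stubbed with null providers
        bind(HostVersionDAO.class).toProvider(Providers.<HostVersionDAO>of(null));
        bind(UpgradeDAO.class).toProvider(Providers.<UpgradeDAO>of(null));
        bind(RepositoryVersionDAO.class).toProvider(Providers.<RepositoryVersionDAO>of(null));
      }
    });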
http://git-wip-us.apache.org/repos/asf/ambari/blob/56362fd6/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
index b1c10f5..21ce2c2 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -38,6 +38,7 @@ import javax.persistence.EntityManager;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.H2DatabaseCleaner;
+import org.apache.ambari.server.actionmanager.HostRoleCommandFactory;
 import org.apache.ambari.server.actionmanager.RequestFactory;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.controller.AmbariCustomCommandExecutionHelper;
@@ -48,7 +49,9 @@ import org.apache.ambari.server.controller.spi.ClusterController;
 import org.apache.ambari.server.orm.DBAccessor;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.orm.OrmTestHelper;
 import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.security.SecurityHelper;
 import org.apache.ambari.server.security.TestAuthenticationFactory;
 import org.apache.ambari.server.stack.StackManagerFactory;
@@ -103,8 +106,14 @@ public class ConfigHelperTest {
       metaInfo = injector.getInstance(AmbariMetaInfo.class);
       configFactory = injector.getInstance(ConfigFactory.class);
 
+      StackId stackId = new StackId("HDP-2.0.6");
+      OrmTestHelper helper = injector.getInstance(OrmTestHelper.class);
+      helper.createStack(stackId);
+
+      RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(stackId, "2.0.6");
+
       clusterName = "c1";
-      clusters.addCluster(clusterName, new StackId("HDP-2.0.6"));
+      clusters.addCluster(clusterName, stackId);
       cluster = clusters.getCluster(clusterName);
       Assert.assertNotNull(cluster);
       clusters.addHost("h1");
@@ -146,6 +155,8 @@ public class ConfigHelperTest {
       cr2.setType("flume-conf");
       cr2.setVersionTag("version1");
 
+      cluster.addService("FLUME", repositoryVersion);
+      cluster.addService("OOZIE", repositoryVersion);
 
       final ClusterRequest clusterRequest2 =
           new ClusterRequest(cluster.getClusterId(), clusterName,
@@ -753,6 +764,28 @@ public class ConfigHelperTest {
     }
 
     @Test
+    public void testFilterInvalidPropertyValues() {
+      Map<PropertyInfo, String> properties = new HashMap<>();
+      PropertyInfo prop1 = new PropertyInfo();
+      prop1.setName("1");
+      PropertyInfo prop2 = new PropertyInfo();
+      prop1.setName("2");
+      PropertyInfo prop3 = new PropertyInfo();
+      prop1.setName("3");
+      PropertyInfo prop4 = new PropertyInfo();
+      prop1.setName("4");
+
+      properties.put(prop1, "/tmp");
+      properties.put(prop2, "null");
+      properties.put(prop3, "");
+      properties.put(prop4, null);
+
+      Set<String> resultSet = configHelper.filterInvalidPropertyValues(properties, "testlist");
+      Assert.assertEquals(1, resultSet.size());
+      Assert.assertEquals(resultSet.iterator().next(), "/tmp");
+    }
+
+    @Test
     public void testMergeAttributesWithNullProperties() throws Exception {
       Map<String, Map<String, String>> persistedAttributes = new HashMap<>();
       Map<String, String> persistedFinalAttrs = new HashMap<>();
@@ -804,7 +837,8 @@ public class ConfigHelperTest {
       updates.put("new-property", "new-value");
       updates.put("fs.trash.interval", "updated-value");
       Collection<String> removals = Collections.singletonList("ipc.client.connect.max.retries");
-      configHelper.updateConfigType(cluster, managementController, "core-site", updates, removals, "admin", "Test note");
+      configHelper.updateConfigType(cluster, cluster.getCurrentStackVersion(), managementController,
+          "core-site", updates, removals, "admin", "Test note");
 
 
       Config updatedConfig = cluster.getDesiredConfigByType("core-site");
@@ -842,8 +876,8 @@ public class ConfigHelperTest {
       updates.put("oozie.authentication.type", "kerberos");
       updates.put("oozie.service.HadoopAccessorService.kerberos.enabled", "true");
 
-      configHelper.updateConfigType(cluster, managementController, "oozie-site", updates, null, "admin", "Test " +
-          "note");
+      configHelper.updateConfigType(cluster, cluster.getCurrentStackVersion(), managementController,
+          "oozie-site", updates, null, "admin", "Test " + "note");
 
       Config updatedConfig = cluster.getDesiredConfigByType("oozie-site");
       // Config tag updated
@@ -870,7 +904,8 @@ public class ConfigHelperTest {
       List<String> removals = new ArrayList<>();
       removals.add("timeline.service.operating.mode");
 
-      configHelper.updateConfigType(cluster, managementController, "ams-site", null, removals, "admin", "Test note");
+      configHelper.updateConfigType(cluster, cluster.getCurrentStackVersion(), managementController,
+          "ams-site", null, removals, "admin", "Test note");
 
       Config updatedConfig = cluster.getDesiredConfigByType("ams-site");
       // Config tag updated
@@ -892,15 +927,21 @@ public class ConfigHelperTest {
       hc.setDefaultVersionTag("version2");
       schReturn.put("flume-conf", hc);
 
+      ServiceComponent sc = createNiceMock(ServiceComponent.class);
+
       // set up mocks
       ServiceComponentHost sch = createNiceMock(ServiceComponentHost.class);
+      expect(sc.getDesiredStackId()).andReturn(cluster.getDesiredStackVersion()).anyTimes();
+
       // set up expectations
       expect(sch.getActualConfigs()).andReturn(schReturn).times(6);
       expect(sch.getHostName()).andReturn("h1").anyTimes();
       expect(sch.getClusterId()).andReturn(cluster.getClusterId()).anyTimes();
       expect(sch.getServiceName()).andReturn("FLUME").anyTimes();
       expect(sch.getServiceComponentName()).andReturn("FLUME_HANDLER").anyTimes();
-      replay(sch);
+      expect(sch.getServiceComponent()).andReturn(sc).anyTimes();
+
+      replay(sc, sch);
       // Cluster level config changes
       Assert.assertTrue(configHelper.isStaleConfigs(sch, null));
 
@@ -980,6 +1021,7 @@ public class ConfigHelperTest {
           bind(Clusters.class).toInstance(createNiceMock(Clusters.class));
           bind(ClusterController.class).toInstance(clusterController);
           bind(StackManagerFactory.class).toInstance(createNiceMock(StackManagerFactory.class));
+          bind(HostRoleCommandFactory.class).toInstance(createNiceMock(HostRoleCommandFactory.class));
           bind(HostRoleCommandDAO.class).toInstance(createNiceMock(HostRoleCommandDAO.class));
         }
       });
@@ -1000,6 +1042,7 @@ public class ConfigHelperTest {
       Cluster mockCluster = createStrictMock(Cluster.class);
       StackId mockStackVersion = createStrictMock(StackId.class);
       AmbariMetaInfo mockAmbariMetaInfo = injector.getInstance(AmbariMetaInfo.class);
+      Service mockService = createStrictMock(Service.class);
       ServiceInfo mockServiceInfo = createStrictMock(ServiceInfo.class);
 
       PropertyInfo mockPropertyInfo1 = createStrictMock(PropertyInfo.class);
@@ -1007,8 +1050,8 @@ public class ConfigHelperTest {
 
       List<PropertyInfo> serviceProperties = Arrays.asList(mockPropertyInfo1, mockPropertyInfo2);
 
-      expect(mockCluster.getCurrentStackVersion()).andReturn(mockStackVersion).once();
-
+      expect(mockCluster.getService("SERVICE")).andReturn(mockService).once();
+      expect(mockService.getDesiredStackId()).andReturn(mockStackVersion).once();
       expect(mockStackVersion.getStackName()).andReturn("HDP").once();
       expect(mockStackVersion.getStackVersion()).andReturn("2.2").once();
 
@@ -1016,7 +1059,7 @@ public class ConfigHelperTest {
 
       expect(mockServiceInfo.getProperties()).andReturn(serviceProperties).once();
 
-      replay(mockAmbariMetaInfo, mockCluster, mockStackVersion, mockServiceInfo, mockPropertyInfo1, mockPropertyInfo2);
+      replay(mockAmbariMetaInfo, mockCluster, mockService, mockStackVersion, mockServiceInfo, mockPropertyInfo1, mockPropertyInfo2);
 
       mockAmbariMetaInfo.init();
 
@@ -1097,4 +1140,4 @@ public class ConfigHelperTest {
       verify(mockAmbariMetaInfo, mockStackVersion, mockServiceInfo, mockPropertyInfo1, mockPropertyInfo2);
     }
   }
-}
+}
\ No newline at end of file
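
[Editor's note on the ConfigHelperTest changes above: the tests now register services against a repository version before issuing configuration requests, and updateConfigType takes an explicit StackId to resolve configuration definitions against. A minimal sketch of that call pattern, condensed from the test setup above; the surrounding variables (injector, cluster, configHelper, managementController) are assumed to exist exactly as they do in the test class, and this snippet is illustrative only, not part of the patch:

    // Sketch only: mirrors the updated test setup.
    StackId stackId = new StackId("HDP-2.0.6");
    OrmTestHelper helper = injector.getInstance(OrmTestHelper.class);
    helper.createStack(stackId);
    RepositoryVersionEntity repoVersion = helper.getOrCreateRepositoryVersion(stackId, "2.0.6");

    // Services are now registered with a repository version up front.
    cluster.addService("OOZIE", repoVersion);

    // updateConfigType now takes the stack id used to resolve the config type.
    Map<String, String> updates = new HashMap<>();
    updates.put("oozie.authentication.type", "kerberos");
    configHelper.updateConfigType(cluster, cluster.getCurrentStackVersion(),
        managementController, "oozie-site", updates, null, "admin", "Test note");

The assertions on the resulting desired config are unchanged from the test body above.]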

http://git-wip-us.apache.org/repos/asf/ambari/blob/56362fd6/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
index 780398c..e503beb 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -23,15 +23,12 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.fail;
 
 import java.sql.SQLException;
-import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.H2DatabaseCleaner;
-import org.apache.ambari.server.actionmanager.HostRoleStatus;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.controller.ServiceComponentResponse;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
@@ -40,21 +37,15 @@ import org.apache.ambari.server.orm.dao.HostComponentDesiredStateDAO;
 import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
 import org.apache.ambari.server.orm.dao.HostDAO;
 import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
-import org.apache.ambari.server.orm.dao.RequestDAO;
 import org.apache.ambari.server.orm.dao.ServiceComponentDesiredStateDAO;
-import org.apache.ambari.server.orm.dao.UpgradeDAO;
+import org.apache.ambari.server.orm.dao.StackDAO;
 import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
 import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
 import org.apache.ambari.server.orm.entities.HostEntity;
 import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
-import org.apache.ambari.server.orm.entities.RequestEntity;
 import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity;
-import org.apache.ambari.server.orm.entities.ServiceComponentHistoryEntity;
 import org.apache.ambari.server.orm.entities.ServiceComponentVersionEntity;
-import org.apache.ambari.server.orm.entities.StageEntity;
-import org.apache.ambari.server.orm.entities.UpgradeEntity;
-import org.apache.ambari.server.state.stack.upgrade.Direction;
-import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
+import org.apache.ambari.server.orm.entities.StackEntity;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -75,7 +66,6 @@ public class ServiceComponentTest {
   private ServiceFactory serviceFactory;
   private ServiceComponentFactory serviceComponentFactory;
   private ServiceComponentHostFactory serviceComponentHostFactory;
-  private AmbariMetaInfo metaInfo;
   private OrmTestHelper helper;
   private HostDAO hostDAO;
 
@@ -91,22 +81,24 @@ public class ServiceComponentTest {
         ServiceComponentHostFactory.class);
     helper = injector.getInstance(OrmTestHelper.class);
     hostDAO = injector.getInstance(HostDAO.class);
-    metaInfo = injector.getInstance(AmbariMetaInfo.class);
 
     clusterName = "foo";
     serviceName = "HDFS";
 
     StackId stackId = new StackId("HDP-0.1");
+
+    helper.createStack(stackId);
+
     clusters.addCluster(clusterName, stackId);
     cluster = clusters.getCluster(clusterName);
 
     cluster.setDesiredStackVersion(stackId);
     Assert.assertNotNull(cluster);
-    helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
-    cluster.createClusterVersion(stackId, stackId.getStackVersion(), "admin",
-        RepositoryVersionState.INSTALLING);
 
-    Service s = serviceFactory.createNew(cluster, serviceName);
+    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(stackId,
+        stackId.getStackVersion());
+
+    Service s = serviceFactory.createNew(cluster, serviceName, repositoryVersion);
     cluster.addService(s);
     service = cluster.getService(serviceName);
     Assert.assertNotNull(service);
@@ -135,7 +127,7 @@ public class ServiceComponentTest {
         sc.getClusterName());
     Assert.assertEquals(State.INIT, sc.getDesiredState());
     Assert.assertFalse(
-        sc.getDesiredStackVersion().getStackId().isEmpty());
+        sc.getDesiredStackId().getStackId().isEmpty());
   }
 
 
@@ -153,8 +145,12 @@ public class ServiceComponentTest {
     sc.setDesiredState(State.INSTALLED);
     Assert.assertEquals(State.INSTALLED, sc.getDesiredState());
 
-    sc.setDesiredStackVersion(new StackId("HDP-1.2.0"));
-    Assert.assertEquals("HDP-1.2.0", sc.getDesiredStackVersion().getStackId());
+    StackId newStackId = new StackId("HDP-1.2.0");
+    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(newStackId,
+        newStackId.getStackVersion());
+
+    sc.setDesiredRepositoryVersion(repositoryVersion);
+    Assert.assertEquals(newStackId.toString(), sc.getDesiredStackId().getStackId());
 
     ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO =
         injector.getInstance(ServiceComponentDesiredStateDAO.class);
@@ -167,7 +163,7 @@ public class ServiceComponentTest {
     Assert.assertNotNull(sc1);
     Assert.assertEquals(State.INSTALLED, sc1.getDesiredState());
     Assert.assertEquals("HDP-1.2.0",
-        sc1.getDesiredStackVersion().getStackId());
+        sc1.getDesiredStackId().getStackId());
 
   }
 
@@ -198,8 +194,7 @@ public class ServiceComponentTest {
   @Test
   public void testAddAndGetServiceComponentHosts() throws AmbariException {
     String componentName = "NAMENODE";
-    ServiceComponent component = serviceComponentFactory.createNew(service,
-        componentName);
+    ServiceComponent component = serviceComponentFactory.createNew(service, componentName);
     service.addServiceComponent(component);
 
     ServiceComponent sc = service.getServiceComponent(componentName);
@@ -223,6 +218,8 @@ public class ServiceComponentTest {
 
     ServiceComponentHost sch1 = sc.addServiceComponentHost("h1");
     ServiceComponentHost sch2 = sc.addServiceComponentHost("h2");
+    assertNotNull(sch1);
+    assertNotNull(sch2);
 
     try {
       sc.addServiceComponentHost("h2");
@@ -240,9 +237,7 @@ public class ServiceComponentTest {
     sc.addServiceComponentHost("h3");
     Assert.assertNotNull(sc.getServiceComponentHost("h3"));
 
-    sch1.setDesiredStackVersion(new StackId("HDP-1.2.0"));
     sch1.setState(State.STARTING);
-    sch1.setStackVersion(new StackId("HDP-1.2.0"));
     sch1.setDesiredState(State.STARTED);
 
     HostComponentDesiredStateDAO desiredStateDAO = injector.getInstance(
@@ -267,17 +262,14 @@ public class ServiceComponentTest {
     Assert.assertNotNull(sch);
     Assert.assertEquals(State.STARTING, sch.getState());
     Assert.assertEquals(State.STARTED, sch.getDesiredState());
-    Assert.assertEquals("HDP-1.2.0",
-        sch.getStackVersion().getStackId());
-    Assert.assertEquals("HDP-1.2.0",
-        sch.getDesiredStackVersion().getStackId());
+    Assert.assertEquals(service.getDesiredRepositoryVersion().getVersion(),
+        sch.getServiceComponent().getDesiredVersion());
   }
 
   @Test
   public void testConvertToResponse() throws AmbariException {
     String componentName = "NAMENODE";
-    ServiceComponent component = serviceComponentFactory.createNew(service,
-        componentName);
+    ServiceComponent component = serviceComponentFactory.createNew(service, componentName);
     service.addServiceComponent(component);
 
     addHostToCluster("h1", service.getCluster().getClusterName());
@@ -294,17 +286,15 @@ public class ServiceComponentTest {
     ServiceComponent sc = service.getServiceComponent(componentName);
     Assert.assertNotNull(sc);
     sc.setDesiredState(State.INSTALLED);
-    sc.setDesiredStackVersion(new StackId("HDP-1.2.0"));
 
     ServiceComponentResponse r = sc.convertToResponse();
     Assert.assertEquals(sc.getClusterName(), r.getClusterName());
     Assert.assertEquals(sc.getClusterId(), r.getClusterId().longValue());
     Assert.assertEquals(sc.getName(), r.getComponentName());
     Assert.assertEquals(sc.getServiceName(), r.getServiceName());
-    Assert.assertEquals(sc.getDesiredStackVersion().getStackId(),
-        r.getDesiredStackVersion());
-    Assert.assertEquals(sc.getDesiredState().toString(),
-        r.getDesiredState());
+    Assert.assertEquals(sc.getDesiredStackId().getStackId(), r.getDesiredStackId());
+    Assert.assertEquals(sc.getDesiredState().toString(), r.getDesiredState());
+
     int totalCount = r.getServiceComponentStateCount().get("totalCount");
     int startedCount = r.getServiceComponentStateCount().get("startedCount");
     int installedCount = r.getServiceComponentStateCount().get("installedCount");
@@ -346,55 +336,6 @@ public class ServiceComponentTest {
     }
   }
 
-  @Test
-  public void testHistoryCreation() throws AmbariException {
-    ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO = injector.getInstance(
-        ServiceComponentDesiredStateDAO.class);
-
-    String componentName = "NAMENODE";
-    ServiceComponent component = serviceComponentFactory.createNew(service, componentName);
-    service.addServiceComponent(component);
-
-    ServiceComponent sc = service.getServiceComponent(componentName);
-    Assert.assertNotNull(sc);
-
-    sc.setDesiredState(State.INSTALLED);
-    Assert.assertEquals(State.INSTALLED, sc.getDesiredState());
-
-    StackId stackId = new StackId("HDP-2.2.0");
-    sc.setDesiredStackVersion(stackId);
-    Assert.assertEquals(stackId, sc.getDesiredStackVersion());
-
-    Assert.assertEquals("HDP-2.2.0", sc.getDesiredStackVersion().getStackId());
-
-    ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.findByName(
-        cluster.getClusterId(), serviceName, componentName);
-
-    Assert.assertNotNull(serviceComponentDesiredStateEntity);
-
-    UpgradeEntity upgradeEntity = createUpgradeEntity(stackId, "2.2.0.0", "2.2.0.1");
-    ServiceComponentHistoryEntity history = new ServiceComponentHistoryEntity();
-    history.setFromStack(serviceComponentDesiredStateEntity.getDesiredStack());
-    history.setToStack(serviceComponentDesiredStateEntity.getDesiredStack());
-    history.setUpgrade(upgradeEntity);
-
-    serviceComponentDesiredStateEntity.addHistory(history);
-
-    serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.merge(
-        serviceComponentDesiredStateEntity);
-
-    serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.findByName(
-        cluster.getClusterId(), serviceName, componentName);
-
-    assertEquals(1, serviceComponentDesiredStateEntity.getHistory().size());
-    ServiceComponentHistoryEntity persistedHistory = serviceComponentDesiredStateEntity.getHistory().iterator().next();
-
-    assertEquals(history.getFromStack(), persistedHistory.getFromStack());
-    assertEquals(history.getToStack(), persistedHistory.getFromStack());
-    assertEquals(history.getUpgrade(), persistedHistory.getUpgrade());
-    assertEquals(history.getServiceComponentDesiredState(), persistedHistory.getServiceComponentDesiredState());
-  }
-
 
   @Test
   public void testServiceComponentRemove() throws AmbariException {
@@ -457,14 +398,8 @@ public class ServiceComponentTest {
     Assert.assertNull(serviceComponentDesiredStateEntity);
  }
 
-  /**
-   * Tests the CASCADE nature of removing a service component also removes the
-   * history.
-   *
-   * @throws AmbariException
-   */
   @Test
-  public void testHistoryRemoval() throws AmbariException {
+  public void testVersionCreation() throws Exception {
     ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO = injector.getInstance(
         ServiceComponentDesiredStateDAO.class);
 
@@ -478,89 +413,28 @@ public class ServiceComponentTest {
     sc.setDesiredState(State.INSTALLED);
     Assert.assertEquals(State.INSTALLED, sc.getDesiredState());
 
-    StackId stackId = new StackId("HDP-2.2.0");
-    sc.setDesiredStackVersion(stackId);
-    Assert.assertEquals(stackId, sc.getDesiredStackVersion());
-
-    Assert.assertEquals("HDP-2.2.0", sc.getDesiredStackVersion().getStackId());
-
     ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.findByName(
         cluster.getClusterId(), serviceName, componentName);
 
+    StackDAO stackDAO = injector.getInstance(StackDAO.class);
+    StackEntity stackEntity = stackDAO.find("HDP", "2.2.0");
 
-    Assert.assertNotNull(serviceComponentDesiredStateEntity);
-
-    UpgradeEntity upgradeEntity = createUpgradeEntity(stackId, "2.2.0.0", "2.2.0.1");
-    ServiceComponentHistoryEntity history = new ServiceComponentHistoryEntity();
-    history.setFromStack(serviceComponentDesiredStateEntity.getDesiredStack());
-    history.setToStack(serviceComponentDesiredStateEntity.getDesiredStack());
-    history.setUpgrade(upgradeEntity);
-    history.setServiceComponentDesiredState(serviceComponentDesiredStateEntity);
-
-    serviceComponentDesiredStateEntity.addHistory(history);
-
-    serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.merge(
-        serviceComponentDesiredStateEntity);
-
-    serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.findByName(
-        cluster.getClusterId(), serviceName, componentName);
-
-    assertEquals(1, serviceComponentDesiredStateEntity.getHistory().size());
-
-    // verify that we can retrieve the history directly
-    List<ServiceComponentHistoryEntity> componentHistoryList = serviceComponentDesiredStateDAO.findHistory(
-        sc.getClusterId(), sc.getServiceName(), sc.getName());
-
-    assertEquals(1, componentHistoryList.size());
-
-    // delete the SC
-    sc.delete();
-
-    // verify history is gone, too
-    serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.findByName(
-        cluster.getClusterId(), serviceName, componentName);
-
-    Assert.assertNull(serviceComponentDesiredStateEntity);
-
-    // verify that we cannot retrieve the history directly
-    componentHistoryList = serviceComponentDesiredStateDAO.findHistory(sc.getClusterId(),
-        sc.getServiceName(), sc.getName());
-
-    assertEquals(0, componentHistoryList.size());
-  }
+    RepositoryVersionEntity rve = new RepositoryVersionEntity(stackEntity, "HDP-2.2.0",
+        "2.2.0.1-1111", "[]");
 
-  @Test
-  public void testVersionCreation() throws Exception {
-    ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO = injector.getInstance(
-        ServiceComponentDesiredStateDAO.class);
-
-    String componentName = "NAMENODE";
-    ServiceComponent component = serviceComponentFactory.createNew(service, componentName);
-    service.addServiceComponent(component);
-
-    ServiceComponent sc = service.getServiceComponent(componentName);
-    Assert.assertNotNull(sc);
+    RepositoryVersionDAO repositoryDAO = injector.getInstance(RepositoryVersionDAO.class);
+    repositoryDAO.create(rve);
 
-    sc.setDesiredState(State.INSTALLED);
-    Assert.assertEquals(State.INSTALLED, sc.getDesiredState());
+    sc.setDesiredRepositoryVersion(rve);
 
-    sc.setDesiredStackVersion(new StackId("HDP-2.2.0"));
-    StackId stackId = sc.getDesiredStackVersion();
-    Assert.assertEquals(new StackId("HDP", "2.2.0"), stackId);
+    Assert.assertEquals(rve, sc.getDesiredRepositoryVersion());
 
-    Assert.assertEquals("HDP-2.2.0", sc.getDesiredStackVersion().getStackId());
+    Assert.assertEquals(new StackId("HDP", "2.2.0"), sc.getDesiredStackId());
 
-    ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.findByName(
-        cluster.getClusterId(), serviceName, componentName);
+    Assert.assertEquals("HDP-2.2.0", sc.getDesiredStackId().getStackId());
 
     Assert.assertNotNull(serviceComponentDesiredStateEntity);
 
-    RepositoryVersionEntity rve = new RepositoryVersionEntity(
-        serviceComponentDesiredStateEntity.getDesiredStack(), "HDP-2.2.0", "2.2.0.1-1111", "[]");
-
-    RepositoryVersionDAO repositoryDAO = injector.getInstance(RepositoryVersionDAO.class);
-    repositoryDAO.create(rve);
-
     ServiceComponentVersionEntity version = new ServiceComponentVersionEntity();
     version.setState(RepositoryVersionState.CURRENT);
     version.setRepositoryVersion(rve);
@@ -594,23 +468,27 @@ public class ServiceComponentTest {
     sc.setDesiredState(State.INSTALLED);
     Assert.assertEquals(State.INSTALLED, sc.getDesiredState());
 
-    sc.setDesiredStackVersion(new StackId("HDP-2.2.0"));
-    StackId stackId = sc.getDesiredStackVersion();
-    Assert.assertEquals(new StackId("HDP", "2.2.0"), stackId);
-
-    Assert.assertEquals("HDP-2.2.0", sc.getDesiredStackVersion().getStackId());
-
     ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.findByName(
         cluster.getClusterId(), serviceName, componentName);
 
-    Assert.assertNotNull(serviceComponentDesiredStateEntity);
+    StackDAO stackDAO = injector.getInstance(StackDAO.class);
+    StackEntity stackEntity = stackDAO.find("HDP", "2.2.0");
 
-    RepositoryVersionEntity rve = new RepositoryVersionEntity(
-        serviceComponentDesiredStateEntity.getDesiredStack(), "HDP-2.2.0", "2.2.0.1-1111", "[]");
+    RepositoryVersionEntity rve = new RepositoryVersionEntity(stackEntity, "HDP-2.2.0",
+        "2.2.0.1-1111", "[]");
 
     RepositoryVersionDAO repositoryDAO = injector.getInstance(RepositoryVersionDAO.class);
     repositoryDAO.create(rve);
 
+    sc.setDesiredRepositoryVersion(rve);
+
+    StackId stackId = sc.getDesiredStackId();
+    Assert.assertEquals(new StackId("HDP", "2.2.0"), stackId);
+
+    Assert.assertEquals("HDP-2.2.0", sc.getDesiredStackId().getStackId());
+
+    Assert.assertNotNull(serviceComponentDesiredStateEntity);
+
     ServiceComponentVersionEntity version = new ServiceComponentVersionEntity();
     version.setState(RepositoryVersionState.CURRENT);
     version.setRepositoryVersion(rve);
@@ -640,39 +518,91 @@ public class ServiceComponentTest {
     assertEquals(0, list.size());
   }
 
-  /**
-   * Creates an upgrade entity, asserting it was created correctly.
-   *
-   * @param fromVersion
-   * @param toVersion
-   * @return
-   */
-  private UpgradeEntity createUpgradeEntity(StackId stackId, String fromVersion, String toVersion) {
-    RepositoryVersionEntity toRepositoryVersion = helper.getOrCreateRepositoryVersion(stackId, fromVersion);
-    RepositoryVersionEntity fromRepositoryVersion = helper.getOrCreateRepositoryVersion(stackId, fromVersion);
-
-    RequestDAO requestDAO = injector.getInstance(RequestDAO.class);
-    RequestEntity requestEntity = new RequestEntity();
-    requestEntity.setRequestId(99L);
-    requestEntity.setClusterId(cluster.getClusterId());
-    requestEntity.setStatus(HostRoleStatus.PENDING);
-    requestEntity.setStages(new ArrayList<StageEntity>());
-    requestDAO.create(requestEntity);
-
-    UpgradeDAO upgradeDao = injector.getInstance(UpgradeDAO.class);
-    UpgradeEntity upgradeEntity = new UpgradeEntity();
-    upgradeEntity.setClusterId(cluster.getClusterId());
-    upgradeEntity.setDirection(Direction.UPGRADE);
-    upgradeEntity.setFromRepositoryVersion(fromRepositoryVersion);
-    upgradeEntity.setToRepositoryVersion(toRepositoryVersion);
-    upgradeEntity.setUpgradePackage("upgrade_test");
-    upgradeEntity.setUpgradeType(UpgradeType.ROLLING);
-    upgradeEntity.setRequestEntity(requestEntity);
-
-    upgradeDao.create(upgradeEntity);
-    List<UpgradeEntity> upgrades = upgradeDao.findUpgrades(cluster.getClusterId());
-    assertEquals(1, upgrades.size());
-    return upgradeEntity;
-  }
 
-}
+  @Test
+  public void testUpdateStates() throws Exception {
+    ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO = injector.getInstance(
+        ServiceComponentDesiredStateDAO.class);
+
+    String componentName = "NAMENODE";
+
+    ServiceComponent component = serviceComponentFactory.createNew(service, componentName);
+
+    StackId newStackId = new StackId("HDP-2.2.0");
+    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(newStackId,
+        newStackId.getStackVersion());
+
+    component.setDesiredRepositoryVersion(repositoryVersion);
+
+    service.addServiceComponent(component);
+
+    ServiceComponent sc = service.getServiceComponent(componentName);
+    Assert.assertNotNull(sc);
+
+    ServiceComponentDesiredStateEntity entity = serviceComponentDesiredStateDAO.findByName(
+        cluster.getClusterId(), serviceName, componentName);
+
+    RepositoryVersionEntity repoVersion2201 = helper.getOrCreateRepositoryVersion(
+        component.getDesiredStackId(), "2.2.0.1");
+
+    RepositoryVersionEntity repoVersion2202 = helper.getOrCreateRepositoryVersion(
+        component.getDesiredStackId(), "2.2.0.2");
+
+    addHostToCluster("h1", clusterName);
+    addHostToCluster("h2", clusterName);
+
+    sc.setDesiredState(State.INSTALLED);
+    Assert.assertEquals(State.INSTALLED, sc.getDesiredState());
+
+    ServiceComponentHost sch1 = sc.addServiceComponentHost("h1");
+    ServiceComponentHost sch2 = sc.addServiceComponentHost("h2");
+
+    // !!! case 1: component desired is UNKNOWN, mix of h-c versions
+    sc.setDesiredRepositoryVersion(repositoryVersion);
+    sch1.setVersion("2.2.0.1");
+    sch2.setVersion("2.2.0.2");
+    sc.updateRepositoryState("2.2.0.2");
+    entity = serviceComponentDesiredStateDAO.findByName(cluster.getClusterId(), serviceName,
+        componentName);
+    assertEquals(RepositoryVersionState.OUT_OF_SYNC, entity.getRepositoryState());
+
+    // !!! case 2: component desired is UNKNOWN, all h-c same version
+    sc.setDesiredRepositoryVersion(repositoryVersion);
+    sch1.setVersion("2.2.0.1");
+    sch2.setVersion("2.2.0.1");
+    sc.updateRepositoryState("2.2.0.1");
+    entity = serviceComponentDesiredStateDAO.findByName(cluster.getClusterId(), serviceName,
+        componentName);
+    assertEquals(RepositoryVersionState.OUT_OF_SYNC, entity.getRepositoryState());
+
+    // !!! case 3: component desired is known, any component reports different
+    // version
+    sc.setDesiredRepositoryVersion(repoVersion2201);
+    sch1.setVersion("2.2.0.1");
+    sch2.setVersion("2.2.0.2");
+    sc.updateRepositoryState("2.2.0.2");
+    entity = serviceComponentDesiredStateDAO.findByName(cluster.getClusterId(), serviceName,
+        componentName);
+    assertEquals(RepositoryVersionState.OUT_OF_SYNC, entity.getRepositoryState());
+
+    // !!! case 4: component desired is known, component reports same as
+    // desired, mix of h-c versions
+    sc.setDesiredRepositoryVersion(repoVersion2201);
+    sch1.setVersion("2.2.0.1");
+    sch2.setVersion("2.2.0.2");
+    sc.updateRepositoryState("2.2.0.1");
+    entity = serviceComponentDesiredStateDAO.findByName(cluster.getClusterId(), serviceName,
+        componentName);
+    assertEquals(RepositoryVersionState.OUT_OF_SYNC, entity.getRepositoryState());
+
+    // !!! case 5: component desired is known, component reports same as
+    // desired, all h-c the same
+    sc.setDesiredRepositoryVersion(repoVersion2201);
+    sch1.setVersion("2.2.0.1");
+    sch2.setVersion("2.2.0.1");
+    sc.updateRepositoryState("2.2.0.1");
+    entity = serviceComponentDesiredStateDAO.findByName(cluster.getClusterId(), serviceName,
+        componentName);
+    assertEquals(RepositoryVersionState.CURRENT, entity.getRepositoryState());
+  }
+}
\ No newline at end of file
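
[Editor's note on the ServiceComponentTest rewrite above: a component's desired state is no longer tracked as a bare StackId. Components carry a desired RepositoryVersionEntity, the desired stack is derived from it via getDesiredStackId(), and updateRepositoryState() rolls host-component versions up into a repository state on the desired-state entity. A condensed sketch of the pattern the new testUpdateStates exercises; the names (helper, sc, sch1, sch2, serviceComponentDesiredStateDAO) are reused from the test and the snippet is illustrative only, not an addition to the patch:

    // Sketch only: condensed from testUpdateStates above.
    StackId stackId = new StackId("HDP-2.2.0");
    RepositoryVersionEntity repoVersion2201 =
        helper.getOrCreateRepositoryVersion(stackId, "2.2.0.1");

    // Desired version is set through a repository version entity...
    sc.setDesiredRepositoryVersion(repoVersion2201);
    // ...and the desired stack id is derived from it.
    assertEquals(new StackId("HDP", "2.2.0"), sc.getDesiredStackId());

    // When every host component reports the desired version, the rolled-up
    // repository state becomes CURRENT; any mismatch leaves it OUT_OF_SYNC.
    sch1.setVersion("2.2.0.1");
    sch2.setVersion("2.2.0.1");
    sc.updateRepositoryState("2.2.0.1");
    ServiceComponentDesiredStateEntity entity = serviceComponentDesiredStateDAO
        .findByName(cluster.getClusterId(), serviceName, componentName);
    assertEquals(RepositoryVersionState.CURRENT, entity.getRepositoryState());
]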