Posted to commits@ambari.apache.org by jo...@apache.org on 2017/05/03 15:13:48 UTC

[1/9] ambari git commit: AMBARI-20894 - Setting Desired Stack Ids to Correct Values During Service and Patch Upgrades (jonathanhurley)

Repository: ambari
Updated Branches:
  refs/heads/branch-feature-AMBARI-12556 b68662490 -> dcbd826c9


http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/python/TestAmbariServer.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/TestAmbariServer.py b/ambari-server/src/test/python/TestAmbariServer.py
index 9579c22..ad63ca5 100644
--- a/ambari-server/src/test/python/TestAmbariServer.py
+++ b/ambari-server/src/test/python/TestAmbariServer.py
@@ -113,8 +113,8 @@ with patch.object(platform, "linux_distribution", return_value = MagicMock(retur
                 from ambari_server.serverSetup import check_selinux, check_ambari_user, proceedJDBCProperties, SE_STATUS_DISABLED, SE_MODE_ENFORCING, configure_os_settings, \
                   download_and_install_jdk, prompt_db_properties, setup, \
                   AmbariUserChecks, AmbariUserChecksLinux, AmbariUserChecksWindows, JDKSetup, reset, setup_jce_policy, expand_jce_zip_file
-                from ambari_server.serverUpgrade import upgrade, upgrade_local_repo, change_objects_owner, upgrade_stack, \
-                  run_stack_upgrade, run_metainfo_upgrade, run_schema_upgrade, move_user_custom_actions
+                from ambari_server.serverUpgrade import upgrade, change_objects_owner, \
+                  run_schema_upgrade, move_user_custom_actions
                 from ambari_server.setupHttps import is_valid_https_port, setup_https, import_cert_and_key_action, get_fqdn, \
                   generate_random_string, get_cert_info, COMMON_NAME_ATTR, is_valid_cert_exp, NOT_AFTER_ATTR, NOT_BEFORE_ATTR, \
                   SSL_DATE_FORMAT, import_cert_and_key, is_valid_cert_host, setup_truststore, \
@@ -4913,58 +4913,6 @@ class TestAmbariServer(TestCase):
     self.assertEqual(shutil_move_mock.call_count, 2)
     pass
 
-
-  @patch("os.path.isdir", new = MagicMock(return_value=True))
-  @patch("os.access", new = MagicMock(return_value=True))
-  @patch("ambari_server.serverConfiguration.get_conf_dir")
-  @patch("ambari_server.serverUpgrade.run_os_command")
-  @patch("ambari_server.serverUpgrade.get_java_exe_path")
-  def test_run_stack_upgrade(self, java_exe_path_mock, run_os_command_mock,
-                             get_conf_dir_mock):
-    java_exe_path_mock.return_value = "/usr/lib/java/bin/java"
-    run_os_command_mock.return_value = (0, None, None)
-    get_conf_dir_mock.return_value = '/etc/conf'
-    stackIdMap = {'HDP' : '2.0', 'repo_url' : 'http://test.com'}
-
-    run_stack_upgrade(None, 'HDP', '2.0', 'http://test.com', None)
-
-    self.assertTrue(java_exe_path_mock.called)
-    self.assertTrue(get_conf_dir_mock.called)
-    self.assertTrue(run_os_command_mock.called)
-    run_os_command_mock.assert_called_with('/usr/lib/java/bin/java -cp \'/etc/conf:/usr/lib/ambari-server/*\' '
-                                          'org.apache.ambari.server.upgrade.StackUpgradeHelper '
-                                          'updateStackId ' + "'" + json.dumps(stackIdMap) + "'" +
-                                          ' > ' + os.sep + 'var' + os.sep + 'log' + os.sep + 'ambari-server' + os.sep +
-                                          'ambari-server.out 2>&1')
-    pass
-
-  @patch("os.path.isdir", new = MagicMock(return_value=True))
-  @patch("os.access", new = MagicMock(return_value=True))
-  @patch.object(ServerClassPath, "get_full_ambari_classpath_escaped_for_shell",
-                  new = MagicMock(return_value = '/etc/conf' + os.pathsep + 'test' + os.pathsep + 'path12'))
-  @patch("ambari_server.serverConfiguration.get_conf_dir")
-  @patch("ambari_server.serverUpgrade.run_os_command")
-  @patch("ambari_server.serverUpgrade.get_java_exe_path")
-  def test_run_stack_upgrade_with_url_os(self, java_exe_path_mock, run_os_command_mock,
-                             get_conf_dir_mock):
-    java_exe_path_mock.return_value = "/usr/lib/java/bin/java"
-    run_os_command_mock.return_value = (0, None, None)
-    get_conf_dir_mock.return_value = '/etc/conf'
-    stackIdMap = {'HDP' : '2.0', 'repo_url': 'http://test.com', 'repo_url_os': 'centos5,centos6'}
-
-    run_stack_upgrade(None, 'HDP', '2.0', 'http://test.com', 'centos5,centos6')
-
-    self.assertTrue(java_exe_path_mock.called)
-    self.assertTrue(get_conf_dir_mock.called)
-    self.assertTrue(run_os_command_mock.called)
-    run_os_command_mock.assert_called_with('/usr/lib/java/bin/java -cp /etc/conf' + os.pathsep + 'test:path12 '
-                                           'org.apache.ambari.server.upgrade.StackUpgradeHelper '
-                                           'updateStackId ' + "'" + json.dumps(stackIdMap) + "'" +
-                                           ' > ' + os.sep + 'var' + os.sep + 'log' + os.sep + 'ambari-server' + os.sep +
-                                           'ambari-server.out 2>&1')
-    pass
-
-
   @patch("os.path.isdir", new = MagicMock(return_value=True))
   @patch("os.access", new = MagicMock(return_value=True))
   @patch.object(ServerClassPath, "get_full_ambari_classpath_escaped_for_shell",
@@ -5011,31 +4959,6 @@ class TestAmbariServer(TestCase):
     self.assertTrue(run_os_command_mock.called)
     run_os_command_mock.assert_called_with(command, env=environ)
 
-  @patch("os.path.isdir", new = MagicMock(return_value=True))
-  @patch("os.access", new = MagicMock(return_value=True))
-  @patch("ambari_server.serverConfiguration.get_conf_dir")
-  @patch.object(ServerClassPath, "get_full_ambari_classpath_escaped_for_shell", new = MagicMock(return_value = 'test' + os.pathsep + 'path12'))
-  @patch("ambari_server.serverUpgrade.run_os_command")
-  @patch("ambari_server.serverUpgrade.get_java_exe_path")
-  def test_run_metainfo_upgrade(self, java_exe_path_mock, run_os_command_mock,
-                                get_conf_dir_mock):
-    java_exe_path_mock.return_value = "/usr/lib/java/bin/java"
-    run_os_command_mock.return_value = (0, None, None)
-    get_conf_dir_mock.return_value = '/etc/conf'
-
-    json_map = {'a': 'http://newurl'}
-    run_metainfo_upgrade(None, json_map)
-
-    self.assertTrue(java_exe_path_mock.called)
-    self.assertTrue(run_os_command_mock.called)
-    run_os_command_mock.assert_called_with('/usr/lib/java/bin/java '
-                                           '-cp test' + os.pathsep + 'path12 '
-                                           'org.apache.ambari.server.upgrade.StackUpgradeHelper updateMetaInfo ' +
-                                           "'" + json.dumps(json_map) + "'" +
-                                           ' > ' + os.sep + 'var' + os.sep + 'log' + os.sep + 'ambari-server' +
-                                           os.sep + 'ambari-server.out 2>&1')
-    pass
-
 
   @not_for_platform(PLATFORM_WINDOWS)
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@@ -5224,14 +5147,13 @@ class TestAmbariServer(TestCase):
   @patch("ambari_server.dbConfiguration.get_ambari_properties")
   @patch("ambari_server.serverConfiguration.get_ambari_properties")
   @patch("ambari_server.serverUpgrade.get_ambari_properties")
-  @patch("ambari_server.serverUpgrade.upgrade_local_repo")
   @patch("ambari_server.serverUpgrade.move_user_custom_actions")
   @patch("ambari_server.serverUpgrade.update_krb_jaas_login_properties")
   @patch("ambari_server.serverUpgrade.update_ambari_env")
   @patch("ambari_server.setupMpacks.get_replay_log_file")
   @patch("ambari_server.serverUpgrade.logger")
   @patch.object(PGConfig, "_change_db_files_owner", return_value=0)
-  def test_upgrade_from_161(self, change_db_files_owner_mock, logger_mock, get_replay_log_file_mock, update_ambari_env_mock, update_krb_jaas_login_properties_mock, move_user_custom_actions_mock, upgrade_local_repo_mock, get_ambari_properties_mock,
+  def test_upgrade_from_161(self, change_db_files_owner_mock, logger_mock, get_replay_log_file_mock, update_ambari_env_mock, update_krb_jaas_login_properties_mock, move_user_custom_actions_mock, get_ambari_properties_mock,
                             get_ambari_properties_2_mock, get_ambari_properties_3_mock, get_ambari_version_mock, write_property_mock,
                             is_root_mock, update_ambari_properties_mock, find_properties_file_mock, run_os_command_mock,
                             run_schema_upgrade_mock, read_ambari_user_mock, print_warning_msg_mock,
@@ -5446,11 +5368,10 @@ class TestAmbariServer(TestCase):
   @patch("ambari_server.dbConfiguration.get_ambari_properties")
   @patch("ambari_server.serverConfiguration.get_ambari_properties")
   @patch("ambari_server.serverUpgrade.get_ambari_properties")
-  @patch("ambari_server.serverUpgrade.upgrade_local_repo")
   @patch("ambari_server.serverUpgrade.move_user_custom_actions")
   @patch("ambari_server.serverUpgrade.update_krb_jaas_login_properties")
   @patch("ambari_server.serverUpgrade.update_ambari_env")
-  def test_upgrade(self, update_ambari_env_mock, update_krb_jaas_login_properties_mock, move_user_custom_actions, upgrade_local_repo_mock,
+  def test_upgrade(self, update_ambari_env_mock, update_krb_jaas_login_properties_mock, move_user_custom_actions,
                    get_ambari_properties_mock, get_ambari_properties_2_mock, get_ambari_properties_3_mock,
                    is_root_mock, get_ambari_version_mock, get_ambari_version_2_mock,
                    parse_properties_file_mock,
@@ -5516,7 +5437,6 @@ class TestAmbariServer(TestCase):
     self.assertTrue(print_warning_msg_mock.called)
     warning_args = print_warning_msg_mock.call_args[0][0]
     self.assertTrue("custom ambari user" in warning_args)
-    self.assertTrue(upgrade_local_repo_mock.called)
     self.assertTrue(move_user_custom_actions.called)
 
     args = reset_mocks()
@@ -5631,10 +5551,9 @@ class TestAmbariServer(TestCase):
   @patch("ambari_server.dbConfiguration.get_ambari_properties")
   @patch("ambari_server.serverConfiguration.get_ambari_properties")
   @patch("ambari_server.serverUpgrade.get_ambari_properties")
-  @patch("ambari_server.serverUpgrade.upgrade_local_repo")
   @patch("ambari_server.serverUpgrade.move_user_custom_actions")
   @patch("ambari_server.serverUpgrade.update_krb_jaas_login_properties")
-  def test_upgrade(self, update_krb_jaas_login_properties_mock, move_user_custom_actions, upgrade_local_repo_mock,
+  def test_upgrade(self, update_krb_jaas_login_properties_mock, move_user_custom_actions,
                    get_ambari_properties_mock, get_ambari_properties_2_mock, get_ambari_properties_3_mock,
                    is_root_mock, get_ambari_version_mock, get_ambari_version_2_mock,
                    parse_properties_file_mock,
@@ -5701,7 +5620,6 @@ class TestAmbariServer(TestCase):
     self.assertTrue(print_warning_msg_mock.called)
     warning_args = print_warning_msg_mock.call_args[0][0]
     self.assertTrue("custom ambari user" in warning_args)
-    self.assertTrue(upgrade_local_repo_mock.called)
     self.assertTrue(move_user_custom_actions.called)
 
     args = reset_mocks()
@@ -8267,80 +8185,6 @@ class TestAmbariServer(TestCase):
                                                        validatorFunction = is_valid_filepath, answer='/kerberos/admin.keytab')
     pass
 
-  @patch("os.listdir")
-  @patch("os.path.exists")
-  @patch("ambari_server.serverUpgrade.load_stack_values")
-  @patch("ambari_server.serverUpgrade.get_ambari_properties")
-  @patch("ambari_server.serverUpgrade.run_metainfo_upgrade")
-  def test_upgrade_local_repo(self,
-                           run_metainfo_upgrade_mock,
-                           get_ambari_properties_mock,
-                           load_stack_values_mock,
-                           os_path_exists_mock,
-                           os_listdir_mock):
-
-    from mock.mock import call
-    args = MagicMock()
-    args.persistence_type = "local"
-
-    def load_values_side_effect(*args, **kwargs):
-      res = {}
-      res['a'] = 'http://oldurl'
-      if -1 != args[1].find("HDPLocal"):
-        res['a'] = 'http://newurl'
-      return res
-
-    load_stack_values_mock.side_effect = load_values_side_effect
-
-    properties = Properties()
-    get_ambari_properties_mock.return_value = properties
-    os_path_exists_mock.return_value = 1
-    os_listdir_mock.return_value = ['1.1']
-
-    upgrade_local_repo(args)
-
-    self.assertTrue(get_ambari_properties_mock.called)
-    self.assertTrue(load_stack_values_mock.called)
-    self.assertTrue(run_metainfo_upgrade_mock.called)
-    run_metainfo_upgrade_mock.assert_called_with(args, {'a': 'http://newurl'})
-    pass
-
-  @patch("os.listdir")
-  @patch("os.path.exists")
-  @patch("ambari_server.serverUpgrade.load_stack_values")
-  @patch("ambari_server.serverUpgrade.get_ambari_properties")
-  @patch("ambari_server.serverUpgrade.run_metainfo_upgrade")
-  def test_upgrade_local_repo_nochange(self,
-                         run_metainfo_upgrade_mock,
-                         get_ambari_properties_mock,
-                         load_stack_values_mock,
-                         os_path_exists_mock,
-                         os_listdir_mock):
-
-    from mock.mock import call
-    args = MagicMock()
-    args.persistence_type = "local"
-
-    def load_values_side_effect(*args, **kwargs):
-      res = {}
-      res['a'] = 'http://oldurl'
-      return res
-
-    load_stack_values_mock.side_effect = load_values_side_effect
-
-    properties = Properties()
-    get_ambari_properties_mock.return_value = properties
-    os_path_exists_mock.return_value = 1
-    os_listdir_mock.return_value = ['1.1']
-
-    upgrade_local_repo(args)
-
-    self.assertTrue(get_ambari_properties_mock.called)
-    self.assertTrue(load_stack_values_mock.called)
-    self.assertTrue(run_metainfo_upgrade_mock.called)
-    run_metainfo_upgrade_mock.assert_called_with(args, {})
-    pass
-
   @patch("os.path.exists")
   @patch.object(ResourceFilesKeeper, "perform_housekeeping")
   def test_refresh_stack_hash(self,

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/python/TestServerUpgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/TestServerUpgrade.py b/ambari-server/src/test/python/TestServerUpgrade.py
index 9235ff9..d944e0d 100644
--- a/ambari-server/src/test/python/TestServerUpgrade.py
+++ b/ambari-server/src/test/python/TestServerUpgrade.py
@@ -36,7 +36,7 @@ with patch.object(platform, "linux_distribution", return_value = MagicMock(retur
   with patch("os.path.isdir", return_value = MagicMock(return_value=True)):
     with patch("os.access", return_value = MagicMock(return_value=True)):
       with patch.object(os_utils, "parse_log4j_file", return_value={'ambari.log.dir': '/var/log/ambari-server'}):
-        from ambari_server.serverUpgrade import set_current, SetCurrentVersionOptions, upgrade_stack
+        from ambari_server.serverUpgrade import set_current, SetCurrentVersionOptions
         import ambari_server
 
 os_utils.search_file = _search_file
@@ -154,39 +154,6 @@ class TestServerUpgrade(TestCase):
     self.assertEquals(request.origin_req_host, '127.0.0.1')
     self.assertEquals(request.headers, {'X-requested-by': 'ambari', 'Authorization': 'Basic ZHVtbXlfc3RyaW5nOmR1bW15X3N0cmluZw=='})
 
-  @patch("ambari_server.serverUpgrade.run_os_command")
-  @patch("ambari_server.serverUpgrade.get_java_exe_path")
-  @patch("ambari_server.serverConfiguration.get_ambari_properties")
-  @patch("ambari_server.serverUpgrade.get_ambari_properties")
-  @patch("ambari_server.serverUpgrade.check_database_name_property")
-  @patch("ambari_server.serverUpgrade.is_root")
-  def test_upgrade_stack(self, is_root_mock, c_d_n_p_mock, up_g_a_p_mock, server_g_a_p_mock, java_path_mock, run_os_mock):
-
-    run_os_mock.return_value = 0, "", ""
-
-    java_path_mock.return_value = ""
-
-    is_root_mock.return_value = True
-
-    def do_nothing():
-      pass
-    c_d_n_p_mock.side_effect = do_nothing
-
-    p = ambari_server.properties.Properties()
-    p._props = {
-      ambari_server.serverConfiguration.JDBC_DATABASE_PROPERTY: "mysql",
-      ambari_server.serverConfiguration.JDBC_DATABASE_NAME_PROPERTY: "ambari"
-    }
-
-    up_g_a_p_mock.side_effect = [p, p]
-    server_g_a_p_mock.side_effect = [p]
-
-    args = ["upgrade_stack", "HDP-2.3"]
-    upgrade_stack(args)
-
-    self.assertTrue(run_os_mock.called)
-    command = run_os_mock.call_args_list[0][0][0]
-    self.assertTrue("StackUpgradeHelper" in command and "HDP" in command and "2.3" in command)
 
   def testCurrentVersionOptions(self):
     # Negative test cases


[8/9] ambari git commit: AMBARI-20894 - Setting Desired Stack Ids to Correct Values During Service and Patch Upgrades (jonathanhurley)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareDisableKerberosServerAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareDisableKerberosServerAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareDisableKerberosServerAction.java
index 5b65833..3087379 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareDisableKerberosServerAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/PrepareDisableKerberosServerAction.java
@@ -35,6 +35,7 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.PropertyInfo;
 import org.apache.ambari.server.state.SecurityState;
+import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
@@ -177,7 +178,8 @@ public class PrepareDisableKerberosServerAction extends AbstractPrepareKerberosS
           String serviceName = sch.getServiceName();
 
           if (!visitedServices.contains(serviceName)) {
-            StackId stackVersion = sch.getStackVersion();
+            ServiceComponent serviceComponent = sch.getServiceComponent();
+            StackId stackVersion = serviceComponent.getDesiredStackVersion();
 
             visitedServices.add(serviceName);
 

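For context, the hunk above stops reading the stack from the ServiceComponentHost itself and resolves it through the owning ServiceComponent's desired stack, presumably so that a service or patch upgrade reports the stack the service is actually moving to. A minimal sketch of the lookup, reusing only names that appear in the diff (the surrounding Kerberos descriptor loop is elided):

    // Sketch: resolve the stack per service through the owning component
    // rather than through the host component's own recorded stack version.
    String serviceName = sch.getServiceName();
    if (!visitedServices.contains(serviceName)) {
      ServiceComponent serviceComponent = sch.getServiceComponent();
      StackId stackVersion = serviceComponent.getDesiredStackVersion();
      visitedServices.add(serviceName);
      // ... descriptor handling for this service then uses stackVersion ...
    }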
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/AbstractUpgradeServerAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/AbstractUpgradeServerAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/AbstractUpgradeServerAction.java
index 5d73fac..4fc8271 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/AbstractUpgradeServerAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/AbstractUpgradeServerAction.java
@@ -21,13 +21,18 @@ import java.util.Collections;
 import java.util.Set;
 
 import org.apache.ambari.server.controller.internal.UpgradeResourceProvider;
+import org.apache.ambari.server.orm.entities.UpgradeEntity;
 import org.apache.ambari.server.serveraction.AbstractServerAction;
+import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.UpgradeContext;
+import org.apache.ambari.server.state.UpgradeContextFactory;
+import org.apache.ambari.server.state.UpgradeHelper;
 import org.apache.ambari.server.state.stack.upgrade.Direction;
+import org.apache.ambari.server.state.stack.upgrade.UpgradeScope;
 import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.metrics2.sink.relocated.google.common.collect.Sets;
 
+import com.google.common.collect.Sets;
 import com.google.inject.Inject;;
 
 /**
@@ -60,15 +65,40 @@ public abstract class AbstractUpgradeServerAction extends AbstractServerAction {
   protected Clusters m_clusters;
 
   /**
-   * @return the set of supported services
+   * Used to move desired repo versions forward.
    */
-  protected Set<String> getSupportedServices() {
+  @Inject
+  protected UpgradeHelper m_upgradeHelper;
+
+  /**
+   * Used to create instances of {@link UpgradeContext} with injected
+   * dependencies.
+   */
+  @Inject
+  private UpgradeContextFactory m_upgradeContextFactory;
+
+  /**
+   * Gets an initialized {@link UpgradeContext} for the in-progress upgrade.
+   */
+  protected UpgradeContext getUpgradeContext(Cluster cluster) {
+    UpgradeEntity upgrade = cluster.getUpgradeInProgress();
+    UpgradeContext upgradeContext = m_upgradeContextFactory.create(cluster, upgrade);
+
+    final UpgradeScope scope;
+    final Set<String> supportedServices;
     String services = getCommandParameterValue(SUPPORTED_SERVICES_KEY);
     if (StringUtils.isBlank(services)) {
-      return Collections.emptySet();
+      scope = UpgradeScope.COMPLETE;
+      supportedServices = Collections.emptySet();
+
     } else {
-      return Sets.newHashSet(StringUtils.split(services, ','));
+      scope = UpgradeScope.PARTIAL;
+      supportedServices = Sets.newHashSet(StringUtils.split(services, ','));
     }
-  }
 
+    upgradeContext.setSupportedServices(supportedServices);
+    upgradeContext.setScope(scope);
+
+    return upgradeContext;
+  }
 }

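Subclasses that previously called the removed getSupportedServices() now obtain a fully populated UpgradeContext instead. A minimal sketch of the new call pattern inside an action's execute() method, assuming only the fields and types shown in this diff (error handling elided):

    // Sketch: resolve the cluster, then let the base class build a context
    // carrying direction, target repository version, scope (COMPLETE vs.
    // PARTIAL) and the services participating in the upgrade.
    String clusterName = getExecutionCommand().getClusterName();
    Cluster cluster = m_clusters.getCluster(clusterName);
    UpgradeContext upgradeContext = getUpgradeContext(cluster);
    Set<String> servicesInUpgrade = upgradeContext.getSupportedServices();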
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckAction.java
index 52c0cf2..4a3bd9b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckAction.java
@@ -29,7 +29,7 @@ import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
 import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.UpgradeContext;
 import org.apache.commons.lang.StringUtils;
 
 import com.google.gson.JsonArray;
@@ -49,13 +49,12 @@ public class ComponentVersionCheckAction extends FinalizeUpgradeAction {
 
     Map<String, String> commandParams = getExecutionCommand().getCommandParams();
 
-    String version = commandParams.get(VERSION_KEY);
-    StackId targetStackId = new StackId(commandParams.get(TARGET_STACK_KEY));
     String clusterName = getExecutionCommand().getClusterName();
 
     Cluster cluster = m_clusters.getCluster(clusterName);
 
-    List<InfoTuple> errors = checkHostComponentVersions(cluster, version, targetStackId);
+    UpgradeContext upgradeContext = getUpgradeContext(cluster);
+    List<InfoTuple> errors = getHostComponentsWhichDidNotUpgrade(upgradeContext);
 
     StringBuilder outSB = new StringBuilder();
     StringBuilder errSB = new StringBuilder();

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java
index 32d6151..a4cc757 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java
@@ -24,7 +24,6 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.List;
-import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ConcurrentMap;
 
@@ -34,13 +33,10 @@ import org.apache.ambari.server.agent.CommandReport;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.events.StackUpgradeFinishEvent;
 import org.apache.ambari.server.events.publishers.VersionEventPublisher;
-import org.apache.ambari.server.orm.dao.ClusterVersionDAO;
 import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
 import org.apache.ambari.server.orm.dao.HostVersionDAO;
 import org.apache.ambari.server.orm.dao.ServiceComponentDesiredStateDAO;
 import org.apache.ambari.server.orm.dao.StackDAO;
-import org.apache.ambari.server.orm.dao.UpgradeDAO;
-import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
 import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
 import org.apache.ambari.server.orm.entities.HostEntity;
 import org.apache.ambari.server.orm.entities.HostVersionEntity;
@@ -56,7 +52,9 @@ import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.UpgradeContext;
 import org.apache.ambari.server.state.UpgradeState;
+import org.apache.ambari.server.state.stack.upgrade.Direction;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostSummary;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.text.StrBuilder;
@@ -72,9 +70,6 @@ public class FinalizeUpgradeAction extends AbstractUpgradeServerAction {
       "For this reason, Ambari will not remove any configs. Please ensure that all database records are correct.";
 
   @Inject
-  private ClusterVersionDAO clusterVersionDAO;
-
-  @Inject
   private HostVersionDAO hostVersionDAO;
 
   @Inject
@@ -92,12 +87,6 @@ public class FinalizeUpgradeAction extends AbstractUpgradeServerAction {
   @Inject
   private ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO;
 
-  /**
-   * Gets {@link UpgradeEntity} instances.
-   */
-  @Inject
-  private UpgradeDAO upgradeDAO;
-
   @Inject
   private AmbariMetaInfo ambariMetaInfo;
 
@@ -108,21 +97,15 @@ public class FinalizeUpgradeAction extends AbstractUpgradeServerAction {
   public CommandReport execute(ConcurrentMap<String, Object> requestSharedDataContext)
       throws AmbariException, InterruptedException {
 
-    Map<String, String> commandParams = getExecutionCommand().getCommandParams();
-
-    boolean isDowngrade = commandParams.containsKey(UPGRADE_DIRECTION_KEY) &&
-        "downgrade".equals(commandParams.get(UPGRADE_DIRECTION_KEY).toLowerCase());
-
-    String version = commandParams.get(VERSION_KEY);
-    StackId originalStackId = new StackId(commandParams.get(ORIGINAL_STACK_KEY));
-    StackId targetStackId = new StackId(commandParams.get(TARGET_STACK_KEY));
-
     String clusterName = getExecutionCommand().getClusterName();
+    Cluster cluster = m_clusters.getCluster(clusterName);
 
-    if (isDowngrade) {
-      return finalizeDowngrade(clusterName, originalStackId, targetStackId, version);
+    UpgradeContext upgradeContext = getUpgradeContext(cluster);
+
+    if (upgradeContext.getDirection() == Direction.UPGRADE) {
+      return finalizeUpgrade(upgradeContext);
     } else {
-      return finalizeUpgrade(clusterName, version, commandParams);
+      return finalizeDowngrade(upgradeContext);
     }
   }
 
@@ -132,32 +115,36 @@ public class FinalizeUpgradeAction extends AbstractUpgradeServerAction {
    * @param version     the target version of the upgrade
    * @return the command report
    */
-  private CommandReport finalizeUpgrade(String clusterName, String version,
-      Map<String, String> commandParams)
+  private CommandReport finalizeUpgrade(UpgradeContext upgradeContext)
     throws AmbariException, InterruptedException {
 
     StringBuilder outSB = new StringBuilder();
     StringBuilder errSB = new StringBuilder();
 
     try {
-      outSB.append(MessageFormat.format("Begin finalizing the upgrade of cluster {0} to version {1}\n", clusterName, version));
+      String message;
+      Set<String> servicesInUpgrade = upgradeContext.getSupportedServices();
+      if (servicesInUpgrade.isEmpty()) {
+        message = MessageFormat.format("Finalizing the upgrade to {0} for all cluster services.",
+            upgradeContext.getVersion());
+      } else {
+        message = MessageFormat.format(
+            "Finalizing the upgrade to {0} for the following services: {1}",
+            upgradeContext.getVersion(), StringUtils.join(servicesInUpgrade, ','));
+      }
+
+      outSB.append(message).append(System.lineSeparator());
 
-      Cluster cluster = m_clusters.getCluster(clusterName);
+      Cluster cluster = upgradeContext.getCluster();
       StackId clusterDesiredStackId = cluster.getDesiredStackVersion();
       StackId clusterCurrentStackId = cluster.getCurrentStackVersion();
+      String version = upgradeContext.getVersion();
+      RepositoryVersionEntity repositoryVersion = upgradeContext.getTargetRepositoryVersion();
 
-      ClusterVersionEntity upgradingClusterVersion = clusterVersionDAO.findByClusterAndStackAndVersion(
-          clusterName, clusterDesiredStackId, version);
-
-      if (upgradingClusterVersion == null) {
-        throw new AmbariException(MessageFormat.format(
-            "Cluster stack version {0} not found", version));
-      }
-
-      // Validate that all of the hosts with a version in the cluster have the
-      // version being upgraded to, and it is in an allowed state.
-      List<HostVersionEntity> hostVersions = hostVersionDAO.findByClusterStackAndVersion(
-          clusterName, clusterDesiredStackId, version);
+      // for all hosts participating in this upgrade, validate their repo
+      // versions
+      List<HostVersionEntity> hostVersions = hostVersionDAO.findHostVersionByClusterAndRepository(
+          cluster.getClusterId(), repositoryVersion);
 
       // Will include hosts whose state is INSTALLED
       Set<HostVersionEntity> hostVersionsAllowed = new HashSet<>();
@@ -213,25 +200,26 @@ public class FinalizeUpgradeAction extends AbstractUpgradeServerAction {
 
       // throw an exception if there are hosts which are not fully upgraded
       if (hostsWithoutCorrectVersionState.size() > 0) {
-        String message = String.format("The following %d host(s) have not been upgraded to version %s. " +
-                "Please install and upgrade the Stack Version on those hosts and try again.\nHosts: %s\n",
+        message = String.format("The following %d host(s) have not been upgraded to version %s. " +
+                "Please install and upgrade the Stack Version on those hosts and try again.\nHosts: %s",
             hostsWithoutCorrectVersionState.size(),
             version,
             StringUtils.join(hostsWithoutCorrectVersionState, ", "));
         outSB.append(message);
+        outSB.append(System.lineSeparator());
         throw new AmbariException(message);
       }
 
       // iterate through all host components and make sure that they are on the
       // correct version; if they are not, then this will throw an exception
-      List<InfoTuple> errors = checkHostComponentVersions(cluster, version, clusterDesiredStackId);
+      List<InfoTuple> errors = getHostComponentsWhichDidNotUpgrade(upgradeContext);
       if (! errors.isEmpty()) {
         StrBuilder messageBuff = new StrBuilder(
             String.format(
                 "The following %d host component(s) "
                     + "have not been upgraded to version %s. Please install and upgrade "
-                    + "the Stack Version on those hosts and try again.\nHost components:\n",
-                errors.size(), version));
+                    + "the Stack Version on those hosts and try again.\nHost components:",
+                errors.size(), version)).append(System.lineSeparator());
 
         for (InfoTuple error : errors) {
           messageBuff.append(String.format("%s on host %s\n", error.componentName, error.hostName));
@@ -240,29 +228,9 @@ public class FinalizeUpgradeAction extends AbstractUpgradeServerAction {
         throw new AmbariException(messageBuff.toString());
       }
 
-
-      // we're guaranteed to be ready transition to upgraded now; ensure that
-      // the transition will be allowed if the cluster state is not upgraded
-      upgradingClusterVersion = clusterVersionDAO.findByClusterAndStackAndVersion(clusterName,
-          clusterDesiredStackId, version);
-
-      if (RepositoryVersionState.INSTALLING == upgradingClusterVersion.getState()) {
-        cluster.transitionClusterVersion(clusterDesiredStackId, version, RepositoryVersionState.INSTALLED);
-
-        upgradingClusterVersion = clusterVersionDAO.findByClusterAndStackAndVersion(
-            clusterName, clusterDesiredStackId, version);
-      }
-
-      // we cannot finalize since the cluster was not ready to move into the
-      // upgraded state
-      if (RepositoryVersionState.INSTALLED != upgradingClusterVersion.getState()) {
-        throw new AmbariException(String.format("The cluster stack version state %s is not allowed to transition directly into %s",
-            upgradingClusterVersion.getState(), RepositoryVersionState.CURRENT.toString()));
-      }
-
       outSB.append(
-          String.format("Finalizing the upgraded state of host components in %d host(s).\n",
-              hostVersionsAllowed.size()));
+          String.format("Finalizing the upgrade state of %d host(s).",
+              hostVersionsAllowed.size())).append(System.lineSeparator());
 
       // Reset the upgrade state
       for (HostVersionEntity hostVersion : hostVersionsAllowed) {
@@ -275,37 +243,29 @@ public class FinalizeUpgradeAction extends AbstractUpgradeServerAction {
 
       // Impacts all hosts that have a version
       outSB.append(
-          String.format("Finalizing the version for %d host(s).\n", hostVersionsAllowed.size()));
-      cluster.mapHostVersions(hostsToUpdate, upgradingClusterVersion, RepositoryVersionState.CURRENT);
+          String.format("Finalizing the version for %d host(s).",
+              hostVersionsAllowed.size())).append(System.lineSeparator());
+
 
       versionEventPublisher.publish(new StackUpgradeFinishEvent(cluster));
-      // Reset upgrade state
-      cluster.setUpgradeEntity(null);
 
       // transitioning the cluster into CURRENT will update the current/desired
       // stack values
-      outSB.append(String.format("Finalizing the version for cluster %s.\n", clusterName));
+      outSB.append(
+          String.format("Finalizing the version for cluster %s.", cluster.getClusterName())).append(
+              System.lineSeparator());
+
       cluster.transitionClusterVersion(clusterDesiredStackId, version,
           RepositoryVersionState.CURRENT);
 
-      if (commandParams.containsKey(REQUEST_ID)) {
-        String requestId = commandParams.get(REQUEST_ID);
-        UpgradeEntity upgradeEntity = upgradeDAO.findUpgradeByRequestId(Long.valueOf(requestId));
-
-        if (null != upgradeEntity) {
-          outSB.append("Creating upgrade history.\n");
-          writeComponentHistory(cluster, upgradeEntity, clusterCurrentStackId,
-              clusterDesiredStackId);
-        } else {
-          String warning = String.format(
-              "Unable to create upgrade history because no upgrade could be found for request with ID %s\n",
-              requestId);
+      outSB.append("Creating upgrade history...").append(System.lineSeparator());
+      writeComponentHistory(upgradeContext);
 
-          outSB.append(warning);
-        }
-      }
+      // Reset upgrade state
+      cluster.setUpgradeEntity(null);
 
-      outSB.append("Upgrade was successful!\n");
+      message = String.format("The upgrade to %s has completed.", upgradeContext.getVersion());
+      outSB.append(message).append(System.lineSeparator());
       return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", outSB.toString(), errSB.toString());
     } catch (Exception e) {
       errSB.append(e.getMessage());
@@ -316,107 +276,64 @@ public class FinalizeUpgradeAction extends AbstractUpgradeServerAction {
   /**
    * Execution path for downgrade.
    *
-   * @param clusterName
-   *          the name of the cluster the downgrade is for
-   * @paran originalStackId the stack ID of the cluster before the upgrade.
-   * @paran targetStackId the stack ID that was desired for this upgrade.
-   * @param version
-   *          the target version of the downgrade
+   * @param upgradeContext
+   *          the upgrade context (not {@code null}).
    * @return the command report
    */
-  private CommandReport finalizeDowngrade(String clusterName,
-      StackId originalStackId, StackId targetStackId, String version)
+  private CommandReport finalizeDowngrade(UpgradeContext upgradeContext)
       throws AmbariException, InterruptedException {
 
-    StringBuilder out = new StringBuilder();
-    StringBuilder err = new StringBuilder();
+    StringBuilder outSB = new StringBuilder();
+    StringBuilder errSB = new StringBuilder();
 
     try {
-      Cluster cluster = m_clusters.getCluster(clusterName);
+      Cluster cluster = upgradeContext.getCluster();
       StackId currentClusterStackId = cluster.getCurrentStackVersion();
+      RepositoryVersionEntity repositoryVersion = upgradeContext.getTargetRepositoryVersion();
 
-      // Safety check that the cluster's stack (from clusterstate's current_stack_id) is equivalent to the
-      // cluster's CURRENT repo version's stack. This is to avoid deleting configs from the target stack if the customer
-      // ended up modifying their database manually after a stack upgrade and forgot to call "Save DB State".
-      ClusterVersionEntity currentClusterVersion = cluster.getCurrentClusterVersion();
-      RepositoryVersionEntity currentRepoVersion = currentClusterVersion.getRepositoryVersion();
-      StackId currentRepoStackId = currentRepoVersion.getStackId();
-      if (!currentRepoStackId.equals(originalStackId)) {
-        String msg = String.format("The stack of Cluster %s's CURRENT repo version is %s, yet the original stack id from " +
-            "the Stack Upgrade has a different value of %s. %s",
-            clusterName, currentRepoStackId.getStackId(), originalStackId.getStackId(), PREVIOUS_UPGRADE_NOT_COMPLETED_MSG);
-        out.append(msg);
-        err.append(msg);
-        throw new AmbariException("The source target stack doesn't match the cluster's CURRENT repo version's stack.");
-      }
-
-      // This was a cross-stack upgrade, meaning that configurations were created that now need to be removed.
-      if (!originalStackId.equals(targetStackId)) {
-        out.append(String.format("Will remove configs since the original stack %s differs from the target stack %s " +
-            "that Ambari just downgraded from.", originalStackId.getStackId(), targetStackId.getStackId()));
-        cluster.removeConfigurations(targetStackId);
-      }
-
-      // !!! find and make sure the cluster_version EXCEPT current are set back
-      out.append(String.format("Searching for current version for %s\n",
-          clusterName));
-
-      ClusterVersionEntity clusterVersion = clusterVersionDAO.findByClusterAndStateCurrent(clusterName);
-      if (null == clusterVersion) {
-        throw new AmbariException("Could not find current cluster version");
+      String message;
+      Set<String> servicesInUpgrade = upgradeContext.getSupportedServices();
+      if (servicesInUpgrade.isEmpty()) {
+        message = MessageFormat.format("Finalizing the downgrade to {0} for all cluster services.",
+            upgradeContext.getVersion());
+      } else {
+        message = MessageFormat.format(
+            "Finalizing the downgrade to {0} for the following services: {1}",
+            upgradeContext.getVersion(), StringUtils.join(servicesInUpgrade, ','));
       }
 
-      out.append(String.format("Comparing downgrade version %s to current cluster version %s\n",
-          version,
-          clusterVersion.getRepositoryVersion().getVersion()));
+      outSB.append(message).append(System.lineSeparator());
 
-      if (!version.equals(clusterVersion.getRepositoryVersion().getVersion())) {
-        throw new AmbariException(
-            String.format("Downgrade version %s is not the current cluster version of %s",
-                version, clusterVersion.getRepositoryVersion().getVersion()));
-      } else {
-        out.append(String.format("Downgrade version is the same as current.  Searching " +
-          "for cluster versions that do not match %s\n", version));
-      }
+      // iterate through all host components and make sure that they are on the
+      // correct version; if they are not, then this will throw an exception
+      List<InfoTuple> errors = getHostComponentsWhichDidNotUpgrade(upgradeContext);
+      if (!errors.isEmpty()) {
+        StrBuilder messageBuff = new StrBuilder(String.format(
+            "The following %d host component(s) " + "have not been downgraded to version %s\n",
+            errors.size(), upgradeContext.getVersion())).append(System.lineSeparator());
 
-      Set<String> badVersions = new HashSet<>();
-
-      // update the cluster version
-      for (ClusterVersionEntity cve : clusterVersionDAO.findByCluster(clusterName)) {
-        switch (cve.getState()) {
-          case INSTALL_FAILED:
-          case INSTALLED:
-          case INSTALLING: {
-              badVersions.add(cve.getRepositoryVersion().getVersion());
-              cve.setState(RepositoryVersionState.INSTALLED);
-              clusterVersionDAO.merge(cve);
-              break;
-            }
-          default:
-            break;
+        for (InfoTuple error : errors) {
+          messageBuff.append(String.format("%s on host %s", error.componentName, error.hostName));
+          messageBuff.append(System.lineSeparator());
         }
-      }
 
-      out.append(String.format("Found %d other version(s) not matching downgrade: %s\n",
-          badVersions.size(), StringUtils.join(badVersions, ", ")));
+        throw new AmbariException(messageBuff.toString());
+      }
 
-      Set<String> badHosts = new HashSet<>();
-      for (String badVersion : badVersions) {
-        List<HostVersionEntity> hostVersions = hostVersionDAO.findByClusterStackAndVersion(
-            clusterName, targetStackId, badVersion);
+      // find host versions
+      List<HostVersionEntity> hostVersions = hostVersionDAO.findHostVersionByClusterAndRepository(
+          cluster.getClusterId(), repositoryVersion);
 
-        for (HostVersionEntity hostVersion : hostVersions) {
-          badHosts.add(hostVersion.getHostName());
+      for( HostVersionEntity hostVersion : hostVersions ){
+        if( hostVersion.getState() != RepositoryVersionState.INSTALLED ){
           hostVersion.setState(RepositoryVersionState.INSTALLED);
           hostVersionDAO.merge(hostVersion);
         }
-      }
 
-      out.append(String.format("Found %d hosts not matching downgrade version: %s\n",
-          badHosts.size(), version));
+        List<HostComponentStateEntity> hostComponentStates = hostComponentStateDAO.findByHost(
+            hostVersion.getHostName());
 
-      for (String badHost : badHosts) {
-        List<HostComponentStateEntity> hostComponentStates = hostComponentStateDAO.findByHost(badHost);
         for (HostComponentStateEntity hostComponentState : hostComponentStates) {
           hostComponentState.setUpgradeState(UpgradeState.NONE);
           hostComponentStateDAO.merge(hostComponentState);
@@ -427,37 +344,38 @@ public class FinalizeUpgradeAction extends AbstractUpgradeServerAction {
       // original value
       cluster.setDesiredStackVersion(currentClusterStackId);
       versionEventPublisher.publish(new StackUpgradeFinishEvent(cluster));
+
       // Reset upgrade state
       cluster.setUpgradeEntity(null);
 
-      return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
-          out.toString(), err.toString());
-
+      return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", outSB.toString(), errSB.toString());
     } catch (Exception e) {
       StringWriter sw = new StringWriter();
       e.printStackTrace(new PrintWriter(sw));
-      err.append(sw.toString());
+      errSB.append(sw.toString());
 
-      return createCommandReport(-1, HostRoleStatus.FAILED, "{}",
-          out.toString(), err.toString());
+      return createCommandReport(-1, HostRoleStatus.FAILED, "{}", outSB.toString(), errSB.toString());
     }
   }
 
 
   /**
-   * Confirms that all host components that are able to provide hdp version,
-   * have been upgraded to the target version.
-   * @param cluster         the cluster the upgrade is for
-   * @param desiredVersion  the target version of the upgrade
-   * @param targetStackId     the target stack id for meta-info lookup
-   * @return the list of {@link InfoTuple} objects of host components in error
+   * Gets any host components which have not been properly upgraded.
+   *
+   * @param upgradeContext
+   *          the upgrade context (not {@code null}).
+   * @return a list of {@link InfoTuple} representing components which should
+   *         have been upgraded but did not.
    */
-  protected List<InfoTuple> checkHostComponentVersions(Cluster cluster, String desiredVersion, StackId targetStackId)
+  protected List<InfoTuple> getHostComponentsWhichDidNotUpgrade(UpgradeContext upgradeContext)
           throws AmbariException {
 
     ArrayList<InfoTuple> errors = new ArrayList<>();
 
-    Set<String> supportedServices = getSupportedServices();
+    Cluster cluster = upgradeContext.getCluster();
+    Set<String> supportedServices = upgradeContext.getSupportedServices();
+    RepositoryVersionEntity repositoryVersionEntity = upgradeContext.getTargetRepositoryVersion();
+    StackId targetStackId = repositoryVersionEntity.getStackId();
 
     for (Service service : cluster.getServices().values()) {
 
@@ -471,17 +389,12 @@ public class FinalizeUpgradeAction extends AbstractUpgradeServerAction {
           ComponentInfo componentInfo = ambariMetaInfo.getComponent(targetStackId.getStackName(),
                   targetStackId.getStackVersion(), service.getName(), serviceComponent.getName());
 
-          if (!componentInfo.isVersionAdvertised()) {
-            StackId desired = serviceComponentHost.getDesiredStackVersion();
-            StackId actual = serviceComponentHost.getStackVersion();
-            if (!desired.equals(actual)) {
-              serviceComponentHost.setStackVersion(desired);
+          if (componentInfo.isVersionAdvertised()) {
+            if (!StringUtils.equals(upgradeContext.getVersion(),
+                serviceComponentHost.getVersion())) {
+              errors.add(new InfoTuple(service.getName(), serviceComponent.getName(),
+                  serviceComponentHost.getHostName(), serviceComponentHost.getVersion()));
             }
-          } else if (componentInfo.isVersionAdvertised()
-              && !serviceComponentHost.getVersion().equals(desiredVersion)) {
-            errors.add(new InfoTuple(
-                service.getName(), serviceComponent.getName(),
-                serviceComponentHost.getHostName(), serviceComponentHost.getVersion()));
           }
         }
       }
@@ -490,15 +403,36 @@ public class FinalizeUpgradeAction extends AbstractUpgradeServerAction {
     return errors;
   }
 
-  private void writeComponentHistory(Cluster cluster, UpgradeEntity upgradeEntity,
-      StackId fromStackId, StackId toStackId) {
+  /**
+   * Writes the upgrade history for all components which participated in the
+   * upgrade.
+   *
+   * @param upgradeContext  the upgrade context (not {@code null}).
+   */
+  private void writeComponentHistory(UpgradeContext upgradeContext) throws AmbariException {
+    Cluster cluster = upgradeContext.getCluster();
+    UpgradeEntity upgradeEntity = cluster.getUpgradeInProgress();
+    Collection<Service> services = cluster.getServices().values();
+    RepositoryVersionEntity repositoryVersion = upgradeContext.getTargetRepositoryVersion();
+    StackId sourceStackId = upgradeContext.getOriginalStackId();
+    StackId targetStackId = repositoryVersion.getStackId();
+
+    StackEntity fromStack = stackDAO.find(sourceStackId.getStackName(), sourceStackId.getStackVersion());
+    StackEntity toStack = stackDAO.find(targetStackId.getStackName(), targetStackId.getStackVersion());
+
 
-    StackEntity fromStack = stackDAO.find(fromStackId.getStackName(), fromStackId.getStackVersion());
-    StackEntity toStack = stackDAO.find(toStackId.getStackName(), toStackId.getStackVersion());
+    if (!upgradeContext.getSupportedServices().isEmpty()) {
+      services = new ArrayList<>();
+
+      Set<String> serviceNames = upgradeContext.getSupportedServices();
+      for (String serviceName : serviceNames) {
+        services.add(cluster.getService(serviceName));
+      }
+    }
 
     // for every service component, if it was included in the upgrade then
     // create a historical entry
-    for (Service service : cluster.getServices().values()) {
+    for (Service service : services) {
       for (ServiceComponent serviceComponent : service.getServiceComponents().values()) {
         if (serviceComponent.isVersionAdvertised()) {
           // create the historical entry

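The same scoping idea drives the finalize logic: messages and history entries are computed against either the whole cluster or only the services named in the upgrade. Condensed from writeComponentHistory above, using only names that appear in the diff:

    // Sketch: default to every cluster service (complete upgrade), then
    // narrow to the named services when the upgrade is partial.
    Collection<Service> services = cluster.getServices().values();
    Set<String> servicesInUpgrade = upgradeContext.getSupportedServices();
    if (!servicesInUpgrade.isEmpty()) {
      services = new ArrayList<>();
      for (String serviceName : servicesInUpgrade) {
        services.add(cluster.getService(serviceName));
      }
    }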
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpdateDesiredStackAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpdateDesiredStackAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpdateDesiredStackAction.java
index 7bcb9d0..22f2e73 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpdateDesiredStackAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpdateDesiredStackAction.java
@@ -21,7 +21,9 @@ import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.VERSION;
 
 import java.io.PrintWriter;
 import java.io.StringWriter;
+import java.text.MessageFormat;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.ConcurrentMap;
 
 import org.apache.ambari.server.AmbariException;
@@ -31,14 +33,15 @@ import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.AmbariServer;
 import org.apache.ambari.server.controller.internal.UpgradeResourceProvider;
-import org.apache.ambari.server.serveraction.AbstractServerAction;
+import org.apache.ambari.server.orm.entities.UpgradeEntity;
 import org.apache.ambari.server.serveraction.ServerAction;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.state.StackInfo;
+import org.apache.ambari.server.state.UpgradeContext;
 import org.apache.ambari.server.state.stack.UpgradePack;
 import org.apache.ambari.server.state.stack.upgrade.Direction;
+import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -50,7 +53,7 @@ import com.google.inject.Inject;
  * actually changed half-way through calculating the Actions, and this serves to update the database to make it
  * evident to the user at which point it changed.
  */
-public class UpdateDesiredStackAction extends AbstractServerAction {
+public class UpdateDesiredStackAction extends AbstractUpgradeServerAction {
 
   /**
    * Logger.
@@ -91,22 +94,27 @@ public class UpdateDesiredStackAction extends AbstractServerAction {
   @Inject
   private Configuration m_configuration;
 
+  /**
+   * {@inheritDoc}
+   */
   @Override
   public CommandReport execute(ConcurrentMap<String, Object> requestSharedDataContext)
       throws AmbariException, InterruptedException {
+
     Map<String, String> commandParams = getExecutionCommand().getCommandParams();
+    String clusterName = getExecutionCommand().getClusterName();
+    Cluster cluster = clusters.getCluster(clusterName);
+    UpgradeEntity upgrade = cluster.getUpgradeInProgress();
+
+    UpgradeContext upgradeContext = getUpgradeContext(cluster);
 
     StackId originalStackId = new StackId(commandParams.get(COMMAND_PARAM_ORIGINAL_STACK));
     StackId targetStackId = new StackId(commandParams.get(COMMAND_PARAM_TARGET_STACK));
-    Direction direction = Direction.UPGRADE;
-    if(commandParams.containsKey(COMMAND_PARAM_DIRECTION)
-        && "downgrade".equals(commandParams.get(COMMAND_PARAM_DIRECTION).toLowerCase())) {
-      direction = Direction.DOWNGRADE;
-    }
-    String version = commandParams.get(COMMAND_PARAM_VERSION);
-    String upgradePackName = commandParams.get(COMMAND_PARAM_UPGRADE_PACK);
-    String clusterName = getExecutionCommand().getClusterName();
-    UpgradePack upgradePack = ambariMetaInfo.getUpgradePacks(originalStackId.getStackName(), originalStackId.getStackVersion()).get(upgradePackName);
+
+    String upgradePackName = upgrade.getUpgradePackage();
+
+    UpgradePack upgradePack = ambariMetaInfo.getUpgradePacks(originalStackId.getStackName(),
+        originalStackId.getStackVersion()).get(upgradePackName);
 
     Map<String, String> roleParams = getExecutionCommand().getRoleParams();
 
@@ -120,74 +128,56 @@ public class UpdateDesiredStackAction extends AbstractServerAction {
     }
 
     // invalidate any cached effective ID
-    Cluster cluster = clusters.getCluster(clusterName);
     cluster.invalidateUpgradeEffectiveVersion();
 
-    return updateDesiredStack(cluster, originalStackId, targetStackId, version, direction,
+    return updateDesiredRepositoryVersion(cluster, originalStackId, targetStackId, upgradeContext,
         upgradePack, userName);
   }
 
   /**
-   * Set the cluster's Desired Stack Id during an upgrade.
+   * Sets the desired repository version for services participating in the
+   * upgrade.
    *
-   * @param cluster the cluster
-   * @param originalStackId the stack Id of the cluster before the upgrade.
-   * @param targetStackId the stack Id that was desired for this upgrade.
-   * @param direction direction, either upgrade or downgrade
-   * @param upgradePack Upgrade Pack to use
-   * @param userName username performing the action
+   * @param cluster
+   *          the cluster
+   * @param originalStackId
+   *          the stack Id of the cluster before the upgrade.
+   * @param targetStackId
+   *          the stack Id that was desired for this upgrade.
+   * @param direction
+   *          direction, either upgrade or downgrade
+   * @param upgradePack
+   *          Upgrade Pack to use
+   * @param userName
+   *          username performing the action
    * @return the command report to return
    */
-  private CommandReport updateDesiredStack(
+  private CommandReport updateDesiredRepositoryVersion(
       Cluster cluster, StackId originalStackId, StackId targetStackId,
-      String version, Direction direction, UpgradePack upgradePack, String userName)
+      UpgradeContext upgradeContext, UpgradePack upgradePack, String userName)
       throws AmbariException, InterruptedException {
 
-    String clusterName = cluster.getClusterName();
     StringBuilder out = new StringBuilder();
     StringBuilder err = new StringBuilder();
 
     try {
-      StackId currentClusterStackId = cluster.getCurrentStackVersion();
-      out.append(String.format("Params: %s %s %s %s %s %s\n",
-          clusterName, originalStackId.getStackId(), targetStackId.getStackId(), version, direction.getText(false), upgradePack.getName()));
-
-      out.append(String.format("Checking if can update the Desired Stack Id to %s. The cluster's current Stack Id is %s\n", targetStackId.getStackId(), currentClusterStackId.getStackId()));
-
-      // Ensure that the target stack id exist
-      StackInfo desiredClusterStackInfo = ambariMetaInfo.getStack(targetStackId.getStackName(), targetStackId.getStackVersion());
-      if (null == desiredClusterStackInfo) {
-        String message = String.format("Parameter %s has an invalid value: %s. That Stack Id does not exist.\n",
-            COMMAND_PARAM_TARGET_STACK, targetStackId.getStackId());
-        err.append(message);
-        out.append(message);
-        return createCommandReport(-1, HostRoleStatus.FAILED, "{}", out.toString(), err.toString());
-      }
-
-      // Ensure that the current Stack Id coincides with the parameter that the user passed in.
-      if (!currentClusterStackId.equals(originalStackId)) {
-        String message = String.format("Parameter %s has invalid value: %s. " +
-            "The cluster is currently on stack %s, " + currentClusterStackId.getStackId() +
-            ", yet the parameter to this function indicates a different value.\n", COMMAND_PARAM_ORIGINAL_STACK, originalStackId.getStackId(), currentClusterStackId.getStackId());
-        err.append(message);
-        out.append(message);
-        return createCommandReport(-1, HostRoleStatus.FAILED, "{}", out.toString(), err.toString());
-      }
-
-      // Check for a no-op
-      if (currentClusterStackId.equals(targetStackId)) {
-        String message = String.format("Success! The cluster's Desired Stack Id was already set to %s\n", targetStackId.getStackId());
-        out.append(message);
-        return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", out.toString(), err.toString());
+      UpgradeResourceProvider upgradeResourceProvider = new UpgradeResourceProvider(AmbariServer.getController());
+      upgradeResourceProvider.applyStackAndProcessConfigurations(upgradeContext);
+      m_upgradeHelper.putComponentsToUpgradingState(upgradeContext);
+
+      final String message;
+      Set<String> servicesInUpgrade = upgradeContext.getSupportedServices();
+      if (servicesInUpgrade.isEmpty()) {
+        message = MessageFormat.format(
+            "Updating the desired repository version to {0} for all cluster services.",
+            upgradeContext.getVersion());
+      } else {
+        message = MessageFormat.format(
+            "Updating the desired repository version to {0} for the following services: {1}",
+            upgradeContext.getVersion(), StringUtils.join(servicesInUpgrade, ','));
       }
 
-      // Create Create new configurations that are a merge between the current stack and the desired stack
-      // Also updates the desired stack version.
-      UpgradeResourceProvider upgradeResourceProvider = new UpgradeResourceProvider(AmbariServer.getController());
-      upgradeResourceProvider.applyStackAndProcessConfigurations(targetStackId.getStackName(), cluster, version, direction, upgradePack, userName);
-      String message = String.format("Success! Set cluster's %s Desired Stack Id to %s.\n", clusterName, targetStackId.getStackId());
       out.append(message);
-
       return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", out.toString(), err.toString());
     } catch (Exception e) {
       StringWriter sw = new StringWriter();

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
index 1ef204d..88c5a59 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
@@ -196,15 +196,6 @@ public interface Cluster {
   void setDesiredStackVersion(StackId stackVersion) throws AmbariException;
 
   /**
-   * Sets the desired stack version, optionally setting all owned services,
-   * components, and host components
-   * @param stackId the stack id
-   * @param cascade {@code true} to cascade the desired version
-   */
-  void setDesiredStackVersion(StackId stackId, boolean cascade) throws AmbariException;
-
-
-  /**
    * Get current stack version
    * @return
    */
@@ -217,17 +208,6 @@ public interface Cluster {
   void setCurrentStackVersion(StackId stackVersion) throws AmbariException;
 
   /**
-   * Create host versions for all of the hosts that don't already have the stack version.
-   * @param hostNames Collection of host names
-   * @param currentClusterVersion Entity that contains the cluster's current stack (with its name and version)
-   * @param desiredState Desired state must be {@link RepositoryVersionState#CURRENT} or {@link RepositoryVersionState#UPGRADING}
-   * @throws AmbariException
-   */
-  void mapHostVersions(Set<String> hostNames,
-      ClusterVersionEntity currentClusterVersion,
-      RepositoryVersionState desiredState) throws AmbariException;
-
-  /**
    * Creates or updates host versions for all of the hosts within a cluster
    * based on state of cluster stack version. This is used to transition all
    * hosts into the correct state (which may not be
@@ -535,11 +515,17 @@ public interface Cluster {
 
   /**
    * Add service to the cluster
+   * 
    * @param serviceName
+   *          the name of the service to add (not {@code null}).
+   * @param repositoryVersion
+   *          the repository from which the service should be installed (not
+   *          {@code null}).
    * @return
    * @throws AmbariException
    */
-  Service addService(String serviceName) throws AmbariException;
+  Service addService(String serviceName, RepositoryVersionEntity repositoryVersion)
+      throws AmbariException;
 
   /**
    * Fetch desired configs for list of hosts in cluster

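For orientation, here is a minimal, hedged sketch of the new two-argument contract in use; it mirrors the addService() helper added to TestHeartbeatHandler later in this patch, and the local variable names are illustrative only:

    // Resolve a repository version first (here from the current cluster version,
    // as the updated tests do), then register the service against it.
    ClusterVersionEntity clusterVersion = cluster.getCurrentClusterVersion();
    RepositoryVersionEntity repositoryVersion = clusterVersion.getRepositoryVersion();

    // The service, and per ServiceImpl its desired state entity, is now created
    // with an explicit desired repository version instead of the cluster's stack.
    Service hdfs = cluster.addService("HDFS", repositoryVersion);
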
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java
index 5964e33..7849463 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Service.java
@@ -22,6 +22,7 @@ import java.util.Map;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.controller.ServiceResponse;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 
 public interface Service {
 
@@ -66,8 +67,6 @@ public interface Service {
 
   StackId getDesiredStackVersion();
 
-  void setDesiredStackVersion(StackId stackVersion);
-
   ServiceResponse convertToResponse();
 
   void debugDump(StringBuilder sb);
@@ -139,6 +138,16 @@ public interface Service {
    */
   void setCredentialStoreEnabled(boolean credentialStoreEnabled);
 
+  /**
+   * @return the desired repository version for this service (never {@code null}).
+   */
+  RepositoryVersionEntity getDesiredRepositoryVersion();
+
+  /**
+   * @param desiredRepositoryVersion the desired repository version to set (not {@code null}).
+   */
+  void setDesiredRepositoryVersion(RepositoryVersionEntity desiredRepositoryVersion);
+
   enum Type {
     HDFS,
     GLUSTERFS,

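A short, hedged illustration of the new accessor pair; targetRepositoryVersion is assumed to be a previously resolved RepositoryVersionEntity. Per the ServiceImpl change later in this message, setting the service-level value also cascades to every component of the service:

    // Repoint the service (and, via ServiceImpl, all of its components) at a
    // different repository version.
    Service hdfs = cluster.getService("HDFS");
    hdfs.setDesiredRepositoryVersion(targetRepositoryVersion);

    // Read it back; the desired stack id can be derived from the entity.
    RepositoryVersionEntity desired = hdfs.getDesiredRepositoryVersion();
    StackId desiredStack = desired.getStackId();
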
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
index b5b6821..80b4470 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
@@ -22,6 +22,7 @@ import java.util.Map;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.controller.ServiceComponentResponse;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 
 public interface ServiceComponent {
 
@@ -51,13 +52,18 @@ public interface ServiceComponent {
 
   void setDesiredState(State state);
 
-  StackId getDesiredStackVersion();
+  /**
+   * Gets the desired repository version for this service component.
+   *
+   * @return the desired repository version (never {@code null}).
+   */
+  RepositoryVersionEntity getDesiredRepositoryVersion();
 
-  void setDesiredStackVersion(StackId stackVersion);
+  StackId getDesiredStackVersion();
 
   String getDesiredVersion();
 
-  void setDesiredVersion(String version);
+  void setDesiredRepositoryVersion(RepositoryVersionEntity repositoryVersionEntity);
 
   /**
    * Refresh Component info due to current stack

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java
index 104e456..b7f8d29 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java
@@ -91,10 +91,6 @@ public interface ServiceComponentHost {
 
   void setDesiredState(State state);
 
-  StackId getDesiredStackVersion();
-
-  void setDesiredStackVersion(StackId stackVersion);
-
   State getState();
 
   void setState(State state);
@@ -167,10 +163,6 @@ public interface ServiceComponentHost {
    */
   UpgradeState getUpgradeState();
 
-  StackId getStackVersion();
-
-  void setStackVersion(StackId stackVersion);
-
   HostComponentAdminState getComponentAdminState();
 
   void setComponentAdminState(HostComponentAdminState attribute);
@@ -251,4 +243,11 @@ public interface ServiceComponentHost {
 
   HostComponentDesiredStateEntity getDesiredStateEntity();
 
+  /**
+   * Gets the service component.
+   *
+   * @return the service component (never {@code null}).
+   */
+  ServiceComponent getServiceComponent();
+
 }

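With the per-host stack accessors gone, callers reach version information through the owning component; a hedged sketch, assuming serviceComponentHost is already in scope:

    // Navigate from the host component to its component-level desired version.
    ServiceComponent component = serviceComponentHost.getServiceComponent();
    RepositoryVersionEntity desiredRepository = component.getDesiredRepositoryVersion();
    StackId desiredStack = component.getDesiredStackVersion();
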
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
index 4cfb250..1f9dc5b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
@@ -120,17 +120,13 @@ public class ServiceComponentImpl implements ServiceComponent {
     this.stackDAO = stackDAO;
     this.eventPublisher = eventPublisher;
 
-    StackId stackId = service.getDesiredStackVersion();
-    StackEntity stackEntity = stackDAO.find(stackId.getStackName(), stackId.getStackVersion());
-
     ServiceComponentDesiredStateEntity desiredStateEntity = new ServiceComponentDesiredStateEntity();
     desiredStateEntity.setComponentName(componentName);
     desiredStateEntity.setDesiredState(State.INIT);
-    desiredStateEntity.setDesiredVersion(State.UNKNOWN.toString());
     desiredStateEntity.setServiceName(service.getName());
     desiredStateEntity.setClusterId(service.getClusterId());
     desiredStateEntity.setRecoveryEnabled(false);
-    desiredStateEntity.setDesiredStack(stackEntity);
+    desiredStateEntity.setDesiredRepositoryVersion(service.getDesiredRepositoryVersion());
 
     updateComponentInfo();
 
@@ -394,22 +390,16 @@ public class ServiceComponentImpl implements ServiceComponent {
     }
   }
 
+  /**
+   * {@inheritDoc}
+   */
   @Override
-  public void setDesiredStackVersion(StackId stack) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Setting DesiredStackVersion of Service" + ", clusterName="
-          + service.getCluster().getClusterName() + ", clusterId="
-          + service.getCluster().getClusterId() + ", serviceName=" + service.getName()
-          + ", serviceComponentName=" + getName() + ", oldDesiredStackVersion="
-          + getDesiredStackVersion() + ", newDesiredStackVersion=" + stack);
-    }
-
+  public void setDesiredRepositoryVersion(RepositoryVersionEntity repositoryVersionEntity) {
     ServiceComponentDesiredStateEntity desiredStateEntity = serviceComponentDesiredStateDAO.findById(
         desiredStateEntityId);
 
     if (desiredStateEntity != null) {
-      StackEntity stackEntity = stackDAO.find(stack.getStackName(), stack.getStackVersion());
-      desiredStateEntity.setDesiredStack(stackEntity);
+      desiredStateEntity.setDesiredRepositoryVersion(repositoryVersionEntity);
       desiredStateEntity = serviceComponentDesiredStateDAO.merge(desiredStateEntity);
     } else {
       LOG.warn("Setting a member on an entity object that may have been "
@@ -417,26 +407,23 @@ public class ServiceComponentImpl implements ServiceComponent {
     }
   }
 
+  /**
+   * {@inheritDoc}
+   */
   @Override
-  public String getDesiredVersion() {
+  public RepositoryVersionEntity getDesiredRepositoryVersion() {
     ServiceComponentDesiredStateEntity desiredStateEntity = serviceComponentDesiredStateDAO.findById(
         desiredStateEntityId);
 
-    return desiredStateEntity.getDesiredVersion();
+    return desiredStateEntity.getDesiredRepositoryVersion();
   }
 
   @Override
-  public void setDesiredVersion(String version) {
+  public String getDesiredVersion() {
     ServiceComponentDesiredStateEntity desiredStateEntity = serviceComponentDesiredStateDAO.findById(
         desiredStateEntityId);
 
-      if (desiredStateEntity != null) {
-        desiredStateEntity.setDesiredVersion(version);
-      desiredStateEntity = serviceComponentDesiredStateDAO.merge(desiredStateEntity);
-      } else {
-        LOG.warn("Setting a member on an entity object that may have been " +
-          "previously deleted, serviceName = " + (service != null ? service.getName() : ""));
-      }
+    return desiredStateEntity.getDesiredVersion();
   }
 
   @Override
@@ -693,6 +680,7 @@ public class ServiceComponentImpl implements ServiceComponent {
 
     if (MapUtils.isNotEmpty(map)) {
       String desiredVersion = component.getDesiredVersion();
+      RepositoryVersionEntity desiredRepositoryVersion = service.getDesiredRepositoryVersion();
 
       List<HostComponentStateEntity> hostComponents = hostComponentDAO.findByServiceAndComponentAndNotVersion(
           component.getServiceName(), component.getComponentName(), reportedVersion);
@@ -705,7 +693,7 @@ public class ServiceComponentImpl implements ServiceComponent {
       if (StackVersionListener.UNKNOWN_VERSION.equals(desiredVersion)) {
         if (CollectionUtils.isEmpty(hostComponents)) {
           // all host components are the same version as reported
-          component.setDesiredVersion(reportedVersion);
+          component.setDesiredRepositoryVersion(desiredRepositoryVersion);
           component.setRepositoryState(RepositoryVersionState.CURRENT);
         } else {
           // desired is UNKNOWN and there's a mix of versions in the host components

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceFactory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceFactory.java
index a3a041b..1e1795e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceFactory.java
@@ -19,10 +19,33 @@
 package org.apache.ambari.server.state;
 
 import org.apache.ambari.server.orm.entities.ClusterServiceEntity;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 
 public interface ServiceFactory {
 
-  Service createNew(Cluster cluster, String serviceName);
+  /**
+   * Creates a new service in memory and then persists it to the database.
+   *
+   * @param cluster
+   *          the cluster the service is for (not {@code null}).
+   * @param serviceName
+   *          the name of the service (not {@code null}).
+   * @param desiredRepositoryVersion
+   *          the repository version of the service (not {@code null}).
+   * @return the newly created and persisted service.
+   */
+  Service createNew(Cluster cluster, String serviceName,
+      RepositoryVersionEntity desiredRepositoryVersion);
 
+  /**
+   * Creates an in-memory representation of a service from an existing database
+   * object.
+   *
+   * @param cluster
+   *          the cluster the service is installed in (not {@code null}).
+   * @param serviceEntity
+   *          the existing database entry for the service (not {@code null}).
+   * @return the service created from the existing database entry.
+   */
   Service createExisting(Cluster cluster, ClusterServiceEntity serviceEntity);
 }

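As a hedged sketch of how ClusterImpl (later in this message) consumes the revised factory contract; cluster, serviceName, repositoryVersion, and clusterServiceEntity are assumed to be in scope:

    // Create-and-persist now threads the repository version through the
    // assisted-inject factory instead of looking up a StackEntity.
    Service service = serviceFactory.createNew(cluster, serviceName, repositoryVersion);

    // Rehydrating an existing service is unchanged apart from the removed
    // StackDAO dependency in ServiceImpl.
    Service existing = serviceFactory.createExisting(cluster, clusterServiceEntity);
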
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
index a0c0db1..e537326 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
@@ -18,6 +18,7 @@
 
 package org.apache.ambari.server.state;
 
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -39,11 +40,11 @@ import org.apache.ambari.server.orm.dao.ClusterDAO;
 import org.apache.ambari.server.orm.dao.ClusterServiceDAO;
 import org.apache.ambari.server.orm.dao.ServiceConfigDAO;
 import org.apache.ambari.server.orm.dao.ServiceDesiredStateDAO;
-import org.apache.ambari.server.orm.dao.StackDAO;
 import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
 import org.apache.ambari.server.orm.entities.ClusterEntity;
 import org.apache.ambari.server.orm.entities.ClusterServiceEntity;
 import org.apache.ambari.server.orm.entities.ClusterServiceEntityPK;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity;
 import org.apache.ambari.server.orm.entities.ServiceConfigEntity;
 import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntity;
@@ -82,11 +83,6 @@ public class ServiceImpl implements Service {
   private final ServiceComponentFactory serviceComponentFactory;
 
   /**
-   * Data access object for retrieving stack instances.
-   */
-  private final StackDAO stackDAO;
-
-  /**
    * Used to publish events relating to service CRUD operations.
    */
   private final AmbariEventPublisher eventPublisher;
@@ -97,17 +93,16 @@ public class ServiceImpl implements Service {
   private final String serviceName;
 
   @AssistedInject
-  ServiceImpl(@Assisted Cluster cluster, @Assisted String serviceName, ClusterDAO clusterDAO,
+  ServiceImpl(@Assisted Cluster cluster, @Assisted String serviceName,
+      @Assisted RepositoryVersionEntity desiredRepositoryVersion, ClusterDAO clusterDAO,
       ClusterServiceDAO clusterServiceDAO, ServiceDesiredStateDAO serviceDesiredStateDAO,
-      ServiceComponentFactory serviceComponentFactory, StackDAO stackDAO,
-      AmbariMetaInfo ambariMetaInfo, AmbariEventPublisher eventPublisher)
-      throws AmbariException {
+      ServiceComponentFactory serviceComponentFactory, AmbariMetaInfo ambariMetaInfo,
+      AmbariEventPublisher eventPublisher) throws AmbariException {
     this.cluster = cluster;
     this.clusterDAO = clusterDAO;
     this.clusterServiceDAO = clusterServiceDAO;
     this.serviceDesiredStateDAO = serviceDesiredStateDAO;
     this.serviceComponentFactory = serviceComponentFactory;
-    this.stackDAO = stackDAO;
     this.eventPublisher = eventPublisher;
     this.serviceName = serviceName;
     this.ambariMetaInfo = ambariMetaInfo;
@@ -118,15 +113,14 @@ public class ServiceImpl implements Service {
     ServiceDesiredStateEntity serviceDesiredStateEntity = new ServiceDesiredStateEntity();
     serviceDesiredStateEntity.setServiceName(serviceName);
     serviceDesiredStateEntity.setClusterId(cluster.getClusterId());
+    serviceDesiredStateEntity.setDesiredRepositoryVersion(desiredRepositoryVersion);
     serviceDesiredStateEntityPK = getServiceDesiredStateEntityPK(serviceDesiredStateEntity);
     serviceEntityPK = getServiceEntityPK(serviceEntity);
 
     serviceDesiredStateEntity.setClusterServiceEntity(serviceEntity);
     serviceEntity.setServiceDesiredStateEntity(serviceDesiredStateEntity);
 
-    StackId stackId = cluster.getDesiredStackVersion();
-    StackEntity stackEntity = stackDAO.find(stackId.getStackName(), stackId.getStackVersion());
-    serviceDesiredStateEntity.setDesiredStack(stackEntity);
+    StackId stackId = desiredRepositoryVersion.getStackId();
 
     ServiceInfo sInfo = ambariMetaInfo.getService(stackId.getStackName(),
         stackId.getStackVersion(), serviceName);
@@ -143,15 +137,13 @@ public class ServiceImpl implements Service {
   ServiceImpl(@Assisted Cluster cluster, @Assisted ClusterServiceEntity serviceEntity,
       ClusterDAO clusterDAO, ClusterServiceDAO clusterServiceDAO,
       ServiceDesiredStateDAO serviceDesiredStateDAO,
-      ServiceComponentFactory serviceComponentFactory, StackDAO stackDAO,
-      AmbariMetaInfo ambariMetaInfo, AmbariEventPublisher eventPublisher)
-      throws AmbariException {
+      ServiceComponentFactory serviceComponentFactory, AmbariMetaInfo ambariMetaInfo,
+      AmbariEventPublisher eventPublisher) throws AmbariException {
     this.cluster = cluster;
     this.clusterDAO = clusterDAO;
     this.clusterServiceDAO = clusterServiceDAO;
     this.serviceDesiredStateDAO = serviceDesiredStateDAO;
     this.serviceComponentFactory = serviceComponentFactory;
-    this.stackDAO = stackDAO;
     this.eventPublisher = eventPublisher;
     serviceName = serviceEntity.getServiceName();
     this.ambariMetaInfo = ambariMetaInfo;
@@ -309,37 +301,46 @@ public class ServiceImpl implements Service {
     serviceDesiredStateDAO.merge(serviceDesiredStateEntity);
   }
 
+  /**
+   * {@inheritDoc}
+   */
   @Override
   public StackId getDesiredStackVersion() {
     ServiceDesiredStateEntity serviceDesiredStateEntity = getServiceDesiredStateEntity();
     StackEntity desiredStackEntity = serviceDesiredStateEntity.getDesiredStack();
-    if( null != desiredStackEntity ) {
-      return new StackId(desiredStackEntity);
-    } else {
-      return null;
-    }
+    return new StackId(desiredStackEntity);
   }
 
+  /**
+   * {@inheritDoc}
+   */
   @Override
-  public void setDesiredStackVersion(StackId stack) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Setting DesiredStackVersion of Service" + ", clusterName="
-          + cluster.getClusterName() + ", clusterId="
-          + cluster.getClusterId() + ", serviceName=" + getName()
-          + ", oldDesiredStackVersion=" + getDesiredStackVersion()
-          + ", newDesiredStackVersion=" + stack);
-    }
+  public RepositoryVersionEntity getDesiredRepositoryVersion() {
+    ServiceDesiredStateEntity serviceDesiredStateEntity = getServiceDesiredStateEntity();
+    return serviceDesiredStateEntity.getDesiredRepositoryVersion();
+  }
 
-    StackEntity stackEntity = stackDAO.find(stack.getStackName(), stack.getStackVersion());
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  @Transactional
+  public void setDesiredRepositoryVersion(RepositoryVersionEntity repositoryVersionEntity) {
     ServiceDesiredStateEntity serviceDesiredStateEntity = getServiceDesiredStateEntity();
-    serviceDesiredStateEntity.setDesiredStack(stackEntity);
+    serviceDesiredStateEntity.setDesiredRepositoryVersion(repositoryVersionEntity);
     serviceDesiredStateDAO.merge(serviceDesiredStateEntity);
+
+    Collection<ServiceComponent> components = getServiceComponents().values();
+    for (ServiceComponent component : components) {
+      component.setDesiredRepositoryVersion(repositoryVersionEntity);
+    }
   }
 
   @Override
   public ServiceResponse convertToResponse() {
     ServiceResponse r = new ServiceResponse(cluster.getClusterId(), cluster.getClusterName(),
-        getName(), getDesiredStackVersion().getStackId(), getDesiredState().toString(),
+        getName(), getDesiredStackVersion().getStackId(),
+        getDesiredRepositoryVersion().getVersion(), getDesiredState().toString(),
         isCredentialStoreSupported(), isCredentialStoreEnabled());
 
     r.setMaintenanceState(getMaintenanceState().name());
@@ -612,10 +613,6 @@ public class ServiceImpl implements Service {
     return getServiceDesiredStateEntity().getMaintenanceState();
   }
 
-  private ClusterServiceEntity getServiceEntity() {
-    return clusterServiceDAO.findByPK(serviceEntityPK);
-  }
-
   private ClusterServiceEntityPK getServiceEntityPK(ClusterServiceEntity serviceEntity) {
     ClusterServiceEntityPK pk = new ClusterServiceEntityPK();
     pk.setClusterId(serviceEntity.getClusterId());

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
index 97f5003..f1bd900 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
@@ -131,6 +131,13 @@ public class UpgradeContext {
    */
   private StackId m_targetStackId;
 
+  /**
+   * The target repository version of the upgrade. This is the same
+   * regardless of whether the current direction is {@link Direction#UPGRADE} or
+   * {@link Direction#DOWNGRADE}.
+   */
+  private RepositoryVersionEntity m_targetRepositoryVersion;
+
   private MasterHostResolver m_resolver;
   private AmbariMetaInfo m_metaInfo;
   private List<ServiceComponentHost> m_unhealthy = new ArrayList<>();
@@ -214,7 +221,7 @@ public class UpgradeContext {
     m_upgradeRequestMap = upgradeRequestMap;
 
     // sets the original/target stacks - requires direction and cluster
-    setSourceAndTargetStacks();
+    setSourceAndTargetVersions();
   }
 
   /**
@@ -239,7 +246,7 @@ public class UpgradeContext {
     m_version = upgradeEntity.getToVersion();
 
     // sets the original/target stacks - requires direction and cluster
-    setSourceAndTargetStacks();
+    setSourceAndTargetVersions();
 
     if (m_direction == Direction.DOWNGRADE) {
       m_downgradeFromVersion = upgradeEntity.getFromVersion();
@@ -248,16 +255,18 @@ public class UpgradeContext {
     // since this constructor is initialized from an entity, then this map is
     // not present
     m_upgradeRequestMap = Collections.emptyMap();
+
+    m_autoSkipComponentFailures = upgradeEntity.isComponentFailureAutoSkipped();
+    m_autoSkipServiceCheckFailures = upgradeEntity.isServiceCheckFailureAutoSkipped();
   }
 
   /**
-   * Sets the source and target stack IDs. This will also set the effective
-   * stack ID based on the already-set {@link UpgradeType} and
-   * {@link Direction}.
+   * Sets the source and target versions. This will also set the effective stack
+   * ID based on the already-set {@link UpgradeType} and {@link Direction}.
    *
    * @see #getEffectiveStackId()
    */
-  private void setSourceAndTargetStacks() {
+  private void setSourceAndTargetVersions() {
     StackId sourceStackId = null;
 
     // target stack will not always be what it is today - tagging as experimental
@@ -268,17 +277,20 @@ public class UpgradeContext {
       case UPGRADE:
         sourceStackId = m_cluster.getCurrentStackVersion();
 
-        RepositoryVersionEntity targetRepositoryVersion = m_repoVersionDAO.findByStackNameAndVersion(
+        m_targetRepositoryVersion = m_repoVersionDAO.findByStackNameAndVersion(
             sourceStackId.getStackName(), m_version);
 
         // !!! TODO check the repo_version for patch-ness and restrict the
         // context to those services that require it. Consult the version
         // definition and add the service names to supportedServices
-        targetStackId = targetRepositoryVersion.getStackId();
+        targetStackId = m_targetRepositoryVersion.getStackId();
         break;
       case DOWNGRADE:
         sourceStackId = m_cluster.getCurrentStackVersion();
         targetStackId = m_cluster.getDesiredStackVersion();
+
+        m_targetRepositoryVersion = m_repoVersionDAO.findByStackNameAndVersion(
+            targetStackId.getStackName(), m_version);
         break;
     }
 
@@ -436,11 +448,13 @@ public class UpgradeContext {
   }
 
   /**
-   * @param targetStackId
-   *          the targetStackId to set
+   * Gets the target repository version for this upgrade.
+   *
+   * @return the target repository version for this upgrade (never
+   *         {@code null}).
    */
-  public void setTargetStackId(StackId targetStackId) {
-    m_targetStackId = targetStackId;
+  public RepositoryVersionEntity getTargetRepositoryVersion() {
+    return m_targetRepositoryVersion;
   }
 
   /**

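A hedged sketch of how downstream code is expected to consume the new accessor; the entity itself is resolved in setSourceAndTargetVersions() above via RepositoryVersionDAO.findByStackNameAndVersion():

    // Consumers no longer set a target stack id on the context; they read the
    // resolved target repository version and derive stack and version from it.
    RepositoryVersionEntity targetRepositoryVersion = upgradeContext.getTargetRepositoryVersion();
    StackId targetStackId = targetRepositoryVersion.getStackId();
    String targetVersion = targetRepositoryVersion.getVersion();
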
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
index 92e01c2..bb84fb7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
@@ -21,6 +21,7 @@ import java.text.MessageFormat;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
@@ -28,6 +29,8 @@ import java.util.Set;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import org.apache.ambari.annotations.Experimental;
+import org.apache.ambari.annotations.ExperimentalFeature;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.controller.internal.TaskResourceProvider;
@@ -722,20 +725,34 @@ public class UpgradeHelper {
    * the upgrade state individually, we wrap this method inside of a transaction
    * to prevent 1000's of transactions from being opened and committed.
    *
-   * @param version
-   *          desired version (like 2.2.1.0-1234) for upgrade
-   * @param targetServices
-   *          targets for upgrade
-   * @param targetStack
-   *          the target stack for the components.  Express and Rolling upgrades determine
-   *          the "correct" stack differently, so the component's desired stack id is not
-   *          a reliable indicator.
+   * @param upgradeContext
+   *          the upgrade context (not {@code null}).
    */
   @Transactional
-  public void putComponentsToUpgradingState(String version,
-      Map<Service, Set<ServiceComponent>> targetServices, StackId targetStack) throws AmbariException {
+  @Experimental(feature = ExperimentalFeature.PATCH_UPGRADES)
+  public void putComponentsToUpgradingState(UpgradeContext upgradeContext) throws AmbariException {
+
+    // determine which services/components will participate in the upgrade
+    Cluster cluster = upgradeContext.getCluster();
+    Set<Service> services = new HashSet<>(cluster.getServices().values());
+    Map<Service, Set<ServiceComponent>> targetServices = new HashMap<>();
+    for (Service service : services) {
+      if (upgradeContext.isServiceSupported(service.getName())) {
+        Set<ServiceComponent> serviceComponents = new HashSet<>(
+            service.getServiceComponents().values());
+
+        targetServices.put(service, serviceComponents);
+      }
+    }
+
+    RepositoryVersionEntity targetRepositoryVersion = upgradeContext.getTargetRepositoryVersion();
+    StackId targetStack = targetRepositoryVersion.getStackId();
 
     for (Map.Entry<Service, Set<ServiceComponent>> entry: targetServices.entrySet()) {
+      // set service desired repo
+      Service service = entry.getKey();
+      service.setDesiredRepositoryVersion(targetRepositoryVersion);
+
       for (ServiceComponent serviceComponent: entry.getValue()) {
 
         boolean versionAdvertised = false;
@@ -751,25 +768,25 @@ public class UpgradeHelper {
               StackVersionListener.UNKNOWN_VERSION);
         }
 
-        UpgradeState upgradeState = UpgradeState.IN_PROGRESS;
-        String desiredVersion = version;
-
+        UpgradeState upgradeStateToSet = UpgradeState.IN_PROGRESS;
         if (!versionAdvertised) {
-          upgradeState = UpgradeState.NONE;
-          desiredVersion = StackVersionListener.UNKNOWN_VERSION;
+          upgradeStateToSet = UpgradeState.NONE;
         }
 
         for (ServiceComponentHost serviceComponentHost: serviceComponent.getServiceComponentHosts().values()) {
-          serviceComponentHost.setUpgradeState(upgradeState);
+          if (serviceComponentHost.getUpgradeState() != upgradeStateToSet) {
+            serviceComponentHost.setUpgradeState(upgradeStateToSet);
+          }
 
           // !!! if we aren't version advertised, but there IS a version, set it.
-          if (!versionAdvertised &&
-              !serviceComponentHost.getVersion().equals(StackVersionListener.UNKNOWN_VERSION)) {
+          if (!versionAdvertised && !StringUtils.equals(StackVersionListener.UNKNOWN_VERSION,
+              serviceComponentHost.getVersion())) {
             serviceComponentHost.setVersion(StackVersionListener.UNKNOWN_VERSION);
           }
         }
-        serviceComponent.setDesiredVersion(desiredVersion);
 
+        // set component desired repo
+        serviceComponent.setDesiredRepositoryVersion(targetRepositoryVersion);
       }
     }
   }

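For reference, a hedged paraphrase of the call site in UpdateDesiredStackAction shown earlier in this message; the helper now derives the participating services, the target repository version, and the per-host upgrade state entirely from the context:

    // Merge configurations and update desired stack information for the upgrade.
    UpgradeResourceProvider upgradeResourceProvider =
        new UpgradeResourceProvider(AmbariServer.getController());
    upgradeResourceProvider.applyStackAndProcessConfigurations(upgradeContext);

    // Mark each participating host component IN_PROGRESS (or NONE when the
    // component does not advertise a version) and set the desired repository
    // version on every service and component in the upgrade.
    m_upgradeHelper.putComponentsToUpgradingState(upgradeContext);
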
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
index 228cf79..21c275b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
@@ -42,6 +42,8 @@ import javax.annotation.Nullable;
 import javax.persistence.EntityManager;
 import javax.persistence.RollbackException;
 
+import org.apache.ambari.annotations.Experimental;
+import org.apache.ambari.annotations.ExperimentalFeature;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.ConfigGroupNotFoundException;
 import org.apache.ambari.server.DuplicateResourceException;
@@ -161,7 +163,6 @@ import com.google.common.collect.Iterables;
 import com.google.common.collect.ListMultimap;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Multimap;
-import com.google.common.collect.Sets;
 import com.google.common.eventbus.Subscribe;
 import com.google.inject.Inject;
 import com.google.inject.Injector;
@@ -880,14 +881,20 @@ public class ClusterImpl implements Cluster {
     services.put(service.getName(), service);
   }
 
+  /**
+   * {@inheritDoc}
+   */
   @Override
-  public Service addService(String serviceName) throws AmbariException {
+  public Service addService(String serviceName, RepositoryVersionEntity repositoryVersion) throws AmbariException {
     if (services.containsKey(serviceName)) {
-      throw new AmbariException("Service already exists" + ", clusterName=" + getClusterName()
-          + ", clusterId=" + getClusterId() + ", serviceName=" + serviceName);
+      String message = MessageFormat.format("The {0} service already exists in {1}", serviceName,
+          getClusterName());
+
+      throw new AmbariException(message);
     }
 
-    Service service = serviceFactory.createNew(this, serviceName);
+    @Experimental(feature = ExperimentalFeature.PATCH_UPGRADES)
+    Service service = serviceFactory.createNew(this, serviceName, repositoryVersion);
     addService(service);
 
     return service;
@@ -915,11 +922,6 @@ public class ClusterImpl implements Cluster {
 
   @Override
   public void setDesiredStackVersion(StackId stackId) throws AmbariException {
-    setDesiredStackVersion(stackId, false);
-  }
-
-  @Override
-  public void setDesiredStackVersion(StackId stackId, boolean cascade) throws AmbariException {
     clusterGlobalLock.writeLock().lock();
     try {
       if (LOG.isDebugEnabled()) {
@@ -937,19 +939,6 @@ public class ClusterImpl implements Cluster {
       clusterEntity.setDesiredStack(stackEntity);
       clusterEntity = clusterDAO.merge(clusterEntity);
 
-      if (cascade) {
-        for (Service service : getServices().values()) {
-          service.setDesiredStackVersion(stackId);
-
-          for (ServiceComponent sc : service.getServiceComponents().values()) {
-            sc.setDesiredStackVersion(stackId);
-
-            for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
-              sch.setDesiredStackVersion(stackId);
-            }
-          }
-        }
-      }
       loadServiceConfigTypes();
     } finally {
       clusterGlobalLock.writeLock().unlock();
@@ -1014,6 +1003,7 @@ public class ClusterImpl implements Cluster {
    * @return
    */
   @Override
+  @Experimental(feature = ExperimentalFeature.PATCH_UPGRADES)
   public ClusterVersionEntity getCurrentClusterVersion() {
     Collection<ClusterVersionEntity> clusterVersionEntities = getClusterEntity().getClusterVersionEntities();
     for (ClusterVersionEntity clusterVersionEntity : clusterVersionEntities) {
@@ -1022,6 +1012,11 @@ public class ClusterImpl implements Cluster {
         return clusterVersionEntity;
       }
     }
+
+    if( clusterVersionEntities.size() == 1 ) {
+      return clusterVersionEntities.iterator().next();
+    }
+
     return null;
   }
 
@@ -1120,81 +1115,6 @@ public class ClusterImpl implements Cluster {
   }
 
   /**
-   * During the Finalize Action, want to transition all Host Versions from INSTALLED to CURRENT, and the last CURRENT one to INSTALLED.
-   * @param hostNames Collection of host names
-   * @param currentClusterVersion Entity that contains the cluster's current stack (with its name and version)
-   * @param desiredState Desired state must be {@link RepositoryVersionState#CURRENT}
-   * @throws AmbariException
-   */
-  @Override
-  public void mapHostVersions(Set<String> hostNames, ClusterVersionEntity currentClusterVersion, RepositoryVersionState desiredState) throws AmbariException {
-    if (currentClusterVersion == null) {
-      throw new AmbariException("Could not find current stack version of cluster " + getClusterName());
-    }
-
-    final Set<RepositoryVersionState> validStates = Sets.newHashSet(RepositoryVersionState.CURRENT);
-
-    if (!validStates.contains(desiredState)) {
-      throw new AmbariException("The state must be one of [" + StringUtils.join(validStates, ", ") + "]");
-    }
-
-    clusterGlobalLock.writeLock().lock();
-    try {
-      StackEntity repoVersionStackEntity = currentClusterVersion.getRepositoryVersion().getStack();
-      StackId repoVersionStackId = new StackId(repoVersionStackEntity);
-
-      Map<String, HostVersionEntity> existingHostToHostVersionEntity = new HashMap<>();
-      List<HostVersionEntity> existingHostVersionEntities = hostVersionDAO.findByClusterStackAndVersion(
-        getClusterName(), repoVersionStackId,
-        currentClusterVersion.getRepositoryVersion().getVersion());
-
-      if (existingHostVersionEntities != null) {
-        for (HostVersionEntity entity : existingHostVersionEntities) {
-          existingHostToHostVersionEntity.put(entity.getHostName(), entity);
-        }
-      }
-
-      Sets.SetView<String> intersection = Sets.intersection(
-        existingHostToHostVersionEntity.keySet(), hostNames);
-
-      for (String hostname : hostNames) {
-        List<HostVersionEntity> currentHostVersions = hostVersionDAO.findByClusterHostAndState(
-            getClusterName(), hostname, RepositoryVersionState.CURRENT);
-        HostVersionEntity currentHostVersionEntity = (currentHostVersions != null && currentHostVersions.size() == 1) ? currentHostVersions.get(0)
-            : null;
-
-          // Notice that if any hosts already have the desired stack and version, regardless of the state, we try
-          // to be robust and only insert records for the missing hosts.
-          if (!intersection.contains(hostname)) {
-            // According to the business logic, we don't create objects in a CURRENT state.
-            HostEntity hostEntity = hostDAO.findByName(hostname);
-            HostVersionEntity hostVersionEntity = new HostVersionEntity(hostEntity, currentClusterVersion.getRepositoryVersion(), desiredState);
-            hostVersionDAO.create(hostVersionEntity);
-          } else {
-            HostVersionEntity hostVersionEntity = existingHostToHostVersionEntity.get(hostname);
-            if (hostVersionEntity.getState() != desiredState) {
-              hostVersionEntity.setState(desiredState);
-            hostVersionEntity = hostVersionDAO.merge(hostVersionEntity);
-            }
-
-          // Maintain the invariant that only one HostVersionEntity is allowed
-          // to have a state of CURRENT.
-          if (currentHostVersionEntity != null
-              && !currentHostVersionEntity.getRepositoryVersion().equals(
-                  hostVersionEntity.getRepositoryVersion())
-              && desiredState == RepositoryVersionState.CURRENT
-              && currentHostVersionEntity.getState() == RepositoryVersionState.CURRENT) {
-            currentHostVersionEntity.setState(RepositoryVersionState.INSTALLED);
-            hostVersionDAO.merge(currentHostVersionEntity);
-          }
-        }
-      }
-    } finally {
-      clusterGlobalLock.writeLock().unlock();
-    }
-  }
-
-  /**
    * {@inheritDoc}
    */
   @Override


[6/9] ambari git commit: AMBARI-20894 - Setting Desired Stack Ids to Correct Values During Service and Patch Upgrades (jonathanhurley)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
index 909bf69..83e7d56 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
@@ -75,6 +75,8 @@ import org.apache.ambari.server.audit.AuditLogger;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.serveraction.kerberos.KerberosIdentityDataFileWriter;
 import org.apache.ambari.server.serveraction.kerberos.KerberosIdentityDataFileWriterFactory;
 import org.apache.ambari.server.serveraction.kerberos.KerberosServerAction;
@@ -173,7 +175,7 @@ public class TestHeartbeatHandler {
     replay(am);
 
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
+    Service hdfs = addService(cluster, HDFS);
     hdfs.addServiceComponent(DATANODE);
     hdfs.addServiceComponent(NAMENODE);
     hdfs.addServiceComponent(SECONDARY_NAMENODE);
@@ -231,7 +233,7 @@ public class TestHeartbeatHandler {
   @SuppressWarnings("unchecked")
   public void testStatusHeartbeatWithAnnotation() throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
+    Service hdfs = addService(cluster, HDFS);
     hdfs.addServiceComponent(DATANODE);
     hdfs.addServiceComponent(NAMENODE);
     hdfs.addServiceComponent(SECONDARY_NAMENODE);
@@ -282,7 +284,7 @@ public class TestHeartbeatHandler {
   @SuppressWarnings("unchecked")
   public void testLiveStatusUpdateAfterStopFailed() throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
+    Service hdfs = addService(cluster, HDFS);
     hdfs.addServiceComponent(DATANODE);
     hdfs.getServiceComponent(DATANODE).
         addServiceComponentHost(DummyHostname1);
@@ -385,7 +387,7 @@ public class TestHeartbeatHandler {
   @Test
   public void testRegistrationRecoveryConfig() throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
+    Service hdfs = addService(cluster, HDFS);
 
     hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
     hdfs.getServiceComponent(DATANODE);
@@ -458,7 +460,7 @@ public class TestHeartbeatHandler {
     HeartBeatHandler handler = new HeartBeatHandler(fsm, new ActionQueue(), am,
             injector);
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
+    Service hdfs = addService(cluster, HDFS);
 
     /**
      * Add three service components enabled for auto start.
@@ -792,7 +794,7 @@ public class TestHeartbeatHandler {
   @SuppressWarnings("unchecked")
   public void testTaskInProgressHandling() throws Exception, InvalidStateTransitionException {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
+    Service hdfs = addService(cluster, HDFS);
     hdfs.addServiceComponent(DATANODE);
     hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
     hdfs.addServiceComponent(NAMENODE);
@@ -849,7 +851,7 @@ public class TestHeartbeatHandler {
   @SuppressWarnings("unchecked")
   public void testOPFailedEventForAbortedTask() throws Exception, InvalidStateTransitionException {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
+    Service hdfs = addService(cluster, HDFS);
     hdfs.addServiceComponent(DATANODE);
     hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
     hdfs.addServiceComponent(NAMENODE);
@@ -921,9 +923,9 @@ public class TestHeartbeatHandler {
 
   @Test
   @SuppressWarnings("unchecked")
-  public void testStatusHeartbeatWithVersion() throws Exception {
+  public void testStatusHeartbeat() throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
+    Service hdfs = addService(cluster, HDFS);
     hdfs.addServiceComponent(DATANODE);
     hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
     hdfs.addServiceComponent(NAMENODE);
@@ -938,15 +940,9 @@ public class TestHeartbeatHandler {
     ServiceComponentHost serviceComponentHost3 = clusters.getCluster(DummyCluster).getService(HDFS).
         getServiceComponent(HDFS_CLIENT).getServiceComponentHost(DummyHostname1);
 
-    StackId stack130 = new StackId("HDP-1.3.0");
-    StackId stack120 = new StackId("HDP-1.2.0");
-
     serviceComponentHost1.setState(State.INSTALLED);
     serviceComponentHost2.setState(State.STARTED);
     serviceComponentHost3.setState(State.STARTED);
-    serviceComponentHost1.setStackVersion(stack130);
-    serviceComponentHost2.setStackVersion(stack120);
-    serviceComponentHost3.setStackVersion(stack120);
 
     HeartBeat hb = new HeartBeat();
     hb.setTimestamp(System.currentTimeMillis());
@@ -980,12 +976,6 @@ public class TestHeartbeatHandler {
     handler.handleHeartBeat(hb);
     heartbeatProcessor.processHeartbeat(hb);
 
-    assertEquals("Matching value " + serviceComponentHost1.getStackVersion(),
-        stack130, serviceComponentHost1.getStackVersion());
-    assertEquals("Matching value " + serviceComponentHost2.getStackVersion(),
-        stack120, serviceComponentHost2.getStackVersion());
-    assertEquals("Matching value " + serviceComponentHost3.getStackVersion(),
-        stack130, serviceComponentHost3.getStackVersion());
     assertTrue(hb.getAgentEnv().getHostHealth().getServerTimeStampAtReporting() >= hb.getTimestamp());
   }
 
@@ -998,7 +988,7 @@ public class TestHeartbeatHandler {
 
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Host hostObject = clusters.getHost(DummyHostname1);
-    Service hdfs = cluster.addService(HDFS);
+    Service hdfs = addService(cluster, HDFS);
     hdfs.addServiceComponent(DATANODE);
     hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
     hdfs.addServiceComponent(NAMENODE);
@@ -1079,7 +1069,7 @@ public class TestHeartbeatHandler {
 
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
     Host hostObject = clusters.getHost(DummyHostname1);
-    Service hdfs = cluster.addService(HDFS);
+    Service hdfs = addService(cluster, HDFS);
     hdfs.addServiceComponent(DATANODE);
     hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
     hdfs.addServiceComponent(NAMENODE);
@@ -1403,7 +1393,7 @@ public class TestHeartbeatHandler {
   @SuppressWarnings("unchecked")
   public void testCommandStatusProcesses_empty() throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
+    Service hdfs = addService(cluster, HDFS);
     hdfs.addServiceComponent(DATANODE);
     hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
     hdfs.getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1).setState(State.STARTED);
@@ -1590,4 +1580,21 @@ public class TestHeartbeatHandler {
     return dataDirectory;
   }
 
+  /**
+   * Adds the service to the cluster using the current cluster version as the
+   * repository version for the service.
+   *
+   * @param cluster
+   *          the cluster.
+   * @param serviceName
+   *          the service name.
+   * @return the newly added service.
+   * @throws AmbariException
+   */
+  private Service addService(Cluster cluster, String serviceName) throws AmbariException {
+    ClusterVersionEntity clusterVersion = cluster.getCurrentClusterVersion();
+    RepositoryVersionEntity repositoryVersion = clusterVersion.getRepositoryVersion();
+    return cluster.addService(serviceName, repositoryVersion);
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
index caf64be..4c536a9 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatMonitor.java
@@ -41,6 +41,7 @@ import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.orm.OrmTestHelper;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
@@ -74,7 +75,6 @@ public class TestHeartbeatMonitor {
 
   private String hostname1 = "host1";
   private String hostname2 = "host2";
-  private String hostname3 = "host3";
   private String clusterName = "cluster1";
   private String serviceName = "HDFS";
   private int heartbeatMonitorWakeupIntervalMS = 30;
@@ -151,7 +151,10 @@ public class TestHeartbeatMonitor {
     setOsFamily(clusters.getHost(hostname2), "redhat", "6.3");
     clusters.addCluster(clusterName, stackId);
     Cluster cluster = clusters.getCluster(clusterName);
-    helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
+
+    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(stackId,
+        stackId.getStackVersion());
+
     cluster.createClusterVersion(stackId, stackId.getStackVersion(), "admin",
         RepositoryVersionState.INSTALLING);
     Set<String> hostNames = new HashSet<String>(){{
@@ -166,7 +169,7 @@ public class TestHeartbeatMonitor {
 
 
     clusters.mapAndPublishHostsToCluster(hostNames, clusterName);
-    Service hdfs = cluster.addService(serviceName);
+    Service hdfs = cluster.addService(serviceName, repositoryVersion);
     hdfs.addServiceComponent(Role.DATANODE.name());
     hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1);
     hdfs.addServiceComponent(Role.NAMENODE.name());
@@ -233,7 +236,8 @@ public class TestHeartbeatMonitor {
     clusters.addCluster(clusterName, stackId);
     Cluster cluster = clusters.getCluster(clusterName);
 
-    helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
+    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(stackId,
+        stackId.getStackVersion());
     cluster.createClusterVersion(stackId, stackId.getStackVersion(), "admin",
         RepositoryVersionState.INSTALLING);
     Set<String> hostNames = new HashSet<String>() {{
@@ -255,7 +259,7 @@ public class TestHeartbeatMonitor {
 
 
     clusters.mapAndPublishHostsToCluster(hostNames, clusterName);
-    Service hdfs = cluster.addService(serviceName);
+    Service hdfs = cluster.addService(serviceName, repositoryVersion);
     hdfs.addServiceComponent(Role.DATANODE.name());
     hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost
     (hostname1);
@@ -353,7 +357,8 @@ public class TestHeartbeatMonitor {
     clusters.addCluster(clusterName, stackId);
     Cluster cluster = clusters.getCluster(clusterName);
 
-    helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
+    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(stackId,
+        stackId.getStackVersion());
     cluster.createClusterVersion(stackId, stackId.getStackVersion(), "admin",
         RepositoryVersionState.INSTALLING);
 
@@ -363,7 +368,7 @@ public class TestHeartbeatMonitor {
 
     clusters.mapAndPublishHostsToCluster(hostNames, clusterName);
 
-    Service hdfs = cluster.addService(serviceName);
+    Service hdfs = cluster.addService(serviceName, repositoryVersion);
     hdfs.addServiceComponent(Role.DATANODE.name());
     hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1);
     hdfs.addServiceComponent(Role.NAMENODE.name());
@@ -435,7 +440,8 @@ public class TestHeartbeatMonitor {
     clusters.addCluster(clusterName, stackId);
     Cluster cluster = clusters.getCluster(clusterName);
 
-    helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
+    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(stackId,
+        stackId.getStackVersion());
     cluster.createClusterVersion(stackId, stackId.getStackVersion(), "admin",
             RepositoryVersionState.INSTALLING);
 
@@ -445,7 +451,7 @@ public class TestHeartbeatMonitor {
 
     clusters.mapAndPublishHostsToCluster(hostNames, clusterName);
 
-    Service hdfs = cluster.addService(serviceName);
+    Service hdfs = cluster.addService(serviceName, repositoryVersion);
     hdfs.addServiceComponent(Role.DATANODE.name());
     hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1);
     hdfs.addServiceComponent(Role.NAMENODE.name());
@@ -553,7 +559,8 @@ public class TestHeartbeatMonitor {
     Cluster cluster = clusters.getCluster(clusterName);
 
     cluster.setDesiredStackVersion(stackId);
-    helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
+    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(stackId,
+        stackId.getStackVersion());
     cluster.createClusterVersion(stackId, stackId.getStackVersion(), "admin",
         RepositoryVersionState.INSTALLING);
 
@@ -564,7 +571,7 @@ public class TestHeartbeatMonitor {
 
     clusters.mapAndPublishHostsToCluster(hostNames, clusterName);
 
-    Service hdfs = cluster.addService(serviceName);
+    Service hdfs = cluster.addService(serviceName, repositoryVersion);
 
     hdfs.addServiceComponent(Role.DATANODE.name());
     hdfs.getServiceComponent(Role.DATANODE.name()).addServiceComponentHost(hostname1);

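Each of the TestHeartbeatMonitor hunks above follows the same pattern: the RepositoryVersionEntity created through OrmTestHelper is kept in a local variable and handed to Cluster.addService(), which now requires the desired repository version up front. A minimal sketch of that setup, assuming the test's injected OrmTestHelper ("helper"), an existing Cluster ("cluster"), and an illustrative stack id:

    StackId stackId = new StackId("HDP-0.1");
    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(stackId,
        stackId.getStackVersion());

    cluster.createClusterVersion(stackId, stackId.getStackVersion(), "admin",
        RepositoryVersionState.INSTALLING);

    // addService() no longer falls back to the cluster's desired stack; the
    // repository version is passed explicitly.
    Service hdfs = cluster.addService("HDFS", repositoryVersion);
    hdfs.addServiceComponent(Role.DATANODE.name());
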
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/alerts/ComponentVersionAlertRunnableTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/alerts/ComponentVersionAlertRunnableTest.java b/ambari-server/src/test/java/org/apache/ambari/server/alerts/ComponentVersionAlertRunnableTest.java
index 98f6f44..7ba5bc0 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/alerts/ComponentVersionAlertRunnableTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/alerts/ComponentVersionAlertRunnableTest.java
@@ -38,7 +38,6 @@ import org.apache.ambari.server.events.publishers.AlertEventPublisher;
 import org.apache.ambari.server.orm.DBAccessor;
 import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
 import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
-import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
 import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.orm.entities.UpgradeEntity;
 import org.apache.ambari.server.stack.StackManagerFactory;
@@ -48,6 +47,8 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.ComponentInfo;
 import org.apache.ambari.server.state.Host;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.stack.OsFamily;
@@ -140,6 +141,17 @@ public class ComponentVersionAlertRunnableTest extends EasyMockSupport {
     expect(m_desidredStackId.getStackName()).andReturn("SOME-STACK").atLeastOnce();
     expect(m_desidredStackId.getStackVersion()).andReturn("STACK-VERSION").atLeastOnce();
 
+    RepositoryVersionEntity repositoryVersionEntity = createNiceMock(RepositoryVersionEntity.class);
+    expect(repositoryVersionEntity.getVersion()).andReturn(EXPECTED_VERSION).anyTimes();
+
+    // services
+    Service service = createNiceMock(Service.class);
+    expect(service.getDesiredRepositoryVersion()).andReturn(repositoryVersionEntity).atLeastOnce();
+
+    ServiceComponent serviceComponent = createNiceMock(ServiceComponent.class);
+    expect(serviceComponent.getDesiredStackVersion()).andReturn(m_desidredStackId).atLeastOnce();
+    expect(service.getServiceComponent(EasyMock.anyString())).andReturn(serviceComponent).atLeastOnce();
+
     // components
     ServiceComponentHost sch1_1 = createNiceMock(ServiceComponentHost.class);
     ServiceComponentHost sch1_2 = createNiceMock(ServiceComponentHost.class);
@@ -149,19 +161,15 @@ public class ComponentVersionAlertRunnableTest extends EasyMockSupport {
     expect(sch1_1.getServiceName()).andReturn("FOO").atLeastOnce();
     expect(sch1_1.getServiceComponentName()).andReturn("FOO_COMPONENT").atLeastOnce();
     expect(sch1_1.getVersion()).andReturn(EXPECTED_VERSION).atLeastOnce();
-    expect(sch1_1.getDesiredStackVersion()).andReturn(m_desidredStackId).atLeastOnce();
     expect(sch1_2.getServiceName()).andReturn("BAR").atLeastOnce();
     expect(sch1_2.getServiceComponentName()).andReturn("BAR_COMPONENT").atLeastOnce();
     expect(sch1_2.getVersion()).andReturn(EXPECTED_VERSION).atLeastOnce();
-    expect(sch1_2.getDesiredStackVersion()).andReturn(m_desidredStackId).atLeastOnce();
     expect(sch2_1.getServiceName()).andReturn("FOO").atLeastOnce();
     expect(sch2_1.getServiceComponentName()).andReturn("FOO_COMPONENT").atLeastOnce();
     expect(sch2_1.getVersion()).andReturn(EXPECTED_VERSION).atLeastOnce();
-    expect(sch2_1.getDesiredStackVersion()).andReturn(m_desidredStackId).atLeastOnce();
     expect(sch2_2.getServiceName()).andReturn("BAZ").atLeastOnce();
     expect(sch2_2.getServiceComponentName()).andReturn("BAZ_COMPONENT").atLeastOnce();
     expect(sch2_2.getVersion()).andReturn(EXPECTED_VERSION).atLeastOnce();
-    expect(sch2_2.getDesiredStackVersion()).andReturn(m_desidredStackId).atLeastOnce();
 
     m_hostComponentMap.get(HOSTNAME_1).add(sch1_1);
     m_hostComponentMap.get(HOSTNAME_1).add(sch1_2);
@@ -179,14 +187,7 @@ public class ComponentVersionAlertRunnableTest extends EasyMockSupport {
     expect(m_cluster.getClusterId()).andReturn(CLUSTER_ID).atLeastOnce();
     expect(m_cluster.getClusterName()).andReturn(CLUSTER_NAME).atLeastOnce();
     expect(m_cluster.getHosts()).andReturn(m_hosts).atLeastOnce();
-
-    ClusterVersionEntity clusterVersionEntity = createNiceMock(ClusterVersionEntity.class);
-    RepositoryVersionEntity repositoryVersionEntity = createNiceMock(RepositoryVersionEntity.class);
-    expect(clusterVersionEntity.getRepositoryVersion()).andReturn(
-        repositoryVersionEntity).anyTimes();
-
-    expect(repositoryVersionEntity.getVersion()).andReturn(EXPECTED_VERSION).anyTimes();
-    expect(m_cluster.getCurrentClusterVersion()).andReturn(clusterVersionEntity).anyTimes();
+    expect(m_cluster.getService(EasyMock.anyString())).andReturn(service).atLeastOnce();
 
     // mock clusters
     expect(m_clusters.getClusters()).andReturn(clusterMap).atLeastOnce();
@@ -304,7 +305,6 @@ public class ComponentVersionAlertRunnableTest extends EasyMockSupport {
     expect(sch.getServiceName()).andReturn("FOO").atLeastOnce();
     expect(sch.getServiceComponentName()).andReturn("FOO_COMPONENT").atLeastOnce();
     expect(sch.getVersion()).andReturn(WRONG_VERSION).atLeastOnce();
-    expect(sch.getDesiredStackVersion()).andReturn(m_desidredStackId).atLeastOnce();
 
     replayAll();
 

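The net effect of the ComponentVersionAlertRunnableTest changes is that the expected component version is no longer read from the cluster's ClusterVersionEntity or from per-host desired stack versions; it now comes from the Service. A condensed sketch of the new EasyMock wiring, using the test's own EXPECTED_VERSION and m_cluster fixtures:

    RepositoryVersionEntity repositoryVersionEntity = createNiceMock(RepositoryVersionEntity.class);
    expect(repositoryVersionEntity.getVersion()).andReturn(EXPECTED_VERSION).anyTimes();

    Service service = createNiceMock(Service.class);
    expect(service.getDesiredRepositoryVersion()).andReturn(repositoryVersionEntity).atLeastOnce();

    // the cluster hands back the mocked service; no getCurrentClusterVersion() stub is needed
    expect(m_cluster.getService(EasyMock.anyString())).andReturn(service).atLeastOnce();
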
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/api/resources/BaseResourceDefinitionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/resources/BaseResourceDefinitionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/resources/BaseResourceDefinitionTest.java
index 8e83f56..8bbd49a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/resources/BaseResourceDefinitionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/resources/BaseResourceDefinitionTest.java
@@ -52,6 +52,7 @@ import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.ResourceProvider;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
 import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
+import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
 import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.view.ViewRegistry;
 import org.easymock.EasyMock;
@@ -88,25 +89,28 @@ public class BaseResourceDefinitionTest {
     TreeNode<Resource> serviceNode = new TreeNodeImpl<>(parentNode, service, "service1");
 
     parentNode.setProperty("isCollection", "true");
-    
+
     ResourceProviderFactory factory = createMock(ResourceProviderFactory.class);
     MaintenanceStateHelper maintenanceStateHelper = createNiceMock(MaintenanceStateHelper.class);
     AmbariManagementController managementController = createMock(AmbariManagementController.class);
+    RepositoryVersionDAO repositoryVersionDAO = createNiceMock(RepositoryVersionDAO.class);
+
     expect(maintenanceStateHelper.isOperationAllowed(anyObject(Resource.Type.class),
             anyObject(Service.class))).andReturn(true).anyTimes();
-    ResourceProvider serviceResourceProvider = new ServiceResourceProvider(PropertyHelper
-        .getPropertyIds(Resource.Type.Service),
-        PropertyHelper.getKeyPropertyIds(Resource.Type.Service),
-        managementController, maintenanceStateHelper);
-    
+
+    ResourceProvider serviceResourceProvider = new ServiceResourceProvider(
+        PropertyHelper.getPropertyIds(Resource.Type.Service),
+        PropertyHelper.getKeyPropertyIds(Resource.Type.Service), managementController,
+        maintenanceStateHelper, repositoryVersionDAO);
+
     expect(factory.getServiceResourceProvider(EasyMock.<Set<String>>anyObject(),
         EasyMock.<Map<Resource.Type, String>>anyObject(),
         anyObject(AmbariManagementController.class))).andReturn(serviceResourceProvider);
-    
+
     AbstractControllerResourceProvider.init(factory);
-    
+
     replay(factory, managementController, maintenanceStateHelper);
-    
+
     processor.process(null, serviceNode, "http://c6401.ambari.apache.org:8080/api/v1/clusters/c1/services");
 
     String href = serviceNode.getStringProperty("href");

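As shown above, ServiceResourceProvider has grown a RepositoryVersionDAO constructor argument. A sketch of the updated construction in a test, with the other mocks as in the hunk:

    RepositoryVersionDAO repositoryVersionDAO = createNiceMock(RepositoryVersionDAO.class);

    ResourceProvider serviceResourceProvider = new ServiceResourceProvider(
        PropertyHelper.getPropertyIds(Resource.Type.Service),
        PropertyHelper.getKeyPropertyIds(Resource.Type.Service),
        managementController, maintenanceStateHelper, repositoryVersionDAO);
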
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
index c9acfe9..e0d399d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
@@ -60,6 +60,7 @@ import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
 import org.apache.ambari.server.orm.dao.MetainfoDAO;
 import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
 import org.apache.ambari.server.orm.entities.MetainfoEntity;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.stack.StackManager;
 import org.apache.ambari.server.stack.StackManagerFactory;
 import org.apache.ambari.server.state.AutoDeployInfo;
@@ -1911,6 +1912,9 @@ public class AmbariMetaInfoTest {
    */
   @Test
   public void testAlertDefinitionMerging() throws Exception {
+    final String stackVersion = "2.0.6";
+    final String repoVersion = "2.0.6-1234";
+
     Injector injector = Guice.createInjector(Modules.override(
         new InMemoryDefaultTestModule()).with(new MockModule()));
 
@@ -1918,8 +1922,9 @@ public class AmbariMetaInfoTest {
 
     injector.getInstance(GuiceJpaInitializer.class);
     injector.getInstance(EntityManager.class);
-    long clusterId = injector.getInstance(OrmTestHelper.class).createCluster(
-        "cluster" + System.currentTimeMillis());
+
+    OrmTestHelper ormHelper = injector.getInstance(OrmTestHelper.class);
+    long clusterId = ormHelper.createCluster("cluster" + System.currentTimeMillis());
 
     Class<?> c = metaInfo.getClass().getSuperclass();
 
@@ -1934,9 +1939,12 @@ public class AmbariMetaInfoTest {
     Clusters clusters = injector.getInstance(Clusters.class);
     Cluster cluster = clusters.getClusterById(clusterId);
     cluster.setDesiredStackVersion(
-        new StackId(STACK_NAME_HDP, "2.0.6"));
+        new StackId(STACK_NAME_HDP, stackVersion));
+
+    RepositoryVersionEntity repositoryVersion = ormHelper.getOrCreateRepositoryVersion(
+        cluster.getCurrentStackVersion(), repoVersion);
 
-    cluster.addService("HDFS");
+    cluster.addService("HDFS", repositoryVersion);
 
     metaInfo.reconcileAlertDefinitions(clusters);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
index 3a93fbf..00fc962 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
@@ -36,19 +36,22 @@ import org.apache.ambari.server.H2DatabaseCleaner;
 import org.apache.ambari.server.agent.HeartbeatTestHelper;
 import org.apache.ambari.server.agent.RecoveryConfig;
 import org.apache.ambari.server.agent.RecoveryConfigHelper;
-import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.MaintenanceState;
 import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.State;
 import org.apache.ambari.server.utils.EventBusSynchronizer;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
+import com.google.common.collect.Sets;
 import com.google.common.eventbus.EventBus;
 import com.google.inject.Guice;
 import com.google.inject.Inject;
@@ -69,7 +72,16 @@ public class RecoveryConfigHelperTest {
   private RecoveryConfigHelper recoveryConfigHelper;
 
   @Inject
-  private AmbariEventPublisher eventPublisher;
+  private RepositoryVersionDAO repositoryVersionDAO;
+
+  /**
+   * The repository created when creating the test cluster.
+   */
+  private RepositoryVersionEntity repositoryVersion;
+
+  private final String STACK_VERSION = "0.1";
+  private final String REPO_VERSION = "0.1-1234";
+  private final StackId stackId = new StackId("HDP", STACK_VERSION);
 
   @Before
   public void setup() throws Exception {
@@ -114,7 +126,7 @@ public class RecoveryConfigHelperTest {
   public void testRecoveryConfigValues()
       throws Exception {
     String hostname = "hostname1";
-    Cluster cluster = getDummyCluster(hostname);
+    Cluster cluster = getDummyCluster(Sets.newHashSet(hostname));
     RecoveryConfig recoveryConfig = recoveryConfigHelper.getRecoveryConfig(cluster.getClusterName(), hostname);
     assertEquals(recoveryConfig.getMaxLifetimeCount(), "10");
     assertEquals(recoveryConfig.getMaxCount(), "4");
@@ -134,7 +146,8 @@ public class RecoveryConfigHelperTest {
   public void testServiceComponentInstalled()
       throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
+    repositoryVersion = repositoryVersionDAO.findByStackAndVersion(stackId, REPO_VERSION);
+    Service hdfs = cluster.addService(HDFS, repositoryVersion);
 
     hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
     hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
@@ -167,7 +180,8 @@ public class RecoveryConfigHelperTest {
   public void testServiceComponentUninstalled()
       throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
+    repositoryVersion = repositoryVersionDAO.findByStackAndVersion(stackId, REPO_VERSION);
+    Service hdfs = cluster.addService(HDFS, repositoryVersion);
 
     hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
     hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
@@ -202,7 +216,8 @@ public class RecoveryConfigHelperTest {
   public void testClusterEnvConfigChanged()
       throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
+    repositoryVersion = repositoryVersionDAO.findByStackAndVersion(stackId, REPO_VERSION);
+    Service hdfs = cluster.addService(HDFS, repositoryVersion);
 
     hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
     hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
@@ -241,7 +256,8 @@ public class RecoveryConfigHelperTest {
   public void testMaintenanceModeChanged()
       throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
+    repositoryVersion = repositoryVersionDAO.findByStackAndVersion(stackId, REPO_VERSION);
+    Service hdfs = cluster.addService(HDFS, repositoryVersion);
 
     hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
     hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
@@ -275,7 +291,8 @@ public class RecoveryConfigHelperTest {
   public void testServiceComponentRecoveryChanged()
       throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
+    repositoryVersion = repositoryVersionDAO.findByStackAndVersion(stackId, REPO_VERSION);
+    Service hdfs = cluster.addService(HDFS, repositoryVersion);
 
     hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
     hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
@@ -316,7 +333,7 @@ public class RecoveryConfigHelperTest {
     Cluster cluster = getDummyCluster(hostNames);
 
     // Add HDFS service with DATANODE component to the cluster
-    Service hdfs = cluster.addService(HDFS);
+    Service hdfs = cluster.addService(HDFS, repositoryVersion);
 
     hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
 
@@ -349,16 +366,11 @@ public class RecoveryConfigHelperTest {
       put(RecoveryConfigHelper.RECOVERY_RETRY_GAP_KEY, "2");
     }};
 
-    return heartbeatTestHelper.getDummyCluster("cluster1", "HDP-0.1", configProperties, hostNames);
-  }
-
-  private Cluster getDummyCluster(final String hostname)
-      throws Exception {
-
-    Set<String> hostNames = new HashSet<String>(){{
-      add(hostname);
-    }};
+    Cluster cluster = heartbeatTestHelper.getDummyCluster("cluster1", stackId, REPO_VERSION,
+        configProperties, hostNames);
 
-    return getDummyCluster(hostNames);
+    repositoryVersion = repositoryVersionDAO.findByStackAndVersion(stackId, REPO_VERSION);
+    assertNotNull(repositoryVersion);
+    return cluster;
   }
 }

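The RecoveryConfigHelperTest changes reduce to the same two steps in every test: resolve the repository version that HeartbeatTestHelper registered for the dummy cluster, then pass it to addService(). A sketch, using the test's stackId and REPO_VERSION constants:

    repositoryVersion = repositoryVersionDAO.findByStackAndVersion(stackId, REPO_VERSION);
    assertNotNull(repositoryVersion);

    Service hdfs = cluster.addService(HDFS, repositoryVersion);
    hdfs.addServiceComponent(DATANODE).setRecoveryEnabled(true);
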
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperTest.java
index 71a02f5..b0d085b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperTest.java
@@ -47,6 +47,9 @@ import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.metadata.ActionMetadata;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.orm.OrmTestHelper;
+import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.security.TestAuthenticationFactory;
 import org.apache.ambari.server.security.authorization.AuthorizationException;
 import org.apache.ambari.server.state.Cluster;
@@ -131,7 +134,7 @@ public class AmbariCustomCommandExecutionHelperTest {
     StageUtils.setConfiguration(injector.getInstance(Configuration.class));
 
     SecurityContextHolder.getContext().setAuthentication(TestAuthenticationFactory.createAdministrator());
-    createClusterFixture("c1", "HDP-2.0.6", "c1");
+    createClusterFixture("c1", new StackId("HDP-2.0.6"), "2.0.6-1234", "c1");
 
     EasyMock.expect(hostRoleCommand.getTaskId()).andReturn(1L);
     EasyMock.expect(hostRoleCommand.getStageId()).andReturn(1L);
@@ -515,7 +518,7 @@ public class AmbariCustomCommandExecutionHelperTest {
   public void testIsTopologyRefreshRequired() throws Exception {
     AmbariCustomCommandExecutionHelper helper = injector.getInstance(AmbariCustomCommandExecutionHelper.class);
 
-    createClusterFixture("c2", "HDP-2.1.1", "c2");
+    createClusterFixture("c2", new StackId("HDP-2.1.1"), "2.1.1.0-1234", "c2");
 
     Assert.assertTrue(helper.isTopologyRefreshRequired("START", "c2", "HDFS"));
     Assert.assertTrue(helper.isTopologyRefreshRequired("RESTART", "c2", "HDFS"));
@@ -552,20 +555,27 @@ public class AmbariCustomCommandExecutionHelperTest {
     }
   }
 
-  private void createClusterFixture(String clusterName, String stackVersion, String hostPrefix) throws AmbariException, AuthorizationException {
+  private void createClusterFixture(String clusterName, StackId stackId,
+      String respositoryVersion, String hostPrefix) throws AmbariException, AuthorizationException {
     String hostC6401 = hostPrefix + "-c6401";
     String hostC6402 = hostPrefix + "-c6402";
 
-    createCluster(clusterName, stackVersion);
+    OrmTestHelper ormTestHelper = injector.getInstance(OrmTestHelper.class);
+    RepositoryVersionEntity repositoryVersion = ormTestHelper.getOrCreateRepositoryVersion(stackId,
+        respositoryVersion);
+
+    createCluster(clusterName, stackId.getStackId());
 
     addHost(hostC6401, clusterName);
     addHost(hostC6402, clusterName);
 
-    clusters.getCluster(clusterName);
-    createService(clusterName, "YARN", null);
-    createService(clusterName, "GANGLIA", null);
-    createService(clusterName, "ZOOKEEPER", null);
-    createService(clusterName, "FLUME", null);
+    Cluster cluster = clusters.getCluster(clusterName);
+    Assert.assertNotNull(cluster);
+
+    createService(clusterName, "YARN", repositoryVersion);
+    createService(clusterName, "GANGLIA", repositoryVersion);
+    createService(clusterName, "ZOOKEEPER", repositoryVersion);
+    createService(clusterName, "FLUME", repositoryVersion);
 
     createServiceComponent(clusterName, "YARN", "RESOURCEMANAGER", State.INIT);
     createServiceComponent(clusterName, "YARN", "NODEMANAGER", State.INIT);
@@ -576,7 +586,6 @@ public class AmbariCustomCommandExecutionHelperTest {
     // this component should be not installed on any host
     createServiceComponent(clusterName, "FLUME", "FLUME_HANDLER", State.INIT);
 
-
     createServiceComponentHost(clusterName, "YARN", "RESOURCEMANAGER", hostC6401, null);
     createServiceComponentHost(clusterName, "YARN", "NODEMANAGER", hostC6401, null);
     createServiceComponentHost(clusterName, "GANGLIA", "GANGLIA_SERVER", hostC6401, State.INIT);
@@ -609,17 +618,17 @@ public class AmbariCustomCommandExecutionHelperTest {
     ambariManagementController.createCluster(r);
   }
 
-  private void createService(String clusterName,
-      String serviceName, State desiredState) throws AmbariException, AuthorizationException {
-    String dStateStr = null;
-    if (desiredState != null) {
-      dStateStr = desiredState.toString();
-    }
-    ServiceRequest r1 = new ServiceRequest(clusterName, serviceName, dStateStr);
+  private void createService(String clusterName, String serviceName,
+      RepositoryVersionEntity repositoryVersion) throws AmbariException, AuthorizationException {
+
+    ServiceRequest r1 = new ServiceRequest(clusterName, serviceName,
+        repositoryVersion.getStackId().getStackId(), repositoryVersion.getVersion(), null, "false");
+
     Set<ServiceRequest> requests = new HashSet<>();
     requests.add(r1);
 
-    ServiceResourceProviderTest.createServices(ambariManagementController, requests);
+    ServiceResourceProviderTest.createServices(ambariManagementController,
+        injector.getInstance(RepositoryVersionDAO.class), requests);
   }
 
   private void createServiceComponent(String clusterName,

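createService() above now builds a ServiceRequest that carries the stack id and repository version of the service being created. A sketch of that call (the trailing null and "false" arguments are copied verbatim from the hunk):

    ServiceRequest request = new ServiceRequest(clusterName, "YARN",
        repositoryVersion.getStackId().getStackId(), repositoryVersion.getVersion(), null, "false");

    Set<ServiceRequest> requests = new HashSet<>();
    requests.add(request);

    ServiceResourceProviderTest.createServices(ambariManagementController,
        injector.getInstance(RepositoryVersionDAO.class), requests);
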

[3/9] ambari git commit: AMBARI-20894 - Setting Desired Stack Ids to Correct Values During Service and Patch Upgrades (jonathanhurley)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
index e587f28..a814ba8 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
@@ -95,6 +95,8 @@ import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.UpgradeContext;
+import org.apache.ambari.server.state.UpgradeContextFactory;
 import org.apache.ambari.server.state.UpgradeHelper;
 import org.apache.ambari.server.state.UpgradeState;
 import org.apache.ambari.server.state.stack.UpgradePack;
@@ -140,6 +142,11 @@ public class UpgradeResourceProviderTest {
   private TopologyManager topologyManager;
   private ConfigFactory configFactory;
   private HostRoleCommandDAO hrcDAO;
+  private UpgradeContextFactory upgradeContextFactory;
+
+  RepositoryVersionEntity repoVersionEntity2110;
+  RepositoryVersionEntity repoVersionEntity2111;
+  RepositoryVersionEntity repoVersionEntity2200;
 
   @Before
   public void before() throws Exception {
@@ -176,6 +183,7 @@ public class UpgradeResourceProviderTest {
     amc = injector.getInstance(AmbariManagementController.class);
     ambariMetaInfo = injector.getInstance(AmbariMetaInfo.class);
     configFactory = injector.getInstance(ConfigFactory.class);
+    upgradeContextFactory = injector.getInstance(UpgradeContextFactory.class);
 
     Field field = AmbariServer.class.getDeclaredField("clusterController");
     field.setAccessible(true);
@@ -199,26 +207,26 @@ public class UpgradeResourceProviderTest {
     StackId stack211 = new StackId("HDP-2.1.1");
     StackId stack220 = new StackId("HDP-2.2.0");
 
-    RepositoryVersionEntity repoVersionEntity = new RepositoryVersionEntity();
-    repoVersionEntity.setDisplayName("My New Version 1");
-    repoVersionEntity.setOperatingSystems("");
-    repoVersionEntity.setStack(stackEntity211);
-    repoVersionEntity.setVersion("2.1.1.0");
-    repoVersionDao.create(repoVersionEntity);
-
-    repoVersionEntity = new RepositoryVersionEntity();
-    repoVersionEntity.setDisplayName("My New Version 2 for patch upgrade");
-    repoVersionEntity.setOperatingSystems("");
-    repoVersionEntity.setStack(stackEntity211);
-    repoVersionEntity.setVersion("2.1.1.1");
-    repoVersionDao.create(repoVersionEntity);
-
-    repoVersionEntity = new RepositoryVersionEntity();
-    repoVersionEntity.setDisplayName("My New Version 3 for major upgrade");
-    repoVersionEntity.setOperatingSystems("");
-    repoVersionEntity.setStack(stackEntity220);
-    repoVersionEntity.setVersion("2.2.0.0");
-    repoVersionDao.create(repoVersionEntity);
+    repoVersionEntity2110 = new RepositoryVersionEntity();
+    repoVersionEntity2110.setDisplayName("My New Version 1");
+    repoVersionEntity2110.setOperatingSystems("");
+    repoVersionEntity2110.setStack(stackEntity211);
+    repoVersionEntity2110.setVersion("2.1.1.0");
+    repoVersionDao.create(repoVersionEntity2110);
+
+    repoVersionEntity2111 = new RepositoryVersionEntity();
+    repoVersionEntity2111.setDisplayName("My New Version 2 for patch upgrade");
+    repoVersionEntity2111.setOperatingSystems("");
+    repoVersionEntity2111.setStack(stackEntity211);
+    repoVersionEntity2111.setVersion("2.1.1.1");
+    repoVersionDao.create(repoVersionEntity2111);
+
+    repoVersionEntity2200 = new RepositoryVersionEntity();
+    repoVersionEntity2200.setDisplayName("My New Version 3 for major upgrade");
+    repoVersionEntity2200.setOperatingSystems("");
+    repoVersionEntity2200.setStack(stackEntity220);
+    repoVersionEntity2200.setVersion("2.2.0.0");
+    repoVersionDao.create(repoVersionEntity2200);
 
     clusters = injector.getInstance(Clusters.class);
 
@@ -242,8 +250,7 @@ public class UpgradeResourceProviderTest {
     clusters.mapHostToCluster("h1", "c1");
 
     // add a single ZK server
-    Service service = cluster.addService("ZOOKEEPER");
-    service.setDesiredStackVersion(cluster.getDesiredStackVersion());
+    Service service = cluster.addService("ZOOKEEPER", repoVersionEntity2110);
 
     ServiceComponent component = service.addServiceComponent("ZOOKEEPER_SERVER");
     ServiceComponentHost sch = component.addServiceComponentHost("h1");
@@ -745,8 +752,7 @@ public class UpgradeResourceProviderTest {
     Cluster cluster = clusters.getCluster("c1");
 
     // add additional service for the test
-    Service service = cluster.addService("HIVE");
-    service.setDesiredStackVersion(cluster.getDesiredStackVersion());
+    Service service = cluster.addService("HIVE", repoVersionEntity2110);
 
     ServiceComponent component = service.addServiceComponent("HIVE_SERVER");
     ServiceComponentHost sch = component.addServiceComponentHost("h1");
@@ -796,7 +802,7 @@ public class UpgradeResourceProviderTest {
 
     // create downgrade with one upgraded service
     StackId stackId = new StackId("HDP", "2.2.0");
-    cluster.setDesiredStackVersion(stackId, true);
+    service.setDesiredRepositoryVersion(repoVersionEntity2200);
 
     requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1");
     requestProps.put(UpgradeResourceProvider.UPGRADE_VERSION, "2.1.1.0");
@@ -1057,7 +1063,7 @@ public class UpgradeResourceProviderTest {
         assertEquals(oldStack, sc.getDesiredStackVersion());
 
         for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
-          assertEquals(oldStack, sch.getDesiredStackVersion());
+          assertEquals(oldStack.getStackVersion(), sch.getVersion());
         }
       }
     }
@@ -1103,7 +1109,7 @@ public class UpgradeResourceProviderTest {
         assertEquals(newStack, sc.getDesiredStackVersion());
 
         for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
-          assertEquals(newStack, sch.getDesiredStackVersion());
+          assertEquals(newStack.getStackVersion(), sch.getVersion());
         }
       }
     }
@@ -1200,7 +1206,11 @@ public class UpgradeResourceProviderTest {
 
     Map<String, UpgradePack> upgradePacks = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1");
     UpgradePack upgrade = upgradePacks.get("upgrade_to_new_stack");
-    upgradeResourceProvider.applyStackAndProcessConfigurations(stack211.getStackName(), cluster, "2.2.0.0", Direction.UPGRADE, upgrade, "admin");
+
+    UpgradeContext upgradeContext = upgradeContextFactory.create(cluster, upgrade.getType(),
+        Direction.UPGRADE, "2.2.0.0", new HashMap<String, Object>());
+
+    upgradeResourceProvider.applyStackAndProcessConfigurations(upgradeContext);
 
     Map<String, Map<String, String>> expectedConfigurations = expectedConfigurationsCapture.getValue();
     Map<String, String> expectedFooType = expectedConfigurations.get("foo-site");
@@ -1511,8 +1521,7 @@ public class UpgradeResourceProviderTest {
   @Test
   public void testCreateUpgradeDowngradeCycleAdvertisingVersion() throws Exception {
     Cluster cluster = clusters.getCluster("c1");
-    Service service = cluster.addService("STORM");
-    service.setDesiredStackVersion(cluster.getDesiredStackVersion());
+    Service service = cluster.addService("STORM", repoVersionEntity2110);
 
     ServiceComponent component = service.addServiceComponent("DRPC_SERVER");
     ServiceComponentHost sch = component.addServiceComponentHost("h1");

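applyStackAndProcessConfigurations() now takes a single UpgradeContext built by the injected UpgradeContextFactory instead of separate stack, version, and direction arguments. The call site from the hunk, in isolation:

    UpgradeContext upgradeContext = upgradeContextFactory.create(cluster, upgrade.getType(),
        Direction.UPGRADE, "2.2.0.0", new HashMap<String, Object>());

    upgradeResourceProvider.applyStackAndProcessConfigurations(upgradeContext);
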
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeSummaryResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeSummaryResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeSummaryResourceProviderTest.java
index bee8983..094706e 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeSummaryResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeSummaryResourceProviderTest.java
@@ -178,8 +178,7 @@ public class UpgradeSummaryResourceProviderTest {
     clusters.mapHostToCluster("h1", "c1");
 
     // add a single ZOOKEEPER server
-    Service service = cluster.addService("ZOOKEEPER");
-    service.setDesiredStackVersion(cluster.getDesiredStackVersion());
+    Service service = cluster.addService("ZOOKEEPER", repoVersionEntity);
 
     ServiceComponent component = service.addServiceComponent("ZOOKEEPER_SERVER");
     ServiceComponentHost sch = component.addServiceComponentHost("h1");

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/GeneralServiceCalculatedStateTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/GeneralServiceCalculatedStateTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/GeneralServiceCalculatedStateTest.java
index 09d5569..eb41ba6 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/GeneralServiceCalculatedStateTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/state/GeneralServiceCalculatedStateTest.java
@@ -26,6 +26,8 @@ import org.apache.ambari.server.H2DatabaseCleaner;
 import org.apache.ambari.server.actionmanager.ActionManager;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.orm.OrmTestHelper;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Service;
@@ -59,10 +61,13 @@ public abstract class GeneralServiceCalculatedStateTest {
   @Inject
   protected Clusters clusters;
 
+  @Inject
+  private OrmTestHelper ormTestHelper;
 
   @Before
   public void setup() throws Exception {
     final StackId stack211 = new StackId("HDP-2.1.1");
+    final String version = "2.1.1-1234";
 
     injector = Guice.createInjector(Modules.override(
       new InMemoryDefaultTestModule()).with(new Module() {
@@ -75,11 +80,13 @@ public abstract class GeneralServiceCalculatedStateTest {
     injector.getInstance(GuiceJpaInitializer.class);
     injector.injectMembers(this);
 
+    RepositoryVersionEntity repositoryVersion = ormTestHelper.getOrCreateRepositoryVersion(stack211,
+        version);
+
     clusters.addCluster(clusterName, stack211);
     cluster = clusters.getCluster(clusterName);
 
-    service = cluster.addService(getServiceName());
-    service.setDesiredStackVersion(cluster.getDesiredStackVersion());
+    service = cluster.addService(getServiceName(), repositoryVersion);
 
     createComponentsAndHosts();
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java b/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java
index 6fb0028..9907153 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/events/EventsTest.java
@@ -33,6 +33,7 @@ import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
 import org.apache.ambari.server.orm.dao.AlertDispatchDAO;
 import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
 import org.apache.ambari.server.orm.entities.AlertGroupEntity;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Host;
@@ -78,6 +79,10 @@ public class EventsTest {
   private AlertDefinitionDAO m_definitionDao;
   private AlertDispatchDAO m_alertDispatchDao;
 
+  private final String STACK_VERSION = "2.0.6";
+  private final String REPO_VERSION = "2.0.6-1234";
+  private RepositoryVersionEntity m_repositoryVersion;
+
   /**
    *
    */
@@ -101,7 +106,7 @@ public class EventsTest {
     m_alertDispatchDao = m_injector.getInstance(AlertDispatchDAO.class);
 
     m_clusterName = "foo";
-    StackId stackId = new StackId("HDP", "2.0.6");
+    StackId stackId = new StackId("HDP", STACK_VERSION);
 
     m_clusters.addCluster(m_clusterName, stackId);
     m_clusters.addHost(HOSTNAME);
@@ -117,8 +122,8 @@ public class EventsTest {
     Assert.assertNotNull(m_cluster);
 
     m_cluster.setDesiredStackVersion(stackId);
-    m_helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
-    m_cluster.createClusterVersion(stackId, stackId.getStackVersion(), "admin",
+    m_repositoryVersion = m_helper.getOrCreateRepositoryVersion(stackId, REPO_VERSION);
+    m_cluster.createClusterVersion(stackId, REPO_VERSION, "admin",
         RepositoryVersionState.INSTALLING);
 
     m_clusters.mapHostToCluster(HOSTNAME, m_clusterName);
@@ -282,8 +287,6 @@ public class EventsTest {
     // make sure there are at least 1
     Assert.assertTrue(hdfsDefinitions.size() > 0);
 
-    AlertDefinitionEntity definition = hdfsDefinitions.get(0);
-
     // delete the default alert group
     m_alertDispatchDao.remove(group);
 
@@ -378,7 +381,7 @@ public class EventsTest {
 
   private void installHdfsService() throws Exception {
     String serviceName = "HDFS";
-    Service service = m_serviceFactory.createNew(m_cluster, serviceName);
+    Service service = m_serviceFactory.createNew(m_cluster, serviceName, m_repositoryVersion);
     service = m_cluster.getService(serviceName);
     Assert.assertNotNull(service);
 
@@ -391,7 +394,5 @@ public class EventsTest {
     component.addServiceComponentHost(sch);
     sch.setDesiredState(State.INSTALLED);
     sch.setState(State.INSTALLED);
-    sch.setDesiredStackVersion(new StackId("HDP-2.0.6"));
-    sch.setStackVersion(new StackId("HDP-2.0.6"));
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListenerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListenerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListenerTest.java
index fef9276..6184d6d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListenerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListenerTest.java
@@ -123,7 +123,8 @@ public class HostVersionOutOfSyncListenerTest {
    * @param stackId Stack Id to use
    * @throws AmbariException
    */
-  private void createClusterAndHosts(String INSTALLED_VERSION, StackId stackId) throws AmbariException {
+  private RepositoryVersionEntity createClusterAndHosts(String INSTALLED_VERSION, StackId stackId)
+      throws AmbariException {
     Host h1 = clusters.getHost("h1");
     h1.setState(HostState.HEALTHY);
 
@@ -152,7 +153,7 @@ public class HostVersionOutOfSyncListenerTest {
     Map<String, List<Integer>> zkTopology = new HashMap<>();
     List<Integer> zkServerHosts = Arrays.asList(0, 1, 2);
     zkTopology.put("ZOOKEEPER_SERVER", new ArrayList<>(zkServerHosts));
-    addService(c1, hostList, zkTopology, "ZOOKEEPER");
+    addService(c1, hostList, zkTopology, "ZOOKEEPER", repositoryVersionEntity);
 
     // install new version
     helper.createHostVersion("h1", repositoryVersionEntity, RepositoryVersionState.INSTALLED);
@@ -173,6 +174,8 @@ public class HostVersionOutOfSyncListenerTest {
         assertEquals(hostVersionEntity.getState(), RepositoryVersionState.INSTALLED);
       }
     }
+
+    return repositoryVersionEntity;
   }
 
   /***
@@ -182,7 +185,7 @@ public class HostVersionOutOfSyncListenerTest {
    * @param stackId Stack Id to use
    * @throws AmbariException
    */
-  private void addRepoVersion(String INSTALLED_VERSION, StackId stackId) throws AmbariException {
+  private RepositoryVersionEntity addRepoVersion(String INSTALLED_VERSION, StackId stackId) throws AmbariException {
     // Register and install new version
     RepositoryVersionEntity repositoryVersionEntity = helper.getOrCreateRepositoryVersion(stackId,
             INSTALLED_VERSION);
@@ -200,6 +203,8 @@ public class HostVersionOutOfSyncListenerTest {
         assertEquals(hostVersionEntity.getState(), RepositoryVersionState.INSTALLED);
       }
     }
+
+    return repositoryVersionEntity;
   }
 
   /**
@@ -215,7 +220,7 @@ public class HostVersionOutOfSyncListenerTest {
     StackId yaStackId = new StackId(yetAnotherStackId);
 
     // get new hosts installed with the first repo
-    createClusterAndHosts(INSTALLED_VERSION, stackId);
+    RepositoryVersionEntity repositoryVersion = createClusterAndHosts(INSTALLED_VERSION, stackId);
 
     // register the new repo
     addRepoVersion(INSTALLED_VERSION_2, yaStackId);
@@ -234,7 +239,7 @@ public class HostVersionOutOfSyncListenerTest {
     hdfsTopology.put("SECONDARY_NAMENODE", Collections.singletonList(1));
     List<Integer> datanodeHosts = Arrays.asList(0, 1);
     hdfsTopology.put("DATANODE", new ArrayList<>(datanodeHosts));
-    addService(c1, hostList, hdfsTopology, "HDFS");
+    addService(c1, hostList, hdfsTopology, "HDFS", repositoryVersion);
 
     // Check result
     Set<String> changedHosts = new HashSet<>();
@@ -272,7 +277,7 @@ public class HostVersionOutOfSyncListenerTest {
     String INSTALLED_VERSION = "2.2.0-1000";
     StackId stackId = new StackId(this.stackId);
 
-    createClusterAndHosts(INSTALLED_VERSION, stackId);
+    RepositoryVersionEntity repositoryVersion = createClusterAndHosts(INSTALLED_VERSION, stackId);
 
     //Add Ganglia service
     List<String> hostList = new ArrayList<>();
@@ -283,7 +288,7 @@ public class HostVersionOutOfSyncListenerTest {
     hdfsTopology.put("GANGLIA_SERVER", Collections.singletonList(0));
     List<Integer> monitorHosts = Arrays.asList(0, 1);
     hdfsTopology.put("GANGLIA_MONITOR", new ArrayList<>(monitorHosts));
-    addService(c1, hostList, hdfsTopology, "GANGLIA");
+    addService(c1, hostList, hdfsTopology, "GANGLIA", repositoryVersion);
 
     // Check result
     Set<String> changedHosts = new HashSet<>();
@@ -319,7 +324,7 @@ public class HostVersionOutOfSyncListenerTest {
 
     assertRepoVersionState(stackId.getStackId(), INSTALLED_VERSION, RepositoryVersionState.INSTALLED);
     assertRepoVersionState(stackId.getStackId(), INSTALLED_VERSION_2, RepositoryVersionState.INSTALLED);
-    
+
     //Add ZOOKEEPER_CLIENT component
     List<String> hostList = new ArrayList<>();
     hostList.add("h1");
@@ -334,7 +339,7 @@ public class HostVersionOutOfSyncListenerTest {
     changedHosts.add("h3");
 
     assertRepoVersionState(stackId.getStackId(), INSTALLED_VERSION,RepositoryVersionState.OUT_OF_SYNC);
-    
+
     List<HostVersionEntity> hostVersions = hostVersionDAO.findAll();
 
     for (HostVersionEntity hostVersionEntity : hostVersions) {
@@ -481,13 +486,13 @@ public class HostVersionOutOfSyncListenerTest {
         .put("NAMENODE", Lists.newArrayList(0))
         .put("DATANODE", Lists.newArrayList(1))
         .build();
-    addService(c1, allHosts, topology, "HDFS");
+    addService(c1, allHosts, topology, "HDFS", repo);
 
     topology = new ImmutableMap.Builder<String, List<Integer>>()
         .put("GANGLIA_SERVER", Lists.newArrayList(0))
         .put("GANGLIA_MONITOR", Lists.newArrayList(2))
         .build();
-    addService(c1, allHosts, topology, "GANGLIA");
+    addService(c1, allHosts, topology, "GANGLIA", repo);
 
     List<HostVersionEntity> hostVersions = hostVersionDAO.findAll();
     assertEquals(3, hostVersions.size());
@@ -552,12 +557,11 @@ public class HostVersionOutOfSyncListenerTest {
     host1.setHostAttributes(hostAttributes);
   }
 
-  private void addService(Cluster cl, List<String> hostList,
-                                Map<String, List<Integer>> topology, String serviceName
-                          ) throws AmbariException {
+  private void addService(Cluster cl, List<String> hostList, Map<String, List<Integer>> topology,
+      String serviceName, RepositoryVersionEntity repositoryVersionEntity) throws AmbariException {
     StackId stackIdObj = new StackId(stackId);
     cl.setDesiredStackVersion(stackIdObj);
-    cl.addService(serviceName);
+    cl.addService(serviceName, repositoryVersionEntity);
 
     for (Map.Entry<String, List<Integer>> component : topology.entrySet()) {
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/StackVersionListenerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/StackVersionListenerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/StackVersionListenerTest.java
index 7b8b68a..d5b2d46 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/StackVersionListenerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/events/listeners/upgrade/StackVersionListenerTest.java
@@ -167,8 +167,6 @@ public class StackVersionListenerTest extends EasyMockSupport {
   public void testRecalculateHostVersionStateWhenComponentDesiredVersionIsUnknownAndNewVersionIsNotValid() throws AmbariException {
     expect(serviceComponent.getDesiredVersion()).andReturn(UNKNOWN_VERSION);
     expect(serviceComponent.isVersionAdvertised()).andReturn(Boolean.TRUE);
-    serviceComponent.setDesiredVersion(INVALID_NEW_VERSION);
-    expectLastCall().once();
     sch.setUpgradeState(UpgradeState.NONE);
     expectLastCall().once();
     sch.setVersion(INVALID_NEW_VERSION);
@@ -184,8 +182,6 @@ public class StackVersionListenerTest extends EasyMockSupport {
   public void testRecalculateClusterVersionStateWhenComponentDesiredVersionIsUnknownAndNewVersionIsValid() throws AmbariException {
     expect(serviceComponent.getDesiredVersion()).andReturn(UNKNOWN_VERSION);
     expect(serviceComponent.isVersionAdvertised()).andReturn(Boolean.TRUE);
-    serviceComponent.setDesiredVersion(VALID_NEW_VERSION);
-    expectLastCall().once();
     sch.setUpgradeState(UpgradeState.NONE);
     expectLastCall().once();
     sch.setVersion(VALID_NEW_VERSION);

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
index 574ffa4..e84e0f6 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/OrmTestHelper.java
@@ -435,8 +435,11 @@ public class OrmTestHelper {
   public void installHdfsService(Cluster cluster,
       ServiceFactory serviceFactory, ServiceComponentFactory componentFactory,
       ServiceComponentHostFactory schFactory, String hostName) throws Exception {
+
+    RepositoryVersionEntity repositoryVersion = cluster.getCurrentClusterVersion().getRepositoryVersion();
+
     String serviceName = "HDFS";
-    Service service = serviceFactory.createNew(cluster, serviceName);
+    Service service = serviceFactory.createNew(cluster, serviceName, repositoryVersion);
     service = cluster.getService(serviceName);
     assertNotNull(service);
 
@@ -450,8 +453,6 @@ public class OrmTestHelper {
     datanode.addServiceComponentHost(sch);
     sch.setDesiredState(State.INSTALLED);
     sch.setState(State.INSTALLED);
-    sch.setDesiredStackVersion(new StackId("HDP-2.0.6"));
-    sch.setStackVersion(new StackId("HDP-2.0.6"));
 
     ServiceComponent namenode = componentFactory.createNew(service, "NAMENODE");
 
@@ -462,15 +463,16 @@ public class OrmTestHelper {
     namenode.addServiceComponentHost(sch);
     sch.setDesiredState(State.INSTALLED);
     sch.setState(State.INSTALLED);
-    sch.setDesiredStackVersion(new StackId("HDP-2.0.6"));
-    sch.setStackVersion(new StackId("HDP-2.0.6"));
   }
 
   public void installYarnService(Cluster cluster,
       ServiceFactory serviceFactory, ServiceComponentFactory componentFactory,
       ServiceComponentHostFactory schFactory, String hostName) throws Exception {
+
+    RepositoryVersionEntity repositoryVersion = cluster.getCurrentClusterVersion().getRepositoryVersion();
+
     String serviceName = "YARN";
-    Service service = serviceFactory.createNew(cluster, serviceName);
+    Service service = serviceFactory.createNew(cluster, serviceName, repositoryVersion);
     service = cluster.getService(serviceName);
     assertNotNull(service);
 
@@ -485,8 +487,6 @@ public class OrmTestHelper {
     resourceManager.addServiceComponentHost(sch);
     sch.setDesiredState(State.INSTALLED);
     sch.setState(State.INSTALLED);
-    sch.setDesiredStackVersion(new StackId("HDP-2.0.6"));
-    sch.setStackVersion(new StackId("HDP-2.0.6"));
   }
 
   /**

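Both installHdfsService() and installYarnService() in OrmTestHelper now derive the repository version from the cluster's current cluster version and drop the explicit per-host stack version setters. The shared pattern, sketched:

    RepositoryVersionEntity repositoryVersion =
        cluster.getCurrentClusterVersion().getRepositoryVersion();

    Service service = serviceFactory.createNew(cluster, "HDFS", repositoryVersion);
    // the explicit sch.setStackVersion()/sch.setDesiredStackVersion() calls are gone
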
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
index 73ab5e6..91d5f04 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
@@ -21,7 +21,6 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -46,10 +45,15 @@ import org.apache.ambari.server.orm.dao.ClusterVersionDAO;
 import org.apache.ambari.server.orm.dao.HostDAO;
 import org.apache.ambari.server.orm.dao.HostVersionDAO;
 import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
+import org.apache.ambari.server.orm.dao.RequestDAO;
 import org.apache.ambari.server.orm.dao.StackDAO;
+import org.apache.ambari.server.orm.dao.UpgradeDAO;
 import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
 import org.apache.ambari.server.orm.entities.HostVersionEntity;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
+import org.apache.ambari.server.orm.entities.RequestEntity;
 import org.apache.ambari.server.orm.entities.StackEntity;
+import org.apache.ambari.server.orm.entities.UpgradeEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.ConfigFactory;
@@ -64,6 +68,7 @@ import org.apache.ambari.server.state.ServiceComponentHostFactory;
 import org.apache.ambari.server.state.ServiceFactory;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.State;
+import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
 import org.apache.ambari.server.utils.EventBusSynchronizer;
 import org.junit.After;
 import org.junit.Assert;
@@ -146,6 +151,9 @@ public class ComponentVersionCheckActionTest {
     clusters.addCluster(clusterName, sourceStack);
 
     StackDAO stackDAO = m_injector.getInstance(StackDAO.class);
+    RequestDAO requestDAO = m_injector.getInstance(RequestDAO.class);
+    UpgradeDAO upgradeDAO = m_injector.getInstance(UpgradeDAO.class);
+
     StackEntity stackEntitySource = stackDAO.find(sourceStack.getStackName(), sourceStack.getStackVersion());
     StackEntity stackEntityTarget = stackDAO.find(targetStack.getStackName(), targetStack.getStackVersion());
     assertNotNull(stackEntitySource);
@@ -180,9 +188,6 @@ public class ComponentVersionCheckActionTest {
     c.transitionClusterVersion(targetStack, targetRepo, RepositoryVersionState.INSTALLED);
     c.setCurrentStackVersion(targetStack);
 
-    c.mapHostVersions(Collections.singleton(hostName), c.getCurrentClusterVersion(),
-        RepositoryVersionState.CURRENT);
-
     HostDAO hostDAO = m_injector.getInstance(HostDAO.class);
 
     HostVersionEntity entity = new HostVersionEntity();
@@ -190,6 +195,25 @@ public class ComponentVersionCheckActionTest {
     entity.setRepositoryVersion(repoVersionDAO.findByStackAndVersion(targetStack, targetRepo));
     entity.setState(RepositoryVersionState.INSTALLED);
     hostVersionDAO.create(entity);
+
+    RequestEntity requestEntity = new RequestEntity();
+    requestEntity.setClusterId(c.getClusterId());
+    requestEntity.setRequestId(1L);
+    requestEntity.setStartTime(System.currentTimeMillis());
+    requestEntity.setCreateTime(System.currentTimeMillis());
+    requestDAO.create(requestEntity);
+
+    UpgradeEntity upgradeEntity = new UpgradeEntity();
+    upgradeEntity.setId(1L);
+    upgradeEntity.setClusterId(c.getClusterId());
+    upgradeEntity.setRequestEntity(requestEntity);
+    upgradeEntity.setUpgradePackage("");
+    upgradeEntity.setFromVersion(sourceRepo);
+    upgradeEntity.setToVersion(targetRepo);
+    upgradeEntity.setUpgradeType(UpgradeType.NON_ROLLING);
+    upgradeDAO.create(upgradeEntity);
+
+    c.setUpgradeEntity(upgradeEntity);
   }
 
   private void makeCrossStackUpgradeCluster(StackId sourceStack, String sourceRepo, StackId targetStack,
@@ -199,6 +223,9 @@ public class ComponentVersionCheckActionTest {
     clusters.addCluster(clusterName, sourceStack);
 
     StackDAO stackDAO = m_injector.getInstance(StackDAO.class);
+    RequestDAO requestDAO = m_injector.getInstance(RequestDAO.class);
+    UpgradeDAO upgradeDAO = m_injector.getInstance(UpgradeDAO.class);
+
     StackEntity stackEntitySource = stackDAO.find(sourceStack.getStackName(), sourceStack.getStackVersion());
     StackEntity stackEntityTarget = stackDAO.find(targetStack.getStackName(), targetStack.getStackVersion());
 
@@ -225,6 +252,24 @@ public class ComponentVersionCheckActionTest {
     c.createClusterVersion(sourceStack, sourceRepo, "admin", RepositoryVersionState.INSTALLING);
     c.transitionClusterVersion(sourceStack, sourceRepo, RepositoryVersionState.CURRENT);
 
+    RequestEntity requestEntity = new RequestEntity();
+    requestEntity.setClusterId(c.getClusterId());
+    requestEntity.setRequestId(1L);
+    requestEntity.setStartTime(System.currentTimeMillis());
+    requestEntity.setCreateTime(System.currentTimeMillis());
+    requestDAO.create(requestEntity);
+
+    UpgradeEntity upgradeEntity = new UpgradeEntity();
+    upgradeEntity.setId(1L);
+    upgradeEntity.setClusterId(c.getClusterId());
+    upgradeEntity.setRequestEntity(requestEntity);
+    upgradeEntity.setUpgradePackage("");
+    upgradeEntity.setFromVersion(sourceRepo);
+    upgradeEntity.setToVersion(targetRepo);
+    upgradeEntity.setUpgradeType(UpgradeType.NON_ROLLING);
+    upgradeDAO.create(upgradeEntity);
+
+    c.setUpgradeEntity(upgradeEntity);
   }
 
   private void createNewRepoVersion(StackId targetStack, String targetRepo, String clusterName,
@@ -245,9 +290,6 @@ public class ComponentVersionCheckActionTest {
     c.createClusterVersion(targetStack, targetRepo, "admin", RepositoryVersionState.INSTALLING);
     c.transitionClusterVersion(targetStack, targetRepo, RepositoryVersionState.INSTALLED);
 
-    c.mapHostVersions(Collections.singleton(hostName), c.getCurrentClusterVersion(),
-        RepositoryVersionState.CURRENT);
-
     HostDAO hostDAO = m_injector.getInstance(HostDAO.class);
 
     HostVersionEntity entity = new HostVersionEntity();
@@ -389,12 +431,16 @@ public class ComponentVersionCheckActionTest {
     Cluster cluster = clusters.getCluster("c1");
     clusters.mapHostToCluster("h1", "c1");
 
+    RepositoryVersionEntity repositoryVersion2110 = m_helper.getOrCreateRepositoryVersion(
+        HDP_21_STACK, HDP_2_1_1_0);
+
+    RepositoryVersionEntity repositoryVersion2111 = m_helper.getOrCreateRepositoryVersion(
+        HDP_21_STACK, HDP_2_1_1_1);
+
     Service service = installService(cluster, "HDFS");
+    service.setDesiredRepositoryVersion(repositoryVersion2110);
     ServiceComponent sc = addServiceComponent(cluster, service, "NAMENODE");
-    sc.setDesiredVersion(HDP_2_1_1_0);
-
     sc = addServiceComponent(cluster, service, "DATANODE");
-    sc.setDesiredVersion(HDP_2_1_1_0);
 
     ServiceComponentHost sch = createNewServiceComponentHost(cluster, "HDFS", "NAMENODE", "h1");
     sch.setVersion(HDP_2_1_1_0);
@@ -402,8 +448,8 @@ public class ComponentVersionCheckActionTest {
     sch.setVersion(HDP_2_1_1_0);
 
     service = installService(cluster, "ZOOKEEPER");
+    service.setDesiredRepositoryVersion(repositoryVersion2111);
     sc = addServiceComponent(cluster, service, "ZOOKEEPER_SERVER");
-    sc.setDesiredVersion(HDP_2_1_1_1);
 
     sch = createNewServiceComponentHost(cluster, "ZOOKEEPER", "ZOOKEEPER_SERVER", "h1");
     sch.setVersion(HDP_2_1_1_1);
@@ -415,7 +461,7 @@ public class ComponentVersionCheckActionTest {
     assertEquals(HDP_211_CENTOS6_REPO_URL, repo.getBaseUrl());
 
     // Finalize the upgrade
-    Map<String, String> commandParams = new HashMap<String, String>();
+    Map<String, String> commandParams = new HashMap<>();
     commandParams.put(FinalizeUpgradeAction.UPGRADE_DIRECTION_KEY, "upgrade");
     commandParams.put(FinalizeUpgradeAction.VERSION_KEY, targetRepo);
     commandParams.put(FinalizeUpgradeAction.SUPPORTED_SERVICES_KEY, "ZOOKEEPER");
@@ -450,8 +496,6 @@ public class ComponentVersionCheckActionTest {
     sc.addServiceComponentHost(sch);
     sch.setDesiredState(State.INSTALLED);
     sch.setState(State.INSTALLED);
-    sch.setDesiredStackVersion(cluster.getDesiredStackVersion());
-    sch.setStackVersion(cluster.getCurrentStackVersion());
 
     return sch;
   }
@@ -462,7 +506,8 @@ public class ComponentVersionCheckActionTest {
     try {
       service = cluster.getService(serviceName);
     } catch (ServiceNotFoundException e) {
-      service = serviceFactory.createNew(cluster, serviceName);
+      RepositoryVersionEntity repositoryVersion = cluster.getCurrentClusterVersion().getRepositoryVersion();
+      service = serviceFactory.createNew(cluster, serviceName, repositoryVersion);
       cluster.addService(service);
     }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
index 976dea4..860369b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ConfigureActionTest.java
@@ -46,6 +46,7 @@ import org.apache.ambari.server.orm.dao.HostVersionDAO;
 import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
 import org.apache.ambari.server.orm.dao.StackDAO;
 import org.apache.ambari.server.orm.entities.HostVersionEntity;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.serveraction.ServerAction;
 import org.apache.ambari.server.state.Cluster;
@@ -1695,7 +1696,6 @@ public class ConfigureActionTest {
 
     clusters.addCluster(clusterName, HDP_220_STACK);
 
-
     StackEntity stackEntity = stackDAO.find(HDP_220_STACK.getStackName(),
         HDP_220_STACK.getStackVersion());
 
@@ -1704,9 +1704,13 @@ public class ConfigureActionTest {
     Cluster c = clusters.getCluster(clusterName);
     c.setDesiredStackVersion(HDP_220_STACK);
 
+    // Creating starting repo
+    RepositoryVersionEntity repositoryVersionEntity = m_helper.getOrCreateRepositoryVersion(
+        HDP_220_STACK, HDP_2_2_0_0);
+
     // !!! very important, otherwise the loops that walk the list of installed
     // service properties will not run!
-    installService(c, "ZOOKEEPER");
+    installService(c, "ZOOKEEPER", repositoryVersionEntity);
 
     Config config = cf.createNew(c, "zoo.cfg", "version1", new HashMap<String, String>() {
       {
@@ -1726,8 +1730,6 @@ public class ConfigureActionTest {
     hostAttributes.put("os_release_version", "6");
     host.setHostAttributes(hostAttributes);
 
-    // Creating starting repo
-    m_helper.getOrCreateRepositoryVersion(HDP_220_STACK, HDP_2_2_0_0);
     c.createClusterVersion(HDP_220_STACK, HDP_2_2_0_0, "admin", RepositoryVersionState.INSTALLING);
     c.transitionClusterVersion(HDP_220_STACK, HDP_2_2_0_0, RepositoryVersionState.CURRENT);
 
@@ -1741,10 +1743,6 @@ public class ConfigureActionTest {
     c.transitionClusterVersion(HDP_220_STACK, HDP_2_2_0_1, RepositoryVersionState.INSTALLED);
     c.setCurrentStackVersion(HDP_220_STACK);
 
-    c.mapHostVersions(Collections.singleton(hostName), c.getCurrentClusterVersion(),
-        RepositoryVersionState.CURRENT);
-
-
     HostVersionEntity entity = new HostVersionEntity();
     entity.setHostEntity(hostDAO.findByName(hostName));
     entity.setRepositoryVersion(repoVersionDAO.findByStackAndVersion(HDP_220_STACK, HDP_2_2_0_1));
@@ -1764,13 +1762,14 @@ public class ConfigureActionTest {
    * @return
    * @throws AmbariException
    */
-  private Service installService(Cluster cluster, String serviceName) throws AmbariException {
+  private Service installService(Cluster cluster, String serviceName,
+      RepositoryVersionEntity repositoryVersion) throws AmbariException {
     Service service = null;
 
     try {
       service = cluster.getService(serviceName);
     } catch (ServiceNotFoundException e) {
-      service = serviceFactory.createNew(cluster, serviceName);
+      service = serviceFactory.createNew(cluster, serviceName, repositoryVersion);
       cluster.addService(service);
     }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
index 5513271..0a583ae 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
@@ -24,7 +24,6 @@ import static org.junit.Assert.assertTrue;
 
 import java.lang.reflect.Field;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -51,8 +50,6 @@ import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.orm.OrmTestHelper;
 import org.apache.ambari.server.orm.dao.ClusterVersionDAO;
-import org.apache.ambari.server.orm.dao.HostComponentDesiredStateDAO;
-import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
 import org.apache.ambari.server.orm.dao.HostDAO;
 import org.apache.ambari.server.orm.dao.HostVersionDAO;
 import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
@@ -118,6 +115,8 @@ public class UpgradeActionTest {
 
   private static final String HDP_211_CENTOS6_REPO_URL = "http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.1.1.0-118";
 
+  private RepositoryVersionEntity sourceRepositoryVersion;
+
   private Injector m_injector;
 
   private AmbariManagementController amc;
@@ -148,10 +147,6 @@ public class UpgradeActionTest {
   @Inject
   private ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO;
   @Inject
-  private HostComponentDesiredStateDAO hostComponentDesiredStateDAO;
-  @Inject
-  private HostComponentStateDAO hostComponentStateDAO;
-  @Inject
   private StackDAO stackDAO;
   @Inject
   private AmbariMetaInfo ambariMetaInfo;
@@ -209,9 +204,6 @@ public class UpgradeActionTest {
     c.createClusterVersion(targetStack, targetRepo, "admin", RepositoryVersionState.INSTALLING);
     c.transitionClusterVersion(targetStack, targetRepo, RepositoryVersionState.INSTALLED);
 
-    c.mapHostVersions(Collections.singleton(hostName), c.getCurrentClusterVersion(),
-            RepositoryVersionState.CURRENT);
-
     HostVersionEntity entity = new HostVersionEntity();
     entity.setHostEntity(hostDAO.findByName(hostName));
     entity.setRepositoryVersion(repoVersionDAO.findByStackAndVersion(targetStack, targetRepo));
@@ -266,10 +258,6 @@ public class UpgradeActionTest {
     entitySource.setState(RepositoryVersionState.INSTALL_FAILED);
     hostVersionDAO.create(entitySource);
 
-    // Create a host version for the mid repo in CURRENT
-    c.mapHostVersions(Collections.singleton(hostName), c.getCurrentClusterVersion(),
-            RepositoryVersionState.CURRENT);
-
     // Create a host version for the target repo in UPGRADED
     HostVersionEntity entityTarget = new HostVersionEntity();
     entityTarget.setHostEntity(hostDAO.findByName(hostName));
@@ -278,7 +266,8 @@ public class UpgradeActionTest {
     hostVersionDAO.create(entityTarget);
   }
 
-  private void createUpgradeClusterAndSourceRepo(StackId sourceStack, String sourceRepo,
+  private RepositoryVersionEntity createUpgradeClusterAndSourceRepo(StackId sourceStack,
+      String sourceRepo,
                                                  String hostName) throws Exception {
 
     clusters.addCluster(clusterName, sourceStack);
@@ -303,8 +292,8 @@ public class UpgradeActionTest {
     clusters.mapHostToCluster(hostName, clusterName);
 
     // Create the starting repo version
-    RepositoryVersionEntity repoEntity = m_helper.getOrCreateRepositoryVersion(sourceStack, sourceRepo);
-    repoEntity.setOperatingSystems("[\n" +
+    sourceRepositoryVersion = m_helper.getOrCreateRepositoryVersion(sourceStack, sourceRepo);
+    sourceRepositoryVersion.setOperatingSystems("[\n" +
             "   {\n" +
             "      \"repositories\":[\n" +
             "         {\n" +
@@ -316,14 +305,14 @@ public class UpgradeActionTest {
             "      \"OperatingSystems/os_type\":\"redhat6\"\n" +
             "   }\n" +
             "]");
-    repoVersionDAO.merge(repoEntity);
+    repoVersionDAO.merge(sourceRepositoryVersion);
 
     c.createClusterVersion(sourceStack, sourceRepo, "admin", RepositoryVersionState.INSTALLING);
     c.transitionClusterVersion(sourceStack, sourceRepo, RepositoryVersionState.CURRENT);
-
+    return sourceRepositoryVersion;
   }
 
-  private void createUpgradeClusterTargetRepo(StackId targetStack, String targetRepo,
+  private RepositoryVersionEntity createUpgradeClusterTargetRepo(StackId targetStack, String targetRepo,
                                               String hostName) throws AmbariException {
     Cluster c = clusters.getCluster(clusterName);
     StackEntity stackEntityTarget = stackDAO.find(targetStack.getStackName(), targetStack.getStackVersion());
@@ -341,9 +330,6 @@ public class UpgradeActionTest {
     c.transitionClusterVersion(targetStack, targetRepo, RepositoryVersionState.INSTALLED);
     c.setCurrentStackVersion(targetStack);
 
-    c.mapHostVersions(Collections.singleton(hostName), c.getCurrentClusterVersion(),
-            RepositoryVersionState.CURRENT);
-
     // create a single host with the UPGRADED HostVersionEntity
     HostDAO hostDAO = m_injector.getInstance(HostDAO.class);
 
@@ -361,6 +347,8 @@ public class UpgradeActionTest {
 
     assertEquals(1, hostVersions.size());
     assertEquals(RepositoryVersionState.INSTALLED, hostVersions.get(0).getState());
+
+    return repositoryVersionEntity;
   }
 
   private void makeCrossStackUpgradeClusterAndSourceRepo(StackId sourceStack, String sourceRepo,
@@ -388,7 +376,7 @@ public class UpgradeActionTest {
     clusters.mapHostToCluster(hostName, clusterName);
 
     // Create the starting repo version
-    m_helper.getOrCreateRepositoryVersion(sourceStack, sourceRepo);
+    sourceRepositoryVersion = m_helper.getOrCreateRepositoryVersion(sourceStack, sourceRepo);
     c.createClusterVersion(sourceStack, sourceRepo, "admin", RepositoryVersionState.INSTALLING);
     c.transitionClusterVersion(sourceStack, sourceRepo, RepositoryVersionState.CURRENT);
   }
@@ -409,9 +397,6 @@ public class UpgradeActionTest {
     c.createClusterVersion(targetStack, targetRepo, "admin", RepositoryVersionState.INSTALLING);
     c.transitionClusterVersion(targetStack, targetRepo, RepositoryVersionState.INSTALLED);
 
-    c.mapHostVersions(Collections.singleton(hostName), c.getCurrentClusterVersion(),
-        RepositoryVersionState.CURRENT);
-
     HostDAO hostDAO = m_injector.getInstance(HostDAO.class);
 
     HostVersionEntity entity = new HostVersionEntity();
@@ -442,10 +427,11 @@ public class UpgradeActionTest {
     Assert.assertTrue(packs.containsKey(upgradePackName));
 
     makeCrossStackUpgradeClusterAndSourceRepo(sourceStack, sourceRepo, hostName);
-//    makeCrossStackUpgradeCluster(sourceStack, sourceRepo, targetStack, targetRepo);
 
     Cluster cluster = clusters.getCluster(clusterName);
 
+    createUpgrade(cluster, sourceStack, sourceRepo, targetRepo);
+
     // Install ZK and HDFS with some components
     Service zk = installService(cluster, "ZOOKEEPER");
     addServiceComponent(cluster, zk, "ZOOKEEPER_SERVER");
@@ -515,6 +501,10 @@ public class UpgradeActionTest {
 
     makeDowngradeCluster(sourceStack, sourceRepo, targetStack, targetRepo);
 
+    Cluster cluster = clusters.getCluster(clusterName);
+
+    createUpgrade(cluster, sourceStack, sourceRepo, targetRepo);
+
     Map<String, String> commandParams = new HashMap<>();
     commandParams.put(FinalizeUpgradeAction.UPGRADE_DIRECTION_KEY, "downgrade");
     commandParams.put(FinalizeUpgradeAction.VERSION_KEY, sourceRepo);
@@ -573,6 +563,10 @@ public class UpgradeActionTest {
 
     makeTwoUpgradesWhereLastDidNotComplete(sourceStack, sourceRepo, midStack, midRepo, targetStack, targetRepo);
 
+    Cluster cluster = clusters.getCluster(clusterName);
+
+    createUpgrade(cluster, sourceStack, sourceRepo, targetRepo);
+
     Map<String, String> commandParams = new HashMap<>();
     commandParams.put(FinalizeUpgradeAction.UPGRADE_DIRECTION_KEY, "downgrade");
     commandParams.put(FinalizeUpgradeAction.VERSION_KEY, midRepo);
@@ -606,10 +600,13 @@ public class UpgradeActionTest {
     createUpgradeClusterAndSourceRepo(sourceStack, sourceRepo, hostName);
     createUpgradeClusterTargetRepo(targetStack, targetRepo, hostName);
 
+    Cluster cluster = clusters.getCluster(clusterName);
+
+    createUpgrade(cluster, sourceStack, sourceRepo, targetRepo);
+
     // Verify the repo before calling Finalize
     AmbariCustomCommandExecutionHelper helper = m_injector.getInstance(AmbariCustomCommandExecutionHelper.class);
     Host host = clusters.getHost("h1");
-    Cluster cluster = clusters.getCluster(clusterName);
 
     RepositoryInfo repo = ambariMetaInfo.getRepository(sourceStack.getStackName(), sourceStack.getStackVersion(), "redhat6", sourceStack.getStackId());
     assertEquals(HDP_211_CENTOS6_REPO_URL, repo.getBaseUrl());
@@ -671,6 +668,8 @@ public class UpgradeActionTest {
     Host host = clusters.getHost("h1");
     Cluster cluster = clusters.getCluster(clusterName);
 
+    createUpgrade(cluster, sourceStack, sourceRepo, targetRepo);
+
     RepositoryInfo repo = ambariMetaInfo.getRepository(sourceStack.getStackName(),
             sourceStack.getStackVersion(), "redhat6", sourceStack.getStackId());
     assertEquals(HDP_211_CENTOS6_REPO_URL, repo.getBaseUrl());
@@ -734,6 +733,8 @@ public class UpgradeActionTest {
     cluster.setCurrentStackVersion(sourceStack);
     cluster.setDesiredStackVersion(targetStack);
 
+    createUpgrade(cluster, sourceStack, sourceRepo, targetRepo);
+
     Map<String, String> commandParams = new HashMap<>();
     commandParams.put(FinalizeUpgradeAction.UPGRADE_DIRECTION_KEY, "upgrade");
     commandParams.put(FinalizeUpgradeAction.VERSION_KEY, targetRepo);
@@ -781,7 +782,6 @@ public class UpgradeActionTest {
     makeCrossStackUpgradeClusterAndSourceRepo(sourceStack, sourceRepo, hostName);
     Cluster cluster = clusters.getCluster(clusterName);
 
-
     // install HDFS with some components
     Service service = installService(cluster, "HDFS");
     addServiceComponent(cluster, service, "NAMENODE");
@@ -791,6 +791,8 @@ public class UpgradeActionTest {
 
     makeCrossStackUpgradeTargetRepo(targetStack, targetRepo, hostName);
 
+    createUpgrade(cluster, sourceStack, sourceRepo, targetRepo);
+
     // create some configs
     createConfigs(cluster);
 
@@ -889,6 +891,8 @@ public class UpgradeActionTest {
     cluster.setCurrentStackVersion(sourceStack);
     cluster.setDesiredStackVersion(targetStack);
 
+    createUpgrade(cluster, sourceStack, sourceRepo, targetRepo);
+
     // set the SCH versions to the new stack so that the finalize action is
     // happy
     cluster.getServiceComponentHosts("HDFS", "NAMENODE").get(0).setVersion(targetRepo);
@@ -966,32 +970,15 @@ public class UpgradeActionTest {
     ServiceComponentHost nnSCH = createNewServiceComponentHost(cluster, "HDFS", "NAMENODE", "h1");
     ServiceComponentHost dnSCH = createNewServiceComponentHost(cluster, "HDFS", "DATANODE", "h1");
 
-    createUpgradeClusterTargetRepo(targetStack, targetRepo, hostName);
+    RepositoryVersionEntity targetRepositoryVersion = createUpgradeClusterTargetRepo(targetStack,
+        targetRepo, hostName);
 
     // fake their upgrade
-    nnSCH.setStackVersion(nnSCH.getDesiredStackVersion());
+    service.setDesiredRepositoryVersion(targetRepositoryVersion);
     nnSCH.setVersion(targetRepo);
-    dnSCH.setStackVersion(nnSCH.getDesiredStackVersion());
     dnSCH.setVersion(targetRepo);
 
-    // create some entities for the finalize action to work with for patch
-    // history
-    RequestEntity requestEntity = new RequestEntity();
-    requestEntity.setClusterId(cluster.getClusterId());
-    requestEntity.setRequestId(1L);
-    requestEntity.setStartTime(System.currentTimeMillis());
-    requestEntity.setCreateTime(System.currentTimeMillis());
-    requestDAO.create(requestEntity);
-
-    UpgradeEntity upgradeEntity = new UpgradeEntity();
-    upgradeEntity.setId(1L);
-    upgradeEntity.setClusterId(cluster.getClusterId());
-    upgradeEntity.setRequestEntity(requestEntity);
-    upgradeEntity.setUpgradePackage("");
-    upgradeEntity.setFromVersion(sourceRepo);
-    upgradeEntity.setToVersion(targetRepo);
-    upgradeEntity.setUpgradeType(UpgradeType.NON_ROLLING);
-    upgradeDAO.create(upgradeEntity);
+    UpgradeEntity upgrade = createUpgrade(cluster, sourceStack, sourceRepo, targetRepo);
 
     // verify that no history exists yet
     List<ServiceComponentHistoryEntity> historyEntites = serviceComponentDesiredStateDAO.findHistory(
@@ -1007,7 +994,7 @@ public class UpgradeActionTest {
     // Finalize the upgrade, passing in the request ID so that history is
     // created
     Map<String, String> commandParams = new HashMap<>();
-    commandParams.put(FinalizeUpgradeAction.REQUEST_ID, String.valueOf(requestEntity.getRequestId()));
+    commandParams.put(FinalizeUpgradeAction.REQUEST_ID, String.valueOf(upgrade.getRequestId()));
     commandParams.put(FinalizeUpgradeAction.UPGRADE_DIRECTION_KEY, "upgrade");
     commandParams.put(FinalizeUpgradeAction.VERSION_KEY, targetRepo);
 
@@ -1044,8 +1031,6 @@ public class UpgradeActionTest {
     sc.addServiceComponentHost(sch);
     sch.setDesiredState(State.INSTALLED);
     sch.setState(State.INSTALLED);
-    sch.setDesiredStackVersion(cluster.getDesiredStackVersion());
-    sch.setStackVersion(cluster.getCurrentStackVersion());
     return sch;
   }
 
@@ -1055,7 +1040,7 @@ public class UpgradeActionTest {
     try {
       service = cluster.getService(serviceName);
     } catch (ServiceNotFoundException e) {
-      service = serviceFactory.createNew(cluster, serviceName);
+      service = serviceFactory.createNew(cluster, serviceName, sourceRepositoryVersion);
       cluster.addService(service);
     }
 
@@ -1100,4 +1085,40 @@ public class UpgradeActionTest {
     configFactory.createNew(cluster, "foo-site", "version-" + System.currentTimeMillis(),
         properties, propertiesAttributes);
   }
+
+  /**
+   * Creates an upgrade and associates it with the cluster.
+   *
+   * @param cluster
+   * @param sourceRepo
+   * @param targetRepo
+   * @throws Exception
+   */
+  private UpgradeEntity createUpgrade(Cluster cluster, StackId sourceStack, String sourceRepo,
+      String targetRepo) throws Exception {
+
+    // create some entities for the finalize action to work with for patch
+    // history
+    RequestEntity requestEntity = new RequestEntity();
+    requestEntity.setClusterId(cluster.getClusterId());
+    requestEntity.setRequestId(1L);
+    requestEntity.setStartTime(System.currentTimeMillis());
+    requestEntity.setCreateTime(System.currentTimeMillis());
+    requestDAO.create(requestEntity);
+
+    UpgradeEntity upgradeEntity = new UpgradeEntity();
+    upgradeEntity.setId(1L);
+    upgradeEntity.setClusterId(cluster.getClusterId());
+    upgradeEntity.setRequestEntity(requestEntity);
+    upgradeEntity.setUpgradePackage("");
+    upgradeEntity.setFromVersion(sourceRepo);
+    upgradeEntity.setToVersion(targetRepo);
+    upgradeEntity.setUpgradeType(UpgradeType.NON_ROLLING);
+
+    upgradeDAO.create(upgradeEntity);
+
+    cluster.setUpgradeEntity(upgradeEntity);
+
+    return upgradeEntity;
+  }
 }
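
For orientation, the tests above register the upgrade through this new helper before building the finalize command. A minimal sketch of that flow, using only names that appear in the hunks above and assuming it runs inside a test method after the usual cluster and repo setup:

    // register the upgrade with the cluster, then finalize against it
    UpgradeEntity upgrade = createUpgrade(cluster, sourceStack, sourceRepo, targetRepo);

    Map<String, String> commandParams = new HashMap<>();
    commandParams.put(FinalizeUpgradeAction.REQUEST_ID, String.valueOf(upgrade.getRequestId()));
    commandParams.put(FinalizeUpgradeAction.UPGRADE_DIRECTION_KEY, "upgrade");
    commandParams.put(FinalizeUpgradeAction.VERSION_KEY, targetRepo);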

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
index 2d589ed..d72f018 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
@@ -31,9 +31,7 @@ import java.util.Map;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.H2DatabaseCleaner;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.controller.ServiceComponentResponse;
-import org.apache.ambari.server.events.listeners.upgrade.StackVersionListener;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.orm.OrmTestHelper;
@@ -43,6 +41,7 @@ import org.apache.ambari.server.orm.dao.HostDAO;
 import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
 import org.apache.ambari.server.orm.dao.RequestDAO;
 import org.apache.ambari.server.orm.dao.ServiceComponentDesiredStateDAO;
+import org.apache.ambari.server.orm.dao.StackDAO;
 import org.apache.ambari.server.orm.dao.UpgradeDAO;
 import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
 import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
@@ -52,6 +51,7 @@ import org.apache.ambari.server.orm.entities.RequestEntity;
 import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity;
 import org.apache.ambari.server.orm.entities.ServiceComponentHistoryEntity;
 import org.apache.ambari.server.orm.entities.ServiceComponentVersionEntity;
+import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.orm.entities.StageEntity;
 import org.apache.ambari.server.orm.entities.UpgradeEntity;
 import org.apache.ambari.server.state.stack.upgrade.Direction;
@@ -76,7 +76,6 @@ public class ServiceComponentTest {
   private ServiceFactory serviceFactory;
   private ServiceComponentFactory serviceComponentFactory;
   private ServiceComponentHostFactory serviceComponentHostFactory;
-  private AmbariMetaInfo metaInfo;
   private OrmTestHelper helper;
   private HostDAO hostDAO;
 
@@ -92,7 +91,6 @@ public class ServiceComponentTest {
         ServiceComponentHostFactory.class);
     helper = injector.getInstance(OrmTestHelper.class);
     hostDAO = injector.getInstance(HostDAO.class);
-    metaInfo = injector.getInstance(AmbariMetaInfo.class);
 
     clusterName = "foo";
     serviceName = "HDFS";
@@ -103,11 +101,14 @@ public class ServiceComponentTest {
 
     cluster.setDesiredStackVersion(stackId);
     Assert.assertNotNull(cluster);
-    helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
+
+    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(stackId,
+        stackId.getStackVersion());
+
     cluster.createClusterVersion(stackId, stackId.getStackVersion(), "admin",
         RepositoryVersionState.INSTALLING);
 
-    Service s = serviceFactory.createNew(cluster, serviceName);
+    Service s = serviceFactory.createNew(cluster, serviceName, repositoryVersion);
     cluster.addService(s);
     service = cluster.getService(serviceName);
     Assert.assertNotNull(service);
@@ -154,8 +155,12 @@ public class ServiceComponentTest {
     sc.setDesiredState(State.INSTALLED);
     Assert.assertEquals(State.INSTALLED, sc.getDesiredState());
 
-    sc.setDesiredStackVersion(new StackId("HDP-1.2.0"));
-    Assert.assertEquals("HDP-1.2.0", sc.getDesiredStackVersion().getStackId());
+    StackId newStackId = new StackId("HDP-1.2.0");
+    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(newStackId,
+        newStackId.getStackVersion());
+
+    sc.setDesiredRepositoryVersion(repositoryVersion);
+    Assert.assertEquals(newStackId.toString(), sc.getDesiredStackVersion().getStackId());
 
     ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO =
         injector.getInstance(ServiceComponentDesiredStateDAO.class);
@@ -199,8 +204,7 @@ public class ServiceComponentTest {
   @Test
   public void testAddAndGetServiceComponentHosts() throws AmbariException {
     String componentName = "NAMENODE";
-    ServiceComponent component = serviceComponentFactory.createNew(service,
-        componentName);
+    ServiceComponent component = serviceComponentFactory.createNew(service, componentName);
     service.addServiceComponent(component);
 
     ServiceComponent sc = service.getServiceComponent(componentName);
@@ -224,6 +228,8 @@ public class ServiceComponentTest {
 
     ServiceComponentHost sch1 = sc.addServiceComponentHost("h1");
     ServiceComponentHost sch2 = sc.addServiceComponentHost("h2");
+    assertNotNull(sch1);
+    assertNotNull(sch2);
 
     try {
       sc.addServiceComponentHost("h2");
@@ -241,9 +247,7 @@ public class ServiceComponentTest {
     sc.addServiceComponentHost("h3");
     Assert.assertNotNull(sc.getServiceComponentHost("h3"));
 
-    sch1.setDesiredStackVersion(new StackId("HDP-1.2.0"));
     sch1.setState(State.STARTING);
-    sch1.setStackVersion(new StackId("HDP-1.2.0"));
     sch1.setDesiredState(State.STARTED);
 
     HostComponentDesiredStateDAO desiredStateDAO = injector.getInstance(
@@ -268,17 +272,14 @@ public class ServiceComponentTest {
     Assert.assertNotNull(sch);
     Assert.assertEquals(State.STARTING, sch.getState());
     Assert.assertEquals(State.STARTED, sch.getDesiredState());
-    Assert.assertEquals("HDP-1.2.0",
-        sch.getStackVersion().getStackId());
-    Assert.assertEquals("HDP-1.2.0",
-        sch.getDesiredStackVersion().getStackId());
+    Assert.assertEquals(service.getDesiredRepositoryVersion().getVersion(),
+        sch.getServiceComponent().getDesiredVersion());
   }
 
   @Test
   public void testConvertToResponse() throws AmbariException {
     String componentName = "NAMENODE";
-    ServiceComponent component = serviceComponentFactory.createNew(service,
-        componentName);
+    ServiceComponent component = serviceComponentFactory.createNew(service, componentName);
     service.addServiceComponent(component);
 
     addHostToCluster("h1", service.getCluster().getClusterName());
@@ -295,7 +296,6 @@ public class ServiceComponentTest {
     ServiceComponent sc = service.getServiceComponent(componentName);
     Assert.assertNotNull(sc);
     sc.setDesiredState(State.INSTALLED);
-    sc.setDesiredStackVersion(new StackId("HDP-1.2.0"));
 
     ServiceComponentResponse r = sc.convertToResponse();
     Assert.assertEquals(sc.getClusterName(), r.getClusterName());
@@ -362,7 +362,12 @@ public class ServiceComponentTest {
     sc.setDesiredState(State.INSTALLED);
     Assert.assertEquals(State.INSTALLED, sc.getDesiredState());
 
-    sc.setDesiredStackVersion(new StackId("HDP-2.2.0"));
+    StackId newStackId = new StackId("HDP-2.2.0");
+    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(newStackId,
+        newStackId.getStackVersion());
+
+    sc.setDesiredRepositoryVersion(repositoryVersion);
+
     StackId stackId = sc.getDesiredStackVersion();
     Assert.assertEquals(new StackId("HDP", "2.2.0"), stackId);
 
@@ -479,7 +484,12 @@ public class ServiceComponentTest {
     sc.setDesiredState(State.INSTALLED);
     Assert.assertEquals(State.INSTALLED, sc.getDesiredState());
 
-    sc.setDesiredStackVersion(new StackId("HDP-2.2.0"));
+    StackId newStackId = new StackId("HDP-2.2.0");
+    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(newStackId,
+        newStackId.getStackVersion());
+
+    sc.setDesiredRepositoryVersion(repositoryVersion);
+
     StackId stackId = sc.getDesiredStackVersion();
     Assert.assertEquals(new StackId("HDP", "2.2.0"), stackId);
 
@@ -545,23 +555,28 @@ public class ServiceComponentTest {
     sc.setDesiredState(State.INSTALLED);
     Assert.assertEquals(State.INSTALLED, sc.getDesiredState());
 
-    sc.setDesiredStackVersion(new StackId("HDP-2.2.0"));
-    StackId stackId = sc.getDesiredStackVersion();
-    Assert.assertEquals(new StackId("HDP", "2.2.0"), stackId);
-
-    Assert.assertEquals("HDP-2.2.0", sc.getDesiredStackVersion().getStackId());
-
     ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.findByName(
         cluster.getClusterId(), serviceName, componentName);
 
-    Assert.assertNotNull(serviceComponentDesiredStateEntity);
+    StackDAO stackDAO = injector.getInstance(StackDAO.class);
+    StackEntity stackEntity = stackDAO.find("HDP", "2.2.0");
 
-    RepositoryVersionEntity rve = new RepositoryVersionEntity(
-        serviceComponentDesiredStateEntity.getDesiredStack(), "HDP-2.2.0", "2.2.0.1-1111", "[]");
+    RepositoryVersionEntity rve = new RepositoryVersionEntity(stackEntity, "HDP-2.2.0",
+        "2.2.0.1-1111", "[]");
 
     RepositoryVersionDAO repositoryDAO = injector.getInstance(RepositoryVersionDAO.class);
     repositoryDAO.create(rve);
 
+    sc.setDesiredRepositoryVersion(rve);
+
+    Assert.assertEquals(rve, sc.getDesiredRepositoryVersion());
+
+    Assert.assertEquals(new StackId("HDP", "2.2.0"), sc.getDesiredStackVersion());
+
+    Assert.assertEquals("HDP-2.2.0", sc.getDesiredStackVersion().getStackId());
+
+    Assert.assertNotNull(serviceComponentDesiredStateEntity);
+
     ServiceComponentVersionEntity version = new ServiceComponentVersionEntity();
     version.setState(RepositoryVersionState.CURRENT);
     version.setRepositoryVersion(rve);
@@ -595,23 +610,27 @@ public class ServiceComponentTest {
     sc.setDesiredState(State.INSTALLED);
     Assert.assertEquals(State.INSTALLED, sc.getDesiredState());
 
-    sc.setDesiredStackVersion(new StackId("HDP-2.2.0"));
-    StackId stackId = sc.getDesiredStackVersion();
-    Assert.assertEquals(new StackId("HDP", "2.2.0"), stackId);
-
-    Assert.assertEquals("HDP-2.2.0", sc.getDesiredStackVersion().getStackId());
-
     ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.findByName(
         cluster.getClusterId(), serviceName, componentName);
 
-    Assert.assertNotNull(serviceComponentDesiredStateEntity);
+    StackDAO stackDAO = injector.getInstance(StackDAO.class);
+    StackEntity stackEntity = stackDAO.find("HDP", "2.2.0");
 
-    RepositoryVersionEntity rve = new RepositoryVersionEntity(
-        serviceComponentDesiredStateEntity.getDesiredStack(), "HDP-2.2.0", "2.2.0.1-1111", "[]");
+    RepositoryVersionEntity rve = new RepositoryVersionEntity(stackEntity, "HDP-2.2.0",
+        "2.2.0.1-1111", "[]");
 
     RepositoryVersionDAO repositoryDAO = injector.getInstance(RepositoryVersionDAO.class);
     repositoryDAO.create(rve);
 
+    sc.setDesiredRepositoryVersion(rve);
+
+    StackId stackId = sc.getDesiredStackVersion();
+    Assert.assertEquals(new StackId("HDP", "2.2.0"), stackId);
+
+    Assert.assertEquals("HDP-2.2.0", sc.getDesiredStackVersion().getStackId());
+
+    Assert.assertNotNull(serviceComponentDesiredStateEntity);
+
     ServiceComponentVersionEntity version = new ServiceComponentVersionEntity();
     version.setState(RepositoryVersionState.CURRENT);
     version.setRepositoryVersion(rve);
@@ -650,7 +669,13 @@ public class ServiceComponentTest {
     String componentName = "NAMENODE";
 
     ServiceComponent component = serviceComponentFactory.createNew(service, componentName);
-    component.setDesiredStackVersion(new StackId("HDP-2.2.0"));
+
+    StackId newStackId = new StackId("HDP-2.2.0");
+    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(newStackId,
+        newStackId.getStackVersion());
+
+    component.setDesiredRepositoryVersion(repositoryVersion);
+
     service.addServiceComponent(component);
 
     ServiceComponent sc = service.getServiceComponent(componentName);
@@ -658,8 +683,11 @@ public class ServiceComponentTest {
 
     ServiceComponentDesiredStateEntity entity = serviceComponentDesiredStateDAO.findByName(cluster.getClusterId(), serviceName, componentName);
 
-    helper.getOrCreateRepositoryVersion(component.getDesiredStackVersion(), "2.2.0.1");
-    helper.getOrCreateRepositoryVersion(component.getDesiredStackVersion(), "2.2.0.2");
+    RepositoryVersionEntity repoVersion2201 = helper.getOrCreateRepositoryVersion(
+        component.getDesiredStackVersion(), "2.2.0.1");
+
+    RepositoryVersionEntity repoVersion2202 = helper.getOrCreateRepositoryVersion(
+        component.getDesiredStackVersion(), "2.2.0.2");
 
     addHostToCluster("h1", clusterName);
     addHostToCluster("h2", clusterName);
@@ -671,7 +699,7 @@ public class ServiceComponentTest {
     ServiceComponentHost sch2 = sc.addServiceComponentHost("h2");
 
     // !!! case 1: component desired is UNKNOWN, mix of h-c versions
-    sc.setDesiredVersion(StackVersionListener.UNKNOWN_VERSION);
+    sc.setDesiredRepositoryVersion(repositoryVersion);
     sch1.setVersion("2.2.0.1");
     sch2.setVersion("2.2.0.2");
     sc.updateRepositoryState("2.2.0.2");
@@ -679,15 +707,15 @@ public class ServiceComponentTest {
     assertEquals(RepositoryVersionState.OUT_OF_SYNC, entity.getRepositoryState());
 
     // !!! case 2: component desired is UNKNOWN, all h-c same version
-    sc.setDesiredVersion(StackVersionListener.UNKNOWN_VERSION);
+    sc.setDesiredRepositoryVersion(repositoryVersion);
     sch1.setVersion("2.2.0.1");
     sch2.setVersion("2.2.0.1");
     sc.updateRepositoryState("2.2.0.1");
     entity = serviceComponentDesiredStateDAO.findByName(cluster.getClusterId(), serviceName, componentName);
-    assertEquals(RepositoryVersionState.CURRENT, entity.getRepositoryState());
+    assertEquals(RepositoryVersionState.OUT_OF_SYNC, entity.getRepositoryState());
 
     // !!! case 3: component desired is known, any component reports different version
-    sc.setDesiredVersion("2.2.0.1");
+    sc.setDesiredRepositoryVersion(repoVersion2201);
     sch1.setVersion("2.2.0.1");
     sch2.setVersion("2.2.0.2");
     sc.updateRepositoryState("2.2.0.2");
@@ -695,7 +723,7 @@ public class ServiceComponentTest {
     assertEquals(RepositoryVersionState.OUT_OF_SYNC, entity.getRepositoryState());
 
     // !!! case 4: component desired is known, component reports same as desired, mix of h-c versions
-    sc.setDesiredVersion("2.2.0.1");
+    sc.setDesiredRepositoryVersion(repoVersion2201);
     sch1.setVersion("2.2.0.1");
     sch2.setVersion("2.2.0.2");
     sc.updateRepositoryState("2.2.0.1");
@@ -703,7 +731,7 @@ public class ServiceComponentTest {
     assertEquals(RepositoryVersionState.OUT_OF_SYNC, entity.getRepositoryState());
 
     // !!! case 5: component desired is known, component reports same as desired, all h-c the same
-    sc.setDesiredVersion("2.2.0.1");
+    sc.setDesiredRepositoryVersion(repoVersion2201);
     sch1.setVersion("2.2.0.1");
     sch2.setVersion("2.2.0.1");
     sc.updateRepositoryState("2.2.0.1");
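
A condensed view of the setup pattern these component tests now share: the repository version entity is resolved (or created) first and handed to the factories, rather than setting desired stack/version strings afterwards. Sketch only, assuming the OrmTestHelper and factory wiring shown in the hunks above:

    // create the repo version up front and pass it through the factories
    RepositoryVersionEntity repoVersion =
        helper.getOrCreateRepositoryVersion(new StackId("HDP-2.2.0"), "2.2.0.1");

    Service service = serviceFactory.createNew(cluster, "HDFS", repoVersion);
    cluster.addService(service);

    ServiceComponent component = serviceComponentFactory.createNew(service, "NAMENODE");
    component.setDesiredRepositoryVersion(repoVersion);
    service.addServiceComponent(component);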

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java
index fc12e44..dfe8f59 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceTest.java
@@ -27,12 +27,13 @@ import java.util.Map;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.H2DatabaseCleaner;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.controller.ServiceResponse;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.orm.OrmTestHelper;
 import org.apache.ambari.server.orm.dao.ClusterServiceDAO;
 import org.apache.ambari.server.orm.entities.ClusterServiceEntity;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -51,7 +52,12 @@ public class ServiceTest {
   private ServiceFactory serviceFactory;
   private ServiceComponentFactory serviceComponentFactory;
   private ServiceComponentHostFactory serviceComponentHostFactory;
-  private AmbariMetaInfo metaInfo;
+  private OrmTestHelper ormTestHelper;
+
+  private final String STACK_VERSION = "0.1";
+  private final String REPO_VERSION = "0.1-1234";
+  private final StackId STACK_ID = new StackId("HDP", STACK_VERSION);
+  private RepositoryVersionEntity repositoryVersion;
 
   @Before
   public void setup() throws Exception {
@@ -59,13 +65,14 @@ public class ServiceTest {
     injector.getInstance(GuiceJpaInitializer.class);
     clusters = injector.getInstance(Clusters.class);
     serviceFactory = injector.getInstance(ServiceFactory.class);
-    serviceComponentFactory = injector.getInstance(
-            ServiceComponentFactory.class);
-    serviceComponentHostFactory = injector.getInstance(
-            ServiceComponentHostFactory.class);
-    metaInfo = injector.getInstance(AmbariMetaInfo.class);
+    serviceComponentFactory = injector.getInstance(ServiceComponentFactory.class);
+    serviceComponentHostFactory = injector.getInstance(ServiceComponentHostFactory.class);
+
+    ormTestHelper = injector.getInstance(OrmTestHelper.class);
+    repositoryVersion = ormTestHelper.getOrCreateRepositoryVersion(STACK_ID, REPO_VERSION);
+
     clusterName = "foo";
-    clusters.addCluster(clusterName, new StackId("HDP-0.1"));
+    clusters.addCluster(clusterName, STACK_ID);
     cluster = clusters.getCluster(clusterName);
     Assert.assertNotNull(cluster);
   }
@@ -77,7 +84,7 @@ public class ServiceTest {
 
   @Test
   public void testCanBeRemoved() throws Exception{
-    Service service = cluster.addService("HDFS");
+    Service service = cluster.addService("HDFS", repositoryVersion);
 
     for (State state : State.values()) {
       service.setDesiredState(state);
@@ -119,15 +126,20 @@ public class ServiceTest {
   @Test
   public void testGetAndSetServiceInfo() throws AmbariException {
     String serviceName = "HDFS";
-    Service s = serviceFactory.createNew(cluster, serviceName);
+    Service s = serviceFactory.createNew(cluster, serviceName, repositoryVersion);
     cluster.addService(s);
 
     Service service = cluster.getService(serviceName);
     Assert.assertNotNull(service);
 
-    service.setDesiredStackVersion(new StackId("HDP-1.2.0"));
-    Assert.assertEquals("HDP-1.2.0",
-        service.getDesiredStackVersion().getStackId());
+    StackId desiredStackId = new StackId("HDP-1.2.0");
+    String desiredVersion = "1.2.0-1234";
+
+    RepositoryVersionEntity desiredRepositoryVersion = ormTestHelper.getOrCreateRepositoryVersion(
+        desiredStackId, desiredVersion);
+
+    service.setDesiredRepositoryVersion(desiredRepositoryVersion);
+    Assert.assertEquals(desiredStackId, service.getDesiredStackVersion());
 
     service.setDesiredState(State.INSTALLING);
     Assert.assertEquals(State.INSTALLING, service.getDesiredState());
@@ -140,7 +152,7 @@ public class ServiceTest {
   @Test
   public void testAddGetDeleteServiceComponents() throws AmbariException {
     String serviceName = "HDFS";
-    Service s = serviceFactory.createNew(cluster, serviceName);
+    Service s = serviceFactory.createNew(cluster, serviceName, repositoryVersion);
     cluster.addService(s);
 
     Service service = cluster.getService(serviceName);
@@ -223,30 +235,30 @@ public class ServiceTest {
   @Test
   public void testConvertToResponse() throws AmbariException {
     String serviceName = "HDFS";
-    Service s = serviceFactory.createNew(cluster, serviceName);
+    Service s = serviceFactory.createNew(cluster, serviceName, repositoryVersion);
     cluster.addService(s);
     Service service = cluster.getService(serviceName);
     Assert.assertNotNull(service);
 
     ServiceResponse r = s.convertToResponse();
     Assert.assertEquals(s.getName(), r.getServiceName());
-    Assert.assertEquals(s.getCluster().getClusterName(),
-        r.getClusterName());
-    Assert.assertEquals(s.getDesiredStackVersion().getStackId(),
-        r.getDesiredStackVersion());
-    Assert.assertEquals(s.getDesiredState().toString(),
-        r.getDesiredState());
-
-    service.setDesiredStackVersion(new StackId("HDP-1.2.0"));
+    Assert.assertEquals(s.getCluster().getClusterName(), r.getClusterName());
+    Assert.assertEquals(s.getDesiredStackVersion().getStackId(), r.getDesiredStackVersion());
+    Assert.assertEquals(s.getDesiredState().toString(), r.getDesiredState());
+
+    StackId desiredStackId = new StackId("HDP-1.2.0");
+    String desiredVersion = "1.2.0-1234";
+
+    RepositoryVersionEntity desiredRepositoryVersion = ormTestHelper.getOrCreateRepositoryVersion(
+        desiredStackId, desiredVersion);
+
+    service.setDesiredRepositoryVersion(desiredRepositoryVersion);
     service.setDesiredState(State.INSTALLING);
     r = s.convertToResponse();
     Assert.assertEquals(s.getName(), r.getServiceName());
-    Assert.assertEquals(s.getCluster().getClusterName(),
-        r.getClusterName());
-    Assert.assertEquals(s.getDesiredStackVersion().getStackId(),
-        r.getDesiredStackVersion());
-    Assert.assertEquals(s.getDesiredState().toString(),
-        r.getDesiredState());
+    Assert.assertEquals(s.getCluster().getClusterName(), r.getClusterName());
+    Assert.assertEquals(s.getDesiredStackVersion().getStackId(), r.getDesiredStackVersion());
+    Assert.assertEquals(s.getDesiredState().toString(), r.getDesiredState());
     // FIXME add checks for configs
 
     StringBuilder sb = new StringBuilder();
@@ -259,7 +271,7 @@ public class ServiceTest {
   @Test
   public void testServiceMaintenance() throws Exception {
     String serviceName = "HDFS";
-    Service s = serviceFactory.createNew(cluster, serviceName);
+    Service s = serviceFactory.createNew(cluster, serviceName, repositoryVersion);
     cluster.addService(s);
 
     Service service = cluster.getService(serviceName);
@@ -282,7 +294,7 @@ public class ServiceTest {
   @Test
   public void testSecurityState() throws Exception {
     String serviceName = "HDFS";
-    Service s = serviceFactory.createNew(cluster, serviceName);
+    Service s = serviceFactory.createNew(cluster, serviceName, repositoryVersion);
     cluster.addService(s);
 
     Service service = cluster.getService(serviceName);


[7/9] ambari git commit: AMBARI-20894 - Setting Desired Stack Ids to Correct Values During Service and Patch Upgrades (jonathanhurley)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
index f1e93ac..a0d7352 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
@@ -681,7 +681,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
           LOG.debug("Updating live stack version during INSTALL event"
               + ", new stack version=" + e.getStackId());
         }
-        impl.setStackVersion(new StackId(e.getStackId()));
       }
     }
   }
@@ -782,7 +781,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
     stateEntity.setHostEntity(hostEntity);
     stateEntity.setCurrentState(stateMachine.getCurrentState());
     stateEntity.setUpgradeState(UpgradeState.NONE);
-    stateEntity.setCurrentStack(stackEntity);
 
     HostComponentDesiredStateEntity desiredStateEntity = new HostComponentDesiredStateEntity();
     desiredStateEntity.setClusterId(serviceComponent.getClusterId());
@@ -790,7 +788,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
     desiredStateEntity.setServiceName(serviceComponent.getServiceName());
     desiredStateEntity.setHostEntity(hostEntity);
     desiredStateEntity.setDesiredState(State.INIT);
-    desiredStateEntity.setDesiredStack(stackEntity);
 
     if(!serviceComponent.isMasterComponent() && !serviceComponent.isClientComponent()) {
       desiredStateEntity.setAdminState(HostComponentAdminState.INSERVICE);
@@ -1120,36 +1117,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
   }
 
   @Override
-  public StackId getStackVersion() {
-    HostComponentStateEntity schStateEntity = getStateEntity();
-    return getStackVersionFromSCHStateEntity(schStateEntity);
-  }
-
-  private StackId getStackVersionFromSCHStateEntity(HostComponentStateEntity schStateEntity) {
-    if (schStateEntity == null) {
-      return new StackId();
-    }
-
-    StackEntity currentStackEntity = schStateEntity.getCurrentStack();
-    return new StackId(currentStackEntity.getStackName(), currentStackEntity.getStackVersion());
-  }
-
-  @Override
-  public void setStackVersion(StackId stackId) {
-    StackEntity stackEntity = stackDAO.find(stackId.getStackName(), stackId.getStackVersion());
-
-    HostComponentStateEntity stateEntity = getStateEntity();
-    if (stateEntity != null) {
-      stateEntity.setCurrentStack(stackEntity);
-      stateEntity = hostComponentStateDAO.merge(stateEntity);
-    } else {
-      LOG.warn("Setting a member on an entity object that may have been "
-          + "previously deleted, serviceName = " + getServiceName() + ", " + "componentName = "
-          + getServiceComponentName() + ", " + "hostName = " + getHostName());
-    }
-  }
-
-  @Override
   public State getDesiredState() {
     HostComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
     if (desiredStateEntity != null) {
@@ -1180,38 +1147,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
   }
 
   @Override
-  public StackId getDesiredStackVersion() {
-    HostComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
-    return getDesiredStackVersionFromHostComponentDesiredStateEntity(desiredStateEntity);
-  }
-
-  private StackId getDesiredStackVersionFromHostComponentDesiredStateEntity(HostComponentDesiredStateEntity desiredStateEntity) {
-    if (desiredStateEntity != null) {
-      StackEntity desiredStackEntity = desiredStateEntity.getDesiredStack();
-      return new StackId(desiredStackEntity.getStackName(), desiredStackEntity.getStackVersion());
-    } else {
-      LOG.warn("Trying to fetch a member from an entity object that may "
-              + "have been previously deleted, serviceName = " + getServiceName() + ", "
-              + "componentName = " + getServiceComponentName() + ", " + "hostName = " + getHostName());
-    }
-    return null;
-  }
-
-  @Override
-  public void setDesiredStackVersion(StackId stackId) {
-    LOG.debug("Set DesiredStackVersion on serviceName = {} componentName = {} hostName = {} to {}",
-        getServiceName(), getServiceComponentName(), getHostName(), stackId);
-
-    HostComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
-    if (desiredStateEntity != null) {
-      StackEntity stackEntity = stackDAO.find(stackId.getStackName(), stackId.getStackVersion());
-
-      desiredStateEntity.setDesiredStack(stackEntity);
-      hostComponentDesiredStateDAO.merge(desiredStateEntity);
-    }
-  }
-
-  @Override
   public HostComponentAdminState getComponentAdminState() {
     HostComponentDesiredStateEntity desiredStateEntity = getDesiredStateEntity();
     return getComponentAdminStateFromDesiredStateEntity(desiredStateEntity);
@@ -1250,14 +1185,7 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
   public ServiceComponentHostResponse convertToResponse(Map<String, DesiredConfig> desiredConfigs) {
     HostComponentStateEntity hostComponentStateEntity = getStateEntity();
     HostEntity hostEntity = hostComponentStateEntity.getHostEntity();
-    if (null == hostComponentStateEntity) {
-      LOG.warn(
-          "Could not convert ServiceComponentHostResponse to a response. It's possible that Host {} was deleted.",
-          getHostName());
-      return null;
-    }
 
-    StackId stackVersion = getStackVersionFromSCHStateEntity(hostComponentStateEntity);
     HostComponentDesiredStateEntity hostComponentDesiredStateEntity = getDesiredStateEntity();
 
     String clusterName = serviceComponent.getClusterName();
@@ -1266,14 +1194,14 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
     String hostName = getHostName();
     String publicHostName = hostEntity.getPublicHostName();
     String state = getState().toString();
-    String stackId = stackVersion.getStackId();
     String desiredState = (hostComponentDesiredStateEntity == null) ? null : hostComponentDesiredStateEntity.getDesiredState().toString();
-    String desiredStackId = getDesiredStackVersionFromHostComponentDesiredStateEntity(hostComponentDesiredStateEntity).getStackId();
+    String desiredStackId = serviceComponent.getDesiredStackVersion().getStackId();
     HostComponentAdminState componentAdminState = getComponentAdminStateFromDesiredStateEntity(hostComponentDesiredStateEntity);
     UpgradeState upgradeState = hostComponentStateEntity.getUpgradeState();
 
     String displayName = null;
     try {
+      StackId stackVersion = serviceComponent.getDesiredStackVersion();
       ComponentInfo compInfo = ambariMetaInfo.getComponent(stackVersion.getStackName(),
               stackVersion.getStackVersion(), serviceName, serviceComponentName);
       displayName = compInfo.getDisplayName();
@@ -1281,9 +1209,15 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
       displayName = serviceComponentName;
     }
 
+    String desiredRepositoryVersion = null;
+    RepositoryVersionEntity repositoryVersion = serviceComponent.getDesiredRepositoryVersion();
+    if (null != repositoryVersion) {
+      desiredRepositoryVersion = repositoryVersion.getVersion();
+    }
+
     ServiceComponentHostResponse r = new ServiceComponentHostResponse(clusterName, serviceName,
-        serviceComponentName, displayName, hostName, publicHostName, state, stackId, 
-        desiredState, desiredStackId, componentAdminState);
+        serviceComponentName, displayName, hostName, publicHostName, state, getVersion(),
+        desiredState, desiredStackId, desiredRepositoryVersion, componentAdminState);
 
     r.setActualConfigs(actualConfigs);
     r.setUpgradeState(upgradeState);
@@ -1312,11 +1246,11 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
     .append(", serviceName=")
     .append(serviceComponent.getServiceName())
     .append(", desiredStackVersion=")
-    .append(getDesiredStackVersion())
+    .append(serviceComponent.getDesiredStackVersion())
     .append(", desiredState=")
     .append(getDesiredState())
-    .append(", stackVersion=")
-    .append(getStackVersion())
+    .append(", version=")
+    .append(getVersion())
     .append(", state=")
     .append(getState())
     .append(", securityState=")
@@ -1377,7 +1311,7 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
     // completed, but only if it was persisted
     if (fireRemovalEvent) {
       long clusterId = getClusterId();
-      StackId stackId = getStackVersion();
+      StackId stackId = serviceComponent.getDesiredStackVersion();
       String stackVersion = stackId.getStackVersion();
       String stackName = stackId.getStackName();
       String serviceName = getServiceName();
@@ -1614,4 +1548,11 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
     return hostComponentStateDAO.findById(hostComponentStateId);
   }
 
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public ServiceComponent getServiceComponent() {
+    return serviceComponent;
+  }
 }
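
With the per-host stack getters and setters removed here, callers that still need stack or version information go through the owning component. A rough sketch using the accessors this patch introduces or relies on (the sch variable stands in for any ServiceComponentHost reference):

    // stack and desired version now come from the owning ServiceComponent
    ServiceComponent component = sch.getServiceComponent();
    StackId desiredStack = component.getDesiredStackVersion();
    RepositoryVersionEntity desiredRepo = component.getDesiredRepositoryVersion();
    String desiredVersion = (desiredRepo == null) ? null : desiredRepo.getVersion();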

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
index 5e0d707..4a1e61f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
@@ -72,6 +72,7 @@ import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.SecurityType;
+import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.configgroup.ConfigGroup;
 import org.apache.ambari.server.utils.RetryHelper;
 import org.slf4j.Logger;
@@ -167,9 +168,10 @@ public class AmbariContext {
 
   public void createAmbariResources(ClusterTopology topology, String clusterName, SecurityType securityType, String repoVersion) {
     Stack stack = topology.getBlueprint().getStack();
+    StackId stackId = new StackId(stack.getName(), stack.getVersion());
 
     createAmbariClusterResource(clusterName, stack.getName(), stack.getVersion(), securityType, repoVersion);
-    createAmbariServiceAndComponentResources(topology, clusterName);
+    createAmbariServiceAndComponentResources(topology, clusterName, stackId, repoVersion);
   }
 
   public void createAmbariClusterResource(String clusterName, String stackName, String stackVersion, SecurityType securityType, String repoVersion) {
@@ -196,7 +198,8 @@ public class AmbariContext {
     }
   }
 
-  public void createAmbariServiceAndComponentResources(ClusterTopology topology, String clusterName) {
+  public void createAmbariServiceAndComponentResources(ClusterTopology topology, String clusterName,
+      StackId stackId, String repositoryVersion) {
     Collection<String> services = topology.getBlueprint().getServices();
 
     try {
@@ -209,7 +212,9 @@ public class AmbariContext {
     Set<ServiceComponentRequest> componentRequests = new HashSet<>();
     for (String service : services) {
       String credentialStoreEnabled = topology.getBlueprint().getCredentialStoreEnabled(service);
-      serviceRequests.add(new ServiceRequest(clusterName, service, null, credentialStoreEnabled));
+      serviceRequests.add(new ServiceRequest(clusterName, service, null, stackId.getStackId(),
+          repositoryVersion, credentialStoreEnabled));
+
       for (String component : topology.getBlueprint().getComponents(service)) {
         String recoveryEnabled = topology.getBlueprint().getRecoveryEnabled(service, component);
         componentRequests.add(new ServiceComponentRequest(clusterName, service, component, null, recoveryEnabled));
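
The AmbariContext change above builds a StackId from the blueprint's stack and hands its string form, together with the repository version, to every ServiceRequest it creates. The minimal, self-contained sketch below is illustrative only and not part of the patch; it assumes StackId.getStackId() renders the id as "<name>-<version>" (consistent with ids such as "HDP-0.1" seen elsewhere in this change), and the concrete version strings are hypothetical.

// Illustrative sketch only -- not part of the patch.
// Assumption: new StackId(name, version).getStackId() yields "<name>-<version>".
public final class StackIdSketch {
  public static void main(String[] args) {
    String stackName = "HDP";                         // stack.getName() from the blueprint
    String stackVersion = "2.6";                      // stack.getVersion() from the blueprint
    String stackId = stackName + "-" + stackVersion;  // the string passed into ServiceRequest
    String desiredRepositoryVersion = "2.6.0.3-8";    // hypothetical repoVersion argument
    System.out.println("ServiceRequest carries stackId=" + stackId
        + ", desiredRepositoryVersion=" + desiredRepositoryVersion);
  }
}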

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
index 6c59784..cb12959 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
@@ -102,8 +102,6 @@ public abstract class AbstractUpgradeCatalog implements UpgradeCatalog {
   protected DBAccessor dbAccessor;
   @Inject
   protected Configuration configuration;
-  @Inject
-  protected StackUpgradeUtil stackUpgradeUtil;
 
   protected Injector injector;
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/upgrade/StackUpgradeHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/StackUpgradeHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/StackUpgradeHelper.java
deleted file mode 100644
index 0aab0b5..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/StackUpgradeHelper.java
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.upgrade;
-
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.InputMismatchException;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.ambari.server.controller.ControllerModule;
-import org.apache.ambari.server.orm.DBAccessor;
-import org.apache.ambari.server.orm.dao.MetainfoDAO;
-import org.apache.ambari.server.orm.entities.MetainfoEntity;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.gson.Gson;
-import com.google.inject.Guice;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.persist.PersistService;
-import com.google.inject.persist.Transactional;
-
-public class StackUpgradeHelper {
-  private static final Logger LOG = LoggerFactory.getLogger
-    (StackUpgradeHelper.class);
-
-  private static final String STACK_ID_UPDATE_ACTION = "updateStackId";
-  private static final String METAINFO_UPDATE_ACTION = "updateMetaInfo";
-  private static final String STACK_ID_STACK_NAME_KEY = "stackName";
-
-  @Inject
-  private DBAccessor dbAccessor;
-  @Inject
-  private PersistService persistService;
-  @Inject
-  private MetainfoDAO metainfoDAO;
-  @Inject
-  private StackUpgradeUtil stackUpgradeUtil;
-
-  private void startPersistenceService() {
-    persistService.start();
-  }
-
-  private void stopPersistenceService() {
-    persistService.stop();
-  }
-
-  /**
-   * Add key value to the metainfo table.
-   * @param data
-   * @throws SQLException
-   */
-  @Transactional
-  void updateMetaInfo(Map<String, String> data) throws SQLException {
-    if (data != null && !data.isEmpty()) {
-      for (Map.Entry<String, String> entry : data.entrySet()) {
-        MetainfoEntity metainfoEntity = metainfoDAO.findByKey(entry.getKey());
-        if (metainfoEntity != null) {
-          metainfoEntity.setMetainfoName(entry.getKey());
-          metainfoEntity.setMetainfoValue(entry.getValue());
-          metainfoDAO.merge(metainfoEntity);
-        } else {
-          metainfoEntity = new MetainfoEntity();
-          metainfoEntity.setMetainfoName(entry.getKey());
-          metainfoEntity.setMetainfoValue(entry.getValue());
-          metainfoDAO.create(metainfoEntity);
-        }
-      }
-    }
-  }
-
-  /**
-   * Change the stack id in the Ambari DB.
-   * @param stackInfo
-   * @throws SQLException
-   */
-  public void updateStackVersion(Map<String, String> stackInfo) throws Exception {
-    if (stackInfo == null || stackInfo.isEmpty()) {
-      throw new IllegalArgumentException("Empty stack id. " + stackInfo);
-    }
-    
-    String repoUrl = stackInfo.remove("repo_url");
-    String repoUrlOs = stackInfo.remove("repo_url_os");
-    String mirrorList = stackInfo.remove("mirrors_list");
-    
-    Iterator<Map.Entry<String, String>> stackIdEntry = stackInfo.entrySet().iterator();
-    Map.Entry<String, String> stackEntry = stackIdEntry.next();
-
-    String stackName = stackEntry.getKey();
-    String stackVersion = stackEntry.getValue();
-
-    LOG.info("Updating stack id, stackName = " + stackName + ", " +
-      "stackVersion = "+ stackVersion);
-
-    stackUpgradeUtil.updateStackDetails(stackName, stackVersion);
-    
-    if (null != repoUrl) {
-      stackUpgradeUtil.updateLocalRepo(stackName, stackVersion, repoUrl, repoUrlOs, mirrorList);
-    }
-
-    dbAccessor.updateTable("hostcomponentstate", "current_state", "INSTALLED", "where current_state = 'UPGRADING'");
-  }
-
-  private List<String> getValidActions() {
-    return new ArrayList<String>() {{
-      add(STACK_ID_UPDATE_ACTION);
-      add(METAINFO_UPDATE_ACTION);
-    }};
-  }
-
-  /**
-   * Support changes need to support upgrade of Stack
-   * @param args Simple key value json map
-   */
-  public static void main(String[] args) {
-    try {
-      if (args.length < 2) {
-        throw new InputMismatchException("Need to provide action, " +
-          "stack name and stack version.");
-      }
-
-      String action = args[0];
-      String valueMap = args[1];
-
-      Injector injector = Guice.createInjector(new ControllerModule());
-      StackUpgradeHelper stackUpgradeHelper = injector.getInstance(StackUpgradeHelper.class);
-      Gson gson = injector.getInstance(Gson.class);
-
-      if (!stackUpgradeHelper.getValidActions().contains(action)) {
-        throw new IllegalArgumentException("Unsupported action. Allowed " +
-          "actions: " + stackUpgradeHelper.getValidActions());
-      }
-
-      
-      stackUpgradeHelper.startPersistenceService();
-      Map<String, String> values = gson.<Map<String, String>>fromJson(valueMap, Map.class);
-
-      if (action.equals(STACK_ID_UPDATE_ACTION)) {
-        stackUpgradeHelper.updateStackVersion(values);
-        
-      } else if (action.equals(METAINFO_UPDATE_ACTION)) {
-
-        stackUpgradeHelper.updateMetaInfo(values);
-      }
-
-      stackUpgradeHelper.stopPersistenceService();
-
-    } catch (Throwable t) {
-      LOG.error("Caught exception on upgrade. Exiting...", t);
-      System.exit(1);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/upgrade/StackUpgradeUtil.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/StackUpgradeUtil.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/StackUpgradeUtil.java
deleted file mode 100644
index b258aa8..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/StackUpgradeUtil.java
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.upgrade;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.orm.dao.ClusterDAO;
-import org.apache.ambari.server.orm.dao.ClusterStateDAO;
-import org.apache.ambari.server.orm.dao.HostComponentDesiredStateDAO;
-import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
-import org.apache.ambari.server.orm.dao.MetainfoDAO;
-import org.apache.ambari.server.orm.dao.ServiceComponentDesiredStateDAO;
-import org.apache.ambari.server.orm.dao.ServiceDesiredStateDAO;
-import org.apache.ambari.server.orm.dao.StackDAO;
-import org.apache.ambari.server.orm.entities.ClusterEntity;
-import org.apache.ambari.server.orm.entities.ClusterStateEntity;
-import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
-import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
-import org.apache.ambari.server.orm.entities.MetainfoEntity;
-import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity;
-import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntity;
-import org.apache.ambari.server.orm.entities.StackEntity;
-import org.apache.ambari.server.state.OperatingSystemInfo;
-import org.apache.ambari.server.state.stack.OsFamily;
-
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.persist.Transactional;
-
-public class StackUpgradeUtil {
-  @Inject
-  private Injector injector;
-
-  @Transactional
-  public void updateStackDetails(String stackName, String stackVersion) {
-    ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
-    StackDAO stackDAO = injector.getInstance(StackDAO.class);
-    List<Long> clusterIds = new ArrayList<>();
-
-    StackEntity stackEntity = stackDAO.find(stackName, stackVersion);
-
-    List<ClusterEntity> clusterEntities = clusterDAO.findAll();
-    if (clusterEntities != null && !clusterEntities.isEmpty()) {
-      for (ClusterEntity entity : clusterEntities) {
-        clusterIds.add(entity.getClusterId());
-        entity.setDesiredStack(stackEntity);
-        clusterDAO.merge(entity);
-      }
-    }
-
-    ClusterStateDAO clusterStateDAO = injector.getInstance(ClusterStateDAO.class);
-
-    for (Long clusterId : clusterIds) {
-      ClusterStateEntity clusterStateEntity = clusterStateDAO.findByPK(clusterId);
-      clusterStateEntity.setCurrentStack(stackEntity);
-      clusterStateDAO.merge(clusterStateEntity);
-    }
-
-    HostComponentStateDAO hostComponentStateDAO = injector.getInstance
-      (HostComponentStateDAO.class);
-    List<HostComponentStateEntity> hcEntities = hostComponentStateDAO.findAll();
-
-    if (hcEntities != null) {
-      for (HostComponentStateEntity hc : hcEntities) {
-        hc.setCurrentStack(stackEntity);
-        hostComponentStateDAO.merge(hc);
-      }
-    }
-
-    HostComponentDesiredStateDAO hostComponentDesiredStateDAO =
-      injector.getInstance(HostComponentDesiredStateDAO.class);
-
-    List<HostComponentDesiredStateEntity> hcdEntities = hostComponentDesiredStateDAO.findAll();
-
-    if (hcdEntities != null) {
-      for (HostComponentDesiredStateEntity hcd : hcdEntities) {
-        hcd.setDesiredStack(stackEntity);
-        hostComponentDesiredStateDAO.merge(hcd);
-      }
-    }
-
-    ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO =
-      injector.getInstance(ServiceComponentDesiredStateDAO.class);
-
-    List<ServiceComponentDesiredStateEntity> scdEntities =
-      serviceComponentDesiredStateDAO.findAll();
-
-    if (scdEntities != null) {
-      for (ServiceComponentDesiredStateEntity scd : scdEntities) {
-        scd.setDesiredStack(stackEntity);
-        serviceComponentDesiredStateDAO.merge(scd);
-      }
-    }
-
-    ServiceDesiredStateDAO serviceDesiredStateDAO = injector.getInstance(ServiceDesiredStateDAO.class);
-
-    List<ServiceDesiredStateEntity> sdEntities = serviceDesiredStateDAO.findAll();
-
-    if (sdEntities != null) {
-      for (ServiceDesiredStateEntity sd : sdEntities) {
-        sd.setDesiredStack(stackEntity);
-        serviceDesiredStateDAO.merge(sd);
-      }
-    }
-  }
-
-  /**
-   * @param stackName
-   * @param stackVersion
-   * @param repoUrl
-   * @param repoUrlOs
-   * @param mirrorList
-   * @throws Exception
-   */
-  public void updateLocalRepo(String stackName, String stackVersion,
-                              String repoUrl, String repoUrlOs, String mirrorList) throws Exception {
-
-    if (null == repoUrl ||
-        repoUrl.isEmpty() ||
-        !repoUrl.startsWith("http")) {
-      return;
-    }
-
-    String[] oses = new String[0];
-
-    if (null != repoUrlOs) {
-      oses = repoUrlOs.split(",");
-    }
-
-    AmbariMetaInfo ami = injector.getInstance(AmbariMetaInfo.class);
-    MetainfoDAO metaDao = injector.getInstance(MetainfoDAO.class);
-    OsFamily os_family = injector.getInstance(OsFamily.class);
-
-    String stackRepoId = stackName + "-" + stackVersion;
-
-    if (0 == oses.length) {
-      // do them all
-      for (OperatingSystemInfo osi : ami.getOperatingSystems(stackName, stackVersion)) {
-        ami.updateRepo(stackName, stackVersion, osi.getOsType(),
-            stackRepoId, repoUrl, mirrorList);
-      }
-
-    } else {
-      for (String os : oses) {
-
-        String family = os_family.find(os);
-        if (null != family) {
-          String key = ami.generateRepoMetaKey(stackName, stackVersion, os,
-              stackRepoId, AmbariMetaInfo.REPOSITORY_XML_PROPERTY_BASEURL);
-
-          String familyKey = ami.generateRepoMetaKey(stackName, stackVersion, family,
-              stackRepoId, AmbariMetaInfo.REPOSITORY_XML_PROPERTY_BASEURL);
-
-          // need to use (for example) redhat6 if the os is centos6
-          MetainfoEntity entity = metaDao.findByKey(key);
-          if (null == entity) {
-            entity = new MetainfoEntity();
-            entity.setMetainfoName(key);
-            entity.setMetainfoValue(repoUrl);
-            metaDao.merge(entity);
-          } else {
-            entity.setMetainfoValue(repoUrl);
-            metaDao.merge(entity);
-          }
-
-          entity = metaDao.findByKey(familyKey);
-          if (null == entity) {
-            entity = new MetainfoEntity();
-            entity.setMetainfoName(familyKey);
-            entity.setMetainfoValue(repoUrl);
-            metaDao.merge(entity);
-          } else {
-            entity.setMetainfoValue(repoUrl);
-            metaDao.merge(entity);
-          }
-        }
-      }
-    }
-  }
-
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/python/ambari-server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari-server.py b/ambari-server/src/main/python/ambari-server.py
index 737be6a..deb4993 100755
--- a/ambari-server/src/main/python/ambari-server.py
+++ b/ambari-server/src/main/python/ambari-server.py
@@ -37,7 +37,7 @@ from ambari_server.dbConfiguration import DATABASE_NAMES, LINUX_DBMS_KEYS_LIST
 from ambari_server.serverConfiguration import configDefaults, get_ambari_properties, PID_NAME
 from ambari_server.serverUtils import is_server_runing, refresh_stack_hash, wait_for_server_to_stop
 from ambari_server.serverSetup import reset, setup, setup_jce_policy
-from ambari_server.serverUpgrade import upgrade, upgrade_stack, set_current
+from ambari_server.serverUpgrade import upgrade, set_current
 from ambari_server.setupHttps import setup_https, setup_truststore
 from ambari_server.setupMpacks import install_mpack, uninstall_mpack, upgrade_mpack, STACK_DEFINITIONS_RESOURCE_NAME, \
   SERVICE_DEFINITIONS_RESOURCE_NAME, MPACKS_RESOURCE_NAME
@@ -50,8 +50,8 @@ from ambari_server.enableStack import enable_stack_version
 from ambari_server.setupActions import BACKUP_ACTION, LDAP_SETUP_ACTION, LDAP_SYNC_ACTION, PSTART_ACTION, \
   REFRESH_STACK_HASH_ACTION, RESET_ACTION, RESTORE_ACTION, UPDATE_HOST_NAMES_ACTION, CHECK_DATABASE_ACTION, \
   SETUP_ACTION, SETUP_SECURITY_ACTION,START_ACTION, STATUS_ACTION, STOP_ACTION, RESTART_ACTION, UPGRADE_ACTION, \
-  UPGRADE_STACK_ACTION, SETUP_JCE_ACTION, SET_CURRENT_ACTION, START_ACTION, STATUS_ACTION, STOP_ACTION, UPGRADE_ACTION, \
-  UPGRADE_STACK_ACTION, SETUP_JCE_ACTION, SET_CURRENT_ACTION, ENABLE_STACK_ACTION, SETUP_SSO_ACTION, \
+  SETUP_JCE_ACTION, SET_CURRENT_ACTION, START_ACTION, STATUS_ACTION, STOP_ACTION, UPGRADE_ACTION, \
+  SETUP_JCE_ACTION, SET_CURRENT_ACTION, ENABLE_STACK_ACTION, SETUP_SSO_ACTION, \
   DB_CLEANUP_ACTION, INSTALL_MPACK_ACTION, UNINSTALL_MPACK_ACTION, UPGRADE_MPACK_ACTION, PAM_SETUP_ACTION, KERBEROS_SETUP_ACTION
 from ambari_server.setupSecurity import setup_ldap, sync_ldap, setup_master_key, setup_ambari_krb5_jaas, setup_pam
 from ambari_server.userInput import get_validated_string_input
@@ -753,7 +753,6 @@ def create_user_action_map(args, options):
         RESET_ACTION: UserAction(reset, options),
         STATUS_ACTION: UserAction(status, options),
         UPGRADE_ACTION: UserAction(upgrade, options),
-        UPGRADE_STACK_ACTION: UserActionPossibleArgs(upgrade_stack, [2, 4], args),
         LDAP_SETUP_ACTION: UserAction(setup_ldap, options),
         LDAP_SYNC_ACTION: UserAction(sync_ldap, options),
         SET_CURRENT_ACTION: UserAction(set_current, options),

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/python/ambari_server/serverUpgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/serverUpgrade.py b/ambari-server/src/main/python/ambari_server/serverUpgrade.py
index 6f17900..1bc7682 100644
--- a/ambari-server/src/main/python/ambari_server/serverUpgrade.py
+++ b/ambari-server/src/main/python/ambari_server/serverUpgrade.py
@@ -60,10 +60,6 @@ SCHEMA_UPGRADE_HELPER_CMD = "{0} -cp {1} " + \
                             "org.apache.ambari.server.upgrade.SchemaUpgradeHelper" + \
                             " > " + configDefaults.SERVER_OUT_FILE + " 2>&1"
 
-STACK_UPGRADE_HELPER_CMD = "{0} -cp {1} " + \
-                           "org.apache.ambari.server.upgrade.StackUpgradeHelper" + \
-                           " {2} {3} > " + configDefaults.SERVER_OUT_FILE + " 2>&1"
-
 SCHEMA_UPGRADE_HELPER_CMD_DEBUG = "{0} " \
                          "-server -XX:NewRatio=2 " \
                          "-XX:+UseConcMarkSweepGC " + \
@@ -77,52 +73,6 @@ SCHEMA_UPGRADE_DEBUG = False
 
 SUSPEND_START_MODE = False
 
-#
-# Stack upgrade
-#
-
-def upgrade_stack(args):
-  logger.info("Upgrade stack.")
-  if not is_root():
-    err = 'Ambari-server upgradestack should be run with ' \
-          'root-level privileges'
-    raise FatalException(4, err)
-
-  check_database_name_property()
-
-  try:
-    stack_id = args[1]
-  except IndexError:
-    #stack_id is mandatory
-    raise FatalException("Invalid number of stack upgrade arguments")
-
-  try:
-    repo_url = args[2]
-  except IndexError:
-    repo_url = None
-
-  try:
-    repo_url_os = args[3]
-  except IndexError:
-    repo_url_os = None
-
-  parser = optparse.OptionParser()
-  parser.add_option("-d", type="int", dest="database_index")
-
-  db = get_ambari_properties()[JDBC_DATABASE_PROPERTY]
-
-  idx = LINUX_DBMS_KEYS_LIST.index(db)
-
-  (options, opt_args) = parser.parse_args(["-d {0}".format(idx)])
-
-  stack_name, stack_version = stack_id.split(STACK_NAME_VER_SEP)
-  retcode = run_stack_upgrade(options, stack_name, stack_version, repo_url, repo_url_os)
-
-  if not retcode == 0:
-    raise FatalException(retcode, 'Stack upgrade failed.')
-
-  return retcode
-
 def load_stack_values(version, filename):
   import xml.etree.ElementTree as ET
   values = {}
@@ -143,52 +93,6 @@ def load_stack_values(version, filename):
 
   return values
 
-
-def run_stack_upgrade(args, stackName, stackVersion, repo_url, repo_url_os):
-  jdk_path = get_java_exe_path()
-  if jdk_path is None:
-    print_error_msg("No JDK found, please run the \"setup\" "
-                    "command to install a JDK automatically or install any "
-                    "JDK manually to " + configDefaults.JDK_INSTALL_DIR)
-    return 1
-  stackId = {}
-  stackId[stackName] = stackVersion
-  if repo_url is not None:
-    stackId['repo_url'] = repo_url
-  if repo_url_os is not None:
-    stackId['repo_url_os'] = repo_url_os
-
-  serverClassPath = ServerClassPath(get_ambari_properties(), args)
-  command = STACK_UPGRADE_HELPER_CMD.format(jdk_path, serverClassPath.get_full_ambari_classpath_escaped_for_shell(),
-                                            "updateStackId",
-                                            "'" + json.dumps(stackId) + "'")
-  (retcode, stdout, stderr) = run_os_command(command)
-  print_info_msg("Return code from stack upgrade command, retcode = {0}".format(str(retcode)))
-  if retcode > 0:
-    print_error_msg("Error executing stack upgrade, please check the server logs.")
-  return retcode
-
-def run_metainfo_upgrade(args, keyValueMap=None):
-  jdk_path = get_java_exe_path()
-  if jdk_path is None:
-    print_error_msg("No JDK found, please run the \"setup\" "
-                    "command to install a JDK automatically or install any "
-                    "JDK manually to " + configDefaults.JDK_INSTALL_DIR)
-
-  retcode = 1
-  if keyValueMap:
-    serverClassPath = ServerClassPath(get_ambari_properties(), args)
-    command = STACK_UPGRADE_HELPER_CMD.format(jdk_path, serverClassPath.get_full_ambari_classpath_escaped_for_shell(),
-                                              'updateMetaInfo',
-                                              "'" + json.dumps(keyValueMap) + "'")
-    (retcode, stdout, stderr) = run_os_command(command)
-    print_info_msg("Return code from stack upgrade command, retcode = {0}".format(str(retcode)))
-    if retcode > 0:
-      print_error_msg("Error executing metainfo upgrade, please check the server logs.")
-
-  return retcode
-
-
 #
 # Repo upgrade
 #
@@ -203,47 +107,6 @@ def change_objects_owner(args):
 
   dbms.change_db_files_owner()
 
-def upgrade_local_repo(args):
-  properties = get_ambari_properties()
-  if properties == -1:
-    print_error_msg("Error getting ambari properties")
-    return -1
-
-  stack_location = get_stack_location(properties)
-  stack_root_local = os.path.join(stack_location, "HDPLocal")
-  if not os.path.exists(stack_root_local):
-    print_info_msg("HDPLocal stack directory does not exist, skipping")
-    return
-
-  stack_root = os.path.join(stack_location, "HDP")
-  if not os.path.exists(stack_root):
-    print_info_msg("HDP stack directory does not exist, skipping")
-    return
-
-  for stack_version_local in os.listdir(stack_root_local):
-    repo_file_local = os.path.join(stack_root_local, stack_version_local, "repos", "repoinfo.xml.rpmsave")
-    if not os.path.exists(repo_file_local):
-      repo_file_local = os.path.join(stack_root_local, stack_version_local, "repos", "repoinfo.xml")
-
-    repo_file = os.path.join(stack_root, stack_version_local, "repos", "repoinfo.xml")
-
-    print_info_msg("Local repo file: {0}".format(repo_file_local))
-    print_info_msg("Repo file: {0}".format(repo_file_local))
-
-    metainfo_update_items = {}
-
-    if os.path.exists(repo_file_local) and os.path.exists(repo_file):
-      local_values = load_stack_values(stack_version_local, repo_file_local)
-      repo_values = load_stack_values(stack_version_local, repo_file)
-      for k, v in local_values.iteritems():
-        if repo_values.has_key(k):
-          local_url = local_values[k]
-          repo_url = repo_values[k]
-          if repo_url != local_url:
-            metainfo_update_items[k] = local_url
-
-    run_metainfo_upgrade(args, metainfo_update_items)
-
 #
 # Schema upgrade
 #
@@ -384,9 +247,6 @@ def upgrade(args):
   else:
     adjust_directory_permissions(user)
 
-  # local repo
-  upgrade_local_repo(args)
-
   # create jdbc symlinks if jdbc drivers are available in resources
   check_jdbc_drivers(args)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/python/ambari_server/setupActions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/setupActions.py b/ambari-server/src/main/python/ambari_server/setupActions.py
index 358bfc9..758e42f 100644
--- a/ambari-server/src/main/python/ambari_server/setupActions.py
+++ b/ambari-server/src/main/python/ambari_server/setupActions.py
@@ -26,7 +26,6 @@ STOP_ACTION = "stop"
 RESTART_ACTION = "restart"
 RESET_ACTION = "reset"
 UPGRADE_ACTION = "upgrade"
-UPGRADE_STACK_ACTION = "upgradestack"
 REFRESH_STACK_HASH_ACTION = "refresh-stack-hash"
 STATUS_ACTION = "status"
 SETUP_HTTPS_ACTION = "setup-https"

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
index b241dc2..e25cfa3 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
@@ -178,22 +178,20 @@ CREATE TABLE servicecomponentdesiredstate (
   id BIGINT NOT NULL,
   component_name VARCHAR(255) NOT NULL,
   cluster_id BIGINT NOT NULL,
-  desired_stack_id BIGINT NOT NULL,
+  desired_repo_version_id BIGINT NOT NULL,
   desired_state VARCHAR(255) NOT NULL,
-  desired_version VARCHAR(255) NOT NULL DEFAULT 'UNKNOWN',
   service_name VARCHAR(255) NOT NULL,
   recovery_enabled SMALLINT NOT NULL DEFAULT 0,
   repo_state VARCHAR(255) NOT NULL DEFAULT 'INIT',
   CONSTRAINT pk_sc_desiredstate PRIMARY KEY (id),
   CONSTRAINT UQ_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id),
-  CONSTRAINT FK_scds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
+  CONSTRAINT FK_scds_desired_repo_id FOREIGN KEY (desired_repo_version_id) REFERENCES repo_version (repo_version_id),
   CONSTRAINT srvccmponentdesiredstatesrvcnm FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id));
 
 CREATE TABLE hostcomponentdesiredstate (
   id BIGINT NOT NULL,
   cluster_id BIGINT NOT NULL,
   component_name VARCHAR(255) NOT NULL,
-  desired_stack_id BIGINT NOT NULL,
   desired_state VARCHAR(255) NOT NULL,
   host_id BIGINT NOT NULL,
   service_name VARCHAR(255) NOT NULL,
@@ -204,7 +202,6 @@ CREATE TABLE hostcomponentdesiredstate (
   CONSTRAINT PK_hostcomponentdesiredstate PRIMARY KEY (id),
   CONSTRAINT UQ_hcdesiredstate_name UNIQUE (component_name, service_name, host_id, cluster_id),
   CONSTRAINT FK_hcdesiredstate_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
-  CONSTRAINT FK_hcds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
   CONSTRAINT hstcmpnntdesiredstatecmpnntnme FOREIGN KEY (component_name, service_name, cluster_id) REFERENCES servicecomponentdesiredstate (component_name, service_name, cluster_id));
 
 
@@ -213,14 +210,12 @@ CREATE TABLE hostcomponentstate (
   cluster_id BIGINT NOT NULL,
   component_name VARCHAR(255) NOT NULL,
   version VARCHAR(32) NOT NULL DEFAULT 'UNKNOWN',
-  current_stack_id BIGINT NOT NULL,
   current_state VARCHAR(255) NOT NULL,
   host_id BIGINT NOT NULL,
   service_name VARCHAR(255) NOT NULL,
   upgrade_state VARCHAR(32) NOT NULL DEFAULT 'NONE',
   security_state VARCHAR(32) NOT NULL DEFAULT 'UNSECURED',
   CONSTRAINT pk_hostcomponentstate PRIMARY KEY (id),
-  CONSTRAINT FK_hcs_current_stack_id FOREIGN KEY (current_stack_id) REFERENCES stack(stack_id),
   CONSTRAINT FK_hostcomponentstate_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
   CONSTRAINT hstcomponentstatecomponentname FOREIGN KEY (component_name, service_name, cluster_id) REFERENCES servicecomponentdesiredstate (component_name, service_name, cluster_id));
 
@@ -250,14 +245,14 @@ CREATE TABLE host_version (
 CREATE TABLE servicedesiredstate (
   cluster_id BIGINT NOT NULL,
   desired_host_role_mapping INTEGER NOT NULL,
-  desired_stack_id BIGINT NOT NULL,
+  desired_repo_version_id BIGINT NOT NULL,
   desired_state VARCHAR(255) NOT NULL,
   service_name VARCHAR(255) NOT NULL,
   maintenance_state VARCHAR(32) NOT NULL,
   security_state VARCHAR(32) NOT NULL DEFAULT 'UNSECURED',
   credential_store_enabled SMALLINT NOT NULL DEFAULT 0,
   CONSTRAINT PK_servicedesiredstate PRIMARY KEY (cluster_id, service_name),
-  CONSTRAINT FK_sds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
+  CONSTRAINT FK_repo_version_id FOREIGN KEY (desired_repo_version_id) REFERENCES repo_version (repo_version_id),
   CONSTRAINT servicedesiredstateservicename FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id));
 
 CREATE TABLE adminprincipaltype (

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
index 670bf17..8e1f51f 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
@@ -198,22 +198,20 @@ CREATE TABLE servicecomponentdesiredstate (
   id BIGINT NOT NULL,
   component_name VARCHAR(100) NOT NULL,
   cluster_id BIGINT NOT NULL,
-  desired_stack_id BIGINT NOT NULL,
-  desired_version VARCHAR(255) NOT NULL DEFAULT 'UNKNOWN',
+  desired_repo_version_id BIGINT NOT NULL,
   desired_state VARCHAR(255) NOT NULL,
   service_name VARCHAR(100) NOT NULL,
   recovery_enabled SMALLINT NOT NULL DEFAULT 0,
   repo_state VARCHAR(255) NOT NULL DEFAULT 'INIT',
   CONSTRAINT pk_sc_desiredstate PRIMARY KEY (id),
   CONSTRAINT UQ_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id),
-  CONSTRAINT FK_scds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
+  CONSTRAINT FK_scds_desired_repo_id FOREIGN KEY (desired_repo_version_id) REFERENCES repo_version (repo_version_id),
   CONSTRAINT srvccmponentdesiredstatesrvcnm FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id));
 
 CREATE TABLE hostcomponentdesiredstate (
   id BIGINT NOT NULL,
   cluster_id BIGINT NOT NULL,
   component_name VARCHAR(100) NOT NULL,
-  desired_stack_id BIGINT NOT NULL,
   desired_state VARCHAR(255) NOT NULL,
   host_id BIGINT NOT NULL,
   service_name VARCHAR(100) NOT NULL,
@@ -224,7 +222,6 @@ CREATE TABLE hostcomponentdesiredstate (
   CONSTRAINT PK_hostcomponentdesiredstate PRIMARY KEY (id),
   CONSTRAINT UQ_hcdesiredstate_name UNIQUE (component_name, service_name, host_id, cluster_id),
   CONSTRAINT FK_hcdesiredstate_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
-  CONSTRAINT FK_hcds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
   CONSTRAINT hstcmpnntdesiredstatecmpnntnme FOREIGN KEY (component_name, service_name, cluster_id) REFERENCES servicecomponentdesiredstate (component_name, service_name, cluster_id));
 
 
@@ -233,14 +230,12 @@ CREATE TABLE hostcomponentstate (
   cluster_id BIGINT NOT NULL,
   component_name VARCHAR(100) NOT NULL,
   version VARCHAR(32) NOT NULL DEFAULT 'UNKNOWN',
-  current_stack_id BIGINT NOT NULL,
   current_state VARCHAR(255) NOT NULL,
   host_id BIGINT NOT NULL,
   service_name VARCHAR(100) NOT NULL,
   upgrade_state VARCHAR(32) NOT NULL DEFAULT 'NONE',
   security_state VARCHAR(32) NOT NULL DEFAULT 'UNSECURED',
   CONSTRAINT pk_hostcomponentstate PRIMARY KEY (id),
-  CONSTRAINT FK_hcs_current_stack_id FOREIGN KEY (current_stack_id) REFERENCES stack(stack_id),
   CONSTRAINT FK_hostcomponentstate_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
   CONSTRAINT hstcomponentstatecomponentname FOREIGN KEY (component_name, service_name, cluster_id) REFERENCES servicecomponentdesiredstate (component_name, service_name, cluster_id));
 
@@ -270,14 +265,14 @@ CREATE TABLE host_version (
 CREATE TABLE servicedesiredstate (
   cluster_id BIGINT NOT NULL,
   desired_host_role_mapping INTEGER NOT NULL,
-  desired_stack_id BIGINT NOT NULL,
+  desired_repo_version_id BIGINT NOT NULL,
   desired_state VARCHAR(255) NOT NULL,
   service_name VARCHAR(255) NOT NULL,
   maintenance_state VARCHAR(32) NOT NULL DEFAULT 'ACTIVE',
   security_state VARCHAR(32) NOT NULL DEFAULT 'UNSECURED',
   credential_store_enabled SMALLINT NOT NULL DEFAULT 0,
   CONSTRAINT PK_servicedesiredstate PRIMARY KEY (cluster_id, service_name),
-  CONSTRAINT FK_sds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
+  CONSTRAINT FK_repo_version_id FOREIGN KEY (desired_repo_version_id) REFERENCES repo_version (repo_version_id),
   CONSTRAINT servicedesiredstateservicename FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id));
 
 CREATE TABLE adminprincipaltype (

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
index 00b3248..5ae144a 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
@@ -179,22 +179,20 @@ CREATE TABLE servicecomponentdesiredstate (
   id NUMBER(19) NOT NULL,
   component_name VARCHAR2(255) NOT NULL,
   cluster_id NUMBER(19) NOT NULL,
-  desired_stack_id NUMBER(19) NOT NULL,
+  desired_repo_version_id NUMBER(19) NOT NULL,
   desired_state VARCHAR2(255) NOT NULL,
-  desired_version VARCHAR(255) DEFAULT 'UNKNOWN' NOT NULL,
   service_name VARCHAR2(255) NOT NULL,
   recovery_enabled SMALLINT DEFAULT 0 NOT NULL,
   repo_state VARCHAR2(255) DEFAULT 'INIT' NOT NULL,
   CONSTRAINT pk_sc_desiredstate PRIMARY KEY (id),
   CONSTRAINT UQ_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id),
-  CONSTRAINT FK_scds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
+  CONSTRAINT FK_scds_desired_repo_id FOREIGN KEY (desired_repo_version_id) REFERENCES repo_version (repo_version_id),
   CONSTRAINT srvccmponentdesiredstatesrvcnm FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id));
 
 CREATE TABLE hostcomponentdesiredstate (
   id NUMBER(19) NOT NULL,
   cluster_id NUMBER(19) NOT NULL,
   component_name VARCHAR2(255) NOT NULL,
-  desired_stack_id NUMBER(19) NULL,
   desired_state VARCHAR2(255) NOT NULL,
   host_id NUMBER(19) NOT NULL,
   service_name VARCHAR2(255) NOT NULL,
@@ -205,7 +203,6 @@ CREATE TABLE hostcomponentdesiredstate (
   CONSTRAINT PK_hostcomponentdesiredstate PRIMARY KEY (id),
   CONSTRAINT UQ_hcdesiredstate_name UNIQUE (component_name, service_name, host_id, cluster_id),
   CONSTRAINT FK_hcdesiredstate_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
-  CONSTRAINT FK_hcds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
   CONSTRAINT hstcmpnntdesiredstatecmpnntnme FOREIGN KEY (component_name, service_name, cluster_id) REFERENCES servicecomponentdesiredstate (component_name, service_name, cluster_id));
 
 CREATE TABLE hostcomponentstate (
@@ -213,14 +210,12 @@ CREATE TABLE hostcomponentstate (
   cluster_id NUMBER(19) NOT NULL,
   component_name VARCHAR2(255) NOT NULL,
   version VARCHAR2(32) DEFAULT 'UNKNOWN' NOT NULL,
-  current_stack_id NUMBER(19) NOT NULL,
   current_state VARCHAR2(255) NOT NULL,
   host_id NUMBER(19) NOT NULL,
   service_name VARCHAR2(255) NOT NULL,
   upgrade_state VARCHAR2(32) DEFAULT 'NONE' NOT NULL,
   security_state VARCHAR2(32) DEFAULT 'UNSECURED' NOT NULL,
   CONSTRAINT pk_hostcomponentstate PRIMARY KEY (id),
-  CONSTRAINT FK_hcs_current_stack_id FOREIGN KEY (current_stack_id) REFERENCES stack(stack_id),
   CONSTRAINT FK_hostcomponentstate_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
   CONSTRAINT hstcomponentstatecomponentname FOREIGN KEY (component_name, service_name, cluster_id) REFERENCES servicecomponentdesiredstate (component_name, service_name, cluster_id));
 
@@ -250,14 +245,14 @@ CREATE TABLE host_version (
 CREATE TABLE servicedesiredstate (
   cluster_id NUMBER(19) NOT NULL,
   desired_host_role_mapping NUMBER(10) NOT NULL,
-  desired_stack_id NUMBER(19) NOT NULL,
+  desired_repo_version_id NUMBER(19) NOT NULL,
   desired_state VARCHAR2(255) NOT NULL,
   service_name VARCHAR2(255) NOT NULL,
   maintenance_state VARCHAR2(32) NOT NULL,
   security_state VARCHAR2(32) DEFAULT 'UNSECURED' NOT NULL,
   credential_store_enabled SMALLINT DEFAULT 0 NOT NULL,
   CONSTRAINT PK_servicedesiredstate PRIMARY KEY (cluster_id, service_name),
-  CONSTRAINT FK_sds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
+  CONSTRAINT FK_repo_version_id FOREIGN KEY (desired_repo_version_id) REFERENCES repo_version (repo_version_id),
   CONSTRAINT servicedesiredstateservicename FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id));
 
 CREATE TABLE adminprincipaltype (

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
index f6af968..a4b296d 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
@@ -178,22 +178,20 @@ CREATE TABLE servicecomponentdesiredstate (
   id BIGINT NOT NULL,
   component_name VARCHAR(255) NOT NULL,
   cluster_id BIGINT NOT NULL,
-  desired_stack_id BIGINT NOT NULL,
-  desired_version VARCHAR(255) NOT NULL DEFAULT 'UNKNOWN',
+  desired_repo_version_id BIGINT NOT NULL,
   desired_state VARCHAR(255) NOT NULL,
   service_name VARCHAR(255) NOT NULL,
   recovery_enabled SMALLINT NOT NULL DEFAULT 0,
   repo_state VARCHAR(255) NOT NULL DEFAULT 'INIT',
   CONSTRAINT pk_sc_desiredstate PRIMARY KEY (id),
   CONSTRAINT UQ_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id),
-  CONSTRAINT FK_scds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
+  CONSTRAINT FK_scds_desired_repo_id FOREIGN KEY (desired_repo_version_id) REFERENCES repo_version (repo_version_id),
   CONSTRAINT srvccmponentdesiredstatesrvcnm FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id));
 
 CREATE TABLE hostcomponentdesiredstate (
   id BIGINT NOT NULL,
   cluster_id BIGINT NOT NULL,
   component_name VARCHAR(255) NOT NULL,
-  desired_stack_id BIGINT NOT NULL,
   desired_state VARCHAR(255) NOT NULL,
   host_id BIGINT NOT NULL,
   service_name VARCHAR(255) NOT NULL,
@@ -204,7 +202,6 @@ CREATE TABLE hostcomponentdesiredstate (
   CONSTRAINT PK_hostcomponentdesiredstate PRIMARY KEY (id),
   CONSTRAINT UQ_hcdesiredstate_name UNIQUE (component_name, service_name, host_id, cluster_id),
   CONSTRAINT FK_hcdesiredstate_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
-  CONSTRAINT FK_hcds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
   CONSTRAINT hstcmpnntdesiredstatecmpnntnme FOREIGN KEY (component_name, service_name, cluster_id) REFERENCES servicecomponentdesiredstate (component_name, service_name, cluster_id));
 
 CREATE TABLE hostcomponentstate (
@@ -212,14 +209,12 @@ CREATE TABLE hostcomponentstate (
   cluster_id BIGINT NOT NULL,
   component_name VARCHAR(255) NOT NULL,
   version VARCHAR(32) NOT NULL DEFAULT 'UNKNOWN',
-  current_stack_id BIGINT NOT NULL,
   current_state VARCHAR(255) NOT NULL,
   host_id BIGINT NOT NULL,
   service_name VARCHAR(255) NOT NULL,
   upgrade_state VARCHAR(32) NOT NULL DEFAULT 'NONE',
   security_state VARCHAR(32) NOT NULL DEFAULT 'UNSECURED',
   CONSTRAINT pk_hostcomponentstate PRIMARY KEY (id),
-  CONSTRAINT FK_hcs_current_stack_id FOREIGN KEY (current_stack_id) REFERENCES stack(stack_id),
   CONSTRAINT FK_hostcomponentstate_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
   CONSTRAINT hstcomponentstatecomponentname FOREIGN KEY (component_name, service_name, cluster_id) REFERENCES servicecomponentdesiredstate (component_name, service_name, cluster_id));
 
@@ -249,16 +244,17 @@ CREATE TABLE host_version (
 CREATE TABLE servicedesiredstate (
   cluster_id BIGINT NOT NULL,
   desired_host_role_mapping INTEGER NOT NULL,
-  desired_stack_id BIGINT NOT NULL,
+  desired_repo_version_id BIGINT NOT NULL,
   desired_state VARCHAR(255) NOT NULL,
   service_name VARCHAR(255) NOT NULL,
   maintenance_state VARCHAR(32) NOT NULL,
   security_state VARCHAR(32) NOT NULL DEFAULT 'UNSECURED',
   credential_store_enabled SMALLINT NOT NULL DEFAULT 0,
   CONSTRAINT PK_servicedesiredstate PRIMARY KEY (cluster_id, service_name),
-  CONSTRAINT FK_sds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
+  CONSTRAINT FK_repo_version_id FOREIGN KEY (desired_repo_version_id) REFERENCES repo_version (repo_version_id),
   CONSTRAINT servicedesiredstateservicename FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id));
 
+
 CREATE TABLE adminprincipaltype (
   principal_type_id INTEGER NOT NULL,
   principal_type_name VARCHAR(255) NOT NULL,

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
index 64a0137..2f346d2 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
@@ -177,22 +177,20 @@ CREATE TABLE servicecomponentdesiredstate (
   id NUMERIC(19) NOT NULL,
   component_name VARCHAR(255) NOT NULL,
   cluster_id NUMERIC(19) NOT NULL,
-  desired_stack_id NUMERIC(19) NOT NULL,
-  desired_version VARCHAR(255) NOT NULL DEFAULT 'UNKNOWN',
+  desired_repo_version_id NUMERIC(19) NOT NULL,
   desired_state VARCHAR(255) NOT NULL,
   service_name VARCHAR(255) NOT NULL,
   recovery_enabled SMALLINT NOT NULL DEFAULT 0,
   repo_state VARCHAR(255) NOT NULL DEFAULT 'INIT',
   CONSTRAINT pk_sc_desiredstate PRIMARY KEY (id),
   CONSTRAINT UQ_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id),
-  CONSTRAINT FK_scds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
+  CONSTRAINT FK_scds_desired_repo_id FOREIGN KEY (desired_repo_version_id) REFERENCES repo_version (repo_version_id),
   CONSTRAINT srvccmponentdesiredstatesrvcnm FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id));
 
 CREATE TABLE hostcomponentdesiredstate (
   id NUMERIC(19) NOT NULL,
   cluster_id NUMERIC(19) NOT NULL,
   component_name VARCHAR(255) NOT NULL,
-  desired_stack_id NUMERIC(19) NOT NULL,
   desired_state VARCHAR(255) NOT NULL,
   host_id NUMERIC(19) NOT NULL,
   service_name VARCHAR(255) NOT NULL,
@@ -203,7 +201,6 @@ CREATE TABLE hostcomponentdesiredstate (
   CONSTRAINT PK_hostcomponentdesiredstate PRIMARY KEY (id),
   CONSTRAINT UQ_hcdesiredstate_name UNIQUE (component_name, service_name, host_id, cluster_id),
   CONSTRAINT FK_hcdesiredstate_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
-  CONSTRAINT FK_hcds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
   CONSTRAINT hstcmpnntdesiredstatecmpnntnme FOREIGN KEY (component_name, service_name, cluster_id) REFERENCES servicecomponentdesiredstate (component_name, service_name, cluster_id));
 
 CREATE TABLE hostcomponentstate (
@@ -211,14 +208,12 @@ CREATE TABLE hostcomponentstate (
   cluster_id NUMERIC(19) NOT NULL,
   component_name VARCHAR(255) NOT NULL,
   version VARCHAR(32) NOT NULL DEFAULT 'UNKNOWN',
-  current_stack_id NUMERIC(19) NOT NULL,
   current_state VARCHAR(255) NOT NULL,
   host_id NUMERIC(19) NOT NULL,
   service_name VARCHAR(255) NOT NULL,
   upgrade_state VARCHAR(32) NOT NULL DEFAULT 'NONE',
   security_state VARCHAR(32) NOT NULL DEFAULT 'UNSECURED',
   CONSTRAINT PK_hostcomponentstate PRIMARY KEY (id),
-  CONSTRAINT FK_hcs_current_stack_id FOREIGN KEY (current_stack_id) REFERENCES stack(stack_id),
   CONSTRAINT FK_hostcomponentstate_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
   CONSTRAINT hstcomponentstatecomponentname FOREIGN KEY (component_name, service_name, cluster_id) REFERENCES servicecomponentdesiredstate (component_name, service_name, cluster_id));
 
@@ -248,14 +243,14 @@ CREATE TABLE host_version (
 CREATE TABLE servicedesiredstate (
   cluster_id NUMERIC(19) NOT NULL,
   desired_host_role_mapping INTEGER NOT NULL,
-  desired_stack_id NUMERIC(19) NOT NULL,
+  desired_repo_version_id NUMERIC(19) NOT NULL,
   desired_state VARCHAR(255) NOT NULL,
   service_name VARCHAR(255) NOT NULL,
   maintenance_state VARCHAR(32) NOT NULL DEFAULT 'ACTIVE',
   security_state VARCHAR(32) NOT NULL DEFAULT 'UNSECURED',
   credential_store_enabled SMALLINT NOT NULL DEFAULT 0,
   CONSTRAINT PK_servicedesiredstate PRIMARY KEY (cluster_id, service_name),
-  CONSTRAINT FK_sds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
+  CONSTRAINT FK_repo_version_id FOREIGN KEY (desired_repo_version_id) REFERENCES repo_version (repo_version_id),
   CONSTRAINT servicedesiredstateservicename FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id));
 
 CREATE TABLE adminprincipaltype (

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
index 22b2c3d..e57fec9 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
@@ -191,22 +191,20 @@ CREATE TABLE servicecomponentdesiredstate (
   id BIGINT NOT NULL,
   component_name VARCHAR(255) NOT NULL,
   cluster_id BIGINT NOT NULL,
-  desired_stack_id BIGINT NOT NULL,
-  desired_version VARCHAR(255) NOT NULL DEFAULT 'UNKNOWN',
+  desired_repo_version_id BIGINT NOT NULL,
   desired_state VARCHAR(255) NOT NULL,
   service_name VARCHAR(255) NOT NULL,
   recovery_enabled SMALLINT NOT NULL DEFAULT 0,
   repo_state VARCHAR(255) NOT NULL DEFAULT 'INIT',
   CONSTRAINT pk_sc_desiredstate PRIMARY KEY (id),
   CONSTRAINT UQ_scdesiredstate_name UNIQUE(component_name, service_name, cluster_id),
-  CONSTRAINT FK_scds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
+  CONSTRAINT FK_scds_desired_repo_id FOREIGN KEY (desired_repo_version_id) REFERENCES repo_version (repo_version_id),
   CONSTRAINT srvccmponentdesiredstatesrvcnm FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id));
 
 CREATE TABLE hostcomponentdesiredstate (
   id BIGINT NOT NULL,
   cluster_id BIGINT NOT NULL,
   component_name VARCHAR(255) NOT NULL,
-  desired_stack_id BIGINT NOT NULL,
   desired_state VARCHAR(255) NOT NULL,
   host_id BIGINT NOT NULL,
   service_name VARCHAR(255) NOT NULL,
@@ -216,7 +214,6 @@ CREATE TABLE hostcomponentdesiredstate (
   restart_required BIT NOT NULL DEFAULT 0,
   CONSTRAINT PK_hostcomponentdesiredstate PRIMARY KEY CLUSTERED (id),
   CONSTRAINT UQ_hcdesiredstate_name UNIQUE NONCLUSTERED (component_name, service_name, host_id, cluster_id),
-  CONSTRAINT FK_hcds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
   CONSTRAINT hstcmpnntdesiredstatecmpnntnme FOREIGN KEY (component_name, service_name, cluster_id) REFERENCES servicecomponentdesiredstate (component_name, service_name, cluster_id),
   CONSTRAINT hstcmponentdesiredstatehstid FOREIGN KEY (host_id) REFERENCES hosts (host_id));
 
@@ -226,14 +223,12 @@ CREATE TABLE hostcomponentstate (
   cluster_id BIGINT NOT NULL,
   component_name VARCHAR(255) NOT NULL,
   version VARCHAR(32) NOT NULL DEFAULT 'UNKNOWN',
-  current_stack_id BIGINT NOT NULL,
   current_state VARCHAR(255) NOT NULL,
   host_id BIGINT NOT NULL,
   service_name VARCHAR(255) NOT NULL,
   upgrade_state VARCHAR(32) NOT NULL DEFAULT 'NONE',
   security_state VARCHAR(32) NOT NULL DEFAULT 'UNSECURED',
   CONSTRAINT PK_hostcomponentstate PRIMARY KEY CLUSTERED (id),
-  CONSTRAINT FK_hcs_current_stack_id FOREIGN KEY (current_stack_id) REFERENCES stack(stack_id),
   CONSTRAINT FK_hostcomponentstate_host_id FOREIGN KEY (host_id) REFERENCES hosts (host_id),
   CONSTRAINT hstcomponentstatecomponentname FOREIGN KEY (component_name, service_name, cluster_id) REFERENCES servicecomponentdesiredstate (component_name, service_name, cluster_id));
 
@@ -253,14 +248,14 @@ CREATE TABLE hoststate (
 CREATE TABLE servicedesiredstate (
   cluster_id BIGINT NOT NULL,
   desired_host_role_mapping INTEGER NOT NULL,
-  desired_stack_id BIGINT NOT NULL,
+  desired_repo_version_id BIGINT NOT NULL,
   desired_state VARCHAR(255) NOT NULL,
   service_name VARCHAR(255) NOT NULL,
   maintenance_state VARCHAR(32) NOT NULL,
   security_state VARCHAR(32) NOT NULL DEFAULT 'UNSECURED',
   credential_store_enabled SMALLINT NOT NULL DEFAULT 0,
   CONSTRAINT PK_servicedesiredstate PRIMARY KEY CLUSTERED (cluster_id,service_name),
-  CONSTRAINT FK_sds_desired_stack_id FOREIGN KEY (desired_stack_id) REFERENCES stack(stack_id),
+  CONSTRAINT FK_repo_version_id FOREIGN KEY (desired_repo_version_id) REFERENCES repo_version (repo_version_id),
   CONSTRAINT servicedesiredstateservicename FOREIGN KEY (service_name, cluster_id) REFERENCES clusterservices (service_name, cluster_id));
 
 CREATE TABLE adminprincipaltype (
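
Across the DDL scripts above, the stack foreign keys (desired_stack_id, current_stack_id) and the desired_version column on servicedesiredstate, servicecomponentdesiredstate, hostcomponentdesiredstate and hostcomponentstate are either replaced by a desired_repo_version_id foreign key into repo_version or dropped outright. As an illustration of reading through the new key, here is a hedged JDBC sketch, not part of the patch: the connection URL, credentials, cluster id and service name are hypothetical, while the table and column names are taken from the DDL above.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;

// Illustrative sketch only -- resolves a service's desired repository version
// through servicedesiredstate.desired_repo_version_id -> repo_version.repo_version_id.
public final class DesiredRepoVersionLookup {
  public static void main(String[] args) throws Exception {
    String sql = "SELECT rv.version "
               + "FROM servicedesiredstate sds "
               + "JOIN repo_version rv ON rv.repo_version_id = sds.desired_repo_version_id "
               + "WHERE sds.cluster_id = ? AND sds.service_name = ?";
    // Hypothetical Postgres instance holding the Ambari schema.
    try (Connection conn = DriverManager.getConnection(
             "jdbc:postgresql://localhost/ambari", "ambari", "bigdata");
         PreparedStatement ps = conn.prepareStatement(sql)) {
      ps.setLong(1, 2L);            // hypothetical cluster_id
      ps.setString(2, "HDFS");
      try (ResultSet rs = ps.executeQuery()) {
        while (rs.next()) {
          System.out.println("Desired repository version: " + rs.getString(1));
        }
      }
    }
  }
}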

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/resources/properties.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/properties.json b/ambari-server/src/main/resources/properties.json
index 9b2bbf8..a6bf367 100644
--- a/ambari-server/src/main/resources/properties.json
+++ b/ambari-server/src/main/resources/properties.json
@@ -15,6 +15,8 @@
     "Service":[
         "ServiceInfo/service_name",
         "ServiceInfo/cluster_name",
+        "ServiceInfo/desired_stack",
+        "ServiceInfo/desired_repository_version",
         "ServiceInfo/state",
         "ServiceInfo/maintenance_state",
         "ServiceInfo/credential_store_supported",
@@ -83,14 +85,14 @@
         "HostRoles/display_name",
         "HostRoles/state",
         "HostRoles/desired_state",
-        "HostRoles/stack_id",
+        "HostRoles/version",
         "HostRoles/desired_stack_id",
+        "HostRoles/desired_repository_version",
         "HostRoles/actual_configs",
         "params/run_smoke_test",
         "HostRoles/stale_configs",
         "HostRoles/desired_admin_state",
         "HostRoles/maintenance_state",
-        "HostRoles/hdp_version",
         "HostRoles/service_name",
         "HostRoles/upgrade_state",
         "_"

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/agent/DummyHeartbeatConstants.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/DummyHeartbeatConstants.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/DummyHeartbeatConstants.java
index a7c53d5..69980dc 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/DummyHeartbeatConstants.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/DummyHeartbeatConstants.java
@@ -36,6 +36,7 @@ public interface DummyHeartbeatConstants {
   String DummyHostStatus = "I am ok";
 
   String DummyStackId = "HDP-0.1";
+  String DummyRepositoryVersion = "0.1-1234";
 
   String HDFS = "HDFS";
   String HBASE = "HBASE";

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
index 2dd91c0..5afeb77 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
@@ -64,6 +64,7 @@ import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.orm.dao.HostDAO;
 import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
+import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
 import org.apache.ambari.server.orm.entities.HostEntity;
 import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.state.Alert;
@@ -94,7 +95,6 @@ import com.google.gson.JsonObject;
 import com.google.inject.Guice;
 import com.google.inject.Inject;
 import com.google.inject.Injector;
-import com.google.inject.persist.UnitOfWork;
 
 import junit.framework.Assert;
 
@@ -109,9 +109,6 @@ public class HeartbeatProcessorTest {
   private Clusters clusters;
 
   @Inject
-  private UnitOfWork unitOfWork;
-
-  @Inject
   Configuration config;
 
   @Inject
@@ -159,7 +156,7 @@ public class HeartbeatProcessorTest {
   @SuppressWarnings("unchecked")
   public void testHeartbeatWithConfigs() throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
+    Service hdfs = addService(cluster, HDFS);
     hdfs.addServiceComponent(DATANODE);
     hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
     hdfs.addServiceComponent(NAMENODE);
@@ -228,7 +225,7 @@ public class HeartbeatProcessorTest {
   @SuppressWarnings("unchecked")
   public void testRestartRequiredAfterInstallClient() throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
+    Service hdfs = addService(cluster, HDFS);
     hdfs.addServiceComponent(HDFS_CLIENT);
     hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1);
 
@@ -293,7 +290,7 @@ public class HeartbeatProcessorTest {
   @SuppressWarnings("unchecked")
   public void testHeartbeatCustomCommandWithConfigs() throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
+    Service hdfs = addService(cluster, HDFS);
     hdfs.addServiceComponent(DATANODE);
     hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
     hdfs.addServiceComponent(NAMENODE);
@@ -377,7 +374,7 @@ public class HeartbeatProcessorTest {
   @SuppressWarnings("unchecked")
   public void testHeartbeatCustomStartStop() throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
+    Service hdfs = addService(cluster, HDFS);
     hdfs.addServiceComponent(DATANODE);
     hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
     hdfs.addServiceComponent(NAMENODE);
@@ -461,7 +458,7 @@ public class HeartbeatProcessorTest {
   @SuppressWarnings("unchecked")
   public void testStatusHeartbeat() throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
+    Service hdfs = addService(cluster, HDFS);
     hdfs.addServiceComponent(DATANODE);
     hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
     hdfs.addServiceComponent(NAMENODE);
@@ -587,7 +584,7 @@ public class HeartbeatProcessorTest {
   public void testCommandReportOnHeartbeatUpdatedState()
       throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
+    Service hdfs = addService(cluster, HDFS);
     hdfs.addServiceComponent(DATANODE);
     hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
 
@@ -706,7 +703,7 @@ public class HeartbeatProcessorTest {
   @SuppressWarnings("unchecked")
   public void testUpgradeSpecificHandling() throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
+    Service hdfs = addService(cluster, HDFS);
     hdfs.addServiceComponent(DATANODE);
     hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
 
@@ -800,7 +797,7 @@ public class HeartbeatProcessorTest {
   @SuppressWarnings("unchecked")
   public void testCommandStatusProcesses() throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
+    Service hdfs = addService(cluster, HDFS);
     hdfs.addServiceComponent(DATANODE);
     hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
     hdfs.getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1).setState(State.STARTED);
@@ -877,93 +874,9 @@ public class HeartbeatProcessorTest {
 
   @Test
   @SuppressWarnings("unchecked")
-  public void testComponentUpgradeCompleteReport() throws Exception {
-    Cluster cluster = heartbeatTestHelper.getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
-    hdfs.addServiceComponent(DATANODE);
-    hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
-    hdfs.addServiceComponent(NAMENODE);
-    hdfs.getServiceComponent(NAMENODE).addServiceComponentHost(DummyHostname1);
-    hdfs.addServiceComponent(HDFS_CLIENT);
-    hdfs.getServiceComponent(HDFS_CLIENT).addServiceComponentHost(DummyHostname1);
-
-    ServiceComponentHost serviceComponentHost1 = clusters.getCluster(DummyCluster).getService(HDFS).
-        getServiceComponent(DATANODE).getServiceComponentHost(DummyHostname1);
-    ServiceComponentHost serviceComponentHost2 = clusters.getCluster(DummyCluster).getService(HDFS).
-        getServiceComponent(NAMENODE).getServiceComponentHost(DummyHostname1);
-
-    StackId stack130 = new StackId("HDP-1.3.0");
-    StackId stack120 = new StackId("HDP-1.2.0");
-
-    serviceComponentHost1.setState(State.UPGRADING);
-    serviceComponentHost2.setState(State.INSTALLING);
-
-    serviceComponentHost1.setStackVersion(stack120);
-    serviceComponentHost1.setDesiredStackVersion(stack130);
-    serviceComponentHost2.setStackVersion(stack120);
-
-    HeartBeat hb = new HeartBeat();
-    hb.setTimestamp(System.currentTimeMillis());
-    hb.setResponseId(0);
-    hb.setHostname(DummyHostname1);
-    hb.setNodeStatus(new HostStatus(HostStatus.Status.HEALTHY, DummyHostStatus));
-    CommandReport cr1 = new CommandReport();
-    cr1.setActionId(StageUtils.getActionId(requestId, stageId));
-    cr1.setTaskId(1);
-    cr1.setClusterName(DummyCluster);
-    cr1.setServiceName(HDFS);
-    cr1.setRole(DATANODE);
-    cr1.setStatus(HostRoleStatus.COMPLETED.toString());
-    cr1.setStdErr("none");
-    cr1.setStdOut("dummy output");
-    cr1.setExitCode(0);
-    cr1.setRoleCommand(RoleCommand.UPGRADE.toString());
-
-    CommandReport cr2 = new CommandReport();
-    cr2.setActionId(StageUtils.getActionId(requestId, stageId));
-    cr2.setTaskId(2);
-    cr2.setClusterName(DummyCluster);
-    cr2.setServiceName(HDFS);
-    cr2.setRole(NAMENODE);
-    cr2.setStatus(HostRoleStatus.COMPLETED.toString());
-    cr2.setStdErr("none");
-    cr2.setStdOut("dummy output");
-    cr2.setExitCode(0);
-    cr2.setRoleCommand(RoleCommand.UPGRADE.toString());
-    ArrayList<CommandReport> reports = new ArrayList<>();
-    reports.add(cr1);
-    reports.add(cr2);
-    hb.setReports(reports);
-
-    ActionQueue aq = new ActionQueue();
-    final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
-        Role.DATANODE, null, null);
-
-    ActionManager am = actionManagerTestHelper.getMockActionManager();
-    expect(am.getTasks(EasyMock.<List<Long>>anyObject())).andReturn(
-        new ArrayList<HostRoleCommand>() {{
-          add(command);
-          add(command);
-        }});
-    replay(am);
-
-    HeartBeatHandler handler = heartbeatTestHelper.getHeartBeatHandler(am, aq);
-    HeartbeatProcessor heartbeatProcessor = handler.getHeartbeatProcessor();
-    heartbeatProcessor.processHeartbeat(hb);
-
-    assertEquals("Stack version for SCH should be updated to " +
-            serviceComponentHost1.getDesiredStackVersion(),
-        stack130, serviceComponentHost1.getStackVersion());
-    assertEquals("Stack version for SCH should not change ",
-        stack120, serviceComponentHost2.getStackVersion());
-  }
-
-
-  @Test
-  @SuppressWarnings("unchecked")
   public void testComponentUpgradeFailReport() throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
+    Service hdfs = addService(cluster, HDFS);
     hdfs.addServiceComponent(DATANODE);
     hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
     hdfs.addServiceComponent(NAMENODE);
@@ -982,10 +895,6 @@ public class HeartbeatProcessorTest {
     serviceComponentHost1.setState(State.UPGRADING);
     serviceComponentHost2.setState(State.INSTALLING);
 
-    serviceComponentHost1.setStackVersion(stack120);
-    serviceComponentHost1.setDesiredStackVersion(stack130);
-    serviceComponentHost2.setStackVersion(stack120);
-
     Stage s = stageFactory.createNew(requestId, "/a/b", "cluster1", 1L, "action manager test",
         "clusterHostInfo", "commandParamsStage", "hostParamsStage");
     s.setStageId(stageId);
@@ -1071,10 +980,6 @@ public class HeartbeatProcessorTest {
     assertEquals("State of SCH should change after fail report",
         State.INSTALL_FAILED, serviceComponentHost2.getState());
     assertEquals("Stack version of SCH should not change after fail report",
-        stack120, serviceComponentHost1.getStackVersion());
-    assertEquals("Stack version of SCH should not change after fail report",
-        stack130, serviceComponentHost1.getDesiredStackVersion());
-    assertEquals("Stack version of SCH should not change after fail report",
         State.INSTALL_FAILED, serviceComponentHost2.getState());
   }
 
@@ -1083,7 +988,7 @@ public class HeartbeatProcessorTest {
   @SuppressWarnings("unchecked")
   public void testComponentUpgradeInProgressReport() throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
+    Service hdfs = addService(cluster, HDFS);
     hdfs.addServiceComponent(DATANODE);
     hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
     hdfs.addServiceComponent(NAMENODE);
@@ -1102,10 +1007,6 @@ public class HeartbeatProcessorTest {
     serviceComponentHost1.setState(State.UPGRADING);
     serviceComponentHost2.setState(State.INSTALLING);
 
-    serviceComponentHost1.setStackVersion(stack120);
-    serviceComponentHost1.setDesiredStackVersion(stack130);
-    serviceComponentHost2.setStackVersion(stack120);
-
     HeartBeat hb = new HeartBeat();
     hb.setTimestamp(System.currentTimeMillis());
     hb.setResponseId(0);
@@ -1155,8 +1056,6 @@ public class HeartbeatProcessorTest {
     handler.handleHeartBeat(hb);
     assertEquals("State of SCH not change while operation is in progress",
         State.UPGRADING, serviceComponentHost1.getState());
-    assertEquals("Stack version of SCH should not change after in progress report",
-        stack130, serviceComponentHost1.getDesiredStackVersion());
     assertEquals("State of SCH not change while operation is  in progress",
         State.INSTALLING, serviceComponentHost2.getState());
   }
@@ -1240,6 +1139,7 @@ public class HeartbeatProcessorTest {
     replay(am);
 
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
+
     HeartBeatHandler handler = heartbeatTestHelper.getHeartBeatHandler(am, new ActionQueue());
     HeartbeatProcessor heartbeatProcessor = handler.getHeartbeatProcessor();
     HeartBeat hb = new HeartBeat();
@@ -1247,7 +1147,7 @@ public class HeartbeatProcessorTest {
     JsonObject json = new JsonObject();
     json.addProperty("actual_version", "2.2.1.0-2222");
     json.addProperty("package_installation_result", "SUCCESS");
-    json.addProperty("installed_repository_version", "0.1");
+    json.addProperty("installed_repository_version", "0.1-1234");
     json.addProperty("stack_id", cluster.getDesiredStackVersion().getStackId());
 
 
@@ -1273,12 +1173,12 @@ public class HeartbeatProcessorTest {
     StackId stackId = new StackId("HDP", "0.1");
 
     RepositoryVersionDAO dao = injector.getInstance(RepositoryVersionDAO.class);
-    RepositoryVersionEntity entity = dao.findByStackAndVersion(stackId, "0.1");
+    RepositoryVersionEntity entity = dao.findByStackAndVersion(stackId, "0.1-1234");
     Assert.assertNotNull(entity);
 
     heartbeatProcessor.processHeartbeat(hb);
 
-    entity = dao.findByStackAndVersion(stackId, "0.1");
+    entity = dao.findByStackAndVersion(stackId, "0.1-1234");
     Assert.assertNull(entity);
 
     entity = dao.findByStackAndVersion(stackId, "2.2.1.0-2222");
@@ -1289,7 +1189,7 @@ public class HeartbeatProcessorTest {
   @SuppressWarnings("unchecked")
   public void testComponentInProgressStatusSafeAfterStatusReport() throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
-    Service hdfs = cluster.addService(HDFS);
+    Service hdfs = addService(cluster, HDFS);
     hdfs.addServiceComponent(DATANODE);
     hdfs.getServiceComponent(DATANODE).
         addServiceComponentHost(DummyHostname1);
@@ -1358,5 +1258,20 @@ public class HeartbeatProcessorTest {
   }
 
 
-
+  /**
+   * Adds the service to the cluster using the current cluster version as the
+   * repository version for the service.
+   *
+   * @param cluster
+   *          the cluster.
+   * @param serviceName
+   *          the service name.
+   * @return the newly added service.
+   * @throws AmbariException
+   */
+  private Service addService(Cluster cluster, String serviceName) throws AmbariException {
+    ClusterVersionEntity clusterVersion = cluster.getCurrentClusterVersion();
+    RepositoryVersionEntity repositoryVersion = clusterVersion.getRepositoryVersion();
+    return cluster.addService(serviceName, repositoryVersion);
+  }
 }
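
For reference, a minimal sketch (not part of the patch) of the setup pattern the many one-line call-site changes above converge on: services in the heartbeat tests are now registered with an explicit repository version resolved from the current cluster version. The heartbeatTestHelper fixture and the service/component/host names are assumed to be the ones already defined in HeartbeatProcessorTest; the hostname must already be registered with the dummy cluster.

import org.apache.ambari.server.agent.HeartbeatTestHelper;
import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Service;

public class AddServiceSketch {

  // Sketch only: mirrors the addService(cluster, serviceName) helper above.
  // A service is no longer added by name alone; the repository version of the
  // cluster's current cluster version is passed along with it.
  static Service addHdfsWithDatanode(HeartbeatTestHelper heartbeatTestHelper,
      String hostName) throws Exception {
    Cluster cluster = heartbeatTestHelper.getDummyCluster();

    ClusterVersionEntity clusterVersion = cluster.getCurrentClusterVersion();
    RepositoryVersionEntity repositoryVersion = clusterVersion.getRepositoryVersion();
    Service hdfs = cluster.addService("HDFS", repositoryVersion);

    hdfs.addServiceComponent("DATANODE");
    hdfs.getServiceComponent("DATANODE").addServiceComponentHost(hostName);
    return hdfs;
  }
}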

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
index 2e65e8d..6e1ebdd 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
@@ -21,6 +21,7 @@ import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyCluste
 import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyHostname1;
 import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyOSRelease;
 import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyOs;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyRepositoryVersion;
 import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyStackId;
 import static org.apache.ambari.server.agent.DummyHeartbeatConstants.HBASE;
 
@@ -151,13 +152,14 @@ public class HeartbeatTestHelper {
       add(DummyHostname1);
     }};
 
-    return getDummyCluster(DummyCluster, DummyStackId, configProperties, hostNames);
+    return getDummyCluster(DummyCluster, new StackId(DummyStackId), DummyRepositoryVersion,
+        configProperties, hostNames);
   }
 
-  public Cluster getDummyCluster(String clusterName, String desiredStackId,
-                                 Map<String, String> configProperties, Set<String> hostNames)
+  public Cluster getDummyCluster(String clusterName, StackId stackId, String repositoryVersion,
+      Map<String, String> configProperties, Set<String> hostNames)
       throws Exception {
-    StackEntity stackEntity = stackDAO.find(HDP_22_STACK.getStackName(), HDP_22_STACK.getStackVersion());
+    StackEntity stackEntity = stackDAO.find(stackId.getStackName(), stackId.getStackVersion());
     org.junit.Assert.assertNotNull(stackEntity);
 
     // Create the cluster
@@ -177,8 +179,6 @@ public class HeartbeatTestHelper {
 
     clusterDAO.create(clusterEntity);
 
-    StackId stackId = new StackId(desiredStackId);
-
     // because this test method goes around the Clusters business object, we
     // forcefully will refresh the internal state so that any tests which
     // incorrect use Clusters after calling this won't be affected
@@ -196,8 +196,8 @@ public class HeartbeatTestHelper {
     Config config = cf.createNew(cluster, "cluster-env", "version1", configProperties, new HashMap<String, Map<String, String>>());
     cluster.addDesiredConfig("user", Collections.singleton(config));
 
-    helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
-    cluster.createClusterVersion(stackId, stackId.getStackVersion(), "admin",
+    helper.getOrCreateRepositoryVersion(stackId, repositoryVersion);
+    cluster.createClusterVersion(stackId, repositoryVersion, "admin",
         RepositoryVersionState.INSTALLING);
 
     Map<String, String> hostAttributes = new HashMap<>();
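
A hedged sketch (not in the patch) of calling the updated helper directly: getDummyCluster now takes a StackId object plus an explicit repository version string instead of a single stack-id string. The cluster name, hostname, and version strings below are illustrative, chosen to match the DummyStackId / DummyRepositoryVersion constants added above.

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

import org.apache.ambari.server.agent.HeartbeatTestHelper;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.StackId;

public class DummyClusterSketch {

  // Sketch only: builds a dummy cluster against the HDP-0.1 test stack with an
  // explicit repository version, as the refactored helper requires.
  static Cluster createDummyCluster(HeartbeatTestHelper heartbeatTestHelper) throws Exception {
    Map<String, String> configProperties = new HashMap<>();   // cluster-env properties
    Set<String> hostNames = Collections.singleton("host1");   // illustrative hostname

    return heartbeatTestHelper.getDummyCluster(
        "c1",                    // cluster name
        new StackId("HDP-0.1"),  // stack, e.g. DummyStackId
        "0.1-1234",              // repository version, e.g. DummyRepositoryVersion
        configProperties,
        hostNames);
  }
}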


[4/9] ambari git commit: AMBARI-20894 - Setting Desired Stack Ids to Correct Values During Service and Patch Upgrades (jonathanhurley)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/controller/BackgroundCustomCommandExecutionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/BackgroundCustomCommandExecutionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/BackgroundCustomCommandExecutionTest.java
index 9d499c0..86a11e3 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/BackgroundCustomCommandExecutionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/BackgroundCustomCommandExecutionTest.java
@@ -41,12 +41,16 @@ import org.apache.ambari.server.controller.internal.RequestResourceFilter;
 import org.apache.ambari.server.controller.internal.ServiceResourceProviderTest;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.orm.OrmTestHelper;
+import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.security.TestAuthenticationFactory;
 import org.apache.ambari.server.security.authorization.AuthorizationException;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.HostState;
 import org.apache.ambari.server.state.SecurityType;
+import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.State;
 import org.apache.ambari.server.topology.TopologyManager;
 import org.apache.ambari.server.utils.StageUtils;
@@ -77,6 +81,11 @@ public class BackgroundCustomCommandExecutionTest {
   @Captor ArgumentCaptor<Request> requestCapture;
   @Mock ActionManager am;
 
+  private final String STACK_VERSION = "2.0.6";
+  private final String REPO_VERSION = "2.0.6-1234";
+  private final StackId STACK_ID = new StackId("HDP", STACK_VERSION);
+  private RepositoryVersionEntity m_repositoryVersion;
+
   @Before
   public void setup() throws Exception {
     Configuration configuration;
@@ -100,6 +109,7 @@ public class BackgroundCustomCommandExecutionTest {
     clusters = injector.getInstance(Clusters.class);
     configuration = injector.getInstance(Configuration.class);
     topologyManager = injector.getInstance(TopologyManager.class);
+    OrmTestHelper ormTestHelper = injector.getInstance(OrmTestHelper.class);
 
     Assert.assertEquals("src/main/resources/custom_action_definitions", configuration.getCustomActionDefinitionPath());
 
@@ -111,6 +121,9 @@ public class BackgroundCustomCommandExecutionTest {
     // Set the authenticated user
     // TODO: remove this or replace the authenticated user to test authorization rules
     SecurityContextHolder.getContext().setAuthentication(TestAuthenticationFactory.createAdministrator());
+
+    m_repositoryVersion = ormTestHelper.getOrCreateRepositoryVersion(STACK_ID, REPO_VERSION);
+    Assert.assertNotNull(m_repositoryVersion);
   }
   @After
   public void teardown() throws AmbariException, SQLException {
@@ -192,7 +205,9 @@ public class BackgroundCustomCommandExecutionTest {
   }
 
   private void createCluster(String clusterName) throws AmbariException, AuthorizationException {
-    ClusterRequest r = new ClusterRequest(null, clusterName, State.INSTALLED.name(), SecurityType.NONE, "HDP-2.0.6", null);
+    ClusterRequest r = new ClusterRequest(null, clusterName, State.INSTALLED.name(),
+        SecurityType.NONE, STACK_ID.getStackId(), null);
+
     controller.createCluster(r);
   }
 
@@ -202,11 +217,14 @@ public class BackgroundCustomCommandExecutionTest {
     if (desiredState != null) {
       dStateStr = desiredState.toString();
     }
-    ServiceRequest r1 = new ServiceRequest(clusterName, serviceName, dStateStr);
+    ServiceRequest r1 = new ServiceRequest(clusterName, serviceName,
+        m_repositoryVersion.getStackId().getStackId(), m_repositoryVersion.getVersion(), dStateStr);
+
     Set<ServiceRequest> requests = new HashSet<>();
     requests.add(r1);
 
-    ServiceResourceProviderTest.createServices(controller, requests);
+    ServiceResourceProviderTest.createServices(controller,
+        injector.getInstance(RepositoryVersionDAO.class), requests);
   }
 
   private void createServiceComponent(String clusterName,
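
A minimal sketch (not part of the commit) of the service-creation flow these controller tests now share: ServiceRequest carries the desired stack id and repository version, and createServices(...) additionally needs a RepositoryVersionDAO. The controller, injector, and repositoryVersion references are assumed to be the fixtures shown in the diff (the entity typically comes from OrmTestHelper.getOrCreateRepositoryVersion).

import java.util.Collections;

import org.apache.ambari.server.controller.AmbariManagementController;
import org.apache.ambari.server.controller.ServiceRequest;
import org.apache.ambari.server.controller.internal.ServiceResourceProviderTest;
import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
import org.apache.ambari.server.state.State;

import com.google.inject.Injector;

public class CreateServiceSketch {

  // Sketch only: the request is built from the repository version entity so the
  // service's desired stack and version line up with an existing repo version.
  static void createService(AmbariManagementController controller, Injector injector,
      RepositoryVersionEntity repositoryVersion, String clusterName, String serviceName)
      throws Exception {

    ServiceRequest request = new ServiceRequest(
        clusterName,
        serviceName,
        repositoryVersion.getStackId().getStackId(),  // e.g. "HDP-2.0.6"
        repositoryVersion.getVersion(),               // e.g. "2.0.6-1234"
        State.INIT.name());

    ServiceResourceProviderTest.createServices(controller,
        injector.getInstance(RepositoryVersionDAO.class),
        Collections.singleton(request));
  }
}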

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
index 5275580..8cfe258 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/KerberosHelperTest.java
@@ -965,14 +965,11 @@ public class KerberosHelperTest extends EasyMockSupport {
     KerberosHelper kerberosHelper = injector.getInstance(KerberosHelper.class);
     boolean identitiesManaged = (manageIdentities == null) || !"false".equalsIgnoreCase(manageIdentities);
 
-    final StackId stackVersion = createMock(StackId.class);
-
     final ServiceComponentHost schKerberosClient = createMock(ServiceComponentHost.class);
     expect(schKerberosClient.getServiceName()).andReturn(Service.Type.KERBEROS.name()).anyTimes();
     expect(schKerberosClient.getServiceComponentName()).andReturn(Role.KERBEROS_CLIENT.name()).anyTimes();
     expect(schKerberosClient.getSecurityState()).andReturn(SecurityState.UNSECURED).anyTimes();
     expect(schKerberosClient.getDesiredSecurityState()).andReturn(SecurityState.UNSECURED).anyTimes();
-    expect(schKerberosClient.getStackVersion()).andReturn(stackVersion).anyTimes();
     expect(schKerberosClient.getHostName()).andReturn("host1").anyTimes();
     expect(schKerberosClient.getState()).andReturn(State.INSTALLED).anyTimes();
 
@@ -981,7 +978,6 @@ public class KerberosHelperTest extends EasyMockSupport {
     expect(sch1.getServiceComponentName()).andReturn("COMPONENT1").anyTimes();
     expect(sch1.getSecurityState()).andReturn(SecurityState.SECURED_KERBEROS).anyTimes();
     expect(sch1.getDesiredSecurityState()).andReturn(SecurityState.SECURED_KERBEROS).anyTimes();
-    expect(sch1.getStackVersion()).andReturn(stackVersion).anyTimes();
     expect(sch1.getHostName()).andReturn("host1").anyTimes();
     expect(sch1.getState()).andReturn(State.INSTALLED).anyTimes();
 
@@ -995,7 +991,6 @@ public class KerberosHelperTest extends EasyMockSupport {
     expect(sch2.getServiceComponentName()).andReturn("COMPONENT2").anyTimes();
     expect(sch2.getSecurityState()).andReturn(SecurityState.SECURED_KERBEROS).anyTimes();
     expect(sch2.getDesiredSecurityState()).andReturn(SecurityState.SECURED_KERBEROS).anyTimes();
-    expect(sch2.getStackVersion()).andReturn(stackVersion).anyTimes();
     expect(sch2.getHostName()).andReturn("host1").anyTimes();
     expect(sch2.getState()).andReturn(State.INSTALLED).anyTimes();
 
@@ -1156,14 +1151,11 @@ public class KerberosHelperTest extends EasyMockSupport {
     KerberosHelper kerberosHelper = injector.getInstance(KerberosHelper.class);
     boolean identitiesManaged = (manageIdentities == null) || !"false".equalsIgnoreCase(manageIdentities);
 
-    final StackId stackVersion = createMock(StackId.class);
-
     final ServiceComponentHost schKerberosClient = createMock(ServiceComponentHost.class);
     expect(schKerberosClient.getServiceName()).andReturn(Service.Type.KERBEROS.name()).anyTimes();
     expect(schKerberosClient.getServiceComponentName()).andReturn(Role.KERBEROS_CLIENT.name()).anyTimes();
     expect(schKerberosClient.getSecurityState()).andReturn(SecurityState.UNSECURED).anyTimes();
     expect(schKerberosClient.getDesiredSecurityState()).andReturn(SecurityState.UNSECURED).anyTimes();
-    expect(schKerberosClient.getStackVersion()).andReturn(stackVersion).anyTimes();
     expect(schKerberosClient.getHostName()).andReturn("host1").anyTimes();
     expect(schKerberosClient.getState()).andReturn(State.INSTALLED).anyTimes();
 
@@ -1172,7 +1164,6 @@ public class KerberosHelperTest extends EasyMockSupport {
     expect(sch1.getServiceComponentName()).andReturn("COMPONENT1").anyTimes();
     expect(sch1.getSecurityState()).andReturn(SecurityState.UNSECURED).anyTimes();
     expect(sch1.getDesiredSecurityState()).andReturn(SecurityState.UNSECURED).anyTimes();
-    expect(sch1.getStackVersion()).andReturn(stackVersion).anyTimes();
     expect(sch1.getHostName()).andReturn("host1").anyTimes();
     expect(sch1.getState()).andReturn(State.INSTALLED).anyTimes();
 
@@ -1186,7 +1177,6 @@ public class KerberosHelperTest extends EasyMockSupport {
     expect(sch2.getServiceComponentName()).andReturn("COMPONENT2").anyTimes();
     expect(sch2.getSecurityState()).andReturn(SecurityState.UNSECURED).anyTimes();
     expect(sch2.getDesiredSecurityState()).andReturn(SecurityState.UNSECURED).anyTimes();
-    expect(sch2.getStackVersion()).andReturn(stackVersion).anyTimes();
     expect(sch2.getHostName()).andReturn("host1").anyTimes();
     expect(sch2.getState()).andReturn(State.INSTALLED).anyTimes();
 
@@ -1344,14 +1334,11 @@ public class KerberosHelperTest extends EasyMockSupport {
 
     KerberosHelper kerberosHelper = injector.getInstance(KerberosHelper.class);
 
-    final StackId stackVersion = createMock(StackId.class);
-
     final ServiceComponentHost schKerberosClient = createMock(ServiceComponentHost.class);
     expect(schKerberosClient.getServiceName()).andReturn(Service.Type.KERBEROS.name()).anyTimes();
     expect(schKerberosClient.getServiceComponentName()).andReturn(Role.KERBEROS_CLIENT.name()).anyTimes();
     expect(schKerberosClient.getSecurityState()).andReturn(SecurityState.UNSECURED).anyTimes();
     expect(schKerberosClient.getDesiredSecurityState()).andReturn(SecurityState.UNSECURED).anyTimes();
-    expect(schKerberosClient.getStackVersion()).andReturn(stackVersion).anyTimes();
     expect(schKerberosClient.getHostName()).andReturn("host1").anyTimes();
     expect(schKerberosClient.getState()).andReturn(State.INSTALLED).anyTimes();
 
@@ -1360,7 +1347,6 @@ public class KerberosHelperTest extends EasyMockSupport {
     expect(sch1.getServiceComponentName()).andReturn("COMPONENT1").once();
     expect(sch1.getSecurityState()).andReturn(SecurityState.SECURED_KERBEROS).anyTimes();
     expect(sch1.getDesiredSecurityState()).andReturn(SecurityState.SECURED_KERBEROS).anyTimes();
-    expect(sch1.getStackVersion()).andReturn(stackVersion).anyTimes();
     expect(sch1.getHostName()).andReturn("host1").anyTimes();
     expect(sch1.getState()).andReturn(State.INSTALLED).anyTimes();
 
@@ -1374,7 +1360,6 @@ public class KerberosHelperTest extends EasyMockSupport {
     expect(sch2.getServiceComponentName()).andReturn("COMPONENT2").anyTimes();
     expect(sch2.getSecurityState()).andReturn(SecurityState.SECURED_KERBEROS).anyTimes();
     expect(sch2.getDesiredSecurityState()).andReturn(SecurityState.SECURED_KERBEROS).anyTimes();
-    expect(sch2.getStackVersion()).andReturn(stackVersion).anyTimes();
     expect(sch2.getHostName()).andReturn("host1").anyTimes();
     expect(sch2.getState()).andReturn(State.INSTALLED).anyTimes();
 
@@ -1536,8 +1521,6 @@ public class KerberosHelperTest extends EasyMockSupport {
 
     KerberosHelper kerberosHelper = injector.getInstance(KerberosHelper.class);
 
-    final StackId stackVersion = createMock(StackId.class);
-
     final ServiceComponentHost schKerberosClient = createMock(ServiceComponentHost.class);
     expect(schKerberosClient.getServiceName()).andReturn(Service.Type.KERBEROS.name()).anyTimes();
     expect(schKerberosClient.getServiceComponentName()).andReturn(Role.KERBEROS_CLIENT.name()).anyTimes();
@@ -1549,7 +1532,6 @@ public class KerberosHelperTest extends EasyMockSupport {
     expect(sch1.getServiceComponentName()).andReturn("COMPONENT1").anyTimes();
     expect(sch1.getSecurityState()).andReturn(SecurityState.UNSECURED).anyTimes();
     expect(sch1.getDesiredSecurityState()).andReturn(SecurityState.UNSECURED).anyTimes();
-    expect(sch1.getStackVersion()).andReturn(stackVersion).anyTimes();
     expect(sch1.getHostName()).andReturn("host1").anyTimes();
 
     final ServiceComponentHost sch2 = createMock(ServiceComponentHost.class);
@@ -1557,7 +1539,6 @@ public class KerberosHelperTest extends EasyMockSupport {
     expect(sch2.getServiceComponentName()).andReturn("COMPONENT2").anyTimes();
     expect(sch2.getSecurityState()).andReturn(SecurityState.UNSECURED).anyTimes();
     expect(sch2.getDesiredSecurityState()).andReturn(SecurityState.UNSECURED).anyTimes();
-    expect(sch2.getStackVersion()).andReturn(stackVersion).anyTimes();
     expect(sch2.getHostName()).andReturn("host1").anyTimes();
 
     final Host host = createMockHost("host1");
@@ -1577,7 +1558,6 @@ public class KerberosHelperTest extends EasyMockSupport {
       expect(sch1a.getServiceComponentName()).andReturn("COMPONENT1").anyTimes();
       expect(sch1a.getSecurityState()).andReturn(SecurityState.UNSECURED).anyTimes();
       expect(sch1a.getDesiredSecurityState()).andReturn(SecurityState.UNSECURED).anyTimes();
-      expect(sch1a.getStackVersion()).andReturn(stackVersion).anyTimes();
       expect(sch1a.getHostName()).andReturn("host2").anyTimes();
 
       hostInvalid = createMockHost("host1");

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/controller/RefreshYarnCapacitySchedulerReleaseConfigTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/RefreshYarnCapacitySchedulerReleaseConfigTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/RefreshYarnCapacitySchedulerReleaseConfigTest.java
index 34e2e06..575f5e4 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/RefreshYarnCapacitySchedulerReleaseConfigTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/RefreshYarnCapacitySchedulerReleaseConfigTest.java
@@ -31,6 +31,9 @@ import org.apache.ambari.server.controller.internal.ComponentResourceProviderTes
 import org.apache.ambari.server.controller.internal.ServiceResourceProviderTest;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.orm.OrmTestHelper;
+import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.security.TestAuthenticationFactory;
 import org.apache.ambari.server.security.authorization.AuthorizationException;
 import org.apache.ambari.server.state.Cluster;
@@ -42,6 +45,7 @@ import org.apache.ambari.server.state.SecurityType;
 import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.State;
 import org.junit.After;
 import org.junit.Before;
@@ -60,16 +64,18 @@ public class RefreshYarnCapacitySchedulerReleaseConfigTest {
   private AmbariManagementController controller;
   private Clusters clusters;
   private ConfigHelper configHelper;
+  private OrmTestHelper ormTestHelper;
 
   @Before
   public void setup() throws Exception {
 
     injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    
+
     injector.getInstance(GuiceJpaInitializer.class);
     controller = injector.getInstance(AmbariManagementController.class);
     clusters = injector.getInstance(Clusters.class);
     configHelper = injector.getInstance(ConfigHelper.class);
+    ormTestHelper = injector.getInstance(OrmTestHelper.class);
 
     // Set the authenticated user
     // TODO: remove this or replace the authenticated user to test authorization rules
@@ -85,27 +91,27 @@ public class RefreshYarnCapacitySchedulerReleaseConfigTest {
   }
 
 
-  
+
   @Test
   public void testRMRequiresRestart() throws AmbariException, AuthorizationException {
     createClusterFixture("HDP-2.0.7");
-    
-    
+
+
     Cluster cluster = clusters.getCluster("c1");
-    
+
     // Start
     ClusterRequest cr = new ClusterRequest(cluster.getClusterId(), "c1", cluster.getDesiredStackVersion().getStackVersion(), null);
 
     cr.setDesiredConfig(Collections.singletonList(new ConfigurationRequest("c1","capacity-scheduler","version2",new HashMap<String, String>(), null)));
-    
+
     controller.updateClusters(Collections.singleton(cr) , null);
-    
-    
+
+
     ServiceComponentHostRequest r = new ServiceComponentHostRequest("c1", null, null, null, null);
     r.setStaleConfig("true");
     Set<ServiceComponentHostResponse> resps = controller.getHostComponents(Collections.singleton(r));
     Assert.assertEquals(1, resps.size());
-    
+
     Assert.assertEquals(true, configHelper.isStaleConfigs(clusters.getCluster("c1").getService("YARN").getServiceComponent("RESOURCEMANAGER").getServiceComponentHost("c6401"), null));
   }
 
@@ -113,29 +119,29 @@ public class RefreshYarnCapacitySchedulerReleaseConfigTest {
   public void testAllRequiresRestart() throws AmbariException, AuthorizationException {
     createClusterFixture("HDP-2.0.7");
     Cluster cluster = clusters.getCluster("c1");
-    
+
     // Start
     ClusterRequest cr = new ClusterRequest(cluster.getClusterId(), "c1", cluster.getDesiredStackVersion().getStackVersion(), null);
-    
+
     cr.setDesiredConfig(Collections.singletonList(new ConfigurationRequest("c1","core-site","version2",new HashMap<String, String>(),null)));
-    
+
     controller.updateClusters(Collections.singleton(cr) , null);
-    
-    
+
+
     ServiceComponentHostRequest r = new ServiceComponentHostRequest("c1", null, null, null, null);
     r.setStaleConfig("true");
     Set<ServiceComponentHostResponse> resps = controller.getHostComponents(Collections.singleton(r));
     Assert.assertEquals(4, resps.size());
-    
+
   }
 
   @Test
   public void testConfigInComponent() throws Exception {
     StackServiceRequest requestWithParams = new StackServiceRequest("HDP", "2.0.6", "YARN");
     Set<StackServiceResponse> responsesWithParams = controller.getStackServices(Collections.singleton(requestWithParams));
-    
+
     Assert.assertEquals(1, responsesWithParams.size());
-    
+
     for (StackServiceResponse responseWithParams: responsesWithParams) {
       Assert.assertEquals(responseWithParams.getServiceName(), "YARN");
       Assert.assertTrue(responseWithParams.getConfigTypes().containsKey("capacity-scheduler"));
@@ -146,9 +152,9 @@ public class RefreshYarnCapacitySchedulerReleaseConfigTest {
   public void testConfigInComponentOverwrited() throws Exception {
     StackServiceRequest requestWithParams = new StackServiceRequest("HDP", "2.0.7", "YARN");
     Set<StackServiceResponse> responsesWithParams = controller.getStackServices(Collections.singleton(requestWithParams));
-    
+
     Assert.assertEquals(1, responsesWithParams.size());
-    
+
     for (StackServiceResponse responseWithParams: responsesWithParams) {
       Assert.assertEquals(responseWithParams.getServiceName(), "YARN");
       Assert.assertTrue(responseWithParams.getConfigTypes().containsKey("capacity-scheduler"));
@@ -159,17 +165,17 @@ public class RefreshYarnCapacitySchedulerReleaseConfigTest {
     createCluster("c1", stackName);
     addHost("c6401","c1");
     addHost("c6402","c1");
-    
+
     clusters.getCluster("c1");
     createService("c1", "YARN", null);
-    
+
     createServiceComponent("c1","YARN","RESOURCEMANAGER", State.INIT);
     createServiceComponent("c1","YARN","NODEMANAGER", State.INIT);
     createServiceComponent("c1","YARN","YARN_CLIENT", State.INIT);
-    
+
     createServiceComponentHost("c1","YARN","RESOURCEMANAGER","c6401", null);
     createServiceComponentHost("c1","YARN","NODEMANAGER","c6401", null);
-    
+
     createServiceComponentHost("c1","YARN","NODEMANAGER","c6402", null);
     createServiceComponentHost("c1","YARN","YARN_CLIENT","c6402", null);
   }
@@ -187,7 +193,7 @@ public class RefreshYarnCapacitySchedulerReleaseConfigTest {
     Map<String, String> hostAttributes = new HashMap<>();
     hostAttributes.put("os_family", osFamily);
     hostAttributes.put("os_release_version", osVersion);
-    
+
     host.setHostAttributes(hostAttributes);
   }
 
@@ -195,18 +201,26 @@ public class RefreshYarnCapacitySchedulerReleaseConfigTest {
     ClusterRequest r = new ClusterRequest(null, clusterName, State.INSTALLED.name(), SecurityType.NONE, stackName, null);
     controller.createCluster(r);
   }
-  
+
   private void createService(String clusterName,
       String serviceName, State desiredState) throws AmbariException, AuthorizationException {
     String dStateStr = null;
+
     if (desiredState != null) {
       dStateStr = desiredState.toString();
     }
-    ServiceRequest r1 = new ServiceRequest(clusterName, serviceName, dStateStr);
+
+    RepositoryVersionEntity repositoryVersion = ormTestHelper.getOrCreateRepositoryVersion(
+        new StackId("HDP-2.0.7"), "2.0.7-1234");
+
+    ServiceRequest r1 = new ServiceRequest(clusterName, serviceName,
+        repositoryVersion.getStackId().getStackId(), repositoryVersion.getVersion(), dStateStr);
+
     Set<ServiceRequest> requests = new HashSet<>();
     requests.add(r1);
 
-    ServiceResourceProviderTest.createServices(controller, requests);
+    ServiceResourceProviderTest.createServices(controller,
+        injector.getInstance(RepositoryVersionDAO.class), requests);
   }
 
   private void createServiceComponent(String clusterName,
@@ -236,13 +250,13 @@ public class RefreshYarnCapacitySchedulerReleaseConfigTest {
       new HashSet<>();
     requests.add(r);
     controller.createHostComponents(requests);
-    
-    
+
+
     //set actual config
       Service service = clusters.getCluster(clusterName).getService(serviceName);
       ServiceComponent rm = service.getServiceComponent(componentName);
       ServiceComponentHost rmc1 = rm.getServiceComponentHost(hostname);
-      
+
       rmc1.updateActualConfigs((new HashMap<String, Map<String,String>>() {{
         put("capacity-scheduler", new HashMap<String,String>() {{ put("tag", "version1"); }});
         put("hive-group", new HashMap<String,String>() {{ put("tag", "version1"); }});

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/AbstractControllerResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/AbstractControllerResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/AbstractControllerResourceProviderTest.java
index 094e49d..35ce868 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/AbstractControllerResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/AbstractControllerResourceProviderTest.java
@@ -35,6 +35,7 @@ import org.apache.ambari.server.controller.MaintenanceStateHelper;
 import org.apache.ambari.server.controller.ResourceProviderFactory;
 import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.ResourceProvider;
+import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
 import org.junit.Test;
 
 import junit.framework.Assert;
@@ -61,12 +62,16 @@ public class AbstractControllerResourceProviderTest {
     ResourceProviderFactory factory = createMock(ResourceProviderFactory.class);
 
     MaintenanceStateHelper maintenanceStateHelper = createNiceMock(MaintenanceStateHelper.class);
-    ResourceProvider serviceResourceProvider = new ServiceResourceProvider(propertyIds, keyPropertyIds, managementController, maintenanceStateHelper);
+    RepositoryVersionDAO repositoryVersionDAO = createNiceMock(RepositoryVersionDAO.class);
+
+    ResourceProvider serviceResourceProvider = new ServiceResourceProvider(propertyIds,
+        keyPropertyIds, managementController, maintenanceStateHelper, repositoryVersionDAO);
+
     expect(factory.getServiceResourceProvider(propertyIds, keyPropertyIds, managementController)).andReturn(serviceResourceProvider);
 
     AbstractControllerResourceProvider.init(factory);
 
-    replay(managementController, factory, maintenanceStateHelper);
+    replay(managementController, factory, maintenanceStateHelper, repositoryVersionDAO);
 
     AbstractResourceProvider provider =
         (AbstractResourceProvider) AbstractControllerResourceProvider.getResourceProvider(

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/AbstractResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/AbstractResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/AbstractResourceProviderTest.java
index c761323..8f0a6bb 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/AbstractResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/AbstractResourceProviderTest.java
@@ -53,6 +53,7 @@ import org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException;
 import org.apache.ambari.server.controller.spi.SystemException;
 import org.apache.ambari.server.controller.spi.UnsupportedPropertyException;
 import org.apache.ambari.server.controller.utilities.PredicateBuilder;
+import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
 import org.apache.ambari.server.state.SecurityType;
 import org.easymock.EasyMock;
 import org.easymock.IArgumentMatcher;
@@ -79,11 +80,11 @@ public class AbstractResourceProviderTest {
 
     AmbariManagementController managementController = createMock(AmbariManagementController.class);
     MaintenanceStateHelper maintenanceStateHelper = createNiceMock(MaintenanceStateHelper.class);
-    replay(maintenanceStateHelper);
-    AbstractResourceProvider provider = new ServiceResourceProvider(
-            propertyIds,
-            keyPropertyIds,
-            managementController, maintenanceStateHelper);
+    RepositoryVersionDAO repositoryVersionDAO = createNiceMock(RepositoryVersionDAO.class);
+    replay(maintenanceStateHelper, repositoryVersionDAO);
+
+    AbstractResourceProvider provider = new ServiceResourceProvider(propertyIds, keyPropertyIds,
+        managementController, maintenanceStateHelper, repositoryVersionDAO);
 
     Set<String> unsupported = provider.checkPropertyIds(Collections.singleton("foo"));
     Assert.assertTrue(unsupported.isEmpty());
@@ -118,12 +119,11 @@ public class AbstractResourceProviderTest {
 
     AmbariManagementController managementController = createMock(AmbariManagementController.class);
     MaintenanceStateHelper maintenanceStateHelper = createNiceMock(MaintenanceStateHelper.class);
-    replay(maintenanceStateHelper);
+    RepositoryVersionDAO repositoryVersionDAO = createNiceMock(RepositoryVersionDAO.class);
+    replay(maintenanceStateHelper, repositoryVersionDAO);
 
-    AbstractResourceProvider provider = new ServiceResourceProvider(
-            propertyIds,
-            keyPropertyIds,
-            managementController, maintenanceStateHelper);
+    AbstractResourceProvider provider = new ServiceResourceProvider(propertyIds, keyPropertyIds,
+        managementController, maintenanceStateHelper, repositoryVersionDAO);
 
     Set<String> supportedPropertyIds = provider.getPropertyIds();
     Assert.assertTrue(supportedPropertyIds.containsAll(propertyIds));
@@ -135,12 +135,11 @@ public class AbstractResourceProviderTest {
     Map<Resource.Type, String> keyPropertyIds = new HashMap<>();
     AmbariManagementController managementController = createMock(AmbariManagementController.class);
     MaintenanceStateHelper maintenanceStateHelper = createNiceMock(MaintenanceStateHelper.class);
-    replay(maintenanceStateHelper);
+    RepositoryVersionDAO repositoryVersionDAO = createNiceMock(RepositoryVersionDAO.class);
+    replay(maintenanceStateHelper, repositoryVersionDAO);
 
-    AbstractResourceProvider provider = new ServiceResourceProvider(
-            propertyIds,
-            keyPropertyIds,
-            managementController, maintenanceStateHelper);
+    AbstractResourceProvider provider = new ServiceResourceProvider(propertyIds, keyPropertyIds,
+        managementController, maintenanceStateHelper, repositoryVersionDAO);
 
     RequestStatus status = provider.getRequestStatus(null);
 
@@ -358,7 +357,7 @@ public class AbstractResourceProviderTest {
       EasyMock.reportMatcher(new StackConfigurationRequestSetMatcher(stackName, stackVersion, serviceName, propertyName));
       return null;
     }
-    
+
     public static Set<StackConfigurationDependencyRequest> getStackConfigurationDependencyRequestSet(String stackName, String stackVersion,
         String serviceName, String propertyName, String dependencyName)
     {

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProviderTest.java
index e7c1588..7b3837e 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProviderTest.java
@@ -249,7 +249,9 @@ public class ClientConfigResourceProviderTest {
     HashMap<String, ServiceOsSpecific> serviceOsSpecificHashMap = new HashMap<>();
     serviceOsSpecificHashMap.put("key",serviceOsSpecific);
 
-    ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse(clusterName, serviceName, componentName, displayName, hostName, publicHostname,desiredState, "", null, null, null);
+    ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse(clusterName, serviceName,
+        componentName, displayName, hostName, publicHostname, desiredState, "", null, null, null,
+        null);
 
     Set<ServiceComponentHostResponse> responses = new LinkedHashSet<>();
     responses.add(shr1);
@@ -497,7 +499,9 @@ public class ClientConfigResourceProviderTest {
     HashMap<String, ServiceOsSpecific> serviceOsSpecificHashMap = new HashMap<>();
     serviceOsSpecificHashMap.put("key",serviceOsSpecific);
 
-    ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse(clusterName, serviceName, componentName, displayName, hostName, publicHostName, desiredState, "", null, null, null);
+    ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse(clusterName, serviceName,
+        componentName, displayName, hostName, publicHostName, desiredState, "", null, null, null,
+        null);
 
     Set<ServiceComponentHostResponse> responses = new LinkedHashSet<>();
     responses.add(shr1);

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
index 6245708..caf7210 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
@@ -31,7 +31,6 @@ import static org.easymock.EasyMock.verify;
 
 import java.io.File;
 import java.io.FileInputStream;
-import java.lang.reflect.Field;
 import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -54,7 +53,6 @@ import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper;
 import org.apache.ambari.server.actionmanager.HostRoleCommand;
 import org.apache.ambari.server.actionmanager.Stage;
 import org.apache.ambari.server.actionmanager.StageFactory;
-import org.apache.ambari.server.agent.CommandReport;
 import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.agent.ExecutionCommand.KeyNames;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
@@ -80,14 +78,10 @@ import org.apache.ambari.server.orm.entities.ClusterEntity;
 import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
 import org.apache.ambari.server.orm.entities.HostVersionEntity;
 import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
-import org.apache.ambari.server.orm.entities.ResourceEntity;
-import org.apache.ambari.server.orm.entities.ResourceTypeEntity;
 import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.orm.entities.UpgradeEntity;
 import org.apache.ambari.server.security.TestAuthenticationFactory;
 import org.apache.ambari.server.security.authorization.AuthorizationException;
-import org.apache.ambari.server.security.authorization.ResourceType;
-import org.apache.ambari.server.serveraction.upgrades.FinalizeUpgradeAction;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.ConfigHelper;
@@ -1116,319 +1110,6 @@ public class ClusterStackVersionResourceProviderTest {
   }
 
    @Test
-   public void testUpdateResourcesAsAdministrator() throws Exception {
-     testUpdateResources(TestAuthenticationFactory.createAdministrator());
-   }
-
-   @Test
-   public void testUpdateResourcesAsClusterAdministrator() throws Exception {
-     testUpdateResources(TestAuthenticationFactory.createClusterAdministrator());
-   }
-
-   @Test(expected = AuthorizationException.class)
-   public void testUpdateResourcesAsClusterOperator() throws Exception {
-     testUpdateResources(TestAuthenticationFactory.createClusterOperator());
-   }
-
-   private void testUpdateResources(Authentication authentication) throws Exception {
-    Resource.Type type = Resource.Type.ClusterStackVersion;
-    String clusterName = "Cluster100";
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-
-    StackId stackId = new StackId("HDP", "2.0.1");
-    StackEntity stackEntity = stackDAO.find(stackId.getStackName(), stackId.getStackVersion());
-    Assert.assertNotNull(stackEntity);
-
-    ResourceTypeEntity resourceTypeEntity = resourceTypeDAO.findById(ResourceType.CLUSTER.getId());
-    if (resourceTypeEntity == null) {
-      resourceTypeEntity = new ResourceTypeEntity();
-      resourceTypeEntity.setId(ResourceType.CLUSTER.getId());
-      resourceTypeEntity.setName(ResourceType.CLUSTER.name());
-      resourceTypeEntity = resourceTypeDAO.merge(resourceTypeEntity);
-    }
-    ResourceEntity resourceEntity = new ResourceEntity();
-    resourceEntity.setResourceType(resourceTypeEntity);
-
-    final Host host1 = createNiceMock("host1", Host.class);
-    final Host host2 = createNiceMock("host2", Host.class);
-
-    expect(host1.getHostName()).andReturn("host1").anyTimes();
-    expect(host2.getHostName()).andReturn("host2").anyTimes();
-    replay(host1, host2);
-
-    ServiceComponentHost sch = createMock(ServiceComponentHost.class);
-    List<ServiceComponentHost> schs = Collections.singletonList(sch);
-
-    Cluster cluster = createNiceMock(Cluster.class);
-    cluster.setClusterName(clusterName);
-
-    ArrayList<Host> hosts = new ArrayList<Host>() {{
-      add(host1);
-      add(host2);
-    }};
-
-    Clusters clusters = createNiceMock(Clusters.class);
-    expect(clusters.getCluster(anyObject(String.class))).andReturn(cluster);
-
-    RepositoryVersionEntity repoVersion = new RepositoryVersionEntity();
-    repoVersion.setOperatingSystems(OS_JSON);
-    StackEntity newDesiredStack = stackDAO.find("HDP", "2.0.1");
-    repoVersion.setStack(newDesiredStack);
-
-    final ServiceOsSpecific.Package hivePackage = new ServiceOsSpecific.Package();
-    hivePackage.setName("hive");
-    final ServiceOsSpecific.Package mysqlPackage = new ServiceOsSpecific.Package();
-    mysqlPackage.setName("mysql");
-    mysqlPackage.setSkipUpgrade(Boolean.TRUE);
-    List<ServiceOsSpecific.Package> packages = new ArrayList<ServiceOsSpecific.Package>() {{
-      add(hivePackage);
-      add(mysqlPackage);
-    }};
-
-    ActionManager actionManager = createNiceMock(ActionManager.class);
-
-    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
-    ResourceProviderFactory resourceProviderFactory = createNiceMock(ResourceProviderFactory.class);
-    ResourceProvider csvResourceProvider = createNiceMock(ClusterStackVersionResourceProvider.class);
-
-    CommandReport report = createNiceMock(CommandReport.class);
-    FinalizeUpgradeAction finalizeUpgradeAction = createNiceMock(FinalizeUpgradeAction.class);
-
-    AbstractControllerResourceProvider.init(resourceProviderFactory);
-
-    Map<String, Map<String, String>> hostConfigTags = new HashMap<>();
-    expect(configHelper.getEffectiveDesiredTags(anyObject(ClusterImpl.class), anyObject(String.class))).andReturn(hostConfigTags);
-
-    expect(managementController.getClusters()).andReturn(clusters).anyTimes();
-    expect(managementController.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
-    expect(managementController.getAuthName()).andReturn("admin").anyTimes();
-    expect(managementController.getActionManager()).andReturn(actionManager).anyTimes();
-    expect(managementController.getJdkResourceUrl()).andReturn("/JdkResourceUrl").anyTimes();
-    expect(managementController.getPackagesForServiceHost(anyObject(ServiceInfo.class),
-            EasyMock.<Map<String, String>>anyObject(), anyObject(String.class))).andReturn(packages).anyTimes();
-
-    expect(resourceProviderFactory.getHostResourceProvider(EasyMock.<Set<String>>anyObject(), EasyMock.<Map<Resource.Type, String>>anyObject(),
-            eq(managementController))).andReturn(csvResourceProvider).anyTimes();
-
-    expect(cluster.getCurrentStackVersion()).andReturn(stackId);
-    expect(cluster.getServiceComponentHosts(anyObject(String.class))).andReturn(schs).anyTimes();
-
-    Capture<StackId> capturedStackId = EasyMock.newCapture();
-    cluster.setDesiredStackVersion(capture(capturedStackId));
-      expectLastCall().once();
-    expect(cluster.getHosts()).andReturn(hosts).anyTimes();
-
-
-    expect(sch.getServiceName()).andReturn("HIVE").anyTimes();
-
-    expect(repositoryVersionDAOMock.findByDisplayName(anyObject(String.class))).andReturn(repoVersion);
-
-    expect(actionManager.getRequestTasks(anyLong())).andReturn(Collections.<HostRoleCommand>emptyList()).anyTimes();
-
-    expect(finalizeUpgradeAction.execute(null)).andReturn(report);
-
-    expect(report.getStdOut()).andReturn("Dummy stdout");
-    expect(report.getStdErr()).andReturn("Dummy stderr");
-    expect(report.getStatus()).andReturn("COMPLETED");
-
-    // replay
-    replay(managementController, response, clusters, resourceProviderFactory, csvResourceProvider,
-            cluster, repositoryVersionDAOMock, configHelper, sch, actionManager, finalizeUpgradeAction, report,
-            stageFactory);
-
-    ResourceProvider provider = AbstractControllerResourceProvider.getResourceProvider(
-            type,
-            PropertyHelper.getPropertyIds(type),
-            PropertyHelper.getKeyPropertyIds(type),
-            managementController);
-
-    injector.injectMembers(provider);
-
-    // Have to inject instance manually because injection via DI fails
-    Field field = ClusterStackVersionResourceProvider.class.getDeclaredField("finalizeUpgradeAction");
-    field.setAccessible(true);
-    field.set(provider, finalizeUpgradeAction);
-
-    // add the property map to a set for the request.  add more maps for multiple creates
-    Map<String, Object> properties = new LinkedHashMap<>();
-
-    // add properties to the request map
-    properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID, clusterName);
-    properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_STATE_PROPERTY_ID, "CURRENT");
-    properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_REPOSITORY_VERSION_PROPERTY_ID, "HDP-2.2.2.0-2561");
-
-    // create the request
-    Request request = PropertyHelper.getUpdateRequest(properties, null);
-
-    SecurityContextHolder.getContext().setAuthentication(authentication);
-
-    provider.updateResources(request, null);
-
-    // verify
-    verify(managementController, response);
-    Assert.assertEquals(capturedStackId.getValue(),
-            new StackId(newDesiredStack.getStackName(), newDesiredStack.getStackVersion()));
-  }
-
-   @Test
-   public void testUpdateResourcesWithForceAsAdministrator() throws Exception {
-     testUpdateResourcesWithForce(TestAuthenticationFactory.createAdministrator());
-   }
-
-   @Test
-   public void testUpdateResourcesWithForceAsClusterAdministrator() throws Exception {
-     testUpdateResourcesWithForce(TestAuthenticationFactory.createClusterAdministrator());
-   }
-
-   @Test(expected = AuthorizationException.class)
-   public void testUpdateResourcesWithForceAsClusterOperator() throws Exception {
-     testUpdateResourcesWithForce(TestAuthenticationFactory.createClusterOperator());
-   }
-
-   private void testUpdateResourcesWithForce(Authentication authentication) throws Exception {
-    Resource.Type type = Resource.Type.ClusterStackVersion;
-    String clusterName = "Cluster100";
-
-    AmbariManagementController managementController = createMock(AmbariManagementController.class);
-
-    StackId stackId = new StackId("HDP", "2.0.1");
-    StackEntity stackEntity = stackDAO.find(stackId.getStackName(), stackId.getStackVersion());
-    Assert.assertNotNull(stackEntity);
-
-    ResourceTypeEntity resourceTypeEntity = resourceTypeDAO.findById(ResourceType.CLUSTER.getId());
-    if (resourceTypeEntity == null) {
-      resourceTypeEntity = new ResourceTypeEntity();
-      resourceTypeEntity.setId(ResourceType.CLUSTER.getId());
-      resourceTypeEntity.setName(ResourceType.CLUSTER.name());
-      resourceTypeEntity = resourceTypeDAO.merge(resourceTypeEntity);
-    }
-    ResourceEntity resourceEntity = new ResourceEntity();
-    resourceEntity.setResourceType(resourceTypeEntity);
-
-    final Host host1 = createNiceMock("host1", Host.class);
-    final Host host2 = createNiceMock("host2", Host.class);
-
-    expect(host1.getHostName()).andReturn("host1").anyTimes();
-    expect(host2.getHostName()).andReturn("host2").anyTimes();
-    replay(host1, host2);
-
-    ServiceComponentHost sch = createMock(ServiceComponentHost.class);
-    List<ServiceComponentHost> schs = Collections.singletonList(sch);
-
-    Cluster cluster = createNiceMock(Cluster.class);
-    cluster.setClusterName(clusterName);
-
-    ArrayList<Host> hosts = new ArrayList<Host>() {{
-      add(host1);
-      add(host2);
-    }};
-
-    Clusters clusters = createNiceMock(Clusters.class);
-    expect(clusters.getCluster(anyObject(String.class))).andReturn(cluster);
-
-    RepositoryVersionEntity repoVersion = new RepositoryVersionEntity();
-    repoVersion.setOperatingSystems(OS_JSON);
-    StackEntity newDesiredStack = stackDAO.find("HDP", "2.0.1");
-    repoVersion.setStack(newDesiredStack);
-
-    final ServiceOsSpecific.Package hivePackage = new ServiceOsSpecific.Package();
-    hivePackage.setName("hive");
-    final ServiceOsSpecific.Package mysqlPackage = new ServiceOsSpecific.Package();
-    mysqlPackage.setName("mysql");
-    mysqlPackage.setSkipUpgrade(Boolean.TRUE);
-    List<ServiceOsSpecific.Package> packages = new ArrayList<ServiceOsSpecific.Package>() {{
-      add(hivePackage);
-      add(mysqlPackage);
-    }};
-
-    ActionManager actionManager = createNiceMock(ActionManager.class);
-
-    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
-    ResourceProviderFactory resourceProviderFactory = createNiceMock(ResourceProviderFactory.class);
-    ResourceProvider csvResourceProvider = createNiceMock(ClusterStackVersionResourceProvider.class);
-
-    AbstractControllerResourceProvider.init(resourceProviderFactory);
-
-    Map<String, Map<String, String>> hostConfigTags = new HashMap<>();
-    expect(configHelper.getEffectiveDesiredTags(anyObject(ClusterImpl.class), anyObject(String.class))).andReturn(hostConfigTags);
-
-    expect(managementController.getClusters()).andReturn(clusters).anyTimes();
-    expect(managementController.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
-    expect(managementController.getAuthName()).andReturn("admin").anyTimes();
-    expect(managementController.getActionManager()).andReturn(actionManager).anyTimes();
-    expect(managementController.getJdkResourceUrl()).andReturn("/JdkResourceUrl").anyTimes();
-    expect(managementController.getPackagesForServiceHost(anyObject(ServiceInfo.class),
-            EasyMock.<Map<String, String>>anyObject(), anyObject(String.class))).andReturn(packages).anyTimes();
-
-    expect(resourceProviderFactory.getHostResourceProvider(EasyMock.<Set<String>>anyObject(), EasyMock.<Map<Resource.Type, String>>anyObject(),
-            eq(managementController))).andReturn(csvResourceProvider).anyTimes();
-
-    expect(cluster.getCurrentStackVersion()).andReturn(stackId);
-    expect(cluster.getServiceComponentHosts(anyObject(String.class))).andReturn(schs).anyTimes();
-
-    RepositoryVersionEntity currentRepo = new RepositoryVersionEntity();
-    currentRepo.setVersion("2.2.2.0-2122");
-    ClusterVersionEntity current = new ClusterVersionEntity();
-    current.setRepositoryVersion(currentRepo);
-
-    Capture<StackId> capturedStackId = EasyMock.newCapture();
-    cluster.setDesiredStackVersion(capture(capturedStackId));
-      expectLastCall().once();
-    expect(cluster.getHosts()).andReturn(hosts).anyTimes();
-    expect(cluster.getCurrentClusterVersion()).andReturn(current).anyTimes();
-
-    expect(sch.getServiceName()).andReturn("HIVE").anyTimes();
-
-    expect(repositoryVersionDAOMock.findByDisplayName(anyObject(String.class))).andReturn(repoVersion);
-
-    clusterVersionDAO.updateVersions((Long) anyObject(),
-        (RepositoryVersionEntity) anyObject(), (RepositoryVersionEntity) anyObject());
-    expectLastCall().once();
-
-    hostVersionDAO.updateVersions((RepositoryVersionEntity) anyObject(), (RepositoryVersionEntity) anyObject());
-    expectLastCall().once();
-
-    hostComponentStateDAO.updateVersions((String) anyObject());
-    expectLastCall().once();
-
-    // replay
-    replay(managementController, response, clusters, resourceProviderFactory, csvResourceProvider,
-            cluster, repositoryVersionDAOMock, configHelper, sch, actionManager, clusterVersionDAO,
-            hostVersionDAO, hostComponentStateDAO);
-
-    ResourceProvider provider = AbstractControllerResourceProvider.getResourceProvider(
-            type,
-            PropertyHelper.getPropertyIds(type),
-            PropertyHelper.getKeyPropertyIds(type),
-            managementController);
-
-    injector.injectMembers(provider);
-
-
-    // add the property map to a set for the request.  add more maps for multiple creates
-    Map<String, Object> properties = new LinkedHashMap<>();
-
-    // add properties to the request map
-    properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID, clusterName);
-    properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_STATE_PROPERTY_ID, "CURRENT");
-    properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_REPOSITORY_VERSION_PROPERTY_ID, "HDP-2.2.2.0-2561");
-    properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_FORCE, "true");
-
-    // create the request
-    Request request = PropertyHelper.getUpdateRequest(properties, null);
-
-    SecurityContextHolder.getContext().setAuthentication(authentication);
-
-    provider.updateResources(request, null);
-
-    // verify
-    verify(managementController, response, clusterVersionDAO, hostVersionDAO, hostComponentStateDAO);
-    Assert.assertEquals(capturedStackId.getValue(),
-            new StackId(newDesiredStack.getStackName(), newDesiredStack.getStackVersion()));
-  }
-
-   @Test
    public void testCreateResourcesMixedAsAdministrator() throws Exception {
      testCreateResourcesMixed(TestAuthenticationFactory.createAdministrator());
    }
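
The removed assertions above lean on EasyMock's Capture to grab the StackId passed to
cluster.setDesiredStackVersion() and compare it after verification. A minimal,
self-contained sketch of that capture pattern, using a hypothetical DesiredStackHolder
interface in place of the real Cluster so it runs with only EasyMock and JUnit 4 on the
classpath:

    import static org.easymock.EasyMock.capture;
    import static org.easymock.EasyMock.createMock;
    import static org.easymock.EasyMock.expectLastCall;
    import static org.easymock.EasyMock.newCapture;
    import static org.easymock.EasyMock.replay;
    import static org.easymock.EasyMock.verify;

    import org.easymock.Capture;
    import org.junit.Assert;
    import org.junit.Test;

    public class DesiredStackCaptureSketchTest {

      /** Hypothetical stand-in for Cluster; only the captured call matters here. */
      interface DesiredStackHolder {
        void setDesiredStackVersion(String stackId);
      }

      @Test
      public void captureRecordsTheArgumentPassedToTheMock() {
        DesiredStackHolder holder = createMock(DesiredStackHolder.class);

        // Record the call and capture whatever argument the code under test passes in.
        Capture<String> capturedStackId = newCapture();
        holder.setDesiredStackVersion(capture(capturedStackId));
        expectLastCall().once();

        replay(holder);

        // Stand-in for the code under test driving the mock.
        holder.setDesiredStackVersion("HDP-2.0.1");

        verify(holder);
        Assert.assertEquals("HDP-2.0.1", capturedStackId.getValue());
      }
    }

The removed tests follow the same newCapture()/capture()/expectLastCall() sequence, with
the captured StackId only checked after updateResources() has run and verify() has passed.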

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostComponentResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostComponentResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostComponentResourceProviderTest.java
index a2b0a0c..cb9e31e 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostComponentResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostComponentResourceProviderTest.java
@@ -183,23 +183,33 @@ public class HostComponentResourceProviderTest {
     Set<ServiceComponentHostResponse> allResponse = new HashSet<>();
     StackId stackId = new StackId("HDP-0.1");
     StackId stackId2 = new StackId("HDP-0.2");
+
+    String repositoryVersion2 = "0.2-1234";
+
     allResponse.add(new ServiceComponentHostResponse(
-        "Cluster100", "Service100", "Component100", "Component 100", "Host100", "Host100", State.INSTALLED.toString(), stackId.getStackId(), State.STARTED.toString(),
-        stackId2.getStackId(), null));
+        "Cluster100", "Service100", "Component100", "Component 100", "Host100", "Host100",
+        State.INSTALLED.toString(), stackId.getStackId(), State.STARTED.toString(),
+        stackId2.getStackId(), repositoryVersion2, null));
+
     allResponse.add(new ServiceComponentHostResponse(
-        "Cluster100", "Service100", "Component101", "Component 101", "Host100", "Host100", State.INSTALLED.toString(), stackId.getStackId(), State.STARTED.toString(),
-        stackId2.getStackId(), null));
+        "Cluster100", "Service100", "Component101", "Component 101", "Host100", "Host100",
+        State.INSTALLED.toString(), stackId.getStackId(), State.STARTED.toString(),
+        stackId2.getStackId(), repositoryVersion2, null));
 
     allResponse.add(new ServiceComponentHostResponse(
-        "Cluster100", "Service100", "Component102", "Component 102", "Host100","Host100", State.INSTALLED.toString(), stackId.getStackId(), State.STARTED.toString(),
-        stackId2.getStackId(), null));
+        "Cluster100", "Service100", "Component102", "Component 102", "Host100", "Host100",
+        State.INSTALLED.toString(), stackId.getStackId(), State.STARTED.toString(),
+        stackId2.getStackId(), repositoryVersion2, null));
+
     Map<String, String> expectedNameValues = new HashMap<>();
     expectedNameValues.put(
         HostComponentResourceProvider.HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
     expectedNameValues.put(
         HostComponentResourceProvider.HOST_COMPONENT_STATE_PROPERTY_ID, State.INSTALLED.toString());
     expectedNameValues.put(
-        HostComponentResourceProvider.HOST_COMPONENT_STACK_ID_PROPERTY_ID, stackId.getStackId());
+        HostComponentResourceProvider.HOST_COMPONENT_VERSION_PROPERTY_ID, repositoryVersion2);
+    expectedNameValues.put(
+        HostComponentResourceProvider.HOST_COMPONENT_DESIRED_REPOSITORY_VERSION, repositoryVersion2);
     expectedNameValues.put(
         HostComponentResourceProvider.HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID, State.STARTED.toString());
     expectedNameValues.put(
@@ -219,7 +229,8 @@ public class HostComponentResourceProviderTest {
     propertyIds.add(HostComponentResourceProvider.HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID);
     propertyIds.add(HostComponentResourceProvider.HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID);
     propertyIds.add(HostComponentResourceProvider.HOST_COMPONENT_STATE_PROPERTY_ID);
-    propertyIds.add(HostComponentResourceProvider.HOST_COMPONENT_STACK_ID_PROPERTY_ID);
+    propertyIds.add(HostComponentResourceProvider.HOST_COMPONENT_VERSION_PROPERTY_ID);
+    propertyIds.add(HostComponentResourceProvider.HOST_COMPONENT_DESIRED_REPOSITORY_VERSION);
     propertyIds.add(HostComponentResourceProvider.HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID);
     propertyIds.add(HostComponentResourceProvider.HOST_COMPONENT_DESIRED_STACK_ID_PROPERTY_ID);
 
@@ -236,9 +247,11 @@ public class HostComponentResourceProviderTest {
     hostsComponentResource1.setProperty(HostComponentResourceProvider.HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID, "Component100");
     hostsComponentResource1.setProperty(HostComponentResourceProvider.HOST_COMPONENT_STATE_PROPERTY_ID, State.INSTALLED.name());
     hostsComponentResource1.setProperty(HostComponentResourceProvider.HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID, State.STARTED.name());
-    hostsComponentResource1.setProperty(HostComponentResourceProvider.HOST_COMPONENT_STACK_ID_PROPERTY_ID, stackId.getStackId());
+    hostsComponentResource1.setProperty(
+        HostComponentResourceProvider.HOST_COMPONENT_VERSION_PROPERTY_ID, repositoryVersion2);
     hostsComponentResource1.setProperty(HostComponentResourceProvider.HOST_COMPONENT_DESIRED_STACK_ID_PROPERTY_ID, stackId2.getStackId());
     hostsComponentResource1.setProperty(HostComponentResourceProvider.HOST_COMPONENT_UPGRADE_STATE_PROPERTY_ID, UpgradeState.NONE.name());
+    hostsComponentResource1.setProperty(HostComponentResourceProvider.HOST_COMPONENT_DESIRED_REPOSITORY_VERSION, repositoryVersion2);
 
     Resource hostsComponentResource2 = new ResourceImpl(Resource.Type.HostComponent);
     hostsComponentResource2.setProperty(HostComponentResourceProvider.HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
@@ -247,9 +260,11 @@ public class HostComponentResourceProviderTest {
     hostsComponentResource2.setProperty(HostComponentResourceProvider.HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID, "Component101");
     hostsComponentResource2.setProperty(HostComponentResourceProvider.HOST_COMPONENT_STATE_PROPERTY_ID, State.INSTALLED.name());
     hostsComponentResource2.setProperty(HostComponentResourceProvider.HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID, State.STARTED.name());
-    hostsComponentResource2.setProperty(HostComponentResourceProvider.HOST_COMPONENT_STACK_ID_PROPERTY_ID, stackId.getStackId());
+    hostsComponentResource2.setProperty(
+        HostComponentResourceProvider.HOST_COMPONENT_VERSION_PROPERTY_ID, repositoryVersion2);
     hostsComponentResource2.setProperty(HostComponentResourceProvider.HOST_COMPONENT_DESIRED_STACK_ID_PROPERTY_ID, stackId2.getStackId());
     hostsComponentResource2.setProperty(HostComponentResourceProvider.HOST_COMPONENT_UPGRADE_STATE_PROPERTY_ID, UpgradeState.NONE.name());
+    hostsComponentResource2.setProperty(HostComponentResourceProvider.HOST_COMPONENT_DESIRED_REPOSITORY_VERSION, repositoryVersion2);
 
     Resource hostsComponentResource3 = new ResourceImpl(Resource.Type.HostComponent);
     hostsComponentResource3.setProperty(HostComponentResourceProvider.HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
@@ -258,9 +273,11 @@ public class HostComponentResourceProviderTest {
     hostsComponentResource3.setProperty(HostComponentResourceProvider.HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID, "Component102");
     hostsComponentResource3.setProperty(HostComponentResourceProvider.HOST_COMPONENT_STATE_PROPERTY_ID, State.INSTALLED.name());
     hostsComponentResource3.setProperty(HostComponentResourceProvider.HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID, State.STARTED.name());
-    hostsComponentResource3.setProperty(HostComponentResourceProvider.HOST_COMPONENT_STACK_ID_PROPERTY_ID, stackId.getStackId());
+    hostsComponentResource3.setProperty(
+        HostComponentResourceProvider.HOST_COMPONENT_VERSION_PROPERTY_ID, repositoryVersion2);
     hostsComponentResource3.setProperty(HostComponentResourceProvider.HOST_COMPONENT_DESIRED_STACK_ID_PROPERTY_ID, stackId2.getStackId());
     hostsComponentResource3.setProperty(HostComponentResourceProvider.HOST_COMPONENT_UPGRADE_STATE_PROPERTY_ID, UpgradeState.NONE.name());
+    hostsComponentResource3.setProperty(HostComponentResourceProvider.HOST_COMPONENT_DESIRED_REPOSITORY_VERSION, repositoryVersion2);
 
     hostsComponentResources.add(hostsComponentResource1);
     hostsComponentResources.add(hostsComponentResource2);
@@ -337,7 +354,8 @@ public class HostComponentResourceProviderTest {
 
     Set<ServiceComponentHostResponse> nameResponse = new HashSet<>();
     nameResponse.add(new ServiceComponentHostResponse(
-        "Cluster102", "Service100", "Component100", "Component 100", "Host100", "Host100","INSTALLED", "", "", "", null));
+        "Cluster102", "Service100", "Component100", "Component 100", "Host100", "Host100",
+        "INSTALLED", "", "", "", "", null));
 
     // set expectations
     expect(managementController.getClusters()).andReturn(clusters).anyTimes();
@@ -538,7 +556,8 @@ public class HostComponentResourceProviderTest {
 
     Set<ServiceComponentHostResponse> nameResponse = new HashSet<>();
     nameResponse.add(new ServiceComponentHostResponse(
-        "Cluster102", "Service100", "Component100", "Component 100", "Host100", "Host100","INSTALLED", "", "", "", null));
+        "Cluster102", "Service100", "Component100", "Component 100", "Host100", "Host100",
+        "INSTALLED", "", "", "", "", null));
 
     // set expectations
     expect(managementController.getClusters()).andReturn(clusters).anyTimes();
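
For context on the constructor change exercised above: ServiceComponentHostResponse now
carries a repository version string between the desired stack id and the final (null)
argument. A sketch of building one such response outside the test fixture; the package
locations below are assumed from the imports used elsewhere in this patch:

    // Assumed package locations, based on other imports in this patch.
    import org.apache.ambari.server.controller.ServiceComponentHostResponse;
    import org.apache.ambari.server.state.StackId;
    import org.apache.ambari.server.state.State;

    public class ServiceComponentHostResponseSketch {

      public static ServiceComponentHostResponse sampleResponse() {
        StackId stackId = new StackId("HDP-0.1");         // reported stack
        StackId desiredStackId = new StackId("HDP-0.2");  // desired stack
        String repositoryVersion = "0.2-1234";            // new argument in this patch

        // Mirrors the twelve-argument call used in the updated test above; the trailing
        // null is the same unset final argument the test passes.
        return new ServiceComponentHostResponse(
            "Cluster100", "Service100", "Component100", "Component 100",
            "Host100", "Host100",
            State.INSTALLED.toString(), stackId.getStackId(), State.STARTED.toString(),
            desiredStackId.getStackId(), repositoryVersion, null);
      }
    }

The later hunks in this file pass the same shape with empty strings and nulls where no
version information is needed.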

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostResourceProviderTest.java
index 8772b24..d1a4a1a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostResourceProviderTest.java
@@ -301,11 +301,11 @@ public class HostResourceProviderTest extends EasyMockSupport {
     clusterSet.add(cluster);
 
     ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component100", "Component 100",
-        "Host100", "Host100", "STARTED", "", null, null, null);
+        "Host100", "Host100", "STARTED", "", null, null, null, null);
     ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component102", "Component 102",
-        "Host100", "Host100", "STARTED", "", null, null, null);
+        "Host100", "Host100", "STARTED", "", null, null, null, null);
     ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component103", "Component 103",
-        "Host100", "Host100", "STARTED", "", null, null, null);
+        "Host100", "Host100", "STARTED", "", null, null, null, null);
 
     Set<ServiceComponentHostResponse> responses = new HashSet<>();
     responses.add(shr1);
@@ -402,11 +402,11 @@ public class HostResourceProviderTest extends EasyMockSupport {
     clusterSet.add(cluster);
 
     ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component100", "Component 100",
-        "Host100", "Host100", "STARTED", "", null, null, null);
+        "Host100", "Host100", "STARTED", "", null, null, null, null);
     ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component102", "Component 102",
-        "Host100", "Host100", "STARTED", "", null, null, null);
+        "Host100", "Host100", "STARTED", "", null, null, null, null);
     ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component103", "Component 103",
-        "Host100", "Host100", "STARTED", "", null, null, null);
+        "Host100", "Host100", "STARTED", "", null, null, null, null);
 
     Set<ServiceComponentHostResponse> responses = new HashSet<>();
     responses.add(shr1);
@@ -498,11 +498,11 @@ public class HostResourceProviderTest extends EasyMockSupport {
     clusterSet.add(cluster);
 
     ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component100", "Component 100",
-        "Host100", "Host100", "STARTED", "", null, null, null);
+        "Host100", "Host100", "STARTED", "", null, null, null, null);
     ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component102", "Component 102",
-        "Host100", "Host100", "INSTALLED", "", null, null, null);
+        "Host100", "Host100", "INSTALLED", "", null, null, null, null);
     ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component103", "Component 103",
-        "Host100", "Host100", "STARTED", "", null, null, null);
+        "Host100", "Host100", "STARTED", "", null, null, null, null);
 
     Set<ServiceComponentHostResponse> responses = new HashSet<>();
     responses.add(shr1);
@@ -698,7 +698,7 @@ public class HostResourceProviderTest extends EasyMockSupport {
     clusterSet.add(cluster);
 
     ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component100", "Component 100",
-        "Host100", "Host100", "STARTED", "", null, null, null);
+        "Host100", "Host100", "STARTED", "", null, null, null, null);
 
     Set<ServiceComponentHostResponse> responses = new HashSet<>();
     responses.add(shr1);
@@ -785,11 +785,11 @@ public class HostResourceProviderTest extends EasyMockSupport {
     clusterSet.add(cluster);
 
     ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component100", "Component 100",
-        "Host100", "Host100", "STARTED", "", null, null, null);
+        "Host100", "Host100", "STARTED", "", null, null, null, null);
     ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component102", "Component 102",
-        "Host100", "Host100", "INSTALLED", "", null, null, null);
+        "Host100", "Host100", "INSTALLED", "", null, null, null, null);
     ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component103", "Component 103",
-        "Host100", "Host100", "STARTED", "", null, null, null);
+        "Host100", "Host100", "STARTED", "", null, null, null, null);
 
     Set<ServiceComponentHostResponse> responses = new HashSet<>();
     responses.add(shr1);

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/JMXHostProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/JMXHostProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/JMXHostProviderTest.java
index 3cc6416..9486f9d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/JMXHostProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/JMXHostProviderTest.java
@@ -47,6 +47,9 @@ import org.apache.ambari.server.controller.spi.UnsupportedPropertyException;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.orm.OrmTestHelper;
+import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.security.TestAuthenticationFactory;
 import org.apache.ambari.server.security.authorization.AuthorizationException;
 import org.apache.ambari.server.state.Cluster;
@@ -83,6 +86,11 @@ public class JMXHostProviderTest {
   private static final String MAPREDUCE_HTTPS_POLICY = "mapreduce.jobhistory.http.policy";
   private static final String MAPREDUCE_HTTPS_PORT = "mapreduce.jobhistory.webapp.https.address";
 
+  private final String STACK_VERSION = "2.0.6";
+  private final String REPO_VERSION = "2.0.6-1234";
+  private final StackId STACK_ID = new StackId("HDP", STACK_VERSION);
+  private RepositoryVersionEntity m_repositoryVersion;
+
   @Before
   public void setup() throws Exception {
     injector = Guice.createInjector(new InMemoryDefaultTestModule());
@@ -90,6 +98,10 @@ public class JMXHostProviderTest {
     injector.getInstance(GuiceJpaInitializer.class);
     clusters = injector.getInstance(Clusters.class);
     controller = injector.getInstance(AmbariManagementController.class);
+    OrmTestHelper ormTestHelper = injector.getInstance(OrmTestHelper.class);
+
+    m_repositoryVersion = ormTestHelper.getOrCreateRepositoryVersion(STACK_ID, REPO_VERSION);
+    Assert.assertNotNull(m_repositoryVersion);
 
     // Set the authenticated user
     // TODO: remove this or replace the authenticated user to test authorization rules
@@ -104,17 +116,22 @@ public class JMXHostProviderTest {
     SecurityContextHolder.getContext().setAuthentication(null);
   }
 
-  private void createService(String clusterName,
-                             String serviceName, State desiredState)
+  private void createService(String clusterName, String serviceName, State desiredState)
       throws AmbariException, AuthorizationException {
     String dStateStr = null;
+
     if (desiredState != null) {
       dStateStr = desiredState.toString();
     }
-    ServiceRequest r1 = new ServiceRequest(clusterName, serviceName, dStateStr);
+
+    ServiceRequest r1 = new ServiceRequest(clusterName, serviceName, STACK_ID.getStackId(),
+        REPO_VERSION, dStateStr);
+
     Set<ServiceRequest> requests = new HashSet<>();
     requests.add(r1);
-    ServiceResourceProviderTest.createServices(controller, requests);
+
+    ServiceResourceProviderTest.createServices(controller,
+        injector.getInstance(RepositoryVersionDAO.class), requests);
   }
 
   private void createServiceComponent(String clusterName,
@@ -640,14 +657,17 @@ public class JMXHostProviderTest {
 
     Injector injector = createNiceMock(Injector.class);
     MaintenanceStateHelper maintenanceStateHelper = createNiceMock(MaintenanceStateHelper.class);
+    RepositoryVersionDAO repositoryVersionDAO = createNiceMock(RepositoryVersionDAO.class);
+
     {
       expect(injector.getInstance(Clusters.class)).andReturn(null);
       replay(maintenanceStateHelper, injector);
     }
 
-    ResourceProvider serviceResourceProvider = new ServiceResourceProvider(PropertyHelper
-      .getPropertyIds(Resource.Type.Service),
-      PropertyHelper.getKeyPropertyIds(Resource.Type.Service), controller, maintenanceStateHelper);
+    ResourceProvider serviceResourceProvider = new ServiceResourceProvider(
+        PropertyHelper.getPropertyIds(Resource.Type.Service),
+        PropertyHelper.getKeyPropertyIds(Resource.Type.Service), controller, maintenanceStateHelper,
+        repositoryVersionDAO);
 
     ResourceProvider hostCompResourceProvider = new
       HostComponentResourceProvider(PropertyHelper.getPropertyIds(Resource
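
The reworked createService() above now supplies the stack id and repository version
directly on the ServiceRequest. A small sketch of that call shape, assuming ServiceRequest
lives in org.apache.ambari.server.controller alongside the other request/response classes:

    import org.apache.ambari.server.controller.ServiceRequest;  // assumed package
    import org.apache.ambari.server.state.State;

    public class ServiceRequestSketch {

      public static ServiceRequest hdfsRequest(String clusterName) {
        String stackId = "HDP-2.0.6";             // STACK_ID.getStackId() in the test
        String repositoryVersion = "2.0.6-1234";  // REPO_VERSION in the test

        // Five-argument form used above; other tests in this patch call a six-argument
        // overload that appends one more (null) argument.
        return new ServiceRequest(clusterName, "HDFS", stackId, repositoryVersion,
            State.INSTALLED.toString());
      }
    }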

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestImplTest.java
index 4aacf91..2e712d1 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestImplTest.java
@@ -61,7 +61,8 @@ public class RequestImplTest {
     Assert.assertTrue(validPropertyIds.contains("params/run_smoke_test"));
     Assert.assertTrue(validPropertyIds.contains("HostRoles/actual_configs"));
     Assert.assertTrue(validPropertyIds.contains("HostRoles/desired_stack_id"));
-    Assert.assertTrue(validPropertyIds.contains("HostRoles/stack_id"));
+    Assert.assertTrue(validPropertyIds.contains("HostRoles/version"));
+    Assert.assertTrue(validPropertyIds.contains("HostRoles/desired_repository_version"));
     Assert.assertTrue(validPropertyIds.contains("HostRoles/desired_state"));
     Assert.assertTrue(validPropertyIds.contains("HostRoles/state"));
     Assert.assertTrue(validPropertyIds.contains("HostRoles/component_name"));

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ServiceResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ServiceResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ServiceResourceProviderTest.java
index 44c2493..04b7933 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ServiceResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ServiceResourceProviderTest.java
@@ -62,6 +62,9 @@ import org.apache.ambari.server.controller.spi.SystemException;
 import org.apache.ambari.server.controller.utilities.PredicateBuilder;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
 import org.apache.ambari.server.metadata.RoleCommandOrder;
+import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
+import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.security.TestAuthenticationFactory;
 import org.apache.ambari.server.security.authorization.AuthorizationException;
 import org.apache.ambari.server.serveraction.kerberos.KerberosAdminAuthenticationException;
@@ -117,10 +120,17 @@ public class ServiceResourceProviderTest {
     AmbariMetaInfo ambariMetaInfo = createNiceMock(AmbariMetaInfo.class);
     ServiceInfo serviceInfo = createNiceMock(ServiceInfo.class);
 
+    ClusterVersionEntity clusterVersion = createNiceMock(ClusterVersionEntity.class);
+    RepositoryVersionEntity repositoryVersion = createNiceMock(RepositoryVersionEntity.class);
+    expect(clusterVersion.getRepositoryVersion()).andReturn(repositoryVersion).atLeastOnce();
+
     expect(managementController.getClusters()).andReturn(clusters).anyTimes();
     expect(managementController.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
 
-    expect(cluster.addService("Service100")).andReturn(service);
+    expect(cluster.getCurrentClusterVersion()).andReturn(clusterVersion).atLeastOnce();
+
+    expect(cluster.addService(eq("Service100"),
+        EasyMock.anyObject(RepositoryVersionEntity.class))).andReturn(service);
 
     expect(clusters.getCluster("Cluster100")).andReturn(cluster).anyTimes();
 
@@ -135,7 +145,8 @@ public class ServiceResourceProviderTest {
     expect(ambariMetaInfo.getService((String)anyObject(), (String)anyObject(), (String)anyObject())).andReturn(serviceInfo).anyTimes();
 
     // replay
-    replay(managementController, clusters, cluster, service, ambariMetaInfo, stackId, serviceFactory, serviceInfo);
+    replay(managementController, clusters, cluster, clusterVersion, repositoryVersion, service,
+        ambariMetaInfo, stackId, serviceFactory, serviceInfo);
 
     SecurityContextHolder.getContext().setAuthentication(authentication);
 
@@ -160,7 +171,8 @@ public class ServiceResourceProviderTest {
     provider.createResources(request);
 
     // verify
-    verify(managementController, clusters, cluster, service, ambariMetaInfo, stackId, serviceFactory, serviceInfo);
+    verify(managementController, clusters, cluster, clusterVersion, repositoryVersion, service,
+        ambariMetaInfo, stackId, serviceFactory, serviceInfo);
   }
 
   @Test
@@ -596,6 +608,7 @@ public class ServiceResourceProviderTest {
 
   private void testUpdateResources(Authentication authentication) throws Exception{
     MaintenanceStateHelper maintenanceStateHelper = createNiceMock(MaintenanceStateHelper.class);
+    RepositoryVersionDAO repositoryVersionDAO = createNiceMock(RepositoryVersionDAO.class);
     AmbariManagementController managementController = createMock(AmbariManagementController.class);
     Clusters clusters = createNiceMock(Clusters.class);
     Cluster cluster = createNiceMock(Cluster.class);
@@ -656,11 +669,13 @@ public class ServiceResourceProviderTest {
 
     // replay
     replay(managementController, clusters, cluster, rco, maintenanceStateHelper,
-        service0, serviceFactory, ambariMetaInfo, requestStages, requestStatusResponse, stackId, serviceInfo);
+        repositoryVersionDAO, service0, serviceFactory, ambariMetaInfo, requestStages,
+        requestStatusResponse, stackId, serviceInfo);
 
     SecurityContextHolder.getContext().setAuthentication(authentication);
 
-    ServiceResourceProvider provider = getServiceProvider(managementController, maintenanceStateHelper);
+    ServiceResourceProvider provider = getServiceProvider(managementController,
+        maintenanceStateHelper, repositoryVersionDAO);
 
     // add the property map to a set for the request.
     Map<String, Object> properties = new LinkedHashMap<>();
@@ -697,6 +712,7 @@ public class ServiceResourceProviderTest {
 
   private void testReconfigureClientsFlag(Authentication authentication) throws Exception {
     MaintenanceStateHelper maintenanceStateHelper = createNiceMock(MaintenanceStateHelper.class);
+    RepositoryVersionDAO repositoryVersionDAO = createNiceMock(RepositoryVersionDAO.class);
     AmbariManagementController managementController1 = createMock(AmbariManagementController.class);
     AmbariManagementController managementController2 = createMock
         (AmbariManagementController.class);
@@ -787,14 +803,17 @@ public class ServiceResourceProviderTest {
     andReturn(Collections.<Service>emptySet()).anyTimes();
 
     // replay
-    replay(managementController1, response1, managementController2, requestStages1, requestStages2, response2,
-        clusters, cluster, service0, serviceResponse0, ambariMetaInfo, rco, maintenanceStateHelper, stackId, serviceInfo);
+    replay(managementController1, response1, managementController2, requestStages1, requestStages2,
+        response2, clusters, cluster, service0, serviceResponse0, ambariMetaInfo, rco,
+        maintenanceStateHelper, repositoryVersionDAO, stackId, serviceInfo);
 
     SecurityContextHolder.getContext().setAuthentication(authentication);
 
-    ServiceResourceProvider provider1 = getServiceProvider(managementController1, maintenanceStateHelper);
+    ServiceResourceProvider provider1 = getServiceProvider(managementController1,
+        maintenanceStateHelper, repositoryVersionDAO);
 
-    ServiceResourceProvider provider2 = getServiceProvider(managementController2, maintenanceStateHelper);
+    ServiceResourceProvider provider2 = getServiceProvider(managementController2,
+        maintenanceStateHelper, repositoryVersionDAO);
 
     // add the property map to a set for the request.
     Map<String, Object> properties = new LinkedHashMap<>();
@@ -1122,9 +1141,11 @@ public class ServiceResourceProviderTest {
     AmbariManagementController managementController = createMock(AmbariManagementController.class);
 
     MaintenanceStateHelper maintenanceStateHelperMock = createNiceMock(MaintenanceStateHelper.class);
-    AbstractResourceProvider provider = new ServiceResourceProvider(propertyIds,
-        keyPropertyIds,
-        managementController, maintenanceStateHelperMock);
+    RepositoryVersionDAO repositoryVersionDAO = createNiceMock(RepositoryVersionDAO.class);
+    replay(maintenanceStateHelperMock, repositoryVersionDAO);
+
+    AbstractResourceProvider provider = new ServiceResourceProvider(propertyIds, keyPropertyIds,
+        managementController, maintenanceStateHelperMock, repositoryVersionDAO);
 
     Set<String> unsupported = provider.checkPropertyIds(Collections.singleton("foo"));
     Assert.assertTrue(unsupported.isEmpty());
@@ -1156,26 +1177,30 @@ public class ServiceResourceProviderTest {
    */
   public static ServiceResourceProvider getServiceProvider(AmbariManagementController managementController) throws  AmbariException {
     MaintenanceStateHelper maintenanceStateHelperMock = createNiceMock(MaintenanceStateHelper.class);
+    RepositoryVersionDAO repositoryVersionDAO = createNiceMock(RepositoryVersionDAO.class);
     expect(maintenanceStateHelperMock.isOperationAllowed(anyObject(Resource.Type.class), anyObject(Service.class))).andReturn(true).anyTimes();
     expect(maintenanceStateHelperMock.isOperationAllowed(anyObject(Resource.Type.class), anyObject(ServiceComponentHost.class))).andReturn(true).anyTimes();
-    replay(maintenanceStateHelperMock);
-    return getServiceProvider(managementController, maintenanceStateHelperMock);
+    replay(maintenanceStateHelperMock, repositoryVersionDAO);
+    return getServiceProvider(managementController, maintenanceStateHelperMock, repositoryVersionDAO);
   }
 
   /**
    * This factory method allows to define custom MaintenanceStateHelper mock.
    */
-  public static ServiceResourceProvider getServiceProvider(AmbariManagementController managementController,
-                                                           MaintenanceStateHelper maintenanceStateHelper) {
+  public static ServiceResourceProvider getServiceProvider(
+      AmbariManagementController managementController,
+      MaintenanceStateHelper maintenanceStateHelper, RepositoryVersionDAO repositoryVersionDAO) {
     Resource.Type type = Resource.Type.Service;
     return new ServiceResourceProvider(PropertyHelper.getPropertyIds(type),
             PropertyHelper.getKeyPropertyIds(type),
-            managementController, maintenanceStateHelper);
+        managementController, maintenanceStateHelper, repositoryVersionDAO);
   }
 
-  public static void createServices(AmbariManagementController controller, Set<ServiceRequest> requests)
+  public static void createServices(AmbariManagementController controller,
+      RepositoryVersionDAO repositoryVersionDAO, Set<ServiceRequest> requests)
       throws AmbariException, AuthorizationException {
-    ServiceResourceProvider provider = getServiceProvider(controller);
+    MaintenanceStateHelper maintenanceStateHelperMock = createNiceMock(MaintenanceStateHelper.class);
+    ServiceResourceProvider provider = getServiceProvider(controller, maintenanceStateHelperMock, repositoryVersionDAO);
     provider.createServices(requests);
   }
 
@@ -1205,7 +1230,7 @@ public class ServiceResourceProviderTest {
       throws AmbariException, AuthorizationException {
     ServiceResourceProvider provider;
     if (maintenanceStateHelper != null) {
-      provider = getServiceProvider(controller, maintenanceStateHelper);
+      provider = getServiceProvider(controller, maintenanceStateHelper, null);
     } else {
       provider = getServiceProvider(controller);
     }
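
Since ServiceResourceProvider now takes a RepositoryVersionDAO in its constructor, wiring
a provider by hand follows the same pattern as the getServiceProvider() helpers above. A
sketch with both collaborators as replayed nice mocks; it sits in the provider's own
package in case the constructor is not public, and MaintenanceStateHelper's package is
assumed:

    package org.apache.ambari.server.controller.internal;

    import static org.easymock.EasyMock.createNiceMock;
    import static org.easymock.EasyMock.replay;

    import org.apache.ambari.server.controller.AmbariManagementController;
    import org.apache.ambari.server.controller.MaintenanceStateHelper;  // assumed package
    import org.apache.ambari.server.controller.spi.Resource;
    import org.apache.ambari.server.controller.utilities.PropertyHelper;
    import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;

    public class ServiceProviderWiringSketch {

      public static ServiceResourceProvider sketchProvider(AmbariManagementController controller) {
        MaintenanceStateHelper maintenanceStateHelper = createNiceMock(MaintenanceStateHelper.class);
        RepositoryVersionDAO repositoryVersionDAO = createNiceMock(RepositoryVersionDAO.class);
        replay(maintenanceStateHelper, repositoryVersionDAO);

        // Mirrors getServiceProvider(...) above: the DAO is now a constructor argument.
        Resource.Type type = Resource.Type.Service;
        return new ServiceResourceProvider(
            PropertyHelper.getPropertyIds(type),
            PropertyHelper.getKeyPropertyIds(type),
            controller, maintenanceStateHelper, repositoryVersionDAO);
      }
    }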

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackUpgradeConfigurationMergeTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackUpgradeConfigurationMergeTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackUpgradeConfigurationMergeTest.java
index 1c45589..bc178ea 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackUpgradeConfigurationMergeTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackUpgradeConfigurationMergeTest.java
@@ -55,6 +55,7 @@ import org.apache.ambari.server.state.ServiceComponentHostFactory;
 import org.apache.ambari.server.state.ServiceFactory;
 import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.UpgradeContext;
 import org.apache.ambari.server.state.UpgradeContextFactory;
 import org.apache.ambari.server.state.configgroup.ConfigGroupFactory;
 import org.apache.ambari.server.state.scheduler.RequestExecutionFactory;
@@ -90,7 +91,6 @@ public class StackUpgradeConfigurationMergeTest extends EasyMockSupport {
   private static final StackId s_targetStackId = new StackId("HDP-2.5");
 
   private Injector m_injector;
-  private Clusters m_clustersMock;
   private AmbariMetaInfo m_ambariMetaInfoMock;
 
   /**
@@ -98,7 +98,6 @@ public class StackUpgradeConfigurationMergeTest extends EasyMockSupport {
    */
   @Before
   public void before() throws Exception {
-    m_clustersMock = createNiceMock(Clusters.class);
     m_ambariMetaInfoMock = createNiceMock(AmbariMetaInfo.class);
 
     MockModule mockModule = new MockModule();
@@ -158,6 +157,8 @@ public class StackUpgradeConfigurationMergeTest extends EasyMockSupport {
     UpgradePack upgradePack = createNiceMock(UpgradePack.class);
     StackEntity targetStack = createNiceMock(StackEntity.class);
 
+    String version = "2.5.0.0-1234";
+
     // mocks which were bound previously
     AmbariManagementController amc = m_injector.getInstance(AmbariManagementController.class);
     AmbariMetaInfo ambariMetaInfo = m_injector.getInstance(AmbariMetaInfo.class);
@@ -173,7 +174,9 @@ public class StackUpgradeConfigurationMergeTest extends EasyMockSupport {
     EasyMock.expect(targetStack.getStackVersion()).andReturn("2.5").anyTimes();
 
     EasyMock.expect(repositoryVersionEntity.getStack()).andReturn(targetStack);
-    EasyMock.expect(repositoryVersionDAO.findByStackNameAndVersion("HDP", "2.5.0.0-1234")).andReturn(repositoryVersionEntity);
+    EasyMock.expect(repositoryVersionEntity.getVersion()).andReturn(version);
+    EasyMock.expect(repositoryVersionDAO.findByStackNameAndVersion("HDP", version)).andReturn(
+        repositoryVersionEntity);
 
     EasyMock.expect(upgradePack.getGroups(Direction.UPGRADE)).andReturn(new ArrayList<Grouping>());
 
@@ -236,13 +239,20 @@ public class StackUpgradeConfigurationMergeTest extends EasyMockSupport {
 
     EasyMock.expectLastCall();
 
+    UpgradeContext upgradeContext = createNiceMock(UpgradeContext.class);
+    EasyMock.expect(upgradeContext.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
+    EasyMock.expect(upgradeContext.getCluster()).andReturn(cluster).anyTimes();
+    EasyMock.expect(upgradeContext.getDirection()).andReturn(Direction.UPGRADE).anyTimes();
+    EasyMock.expect(upgradeContext.getUpgradePack()).andReturn(upgradePack).anyTimes();
+    EasyMock.expect(upgradeContext.getTargetRepositoryVersion()).andReturn(repositoryVersionEntity).anyTimes();
+    EasyMock.expect(upgradeContext.getTargetStackId()).andReturn(new StackId("HDP-2.5")).anyTimes();
+    EasyMock.expect(upgradeContext.getVersion()).andReturn(version).anyTimes();
     replayAll();
 
     UpgradeResourceProvider upgradeResourceProvider = new UpgradeResourceProvider(amc);
     m_injector.injectMembers(upgradeResourceProvider);
 
-    upgradeResourceProvider.applyStackAndProcessConfigurations("HDP", cluster, "2.5.0.0-1234",
-        Direction.UPGRADE, upgradePack, "admin");
+    upgradeResourceProvider.applyStackAndProcessConfigurations(upgradeContext);
 
     // assertion time!
     Map<String, Map<String, String>> mergedConfigurations = capturedArgument.getValue();
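
applyStackAndProcessConfigurations() now receives the whole UpgradeContext instead of the
stack name, version, direction, and upgrade pack as separate arguments. A minimal sketch
of stubbing such a context as a nice mock, limited to the accessors the expectations above
rely on; anything left unstubbed falls back to the nice-mock defaults:

    import static org.easymock.EasyMock.createNiceMock;
    import static org.easymock.EasyMock.expect;
    import static org.easymock.EasyMock.replay;

    import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
    import org.apache.ambari.server.state.StackId;
    import org.apache.ambari.server.state.UpgradeContext;

    public class UpgradeContextStubSketch {

      public static UpgradeContext stubContext(RepositoryVersionEntity targetRepositoryVersion) {
        String version = "2.5.0.0-1234";

        // Only the accessors exercised by the merge test are given explicit answers.
        UpgradeContext upgradeContext = createNiceMock(UpgradeContext.class);
        expect(upgradeContext.getTargetRepositoryVersion()).andReturn(targetRepositoryVersion).anyTimes();
        expect(upgradeContext.getTargetStackId()).andReturn(new StackId("HDP-2.5")).anyTimes();
        expect(upgradeContext.getVersion()).andReturn(version).anyTimes();
        replay(upgradeContext);

        return upgradeContext;
      }
    }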

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java
index 36889b2..3039267 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java
@@ -157,7 +157,9 @@ public class UpgradeResourceProviderHDP22Test {
     clusters.addCluster("c1", stackId);
     Cluster cluster = clusters.getCluster("c1");
 
-    helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
+    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(stackId,
+        stackId.getStackVersion());
+
     cluster.createClusterVersion(stackId, stackId.getStackVersion(), "admin", RepositoryVersionState.INSTALLING);
     cluster.transitionClusterVersion(stackId, stackId.getStackVersion(), RepositoryVersionState.CURRENT);
 
@@ -172,8 +174,7 @@ public class UpgradeResourceProviderHDP22Test {
     clusters.mapHostToCluster("h1", "c1");
 
     // add a single HIVE server
-    Service service = cluster.addService("HIVE");
-    service.setDesiredStackVersion(cluster.getDesiredStackVersion());
+    Service service = cluster.addService("HIVE", repositoryVersion);
 
     ServiceComponent component = service.addServiceComponent("HIVE_SERVER");
     ServiceComponentHost sch = component.addServiceComponentHost("h1");
@@ -215,7 +216,7 @@ public class UpgradeResourceProviderHDP22Test {
         assertEquals(oldStack, sc.getDesiredStackVersion());
 
         for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
-          assertEquals(oldStack, sch.getDesiredStackVersion());
+          assertEquals("2.2.0.0", sch.getVersion());
         }
       }
     }
@@ -260,10 +261,6 @@ public class UpgradeResourceProviderHDP22Test {
 
       for (ServiceComponent sc : s.getServiceComponents().values()) {
         assertEquals(newStack, sc.getDesiredStackVersion());
-
-        for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
-          assertEquals(newStack, sch.getDesiredStackVersion());
-        }
       }
     }
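
The HDP 2.2 test setup above now threads the RepositoryVersionEntity returned by
OrmTestHelper straight into Cluster.addService() rather than setting a desired stack on
the Service afterwards. A sketch of that sequence (Service's package is assumed; the rest
match imports visible in this patch):

    import org.apache.ambari.server.orm.OrmTestHelper;
    import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
    import org.apache.ambari.server.state.Cluster;
    import org.apache.ambari.server.state.Service;  // assumed package
    import org.apache.ambari.server.state.StackId;

    public class AddServiceWithRepoSketch {

      // Create (or look up) the repository version for the stack and register the
      // service against it, as the updated setup above does for HIVE.
      public static Service addHive(OrmTestHelper helper, Cluster cluster, StackId stackId)
          throws Exception {
        RepositoryVersionEntity repositoryVersion =
            helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());

        return cluster.addService("HIVE", repositoryVersion);
      }
    }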
 


[5/9] ambari git commit: AMBARI-20894 - Setting Desired Stack Ids to Correct Values During Service and Patch Upgrades (jonathanhurley)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index 628a56f..38e6a22 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -231,6 +231,16 @@ public class AmbariManagementControllerTest {
   private TopologyHostInfoDAO topologyHostInfoDAO;
   private HostRoleCommandDAO hostRoleCommandDAO;
   private StackManagerMock stackManagerMock;
+  private RepositoryVersionDAO repositoryVersionDAO;
+
+  RepositoryVersionEntity repositoryVersion01;
+  RepositoryVersionEntity repositoryVersion02;
+  RepositoryVersionEntity repositoryVersion120;
+  RepositoryVersionEntity repositoryVersion201;
+  RepositoryVersionEntity repositoryVersion206;
+  RepositoryVersionEntity repositoryVersion207;
+  RepositoryVersionEntity repositoryVersion208;
+  RepositoryVersionEntity repositoryVersion220;
 
   @Rule
   public ExpectedException expectedException = ExpectedException.none();
@@ -276,6 +286,32 @@ public class AmbariManagementControllerTest {
     hostRoleCommandDAO = injector.getInstance(HostRoleCommandDAO.class);
     stackManagerMock = (StackManagerMock) ambariMetaInfo.getStackManager();
     EasyMock.replay(injector.getInstance(AuditLogger.class));
+
+    repositoryVersion01 = helper.getOrCreateRepositoryVersion(
+        new StackId("HDP-0.1"), "0.1-1234");
+
+    repositoryVersion02 = helper.getOrCreateRepositoryVersion(
+        new StackId("HDP-0.2"), "0.2-1234");
+
+    repositoryVersion120 = helper.getOrCreateRepositoryVersion(
+        new StackId("HDP-1.2.0"), "1.2.0-1234");
+
+    repositoryVersion201 = helper.getOrCreateRepositoryVersion(
+        new StackId("HDP-2.0.1"), "2.0.1-1234");
+
+    repositoryVersion206 = helper.getOrCreateRepositoryVersion(
+        new StackId("HDP-2.0.6"), "2.0.6-1234");
+
+    repositoryVersion207 = helper.getOrCreateRepositoryVersion(
+        new StackId("HDP-2.0.7"), "2.0.7-1234");
+
+    repositoryVersion208 = helper.getOrCreateRepositoryVersion(
+        new StackId("HDP-2.0.8"), "2.0.8-1234");
+
+    repositoryVersion220 = helper.getOrCreateRepositoryVersion(
+        new StackId("HDP-2.2.0"), "2.2.0-1234");
+
+    repositoryVersionDAO = injector.getInstance(RepositoryVersionDAO.class);
   }
 
   @After
@@ -333,17 +369,35 @@ public class AmbariManagementControllerTest {
     controller.createCluster(r);
   }
 
-  private void createService(String clusterName,
-      String serviceName, State desiredState) throws AmbariException, AuthorizationException {
+  private void createService(String clusterName, String serviceName, State desiredState) throws AmbariException, AuthorizationException {
+    createService(clusterName, serviceName, repositoryVersion02, desiredState);
+  }
+
+  private void createService(String clusterName, String serviceName,
+      RepositoryVersionEntity repositoryVersion, State desiredState)
+      throws AmbariException, AuthorizationException {
     String dStateStr = null;
     if (desiredState != null) {
       dStateStr = desiredState.toString();
     }
-    ServiceRequest r1 = new ServiceRequest(clusterName, serviceName, dStateStr);
+
+    Cluster cluster = clusters.getCluster(clusterName);
+    if (null == cluster.getCurrentClusterVersion()) {
+      StackId stackId = cluster.getCurrentStackVersion();
+      helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
+
+      cluster.createClusterVersion(stackId, stackId.getStackVersion(), "admin",
+          RepositoryVersionState.INSTALLING);
+    }
+
+    ServiceRequest r1 = new ServiceRequest(clusterName, serviceName,
+        repositoryVersion.getStackId().getStackId(), repositoryVersion.getVersion(), dStateStr,
+        null);
+
     Set<ServiceRequest> requests = new HashSet<>();
     requests.add(r1);
 
-    ServiceResourceProviderTest.createServices(controller, requests);
+    ServiceResourceProviderTest.createServices(controller, repositoryVersionDAO, requests);
   }
 
   private void createServiceComponent(String clusterName,
@@ -419,7 +473,7 @@ public class AmbariManagementControllerTest {
   private long stopService(String clusterName, String serviceName,
       boolean runSmokeTests, boolean reconfigureClients) throws
       AmbariException, AuthorizationException {
-    ServiceRequest r = new ServiceRequest(clusterName, serviceName, State.INSTALLED.toString());
+    ServiceRequest r = new ServiceRequest(clusterName, serviceName, null, null, State.INSTALLED.toString(), null);
     Set<ServiceRequest> requests = new HashSet<>();
     requests.add(r);
     Map<String, String> mapRequestProps = new HashMap<>();
@@ -484,8 +538,8 @@ public class AmbariManagementControllerTest {
                             boolean runSmokeTests, boolean reconfigureClients,
                             MaintenanceStateHelper maintenanceStateHelper) throws
       AmbariException, AuthorizationException {
-    ServiceRequest r = new ServiceRequest(clusterName, serviceName,
-        State.STARTED.toString());
+    ServiceRequest r = new ServiceRequest(clusterName, serviceName, "HDP-0.2", "0.2-1234",
+        State.STARTED.toString(), null);
     Set<ServiceRequest> requests = new HashSet<>();
     requests.add(r);
     Map<String, String> mapRequestProps = new HashMap<>();
@@ -539,8 +593,8 @@ public class AmbariManagementControllerTest {
                               Map<String, String> mapRequestPropsInput)
       throws AmbariException, AuthorizationException {
 
-    ServiceRequest r = new ServiceRequest(clusterName, serviceName,
-        State.INSTALLED.toString());
+    ServiceRequest r = new ServiceRequest(clusterName, serviceName, "HDP-0.2", "0.2-1234",
+        State.INSTALLED.toString(), null);
 
     Set<ServiceRequest> requests = new HashSet<>();
     requests.add(r);
@@ -681,9 +735,7 @@ public class AmbariManagementControllerTest {
     String cluster1 = getUniqueName();
     createCluster(cluster1);
     String serviceName = "HDFS";
-    clusters.getCluster(cluster1).setDesiredStackVersion(
-        new StackId("HDP-0.1"));
-    createService(cluster1, serviceName, State.INIT);
+    createService(cluster1, serviceName, repositoryVersion02, State.INIT);
 
     Service s =
         clusters.getCluster(cluster1).getService(serviceName);
@@ -691,7 +743,7 @@ public class AmbariManagementControllerTest {
     Assert.assertEquals(serviceName, s.getName());
     Assert.assertEquals(cluster1, s.getCluster().getClusterName());
 
-    ServiceRequest req = new ServiceRequest(cluster1, serviceName, null);
+    ServiceRequest req = new ServiceRequest(cluster1, "HDFS", "HDP-0.2", "0.2-1234", null, null);
 
     Set<ServiceResponse> r =
         ServiceResourceProviderTest.getServices(controller, Collections.singleton(req));
@@ -699,11 +751,8 @@ public class AmbariManagementControllerTest {
     ServiceResponse resp = r.iterator().next();
     Assert.assertEquals(serviceName, resp.getServiceName());
     Assert.assertEquals(cluster1, resp.getClusterName());
-    Assert.assertEquals(State.INIT.toString(),
-        resp.getDesiredState());
-    Assert.assertEquals("HDP-0.1", resp.getDesiredStackVersion());
-
-    // TODO test resp.getConfigVersions()
+    Assert.assertEquals(State.INIT.toString(), resp.getDesiredState());
+    Assert.assertEquals("HDP-0.2", resp.getDesiredStackVersion());
   }
 
   @Test
@@ -716,9 +765,9 @@ public class AmbariManagementControllerTest {
 
     try {
       set1.clear();
-      ServiceRequest rInvalid = new ServiceRequest(null, null, null);
+      ServiceRequest rInvalid = new ServiceRequest(null, null, null, null, null, null);
       set1.add(rInvalid);
-      ServiceResourceProviderTest.createServices(controller, set1);
+      ServiceResourceProviderTest.createServices(controller, repositoryVersionDAO, set1);
       fail("Expected failure for invalid requests");
     } catch (Exception e) {
       // Expected
@@ -726,9 +775,9 @@ public class AmbariManagementControllerTest {
 
     try {
       set1.clear();
-      ServiceRequest rInvalid = new ServiceRequest("foo", null, null);
+      ServiceRequest rInvalid = new ServiceRequest("foo", null, null, null, null, null);
       set1.add(rInvalid);
-      ServiceResourceProviderTest.createServices(controller, set1);
+      ServiceResourceProviderTest.createServices(controller, repositoryVersionDAO, set1);
       fail("Expected failure for invalid requests");
     } catch (Exception e) {
       // Expected
@@ -736,9 +785,9 @@ public class AmbariManagementControllerTest {
 
     try {
       set1.clear();
-      ServiceRequest rInvalid = new ServiceRequest("foo", "bar", null);
+      ServiceRequest rInvalid = new ServiceRequest("foo", "bar", null, null, null, null);
       set1.add(rInvalid);
-      ServiceResourceProviderTest.createServices(controller, set1);
+      ServiceResourceProviderTest.createServices(controller, repositoryVersionDAO, set1);
       fail("Expected failure for invalid cluster");
     } catch (AmbariException e) {
       // Expected
@@ -754,11 +803,11 @@ public class AmbariManagementControllerTest {
 
     try {
       set1.clear();
-      ServiceRequest valid1 = new ServiceRequest(cluster1, "HDFS", null);
-      ServiceRequest valid2 = new ServiceRequest(cluster1, "HDFS", null);
+      ServiceRequest valid1 = new ServiceRequest(cluster1, "HDFS", null, null, null, null);
+      ServiceRequest valid2 = new ServiceRequest(cluster1, "HDFS", null, null, null, null);
       set1.add(valid1);
       set1.add(valid2);
-      ServiceResourceProviderTest.createServices(controller, set1);
+      ServiceResourceProviderTest.createServices(controller, repositoryVersionDAO, set1);
       fail("Expected failure for invalid requests");
     } catch (Exception e) {
       // Expected
@@ -766,9 +815,9 @@ public class AmbariManagementControllerTest {
 
     try {
       set1.clear();
-      ServiceRequest valid1 = new ServiceRequest(cluster1, "bar", null);
+      ServiceRequest valid1 = new ServiceRequest(cluster1, "bar", "HDP-0.2", "0.2-1234", State.STARTED.toString(), null);
       set1.add(valid1);
-      ServiceResourceProviderTest.createServices(controller, set1);
+      ServiceResourceProviderTest.createServices(controller, repositoryVersionDAO, set1);
       fail("Expected failure for invalid service");
     } catch (Exception e) {
       // Expected
@@ -777,11 +826,11 @@ public class AmbariManagementControllerTest {
 
     try {
       set1.clear();
-      ServiceRequest valid1 = new ServiceRequest(cluster1, "HDFS", null);
-      ServiceRequest valid2 = new ServiceRequest(cluster2, "HDFS", null);
+      ServiceRequest valid1 = new ServiceRequest(cluster1, "HDFS", "HDP-0.2", "0.2-1234", State.STARTED.toString(), null);
+      ServiceRequest valid2 = new ServiceRequest(cluster2, "HDFS", "HDP-0.2", "0.2-1234", State.STARTED.toString(), null);
       set1.add(valid1);
       set1.add(valid2);
-      ServiceResourceProviderTest.createServices(controller, set1);
+      ServiceResourceProviderTest.createServices(controller, repositoryVersionDAO, set1);
       fail("Expected failure for multiple clusters");
     } catch (Exception e) {
       // Expected
@@ -791,17 +840,17 @@ public class AmbariManagementControllerTest {
     Assert.assertEquals(0, clusters.getCluster(cluster1).getServices().size());
 
     set1.clear();
-    ServiceRequest valid = new ServiceRequest(cluster1, "HDFS", null);
+    ServiceRequest valid = new ServiceRequest(cluster1, "HDFS", "HDP-0.2", "0.2-1234", null, null);
     set1.add(valid);
-    ServiceResourceProviderTest.createServices(controller, set1);
+    ServiceResourceProviderTest.createServices(controller, repositoryVersionDAO, set1);
 
     try {
       set1.clear();
-      ServiceRequest valid1 = new ServiceRequest(cluster1, "HDFS", null);
-      ServiceRequest valid2 = new ServiceRequest(cluster1, "HDFS", null);
+      ServiceRequest valid1 = new ServiceRequest(cluster1, "HDFS", "HDP-0.2", "0.2-1234", State.STARTED.toString(), null);
+      ServiceRequest valid2 = new ServiceRequest(cluster1, "HDFS", "HDP-0.2", "0.2-1234", State.STARTED.toString(), null);
       set1.add(valid1);
       set1.add(valid2);
-      ServiceResourceProviderTest.createServices(controller, set1);
+      ServiceResourceProviderTest.createServices(controller, repositoryVersionDAO, set1);
       fail("Expected failure for existing service");
     } catch (Exception e) {
       // Expected
@@ -840,14 +889,14 @@ public class AmbariManagementControllerTest {
     String serviceName2 = "MAPREDUCE";
     createService(cluster1, serviceName2, State.INIT);
 
-    ServiceRequest r = new ServiceRequest(cluster1, null, null);
+    ServiceRequest r = new ServiceRequest(cluster1, null, null, null, null, null);
     Set<ServiceResponse> response = ServiceResourceProviderTest.getServices(controller, Collections.singleton(r));
     Assert.assertEquals(2, response.size());
 
     for (ServiceResponse svc : response) {
       Assert.assertTrue(svc.getServiceName().equals(serviceName)
           || svc.getServiceName().equals(serviceName2));
-      Assert.assertEquals("HDP-0.1", svc.getDesiredStackVersion());
+      Assert.assertEquals("HDP-0.2", svc.getDesiredStackVersion());
       Assert.assertEquals(State.INIT.toString(), svc.getDesiredState());
     }
   }
@@ -860,18 +909,18 @@ public class AmbariManagementControllerTest {
 
     clusters.addCluster(cluster1, new StackId("HDP-0.1"));
 
-    ServiceRequest valid1 = new ServiceRequest(cluster1, "HDFS", null);
-    ServiceRequest valid2 = new ServiceRequest(cluster1, "MAPREDUCE", null);
+    ServiceRequest valid1 = new ServiceRequest(cluster1, "HDFS", "HDP-0.1", "0.1-1234", null, null);
+    ServiceRequest valid2 = new ServiceRequest(cluster1, "MAPREDUCE", "HDP-0.1", "0.1-1234", null, null);
     set1.add(valid1);
     set1.add(valid2);
-    ServiceResourceProviderTest.createServices(controller, set1);
+    ServiceResourceProviderTest.createServices(controller, repositoryVersionDAO, set1);
 
     try {
-      valid1 = new ServiceRequest(cluster1, "PIG", null);
-      valid2 = new ServiceRequest(cluster1, "MAPREDUCE", null);
+      valid1 = new ServiceRequest(cluster1, "PIG", "HDP-0.1", "0.1-1234", null, null);
+      valid2 = new ServiceRequest(cluster1, "MAPREDUCE", "HDP-0.1", "0.2-1234", null, null);
       set1.add(valid1);
       set1.add(valid2);
-      ServiceResourceProviderTest.createServices(controller, set1);
+      ServiceResourceProviderTest.createServices(controller, repositoryVersionDAO, set1);
       fail("Expected failure for invalid services");
     } catch (AmbariException e) {
       // Expected
@@ -999,11 +1048,15 @@ public class AmbariManagementControllerTest {
     Cluster c1 = clusters.getCluster(cluster1);
     StackId stackId = new StackId("HDP-0.1");
     c1.setDesiredStackVersion(stackId);
-    helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
+
+    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(stackId,
+        stackId.getStackVersion());
+
     c1.createClusterVersion(stackId, stackId.getStackVersion(), "admin",
         RepositoryVersionState.INSTALLING);
-    Service s1 = serviceFactory.createNew(c1, "HDFS");
-    Service s2 = serviceFactory.createNew(c1, "MAPREDUCE");
+
+    Service s1 = serviceFactory.createNew(c1, "HDFS", repositoryVersion);
+    Service s2 = serviceFactory.createNew(c1, "MAPREDUCE", repositoryVersion);
     c1.addService(s1);
     c1.addService(s2);
 
@@ -1314,11 +1367,6 @@ public class AmbariManagementControllerTest {
   }
 
   @Test
-  public void testCreateServiceComponentWithConfigs() {
-    // FIXME after config impl
-  }
-
-  @Test
   public void testCreateServiceComponentMultiple() throws AmbariException, AuthorizationException {
     String cluster1 = getUniqueName();
     String cluster2 = getUniqueName();
@@ -1329,12 +1377,12 @@ public class AmbariManagementControllerTest {
     Cluster c1 = clusters.getCluster(cluster1);
     StackId stackId = new StackId("HDP-0.2");
 
-    helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
+    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
     c1.createClusterVersion(stackId, stackId.getStackVersion(), "admin",
         RepositoryVersionState.INSTALLING);
 
-    Service s1 = serviceFactory.createNew(c1, "HDFS");
-    Service s2 = serviceFactory.createNew(c1, "MAPREDUCE");
+    Service s1 = serviceFactory.createNew(c1, "HDFS", repositoryVersion);
+    Service s2 = serviceFactory.createNew(c1, "MAPREDUCE", repositoryVersion);
     c1.addService(s1);
     c1.addService(s2);
 
@@ -1611,7 +1659,8 @@ public class AmbariManagementControllerTest {
 
 
     StackId stackId = new StackId("HDP-0.2");
-    helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
+    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(stackId,
+        stackId.getStackVersion());
 
     foo.setDesiredStackVersion(stackId);
     foo.setCurrentStackVersion(stackId);
@@ -1644,11 +1693,11 @@ public class AmbariManagementControllerTest {
       // Expected
     }
 
-    Service s1 = serviceFactory.createNew(foo, "HDFS");
+    Service s1 = serviceFactory.createNew(foo, "HDFS", repositoryVersion);
     foo.addService(s1);
-    Service s2 = serviceFactory.createNew(c1, "HDFS");
+    Service s2 = serviceFactory.createNew(c1, "HDFS", repositoryVersion);
     c1.addService(s2);
-    Service s3 = serviceFactory.createNew(c2, "HDFS");
+    Service s3 = serviceFactory.createNew(c2, "HDFS", repositoryVersion);
     c2.addService(s3);
 
 
@@ -1965,7 +2014,9 @@ public class AmbariManagementControllerTest {
     Config c1 = configFactory.createNew(cluster, "hdfs-site", "v1",  properties, propertiesAttributes);
     configs.put(c1.getType(), c1);
 
-    ServiceRequest r = new ServiceRequest(cluster1, serviceName, State.INSTALLED.toString());
+    ServiceRequest r = new ServiceRequest(cluster1, serviceName, "HDP-0.2", "0.2-1234",
+        State.INSTALLED.toString(), null);
+
     Set<ServiceRequest> requests = new HashSet<>();
     requests.add(r);
 
@@ -2016,7 +2067,9 @@ public class AmbariManagementControllerTest {
     configs.put(c1.getType(), c1);
     configs.put(c2.getType(), c2);
 
-    ServiceRequest r = new ServiceRequest(cluster1, serviceName, State.INSTALLED.toString());
+    ServiceRequest r = new ServiceRequest(cluster1, serviceName, "HDP-0.2", "0.2-1234",
+        State.INSTALLED.toString(), null);
+
     Set<ServiceRequest> requests = new HashSet<>();
     requests.add(r);
 
@@ -2124,7 +2177,8 @@ public class AmbariManagementControllerTest {
       }
     }
 
-    r = new ServiceRequest(cluster1, serviceName, State.STARTED.toString());
+    r = new ServiceRequest(cluster1, serviceName, "HDP-0.2", "0.2-1234", State.STARTED.toString(),
+        null);
     requests.clear();
     requests.add(r);
     trackAction = ServiceResourceProviderTest.updateServices(controller, requests, mapRequestProps, true,
@@ -2170,7 +2224,8 @@ public class AmbariManagementControllerTest {
       }
     }
 
-    r = new ServiceRequest(cluster1, serviceName, State.INSTALLED.toString());
+    r = new ServiceRequest(cluster1, serviceName, "HDP-0.2", "0.2-1234", State.INSTALLED.toString(),
+        null);
     requests.clear();
     requests.add(r);
     trackAction = ServiceResourceProviderTest.updateServices(controller, requests, mapRequestProps, true,
@@ -2267,15 +2322,18 @@ public class AmbariManagementControllerTest {
   public void testGetServices() throws AmbariException {
     String cluster1 = getUniqueName();
 
-    clusters.addCluster(cluster1, new StackId("HDP-0.1"));
+    StackId stackId = new StackId("HDP-0.1");
+    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(stackId,
+        stackId.getStackVersion());
+
+    clusters.addCluster(cluster1, stackId);
     Cluster c1 = clusters.getCluster(cluster1);
-    Service s1 = serviceFactory.createNew(c1, "HDFS");
+    Service s1 = serviceFactory.createNew(c1, "HDFS", repositoryVersion);
 
     c1.addService(s1);
-    s1.setDesiredStackVersion(new StackId("HDP-0.1"));
     s1.setDesiredState(State.INSTALLED);
 
-    ServiceRequest r = new ServiceRequest(cluster1, null, null);
+    ServiceRequest r = new ServiceRequest(cluster1, null, null, null, null, null);
     Set<ServiceResponse> resp = ServiceResourceProviderTest.getServices(controller, Collections.singleton(r));
 
     ServiceResponse resp1 = resp.iterator().next();
@@ -2296,18 +2354,22 @@ public class AmbariManagementControllerTest {
     String cluster1 = getUniqueName();
     String cluster2 = getUniqueName();
 
-    clusters.addCluster(cluster1, new StackId("HDP-0.2"));
-    clusters.addCluster(cluster2, new StackId("HDP-0.2"));
+    StackId stackId = new StackId("HDP-0.2");
+    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(stackId,
+        stackId.getStackVersion());
+
+    clusters.addCluster(cluster1, stackId);
+    clusters.addCluster(cluster2, stackId);
     Cluster c1 = clusters.getCluster(cluster1);
     Cluster c2 = clusters.getCluster(cluster2);
-    c1.setDesiredStackVersion(new StackId("HDP-0.2"));
-    c2.setDesiredStackVersion(new StackId("HDP-0.2"));
+    c1.setDesiredStackVersion(stackId);
+    c2.setDesiredStackVersion(stackId);
 
-    Service s1 = serviceFactory.createNew(c1, "HDFS");
-    Service s2 = serviceFactory.createNew(c1, "MAPREDUCE");
-    Service s3 = serviceFactory.createNew(c1, "HBASE");
-    Service s4 = serviceFactory.createNew(c2, "HIVE");
-    Service s5 = serviceFactory.createNew(c2, "ZOOKEEPER");
+    Service s1 = serviceFactory.createNew(c1, "HDFS", repositoryVersion);
+    Service s2 = serviceFactory.createNew(c1, "MAPREDUCE", repositoryVersion);
+    Service s3 = serviceFactory.createNew(c1, "HBASE", repositoryVersion);
+    Service s4 = serviceFactory.createNew(c2, "HIVE", repositoryVersion);
+    Service s5 = serviceFactory.createNew(c2, "ZOOKEEPER", repositoryVersion);
 
     c1.addService(s1);
     c1.addService(s2);
@@ -2319,7 +2381,7 @@ public class AmbariManagementControllerTest {
     s2.setDesiredState(State.INSTALLED);
     s4.setDesiredState(State.INSTALLED);
 
-    ServiceRequest r = new ServiceRequest(null, null, null);
+    ServiceRequest r = new ServiceRequest(null, null, null, null, null, null);
     Set<ServiceResponse> resp;
 
     try {
@@ -2329,35 +2391,35 @@ public class AmbariManagementControllerTest {
       // Expected
     }
 
-    r = new ServiceRequest(c1.getClusterName(), null, null);
+    r = new ServiceRequest(c1.getClusterName(), null, null, null, null, null);
     resp = ServiceResourceProviderTest.getServices(controller, Collections.singleton(r));
     Assert.assertEquals(3, resp.size());
 
-    r = new ServiceRequest(c1.getClusterName(), s2.getName(), null);
+    r = new ServiceRequest(c1.getClusterName(), s2.getName(), null, null, null, null);
     resp = ServiceResourceProviderTest.getServices(controller, Collections.singleton(r));
     Assert.assertEquals(1, resp.size());
     Assert.assertEquals(s2.getName(), resp.iterator().next().getServiceName());
 
     try {
-      r = new ServiceRequest(c2.getClusterName(), s1.getName(), null);
+      r = new ServiceRequest(c2.getClusterName(), s1.getName(), null, null, null, null);
       ServiceResourceProviderTest.getServices(controller, Collections.singleton(r));
       fail("Expected failure for invalid service");
     } catch (Exception e) {
       // Expected
     }
 
-    r = new ServiceRequest(c1.getClusterName(), null, "INSTALLED");
+    r = new ServiceRequest(c1.getClusterName(), null, null, null, "INSTALLED", null);
     resp = ServiceResourceProviderTest.getServices(controller, Collections.singleton(r));
     Assert.assertEquals(2, resp.size());
 
-    r = new ServiceRequest(c2.getClusterName(), null, "INIT");
+    r = new ServiceRequest(c2.getClusterName(), null, null, null, "INIT", null);
     resp = ServiceResourceProviderTest.getServices(controller, Collections.singleton(r));
     Assert.assertEquals(1, resp.size());
 
     ServiceRequest r1, r2, r3;
-    r1 = new ServiceRequest(c1.getClusterName(), null, "INSTALLED");
-    r2 = new ServiceRequest(c2.getClusterName(), null, "INIT");
-    r3 = new ServiceRequest(c2.getClusterName(), null, "INIT");
+    r1 = new ServiceRequest(c1.getClusterName(), null, null, null, "INSTALLED", null);
+    r2 = new ServiceRequest(c2.getClusterName(), null, null, null, "INIT", null);
+    r3 = new ServiceRequest(c2.getClusterName(), null, null, null, "INIT", null);
 
     Set<ServiceRequest> reqs = new HashSet<>();
     reqs.addAll(Arrays.asList(r1, r2, r3));
@@ -2371,15 +2433,18 @@ public class AmbariManagementControllerTest {
   public void testGetServiceComponents() throws AmbariException {
     String cluster1 = getUniqueName();
 
-    clusters.addCluster(cluster1, new StackId("HDP-0.2"));
+    StackId stackId = new StackId("HDP-0.2");
+    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(stackId,
+        stackId.getStackVersion());
+
+    clusters.addCluster(cluster1, stackId);
     Cluster c1 = clusters.getCluster(cluster1);
-    c1.setDesiredStackVersion(new StackId("HDP-0.2"));
-    Service s1 = serviceFactory.createNew(c1, "HDFS");
+    c1.setDesiredStackVersion(stackId);
+    Service s1 = serviceFactory.createNew(c1, "HDFS", repositoryVersion);
     c1.addService(s1);
     s1.setDesiredState(State.INSTALLED);
     ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "DATANODE");
     s1.addServiceComponent(sc1);
-    sc1.setDesiredStackVersion(new StackId("HDP-0.1"));
     sc1.setDesiredState(State.UNINSTALLED);
 
     ServiceComponentRequest r = new ServiceComponentRequest(cluster1,
@@ -2393,7 +2458,7 @@ public class AmbariManagementControllerTest {
     Assert.assertEquals(c1.getClusterName(), resp.getClusterName());
     Assert.assertEquals(sc1.getName(), resp.getComponentName());
     Assert.assertEquals(s1.getName(), resp.getServiceName());
-    Assert.assertEquals("HDP-0.1", resp.getDesiredStackVersion());
+    Assert.assertEquals("HDP-0.2", resp.getDesiredStackVersion());
     Assert.assertEquals(sc1.getDesiredState().toString(),
         resp.getDesiredState());
     Assert.assertEquals(c1.getClusterId(), resp.getClusterId().longValue());
@@ -2406,16 +2471,20 @@ public class AmbariManagementControllerTest {
     String cluster1 = getUniqueName();
     String cluster2 = getUniqueName();
 
-    clusters.addCluster(cluster1, new StackId("HDP-0.2"));
-    clusters.addCluster(cluster2, new StackId("HDP-0.2"));
+    StackId stackId = new StackId("HDP-0.2");
+    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(stackId,
+        stackId.getStackVersion());
+
+    clusters.addCluster(cluster1, stackId);
+    clusters.addCluster(cluster2, stackId);
     Cluster c1 = clusters.getCluster(cluster1);
     Cluster c2 = clusters.getCluster(cluster2);
 
-    Service s1 = serviceFactory.createNew(c1, "HDFS");
-    Service s2 = serviceFactory.createNew(c1, "MAPREDUCE");
-    Service s3 = serviceFactory.createNew(c1, "HBASE");
-    Service s4 = serviceFactory.createNew(c2, "HIVE");
-    Service s5 = serviceFactory.createNew(c2, "ZOOKEEPER");
+    Service s1 = serviceFactory.createNew(c1, "HDFS", repositoryVersion);
+    Service s2 = serviceFactory.createNew(c1, "MAPREDUCE", repositoryVersion);
+    Service s3 = serviceFactory.createNew(c1, "HBASE", repositoryVersion);
+    Service s4 = serviceFactory.createNew(c2, "HIVE", repositoryVersion);
+    Service s5 = serviceFactory.createNew(c2, "ZOOKEEPER", repositoryVersion);
 
     c1.addService(s1);
     c1.addService(s2);
@@ -2429,16 +2498,12 @@ public class AmbariManagementControllerTest {
 
     ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "DATANODE");
     ServiceComponent sc2 = serviceComponentFactory.createNew(s1, "NAMENODE");
-    ServiceComponent sc3 = serviceComponentFactory.createNew(s3,
-        "HBASE_REGIONSERVER");
+    ServiceComponent sc3 = serviceComponentFactory.createNew(s3, "HBASE_REGIONSERVER");
     ServiceComponent sc4 = serviceComponentFactory.createNew(s4, "HIVE_SERVER");
     ServiceComponent sc5 = serviceComponentFactory.createNew(s4, "HIVE_CLIENT");
-    ServiceComponent sc6 = serviceComponentFactory.createNew(s4,
-        "MYSQL_SERVER");
-    ServiceComponent sc7 = serviceComponentFactory.createNew(s5,
-        "ZOOKEEPER_SERVER");
-    ServiceComponent sc8 = serviceComponentFactory.createNew(s5,
-        "ZOOKEEPER_CLIENT");
+    ServiceComponent sc6 = serviceComponentFactory.createNew(s4, "MYSQL_SERVER");
+    ServiceComponent sc7 = serviceComponentFactory.createNew(s5, "ZOOKEEPER_SERVER");
+    ServiceComponent sc8 = serviceComponentFactory.createNew(s5, "ZOOKEEPER_CLIENT");
 
     s1.addServiceComponent(sc1);
     s1.addServiceComponent(sc2);
@@ -2528,7 +2593,9 @@ public class AmbariManagementControllerTest {
     String host1 = getUniqueName();
 
     Cluster c1 = setupClusterWithHosts(cluster1, "HDP-0.1", Lists.newArrayList(host1), "centos5");
-    Service s1 = serviceFactory.createNew(c1, "HDFS");
+    RepositoryVersionEntity repositoryVersion = repositoryVersion01;
+
+    Service s1 = serviceFactory.createNew(c1, "HDFS", repositoryVersion);
     c1.addService(s1);
     ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "DATANODE");
     s1.addServiceComponent(sc1);
@@ -2537,8 +2604,6 @@ public class AmbariManagementControllerTest {
     sc1.addServiceComponentHost(sch1);
     sch1.setDesiredState(State.INSTALLED);
     sch1.setState(State.INSTALLING);
-    sch1.setDesiredStackVersion(new StackId("HDP-1.2.0"));
-    sch1.setStackVersion(new StackId("HDP-0.1"));
 
     sch1.updateActualConfigs(new HashMap<String, Map<String,String>>() {{
       put("global", new HashMap<String,String>() {{ put("tag", "version1"); }});
@@ -2562,8 +2627,8 @@ public class AmbariManagementControllerTest {
         resp.getDesiredState());
     Assert.assertEquals(sch1.getState().toString(),
         resp.getLiveState());
-    Assert.assertEquals(sch1.getStackVersion().getStackId(),
-        resp.getStackVersion());
+    Assert.assertEquals(repositoryVersion.getStackId(),
+        sch1.getServiceComponent().getDesiredStackVersion());
     Assert.assertNotNull(resp.getActualConfigs());
     Assert.assertEquals(1, resp.getActualConfigs().size());
   }
@@ -2760,17 +2825,14 @@ public class AmbariManagementControllerTest {
   public void testHbaseDecommission() throws AmbariException, AuthorizationException {
     String cluster1 = getUniqueName();
     createCluster(cluster1);
-    clusters.getCluster(cluster1)
-        .setDesiredStackVersion(new StackId("HDP-2.0.7"));
+    clusters.getCluster(cluster1).setDesiredStackVersion(new StackId("HDP-2.0.7"));
     String serviceName = "HBASE";
-    createService(cluster1, serviceName, null);
+    createService(cluster1, serviceName, repositoryVersion207, null);
     String componentName1 = "HBASE_MASTER";
     String componentName2 = "HBASE_REGIONSERVER";
 
-    createServiceComponent(cluster1, serviceName, componentName1,
-        State.INIT);
-    createServiceComponent(cluster1, serviceName, componentName2,
-        State.INIT);
+    createServiceComponent(cluster1, serviceName, componentName1, State.INIT);
+    createServiceComponent(cluster1, serviceName, componentName2, State.INIT);
 
     final String host1 = getUniqueName();
     final String host2 = getUniqueName();
@@ -2778,14 +2840,10 @@ public class AmbariManagementControllerTest {
     addHostToCluster(host1, cluster1);
     addHostToCluster(host2, cluster1);
 
-    createServiceComponentHost(cluster1, serviceName, componentName1,
-        host1, null);
-    createServiceComponentHost(cluster1, serviceName, componentName1,
-        host2, null);
-    createServiceComponentHost(cluster1, serviceName, componentName2,
-        host1, null);
-    createServiceComponentHost(cluster1, serviceName, componentName2,
-        host2, null);
+    createServiceComponentHost(cluster1, serviceName, componentName1, host1, null);
+    createServiceComponentHost(cluster1, serviceName, componentName1, host2, null);
+    createServiceComponentHost(cluster1, serviceName, componentName2, host1, null);
+    createServiceComponentHost(cluster1, serviceName, componentName2, host2, null);
 
     RequestOperationLevel level = new RequestOperationLevel(
             Resource.Type.HostComponent, cluster1, null, null, null);
@@ -2931,9 +2989,11 @@ public class AmbariManagementControllerTest {
         }},
         "centos5");
 
-    Service s1 = serviceFactory.createNew(c1, "HDFS");
-    Service s2 = serviceFactory.createNew(c1, "MAPREDUCE");
-    Service s3 = serviceFactory.createNew(c1, "HBASE");
+    RepositoryVersionEntity repositoryVersion = repositoryVersion02;
+
+    Service s1 = serviceFactory.createNew(c1, "HDFS", repositoryVersion);
+    Service s2 = serviceFactory.createNew(c1, "MAPREDUCE", repositoryVersion);
+    Service s3 = serviceFactory.createNew(c1, "HBASE", repositoryVersion);
 
     c1.addService(s1);
     c1.addService(s2);
@@ -3161,8 +3221,8 @@ public class AmbariManagementControllerTest {
     ServiceRequest r;
 
     try {
-      r = new ServiceRequest(cluster1, serviceName,
-          State.INSTALLING.toString());
+      r = new ServiceRequest(cluster1, serviceName, "HDP-0.2", "0.2-1234",
+          State.INSTALLING.toString(), null);
       reqs.clear();
       reqs.add(r);
       ServiceResourceProviderTest.updateServices(controller, reqs, mapRequestProps, true, false);
@@ -3171,8 +3231,8 @@ public class AmbariManagementControllerTest {
       // Expected
     }
 
-    r = new ServiceRequest(cluster1, serviceName,
-        State.INSTALLED.toString());
+    r = new ServiceRequest(cluster1, serviceName, "HDP-0.2", "0.2-1234", State.INSTALLED.toString(),
+        null);
     reqs.clear();
     reqs.add(r);
     RequestStatusResponse trackAction = ServiceResourceProviderTest.updateServices(controller, reqs,
@@ -3216,10 +3276,10 @@ public class AmbariManagementControllerTest {
     ServiceRequest req1, req2;
     try {
       reqs.clear();
-      req1 = new ServiceRequest(cluster1, serviceName1,
-          State.INSTALLED.toString());
-      req2 = new ServiceRequest(cluster2, serviceName2,
-          State.INSTALLED.toString());
+      req1 = new ServiceRequest(cluster1, serviceName1, "HDP-0.2", "0.2-1234",
+          State.INSTALLED.toString(), null);
+      req2 = new ServiceRequest(cluster2, serviceName2, "HDP-0.2", "0.2-1234",
+          State.INSTALLED.toString(), null);
       reqs.add(req1);
       reqs.add(req2);
       ServiceResourceProviderTest.updateServices(controller, reqs, mapRequestProps, true, false);
@@ -3230,10 +3290,10 @@ public class AmbariManagementControllerTest {
 
     try {
       reqs.clear();
-      req1 = new ServiceRequest(cluster1, serviceName1,
-          State.INSTALLED.toString());
-      req2 = new ServiceRequest(cluster1, serviceName1,
-          State.INSTALLED.toString());
+      req1 = new ServiceRequest(cluster1, serviceName1, "HDP-0.2", "0.2-1234",
+          State.INSTALLED.toString(), null);
+      req2 = new ServiceRequest(cluster1, serviceName1, "HDP-0.2", "0.2-1234",
+          State.INSTALLED.toString(), null);
       reqs.add(req1);
       reqs.add(req2);
       ServiceResourceProviderTest.updateServices(controller, reqs, mapRequestProps, true, false);
@@ -3247,10 +3307,10 @@ public class AmbariManagementControllerTest {
 
     try {
       reqs.clear();
-      req1 = new ServiceRequest(cluster1, serviceName1,
-          State.INSTALLED.toString());
-      req2 = new ServiceRequest(cluster1, serviceName2,
-          State.STARTED.toString());
+      req1 = new ServiceRequest(cluster1, serviceName1, "HDP-0.2", "0.2-1234",
+          State.INSTALLED.toString(), null);
+      req2 = new ServiceRequest(cluster1, serviceName2, "HDP-0.2", "0.2-1234",
+          State.STARTED.toString(), null);
       reqs.add(req1);
       reqs.add(req2);
       ServiceResourceProviderTest.updateServices(controller, reqs, mapRequestProps, true, false);
@@ -3359,8 +3419,8 @@ public class AmbariManagementControllerTest {
     ServiceRequest req1, req2;
     try {
       reqs.clear();
-      req1 = new ServiceRequest(cluster1, serviceName1,
-          State.STARTED.toString());
+      req1 = new ServiceRequest(cluster1, serviceName1, "HDP-0.2", "0.2-1234",
+          State.STARTED.toString(), null);
       reqs.add(req1);
       ServiceResourceProviderTest.updateServices(controller, reqs, mapRequestProps, true, false);
       fail("Expected failure for invalid state update");
@@ -3386,8 +3446,8 @@ public class AmbariManagementControllerTest {
 
     try {
       reqs.clear();
-      req1 = new ServiceRequest(cluster1, serviceName1,
-          State.STARTED.toString());
+      req1 = new ServiceRequest(cluster1, serviceName1, "HDP-0.2", "0.2-1234",
+          State.STARTED.toString(), null);
       reqs.add(req1);
       ServiceResourceProviderTest.updateServices(controller, reqs, mapRequestProps, true, false);
       fail("Expected failure for invalid state update");
@@ -3412,10 +3472,10 @@ public class AmbariManagementControllerTest {
     sch5.setState(State.INSTALLED);
 
     reqs.clear();
-    req1 = new ServiceRequest(cluster1, serviceName1,
-        State.STARTED.toString());
-    req2 = new ServiceRequest(cluster1, serviceName2,
-        State.STARTED.toString());
+    req1 = new ServiceRequest(cluster1, serviceName1, "HDP-0.2", "0.2-1234",
+        State.STARTED.toString(), null);
+    req2 = new ServiceRequest(cluster1, serviceName2, "HDP-0.2", "0.2-1234",
+        State.STARTED.toString(), null);
     reqs.add(req1);
     reqs.add(req2);
     RequestStatusResponse trackAction = ServiceResourceProviderTest.updateServices(controller, reqs,
@@ -3500,10 +3560,10 @@ public class AmbariManagementControllerTest {
 
     // test no-op
     reqs.clear();
-    req1 = new ServiceRequest(cluster1, serviceName1,
-        State.STARTED.toString());
-    req2 = new ServiceRequest(cluster1, serviceName2,
-        State.STARTED.toString());
+    req1 = new ServiceRequest(cluster1, serviceName1, "HDP-0.2", "0.2-1234",
+        State.STARTED.toString(), null);
+    req2 = new ServiceRequest(cluster1, serviceName2, "HDP-0.2", "0.2-1234",
+        State.STARTED.toString(), null);
     reqs.add(req1);
     reqs.add(req2);
     trackAction = ServiceResourceProviderTest.updateServices(controller, reqs, mapRequestProps, true,
@@ -3878,327 +3938,6 @@ public class AmbariManagementControllerTest {
     Assert.assertNull(trackAction);
   }
 
-  @Ignore
-  @Test
-  public void testServiceComponentHostUpdateStackId() throws Exception {
-    String cluster1 = getUniqueName();
-    createCluster(cluster1);
-    String serviceName1 = "HDFS";
-    createService(cluster1, serviceName1, null);
-    String componentName1 = "NAMENODE";
-    String componentName2 = "DATANODE";
-    createServiceComponent(cluster1, serviceName1, componentName1,
-        State.INIT);
-    createServiceComponent(cluster1, serviceName1, componentName2,
-        State.INIT);
-    String host1 = getUniqueName();
-    String host2 = getUniqueName();
-    addHostToCluster(host1, cluster1);
-    addHostToCluster(host2, cluster1);
-
-    Set<ServiceComponentHostRequest> set1 =
-      new HashSet<>();
-    ServiceComponentHostRequest r1 =
-        new ServiceComponentHostRequest(cluster1, serviceName1,
-            componentName1, host1, State.INIT.toString());
-    ServiceComponentHostRequest r2 =
-        new ServiceComponentHostRequest(cluster1, serviceName1,
-            componentName1, host2, State.INIT.toString());
-    ServiceComponentHostRequest r3 =
-        new ServiceComponentHostRequest(cluster1, serviceName1,
-            componentName2, host1, State.INIT.toString());
-
-    set1.add(r1);
-    set1.add(r2);
-    set1.add(r3);
-    controller.createHostComponents(set1);
-
-    Cluster c1 = clusters.getCluster(cluster1);
-    Service s1 = c1.getService(serviceName1);
-    ServiceComponent sc1 = s1.getServiceComponent(componentName1);
-    ServiceComponent sc2 = s1.getServiceComponent(componentName2);
-    ServiceComponentHost sch1 = sc1.getServiceComponentHost(host1);
-    ServiceComponentHost sch2 = sc1.getServiceComponentHost(host2);
-    ServiceComponentHost sch3 = sc2.getServiceComponentHost(host1);
-
-    s1.setDesiredState(State.INSTALLED);
-    sc1.setDesiredState(State.INSTALLED);
-    sc2.setDesiredState(State.INSTALLED);
-
-    ServiceComponentHostRequest req1;
-    ServiceComponentHostRequest req2;
-    ServiceComponentHostRequest req3;
-    Set<ServiceComponentHostRequest> reqs =
-      new HashSet<>();
-
-    StackId newStack = new StackId("HDP-0.2");
-    StackId oldStack = new StackId("HDP-0.1");
-    c1.setCurrentStackVersion(newStack);
-    c1.setDesiredStackVersion(newStack);
-    sch1.setState(State.INSTALLED);
-    sch2.setState(State.UPGRADING);
-    sch1.setDesiredState(State.INSTALLED);
-    sch2.setDesiredState(State.INSTALLED);
-
-    sch1.setStackVersion(oldStack);
-    sch2.setStackVersion(oldStack);
-    sch1.setDesiredStackVersion(newStack);
-    sch2.setDesiredStackVersion(oldStack);
-
-    reqs.clear();
-    req1 = new ServiceComponentHostRequest(cluster1, serviceName1,
-        componentName1, host1,
-        State.INSTALLED.toString());
-    req1.setDesiredStackId("HDP-0.2");
-    reqs.add(req1);
-    req2 = new ServiceComponentHostRequest(cluster1, serviceName1,
-        componentName1, host2,
-        State.INSTALLED.toString());
-    req2.setDesiredStackId("HDP-0.2");
-    reqs.add(req2);
-
-    Map<String,String> mapRequestProps = new HashMap<>();
-    mapRequestProps.put("context", "testServiceComponentHostUpdateStackId");
-
-    RequestStatusResponse resp = updateHostComponents(reqs, mapRequestProps, true);
-    List<Stage> stages = actionDB.getAllStages(resp.getRequestId());
-    Assert.assertEquals(1, stages.size());
-    Assert.assertEquals(2, stages.get(0).getOrderedHostRoleCommands().size());
-    Assert.assertEquals("testServiceComponentHostUpdateStackId", stages.get(0).getRequestContext());
-    Assert.assertEquals(State.UPGRADING, sch1.getState());
-    Assert.assertEquals(State.UPGRADING, sch2.getState());
-    Assert.assertTrue(sch1.getDesiredStackVersion().compareTo(newStack) == 0);
-    Assert.assertTrue(sch2.getDesiredStackVersion().compareTo(newStack) == 0);
-    for (HostRoleCommand command : stages.get(0).getOrderedHostRoleCommands()) {
-      ExecutionCommand execCommand = command.getExecutionCommandWrapper().getExecutionCommand();
-      Assert.assertTrue(execCommand.getCommandParams().containsKey("source_stack_version"));
-      Assert.assertTrue(execCommand.getCommandParams().containsKey("target_stack_version"));
-      Assert.assertEquals(RoleCommand.UPGRADE, execCommand.getRoleCommand());
-    }
-
-    sch1.setState(State.INSTALLED);
-    sch1.setDesiredState(State.INSTALLED);
-    sch2.setState(State.UPGRADING);
-    sch2.setDesiredState(State.INSTALLED);
-    sch3.setState(State.UPGRADING);
-    sch3.setDesiredState(State.INSTALLED);
-
-    sch3.setStackVersion(oldStack);
-    sch3.setDesiredStackVersion(newStack);
-
-    reqs.clear();
-    req1 = new ServiceComponentHostRequest(cluster1, serviceName1,
-        componentName1, host1,
-        State.INSTALLED.toString());
-    req1.setDesiredStackId("HDP-0.2");
-    reqs.add(req1);
-    req2 = new ServiceComponentHostRequest(cluster1, serviceName1,
-        componentName1, host2,
-        State.INSTALLED.toString());
-    req2.setDesiredStackId("HDP-0.2");
-    reqs.add(req2);
-    req3 = new ServiceComponentHostRequest(cluster1, serviceName1,
-        componentName2, host1,
-        State.INSTALLED.toString());
-    req3.setDesiredStackId("HDP-0.2");
-    reqs.add(req3);
-
-    resp = updateHostComponents(reqs, Collections.<String, String>emptyMap(), true);
-    stages = actionDB.getAllStages(resp.getRequestId());
-    Assert.assertEquals(2, stages.size());
-    Assert.assertEquals(2, stages.get(0).getOrderedHostRoleCommands().size());
-    Assert.assertEquals("", stages.get(0).getRequestContext());
-    Assert.assertEquals(State.UPGRADING, sch1.getState());
-    Assert.assertEquals(State.UPGRADING, sch2.getState());
-    Assert.assertEquals(State.UPGRADING, sch3.getState());
-    Assert.assertTrue(sch1.getDesiredStackVersion().compareTo(newStack) == 0);
-    Assert.assertTrue(sch2.getDesiredStackVersion().compareTo(newStack) == 0);
-    Assert.assertTrue(sch3.getDesiredStackVersion().compareTo(newStack) == 0);
-    for (Stage stage : stages) {
-      for (HostRoleCommand command : stage.getOrderedHostRoleCommands()) {
-        ExecutionCommand execCommand = command.getExecutionCommandWrapper().getExecutionCommand();
-        Assert.assertTrue(execCommand.getCommandParams().containsKey("source_stack_version"));
-        Assert.assertTrue(execCommand.getCommandParams().containsKey("target_stack_version"));
-        Assert.assertEquals("{\"stackName\":\"HDP\",\"stackVersion\":\"0.2\"}",
-            execCommand.getCommandParams().get("target_stack_version"));
-        Assert.assertEquals(RoleCommand.UPGRADE, execCommand.getRoleCommand());
-      }
-    }
-  }
-
-  @Ignore
-  @Test
-  public void testServiceComponentHostUpdateStackIdError() throws Exception {
-    String cluster1 = getUniqueName();
-    createCluster(cluster1);
-    String serviceName1 = "HDFS";
-    createService(cluster1, serviceName1, null);
-    String componentName1 = "NAMENODE";
-    createServiceComponent(cluster1, serviceName1, componentName1,
-        State.INIT);
-    String host1 = getUniqueName();
-    String host2 = getUniqueName();
-    addHostToCluster(host1, cluster1);
-    addHostToCluster(host2, cluster1);
-
-    Set<ServiceComponentHostRequest> set1 =
-      new HashSet<>();
-    ServiceComponentHostRequest r1 =
-        new ServiceComponentHostRequest(cluster1, serviceName1,
-            componentName1, host1, State.INIT.toString());
-    ServiceComponentHostRequest r2 =
-        new ServiceComponentHostRequest(cluster1, serviceName1,
-            componentName1, host2, State.INIT.toString());
-
-    set1.add(r1);
-    set1.add(r2);
-    controller.createHostComponents(set1);
-
-    Cluster c1 = clusters.getCluster(cluster1);
-    Service s1 = c1.getService(serviceName1);
-    ServiceComponent sc1 = s1.getServiceComponent(componentName1);
-    ServiceComponentHost sch1 = sc1.getServiceComponentHost(host1);
-    ServiceComponentHost sch2 = sc1.getServiceComponentHost(host2);
-
-    s1.setDesiredState(State.INIT);
-    sc1.setDesiredState(State.INIT);
-
-    ServiceComponentHostRequest req1;
-    ServiceComponentHostRequest req2;
-    Set<ServiceComponentHostRequest> reqs =
-      new HashSet<>();
-
-    reqs.clear();
-    req1 = new ServiceComponentHostRequest(cluster1, serviceName1,
-        componentName1, host1, State.STARTED.toString());
-    req1.setDesiredStackId("invalid stack id");
-    reqs.add(req1);
-    updateHostAndCompareExpectedFailure(reqs, "Invalid desired stack id");
-
-    c1.setCurrentStackVersion(new StackId("HDP-0.0"));
-    sch1.setStackVersion(new StackId("HDP-0.1"));
-    reqs.clear();
-    req1 = new ServiceComponentHostRequest(cluster1, serviceName1,
-        componentName1, host1,
-        State.STARTED.toString());
-    req1.setDesiredStackId("HDP-0.2");
-    reqs.add(req1);
-    updateHostAndCompareExpectedFailure(reqs, "Cluster has not been upgraded yet");
-
-    c1.setCurrentStackVersion(new StackId("HDP2-0.1"));
-    reqs.clear();
-    req1 = new ServiceComponentHostRequest(cluster1, serviceName1,
-        componentName1, host1,
-        State.STARTED.toString());
-    req1.setDesiredStackId("HDP-0.2");
-    reqs.add(req1);
-    updateHostAndCompareExpectedFailure(reqs, "Deployed stack name and requested stack names");
-
-    c1.setCurrentStackVersion(new StackId("HDP-0.2"));
-    reqs.clear();
-    req1 = new ServiceComponentHostRequest(cluster1, serviceName1,
-        componentName1, host1,
-        State.STARTED.toString());
-    req1.setDesiredStackId("HDP-0.3");
-    reqs.add(req1);
-    updateHostAndCompareExpectedFailure(reqs, "Component host can only be upgraded to the same version");
-
-    c1.setCurrentStackVersion(new StackId("HDP-0.2"));
-    sch1.setState(State.STARTED);
-    reqs.clear();
-    req1 = new ServiceComponentHostRequest(cluster1, serviceName1,
-        componentName1, host1,
-        State.STARTED.toString());
-    req1.setDesiredStackId("HDP-0.2");
-    reqs.add(req1);
-    updateHostAndCompareExpectedFailure(reqs, "Component host is in an invalid state for upgrade");
-
-    c1.setCurrentStackVersion(new StackId("HDP-0.2"));
-    sch1.setState(State.UPGRADING);
-    reqs.clear();
-    req1 = new ServiceComponentHostRequest(cluster1, serviceName1,
-        componentName1, host1,
-        State.STARTED.toString());
-    req1.setDesiredStackId("HDP-0.2");
-    reqs.add(req1);
-    updateHostAndCompareExpectedFailure(reqs, "The desired state for an upgrade request must be");
-
-    c1.setCurrentStackVersion(new StackId("HDP-0.2"));
-    sch1.setState(State.UPGRADING);
-    reqs.clear();
-    req1 = new ServiceComponentHostRequest(cluster1, serviceName1,
-        componentName1, host1, null);
-    req1.setDesiredStackId("HDP-0.2");
-    reqs.add(req1);
-    updateHostAndCompareExpectedFailure(reqs, "The desired state for an upgrade request must be");
-
-    c1.setCurrentStackVersion(new StackId("HDP-0.2"));
-    sch1.setState(State.INSTALLED);
-    sch1.setDesiredState(State.INSTALLED);
-    sch2.setState(State.INSTALLED);
-    sch2.setDesiredState(State.INSTALLED);
-    reqs.clear();
-    req1 = new ServiceComponentHostRequest(cluster1, serviceName1,
-        componentName1, host1,
-        State.INSTALLED.toString());
-    req1.setDesiredStackId("HDP-0.2");
-    reqs.add(req1);
-    req2 = new ServiceComponentHostRequest(cluster1, serviceName1,
-        componentName1, host2,
-        State.STARTED.toString());
-    reqs.add(req2);
-    updateHostAndCompareExpectedFailure(reqs, "An upgrade request cannot be combined with other");
-
-    c1.setCurrentStackVersion(new StackId("HDP-0.2"));
-    sch1.setState(State.INSTALLED);
-    sch1.setStackVersion(new StackId("HDP-0.2"));
-    reqs.clear();
-    req1 = new ServiceComponentHostRequest(cluster1, serviceName1,
-        componentName1, host1,
-        null);
-    req1.setDesiredStackId("HDP-0.2");
-    reqs.add(req1);
-
-    RequestStatusResponse resp = updateHostComponents(reqs,
-        Collections.<String,String>emptyMap(), true);
-    Assert.assertNull(resp);
-
-    c1.setCurrentStackVersion(new StackId("HDP-0.2"));
-    sch1.setState(State.INSTALLED);
-    sch1.setStackVersion(new StackId("HDP-0.2"));
-    reqs.clear();
-    req1 = new ServiceComponentHostRequest(cluster1, serviceName1,
-        componentName1, host1,
-        State.INSTALLED.toString());
-    req1.setDesiredStackId("HDP-0.2");
-    reqs.add(req1);
-    resp = updateHostComponents(reqs, Collections.<String,String>emptyMap(), true);
-    Assert.assertNull(resp);
-  }
-
-  private void updateHostAndCompareExpectedFailure(Set<ServiceComponentHostRequest> reqs,
-                                                   String expectedMessage) {
-    try {
-      updateHostComponents(reqs, Collections.<String,String>emptyMap(), true);
-      fail("Expected failure: " + expectedMessage);
-    } catch (Exception e) {
-      LOG.info("Actual exception message: " + e.getMessage());
-      Assert.assertTrue(e.getMessage().contains(expectedMessage));
-    }
-  }
-
-  @Test
-  public void testStartClientComponent() {
-    // FIXME write test after meta data integration
-    // start should fail
-  }
-
-  @Test
-  public void testStartClientHostComponent() {
-    // FIXME write test after meta data integration
-    // start should fail
-  }
-
   @Test
   public void testCreateCustomActions() throws Exception {
     final String cluster1 = getUniqueName();
@@ -4235,8 +3974,10 @@ public class AmbariManagementControllerTest {
           put("test.password", "supersecret");
         }}, new HashMap<String, Map<String,String>>());
 
-    Service hdfs = cluster.addService("HDFS");
-    Service mapred = cluster.addService("YARN");
+    RepositoryVersionEntity repositoryVersion = repositoryVersion206;
+
+    Service hdfs = cluster.addService("HDFS", repositoryVersion);
+    Service mapred = cluster.addService("YARN", repositoryVersion);
 
     hdfs.addServiceComponent(Role.HDFS_CLIENT.name());
     hdfs.addServiceComponent(Role.NAMENODE.name());
@@ -4396,7 +4137,9 @@ public class AmbariManagementControllerTest {
         put("key1", "value1");
       }}, new HashMap<String, Map<String,String>>());
 
-    Service hdfs = cluster.addService("HDFS");
+    RepositoryVersionEntity repositoryVersion = repositoryVersion207;
+
+    Service hdfs = cluster.addService("HDFS", repositoryVersion);
 
     hdfs.addServiceComponent(Role.HDFS_CLIENT.name());
     hdfs.addServiceComponent(Role.NAMENODE.name());
@@ -4485,6 +4228,8 @@ public class AmbariManagementControllerTest {
     cluster.setDesiredStackVersion(new StackId("HDP-2.0.7"));
     cluster.setCurrentStackVersion(new StackId("HDP-2.0.7"));
 
+    RepositoryVersionEntity repositoryVersion = repositoryVersion207;
+
     ConfigFactory cf = injector.getInstance(ConfigFactory.class);
     Config config1 = cf.createNew(cluster, "global", "version1",
         new HashMap<String, String>() {{
@@ -4501,8 +4246,8 @@ public class AmbariManagementControllerTest {
     cluster.addDesiredConfig("_test", Collections.singleton(config1));
     cluster.addDesiredConfig("_test", Collections.singleton(config2));
 
-    Service hdfs = cluster.addService("HDFS");
-    Service hive = cluster.addService("HIVE");
+    Service hdfs = cluster.addService("HDFS", repositoryVersion);
+    Service hive = cluster.addService("HIVE", repositoryVersion);
 
     hdfs.addServiceComponent(Role.HDFS_CLIENT.name());
     hdfs.addServiceComponent(Role.NAMENODE.name());
@@ -4769,6 +4514,8 @@ public class AmbariManagementControllerTest {
     cluster.setDesiredStackVersion(new StackId("HDP-0.1"));
     cluster.setCurrentStackVersion(new StackId("HDP-0.1"));
 
+    RepositoryVersionEntity repositoryVersion = repositoryVersion01;
+
     ConfigFactory cf = injector.getInstance(ConfigFactory.class);
     Config config1 = cf.createNew(cluster, "global", "version1",
         new HashMap<String, String>(){{ put("key1", "value1"); }}, new HashMap<String, Map<String,String>>());
@@ -4781,8 +4528,8 @@ public class AmbariManagementControllerTest {
     cluster.addDesiredConfig("_test", Collections.singleton(config1));
     cluster.addDesiredConfig("_test", Collections.singleton(config2));
 
-    Service hdfs = cluster.addService("HDFS");
-    Service mapReduce = cluster.addService("MAPREDUCE");
+    Service hdfs = cluster.addService("HDFS", repositoryVersion);
+    Service mapReduce = cluster.addService("MAPREDUCE", repositoryVersion);
 
     hdfs.addServiceComponent(Role.HDFS_CLIENT.name());
     mapReduce.addServiceComponent(Role.MAPREDUCE_CLIENT.name());
@@ -4996,8 +4743,8 @@ public class AmbariManagementControllerTest {
             .getServiceComponentHost(host2));
 
     // Install
-    ServiceRequest r = new ServiceRequest(cluster1, serviceName,
-        State.INSTALLED.toString());
+    ServiceRequest r = new ServiceRequest(cluster1, serviceName, "HDP-0.1", "0.1-1234",
+        State.INSTALLED.toString(), null);
     Set<ServiceRequest> requests = new HashSet<>();
     requests.add(r);
 
@@ -5016,8 +4763,8 @@ public class AmbariManagementControllerTest {
     }
 
     // Start
-    r = new ServiceRequest(cluster1, serviceName,
-            State.STARTED.toString());
+    r = new ServiceRequest(cluster1, serviceName, "HDP-0.1", "0.1-1234",
+        State.STARTED.toString(), null);
     requests.clear();
     requests.add(r);
     ServiceResourceProviderTest.updateServices(controller, requests, mapRequestProps, true, false);
@@ -5108,7 +4855,7 @@ public class AmbariManagementControllerTest {
     configVersions.put("typeC", "v2");
     configVersions.put("typeE", "v1");
     sReqs.clear();
-    sReqs.add(new ServiceRequest(cluster1, serviceName, null));
+    sReqs.add(new ServiceRequest(cluster1, serviceName, "HDP-0.1", "0.1-1234", null, null));
     Assert.assertNull(ServiceResourceProviderTest.updateServices(controller, sReqs, mapRequestProps, true, false));
 
 
@@ -5257,7 +5004,7 @@ public class AmbariManagementControllerTest {
     configVersions.put("typeC", "v2");
     configVersions.put("typeE", "v1");
     sReqs.clear();
-    sReqs.add(new ServiceRequest(cluster1, serviceName, null));
+    sReqs.add(new ServiceRequest(cluster1, serviceName, "HDP-0.1", "0.1-1234", null, null));
     Assert.assertNull(ServiceResourceProviderTest.updateServices(controller, sReqs, mapRequestProps, true, false));
 
     // update configs at SCH level
@@ -5323,7 +5070,7 @@ public class AmbariManagementControllerTest {
       host2, null);
 
     // Install
-    ServiceRequest r = new ServiceRequest(cluster1, serviceName,
+    ServiceRequest r = new ServiceRequest(cluster1, serviceName, "HDP-0.1", "0.1-1234",
       State.INSTALLED.toString());
     Set<ServiceRequest> requests = new HashSet<>();
     requests.add(r);
@@ -5423,7 +5170,7 @@ public class AmbariManagementControllerTest {
     configVersions.put("core-site", "version1");
     configVersions.put("hdfs-site", "version1");
     sReqs.clear();
-    sReqs.add(new ServiceRequest(cluster1, serviceName, null));
+    sReqs.add(new ServiceRequest(cluster1, serviceName, "HDP-0.1", "0.1-1234", null));
     Assert.assertNull(ServiceResourceProviderTest.updateServices(controller, sReqs, mapRequestProps, true, false));
 
     // Reconfigure S Level
@@ -5431,7 +5178,7 @@ public class AmbariManagementControllerTest {
     configVersions.put("core-site", "version122");
 
     sReqs.clear();
-    sReqs.add(new ServiceRequest(cluster1, serviceName, null));
+    sReqs.add(new ServiceRequest(cluster1, serviceName, "HDP-0.1", "0.1-1234", null));
     Assert.assertNull(ServiceResourceProviderTest.updateServices(controller, sReqs, mapRequestProps, true, false));
 
     entityManager.clear();
@@ -5724,13 +5471,10 @@ public class AmbariManagementControllerTest {
   public void testClientServiceSmokeTests() throws AmbariException, AuthorizationException {
     String cluster1 = getUniqueName();
     createCluster(cluster1);
-    clusters.getCluster(cluster1)
-        .setDesiredStackVersion(new StackId("HDP-0.1"));
     String serviceName = "PIG";
-    createService(cluster1, serviceName, null);
+    createService(cluster1, serviceName, repositoryVersion01, null);
     String componentName1 = "PIG";
-    createServiceComponent(cluster1, serviceName, componentName1,
-        State.INIT);
+    createServiceComponent(cluster1, serviceName, componentName1, State.INIT);
 
     String host1 = getUniqueName();
     String host2 = getUniqueName();
@@ -5747,7 +5491,8 @@ public class AmbariManagementControllerTest {
     createServiceComponentHost(cluster1, null, componentName1,
         host2, null);
 
-    ServiceRequest r = new ServiceRequest(cluster1, serviceName, State.INSTALLED.toString());
+    ServiceRequest r = new ServiceRequest(cluster1, serviceName, "HDP-0.1", "0.1-1234",
+        State.INSTALLED.toString());
     Set<ServiceRequest> requests = new HashSet<>();
     requests.add(r);
 
@@ -5783,7 +5528,7 @@ public class AmbariManagementControllerTest {
       }
     }
 
-    r = new ServiceRequest(cluster1, serviceName, State.STARTED.toString());
+    r = new ServiceRequest(cluster1, serviceName, "HDP-0.1", "0.1-1234", State.STARTED.toString());
     requests.clear();
     requests.add(r);
 
@@ -6207,7 +5952,7 @@ public class AmbariManagementControllerTest {
 
     // Start Service
     ServiceRequest sr = new ServiceRequest(
-      cluster1, serviceName, State.STARTED.name());
+      cluster1, serviceName, "HDP-2.0.6", "2.0.6-1234", State.STARTED.name());
     Set<ServiceRequest> setReqs = new HashSet<>();
     setReqs.add(sr);
     RequestStatusResponse resp = ServiceResourceProviderTest.updateServices(controller,
@@ -6434,6 +6179,8 @@ public class AmbariManagementControllerTest {
     cluster.setDesiredStackVersion(new StackId("HDP-2.0.6"));
     cluster.setCurrentStackVersion(new StackId("HDP-2.0.6"));
 
+    RepositoryVersionEntity repositoryVersion = repositoryVersion206;
+
     ConfigFactory cf = injector.getInstance(ConfigFactory.class);
     Config config1 = cf.createNew(cluster, "global", "version1",
       new HashMap<String, String>() {{
@@ -6445,8 +6192,8 @@ public class AmbariManagementControllerTest {
         put("key1", "value1");
       }}, new HashMap<String, Map<String,String>>());
 
-    Service hdfs = cluster.addService("HDFS");
-    Service mapred = cluster.addService("YARN");
+    Service hdfs = cluster.addService("HDFS", repositoryVersion);
+    Service mapred = cluster.addService("YARN", repositoryVersion);
 
     hdfs.addServiceComponent(Role.HDFS_CLIENT.name());
     hdfs.addServiceComponent(Role.NAMENODE.name());
@@ -6535,6 +6282,8 @@ public class AmbariManagementControllerTest {
     cluster.setDesiredStackVersion(new StackId("HDP-2.0.6"));
     cluster.setCurrentStackVersion(new StackId("HDP-2.0.6"));
 
+    RepositoryVersionEntity repositoryVersion = repositoryVersion206;
+
     ConfigFactory cf = injector.getInstance(ConfigFactory.class);
     Config config1 = cf.createNew(cluster, "global", "version1",
       new HashMap<String, String>() {{
@@ -6546,8 +6295,8 @@ public class AmbariManagementControllerTest {
         put("key1", "value1");
       }}, new HashMap<String, Map<String,String>>());
 
-    Service hdfs = cluster.addService("HDFS");
-    Service mapred = cluster.addService("YARN");
+    Service hdfs = cluster.addService("HDFS", repositoryVersion);
+    Service mapred = cluster.addService("YARN", repositoryVersion);
 
     hdfs.addServiceComponent(Role.HDFS_CLIENT.name());
     hdfs.addServiceComponent(Role.NAMENODE.name());
@@ -6806,7 +6555,7 @@ public class AmbariManagementControllerTest {
       put("core-site", "version1");
       put("hdfs-site", "version1");
     }};
-    ServiceRequest sr = new ServiceRequest(cluster1, serviceName, null);
+    ServiceRequest sr = new ServiceRequest(cluster1, serviceName, "HDP-0.1", "0.1-1234", null);
     ServiceResourceProviderTest.updateServices(controller, Collections.singleton(sr), new HashMap<String,String>(), false, false);
 
     // Install
@@ -6834,10 +6583,8 @@ public class AmbariManagementControllerTest {
   public void testHostLevelParamsSentWithCommands() throws AmbariException, AuthorizationException {
     String cluster1 = getUniqueName();
     createCluster(cluster1);
-    clusters.getCluster(cluster1)
-      .setDesiredStackVersion(new StackId("HDP-0.1"));
     String serviceName = "PIG";
-    createService(cluster1, serviceName, null);
+    createService(cluster1, serviceName, repositoryVersion01, null);
     String componentName1 = "PIG";
     createServiceComponent(cluster1, serviceName, componentName1,
       State.INIT);
@@ -6859,7 +6606,8 @@ public class AmbariManagementControllerTest {
 
 
 
-    ServiceRequest r = new ServiceRequest(cluster1, serviceName, State.INSTALLED.toString());
+    ServiceRequest r = new ServiceRequest(cluster1, serviceName, "HDP-0.1", "0.1-1234",
+        State.INSTALLED.toString());
     Set<ServiceRequest> requests = new HashSet<>();
     requests.add(r);
 
@@ -6896,21 +6644,17 @@ public class AmbariManagementControllerTest {
     cluster.setDesiredStackVersion(new StackId("HDP-2.0.6"));
     String serviceName1 = "HDFS";
     String serviceName2 = "MAPREDUCE2";
-    createService(cluster1, serviceName1, null);
-    createService(cluster1, serviceName2, null);
+    createService(cluster1, serviceName1, repositoryVersion206, null);
+    createService(cluster1, serviceName2, repositoryVersion206, null);
     String componentName1 = "NAMENODE";
     String componentName2 = "DATANODE";
     String componentName3 = "HDFS_CLIENT";
     String componentName4 = "HISTORYSERVER";
 
-    createServiceComponent(cluster1, serviceName1, componentName1,
-      State.INIT);
-    createServiceComponent(cluster1, serviceName1, componentName2,
-      State.INIT);
-    createServiceComponent(cluster1, serviceName1, componentName3,
-      State.INIT);
-    createServiceComponent(cluster1, serviceName2, componentName4,
-      State.INIT);
+    createServiceComponent(cluster1, serviceName1, componentName1, State.INIT);
+    createServiceComponent(cluster1, serviceName1, componentName2, State.INIT);
+    createServiceComponent(cluster1, serviceName1, componentName3, State.INIT);
+    createServiceComponent(cluster1, serviceName2, componentName4, State.INIT);
 
     final String host1 = getUniqueName();
     final String host2 = getUniqueName();
@@ -6920,28 +6664,20 @@ public class AmbariManagementControllerTest {
     addHostToCluster(host2, cluster1);
     addHostToCluster(host3, cluster1);
 
-    createServiceComponentHost(cluster1, serviceName1, componentName1,
-      host1, null);
-    createServiceComponentHost(cluster1, serviceName1, componentName2,
-      host2, null);
-    createServiceComponentHost(cluster1, serviceName1, componentName3,
-      host2, null);
-    createServiceComponentHost(cluster1, serviceName1, componentName3,
-      host3, null);
-    createServiceComponentHost(cluster1, serviceName2, componentName4,
-      host3, null);
+    createServiceComponentHost(cluster1, serviceName1, componentName1, host1, null);
+    createServiceComponentHost(cluster1, serviceName1, componentName2, host2, null);
+    createServiceComponentHost(cluster1, serviceName1, componentName3, host2, null);
+    createServiceComponentHost(cluster1, serviceName1, componentName3, host3, null);
+    createServiceComponentHost(cluster1, serviceName2, componentName4, host3, null);
 
     // Create and attach config
     Map<String, String> configs = new HashMap<>();
     configs.put("a", "b");
 
     ConfigurationRequest cr1,cr2,cr3;
-    cr1 = new ConfigurationRequest(cluster1, "core-site","version1",
-      configs, null);
-    cr2 = new ConfigurationRequest(cluster1, "hdfs-site","version1",
-      configs, null);
-    cr3 = new ConfigurationRequest(cluster1, "mapred-site","version1",
-      configs, null);
+    cr1 = new ConfigurationRequest(cluster1, "core-site", "version1", configs, null);
+    cr2 = new ConfigurationRequest(cluster1, "hdfs-site", "version1", configs, null);
+    cr3 = new ConfigurationRequest(cluster1, "mapred-site", "version1", configs, null);
 
     ClusterRequest crReq = new ClusterRequest(cluster.getClusterId(), cluster1, null, null);
     crReq.setDesiredConfig(Collections.singletonList(cr1));
@@ -7651,140 +7387,6 @@ public class AmbariManagementControllerTest {
     }
   }
 
-  // disabled as upgrade feature is disabled
-  @Ignore
-  @Test
-  public void testUpdateClusterVersionBasic() throws AmbariException, AuthorizationException {
-    String cluster1 = getUniqueName();
-    String serviceName = "MAPREDUCE";
-    String host1 = getUniqueName();
-    String host2 = getUniqueName();
-    String componentName = "JOBTRACKER";
-    StackId currentStackId = new StackId("HDP-0.1");
-
-    Map<String, String> mapRequestProps = new HashMap<>();
-    mapRequestProps.put("context", "Called from a test");
-
-    createCluster(cluster1);
-    Cluster c = clusters.getCluster(cluster1);
-    c.setDesiredStackVersion(currentStackId);
-    createService(cluster1, serviceName, State.INIT);
-    createServiceComponent(cluster1, serviceName, componentName, null);
-
-    addHostToCluster(host1, cluster1);
-    addHostToCluster(host2, cluster1);
-
-    createServiceComponentHost(cluster1, null, componentName,
-        host1, null);
-    createServiceComponentHost(cluster1, null, componentName,
-        host2, null);
-
-    c.getService(serviceName).setDesiredState(State.INSTALLED);
-    c.getService(serviceName).getServiceComponent(componentName).setDesiredState(State.INSTALLED);
-    c.getService(serviceName).getServiceComponent(componentName).getServiceComponentHost(host1)
-        .setDesiredState(State.INSTALLED);
-    c.getService(serviceName).getServiceComponent(componentName).getServiceComponentHost(host2)
-        .setDesiredState(State.STARTED);
-    c.getService(serviceName).getServiceComponent(componentName).getServiceComponentHost(host1)
-        .setState(State.INSTALLED);
-    c.getService(serviceName).getServiceComponent(componentName).getServiceComponentHost(host2)
-        .setState(State.STARTED);
-    c.getService(serviceName).getServiceComponent(componentName).getServiceComponentHost(host1)
-        .setStackVersion(currentStackId);
-    c.getService(serviceName).getServiceComponent(componentName).getServiceComponentHost(host2)
-        .setStackVersion(currentStackId);
-
-    ClusterRequest r = new ClusterRequest(c.getClusterId(), cluster1, "HDP-0.0.1", null);
-    try {
-      controller.updateClusters(Collections.singleton(r), mapRequestProps);
-      fail("Update cluster should fail");
-    } catch (AmbariException e) {
-      Assert.assertTrue(e.getMessage().contains("must be greater than current version"));
-    }
-
-    r = new ClusterRequest(c.getClusterId(), cluster1, "HDPLocal-1.2.2", null);
-    try {
-      controller.updateClusters(Collections.singleton(r), mapRequestProps);
-      fail("Update cluster should fail");
-    } catch (AmbariException e) {
-      Assert.assertTrue(e.getMessage().contains("Upgrade not possible between different stacks"));
-    }
-
-    r = new ClusterRequest(c.getClusterId(), cluster1, "HDP-0.2", null);
-    try {
-      controller.updateClusters(Collections.singleton(r), mapRequestProps);
-      fail("Update cluster should fail");
-    } catch (AmbariException e) {
-      Assert.assertTrue(e.getMessage().contains("Upgrade needs all services to be stopped"));
-      Assert.assertTrue(e.getMessage().contains(serviceName));
-    }
-
-    c.getService(serviceName).getServiceComponent(componentName).getServiceComponentHost(host2)
-        .setDesiredState(State.INSTALLED);
-
-    r = new ClusterRequest(c.getClusterId(), cluster1, "HDP-0.2", null);
-    try {
-      controller.updateClusters(Collections.singleton(r), mapRequestProps);
-      fail("Update cluster should fail");
-    } catch (AmbariException e) {
-      Assert.assertTrue(e.getMessage().contains("Upgrade needs all services to be stopped"));
-      Assert.assertTrue(e.getMessage().contains(componentName));
-    }
-
-    c.getService(serviceName).getServiceComponent(componentName).getServiceComponentHost(host2)
-        .setState(State.INSTALLED);
-    controller.updateClusters(Collections.singleton(r), mapRequestProps);
-    StackId expectedStackId = new StackId("HDP-0.2");
-    Assert.assertTrue(expectedStackId.equals(c.getDesiredStackVersion()));
-    Assert.assertTrue(expectedStackId.equals(c.getService(serviceName).getDesiredStackVersion()));
-    Assert.assertTrue(expectedStackId.equals(c.getService(serviceName)
-        .getServiceComponent(componentName).getDesiredStackVersion()));
-    Assert.assertTrue(expectedStackId.equals(c.getService(serviceName)
-        .getServiceComponent(componentName).getServiceComponentHost(host1).getDesiredStackVersion()));
-    Assert.assertTrue(expectedStackId.equals(c.getService(serviceName)
-        .getServiceComponent(componentName).getServiceComponentHost(host2).getDesiredStackVersion()));
-    Assert.assertTrue(currentStackId.equals(c.getService(serviceName)
-        .getServiceComponent(componentName).getServiceComponentHost(host1).getStackVersion()));
-    Assert.assertTrue(currentStackId.equals(c.getService(serviceName)
-        .getServiceComponent(componentName).getServiceComponentHost(host2).getStackVersion()));
-    ServiceComponent sc = c.getService(serviceName).getServiceComponent(componentName);
-    Assert.assertEquals(State.UPGRADING, sc.getServiceComponentHost(host1).getState());
-    Assert.assertEquals(State.UPGRADING, sc.getServiceComponentHost(host2).getState());
-
-    // Fail as another request is active
-    try {
-      controller.updateClusters(Collections.singleton(r), mapRequestProps);
-      fail("Update cluster should fail");
-    } catch (AmbariException e) {
-      Assert.assertTrue(e.getMessage().contains("A prior upgrade request with id"));
-    }
-
-    // cases where there is no update required
-    c.getService(serviceName).getServiceComponent(componentName).getServiceComponentHost(host1)
-        .setDesiredState(State.INSTALLED);
-    c.getService(serviceName).getServiceComponent(componentName).getServiceComponentHost(host2)
-        .setDesiredState(State.INSTALLED);
-    c.getService(serviceName).getServiceComponent(componentName).getServiceComponentHost(host1)
-        .setState(State.INSTALLED);
-    c.getService(serviceName).getServiceComponent(componentName).getServiceComponentHost(host2)
-        .setState(State.INSTALLED);
-    c.setCurrentStackVersion(expectedStackId);
-    r = new ClusterRequest(c.getClusterId(), cluster1, "", null);
-    controller.updateClusters(Collections.singleton(r), mapRequestProps);
-    Assert.assertEquals(State.INSTALLED, sc.getServiceComponentHost(host1).getState());
-    Assert.assertEquals(State.INSTALLED, sc.getServiceComponentHost(host2).getState());
-
-    r = new ClusterRequest(c.getClusterId(), cluster1, null, null);
-    controller.updateClusters(Collections.singleton(r), mapRequestProps);
-    Assert.assertEquals(State.INSTALLED, sc.getServiceComponentHost(host1).getState());
-    Assert.assertEquals(State.INSTALLED, sc.getServiceComponentHost(host2).getState());
-
-    r = new ClusterRequest(c.getClusterId(), cluster1, "HDP-0.2", null);
-    controller.updateClusters(Collections.singleton(r), mapRequestProps);
-    Assert.assertEquals(State.INSTALLED, sc.getServiceComponentHost(host1).getState());
-    Assert.assertEquals(State.INSTALLED, sc.getServiceComponentHost(host2).getState());
-  }
-
   @Test
   public void testUpdateClusterUpgradabilityCheck() throws AmbariException, AuthorizationException {
     String cluster1 = getUniqueName();
@@ -7815,159 +7417,6 @@ public class AmbariManagementControllerTest {
     }
   }
 
-  // disabled as cluster upgrade feature is disabled
-  @Ignore
-  @Test
-  public void testUpdateClusterVersionCombinations() throws AmbariException, AuthorizationException {
-    String cluster1 = getUniqueName();
-    String pigServiceName = "PIG";
-    String mrServiceName = "MAPREDUCE";
-    final String host1 = getUniqueName();
-    final String host2 = getUniqueName();
-    String pigComponentName = "PIG";
-    String mrJobTrackerComp = "JOBTRACKER";
-    String mrTaskTrackerComp = "TASKTRACKER";
-    String mrClientComp = "MAPREDUCE_CLIENT";
-    String hdfsService = "HDFS";
-    String hdfsNameNode = "NAMENODE";
-    String hdfsDataNode = "DATANODE";
-    String hdfsClient = "HDFS_CLIENT";
-    StackId currentStackId = new StackId("HDP-0.1");
-    StackId desiredStackId = new StackId("HDP-0.2");
-
-    List<String> hosts = new ArrayList<>();
-    hosts.add(host1);
-    hosts.add(host2);
-
-    Map<String, String> mapRequestProps = new HashMap<>();
-    mapRequestProps.put("context", "Called from a test");
-
-    createCluster(cluster1);
-    Cluster c = clusters.getCluster(cluster1);
-    c.setDesiredStackVersion(currentStackId);
-    createService(cluster1, pigServiceName, State.INIT);
-    createServiceComponent(cluster1, pigServiceName, pigComponentName, null);
-
-    addHostToCluster(host1, cluster1);
-    addHostToCluster(host2, cluster1);
-
-    createServiceComponentHost(cluster1, null, pigComponentName,
-        host1, null);
-    createServiceComponentHost(cluster1, null, pigComponentName,
-        host2, null);
-
-    resetServiceState(pigServiceName, currentStackId, c);
-
-    ClusterRequest r = new ClusterRequest(c.getClusterId(), cluster1, "HDP-0.2", null);
-    RequestStatusResponse trackAction = controller.updateClusters(Collections.singleton(r), mapRequestProps);
-    List<Stage> stages = actionDB.getAllStages(trackAction.getRequestId());
-
-    // Upgrade a cluster with one service
-    ExpectedUpgradeTasks expectedTasks = new ExpectedUpgradeTasks(hosts);
-    expectedTasks.expectTask(Role.PIG, host1);
-    expectedTasks.expectTask(Role.PIG, host2);
-    expectedTasks.expectTask(Role.AMBARI_SERVER_ACTION);
-    validateGeneratedStages(stages, 2, expectedTasks);
-
-    resetCluster(c, currentStackId);
-
-    createService(cluster1, mrServiceName, State.INIT);
-    createServiceComponent(cluster1, mrServiceName, mrJobTrackerComp, null);
-    createServiceComponent(cluster1, mrServiceName, mrTaskTrackerComp, null);
-    createServiceComponent(cluster1, mrServiceName, mrClientComp, null);
-
-    createServiceComponentHost(cluster1, null, mrJobTrackerComp, host1, null);
-    createServiceComponentHost(cluster1, null, mrTaskTrackerComp, host2, null);
-    createServiceComponentHost(cluster1, null, mrClientComp, host2, null);
-
-    resetServiceState(mrServiceName, currentStackId, c);
-
-    // Upgrade a cluster with two service
-    actionDB.abortOperation(trackAction.getRequestId());
-    r = new ClusterRequest(c.getClusterId(), cluster1, "HDP-0.2", null);
-    trackAction = controller.updateClusters(Collections.singleton(r), mapRequestProps);
-    stages = actionDB.getAllStages(trackAction.getRequestId());
-
-    expectedTasks.expectTask(Role.JOBTRACKER, host1);
-    expectedTasks.expectTask(Role.TASKTRACKER, host2);
-    expectedTasks.expectTask(Role.MAPREDUCE_CLIENT, host2);
-    validateGeneratedStages(stages, 5, expectedTasks);
-
-    // Upgrade again
-    actionDB.abortOperation(trackAction.getRequestId());
-    trackAction = controller.updateClusters(Collections.singleton(r), mapRequestProps);
-    stages = actionDB.getAllStages(trackAction.getRequestId());
-    validateGeneratedStages(stages, 5, expectedTasks);
-
-    // some host components are upgraded
-    c.getService(pigServiceName).getServiceComponent(pigComponentName).getServiceComponentHost(host1)
-        .setState(State.INSTALLED);
-    c.getService(pigServiceName).getServiceComponent(pigComponentName).getServiceComponentHost(host2)
-        .setState(State.INSTALLED);
-    c.getService(pigServiceName).getServiceComponent(pigComponentName).getServiceComponentHost(host1)
-        .setStackVersion(desiredStackId);
-    c.getService(pigServiceName).getServiceComponent(pigComponentName).getServiceComponentHost(host2)
-        .setStackVersion(desiredStackId);
-
-    actionDB.abortOperation(trackAction.getRequestId());
-    trackAction = controller.updateClusters(Collections.singleton(r), mapRequestProps);
-    stages = actionDB.getAllStages(trackAction.getRequestId());
-    validateGeneratedStages(stages, 5, expectedTasks);
-
-    c.getService(mrServiceName).getServiceComponent(mrJobTrackerComp).getServiceComponentHost(host1)
-        .setState(State.UPGRADING);
-    c.getService(mrServiceName).getServiceComponent(mrTaskTrackerComp).getServiceComponentHost(host2)
-        .setState(State.UPGRADING);
-    actionDB.abortOperation(trackAction.getRequestId());
-    trackAction = controller.updateClusters(Collections.singleton(r), mapRequestProps);
-    stages = actionDB.getAllStages(trackAction.getRequestId());
-    validateGeneratedStages(stages, 5, expectedTasks);
-
-    // Add HDFS and upgrade
-    createService(cluster1, hdfsService, State.INIT);
-    createServiceComponent(cluster1, hdfsService, hdfsNameNode, null);
-    createServiceComponent(cluster1, hdfsService, hdfsDataNode, null);
-    createServiceComponent(cluster1, hdfsService, hdfsClient, null);
-
-    createServiceComponentHost(cluster1, null, hdfsNameNode, host1, null);
-    createServiceComponentHost(cluster1, null, hdfsDataNode, host1, null);
-    createServiceComponentHost(cluster1, null, hdfsDataNode, host2, null);
-    createServiceComponentHost(cluster1, null, hdfsClient, host2, null);
-
-    resetServiceState(hdfsService, currentStackId, c);
-    resetServiceState(mrServiceName, currentStackId, c);
-    resetServiceState(pigServiceName, currentStackId, c);
-
-    actionDB.abortOperation(trackAction.getRequestId());
-    trackAction = controller.updateClusters(Collections.singleton(r), mapRequestProps);
-    stages = actionDB.getAllStages(trackAction.getRequestId());
-
-    expectedTasks.resetAll();
-    expectedTasks.expectTask(Role.PIG, host1);
-    expectedTasks.expectTask(Role.PIG, host2);
-    expectedTasks.expectTask(Role.JOBTRACKER, host1);
-    expectedTasks.expectTask(Role.TASKTRACKER, host2);
-    expectedTasks.expectTask(Role.MAPREDUCE_CLIENT, host2);
-    expectedTasks.expectTask(Role.DATANODE, host1);
-    expectedTasks.expectTask(Role.DATANODE, host2);
-    expectedTasks.expectTask(Role.NAMENODE, host1);
-    expectedTasks.expectTask(Role.HDFS_CLIENT, host2);
-    expectedTasks.expectTask(Role.AMBARI_SERVER_ACTION);
-    validateGeneratedStages(stages, 8, expectedTasks);
-  }
-
-  private void resetServiceState(String service, StackId currentStackId, Cluster c) throws AmbariException {
-    c.getService(service).setDesiredState(State.INSTALLED);
-    for (ServiceComponent sc : c.getService(service).getServiceComponents().values()) {
-      sc.setDesiredState(State.INSTALLED);
-      for (ServiceComponentHost sch : sc.getServiceComponentHosts().values()) {
-        sch.setDesiredState(State.INSTALLED);
-        sch.setState(State.INSTALLED);
-        sch.setStackVersion(currentStackId);
-      }
-    }
-  }
-
   private void validateGeneratedStages(List<Stage> stages, int expectedStageCount, ExpectedUpgradeTasks expectedTasks) {
     Assert.assertEquals(expectedStageCount, stages.size());
     int prevRoleOrder = -1;
@@ -8003,20 +7452,6 @@ public class AmbariManagementControllerTest {
     }
   }
 
-  private void resetCluster(Cluster cluster, StackId currentStackId) throws AmbariException{
-    cluster.setDesiredStackVersion(currentStackId);
-    for (Service service : cluster.getServices().values()) {
-      service.setDesiredStackVersion(currentStackId);
-      for (ServiceComponent component : service.getServiceComponents().values()) {
-        component.setDesiredStackVersion(currentStackId);
-        for (ServiceComponentHost componentHost : component.getServiceComponentHosts().values()) {
-          componentHost.setDesiredStackVersion(currentStackId);
-          componentHost.setState(State.INSTALLED);
-        }
-      }
-    }
-  }
-
   class ExpectedUpgradeTasks {
     private static final int ROLE_COUNT = 25;
     private static final String DEFAULT_HOST = "default_host";
@@ -8163,7 +7598,7 @@ public class AmbariManagementControllerTest {
       .getServiceComponentHost(host2));
 
     // Install
-    ServiceRequest r = new ServiceRequest(cluster1, serviceName, State.INSTALLED.toString());
+    ServiceRequest r = new ServiceRequest(cluster1, serviceName, "HDP-0.1", "0.1-1234", State.INSTALLED.toString());
     Set<ServiceRequest> requests = new HashSet<>();
     requests.add(r);
 
@@ -8182,7 +7617,7 @@ public class AmbariManagementControllerTest {
     }
 
     // Start
-    r = new ServiceRequest(cluster1, serviceName, State.STARTED.toString());
+    r = new ServiceRequest(cluster1, serviceName, "HDP-0.1", "0.1-1234", State.STARTED.toString());
     requests.clear();
     requests.add(r);
     ServiceResourceProviderTest.updateServices(controller, requests, mapRequestProps, true, false);
@@ -8227,7 +7662,7 @@ public class AmbariManagementControllerTest {
     }
 
     // Stop all services
-    r = new ServiceRequest(cluster1, serviceName, State.INSTALLED.toString());
+    r = new ServiceRequest(cluster1, serviceName, "HDP-0.1", "0.1-1234", State.INSTALLED.toString());
     requests.clear();
     requests.add(r);
     ServiceResourceProviderTest.updateServices(controller, requests, mapRequestProps, true, false);
@@ -8429,7 +7864,7 @@ public class AmbariManagementControllerTest {
 
 
     // Install
-    ServiceRequest r = new ServiceRequest(cluster1, serviceName, State.INSTALLED.toString());
+    ServiceRequest r = new ServiceRequest(cluster1, serviceName, "HDP-0.1", "0.1-1234", State.INSTALLED.toString());
     Set<ServiceRequest> requests = new HashSet<>();
     requests.add(r);
 
@@ -8527,7 +7962,7 @@ public class AmbariManagementControllerTest {
     sch3.setState(State.INSTALLED);
 
     // an UNKNOWN failure will throw an exception
-    ServiceRequest req = new ServiceRequest(cluster1, serviceName1,
+    ServiceRequest req = new ServiceRequest(cluster1, serviceName1, "HDP-0.2", "0.2-1234",
         State.INSTALLED.toString());
     ServiceResourceProviderTest.updateServices(controller, Collections.singleton(req), Collections.<String, String>emptyMap(), true, false);
   }
@@ -9255,9 +8690,9 @@ public class AmbariManagementControllerTest {
       amc.createCluster(clusterRequest);
 
       Set<ServiceRequest> serviceRequests = new HashSet<>();
-      serviceRequests.add(new ServiceRequest(cluster1, "HDFS", null));
+      serviceRequests.add(new ServiceRequest(cluster1, "HDFS", "HDP-1.2.0", "1.2.0-1234", null));
 
-      ServiceResourceProviderTest.createServices(amc, serviceRequests);
+      ServiceResourceProviderTest.createServices(amc, repositoryVersionDAO, serviceRequests);
 
       Type confType = new TypeToken<Map<String, String>>() {
       }.getType();
@@ -9316,11 +8751,11 @@ public class AmbariManagementControllerTest {
     HostResourceProviderTest.createHosts(amc, hrs);
 
     Set<ServiceRequest> serviceRequests = new HashSet<>();
-    serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "HDFS", null));
-    serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "MAPREDUCE2", null));
-    serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "YARN", null));
+    serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "HDFS", STACK_ID, "2.0.1-1234", null));
+    serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "MAPREDUCE2", STACK_ID, "2.0.1-1234", null));
+    serviceRequests.add(new ServiceRequest(CLUSTER_NAME, "YARN", STACK_ID, "2.0.1-1234", null));
 
-    ServiceResourceProviderTest.createServices(amc, serviceRequests);
+    ServiceResourceProviderTest.createServices(amc, repositoryVersionDAO, serviceRequests);
 
     Set<ServiceComponentRequest> serviceComponentRequests = new HashSet<>();
     serviceComponentRequests.add(new ServiceComponentRequest(CLUSTER_NAME, "HDFS", "NAMENODE", null));
@@ -9392,10 +8827,10 @@ public class AmbariManagementControllerTest {
     amc.createCluster(clusterRequest);
 
     Set<ServiceRequest> serviceRequests = new HashSet<>();
-    serviceRequests.add(new ServiceRequest(cluster1, "HDFS", null));
-    serviceRequests.add(new ServiceRequest(cluster1, "HIVE", null));
+    serviceRequests.add(new ServiceRequest(cluster1, "HDFS", "HDP-1.2.0", "1.2.0-1234", null));
+    serviceRequests.add(new ServiceRequest(cluster1, "HIVE", "HDP-1.2.0", "1.2.0-1234", null));
 
-    ServiceResourceProviderTest.createServices(amc, serviceRequests);
+    ServiceResourceProviderTest.createServices(amc, repositoryVersionDAO, serviceRequests);
 
     Type confType = new TypeToken<Map<String, String>>() {}.getType();
 
@@ -9414,7 +8849,7 @@ public class AmbariManagementControllerTest {
     Assert.assertTrue(clusters.getCluster(cluster1).getDesiredConfigs().containsKey("hive-site"));
 
     serviceRequests.clear();
-    serviceRequests.add(new ServiceRequest(cluster1, "HDFS", null));
+    serviceRequests.add(new ServiceRequest(cluster1, "HDFS", "HDP-1.2.0", "1.2.0-1234", null));
 
     ServiceResourceProviderTest.updateServices(amc, serviceRequests, mapRequestProps, true, false);
 
@@ -9444,7 +8879,7 @@ public class AmbariManagementControllerTest {
     amc.createHostComponents(componentHostRequests);
 
     serviceRequests.clear();
-    serviceRequests.add(new ServiceRequest(cluster1, "HDFS", "INSTALLED"));
+    serviceRequests.add(new ServiceRequest(cluster1, "HDFS", "HDP-1.2.0", "1.2.0-1234", "INSTALLED"));
     ServiceResourceProviderTest.updateServices(amc, serviceRequests, mapRequestProps, true, false);
 
     Cluster cluster = clusters.getCluster(cluster1);
@@ -9511,7 +8946,7 @@ public class AmbariManagementControllerTest {
     componentHost.handleEvent(new ServiceComponentHostOpSucceededEvent(componentHost.getServiceComponentName(), componentHost.getHostName(), System.currentTimeMillis()));
 
     serviceRequests.clear();
-    serviceRequests.add(new ServiceRequest(cluster1, "HDFS", "STARTED"));
+    serviceRequests.add(new ServiceRequest(cluster1, "HDFS", "HDP-1.2.0", "1.2.0-1234", "STARTED"));
 
     RequestStatusResponse response = ServiceResourceProviderTest.updateServices(amc, serviceRequests,
         mapRequestProps

<TRUNCATED>
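
The hunks above all make the same change to this test class: a ServiceRequest now names the desired stack id and repository version alongside the cluster, service, and desired state, and service creation goes through a RepositoryVersionDAO. A minimal sketch of the pattern, assuming the fixture fields (cluster1, controller, repositoryVersionDAO) the test class already provides; the "HDP-0.1" / "0.1-1234" literals mirror the values used in the hunks:

    // Create the service against an explicit stack id and repository version.
    Set<ServiceRequest> requests = new HashSet<>();
    requests.add(new ServiceRequest(cluster1, "HDFS", "HDP-0.1", "0.1-1234", null));
    ServiceResourceProviderTest.createServices(controller, repositoryVersionDAO, requests);

    // State transitions still go through updateServices(), but the request
    // carries the desired stack and repository version as well.
    ServiceRequest install = new ServiceRequest(cluster1, "HDFS", "HDP-0.1", "0.1-1234",
        State.INSTALLED.toString());
    ServiceResourceProviderTest.updateServices(controller, Collections.singleton(install),
        new HashMap<String, String>(), false, false);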

[2/9] ambari git commit: AMBARI-20894 - Setting Desired Stack Ids to Correct Values During Service and Patch Upgrades (jonathanhurley)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
index 3dc34e3..364b92c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
@@ -49,6 +49,7 @@ import org.apache.ambari.server.controller.ConfigurationRequest;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.orm.OrmTestHelper;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.security.TestAuthenticationFactory;
 import org.apache.ambari.server.security.authorization.AuthorizationException;
 import org.apache.ambari.server.stack.HostsType;
@@ -1245,21 +1246,21 @@ public class UpgradeHelperTest {
 
     String clusterName = "c1";
 
+    String repositoryVersionString = "2.1.1-1234";
     StackId stackId = new StackId("HDP-2.1.1");
     StackId stackId2 = new StackId("HDP-2.2.0");
 
     clusters.addCluster(clusterName, stackId);
     Cluster c = clusters.getCluster(clusterName);
 
-    helper.getOrCreateRepositoryVersion(stackId,
-        c.getDesiredStackVersion().getStackVersion());
+    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(stackId,
+        repositoryVersionString);
 
     helper.getOrCreateRepositoryVersion(stackId2,"2.2.0");
 
     helper.getOrCreateRepositoryVersion(stackId2, UPGRADE_VERSION);
 
-    c.createClusterVersion(stackId,
-        c.getDesiredStackVersion().getStackVersion(), "admin",
+    c.createClusterVersion(stackId, repositoryVersionString, "admin",
         RepositoryVersionState.INSTALLING);
 
     for (int i = 0; i < 4; i++) {
@@ -1277,11 +1278,11 @@ public class UpgradeHelperTest {
     }
 
     // !!! add services
-    c.addService(serviceFactory.createNew(c, "HDFS"));
-    c.addService(serviceFactory.createNew(c, "YARN"));
-    c.addService(serviceFactory.createNew(c, "ZOOKEEPER"));
-    c.addService(serviceFactory.createNew(c, "HIVE"));
-    c.addService(serviceFactory.createNew(c, "OOZIE"));
+    c.addService(serviceFactory.createNew(c, "HDFS", repositoryVersion));
+    c.addService(serviceFactory.createNew(c, "YARN", repositoryVersion));
+    c.addService(serviceFactory.createNew(c, "ZOOKEEPER", repositoryVersion));
+    c.addService(serviceFactory.createNew(c, "HIVE", repositoryVersion));
+    c.addService(serviceFactory.createNew(c, "OOZIE", repositoryVersion));
 
     Service s = c.getService("HDFS");
     ServiceComponent sc = s.addServiceComponent("NAMENODE");
@@ -1385,7 +1386,7 @@ public class UpgradeHelperTest {
     expect(m_masterHostResolver.getCluster()).andReturn(c).anyTimes();
 
     for(String service : additionalServices) {
-      c.addService(service);
+      c.addService(service, repositoryVersion);
       if (service.equals("HBASE")) {
         type = new HostsType();
         type.hosts.addAll(Arrays.asList("h1", "h2"));
@@ -1477,16 +1478,14 @@ public class UpgradeHelperTest {
 
     String clusterName = "c1";
 
+    String version = "2.1.1.0-1234";
     StackId stackId = new StackId("HDP-2.1.1");
     clusters.addCluster(clusterName, stackId);
     Cluster c = clusters.getCluster(clusterName);
 
-    helper.getOrCreateRepositoryVersion(stackId,
-        c.getDesiredStackVersion().getStackVersion());
+    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(stackId, version);
 
-    c.createClusterVersion(stackId,
-        c.getDesiredStackVersion().getStackVersion(), "admin",
-        RepositoryVersionState.INSTALLING);
+    c.createClusterVersion(stackId, version, "admin", RepositoryVersionState.INSTALLING);
 
     for (int i = 0; i < 2; i++) {
       String hostName = "h" + (i+1);
@@ -1503,7 +1502,7 @@ public class UpgradeHelperTest {
     }
 
     // !!! add services
-    c.addService(serviceFactory.createNew(c, "HDFS"));
+    c.addService(serviceFactory.createNew(c, "HDFS", repositoryVersion));
 
     Service s = c.getService("HDFS");
     ServiceComponent sc = s.addServiceComponent("NAMENODE");
@@ -1557,16 +1556,15 @@ public class UpgradeHelperTest {
 
     String clusterName = "c1";
 
+    String version = "2.1.1.0-1234";
     StackId stackId = new StackId("HDP-2.1.1");
     clusters.addCluster(clusterName, stackId);
     Cluster c = clusters.getCluster(clusterName);
 
-    helper.getOrCreateRepositoryVersion(stackId,
-        c.getDesiredStackVersion().getStackVersion());
+    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(stackId,
+        version);
 
-    c.createClusterVersion(stackId,
-        c.getDesiredStackVersion().getStackVersion(), "admin",
-        RepositoryVersionState.INSTALLING);
+    c.createClusterVersion(stackId, version, "admin", RepositoryVersionState.INSTALLING);
 
     for (int i = 0; i < 2; i++) {
       String hostName = "h" + (i+1);
@@ -1583,7 +1581,7 @@ public class UpgradeHelperTest {
     }
 
     // !!! add services
-    c.addService(serviceFactory.createNew(c, "ZOOKEEPER"));
+    c.addService(serviceFactory.createNew(c, "ZOOKEEPER", repositoryVersion));
 
     Service s = c.getService("ZOOKEEPER");
     ServiceComponent sc = s.addServiceComponent("ZOOKEEPER_SERVER");
@@ -1626,12 +1624,10 @@ public class UpgradeHelperTest {
     clusters.addCluster(clusterName, stackId);
     Cluster c = clusters.getCluster(clusterName);
 
-    helper.getOrCreateRepositoryVersion(stackId,
-        c.getDesiredStackVersion().getStackVersion());
+    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(stackId,
+        version);
 
-    c.createClusterVersion(stackId,
-        c.getDesiredStackVersion().getStackVersion(), "admin",
-        RepositoryVersionState.INSTALLING);
+    c.createClusterVersion(stackId, version, "admin", RepositoryVersionState.INSTALLING);
 
     for (int i = 0; i < 2; i++) {
       String hostName = "h" + (i+1);
@@ -1648,7 +1644,7 @@ public class UpgradeHelperTest {
     }
 
     // Add services
-    c.addService(serviceFactory.createNew(c, "HDFS"));
+    c.addService(serviceFactory.createNew(c, "HDFS", repositoryVersion));
 
     Service s = c.getService("HDFS");
     ServiceComponent sc = s.addServiceComponent("NAMENODE");
@@ -1692,12 +1688,10 @@ public class UpgradeHelperTest {
     clusters.addCluster(clusterName, stackId);
     Cluster c = clusters.getCluster(clusterName);
 
-    helper.getOrCreateRepositoryVersion(stackId,
-        c.getDesiredStackVersion().getStackVersion());
+    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(stackId,
+        version);
 
-    c.createClusterVersion(stackId,
-        c.getDesiredStackVersion().getStackVersion(), "admin",
-        RepositoryVersionState.INSTALLING);
+    c.createClusterVersion(stackId, version, "admin", RepositoryVersionState.INSTALLING);
 
     for (int i = 0; i < 2; i++) {
       String hostName = "h" + (i+1);
@@ -1714,7 +1708,7 @@ public class UpgradeHelperTest {
     }
 
     // Add services
-    c.addService(serviceFactory.createNew(c, "HDFS"));
+    c.addService(serviceFactory.createNew(c, "HDFS", repositoryVersion));
 
     Service s = c.getService("HDFS");
     ServiceComponent sc = s.addServiceComponent("NAMENODE");
@@ -1806,18 +1800,18 @@ public class UpgradeHelperTest {
 
     String clusterName = "c1";
 
+    String version = "2.1.1.0-1234";
     StackId stackId = new StackId("HDP-2.1.1");
     StackId stackId2 = new StackId("HDP-2.2.0");
     clusters.addCluster(clusterName, stackId);
     Cluster c = clusters.getCluster(clusterName);
 
-    helper.getOrCreateRepositoryVersion(stackId,
-        c.getDesiredStackVersion().getStackVersion());
+    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(stackId,
+        version);
+
     helper.getOrCreateRepositoryVersion(stackId2,"2.2.0");
 
-    c.createClusterVersion(stackId,
-        c.getDesiredStackVersion().getStackVersion(), "admin",
-        RepositoryVersionState.INSTALLING);
+    c.createClusterVersion(stackId, version, "admin", RepositoryVersionState.INSTALLING);
 
     for (int i = 0; i < 2; i++) {
       String hostName = "h" + (i+1);
@@ -1834,7 +1828,7 @@ public class UpgradeHelperTest {
     }
 
     // !!! add storm
-    c.addService(serviceFactory.createNew(c, "STORM"));
+    c.addService(serviceFactory.createNew(c, "STORM", repositoryVersion));
 
     Service s = c.getService("STORM");
     ServiceComponent sc = s.addServiceComponent("NIMBUS");
@@ -1904,22 +1898,21 @@ public class UpgradeHelperTest {
 
     String clusterName = "c1";
 
+    String version = "2.1.1.0-1234";
     StackId stackId = new StackId("HDP-2.1.1");
     StackId stackId2 = new StackId("HDP-2.2.0");
 
     clusters.addCluster(clusterName, stackId);
     Cluster c = clusters.getCluster(clusterName);
 
-    helper.getOrCreateRepositoryVersion(stackId,
-        c.getDesiredStackVersion().getStackVersion());
+    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(stackId,
+        version);
 
     helper.getOrCreateRepositoryVersion(stackId2,"2.2.0");
 
     helper.getOrCreateRepositoryVersion(stackId2, UPGRADE_VERSION);
 
-    c.createClusterVersion(stackId,
-        c.getDesiredStackVersion().getStackVersion(), "admin",
-        RepositoryVersionState.INSTALLING);
+    c.createClusterVersion(stackId, version, "admin", RepositoryVersionState.INSTALLING);
 
     for (int i = 0; i < 2; i++) {
       String hostName = "h" + (i+1);
@@ -1935,7 +1928,7 @@ public class UpgradeHelperTest {
     }
 
     // !!! add services
-    c.addService(serviceFactory.createNew(c, "ZOOKEEPER"));
+    c.addService(serviceFactory.createNew(c, "ZOOKEEPER", repositoryVersion));
 
     Service s = c.getService("ZOOKEEPER");
     ServiceComponent sc = s.addServiceComponent("ZOOKEEPER_SERVER");
@@ -2094,18 +2087,17 @@ public class UpgradeHelperTest {
 
     String clusterName = "c1";
 
+    String version = "2.1.1.0-1234";
     StackId stackId = new StackId("HDP-2.1.1");
     StackId stackId2 = new StackId("HDP-2.2.0");
     clusters.addCluster(clusterName, stackId);
     Cluster c = clusters.getCluster(clusterName);
 
-    helper.getOrCreateRepositoryVersion(stackId,
-        c.getDesiredStackVersion().getStackVersion());
+    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(stackId, version);
+
     helper.getOrCreateRepositoryVersion(stackId2, "2.2.0");
 
-    c.createClusterVersion(stackId,
-        c.getDesiredStackVersion().getStackVersion(), "admin",
-        RepositoryVersionState.INSTALLING);
+    c.createClusterVersion(stackId, version, "admin", RepositoryVersionState.INSTALLING);
 
     // create 2 hosts
     for (int i = 0; i < 2; i++) {
@@ -2124,8 +2116,8 @@ public class UpgradeHelperTest {
 
     // add ZK Server to both hosts, and then Nimbus to only 1 - this will test
     // how the HOU breaks out dependencies into stages
-    c.addService(serviceFactory.createNew(c, "ZOOKEEPER"));
-    c.addService(serviceFactory.createNew(c, "HBASE"));
+    c.addService(serviceFactory.createNew(c, "ZOOKEEPER", repositoryVersion));
+    c.addService(serviceFactory.createNew(c, "HBASE", repositoryVersion));
     Service zookeeper = c.getService("ZOOKEEPER");
     Service hbase = c.getService("HBASE");
     ServiceComponent zookeeperServer = zookeeper.addServiceComponent("ZOOKEEPER_SERVER");
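
Every test in this file now follows the same setup shape: keep the RepositoryVersionEntity returned by OrmTestHelper and thread it into each service that is added, and pass the version string to createClusterVersion() explicitly instead of reading it back from the cluster's desired stack. A condensed sketch, assuming the helper, serviceFactory, and cluster c already set up by the test; the literals mirror the "HDP-2.1.1" / "2.1.1.0-1234" values above:

    String version = "2.1.1.0-1234";
    StackId stackId = new StackId("HDP-2.1.1");

    // The entity is kept so it can be handed to every new service.
    RepositoryVersionEntity repositoryVersion =
        helper.getOrCreateRepositoryVersion(stackId, version);

    c.createClusterVersion(stackId, version, "admin", RepositoryVersionState.INSTALLING);
    c.addService(serviceFactory.createNew(c, "HDFS", repositoryVersion));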

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertEventPublisherTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertEventPublisherTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertEventPublisherTest.java
index 8c23b69..f5f4e10 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertEventPublisherTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertEventPublisherTest.java
@@ -33,6 +33,7 @@ import org.apache.ambari.server.orm.entities.AlertCurrentEntity;
 import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
 import org.apache.ambari.server.orm.entities.AlertGroupEntity;
 import org.apache.ambari.server.orm.entities.AlertHistoryEntity;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.state.AlertState;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
@@ -76,6 +77,9 @@ public class AlertEventPublisherTest {
   private OrmTestHelper ormHelper;
   private AggregateDefinitionMapping aggregateMapping;
 
+  private final String STACK_VERSION = "2.0.6";
+  private final String REPO_VERSION = "2.0.6-1234";
+
   /**
    *
    */
@@ -95,7 +99,7 @@ public class AlertEventPublisherTest {
     aggregateMapping = injector.getInstance(AggregateDefinitionMapping.class);
 
     clusterName = "foo";
-    clusters.addCluster(clusterName, new StackId("HDP", "2.0.6"));
+    clusters.addCluster(clusterName, new StackId("HDP", STACK_VERSION));
     cluster = clusters.getCluster(clusterName);
     Assert.assertNotNull(cluster);
   }
@@ -301,8 +305,11 @@ public class AlertEventPublisherTest {
   }
 
   private void installHdfsService() throws Exception {
+    RepositoryVersionEntity repositoryVersion = ormHelper.getOrCreateRepositoryVersion(
+        cluster.getCurrentStackVersion(), REPO_VERSION);
+
     String serviceName = "HDFS";
-    Service service = serviceFactory.createNew(cluster, serviceName);
+    Service service = serviceFactory.createNew(cluster, serviceName, repositoryVersion);
     service = cluster.getService(serviceName);
 
     Assert.assertNotNull(service);
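
Here the repository version for the installed service is derived from the cluster's current stack rather than a second hard-coded StackId. A short sketch of the installHdfsService() pattern, assuming the ormHelper, serviceFactory, and cluster fields from the test; REPO_VERSION mirrors the "2.0.6-1234" constant introduced above, and the factory is assumed to register the service with the cluster, since the test immediately looks it up again:

    RepositoryVersionEntity repositoryVersion = ormHelper.getOrCreateRepositoryVersion(
        cluster.getCurrentStackVersion(), REPO_VERSION);

    // createNew() now needs the repository version; the service is then
    // fetched back from the cluster as before.
    Service service = serviceFactory.createNew(cluster, "HDFS", repositoryVersion);
    service = cluster.getService("HDFS");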

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/InitialAlertEventTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/InitialAlertEventTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/InitialAlertEventTest.java
index 890464d..443b4f0 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/InitialAlertEventTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/InitialAlertEventTest.java
@@ -28,10 +28,12 @@ import org.apache.ambari.server.events.MockEventListener;
 import org.apache.ambari.server.events.publishers.AlertEventPublisher;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.orm.OrmTestHelper;
 import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
 import org.apache.ambari.server.orm.dao.AlertsDAO;
 import org.apache.ambari.server.orm.entities.AlertCurrentEntity;
 import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.state.Alert;
 import org.apache.ambari.server.state.AlertFirmness;
 import org.apache.ambari.server.state.AlertState;
@@ -72,6 +74,13 @@ public class InitialAlertEventTest {
   private String m_clusterName;
   private ServiceFactory m_serviceFactory;
 
+  private OrmTestHelper m_helper;
+
+  private final String STACK_VERSION = "2.0.6";
+  private final String REPO_VERSION = "2.0.6-1234";
+  private final StackId STACK_ID = new StackId("HDP", STACK_VERSION);
+  private RepositoryVersionEntity m_repositoryVersion;
+
   /**
    *
    */
@@ -97,9 +106,12 @@ public class InitialAlertEventTest {
     m_serviceFactory = m_injector.getInstance(ServiceFactory.class);
 
     m_alertsDao = m_injector.getInstance(AlertsDAO.class);
+    m_helper = m_injector.getInstance(OrmTestHelper.class);
+
+    m_repositoryVersion = m_helper.getOrCreateRepositoryVersion(STACK_ID, REPO_VERSION);
 
     m_clusterName = "c1";
-    m_clusters.addCluster(m_clusterName, new StackId("HDP", "2.0.6"));
+    m_clusters.addCluster(m_clusterName, STACK_ID);
     m_cluster = m_clusters.getCluster(m_clusterName);
     Assert.assertNotNull(m_cluster);
 
@@ -175,7 +187,7 @@ public class InitialAlertEventTest {
 
   private void installHdfsService() throws Exception {
     String serviceName = "HDFS";
-    Service service = m_serviceFactory.createNew(m_cluster, serviceName);
+    Service service = m_serviceFactory.createNew(m_cluster, serviceName, m_repositoryVersion);
     service = m_cluster.getService(serviceName);
 
     Assert.assertNotNull(service);

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
index d479ca2..9c17e01 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterDeadlockTest.java
@@ -35,6 +35,7 @@ import org.apache.ambari.server.events.listeners.upgrade.HostVersionOutOfSyncLis
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.orm.OrmTestHelper;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
@@ -96,6 +97,7 @@ public class ClusterDeadlockTest {
   private OrmTestHelper helper;
 
   private StackId stackId = new StackId("HDP-0.1");
+  private String REPO_VERSION = "0.1-1234";
 
   /**
    * The cluster.
@@ -577,8 +579,6 @@ public class ClusterDeadlockTest {
     sc.addServiceComponentHost(sch);
     sch.setDesiredState(State.INSTALLED);
     sch.setState(State.INSTALLED);
-    sch.setDesiredStackVersion(stackId);
-    sch.setStackVersion(stackId);
 
     return sch;
   }
@@ -586,10 +586,13 @@ public class ClusterDeadlockTest {
   private Service installService(String serviceName) throws AmbariException {
     Service service = null;
 
+    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(
+        stackId, REPO_VERSION);
+
     try {
       service = cluster.getService(serviceName);
     } catch (ServiceNotFoundException e) {
-      service = serviceFactory.createNew(cluster, serviceName);
+      service = serviceFactory.createNew(cluster, serviceName, repositoryVersion);
       cluster.addService(service);
     }
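
Two related changes in this file: the installService() helper resolves the repository version itself and hands it to the factory, and the explicit setStackVersion()/setDesiredStackVersion() calls on the ServiceComponentHost are removed, since the desired stack is now carried by the service's repository version. A sketch of the reworked helper, assuming the stackId, REPO_VERSION, helper, cluster, and serviceFactory fields used in this test:

    RepositoryVersionEntity repositoryVersion =
        helper.getOrCreateRepositoryVersion(stackId, REPO_VERSION);

    Service service = null;
    try {
      service = cluster.getService(serviceName);
    } catch (ServiceNotFoundException e) {
      // Only create the service if it does not exist yet.
      service = serviceFactory.createNew(cluster, serviceName, repositoryVersion);
      cluster.addService(service);
    }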
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java
index 76f9130..ec5eef0 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java
@@ -40,6 +40,8 @@ import org.apache.ambari.server.HostNotFoundException;
 import org.apache.ambari.server.controller.AmbariSessionManager;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.orm.OrmTestHelper;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Host;
@@ -60,12 +62,14 @@ public class ClusterImplTest {
 
   private static Injector injector;
   private static Clusters clusters;
+  private static OrmTestHelper ormTestHelper;
 
   @BeforeClass
   public static void setUpClass() throws Exception {
     injector = Guice.createInjector(new InMemoryDefaultTestModule());
     injector.getInstance(GuiceJpaInitializer.class);
     clusters = injector.getInstance(Clusters.class);
+    ormTestHelper = injector.getInstance(OrmTestHelper.class);
   }
 
   @AfterClass
@@ -207,10 +211,15 @@ public class ClusterImplTest {
     String clusterName = "TEST_CLUSTER";
     String hostName1 = "HOST1", hostName2 = "HOST2";
 
-    clusters.addCluster(clusterName, new StackId("HDP-2.1.1"));
+    String stackVersion = "HDP-2.1.1";
+    String repoVersion = "2.1.1-1234";
 
+    clusters.addCluster(clusterName, new StackId(stackVersion));
     Cluster cluster = clusters.getCluster(clusterName);
 
+    RepositoryVersionEntity repositoryVersion = ormTestHelper.getOrCreateRepositoryVersion(
+        new StackId(stackVersion), repoVersion);
+
     clusters.addHost(hostName1);
     clusters.addHost(hostName2);
 
@@ -222,7 +231,7 @@ public class ClusterImplTest {
 
     clusters.mapAndPublishHostsToCluster(Sets.newHashSet(hostName1, hostName2), clusterName);
 
-    Service hdfs = cluster.addService("HDFS");
+    Service hdfs = cluster.addService("HDFS", repositoryVersion);
 
     ServiceComponent nameNode = hdfs.addServiceComponent("NAMENODE");
     nameNode.addServiceComponentHost(hostName1);
@@ -235,7 +244,7 @@ public class ClusterImplTest {
     hdfsClient.addServiceComponentHost(hostName1);
     hdfsClient.addServiceComponentHost(hostName2);
 
-    Service tez = cluster.addService(serviceToDelete);
+    Service tez = cluster.addService(serviceToDelete, repositoryVersion);
 
     ServiceComponent tezClient = tez.addServiceComponent("TEZ_CLIENT");
     ServiceComponentHost tezClientHost1 =  tezClient.addServiceComponentHost(hostName1);
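
This file exercises the overload of Cluster.addService() that takes the service name and repository version directly and returns the created service. A brief sketch, assuming the ormTestHelper and cluster fields from the test; the "HDP-2.1.1" / "2.1.1-1234" literals mirror the values above:

    RepositoryVersionEntity repositoryVersion = ormTestHelper.getOrCreateRepositoryVersion(
        new StackId("HDP-2.1.1"), "2.1.1-1234");

    // addService(name, repositoryVersion) creates and registers the service.
    Service hdfs = cluster.addService("HDFS", repositoryVersion);
    ServiceComponent nameNode = hdfs.addServiceComponent("NAMENODE");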

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
index e5e2643..6471988 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
@@ -344,20 +344,26 @@ public class ClusterTest {
   }
 
   /**
-   * For Rolling Upgrades, create a cluster with the following components
-   * HDFS: NameNode, DataNode, HDFS Client
-   * ZK: Zookeeper Server, Zookeeper Monitor
+   * For Rolling Upgrades, create a cluster with the following components HDFS:
+   * NameNode, DataNode, HDFS Client ZK: Zookeeper Server, Zookeeper Monitor
    * Ganglia: Ganglia Server, Ganglia Monitor
    *
-   * Further, 3 hosts will be added.
-   * Finally, verify that only the Ganglia components do not need to advertise a version.
-   * @param clusterName Cluster Name
-   * @param stackId Stack to set for the cluster
-   * @param hostAttributes Host attributes to use for 3 hosts (h-1, h-2, h-3)
+   * Further, 3 hosts will be added. Finally, verify that only the Ganglia
+   * components do not need to advertise a version.
+   *
+   * @param clusterName
+   *          Cluster Name
+   * @param repositoryVersion
+   *          the repository to use for new services being installed in the
+   *          cluster
+   * @param hostAttributes
+   *          Host attributes to use for 3 hosts (h-1, h-2, h-3)
    * @throws Exception
    * @return Cluster that was created
    */
-  private Cluster createClusterForRU(String clusterName, StackId stackId, Map<String, String> hostAttributes) throws Exception {
+  private Cluster createClusterForRU(String clusterName, RepositoryVersionEntity repositoryVersion,
+      Map<String, String> hostAttributes) throws Exception {
+    StackId stackId = repositoryVersion.getStackId();
     clusters.addCluster(clusterName, stackId);
     Cluster cluster = clusters.getCluster(clusterName);
     Assert.assertEquals(clusterName, cluster.getClusterName());
@@ -382,9 +388,9 @@ public class ClusterTest {
     }
 
     // Add Services
-    Service s1 = serviceFactory.createNew(cluster, "HDFS");
-    Service s2 = serviceFactory.createNew(cluster, "ZOOKEEPER");
-    Service s3 = serviceFactory.createNew(cluster, "GANGLIA");
+    Service s1 = serviceFactory.createNew(cluster, "HDFS", repositoryVersion);
+    Service s2 = serviceFactory.createNew(cluster, "ZOOKEEPER", repositoryVersion);
+    Service s3 = serviceFactory.createNew(cluster, "GANGLIA", repositoryVersion);
     cluster.addService(s1);
     cluster.addService(s2);
     cluster.addService(s3);
@@ -648,8 +654,10 @@ public class ClusterTest {
     // public Service getService(String serviceName) throws AmbariException;
     // public Map<String, Service> getServices();
 
-    Service s1 = serviceFactory.createNew(c1, "HDFS");
-    Service s2 = serviceFactory.createNew(c1, "MAPREDUCE");
+    RepositoryVersionEntity repositoryVersion = c1.getCurrentClusterVersion().getRepositoryVersion();
+
+    Service s1 = serviceFactory.createNew(c1, "HDFS", repositoryVersion);
+    Service s2 = serviceFactory.createNew(c1, "MAPREDUCE", repositoryVersion);
 
     Service s = c1.getService("HDFS");
     Assert.assertNotNull(s);
@@ -676,7 +684,9 @@ public class ClusterTest {
     // TODO write unit tests
     // public List<ServiceComponentHost> getServiceComponentHosts(String hostname);
 
-    Service s = serviceFactory.createNew(c1, "HDFS");
+    RepositoryVersionEntity repositoryVersion = c1.getCurrentClusterVersion().getRepositoryVersion();
+
+    Service s = serviceFactory.createNew(c1, "HDFS", repositoryVersion);
     c1.addService(s);
     ServiceComponent sc = serviceComponentFactory.createNew(s, "NAMENODE");
     s.addServiceComponent(sc);
@@ -694,7 +704,7 @@ public class ClusterTest {
     try {
       while (iterator.hasNext()) {
         iterator.next();
-        Service s1 = serviceFactory.createNew(c1, "PIG");
+        Service s1 = serviceFactory.createNew(c1, "PIG", repositoryVersion);
         c1.addService(s1);
         ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "PIG");
         s1.addServiceComponent(sc1);
@@ -713,7 +723,9 @@ public class ClusterTest {
   public void testGetServiceComponentHosts_ForService() throws Exception {
     createDefaultCluster();
 
-    Service s = serviceFactory.createNew(c1, "HDFS");
+    RepositoryVersionEntity repositoryVersion = c1.getCurrentClusterVersion().getRepositoryVersion();
+
+    Service s = serviceFactory.createNew(c1, "HDFS", repositoryVersion);
     c1.addService(s);
 
     ServiceComponent scNN = serviceComponentFactory.createNew(s, "NAMENODE");
@@ -741,7 +753,9 @@ public class ClusterTest {
   public void testGetServiceComponentHosts_ForServiceComponent() throws Exception {
     createDefaultCluster();
 
-    Service s = serviceFactory.createNew(c1, "HDFS");
+    RepositoryVersionEntity repositoryVersion = c1.getCurrentClusterVersion().getRepositoryVersion();
+
+    Service s = serviceFactory.createNew(c1, "HDFS", repositoryVersion);
     c1.addService(s);
 
     ServiceComponent scNN = serviceComponentFactory.createNew(s, "NAMENODE");
@@ -775,7 +789,9 @@ public class ClusterTest {
   public void testGetServiceComponentHostMap() throws Exception {
     createDefaultCluster();
 
-    Service s = serviceFactory.createNew(c1, "HDFS");
+    RepositoryVersionEntity repositoryVersion = c1.getCurrentClusterVersion().getRepositoryVersion();
+
+    Service s = serviceFactory.createNew(c1, "HDFS", repositoryVersion);
     c1.addService(s);
 
     ServiceComponent scNN = serviceComponentFactory.createNew(s, "NAMENODE");
@@ -807,10 +823,12 @@ public class ClusterTest {
   public void testGetServiceComponentHostMap_ForService() throws Exception {
     createDefaultCluster();
 
-    Service sfHDFS = serviceFactory.createNew(c1, "HDFS");
+    RepositoryVersionEntity repositoryVersion = c1.getCurrentClusterVersion().getRepositoryVersion();
+
+    Service sfHDFS = serviceFactory.createNew(c1, "HDFS", repositoryVersion);
     c1.addService(sfHDFS);
 
-    Service sfMR = serviceFactory.createNew(c1, "MAPREDUCE");
+    Service sfMR = serviceFactory.createNew(c1, "MAPREDUCE", repositoryVersion);
     c1.addService(sfMR);
 
     ServiceComponent scNN = serviceComponentFactory.createNew(sfHDFS, "NAMENODE");
@@ -863,10 +881,12 @@ public class ClusterTest {
   public void testGetServiceComponentHostMap_ForHost() throws Exception {
     createDefaultCluster();
 
-    Service sfHDFS = serviceFactory.createNew(c1, "HDFS");
+    RepositoryVersionEntity repositoryVersion = c1.getCurrentClusterVersion().getRepositoryVersion();
+
+    Service sfHDFS = serviceFactory.createNew(c1, "HDFS", repositoryVersion);
     c1.addService(sfHDFS);
 
-    Service sfMR = serviceFactory.createNew(c1, "MAPREDUCE");
+    Service sfMR = serviceFactory.createNew(c1, "MAPREDUCE", repositoryVersion);
     c1.addService(sfMR);
 
     ServiceComponent scNN = serviceComponentFactory.createNew(sfHDFS, "NAMENODE");
@@ -920,10 +940,12 @@ public class ClusterTest {
   public void testGetServiceComponentHostMap_ForHostAndService() throws Exception {
     createDefaultCluster();
 
-    Service sfHDFS = serviceFactory.createNew(c1, "HDFS");
+    RepositoryVersionEntity repositoryVersion = c1.getCurrentClusterVersion().getRepositoryVersion();
+
+    Service sfHDFS = serviceFactory.createNew(c1, "HDFS", repositoryVersion);
     c1.addService(sfHDFS);
 
-    Service sfMR = serviceFactory.createNew(c1, "MAPREDUCE");
+    Service sfMR = serviceFactory.createNew(c1, "MAPREDUCE", repositoryVersion);
     c1.addService(sfMR);
 
     ServiceComponent scNN = serviceComponentFactory.createNew(sfHDFS, "NAMENODE");
@@ -1104,9 +1126,11 @@ public class ClusterTest {
   public void testDeleteService() throws Exception {
     createDefaultCluster();
 
-    c1.addService("MAPREDUCE");
+    RepositoryVersionEntity repositoryVersion = c1.getCurrentClusterVersion().getRepositoryVersion();
 
-    Service hdfs = c1.addService("HDFS");
+    c1.addService("MAPREDUCE", repositoryVersion);
+
+    Service hdfs = c1.addService("HDFS", repositoryVersion);
     ServiceComponent nameNode = hdfs.addServiceComponent("NAMENODE");
 
     assertEquals(2, c1.getServices().size());
@@ -1124,7 +1148,9 @@ public class ClusterTest {
   public void testDeleteServiceWithConfigHistory() throws Exception {
     createDefaultCluster();
 
-    c1.addService("HDFS");
+    RepositoryVersionEntity repositoryVersion = c1.getCurrentClusterVersion().getRepositoryVersion();
+
+    c1.addService("HDFS", repositoryVersion);
 
     Config config1 = configFactory.createNew(c1, "hdfs-site", "version1",
       new HashMap<String, String>() {{ put("a", "b"); }}, new HashMap<String, Map<String,String>>());
@@ -1694,8 +1720,10 @@ public class ClusterTest {
 
     assertTrue(checked);
 
+    RepositoryVersionEntity repositoryVersion = c1.getCurrentClusterVersion().getRepositoryVersion();
+
     // add some host components
-    Service hdfs = serviceFactory.createNew(c1, "HDFS");
+    Service hdfs = serviceFactory.createNew(c1, "HDFS", repositoryVersion);
     c1.addService(hdfs);
 
     // Add HDFS components
@@ -1997,7 +2025,7 @@ public class ClusterTest {
     hostAttributes.put("os_family", "redhat");
     hostAttributes.put("os_release_version", "6.4");
 
-    Cluster cluster = createClusterForRU(clusterName, stackId, hostAttributes);
+    Cluster cluster = createClusterForRU(clusterName, rv1, hostAttributes);
 
     // Begin install by starting to advertise versions
     // Set the version for the HostComponentState objects
@@ -2154,7 +2182,7 @@ public class ClusterTest {
     hostAttributes.put("os_family", "redhat");
     hostAttributes.put("os_release_version", "6.4");
 
-    Cluster cluster = createClusterForRU(clusterName, stackId, hostAttributes);
+    Cluster cluster = createClusterForRU(clusterName, rv1, hostAttributes);
 
     // Make one host unhealthy
     Host deadHost = cluster.getHosts().iterator().next();
@@ -2233,10 +2261,8 @@ public class ClusterTest {
     String v1 = "2.0.5-1";
     String v2 = "2.0.5-2";
     c1.setDesiredStackVersion(stackId);
-    RepositoryVersionEntity rve1 = helper.getOrCreateRepositoryVersion(stackId,
-        v1);
-    RepositoryVersionEntity rve2 = helper.getOrCreateRepositoryVersion(stackId,
-        v2);
+    RepositoryVersionEntity rve1 = helper.getOrCreateRepositoryVersion(stackId, v1);
+    RepositoryVersionEntity rve2 = helper.getOrCreateRepositoryVersion(stackId, v2);
 
     c1.setCurrentStackVersion(stackId);
     c1.createClusterVersion(stackId, v1, "admin",
@@ -2248,12 +2274,14 @@ public class ClusterTest {
     clusters.mapHostToCluster("h-3", clusterName);
     ClusterVersionDAOMock.failOnCurrentVersionState = false;
 
-    Service service = c1.addService("ZOOKEEPER");
+    RepositoryVersionEntity repositoryVersion = c1.getCurrentClusterVersion().getRepositoryVersion();
+
+    Service service = c1.addService("ZOOKEEPER", repositoryVersion);
     ServiceComponent sc = service.addServiceComponent("ZOOKEEPER_SERVER");
     sc.addServiceComponentHost("h-1");
     sc.addServiceComponentHost("h-2");
 
-    service = c1.addService("SQOOP");
+    service = c1.addService("SQOOP", repositoryVersion);
     sc = service.addServiceComponent("SQOOP");
     sc.addServiceComponentHost("h-3");
 
@@ -2317,7 +2345,9 @@ public class ClusterTest {
 
     ClusterVersionDAOMock.failOnCurrentVersionState = false;
 
-    Service service = c1.addService("ZOOKEEPER");
+    RepositoryVersionEntity repositoryVersion = c1.getCurrentClusterVersion().getRepositoryVersion();
+
+    Service service = c1.addService("ZOOKEEPER", repositoryVersion);
     ServiceComponent sc = service.addServiceComponent("ZOOKEEPER_SERVER");
     sc.addServiceComponentHost("h-1");
     sc.addServiceComponentHost("h-2");

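The test updates above all follow the same pattern: look up the cluster's current repository version once and hand it to every service the test creates. A condensed sketch of that pattern, using only calls that appear in the hunks above (the surrounding fixture -- c1, serviceFactory -- is assumed):

    RepositoryVersionEntity repositoryVersion =
        c1.getCurrentClusterVersion().getRepositoryVersion();

    // Services are now created against an explicit repository version
    // rather than implicitly inheriting the cluster's desired stack.
    Service hdfs = serviceFactory.createNew(c1, "HDFS", repositoryVersion);
    c1.addService(hdfs);

    // The Cluster convenience overload takes the same extra argument.
    Service zookeeper = c1.addService("ZOOKEEPER", repositoryVersion);
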
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersDeadlockTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersDeadlockTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersDeadlockTest.java
index 1a112d6..801f3a7 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersDeadlockTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersDeadlockTest.java
@@ -34,6 +34,7 @@ import org.apache.ambari.server.events.listeners.upgrade.HostVersionOutOfSyncLis
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.orm.OrmTestHelper;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Host;
@@ -75,7 +76,8 @@ public class ClustersDeadlockTest {
   private CountDownLatch writerStoppedSignal;
   private CountDownLatch readerStoppedSignal;
 
-  private final StackId stackId = new StackId("HDP-0.1");
+  private StackId stackId = new StackId("HDP-0.1");
+  private String REPO_VERSION = "0.1-1234";
 
   @Inject
   private Injector injector;
@@ -381,10 +383,13 @@ public class ClustersDeadlockTest {
   private Service installService(String serviceName) throws AmbariException {
     Service service = null;
 
+    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(
+        stackId, REPO_VERSION);
+
     try {
       service = cluster.getService(serviceName);
     } catch (ServiceNotFoundException e) {
-      service = serviceFactory.createNew(cluster, serviceName);
+      service = serviceFactory.createNew(cluster, serviceName, repositoryVersion);
       cluster.addService(service);
     }
 
@@ -418,8 +423,6 @@ public class ClustersDeadlockTest {
     sc.addServiceComponentHost(sch);
     sch.setDesiredState(State.INSTALLED);
     sch.setState(State.INSTALLED);
-    sch.setDesiredStackVersion(stackId);
-    sch.setStackVersion(stackId);
 
     return sch;
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
index c6cef26..43e9737 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
@@ -57,6 +57,7 @@ import org.apache.ambari.server.orm.dao.HostDAO;
 import org.apache.ambari.server.orm.dao.TopologyRequestDAO;
 import org.apache.ambari.server.orm.entities.ClusterStateEntity;
 import org.apache.ambari.server.orm.entities.HostEntity;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.state.AgentVersion;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
@@ -399,9 +400,13 @@ public class ClustersTest {
 
     cluster.setDesiredStackVersion(stackId);
     cluster.setCurrentStackVersion(stackId);
-    helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
+
+    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(stackId,
+        stackId.getStackVersion());
+
     cluster.createClusterVersion(stackId, stackId.getStackVersion(), "admin",
         RepositoryVersionState.INSTALLING);
+
     cluster.transitionClusterVersion(stackId, stackId.getStackVersion(),
         RepositoryVersionState.CURRENT);
 
@@ -422,7 +427,7 @@ public class ClustersTest {
     clusters.addHost(h2);
 
     Host host1 = clusters.getHost(h1);
-    Host host2 = clusters.getHost(h2);
+
     setOsFamily(clusters.getHost(h1), "centos", "5.9");
     setOsFamily(clusters.getHost(h2), "centos", "5.9");
 
@@ -435,7 +440,7 @@ public class ClustersTest {
     // host config override
     host1.addDesiredConfig(cluster.getClusterId(), true, "_test", config2);
 
-    Service hdfs = cluster.addService("HDFS");
+    Service hdfs = cluster.addService("HDFS", repositoryVersion);
 
     Assert.assertNotNull(injector.getInstance(ClusterServiceDAO.class).findByClusterAndServiceNames(c1, "HDFS"));
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java
index 46a039d..4d06f60 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java
@@ -34,6 +34,7 @@ import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.orm.OrmTestHelper;
 import org.apache.ambari.server.orm.dao.ServiceConfigDAO;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Host;
@@ -95,6 +96,8 @@ public class ConcurrentServiceConfigVersionTest {
    */
   private Cluster cluster;
 
+  private RepositoryVersionEntity repositoryVersion;
+
   /**
    * Creates a cluster and installs HDFS with NN and DN.
    *
@@ -109,7 +112,7 @@ public class ConcurrentServiceConfigVersionTest {
     injector.injectMembers(this);
     clusters.addCluster("c1", stackId);
     cluster = clusters.getCluster("c1");
-    helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
+    repositoryVersion = helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
     cluster.createClusterVersion(stackId,
         stackId.getStackVersion(), "admin", RepositoryVersionState.INSTALLING);
 
@@ -210,8 +213,6 @@ public class ConcurrentServiceConfigVersionTest {
     sc.addServiceComponentHost(sch);
     sch.setDesiredState(State.INSTALLED);
     sch.setState(State.INSTALLED);
-    sch.setDesiredStackVersion(stackId);
-    sch.setStackVersion(stackId);
 
     return sch;
   }
@@ -222,7 +223,7 @@ public class ConcurrentServiceConfigVersionTest {
     try {
       service = cluster.getService(serviceName);
     } catch (ServiceNotFoundException e) {
-      service = serviceFactory.createNew(cluster, serviceName);
+      service = serviceFactory.createNew(cluster, serviceName, repositoryVersion);
       cluster.addService(service);
     }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
index 8f37ad7..8cd00ce 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
@@ -34,6 +34,7 @@ import org.apache.ambari.server.events.listeners.upgrade.HostVersionOutOfSyncLis
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.orm.OrmTestHelper;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
@@ -91,6 +92,8 @@ public class ServiceComponentHostConcurrentWriteDeadlockTest {
   private OrmTestHelper helper;
 
   private StackId stackId = new StackId("HDP-0.1");
+  private final String REPO_VERSION = "0.1-1234";
+  private RepositoryVersionEntity m_repositoryVersion;
 
   /**
    * The cluster.
@@ -111,9 +114,8 @@ public class ServiceComponentHostConcurrentWriteDeadlockTest {
     injector.injectMembers(this);
     clusters.addCluster("c1", stackId);
     cluster = clusters.getCluster("c1");
-    helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
-    cluster.createClusterVersion(stackId,
-        stackId.getStackVersion(), "admin", RepositoryVersionState.INSTALLING);
+    m_repositoryVersion = helper.getOrCreateRepositoryVersion(stackId, REPO_VERSION);
+    cluster.createClusterVersion(stackId, REPO_VERSION, "admin", RepositoryVersionState.INSTALLING);
 
     Config config1 = configFactory.createNew(cluster, "test-type1", null, new HashMap<String, String>(), new HashMap<String,
         Map<String, String>>());
@@ -224,14 +226,12 @@ public class ServiceComponentHostConcurrentWriteDeadlockTest {
     Service s = installService(svc);
     ServiceComponent sc = addServiceComponent(s, svcComponent);
 
-    ServiceComponentHost sch = serviceComponentHostFactory.createNew(sc,
-        hostName);
+    ServiceComponentHost sch = serviceComponentHostFactory.createNew(sc, hostName);
 
     sc.addServiceComponentHost(sch);
     sch.setDesiredState(State.INSTALLED);
     sch.setState(State.INSTALLED);
-    sch.setDesiredStackVersion(stackId);
-    sch.setStackVersion(stackId);
+    sch.setVersion(REPO_VERSION);
 
     return sch;
   }
@@ -242,7 +242,7 @@ public class ServiceComponentHostConcurrentWriteDeadlockTest {
     try {
       service = cluster.getService(serviceName);
     } catch (ServiceNotFoundException e) {
-      service = serviceFactory.createNew(cluster, serviceName);
+      service = serviceFactory.createNew(cluster, serviceName, m_repositoryVersion);
       cluster.addService(service);
     }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
index 6db820b..3a80ca7 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
@@ -40,9 +40,11 @@ import org.apache.ambari.server.orm.dao.HostComponentDesiredStateDAO;
 import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
 import org.apache.ambari.server.orm.dao.HostDAO;
 import org.apache.ambari.server.orm.entities.ClusterEntity;
+import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
 import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
 import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
 import org.apache.ambari.server.orm.entities.HostEntity;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
@@ -107,6 +109,7 @@ public class ServiceComponentHostTest {
   private String clusterName = "c1";
   private String hostName1 = "h1";
   private Map<String, String> hostAttributes = new HashMap<>();
+  private RepositoryVersionEntity repositoryVersion;
 
 
   @Before
@@ -115,7 +118,7 @@ public class ServiceComponentHostTest {
     injector.getInstance(GuiceJpaInitializer.class);
     injector.injectMembers(this);
 
-    StackId stackId = new StackId("HDP-0.1");
+    StackId stackId = new StackId("HDP-2.0.6");
     createCluster(stackId, clusterName);
     hostAttributes.put("os_family", "redhat");
     hostAttributes.put("os_release_version", "5.9");
@@ -126,8 +129,10 @@ public class ServiceComponentHostTest {
 
     Cluster c1 = clusters.getCluster(clusterName);
     helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
-    c1.createClusterVersion(stackId, stackId.getStackVersion(), "admin",
-            RepositoryVersionState.INSTALLING);
+    ClusterVersionEntity clusterVersion = c1.createClusterVersion(stackId,
+        stackId.getStackVersion(), "admin", RepositoryVersionState.INSTALLING);
+
+    repositoryVersion = clusterVersion.getRepositoryVersion();
   }
 
   @After
@@ -180,7 +185,8 @@ public class ServiceComponentHostTest {
     } catch (ServiceNotFoundException e) {
       LOG.debug("Calling service create"
           + ", serviceName=" + svc);
-      s = serviceFactory.createNew(c, svc);
+
+      s = serviceFactory.createNew(c, svc, repositoryVersion);
       c.addService(s);
     }
 
@@ -207,10 +213,7 @@ public class ServiceComponentHostTest {
 
     Assert.assertNotNull(c.getServiceComponentHosts(hostName));
 
-    Assert.assertFalse(
-        impl.getDesiredStackVersion().getStackId().isEmpty());
-
-    Assert.assertFalse(impl.getStackVersion().getStackId().isEmpty());
+    Assert.assertNotNull(sc.getDesiredRepositoryVersion());
 
     return impl;
   }
@@ -229,7 +232,7 @@ public class ServiceComponentHostTest {
       case HOST_SVCCOMP_INSTALL:
         return new ServiceComponentHostInstallEvent(
             impl.getServiceComponentName(), impl.getHostName(), timestamp,
-            impl.getDesiredStackVersion().getStackId());
+            impl.getServiceComponent().getDesiredStackVersion().toString());
       case HOST_SVCCOMP_START:
         return new ServiceComponentHostStartEvent(
             impl.getServiceComponentName(), impl.getHostName(), timestamp);
@@ -289,9 +292,7 @@ public class ServiceComponentHostTest {
     Assert.assertEquals(inProgressState,
         impl.getState());
     if (checkStack) {
-      Assert.assertNotNull(impl.getStackVersion());
-      Assert.assertEquals(impl.getDesiredStackVersion().getStackId(),
-          impl.getStackVersion().getStackId());
+      Assert.assertNotNull(impl.getServiceComponent().getDesiredStackVersion());
     }
 
     ServiceComponentHostEvent installEvent2 = createEvent(impl, ++timestamp,
@@ -537,15 +538,9 @@ public class ServiceComponentHostTest {
     ServiceComponentHost sch = createNewServiceComponentHost(clusterName, "HDFS", "NAMENODE", hostName1, false);
     sch.setDesiredState(State.INSTALLED);
     sch.setState(State.INSTALLING);
-    sch.setStackVersion(new StackId("HDP-1.2.0"));
-    sch.setDesiredStackVersion(new StackId("HDP-1.2.0"));
 
     Assert.assertEquals(State.INSTALLING, sch.getState());
     Assert.assertEquals(State.INSTALLED, sch.getDesiredState());
-    Assert.assertEquals("HDP-1.2.0",
-        sch.getStackVersion().getStackId());
-    Assert.assertEquals("HDP-1.2.0",
-            sch.getDesiredStackVersion().getStackId());
   }
 
   @Test
@@ -553,8 +548,6 @@ public class ServiceComponentHostTest {
     ServiceComponentHost sch = createNewServiceComponentHost(clusterName, "HDFS", "NAMENODE", hostName1, false);
     sch.setDesiredState(State.INSTALLED);
     sch.setState(State.INSTALLING);
-    sch.setStackVersion(new StackId("HDP-1.2.0"));
-    sch.setDesiredStackVersion(new StackId("HDP-1.2.0"));
 
     Cluster cluster = clusters.getCluster(clusterName);
 
@@ -585,7 +578,6 @@ public class ServiceComponentHostTest {
     ServiceComponentHost sch = createNewServiceComponentHost(clusterName, "HDFS", "DATANODE", hostName1, false);
     sch.setDesiredState(State.INSTALLED);
     sch.setState(State.INSTALLING);
-    sch.setStackVersion(new StackId("HDP-1.2.0"));
     ServiceComponentHostResponse r = sch.convertToResponse(null);
     Assert.assertEquals("HDFS", r.getServiceName());
     Assert.assertEquals("DATANODE", r.getComponentName());
@@ -593,7 +585,7 @@ public class ServiceComponentHostTest {
     Assert.assertEquals(clusterName, r.getClusterName());
     Assert.assertEquals(State.INSTALLED.toString(), r.getDesiredState());
     Assert.assertEquals(State.INSTALLING.toString(), r.getLiveState());
-    Assert.assertEquals("HDP-1.2.0", r.getStackVersion());
+    Assert.assertEquals(repositoryVersion.getStackId().toString(), r.getDesiredStackVersion());
 
     Assert.assertFalse(r.isStaleConfig());
 
@@ -727,24 +719,25 @@ public class ServiceComponentHostTest {
     Assert.assertNotNull(cluster);
 
     helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
-    cluster.createClusterVersion(stackId, stackId.getStackVersion(), "admin",
-        RepositoryVersionState.INSTALLING);
+    ClusterVersionEntity clusterVersion = cluster.createClusterVersion(stackId,
+        stackId.getStackVersion(), "admin", RepositoryVersionState.INSTALLING);
+
+    RepositoryVersionEntity repositoryVersion = clusterVersion.getRepositoryVersion();
 
     ServiceComponentHost sch1 = createNewServiceComponentHost(cluster, "HDFS", "NAMENODE", hostName);
     ServiceComponentHost sch2 = createNewServiceComponentHost(cluster, "HDFS", "DATANODE", hostName);
     ServiceComponentHost sch3 = createNewServiceComponentHost(cluster, "MAPREDUCE2", "HISTORYSERVER", hostName);
 
+    sch1.getServiceComponent().setDesiredRepositoryVersion(repositoryVersion);
+
     sch1.setDesiredState(State.INSTALLED);
     sch1.setState(State.INSTALLING);
-    sch1.setStackVersion(new StackId(stackVersion));
 
     sch2.setDesiredState(State.INSTALLED);
     sch2.setState(State.INSTALLING);
-    sch2.setStackVersion(new StackId(stackVersion));
 
     sch3.setDesiredState(State.INSTALLED);
     sch3.setState(State.INSTALLING);
-    sch3.setStackVersion(new StackId(stackVersion));
 
     Assert.assertFalse(sch1.convertToResponse(null).isStaleConfig());
     Assert.assertFalse(sch2.convertToResponse(null).isStaleConfig());
@@ -917,24 +910,26 @@ public class ServiceComponentHostTest {
     Cluster cluster = clusters.getCluster(clusterName);
 
     helper.getOrCreateRepositoryVersion(stackId, stackId.getStackVersion());
-    cluster.createClusterVersion(stackId, stackId.getStackVersion(), "admin",
+    ClusterVersionEntity clusterVersion = cluster.createClusterVersion(stackId,
+        stackId.getStackVersion(), "admin",
         RepositoryVersionState.INSTALLING);
 
+    RepositoryVersionEntity repositoryVersion = clusterVersion.getRepositoryVersion();
+
     ServiceComponentHost sch1 = createNewServiceComponentHost(cluster, "HDFS", "NAMENODE", hostName);
     ServiceComponentHost sch2 = createNewServiceComponentHost(cluster, "HDFS", "DATANODE", hostName);
     ServiceComponentHost sch3 = createNewServiceComponentHost(cluster, "MAPREDUCE2", "HISTORYSERVER", hostName);
 
+    sch1.getServiceComponent().setDesiredRepositoryVersion(repositoryVersion);
+
     sch1.setDesiredState(State.INSTALLED);
     sch1.setState(State.INSTALLING);
-    sch1.setStackVersion(new StackId(stackVersion));
 
     sch2.setDesiredState(State.INSTALLED);
     sch2.setState(State.INSTALLING);
-    sch2.setStackVersion(new StackId(stackVersion));
 
     sch3.setDesiredState(State.INSTALLED);
     sch3.setState(State.INSTALLING);
-    sch3.setStackVersion(new StackId(stackVersion));
 
     Assert.assertFalse(sch1.convertToResponse(null).isStaleConfig());
     Assert.assertFalse(sch2.convertToResponse(null).isStaleConfig());

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/upgrade/StackUpgradeUtilTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/StackUpgradeUtilTest.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/StackUpgradeUtilTest.java
deleted file mode 100644
index 3ebf4e7..0000000
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/StackUpgradeUtilTest.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.upgrade;
-
-import java.util.Collection;
-import java.util.List;
-import java.util.Map.Entry;
-
-import org.apache.ambari.server.H2DatabaseCleaner;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.orm.dao.MetainfoDAO;
-import org.apache.ambari.server.orm.entities.MetainfoEntity;
-import org.apache.ambari.server.state.RepositoryInfo;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-
-/**
- * Tests the StackUpgradeHelper
- */
-public class StackUpgradeUtilTest {
-
-  private Injector injector;
-
-  @Before
-  public void setup() throws Exception {
-    injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    injector.getInstance(GuiceJpaInitializer.class);
-  }
-
-  @After
-  public void teardown() throws Exception {
-    H2DatabaseCleaner.clearDatabaseAndStopPersistenceService(injector);
-  }
-
-  private void reset(String stackName, String stackVersion) throws Exception {
-    AmbariMetaInfo ami = injector.getInstance(AmbariMetaInfo.class);
-
-    for (Entry<String, List<RepositoryInfo>> entry : ami.getRepository(stackName, stackVersion).entrySet()) {
-      for (RepositoryInfo ri : entry.getValue()) {
-        if (-1 == ri.getRepoId().indexOf("epel")) {
-          ami.updateRepo(stackName, stackVersion,
-              ri.getOsType(), ri.getRepoId(), "", null);
-        }
-      }
-    }
-
-  }
-
-  @Test
-  public void testUpgradeStack() throws Exception {
-    StackUpgradeUtil stackUpgradeUtil = injector.getInstance(StackUpgradeUtil.class);
-
-    String stackName = "HDP";
-    String stackVersion = "1.3.0";
-    String localRepoUrl = "http://foo.bar";
-
-    // check updating all
-    stackUpgradeUtil.updateLocalRepo(stackName, stackVersion, localRepoUrl, null, null);
-
-    MetainfoDAO dao = injector.getInstance(MetainfoDAO.class);
-
-    Collection<MetainfoEntity> entities = dao.findAll();
-    Assert.assertTrue(entities.size() > 0);
-
-    for (MetainfoEntity entity : entities) {
-      Assert.assertTrue(entity.getMetainfoName().startsWith("repo:/HDP/1.3.0/"));
-      Assert.assertEquals(localRepoUrl, entity.getMetainfoValue());
-    }
-
-    reset (stackName, stackVersion);
-    entities = dao.findAll();
-    Assert.assertEquals(0, entities.size());
-
-    // check updating only centos6
-    stackUpgradeUtil.updateLocalRepo(stackName, stackVersion, localRepoUrl, "centos6", null);
-
-    entities = dao.findAll();
-    for (MetainfoEntity entity : entities) {
-      Assert.assertTrue(entity.getMetainfoName().startsWith("repo:/HDP/1.3.0/centos6") ||
-          entity.getMetainfoName().startsWith("repo:/HDP/1.3.0/redhat6"));
-      Assert.assertEquals(localRepoUrl, entity.getMetainfoValue());
-    }
-
-    reset (stackName, stackVersion);
-    entities = dao.findAll();
-    Assert.assertTrue(0 == entities.size());
-
-    // check updating only centos6 and centos5
-    stackUpgradeUtil.updateLocalRepo(stackName, stackVersion, localRepoUrl, "centos6,centos5", null);
-
-    entities = dao.findAll();
-    for (MetainfoEntity entity : entities) {
-      Assert.assertTrue(entity.getMetainfoName().startsWith("repo:/HDP/1.3.0/centos6") ||
-          entity.getMetainfoName().startsWith("repo:/HDP/1.3.0/redhat6") ||
-          entity.getMetainfoName().startsWith("repo:/HDP/1.3.0/redhat5") ||
-          entity.getMetainfoName().startsWith("repo:/HDP/1.3.0/centos5"));
-      Assert.assertEquals(localRepoUrl, entity.getMetainfoValue());
-    }
-
-    // verify that a change to centos6 also changes redhat6
-    localRepoUrl = "http://newfoo.bar";
-    stackUpgradeUtil.updateLocalRepo(stackName, stackVersion, localRepoUrl, "centos6", null);
-    entities = dao.findAll();
-    boolean foundCentos6 = false;
-    boolean foundRedhat6 = false;
-    for (MetainfoEntity entity : entities) {
-      if (-1 != entity.getMetainfoName().indexOf("centos6")) {
-        foundCentos6 = true;
-        Assert.assertEquals(localRepoUrl, entity.getMetainfoValue());
-      } else if (-1 != entity.getMetainfoName().indexOf("redhat6")) {
-        foundRedhat6 = true;
-        Assert.assertEquals(localRepoUrl, entity.getMetainfoValue());
-      } else {
-        Assert.assertFalse(localRepoUrl.equals(entity.getMetainfoValue()));
-      }
-    }
-    Assert.assertTrue(foundCentos6);
-    Assert.assertTrue(foundRedhat6);
-
-  }
-
-
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog200Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog200Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog200Test.java
index c209671..3d1cdfc 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog200Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog200Test.java
@@ -63,6 +63,7 @@ import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.orm.dao.ClusterServiceDAO;
 import org.apache.ambari.server.orm.dao.HostComponentDesiredStateDAO;
 import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
+import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
 import org.apache.ambari.server.orm.dao.ServiceComponentDesiredStateDAO;
 import org.apache.ambari.server.orm.dao.StackDAO;
 import org.apache.ambari.server.orm.entities.ClusterEntity;
@@ -70,6 +71,7 @@ import org.apache.ambari.server.orm.entities.ClusterServiceEntity;
 import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
 import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
 import org.apache.ambari.server.orm.entities.HostEntity;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity;
 import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.state.Cluster;
@@ -107,6 +109,7 @@ public class UpgradeCatalog200Test {
   private final String HOST_NAME = "h1";
 
   private final StackId DESIRED_STACK = new StackId("HDP", "2.0.6");
+  private final String DESIRED_REPO_VERSION = "2.0.6-1234";
 
   private Injector injector;
   private Provider<EntityManager> entityManagerProvider = createStrictMock(Provider.class);
@@ -627,10 +630,14 @@ public class UpgradeCatalog200Test {
     assertNotNull(stackEntity);
 
     final ClusterEntity clusterEntity = upgradeCatalogHelper.createCluster(
-        injector, CLUSTER_NAME, stackEntity);
+        injector, CLUSTER_NAME, stackEntity, DESIRED_REPO_VERSION);
+
+    RepositoryVersionDAO repositoryVersionDAO = injector.getInstance(RepositoryVersionDAO.class);
+    RepositoryVersionEntity repositoryVersion = repositoryVersionDAO.findByStackAndVersion(
+        stackEntity, DESIRED_REPO_VERSION);
 
     final ClusterServiceEntity clusterServiceEntityNagios = upgradeCatalogHelper.addService(
-        injector, clusterEntity, "NAGIOS", stackEntity);
+        injector, clusterEntity, "NAGIOS", repositoryVersion);
 
     final HostEntity hostEntity = upgradeCatalogHelper.createHost(injector,
         clusterEntity, HOST_NAME);

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java
index c40eac6..4ed7685 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java
@@ -109,6 +109,7 @@ public class UpgradeCatalog210Test {
   private EntityManager entityManager = createNiceMock(EntityManager.class);
   private UpgradeCatalogHelper upgradeCatalogHelper;
   private StackEntity desiredStackEntity;
+  private String desiredRepositoryVersion = "2.2.0-1234";
 
   public void initData() {
     //reset(entityManagerProvider);
@@ -805,9 +806,11 @@ public class UpgradeCatalog210Test {
   public void testDeleteStormRestApiServiceComponent() throws Exception {
     initData();
     ClusterEntity clusterEntity = upgradeCatalogHelper.createCluster(injector,
-      "c1", desiredStackEntity);
+        "c1", desiredStackEntity, desiredRepositoryVersion);
+
     ClusterServiceEntity clusterServiceEntity = upgradeCatalogHelper.createService(
         injector, clusterEntity, "STORM");
+
     HostEntity hostEntity = upgradeCatalogHelper.createHost(injector,
         clusterEntity, "h1");
 
@@ -827,7 +830,6 @@ public class UpgradeCatalog210Test {
     componentDesiredStateEntity.setServiceName(clusterServiceEntity.getServiceName());
     componentDesiredStateEntity.setClusterServiceEntity(clusterServiceEntity);
     componentDesiredStateEntity.setComponentName("STORM_REST_API");
-    componentDesiredStateEntity.setDesiredStack(desiredStackEntity);
 
     ServiceComponentDesiredStateDAO componentDesiredStateDAO =
       injector.getInstance(ServiceComponentDesiredStateDAO.class);
@@ -845,7 +847,6 @@ public class UpgradeCatalog210Test {
     hostComponentDesiredStateEntity.setServiceName(clusterServiceEntity.getServiceName());
     hostComponentDesiredStateEntity.setServiceComponentDesiredStateEntity(componentDesiredStateEntity);
     hostComponentDesiredStateEntity.setHostEntity(hostEntity);
-    hostComponentDesiredStateEntity.setDesiredStack(desiredStackEntity);
 
     hostComponentDesiredStateDAO.create(hostComponentDesiredStateEntity);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog220Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog220Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog220Test.java
index 4413ca8..8b78479 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog220Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog220Test.java
@@ -1211,7 +1211,6 @@ public class UpgradeCatalog220Test {
     DBAccessor mockedDbAccessor = mocksControl.createMock(DBAccessor.class);
     DaoUtils mockedDaoUtils = mocksControl.createMock(DaoUtils.class);
     Configuration mockedConfiguration = mocksControl.createMock(Configuration.class);
-    StackUpgradeUtil mockedStackUpgradeUtil = mocksControl.createMock(StackUpgradeUtil.class);
 
     Capture<String> capturedTableName = EasyMock.newCapture();
     Capture<String> capturedPKColumn = EasyMock.newCapture();
@@ -1255,7 +1254,6 @@ public class UpgradeCatalog220Test {
     EasyMockSupport.injectMocks(testSubject);
 
     //todo refactor the DI approach, don't directly access these members!!!
-    testSubject.stackUpgradeUtil = mockedStackUpgradeUtil;
     testSubject.dbAccessor = mockedDbAccessor;
     testSubject.configuration = mockedConfiguration;
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalogHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalogHelper.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalogHelper.java
index 4c11d10..2cf0321 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalogHelper.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalogHelper.java
@@ -19,6 +19,7 @@ package org.apache.ambari.server.upgrade;
 
 import java.util.Collections;
 
+import org.apache.ambari.server.orm.OrmTestHelper;
 import org.apache.ambari.server.orm.dao.ClusterDAO;
 import org.apache.ambari.server.orm.dao.ClusterServiceDAO;
 import org.apache.ambari.server.orm.dao.HostComponentDesiredStateDAO;
@@ -30,6 +31,7 @@ import org.apache.ambari.server.orm.entities.ClusterServiceEntity;
 import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
 import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
 import org.apache.ambari.server.orm.entities.HostEntity;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.orm.entities.ResourceEntity;
 import org.apache.ambari.server.orm.entities.ResourceTypeEntity;
 import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity;
@@ -37,6 +39,7 @@ import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntity;
 import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.security.authorization.ResourceType;
 import org.apache.ambari.server.state.HostComponentAdminState;
+import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.State;
 
 import com.google.inject.Injector;
@@ -49,7 +52,8 @@ import com.google.inject.persist.Transactional;
 public class UpgradeCatalogHelper {
 
   /**
-   * Creates a cluster with the specified name and stack.
+   * Creates a cluster with the specified name and stack as well as the
+   * repository version.
    *
    * @param injector
    * @param clusterName
@@ -57,7 +61,7 @@ public class UpgradeCatalogHelper {
    * @return
    */
   protected ClusterEntity createCluster(Injector injector, String clusterName,
-      StackEntity desiredStackEntity) {
+      StackEntity desiredStackEntity, String repositoryVersion) {
     ResourceTypeDAO resourceTypeDAO = injector.getInstance(ResourceTypeDAO.class);
 
     // create an admin resource to represent this cluster
@@ -81,6 +85,12 @@ public class UpgradeCatalogHelper {
     clusterEntity.setResource(resourceEntity);
 
     clusterDAO.create(clusterEntity);
+
+    OrmTestHelper ormTestHelper = injector.getInstance(OrmTestHelper.class);
+    ormTestHelper.getOrCreateRepositoryVersion(
+        new StackId(desiredStackEntity.getStackName(), desiredStackEntity.getStackVersion()),
+        repositoryVersion);
+
     return clusterEntity;
   }
 
@@ -110,19 +120,19 @@ public class UpgradeCatalogHelper {
    * @param injector
    * @param clusterEntity
    * @param serviceName
-   * @param desiredStackEntity
+   * @param desiredRepositoryVersion
    * @return
    */
   protected ClusterServiceEntity addService(Injector injector,
       ClusterEntity clusterEntity, String serviceName,
-      StackEntity desiredStackEntity) {
+      RepositoryVersionEntity desiredRepositoryVersion) {
     ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
 
     ClusterServiceEntity clusterServiceEntity = createService(injector,
         clusterEntity, serviceName);
 
     ServiceDesiredStateEntity serviceDesiredStateEntity = new ServiceDesiredStateEntity();
-    serviceDesiredStateEntity.setDesiredStack(desiredStackEntity);
+    serviceDesiredStateEntity.setDesiredRepositoryVersion(desiredRepositoryVersion);
     serviceDesiredStateEntity.setClusterId(1L);
     serviceDesiredStateEntity.setServiceName(serviceName);
     serviceDesiredStateEntity.setClusterServiceEntity(clusterServiceEntity);
@@ -177,7 +187,6 @@ public class UpgradeCatalogHelper {
     componentDesiredStateEntity.setClusterServiceEntity(clusterServiceEntity);
     componentDesiredStateEntity.setComponentName(componentName);
     componentDesiredStateEntity.setServiceName(clusterServiceEntity.getServiceName());
-    componentDesiredStateEntity.setDesiredStack(desiredStackEntity);
     componentDesiredStateEntity.setClusterServiceEntity(clusterServiceEntity);
     componentDesiredStateEntity.setClusterId(clusterServiceEntity.getClusterId());
     serviceComponentDesiredStateDAO.create(componentDesiredStateEntity);
@@ -190,7 +199,6 @@ public class UpgradeCatalogHelper {
     hostComponentDesiredStateEntity.setAdminState(HostComponentAdminState.INSERVICE);
     hostComponentDesiredStateEntity.setServiceComponentDesiredStateEntity(componentDesiredStateEntity);
     hostComponentDesiredStateEntity.setHostEntity(hostEntity);
-    hostComponentDesiredStateEntity.setDesiredStack(desiredStackEntity);
     hostComponentDesiredStateDAO.create(hostComponentDesiredStateEntity);
 
     HostComponentStateEntity hostComponentStateEntity = new HostComponentStateEntity();
@@ -198,9 +206,7 @@ public class UpgradeCatalogHelper {
     hostComponentStateEntity.setComponentName(componentName);
     hostComponentStateEntity.setServiceName(clusterServiceEntity.getServiceName());
     hostComponentStateEntity.setClusterId(clusterEntity.getClusterId());
-    hostComponentStateEntity.setCurrentStack(clusterEntity.getDesiredStack());
     hostComponentStateEntity.setServiceComponentDesiredStateEntity(componentDesiredStateEntity);
-    hostComponentStateEntity.setCurrentStack(desiredStackEntity);
 
     componentDesiredStateEntity.setHostComponentStateEntities(Collections.singletonList(hostComponentStateEntity));
     componentDesiredStateEntity.setHostComponentDesiredStateEntities(Collections.singletonList(hostComponentDesiredStateEntity));

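The same move happens at the entity level: UpgradeCatalogHelper no longer stamps a desired StackEntity onto the desired-state rows, it records a desired repository version instead. A minimal sketch of the new shape, limited to calls shown in this hunk (the injector and entity setup around it are assumed, and "2.0.6-1234" is just an example version string borrowed from the catalog tests):

    // The repository version is created (or fetched) through OrmTestHelper...
    OrmTestHelper ormTestHelper = injector.getInstance(OrmTestHelper.class);
    RepositoryVersionEntity desiredRepositoryVersion =
        ormTestHelper.getOrCreateRepositoryVersion(
            new StackId(desiredStackEntity.getStackName(), desiredStackEntity.getStackVersion()),
            "2.0.6-1234");

    // ...and replaces setDesiredStack() on the service's desired state.
    ServiceDesiredStateEntity serviceDesiredStateEntity = new ServiceDesiredStateEntity();
    serviceDesiredStateEntity.setDesiredRepositoryVersion(desiredRepositoryVersion);
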
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java b/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
index e9bd27c..40bab1d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
@@ -46,7 +46,6 @@ import java.util.TreeMap;
 import javax.persistence.EntityManager;
 import javax.xml.bind.JAXBException;
 
-import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper;
 import org.apache.ambari.server.actionmanager.ExecutionCommandWrapperFactory;
 import org.apache.ambari.server.actionmanager.HostRoleCommandFactory;
@@ -74,7 +73,6 @@ import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.ServiceComponentHostFactory;
-import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.cluster.ClusterFactory;
 import org.apache.ambari.server.state.host.HostFactory;
 import org.apache.ambari.server.state.stack.OsFamily;
@@ -140,29 +138,6 @@ public class StageUtilsTest extends EasyMockSupport {
     StageUtils.setConfiguration(injector.getInstance(Configuration.class));
   }
 
-
-  public static void addService(Cluster cl, List<String> hostList,
-                                Map<String, List<Integer>> topology, String serviceName,
-                                Injector injector) throws AmbariException {
-    ServiceComponentHostFactory serviceComponentHostFactory = injector.getInstance(ServiceComponentHostFactory.class);
-
-    cl.setDesiredStackVersion(new StackId(STACK_ID));
-    cl.addService(serviceName);
-
-    for (Entry<String, List<Integer>> component : topology.entrySet()) {
-      String componentName = component.getKey();
-      cl.getService(serviceName).addServiceComponent(componentName);
-
-      for (Integer hostIndex : component.getValue()) {
-        cl.getService(serviceName)
-            .getServiceComponent(componentName)
-            .addServiceComponentHost(
-                serviceComponentHostFactory.createNew(cl.getService(serviceName)
-                    .getServiceComponent(componentName), hostList.get(hostIndex)));
-      }
-    }
-  }
-
   @Test
   public void testGetATestStage() {
     StageUtils stageUtils = new StageUtils(injector.getInstance(StageFactory.class));


[9/9] ambari git commit: AMBARI-20894 - Setting Desired Stack Ids to Correct Values During Service and Patch Upgrades (jonathanhurley)

Posted by jo...@apache.org.
AMBARI-20894 - Setting Desired Stack Ids to Correct Values During Service and Patch Upgrades (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/dcbd826c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/dcbd826c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/dcbd826c

Branch: refs/heads/branch-feature-AMBARI-12556
Commit: dcbd826c96b91c718caa519f44663b9a73f17da6
Parents: b686624
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Fri Apr 28 17:40:42 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Wed May 3 11:13:19 2017 -0400

----------------------------------------------------------------------
 .../libraries/script/script.py                  |    7 +
 .../ambari/server/agent/HeartbeatProcessor.java |   15 +-
 .../alerts/ComponentVersionAlertRunnable.java   |   45 +-
 .../checks/DatabaseConsistencyCheckHelper.java  |    7 +-
 .../AmbariManagementControllerImpl.java         |   11 +-
 .../controller/ServiceComponentHostRequest.java |   19 +-
 .../ServiceComponentHostResponse.java           |   39 +-
 .../server/controller/ServiceRequest.java       |   27 +-
 .../server/controller/ServiceResponse.java      |   38 +-
 .../ClusterStackVersionResourceProvider.java    |  130 --
 .../internal/ComponentResourceProvider.java     |    2 +-
 .../internal/HostComponentResourceProvider.java |   27 +-
 .../internal/ServiceResourceProvider.java       |   43 +-
 .../internal/UpgradeResourceProvider.java       |   84 +-
 .../DistributeRepositoriesActionListener.java   |   10 +-
 .../listeners/upgrade/StackVersionListener.java |    5 +-
 .../ambari/server/orm/dao/HostVersionDAO.java   |   24 +-
 .../HostComponentDesiredStateEntity.java        |   21 -
 .../orm/entities/HostComponentStateEntity.java  |   22 -
 .../server/orm/entities/HostVersionEntity.java  |   41 +-
 .../ServiceComponentDesiredStateEntity.java     |   79 +-
 .../orm/entities/ServiceDesiredStateEntity.java |   85 +-
 .../PrepareDisableKerberosServerAction.java     |    4 +-
 .../upgrades/AbstractUpgradeServerAction.java   |   42 +-
 .../upgrades/ComponentVersionCheckAction.java   |    7 +-
 .../upgrades/FinalizeUpgradeAction.java         |  330 ++---
 .../upgrades/UpdateDesiredStackAction.java      |  112 +-
 .../org/apache/ambari/server/state/Cluster.java |   28 +-
 .../org/apache/ambari/server/state/Service.java |   13 +-
 .../ambari/server/state/ServiceComponent.java   |   12 +-
 .../server/state/ServiceComponentHost.java      |   15 +-
 .../server/state/ServiceComponentImpl.java      |   42 +-
 .../ambari/server/state/ServiceFactory.java     |   25 +-
 .../apache/ambari/server/state/ServiceImpl.java |   73 +-
 .../ambari/server/state/UpgradeContext.java     |   38 +-
 .../ambari/server/state/UpgradeHelper.java      |   55 +-
 .../server/state/cluster/ClusterImpl.java       |  116 +-
 .../svccomphost/ServiceComponentHostImpl.java   |  101 +-
 .../ambari/server/topology/AmbariContext.java   |   11 +-
 .../server/upgrade/AbstractUpgradeCatalog.java  |    2 -
 .../server/upgrade/StackUpgradeHelper.java      |  171 ---
 .../ambari/server/upgrade/StackUpgradeUtil.java |  198 ---
 ambari-server/src/main/python/ambari-server.py  |    7 +-
 .../main/python/ambari_server/serverUpgrade.py  |  140 --
 .../main/python/ambari_server/setupActions.py   |    1 -
 .../main/resources/Ambari-DDL-Derby-CREATE.sql  |   13 +-
 .../main/resources/Ambari-DDL-MySQL-CREATE.sql  |   13 +-
 .../main/resources/Ambari-DDL-Oracle-CREATE.sql |   13 +-
 .../resources/Ambari-DDL-Postgres-CREATE.sql    |   14 +-
 .../resources/Ambari-DDL-SQLAnywhere-CREATE.sql |   13 +-
 .../resources/Ambari-DDL-SQLServer-CREATE.sql   |   13 +-
 .../src/main/resources/properties.json          |    6 +-
 .../server/agent/DummyHeartbeatConstants.java   |    1 +
 .../server/agent/HeartbeatProcessorTest.java    |  149 +--
 .../server/agent/HeartbeatTestHelper.java       |   16 +-
 .../server/agent/TestHeartbeatHandler.java      |   55 +-
 .../server/agent/TestHeartbeatMonitor.java      |   29 +-
 .../ComponentVersionAlertRunnableTest.java      |   28 +-
 .../resources/BaseResourceDefinitionTest.java   |   22 +-
 .../server/api/services/AmbariMetaInfoTest.java |   16 +-
 .../configuration/RecoveryConfigHelperTest.java |   50 +-
 .../AmbariCustomCommandExecutionHelperTest.java |   45 +-
 .../AmbariManagementControllerTest.java         | 1252 +++++-------------
 .../BackgroundCustomCommandExecutionTest.java   |   24 +-
 .../server/controller/KerberosHelperTest.java   |   20 -
 ...hYarnCapacitySchedulerReleaseConfigTest.java |   74 +-
 .../AbstractControllerResourceProviderTest.java |    9 +-
 .../internal/AbstractResourceProviderTest.java  |   31 +-
 .../ClientConfigResourceProviderTest.java       |    8 +-
 ...ClusterStackVersionResourceProviderTest.java |  319 -----
 .../HostComponentResourceProviderTest.java      |   45 +-
 .../internal/HostResourceProviderTest.java      |   26 +-
 .../internal/JMXHostProviderTest.java           |   34 +-
 .../controller/internal/RequestImplTest.java    |    3 +-
 .../internal/ServiceResourceProviderTest.java   |   65 +-
 .../StackUpgradeConfigurationMergeTest.java     |   20 +-
 .../UpgradeResourceProviderHDP22Test.java       |   13 +-
 .../internal/UpgradeResourceProviderTest.java   |   69 +-
 .../UpgradeSummaryResourceProviderTest.java     |    3 +-
 .../GeneralServiceCalculatedStateTest.java      |   11 +-
 .../apache/ambari/server/events/EventsTest.java |   17 +-
 .../HostVersionOutOfSyncListenerTest.java       |   34 +-
 .../upgrade/StackVersionListenerTest.java       |    4 -
 .../apache/ambari/server/orm/OrmTestHelper.java |   16 +-
 .../ComponentVersionCheckActionTest.java        |   75 +-
 .../upgrades/ConfigureActionTest.java           |   19 +-
 .../upgrades/UpgradeActionTest.java             |  131 +-
 .../server/state/ServiceComponentTest.java      |  124 +-
 .../apache/ambari/server/state/ServiceTest.java |   74 +-
 .../ambari/server/state/UpgradeHelperTest.java  |   98 +-
 .../state/alerts/AlertEventPublisherTest.java   |   11 +-
 .../state/alerts/InitialAlertEventTest.java     |   16 +-
 .../state/cluster/ClusterDeadlockTest.java      |    9 +-
 .../server/state/cluster/ClusterImplTest.java   |   15 +-
 .../server/state/cluster/ClusterTest.java       |  106 +-
 .../state/cluster/ClustersDeadlockTest.java     |   11 +-
 .../server/state/cluster/ClustersTest.java      |   11 +-
 .../ConcurrentServiceConfigVersionTest.java     |    9 +-
 ...omponentHostConcurrentWriteDeadlockTest.java |   16 +-
 .../svccomphost/ServiceComponentHostTest.java   |   57 +-
 .../server/upgrade/StackUpgradeUtilTest.java    |  145 --
 .../server/upgrade/UpgradeCatalog200Test.java   |   11 +-
 .../server/upgrade/UpgradeCatalog210Test.java   |    7 +-
 .../server/upgrade/UpgradeCatalog220Test.java   |    2 -
 .../server/upgrade/UpgradeCatalogHelper.java    |   24 +-
 .../ambari/server/utils/StageUtilsTest.java     |   25 -
 .../src/test/python/TestAmbariServer.py         |  166 +--
 .../src/test/python/TestServerUpgrade.py        |   35 +-
 108 files changed, 2148 insertions(+), 3966 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-common/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index da47351..f54a510 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -290,6 +290,13 @@ class Script(object):
     if OSCheck.is_windows_family():
       reload_windows_env()
 
+    # !!! status commands re-use structured output files; if the status command doesn't update
+    # the file (because it doesn't have to) then we must ensure that the file is reset to prevent
+    # old, stale structured output from a prior status command from being used
+    if self.command_name == "status":
+      Script.structuredOut = {}
+      self.put_structured_out({})
+
     try:
       with open(self.command_data_file) as f:
         pass

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java
index c1028dc..17e1f9c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatProcessor.java
@@ -375,8 +375,9 @@ public class HeartbeatProcessor extends AbstractService{
       }
 
       LOG.debug("Received command report: " + report);
+
+      // get this locally; don't touch the database
       Host host = clusterFsm.getHost(hostname);
-//      HostEntity hostEntity = hostDAO.findByName(hostname); //don't touch database
       if (host == null) {
         LOG.error("Received a command report and was unable to retrieve Host for hostname = " + hostname);
         continue;
@@ -473,7 +474,8 @@ public class HeartbeatProcessor extends AbstractService{
           if (report.getStatus().equals(HostRoleStatus.COMPLETED.toString())) {
 
             // Reading component version if it is present
-            if (StringUtils.isNotBlank(report.getStructuredOut())) {
+            if (StringUtils.isNotBlank(report.getStructuredOut())
+                && !StringUtils.equals("{}", report.getStructuredOut())) {
               ComponentVersionStructuredOut structuredOutput = null;
               try {
                 structuredOutput = gson.fromJson(report.getStructuredOut(), ComponentVersionStructuredOut.class);
@@ -491,10 +493,7 @@ public class HeartbeatProcessor extends AbstractService{
               versionEventPublisher.publish(event);
             }
 
-            // Updating stack version, if needed (this is not actually for express/rolling upgrades!)
-            if (scHost.getState().equals(org.apache.ambari.server.state.State.UPGRADING)) {
-              scHost.setStackVersion(scHost.getDesiredStackVersion());
-            } else if ((report.getRoleCommand().equals(RoleCommand.START.toString()) ||
+            if ((report.getRoleCommand().equals(RoleCommand.START.toString()) ||
                 (report.getRoleCommand().equals(RoleCommand.CUSTOM_COMMAND.toString()) &&
                     ("START".equals(report.getCustomCommand()) ||
                         "RESTART".equals(report.getCustomCommand()))))
@@ -616,10 +615,6 @@ public class HeartbeatProcessor extends AbstractService{
                 }
               }
 
-              if (null != status.getStackVersion() && !status.getStackVersion().isEmpty()) {
-                scHost.setStackVersion(gson.fromJson(status.getStackVersion(), StackId.class));
-              }
-
               if (null != status.getConfigTags()) {
                 scHost.updateActualConfigs(status.getConfigTags());
               }

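The server-side half of the structured-output fix in script.py above is the guard in HeartbeatProcessor: a report whose structured output is blank or the literal "{}" carries no component version, so nothing is parsed or published for it. Condensed from the hunk above (names as in the diff):

    if (StringUtils.isNotBlank(report.getStructuredOut())
        && !StringUtils.equals("{}", report.getStructuredOut())) {
      // Only a non-empty JSON payload can contain a reported component
      // version; "{}" is what the agent now writes for plain status commands.
      ComponentVersionStructuredOut structuredOutput =
          gson.fromJson(report.getStructuredOut(), ComponentVersionStructuredOut.class);
      // ... the parsed version is then published via versionEventPublisher
    }
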
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/alerts/ComponentVersionAlertRunnable.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/alerts/ComponentVersionAlertRunnable.java b/ambari-server/src/main/java/org/apache/ambari/server/alerts/ComponentVersionAlertRunnable.java
index 7dfbe47..d275eb2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/alerts/ComponentVersionAlertRunnable.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/alerts/ComponentVersionAlertRunnable.java
@@ -28,7 +28,6 @@ import java.util.TreeMap;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
-import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
 import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.orm.entities.UpgradeEntity;
 import org.apache.ambari.server.state.Alert;
@@ -36,9 +35,10 @@ import org.apache.ambari.server.state.AlertState;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.ComponentInfo;
 import org.apache.ambari.server.state.Host;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.state.State;
 import org.apache.commons.lang.StringUtils;
 
 import com.google.inject.Inject;
@@ -75,17 +75,6 @@ public class ComponentVersionAlertRunnable extends AlertRunnable {
    */
   private static final String MISMATCHED_VERSIONS_MSG = "The following components are reporting unexpected versions: ";
 
-  /**
-   * The message when there is no CURRENT cluster version, but the cluster is
-   * still being setup.
-   */
-  private static final String CLUSTER_PROVISIONING_MSG = "The cluster is currently being provisioned. This alert will be skipped.";
-
-  /**
-   * The message when there is no CURRENT cluster version.
-   */
-  private static final String CLUSTER_OUT_OF_SYNC_MSG = "The cluster's CURRENT version could not be determined.";
-
   @Inject
   private AmbariMetaInfo m_metaInfo;
 
@@ -102,7 +91,7 @@ public class ComponentVersionAlertRunnable extends AlertRunnable {
    * {@inheritDoc}
    */
   @Override
-  List<Alert> execute(Cluster cluster, AlertDefinitionEntity myDefinition) {
+  List<Alert> execute(Cluster cluster, AlertDefinitionEntity myDefinition) throws AmbariException {
     // if there is an upgrade in progress, then skip running this alert
     UpgradeEntity upgrade = cluster.getUpgradeInProgress();
     if (null != upgrade) {
@@ -115,27 +104,15 @@ public class ComponentVersionAlertRunnable extends AlertRunnable {
     TreeMap<Host, Set<ServiceComponentHost>> versionMismatches = new TreeMap<>();
     Collection<Host> hosts = cluster.getHosts();
 
-    // no cluster version is very bad ...
-    ClusterVersionEntity clusterVersionEntity = cluster.getCurrentClusterVersion();
-    if (null == clusterVersionEntity) {
-      if (cluster.getProvisioningState() == State.INIT
-          || cluster.getAllClusterVersions().size() == 1) {
-        return Collections.singletonList(
-            buildAlert(cluster, myDefinition, AlertState.SKIPPED, CLUSTER_PROVISIONING_MSG));
-      } else {
-        return Collections.singletonList(
-            buildAlert(cluster, myDefinition, AlertState.CRITICAL, CLUSTER_OUT_OF_SYNC_MSG));
-      }
-    }
-
-    RepositoryVersionEntity repositoryVersionEntity = clusterVersionEntity.getRepositoryVersion();
-    String clusterVersion = repositoryVersionEntity.getVersion();
-
     for (Host host : hosts) {
-      List<ServiceComponentHost> hostComponents = cluster.getServiceComponentHosts(
-          host.getHostName());
+      List<ServiceComponentHost> hostComponents = cluster.getServiceComponentHosts(host.getHostName());
       for (ServiceComponentHost hostComponent : hostComponents) {
-        StackId desiredStackId = hostComponent.getDesiredStackVersion();
+        Service service = cluster.getService(hostComponent.getServiceName());
+        ServiceComponent serviceComponent = service.getServiceComponent(hostComponent.getServiceComponentName());
+
+        RepositoryVersionEntity desiredRepositoryVersion = service.getDesiredRepositoryVersion();
+        StackId desiredStackId = serviceComponent.getDesiredStackVersion();
+        String desiredVersion = desiredRepositoryVersion.getVersion();
 
         final ComponentInfo componentInfo;
         try {
@@ -157,7 +134,7 @@ public class ComponentVersionAlertRunnable extends AlertRunnable {
         }
 
         String version = hostComponent.getVersion();
-        if (!StringUtils.equals(version, clusterVersion)) {
+        if (!StringUtils.equals(version, desiredVersion)) {
           Set<ServiceComponentHost> mismatchedComponents = versionMismatches.get(host);
           if (null == mismatchedComponents) {
             mismatchedComponents = new HashSet<>();

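The alert now derives its expected version per component from the parent service's desired repository instead of a single cluster-wide CURRENT version. A minimal sketch of that per-component comparison, as a fragment assuming the same surrounding loop and imports as the hunk above:

    // Sketch only: resolve the expected version for one host component.
    Service service = cluster.getService(hostComponent.getServiceName());
    ServiceComponent serviceComponent =
        service.getServiceComponent(hostComponent.getServiceComponentName());

    // the desired version comes from the service's repository; the desired stack from the component
    String desiredVersion = service.getDesiredRepositoryVersion().getVersion();
    StackId desiredStackId = serviceComponent.getDesiredStackVersion();

    // a component whose reported version differs from the desired version is flagged;
    // desiredStackId feeds the ComponentInfo lookup shown later in the hunk
    boolean mismatched = !StringUtils.equals(hostComponent.getVersion(), desiredVersion);
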
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
index b2a03e4..f8e9e14 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java
@@ -57,6 +57,7 @@ import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
 import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
 import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
 import org.apache.ambari.server.orm.entities.MetainfoEntity;
+import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity;
 import org.apache.ambari.server.state.SecurityState;
 import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.State;
@@ -165,7 +166,7 @@ public class DatabaseConsistencyCheckHelper {
       LOG.error("Exception occurred during connection close procedure: ", e);
     }
   }
-  
+
   public static DatabaseConsistencyCheckResult runAllDBChecks(boolean fixIssues) throws Throwable {
     LOG.info("******************************* Check database started *******************************");
     try {
@@ -693,6 +694,8 @@ public class DatabaseConsistencyCheckHelper {
     }
 
     for (HostComponentDesiredStateEntity hostComponentDesiredStateEntity : missedHostComponentDesiredStates) {
+      ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity = hostComponentDesiredStateEntity.getServiceComponentDesiredStateEntity();
+
       HostComponentStateEntity stateEntity = new HostComponentStateEntity();
       stateEntity.setClusterId(hostComponentDesiredStateEntity.getClusterId());
       stateEntity.setComponentName(hostComponentDesiredStateEntity.getComponentName());
@@ -701,7 +704,6 @@ public class DatabaseConsistencyCheckHelper {
       stateEntity.setHostEntity(hostComponentDesiredStateEntity.getHostEntity());
       stateEntity.setCurrentState(State.UNKNOWN);
       stateEntity.setUpgradeState(UpgradeState.NONE);
-      stateEntity.setCurrentStack(hostComponentDesiredStateEntity.getDesiredStack());
       stateEntity.setSecurityState(SecurityState.UNKNOWN);
       stateEntity.setServiceComponentDesiredStateEntity(hostComponentDesiredStateEntity.getServiceComponentDesiredStateEntity());
 
@@ -717,7 +719,6 @@ public class DatabaseConsistencyCheckHelper {
       stateEntity.setServiceName(missedHostComponentState.getServiceName());
       stateEntity.setHostEntity(missedHostComponentState.getHostEntity());
       stateEntity.setDesiredState(State.UNKNOWN);
-      stateEntity.setDesiredStack(missedHostComponentState.getCurrentStack());
       stateEntity.setServiceComponentDesiredStateEntity(missedHostComponentState.getServiceComponentDesiredStateEntity());
 
       LOG.error("Trying to add missing record in hostcomponentdesiredstate: {}", stateEntity);

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 04b72ea..8995e51 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -697,8 +697,6 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
         sch.setDesiredState(state);
       }
 
-      sch.setDesiredStackVersion(sc.getDesiredStackVersion());
-
       schMap.put(cluster, sch);
     }
 
@@ -2741,6 +2739,9 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
           for (ServiceComponentHost scHost :
               changedScHosts.get(compName).get(newState)) {
 
+            Service service = cluster.getService(scHost.getServiceName());
+            ServiceComponent serviceComponent = service.getServiceComponent(compName);
+
             // Do not create role command for hosts that are not responding
             if (scHost.getHostState().equals(HostState.HEARTBEAT_LOST)) {
               LOG.info("Command is not created for servicecomponenthost "
@@ -2779,7 +2780,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
                     event = new ServiceComponentHostInstallEvent(
                         scHost.getServiceComponentName(), scHost.getHostName(),
                         nowTimestamp,
-                        scHost.getDesiredStackVersion().getStackId());
+                        serviceComponent.getDesiredStackVersion().getStackId());
                   }
                 } else if (oldSchState == State.STARTED
                       // TODO: oldSchState == State.INSTALLED is always false, looks like a bug
@@ -2793,7 +2794,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
                   roleCommand = RoleCommand.UPGRADE;
                   event = new ServiceComponentHostUpgradeEvent(
                       scHost.getServiceComponentName(), scHost.getHostName(),
-                      nowTimestamp, scHost.getDesiredStackVersion().getStackId());
+                      nowTimestamp, serviceComponent.getDesiredStackVersion().getStackId());
                 } else {
                   throw new AmbariException("Invalid transition for"
                       + " servicecomponenthost"
@@ -2807,7 +2808,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
                 }
                 break;
               case STARTED:
-                StackId stackId = scHost.getDesiredStackVersion();
+                StackId stackId = serviceComponent.getDesiredStackVersion();
                 ComponentInfo compInfo = ambariMetaInfo.getComponent(
                     stackId.getStackName(), stackId.getStackVersion(), scHost.getServiceName(),
                     scHost.getServiceComponentName());

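Since the host component no longer tracks its own desired stack, the controller resolves it through the parent ServiceComponent before building lifecycle events. A minimal sketch of that lookup, as a fragment assuming cluster, scHost and compName are in scope as in the hunk above:

    // Sketch only: the desired stack now comes from the ServiceComponent, not the SCH.
    Service service = cluster.getService(scHost.getServiceName());
    ServiceComponent serviceComponent = service.getServiceComponent(compName);
    StackId stackId = serviceComponent.getDesiredStackVersion();

    // lifecycle events carry the stack id string, exactly as before
    long nowTimestamp = System.currentTimeMillis();
    ServiceComponentHostInstallEvent event = new ServiceComponentHostInstallEvent(
        scHost.getServiceComponentName(), scHost.getHostName(), nowTimestamp,
        stackId.getStackId());
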
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostRequest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostRequest.java
index 5b6c739..2cf3909 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostRequest.java
@@ -118,20 +118,6 @@ public class ServiceComponentHostRequest {
   }
 
   /**
-   * @return the desiredStackId
-   */
-  public String getDesiredStackId() {
-    return desiredStackId;
-  }
-
-  /**
-   * @param desiredStackId the desiredStackId to set
-   */
-  public void setDesiredStackId(String desiredStackId) {
-    this.desiredStackId = desiredStackId;
-  }
-
-  /**
    * @return the clusterName
    */
   public String getClusterName() {
@@ -156,7 +142,7 @@ public class ServiceComponentHostRequest {
    * @return Stale config indicator
    */
   public String getStaleConfig() {
-    return this.staleConfig;
+    return staleConfig;
   }
 
   /**
@@ -170,9 +156,10 @@ public class ServiceComponentHostRequest {
    * @return the admin state of the component
    */
   public String getAdminState() {
-    return this.adminState;
+    return adminState;
   }
 
+  @Override
   public String toString() {
     StringBuilder sb = new StringBuilder();
     sb.append("{" + " clusterName=").append(clusterName)

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java
index f6993f8..08acff7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java
@@ -35,21 +35,19 @@ public class ServiceComponentHostResponse {
   // type -> desired config
   private Map<String, HostConfig> actualConfigs;
   private String liveState;
-  private String stackVersion;
+  private String version;
   private String desiredStackVersion;
+  private String desiredRepositoryVersion;
   private String desiredState;
   private boolean staleConfig = false;
   private String adminState = null;
   private String maintenanceState = null;
   private UpgradeState upgradeState = UpgradeState.NONE;
 
-
-  public ServiceComponentHostResponse(String clusterName, String serviceName,
-                                      String componentName, String displayName,
-                                      String hostname, String publicHostname,
-                                      String liveState, String stackVersion,
-                                      String desiredState, String desiredStackVersion,
-                                      HostComponentAdminState adminState) {
+  public ServiceComponentHostResponse(String clusterName, String serviceName, String componentName,
+      String displayName, String hostname, String publicHostname, String liveState, String version,
+      String desiredState, String desiredStackVersion, String desiredRepositoryVersion,
+      HostComponentAdminState adminState) {
     this.clusterName = clusterName;
     this.serviceName = serviceName;
     this.componentName = componentName;
@@ -57,9 +55,10 @@ public class ServiceComponentHostResponse {
     this.hostname = hostname;
     this.publicHostname = publicHostname;
     this.liveState = liveState;
-    this.stackVersion = stackVersion;
+    this.version = version;
     this.desiredState = desiredState;
     this.desiredStackVersion = desiredStackVersion;
+    this.desiredRepositoryVersion = desiredRepositoryVersion;
     if (adminState != null) {
       this.adminState = adminState.name();
     }
@@ -143,17 +142,10 @@ public class ServiceComponentHostResponse {
   }
 
   /**
-   * @return the stackVersion
-   */
-  public String getStackVersion() {
-    return stackVersion;
-  }
-
-  /**
-   * @param stackVersion the stackVersion to set
+   * @return the version
    */
-  public void setStackVersion(String stackVersion) {
-    this.stackVersion = stackVersion;
+  public String getVersion() {
+    return version;
   }
 
   /**
@@ -185,6 +177,15 @@ public class ServiceComponentHostResponse {
   }
 
   /**
+   * Gets the desired repository of the component.
+   *
+   * @return the desired repository.
+   */
+  public String getDesiredRepositoryVersion() {
+    return desiredRepositoryVersion;
+  }
+
+  /**
    * @return the clusterName
    */
   public String getClusterName() {

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceRequest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceRequest.java
index 6c0d4ea..66c1a93 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceRequest.java
@@ -28,17 +28,23 @@ public class ServiceRequest {
   private String credentialStoreEnabled; // CREATE/UPDATE/GET
   private String credentialStoreSupported; //GET
 
-  public ServiceRequest(String clusterName, String serviceName,
-                        String desiredState) {
-    this(clusterName, serviceName, desiredState, null);
+  private String desiredStack;
+  private String desiredRepositoryVersion;
+
+  public ServiceRequest(String clusterName, String serviceName, String desiredStack,
+      String desiredRepositoryVersion, String desiredState) {
+    this(clusterName, serviceName, desiredStack, desiredRepositoryVersion, desiredState, null);
   }
 
-  public ServiceRequest(String clusterName, String serviceName,
-                        String desiredState,
-                        String credentialStoreEnabled) {
+  public ServiceRequest(String clusterName, String serviceName, String desiredStack,
+      String desiredRepositoryVersion, String desiredState, String credentialStoreEnabled) {
     this.clusterName = clusterName;
     this.serviceName = serviceName;
     this.desiredState = desiredState;
+
+    this.desiredStack = desiredStack;
+    this.desiredRepositoryVersion = desiredRepositoryVersion;
+
     this.credentialStoreEnabled = credentialStoreEnabled;
     // Credential store supported cannot be changed after
     // creation since it comes from the stack definition.
@@ -73,6 +79,14 @@ public class ServiceRequest {
     this.desiredState = desiredState;
   }
 
+  public String getDesiredStack() {
+    return desiredStack;
+  }
+
+  public String getDesiredRepositoryVersion() {
+    return desiredRepositoryVersion;
+  }
+
   /**
    * @return the clusterName
    */
@@ -130,6 +144,7 @@ public class ServiceRequest {
     this.credentialStoreSupported = credentialStoreSupported;
   }
 
+  @Override
   public String toString() {
     StringBuilder sb = new StringBuilder();
     sb.append("clusterName=").append(clusterName)

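Callers of ServiceRequest now pass the desired stack and repository version explicitly; both may be left null to fall back to the cluster's current version (see the ServiceResourceProvider hunk further down). A minimal usage sketch; the cluster, service and version values are examples, not taken from the patch:

    // Sketch only: example values are hypothetical.
    ServiceRequest request = new ServiceRequest(
        "c1",            // clusterName
        "ZOOKEEPER",     // serviceName
        "HDP-2.6",       // desiredStack
        "2.6.0.0-1234",  // desiredRepositoryVersion
        "INSTALLED",     // desiredState
        null);           // credentialStoreEnabled

    String stack = request.getDesiredStack();                   // "HDP-2.6"
    String repoVersion = request.getDesiredRepositoryVersion(); // "2.6.0.0-1234"
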
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceResponse.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceResponse.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceResponse.java
index 3e35c0c..e67d124f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceResponse.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceResponse.java
@@ -25,25 +25,26 @@ public class ServiceResponse {
   private String clusterName;
   private String serviceName;
   private String desiredStackVersion;
+  private String desiredRepositoryVersion;
   private String desiredState;
   private String maintenanceState;
   private boolean credentialStoreSupported;
   private boolean credentialStoreEnabled;
 
-  public ServiceResponse(Long clusterId, String clusterName,
-                         String serviceName,
-                         String desiredStackVersion, String desiredState,
-                         boolean credentialStoreSupported, boolean credentialStoreEnabled) {
+  public ServiceResponse(Long clusterId, String clusterName, String serviceName,
+      String desiredStackVersion, String desiredRepositoryVersion, String desiredState,
+      boolean credentialStoreSupported, boolean credentialStoreEnabled) {
     this.clusterId = clusterId;
     this.clusterName = clusterName;
     this.serviceName = serviceName;
-    this.setDesiredStackVersion(desiredStackVersion);
-    this.setDesiredState(desiredState);
+    setDesiredStackVersion(desiredStackVersion);
+    setDesiredState(desiredState);
+    this.desiredRepositoryVersion = desiredRepositoryVersion;
     this.credentialStoreSupported = credentialStoreSupported;
     this.credentialStoreEnabled = credentialStoreEnabled;
   }
-  
-  
+
+
 
   /**
    * @return the serviceName
@@ -115,10 +116,23 @@ public class ServiceResponse {
     this.desiredStackVersion = desiredStackVersion;
   }
 
+  /**
+   * Gets the desired repository version.
+   *
+   * @return the desired repository version.
+   */
+  public String getDesiredRepositoryVersion() {
+    return desiredRepositoryVersion;
+  }
+
   @Override
   public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
 
     ServiceResponse that = (ServiceResponse) o;
 
@@ -137,11 +151,11 @@ public class ServiceResponse {
 
     return true;
   }
-  
+
   public void setMaintenanceState(String state) {
     maintenanceState = state;
   }
-  
+
   public String getMaintenanceState() {
     return maintenanceState;
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
index c3e66fc..5c89ced 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
@@ -35,14 +35,10 @@ import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.Role;
 import org.apache.ambari.server.StaticallyInject;
 import org.apache.ambari.server.actionmanager.ActionManager;
-import org.apache.ambari.server.actionmanager.HostRoleCommand;
 import org.apache.ambari.server.actionmanager.HostRoleCommandFactory;
-import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.actionmanager.RequestFactory;
 import org.apache.ambari.server.actionmanager.Stage;
 import org.apache.ambari.server.actionmanager.StageFactory;
-import org.apache.ambari.server.agent.CommandReport;
-import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.ActionExecutionContext;
@@ -689,132 +685,6 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
     return requestStages;
   }
 
-  /**
-   * The only appliance of this method is triggering Finalize during
-   * manual Stack Upgrade
-   */
-  @Override
-  public RequestStatus updateResourcesAuthorized(Request request, Predicate predicate)
-      throws SystemException, UnsupportedPropertyException,
-      NoSuchResourceException, NoSuchParentResourceException {
-    try {
-      Iterator<Map<String, Object>> iterator = request.getProperties().iterator();
-      String clName;
-      final String desiredRepoVersion;
-      if (request.getProperties().size() != 1) {
-        throw new UnsupportedOperationException("Multiple requests cannot be executed at the same time.");
-      }
-      Map<String, Object> propertyMap = iterator.next();
-
-      Set<String> requiredProperties = new HashSet<>();
-      requiredProperties.add(CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID);
-      requiredProperties.add(CLUSTER_STACK_VERSION_REPOSITORY_VERSION_PROPERTY_ID);
-      requiredProperties.add(CLUSTER_STACK_VERSION_STATE_PROPERTY_ID);
-
-      for (String requiredProperty : requiredProperties) {
-        if (!propertyMap.containsKey(requiredProperty)) {
-          throw new IllegalArgumentException(
-                  String.format("The required property %s is not defined",
-                          requiredProperty));
-        }
-      }
-
-      clName = (String) propertyMap.get(CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID);
-      String desiredDisplayRepoVersion = (String) propertyMap.get(CLUSTER_STACK_VERSION_REPOSITORY_VERSION_PROPERTY_ID);
-      RepositoryVersionEntity rve = repositoryVersionDAO.findByDisplayName(desiredDisplayRepoVersion);
-      if (rve == null) {
-        throw new IllegalArgumentException(
-                  String.format("Repository version with display name %s does not exist",
-                          desiredDisplayRepoVersion));
-      }
-      desiredRepoVersion = rve.getVersion();
-      String newStateStr = (String) propertyMap.get(CLUSTER_STACK_VERSION_STATE_PROPERTY_ID);
-
-      LOG.info("Initiating finalization for manual upgrade to version {} for cluster {}",
-              desiredRepoVersion, clName);
-
-      // First, set desired cluster stack version to enable cross-stack upgrade
-      StackId stackId = rve.getStackId();
-      Cluster cluster = getManagementController().getClusters().getCluster(clName);
-      cluster.setDesiredStackVersion(stackId);
-
-      String forceCurrent = (String) propertyMap.get(CLUSTER_STACK_VERSION_FORCE);
-      boolean force = false;
-      if (null != forceCurrent) {
-        force = Boolean.parseBoolean(forceCurrent);
-      }
-
-      if (!force) {
-        Map<String, String> args = new HashMap<>();
-        if (newStateStr.equals(RepositoryVersionState.CURRENT.toString())) {
-          // Finalize upgrade workflow
-          args.put(FinalizeUpgradeAction.UPGRADE_DIRECTION_KEY, "upgrade");
-        } else if (newStateStr.equals(RepositoryVersionState.INSTALLED.toString())) {
-          // Finalize downgrade workflow
-          args.put(FinalizeUpgradeAction.UPGRADE_DIRECTION_KEY, "downgrade");
-        } else {
-          throw new IllegalArgumentException(
-            String.format("Invalid desired state %s. Should be either CURRENT or INSTALLED",
-                    newStateStr));
-        }
-
-        // Get a host name to populate the hostrolecommand table's hostEntity.
-        String defaultHostName;
-        ArrayList<Host> hosts = new ArrayList<>(cluster.getHosts());
-        if (!hosts.isEmpty()) {
-          Collections.sort(hosts);
-          defaultHostName = hosts.get(0).getHostName();
-        } else {
-          throw new AmbariException("Could not find at least one host to set the command for");
-        }
-
-        args.put(FinalizeUpgradeAction.VERSION_KEY, desiredRepoVersion);
-        args.put(FinalizeUpgradeAction.CLUSTER_NAME_KEY, clName);
-
-        ExecutionCommand command = new ExecutionCommand();
-        command.setCommandParams(args);
-        command.setClusterName(clName);
-        finalizeUpgradeAction.setExecutionCommand(command);
-
-        HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(defaultHostName,
-                Role.AMBARI_SERVER_ACTION, null, null);
-        finalizeUpgradeAction.setHostRoleCommand(hostRoleCommand);
-
-        CommandReport report = finalizeUpgradeAction.execute(null);
-
-        LOG.info("Finalize output:");
-        LOG.info("STDOUT: {}", report.getStdOut());
-        LOG.info("STDERR: {}", report.getStdErr());
-
-        if (report.getStatus().equals(HostRoleStatus.COMPLETED.toString())) {
-          return getRequestStatus(null);
-        } else {
-          String detailedOutput = "Finalization failed. More details: \n" +
-                  "STDOUT: " + report.getStdOut() + "\n" +
-                  "STDERR: " + report.getStdErr();
-          throw new SystemException(detailedOutput);
-        }
-      } else {
-        // !!! revisit for PU
-        // If forcing to become CURRENT, get the Cluster Version whose state is CURRENT and make sure that
-        // the Host Version records for the same Repo Version are also marked as CURRENT.
-        @Experimental(feature=ExperimentalFeature.PATCH_UPGRADES)
-        ClusterVersionEntity current = cluster.getCurrentClusterVersion();
-
-        if (!current.getRepositoryVersion().equals(rve)) {
-          updateVersionStates(current.getClusterId(), current.getRepositoryVersion(), rve);
-        }
-
-
-        return getRequestStatus(null);
-      }
-    } catch (AmbariException e) {
-      throw new SystemException("Cannot perform request", e);
-    } catch (InterruptedException e) {
-      throw new SystemException("Cannot perform request", e);
-    }
-  }
-
   @Override
   public RequestStatus deleteResourcesAuthorized(Request request, Predicate predicate)
       throws SystemException, UnsupportedPropertyException,

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java
index ff8d0be..3f4e7c2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ComponentResourceProvider.java
@@ -352,7 +352,7 @@ public class ComponentResourceProvider extends AbstractControllerResourceProvide
       Cluster cluster = clusters.getCluster(request.getClusterName());
       Service s = cluster.getService(request.getServiceName());
       ServiceComponent sc = serviceComponentFactory.createNew(s, request.getComponentName());
-      sc.setDesiredStackVersion(s.getDesiredStackVersion());
+      sc.setDesiredRepositoryVersion(s.getDesiredRepositoryVersion());
 
       if (StringUtils.isNotEmpty(request.getDesiredState())) {
         State state = State.valueOf(request.getDesiredState());

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
index 46e791b..10bd7ac 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
@@ -50,7 +50,6 @@ import org.apache.ambari.server.controller.spi.SystemException;
 import org.apache.ambari.server.controller.spi.UnsupportedPropertyException;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
 import org.apache.ambari.server.orm.dao.HostVersionDAO;
-import org.apache.ambari.server.orm.entities.HostVersionEntity;
 import org.apache.ambari.server.security.authorization.AuthorizationException;
 import org.apache.ambari.server.security.authorization.AuthorizationHelper;
 import org.apache.ambari.server.security.authorization.ResourceType;
@@ -97,10 +96,12 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
       = PropertyHelper.getPropertyId("HostRoles", "state");
   public static final String HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID
       = PropertyHelper.getPropertyId("HostRoles", "desired_state");
-  public static final String HOST_COMPONENT_STACK_ID_PROPERTY_ID
-      = PropertyHelper.getPropertyId("HostRoles", "stack_id");
+  public static final String HOST_COMPONENT_VERSION_PROPERTY_ID
+      = PropertyHelper.getPropertyId("HostRoles", "version");
   public static final String HOST_COMPONENT_DESIRED_STACK_ID_PROPERTY_ID
       = PropertyHelper.getPropertyId("HostRoles", "desired_stack_id");
+  public static final String HOST_COMPONENT_DESIRED_REPOSITORY_VERSION
+    = PropertyHelper.getPropertyId("HostRoles", "desired_repository_version");
   public static final String HOST_COMPONENT_ACTUAL_CONFIGS_PROPERTY_ID
       = PropertyHelper.getPropertyId("HostRoles", "actual_configs");
   public static final String HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID
@@ -109,8 +110,6 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
       = PropertyHelper.getPropertyId("HostRoles", "desired_admin_state");
   public static final String HOST_COMPONENT_MAINTENANCE_STATE_PROPERTY_ID
       = "HostRoles/maintenance_state";
-  public static final String HOST_COMPONENT_HDP_VERSION_PROPERTY_ID
-      = PropertyHelper.getPropertyId("HostRoles", "hdp_version");
   public static final String HOST_COMPONENT_UPGRADE_STATE_PROPERTY_ID = "HostRoles/upgrade_state";
 
   //Parameters from the predicate
@@ -237,8 +236,8 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
               response.getLiveState(), requestedIds);
       setResourceProperty(resource, HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID,
               response.getDesiredState(), requestedIds);
-      setResourceProperty(resource, HOST_COMPONENT_STACK_ID_PROPERTY_ID,
-              response.getStackVersion(), requestedIds);
+      setResourceProperty(resource, HOST_COMPONENT_VERSION_PROPERTY_ID, response.getVersion(),
+          requestedIds);
       setResourceProperty(resource, HOST_COMPONENT_DESIRED_STACK_ID_PROPERTY_ID,
               response.getDesiredStackVersion(), requestedIds);
       setResourceProperty(resource, HOST_COMPONENT_ACTUAL_CONFIGS_PROPERTY_ID,
@@ -247,15 +246,8 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
               response.isStaleConfig(), requestedIds);
       setResourceProperty(resource, HOST_COMPONENT_UPGRADE_STATE_PROPERTY_ID,
               response.getUpgradeState(), requestedIds);
-
-      if (requestedIds.contains(HOST_COMPONENT_HDP_VERSION_PROPERTY_ID)) {
-        HostVersionEntity versionEntity = hostVersionDAO.
-            findByHostAndStateCurrent(response.getClusterName(), response.getHostname());
-        if (versionEntity != null) {
-          setResourceProperty(resource, HOST_COMPONENT_HDP_VERSION_PROPERTY_ID,
-              versionEntity.getRepositoryVersion().getDisplayName(), requestedIds);
-        }
-      }
+      setResourceProperty(resource, HOST_COMPONENT_DESIRED_REPOSITORY_VERSION,
+          response.getDesiredRepositoryVersion(), requestedIds);
 
       if (response.getAdminState() != null) {
         setResourceProperty(resource, HOST_COMPONENT_DESIRED_ADMIN_STATE_PROPERTY_ID,
@@ -684,7 +676,6 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
         (String) properties.get(HOST_COMPONENT_HOST_NAME_PROPERTY_ID),
         (String) properties.get(HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID));
     serviceComponentHostRequest.setState((String) properties.get(HOST_COMPONENT_STATE_PROPERTY_ID));
-    serviceComponentHostRequest.setDesiredStackId((String) properties.get(HOST_COMPONENT_STACK_ID_PROPERTY_ID));
     if (properties.get(HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID) != null) {
       serviceComponentHostRequest.setStaleConfig(
           properties.get(HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID).toString().toLowerCase());
@@ -724,8 +715,6 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
     if (properties.get(HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID) != null) {
       serviceComponentHostRequest.setDesiredState((String)properties.get(HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID));
     }
-    serviceComponentHostRequest.setDesiredStackId(
-            (String) properties.get(HOST_COMPONENT_STACK_ID_PROPERTY_ID));
     if (properties.get(HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID) != null) {
       serviceComponentHostRequest.setStaleConfig(
               properties.get(HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID).toString().toLowerCase());

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
index 6556852..9cbcea6 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
@@ -56,6 +56,8 @@ import org.apache.ambari.server.controller.utilities.PropertyHelper;
 import org.apache.ambari.server.controller.utilities.ServiceCalculatedStateFactory;
 import org.apache.ambari.server.controller.utilities.state.ServiceCalculatedState;
 import org.apache.ambari.server.metadata.RoleCommandOrder;
+import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.security.authorization.AuthorizationException;
 import org.apache.ambari.server.security.authorization.AuthorizationHelper;
 import org.apache.ambari.server.security.authorization.ResourceType;
@@ -99,6 +101,9 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
 
   public static final String SERVICE_ATTRIBUTES_PROPERTY_ID = PropertyHelper.getPropertyId("Services", "attributes");
 
+  public static final String SERVICE_DESIRED_STACK_PROPERTY_ID = PropertyHelper.getPropertyId("ServiceInfo", "desired_stack");
+  public static final String SERVICE_DESIRED_REPO_VERSION_PROPERTY_ID = PropertyHelper.getPropertyId("ServiceInfo", "desired_repository_version");
+
   //Parameters from the predicate
   private static final String QUERY_PARAMETERS_RUN_SMOKE_TEST_ID =
     "params/run_smoke_test";
@@ -123,6 +128,11 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
   @Inject
   private KerberosHelper kerberosHelper;
 
+  /**
+   * Used to look up the repository when creating services.
+   */
+  private final RepositoryVersionDAO repositoryVersionDAO;
+
   // ----- Constructors ----------------------------------------------------
 
   /**
@@ -134,11 +144,12 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
    */
   @AssistedInject
   public ServiceResourceProvider(@Assisted Set<String> propertyIds,
-                          @Assisted Map<Resource.Type, String> keyPropertyIds,
-                          @Assisted AmbariManagementController managementController,
-                          MaintenanceStateHelper maintenanceStateHelper) {
+      @Assisted Map<Resource.Type, String> keyPropertyIds,
+      @Assisted AmbariManagementController managementController,
+      MaintenanceStateHelper maintenanceStateHelper, RepositoryVersionDAO repositoryVersionDAO) {
     super(propertyIds, keyPropertyIds, managementController);
     this.maintenanceStateHelper = maintenanceStateHelper;
+    this.repositoryVersionDAO = repositoryVersionDAO;
 
     setRequiredCreateAuthorizations(EnumSet.of(RoleAuthorization.SERVICE_ADD_DELETE_SERVICES));
     setRequiredUpdateAuthorizations(RoleAuthorization.AUTHORIZATIONS_UPDATE_SERVICE);
@@ -207,6 +218,12 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
       setResourceProperty(resource, SERVICE_CREDENTIAL_STORE_ENABLED_PROPERTY_ID,
           String.valueOf(response.isCredentialStoreEnabled()), requestedIds);
 
+      setResourceProperty(resource, SERVICE_DESIRED_STACK_PROPERTY_ID,
+          response.getDesiredStackVersion(), requestedIds);
+
+      setResourceProperty(resource, SERVICE_DESIRED_REPO_VERSION_PROPERTY_ID,
+          response.getDesiredRepositoryVersion(), requestedIds);
+
       Map<String, Object> serviceSpecificProperties = getServiceSpecificProperties(
           response.getClusterName(), response.getServiceName(), requestedIds);
 
@@ -328,9 +345,13 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
    * @return the service request object
    */
   private ServiceRequest getRequest(Map<String, Object> properties) {
+    String desiredStack = (String)properties.get(SERVICE_DESIRED_STACK_PROPERTY_ID);
+    String desiredRepositoryVersion = (String)properties.get(SERVICE_DESIRED_REPO_VERSION_PROPERTY_ID);
+
     ServiceRequest svcRequest = new ServiceRequest(
         (String) properties.get(SERVICE_CLUSTER_NAME_PROPERTY_ID),
         (String) properties.get(SERVICE_SERVICE_NAME_PROPERTY_ID),
+        desiredStack, desiredRepositoryVersion,
         (String) properties.get(SERVICE_SERVICE_STATE_PROPERTY_ID),
         (String) properties.get(SERVICE_CREDENTIAL_STORE_ENABLED_PROPERTY_ID));
 
@@ -362,8 +383,20 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
     for (ServiceRequest request : requests) {
       Cluster cluster = clusters.getCluster(request.getClusterName());
 
-      // Already checked that service does not exist
-      Service s = cluster.addService(request.getServiceName());
+
+      String desiredStack = request.getDesiredStack();
+      String desiredRepositoryVersion = request.getDesiredRepositoryVersion();
+      RepositoryVersionEntity repositoryVersion = null;
+      if( StringUtils.isNotBlank(desiredStack) && StringUtils.isNotBlank(desiredRepositoryVersion)){
+        repositoryVersion = repositoryVersionDAO.findByStackAndVersion(new StackId(desiredStack),
+            desiredRepositoryVersion);
+      }
+
+      if (null == repositoryVersion) {
+        repositoryVersion = cluster.getCurrentClusterVersion().getRepositoryVersion();
+      }
+
+      Service s = cluster.addService(request.getServiceName(), repositoryVersion);
 
       /**
        * Get the credential_store_supported field only from the stack definition.

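Service creation can now pin a service to a repository via two new optional properties; when they are blank or do not resolve to a known repository, the provider falls back to the cluster's current repository version. A minimal sketch of the relevant request properties, with hypothetical values and the rest of the create request (and imports) omitted:

    // Sketch only: the two new optional create-time properties.
    Map<String, Object> properties = new HashMap<>();
    properties.put(SERVICE_DESIRED_STACK_PROPERTY_ID, "HDP-2.6");              // "ServiceInfo/desired_stack"
    properties.put(SERVICE_DESIRED_REPO_VERSION_PROPERTY_ID, "2.6.0.0-1234");  // "ServiceInfo/desired_repository_version"

    // if either is blank, or the pair is unknown to repositoryVersionDAO, the provider uses
    // cluster.getCurrentClusterVersion().getRepositoryVersion() instead
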
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
index 623851a..b49b66e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
@@ -36,8 +36,6 @@ import java.util.Set;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import org.apache.ambari.annotations.Experimental;
-import org.apache.ambari.annotations.ExperimentalFeature;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.Role;
 import org.apache.ambari.server.RoleCommand;
@@ -92,8 +90,6 @@ import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.RepositoryType;
-import org.apache.ambari.server.state.Service;
-import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.StackInfo;
@@ -704,7 +700,9 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     UpgradeType upgradeType = upgradeContext.getType();
 
     ConfigHelper configHelper = getManagementController().getConfigHelper();
-    String userName = getManagementController().getAuthName();
+
+    // the upgrade context calculated these for us based on direction
+    StackId sourceStackId = upgradeContext.getOriginalStackId();
 
     // the version being upgraded or downgraded to (ie 2.2.1.0-1234)
     final String version = upgradeContext.getVersion();
@@ -721,20 +719,24 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
 
     switch (direction) {
       case UPGRADE:
-        StackId sourceStackId = cluster.getCurrentStackVersion();
-
-        RepositoryVersionEntity targetRepositoryVersion = s_repoVersionDAO.findByStackNameAndVersion(
-            sourceStackId.getStackName(), version);
+        RepositoryVersionEntity targetRepositoryVersion = upgradeContext.getTargetRepositoryVersion();
+        RepositoryType repositoryType = targetRepositoryVersion.getType();
 
         // !!! Consult the version definition and add the service names to supportedServices
-        if (targetRepositoryVersion.getType() != RepositoryType.STANDARD) {
+        if (repositoryType != RepositoryType.STANDARD) {
+          scope = UpgradeScope.PARTIAL;
+
           try {
             VersionDefinitionXml vdf = targetRepositoryVersion.getRepositoryXml();
             supportedServices.addAll(vdf.getAvailableServiceNames());
 
-            // !!! better not be, but just in case
-            if (!supportedServices.isEmpty()) {
-              scope = UpgradeScope.PARTIAL;
+            // if this is ever true, then just stop the upgrade attempt and
+            // throw an exception
+            if (supportedServices.isEmpty()) {
+              String message = String.format(
+                  "When using a VDF of type %s, the available services must be defined in the VDF",
+                  targetRepositoryVersion.getType());
+              throw new AmbariException(message);
             }
 
           } catch (Exception e) {
@@ -820,10 +822,6 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     List<UpgradeGroupEntity> groupEntities = new ArrayList<>();
     RequestStageContainer req = createRequest(direction, version);
 
-    // the upgrade context calculated these for us based on direction
-    StackId sourceStackId = upgradeContext.getOriginalStackId();
-    StackId targetStackId = upgradeContext.getTargetStackId();
-
     /**
     During a Rolling Upgrade, change the desired Stack Id if jumping across
     major stack versions (e.g., HDP 2.2 -> 2.3), and then set config changes
@@ -837,7 +835,10 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     if (pack.getType() == UpgradeType.ROLLING) {
       // Desired configs must be set before creating stages because the config tag
       // names are read and set on the command for filling in later
-      applyStackAndProcessConfigurations(targetStackId.getStackName(), cluster, version, direction, pack, userName);
+      applyStackAndProcessConfigurations(upgradeContext);
+
+      // move component desired version and upgrade state
+      s_upgradeHelper.putComponentsToUpgradingState(upgradeContext);
     }
 
     // resolve or build a proper config upgrade pack - always start out with the config pack
@@ -847,26 +848,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     // HDP 2.2 to 2.4 should start with HDP 2.2 and merge in HDP 2.3's config-upgrade.xml
     ConfigUpgradePack configUpgradePack = ConfigurationPackBuilder.build(pack, sourceStackId);
 
-    // TODO: for now, all service components are transitioned to upgrading state
-    // TODO: When performing patch upgrade, we should only target supported services/components
-    // from upgrade pack
-    @Experimental(feature=ExperimentalFeature.PATCH_UPGRADES)
-    Set<Service> services = new HashSet<>(cluster.getServices().values());
-
-    @Experimental(feature=ExperimentalFeature.PATCH_UPGRADES)
-    Map<Service, Set<ServiceComponent>> targetComponents = new HashMap<>();
-    for (Service service: services) {
-      if (upgradeContext.isServiceSupported(service.getName())) {
-        Set<ServiceComponent> serviceComponents = new HashSet<>(service.getServiceComponents().values());
-        targetComponents.put(service, serviceComponents);
-      }
-    }
-
-    // !!! determine which stack to check for component isAdvertised
-    StackId componentStack = upgradeContext.getDirection() == Direction.UPGRADE ?
-        upgradeContext.getTargetStackId() : upgradeContext.getOriginalStackId();
-    s_upgradeHelper.putComponentsToUpgradingState(version, targetComponents, componentStack);
-
+    // create the upgrade and request
     for (UpgradeGroupHolder group : groups) {
       boolean skippable = group.skippable;
       boolean supportsAutoSkipOnFailure = group.supportsAutoSkipOnFailure;
@@ -1015,23 +997,19 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
    * fail due to missing configurations.</li>
    * </ul>
    *
-   *
-   * @param stackName Stack name such as HDP, HDPWIN, BIGTOP
-   * @param cluster
-   *          the cluster
-   * @param version
-   *          the version
-   * @param direction
-   *          upgrade or downgrade
-   * @param upgradePack
-   *          upgrade pack used for upgrade or downgrade. This is needed to determine
-   *          which services are effected.
-   * @param userName
-   *          username performing the action
+   * @param upgradeContext  the upgrade context (not {@code null}).
    * @throws AmbariException
    */
-  public void applyStackAndProcessConfigurations(String stackName, Cluster cluster, String version, Direction direction, UpgradePack upgradePack, String userName)
+  public void applyStackAndProcessConfigurations(UpgradeContext upgradeContext)
     throws AmbariException {
+
+    Cluster cluster = upgradeContext.getCluster();
+    Direction direction = upgradeContext.getDirection();
+    UpgradePack upgradePack = upgradeContext.getUpgradePack();
+    String stackName = upgradeContext.getTargetStackId().getStackName();
+    String version = upgradeContext.getVersion();
+    String userName = getManagementController().getAuthName();
+
     RepositoryVersionEntity targetRve = s_repoVersionDAO.findByStackNameAndVersion(stackName, version);
     if (null == targetRve) {
       LOG.info("Could not find version entity for {}; not setting new configs", version);
@@ -1255,7 +1233,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
 
     // !!! update the stack
     cluster.setDesiredStackVersion(
-        new StackId(targetStack.getStackName(), targetStack.getStackVersion()), true);
+        new StackId(targetStack.getStackName(), targetStack.getStackVersion()));
 
     // !!! configs must be created after setting the stack version
     if (null != newConfigurationsByType) {

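applyStackAndProcessConfigurations now takes only the UpgradeContext; everything it previously received as separate arguments is read from the context, and the component upgrade-state transition rides on the same object. A minimal sketch of the new shape, as a fragment assuming a built upgradeContext:

    // Sketch only: what the method now pulls off the context internally.
    Cluster cluster = upgradeContext.getCluster();
    Direction direction = upgradeContext.getDirection();
    UpgradePack upgradePack = upgradeContext.getUpgradePack();
    String stackName = upgradeContext.getTargetStackId().getStackName();
    String version = upgradeContext.getVersion();

    // and the simplified call sites for a rolling upgrade:
    applyStackAndProcessConfigurations(upgradeContext);
    s_upgradeHelper.putComponentsToUpgradingState(upgradeContext);
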
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/DistributeRepositoriesActionListener.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/DistributeRepositoriesActionListener.java b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/DistributeRepositoriesActionListener.java
index cd82957..3fda160 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/DistributeRepositoriesActionListener.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/DistributeRepositoriesActionListener.java
@@ -34,6 +34,7 @@ import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.RepositoryVersionState;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.utils.StageUtils;
+import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -118,10 +119,11 @@ public class DistributeRepositoriesActionListener {
 
         // Handle the case in which the version to install did not contain the build number,
         // but the structured output does contain the build number.
-        if (null != structuredOutput.getActualVersion() && !structuredOutput.getActualVersion().isEmpty() &&
-            null != structuredOutput.getInstalledRepositoryVersion() && !structuredOutput.getInstalledRepositoryVersion().isEmpty() &&
-            null != structuredOutput.getStackId() && !structuredOutput.getStackId().isEmpty() &&
-            !structuredOutput.getActualVersion().equals(structuredOutput.getInstalledRepositoryVersion())) {
+        if (!StringUtils.isEmpty(structuredOutput.getActualVersion())
+            && !StringUtils.isEmpty(structuredOutput.getInstalledRepositoryVersion())
+            && !StringUtils.isEmpty(structuredOutput.getStackId())
+            && !StringUtils.equals(structuredOutput.getActualVersion(),
+                structuredOutput.getInstalledRepositoryVersion())) {
 
           // !!! getInstalledRepositoryVersion() from the agent is the one
           // entered in the UI.  getActualVersion() is computed.

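The hand-rolled null/empty checks are replaced with the null-safe helpers from commons-lang; behavior is unchanged, but null values no longer need explicit guards. A small illustration of the equivalence, not part of the patch:

    // Sketch only: StringUtils from commons-lang is null-safe.
    String actualVersion = null;

    boolean emptyManual = (null == actualVersion || actualVersion.isEmpty()); // needs the null guard
    boolean emptyHelper = StringUtils.isEmpty(actualVersion);                 // true for null or ""

    boolean sameHelper = StringUtils.equals(actualVersion, "2.6.0.0-1234");   // false, no NPE on null
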
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/StackVersionListener.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/StackVersionListener.java b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/StackVersionListener.java
index abf8e6b..33c622f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/StackVersionListener.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/StackVersionListener.java
@@ -184,7 +184,6 @@ public class StackVersionListener {
   private void processUnknownDesiredVersion(Cluster cluster, ServiceComponent sc,
                                             ServiceComponentHost sch,
                                             String newVersion) throws AmbariException {
-    sc.setDesiredVersion(newVersion);
     sch.setUpgradeState(UpgradeState.NONE);
     sch.setVersion(newVersion);
     bootstrapVersion(cluster, sch);
@@ -205,8 +204,8 @@ public class StackVersionListener {
     if (upgradeState == UpgradeState.IN_PROGRESS) {
       // Component status update is received during upgrade process
       if (desiredVersion.equals(newVersion)) {
-        sch.setUpgradeState(UpgradeState.COMPLETE);  // Component upgrade confirmed
-        sch.setStackVersion(cluster.getDesiredStackVersion());
+        // Component upgrade confirmed
+        sch.setUpgradeState(UpgradeState.COMPLETE);
       } else { // Unexpected (wrong) version received
         // Even during failed upgrade, we should not receive wrong version
         // That's why mark as VERSION_MISMATCH

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostVersionDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostVersionDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostVersionDAO.java
index 930a535..3871b67 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostVersionDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostVersionDAO.java
@@ -275,9 +275,31 @@ public class HostVersionDAO extends CrudDAO<HostVersionEntity, Long> {
     return daoUtils.selectSingle(query);
   }
 
+  /**
+   * Gets all host version entities associated with the specified cluster and
+   * repository.
+   *
+   * @param clusterId
+   *          the cluster ID.
+   * @param repositoryVersion
+   *          the repository (not {@code null}).
+   * @return the host versions.
+   */
+  @RequiresSession
+  public List<HostVersionEntity> findHostVersionByClusterAndRepository(long clusterId,
+      RepositoryVersionEntity repositoryVersion) {
+    TypedQuery<HostVersionEntity> query = entityManagerProvider.get().createNamedQuery(
+        "findHostVersionByClusterAndRepository", HostVersionEntity.class);
+
+    query.setParameter("clusterId", clusterId);
+    query.setParameter("repositoryVersion", repositoryVersion);
+
+    return daoUtils.selectList(query);
+  }
+
   @Transactional
   public void removeByHostName(String hostName) {
-    Collection<HostVersionEntity> hostVersions = this.findByHost(hostName);
+    Collection<HostVersionEntity> hostVersions = findByHost(hostName);
     this.remove(hostVersions);
   }
 

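The new finder returns every host version row for a given cluster and repository, so callers can inspect per-host state for a single repository in one query. A minimal usage sketch, assuming an injected HostVersionDAO and an already-resolved RepositoryVersionEntity; the logging is illustrative only:

    // Sketch only: list the host versions tracked for one repository in a cluster.
    List<HostVersionEntity> hostVersions =
        hostVersionDAO.findHostVersionByClusterAndRepository(
            cluster.getClusterId(), repositoryVersion);

    for (HostVersionEntity hostVersion : hostVersions) {
      LOG.debug("Host version for repository {}: {}", repositoryVersion.getVersion(), hostVersion);
    }
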
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredStateEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredStateEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredStateEntity.java
index ea2938b..2049969 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredStateEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredStateEntity.java
@@ -32,7 +32,6 @@ import javax.persistence.JoinColumns;
 import javax.persistence.ManyToOne;
 import javax.persistence.NamedQueries;
 import javax.persistence.NamedQuery;
-import javax.persistence.OneToOne;
 import javax.persistence.Table;
 import javax.persistence.TableGenerator;
 import javax.persistence.UniqueConstraint;
@@ -100,13 +99,6 @@ public class HostComponentDesiredStateEntity {
   @Enumerated(value = EnumType.STRING)
   private SecurityState securityState = SecurityState.UNSECURED;
 
-  /**
-   * Unidirectional one-to-one association to {@link StackEntity}
-   */
-  @OneToOne
-  @JoinColumn(name = "desired_stack_id", unique = false, nullable = false)
-  private StackEntity desiredStack;
-
   @Enumerated(value = EnumType.STRING)
   @Column(name = "admin_state", nullable = true, insertable = true, updatable = true)
   private HostComponentAdminState adminState;
@@ -176,14 +168,6 @@ public class HostComponentDesiredStateEntity {
     this.securityState = securityState;
   }
 
-  public StackEntity getDesiredStack() {
-    return desiredStack;
-  }
-
-  public void setDesiredStack(StackEntity desiredStack) {
-    this.desiredStack = desiredStack;
-  }
-
   public HostComponentAdminState getAdminState() {
     return adminState;
   }
@@ -223,10 +207,6 @@ public class HostComponentDesiredStateEntity {
       return false;
     }
 
-    if (!Objects.equal(desiredStack, that.desiredStack)) {
-      return false;
-    }
-
     if (!Objects.equal(desiredState, that.desiredState)) {
       return false;
     }
@@ -249,7 +229,6 @@ public class HostComponentDesiredStateEntity {
     result = 31 * result + (hostEntity != null ? hostEntity.hashCode() : 0);
     result = 31 * result + (componentName != null ? componentName.hashCode() : 0);
     result = 31 * result + (desiredState != null ? desiredState.hashCode() : 0);
-    result = 31 * result + (desiredStack != null ? desiredStack.hashCode() : 0);
     result = 31 * result + (serviceName != null ? serviceName.hashCode() : 0);
     return result;
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentStateEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentStateEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentStateEntity.java
index 0b3d8ce..0151a41 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentStateEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentStateEntity.java
@@ -30,7 +30,6 @@ import javax.persistence.JoinColumns;
 import javax.persistence.ManyToOne;
 import javax.persistence.NamedQueries;
 import javax.persistence.NamedQuery;
-import javax.persistence.OneToOne;
 import javax.persistence.Table;
 import javax.persistence.TableGenerator;
 
@@ -110,13 +109,6 @@ public class HostComponentStateEntity {
   @Column(name = "security_state", nullable = false, insertable = true, updatable = true)
   private SecurityState securityState = SecurityState.UNSECURED;
 
-  /**
-   * Unidirectional one-to-one association to {@link StackEntity}
-   */
-  @OneToOne
-  @JoinColumn(name = "current_stack_id", unique = false, nullable = false, insertable = true, updatable = true)
-  private StackEntity currentStack;
-
   @ManyToOne
   @JoinColumns({
       @JoinColumn(name = "cluster_id", referencedColumnName = "cluster_id", nullable = false),
@@ -188,14 +180,6 @@ public class HostComponentStateEntity {
     this.upgradeState = upgradeState;
   }
 
-  public StackEntity getCurrentStack() {
-    return currentStack;
-  }
-
-  public void setCurrentStack(StackEntity currentStack) {
-    this.currentStack = currentStack;
-  }
-
   public String getVersion() {
     return version;
   }
@@ -229,11 +213,6 @@ public class HostComponentStateEntity {
       return false;
     }
 
-    if (currentStack != null ? !currentStack.equals(that.currentStack)
-        : that.currentStack != null) {
-      return false;
-    }
-
     if (currentState != null ? !currentState.equals(that.currentState)
         : that.currentState != null) {
       return false;
@@ -267,7 +246,6 @@ public class HostComponentStateEntity {
     result = 31 * result + (componentName != null ? componentName.hashCode() : 0);
     result = 31 * result + (currentState != null ? currentState.hashCode() : 0);
     result = 31 * result + (upgradeState != null ? upgradeState.hashCode() : 0);
-    result = 31 * result + (currentStack != null ? currentStack.hashCode() : 0);
     result = 31 * result + (serviceName != null ? serviceName.hashCode() : 0);
     result = 31 * result + (version != null ? version.hashCode() : 0);
     return result;

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostVersionEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostVersionEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostVersionEntity.java
index 4ed9617..9be30a3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostVersionEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostVersionEntity.java
@@ -71,7 +71,12 @@ import org.apache.ambari.server.state.RepositoryVersionState;
     @NamedQuery(name = "hostVersionByClusterStackVersionAndHostId", query =
         "SELECT hostVersion FROM HostVersionEntity hostVersion JOIN hostVersion.hostEntity host JOIN host.clusterEntities clusters " +
         "WHERE hostVersion.hostId=:hostId AND clusters.clusterId=:clusterId AND hostVersion.repositoryVersion.stack.stackName=:stackName " +
-        "AND hostVersion.repositoryVersion.stack.stackVersion=:stackVersion AND hostVersion.repositoryVersion.version=:version")
+        "AND hostVersion.repositoryVersion.stack.stackVersion=:stackVersion AND hostVersion.repositoryVersion.version=:version"),
+
+    @NamedQuery(
+        name = "findHostVersionByClusterAndRepository",
+        query = "SELECT hostVersion FROM HostVersionEntity hostVersion JOIN hostVersion.hostEntity host JOIN host.clusterEntities clusters "
+            + "WHERE clusters.clusterId = :clusterId AND hostVersion.repositoryVersion = :repositoryVersion") 
 })
 public class HostVersionEntity {
 
@@ -115,9 +120,9 @@ public class HostVersionEntity {
    * This constructor is mainly used by the unit tests in order to construct an object without the id.
    */
   public HostVersionEntity(HostVersionEntity other) {
-    this.hostEntity = other.hostEntity;
-    this.repositoryVersion = other.repositoryVersion;
-    this.state = other.state;
+    hostEntity = other.hostEntity;
+    repositoryVersion = other.repositoryVersion;
+    state = other.state;
   }
 
   public Long getId() {
@@ -169,15 +174,29 @@ public class HostVersionEntity {
 
   @Override
   public boolean equals(Object obj) {
-    if (this == obj) return true;
-    if (obj == null) return false;
-    if (getClass() != obj.getClass()) return false;
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null) {
+      return false;
+    }
+    if (getClass() != obj.getClass()) {
+      return false;
+    }
 
     HostVersionEntity other = (HostVersionEntity) obj;
-    if (id != null ? id != other.id : other.id != null) return false;
-    if (hostEntity != null ? !hostEntity.equals(other.hostEntity) : other.hostEntity != null) return false;
-    if (repositoryVersion != null ? !repositoryVersion.equals(other.repositoryVersion) : other.repositoryVersion != null) return false;
-    if (state != other.state) return false;
+    if (id != null ? id != other.id : other.id != null) {
+      return false;
+    }
+    if (hostEntity != null ? !hostEntity.equals(other.hostEntity) : other.hostEntity != null) {
+      return false;
+    }
+    if (repositoryVersion != null ? !repositoryVersion.equals(other.repositoryVersion) : other.repositoryVersion != null) {
+      return false;
+    }
+    if (state != other.state) {
+      return false;
+    }
     return true;
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceComponentDesiredStateEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceComponentDesiredStateEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceComponentDesiredStateEntity.java
index 17fd323..eb1b187 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceComponentDesiredStateEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceComponentDesiredStateEntity.java
@@ -20,6 +20,7 @@ package org.apache.ambari.server.orm.entities;
 
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Objects;
 
 import javax.persistence.CascadeType;
 import javax.persistence.Column;
@@ -42,6 +43,7 @@ import javax.persistence.UniqueConstraint;
 
 import org.apache.ambari.server.state.RepositoryVersionState;
 import org.apache.ambari.server.state.State;
+import org.apache.commons.lang.builder.EqualsBuilder;
 
 @Entity
 @Table(
@@ -90,18 +92,16 @@ public class ServiceComponentDesiredStateEntity {
   private RepositoryVersionState repoState = RepositoryVersionState.INIT;
 
   /**
-   * Unidirectional one-to-one association to {@link StackEntity}
+   * Unidirectional one-to-one association to {@link RepositoryVersionEntity}
    */
   @OneToOne
-  @JoinColumn(name = "desired_stack_id", unique = false, nullable = false, insertable = true, updatable = true)
-  private StackEntity desiredStack;
-
-  /**
-   * Version string that should be followed by instances
-   * of component on hosts. Includes both stack version and build
-   */
-  @Column(name = "desired_version", nullable = false, insertable = true, updatable = true)
-  private String desiredVersion = State.UNKNOWN.toString();
+  @JoinColumn(
+      name = "desired_repo_version_id",
+      unique = false,
+      nullable = true,
+      insertable = true,
+      updatable = true)
+  private RepositoryVersionEntity desiredRepositoryVersion;
 
   @ManyToOne
   @JoinColumns({@javax.persistence.JoinColumn(name = "cluster_id", referencedColumnName = "cluster_id", nullable = false), @JoinColumn(name = "service_name", referencedColumnName = "service_name", nullable = false)})
@@ -161,20 +161,20 @@ public class ServiceComponentDesiredStateEntity {
     this.desiredState = desiredState;
   }
 
-  public StackEntity getDesiredStack() {
-    return desiredStack;
+  public RepositoryVersionEntity getDesiredRepositoryVersion() {
+    return desiredRepositoryVersion;
   }
 
-  public void setDesiredStack(StackEntity desiredStack) {
-    this.desiredStack = desiredStack;
+  public void setDesiredRepositoryVersion(RepositoryVersionEntity desiredRepositoryVersion) {
+    this.desiredRepositoryVersion = desiredRepositoryVersion;
   }
 
-  public String getDesiredVersion() {
-    return desiredVersion;
+  public StackEntity getDesiredStack() {
+    return desiredRepositoryVersion.getStack();
   }
 
-  public void setDesiredVersion(String desiredVersion) {
-    this.desiredVersion = desiredVersion;
+  public String getDesiredVersion() {
+    return desiredRepositoryVersion.getVersion();
   }
 
   /**
@@ -232,6 +232,9 @@ public class ServiceComponentDesiredStateEntity {
     this.recoveryEnabled = (recoveryEnabled == false) ? 0 : 1;
   }
 
+  /**
+   * {@inheritDoc}
+   */
   @Override
   public boolean equals(Object o) {
     if (this == o) {
@@ -243,39 +246,21 @@ public class ServiceComponentDesiredStateEntity {
     }
 
     ServiceComponentDesiredStateEntity that = (ServiceComponentDesiredStateEntity) o;
-
-    if (id != null ? !id.equals(that.id) : that.id != null) {
-      return false;
-    }
-    if (clusterId != null ? !clusterId.equals(that.clusterId) : that.clusterId != null) {
-      return false;
-    }
-    if (componentName != null ? !componentName.equals(that.componentName) : that.componentName != null) {
-      return false;
-    }
-    if (desiredState != null ? !desiredState.equals(that.desiredState) : that.desiredState != null) {
-      return false;
-    }
-    if (serviceName != null ? !serviceName.equals(that.serviceName) : that.serviceName != null) {
-      return false;
-    }
-    if (desiredStack != null ? !desiredStack.equals(that.desiredStack)
-        : that.desiredStack != null) {
-      return false;
-    }
-    return true;
+    EqualsBuilder equalsBuilder = new EqualsBuilder();
+    equalsBuilder.append(id, that.id);
+    equalsBuilder.append(clusterId, that.clusterId);
+    equalsBuilder.append(componentName, that.componentName);
+    equalsBuilder.append(desiredState, that.desiredState);
+    equalsBuilder.append(serviceName, that.serviceName);
+    equalsBuilder.append(desiredRepositoryVersion, that.desiredRepositoryVersion);
+
+    return equalsBuilder.isEquals();
   }
 
   @Override
   public int hashCode() {
-    int result = id != null ? id.hashCode() : 0;
-    result = 31 * result + (clusterId != null ? clusterId.hashCode() : 0);
-    result = 31 * result + (serviceName != null ? serviceName.hashCode() : 0);
-    result = 31 * result + (componentName != null ? componentName.hashCode() : 0);
-    result = 31 * result + (desiredState != null ? desiredState.hashCode() : 0);
-    result = 31 * result + (desiredStack != null ? desiredStack.hashCode() : 0);
-
-    return result;
+    return Objects.hash(id, clusterId, serviceName, componentName, desiredState,
+        desiredRepositoryVersion);
   }
 
   public ClusterServiceEntity getClusterServiceEntity() {

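To illustrate the shape of this change (a sketch, not part of the patch): the component's desired stack and version are now derived from a single desired repository version instead of being stored as separate columns. Assuming repositoryVersion is a RepositoryVersionEntity looked up elsewhere:

    // Illustrative caller-side sketch only.
    ServiceComponentDesiredStateEntity desiredStateEntity = new ServiceComponentDesiredStateEntity();
    desiredStateEntity.setDesiredRepositoryVersion(repositoryVersion);

    StackEntity desiredStack = desiredStateEntity.getDesiredStack();   // repositoryVersion.getStack()
    String desiredVersion = desiredStateEntity.getDesiredVersion();    // repositoryVersion.getVersion()
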
http://git-wip-us.apache.org/repos/asf/ambari/blob/dcbd826c/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceDesiredStateEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceDesiredStateEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceDesiredStateEntity.java
index 885f995..d22513a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceDesiredStateEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceDesiredStateEntity.java
@@ -18,6 +18,8 @@
 
 package org.apache.ambari.server.orm.entities;
 
+import java.util.Objects;
+
 import javax.persistence.Basic;
 import javax.persistence.Column;
 import javax.persistence.Entity;
@@ -25,11 +27,13 @@ import javax.persistence.EnumType;
 import javax.persistence.Enumerated;
 import javax.persistence.Id;
 import javax.persistence.JoinColumn;
+import javax.persistence.ManyToOne;
 import javax.persistence.OneToOne;
 
 import org.apache.ambari.server.state.MaintenanceState;
 import org.apache.ambari.server.state.SecurityState;
 import org.apache.ambari.server.state.State;
+import org.apache.commons.lang.builder.EqualsBuilder;
 
 @javax.persistence.IdClass(ServiceDesiredStateEntityPK.class)
 @javax.persistence.Table(name = "servicedesiredstate")
@@ -52,13 +56,6 @@ public class ServiceDesiredStateEntity {
   @Basic
   private int desiredHostRoleMapping = 0;
 
-  /**
-   * Unidirectional one-to-one association to {@link StackEntity}
-   */
-  @OneToOne
-  @JoinColumn(name = "desired_stack_id", unique = false, nullable = false, insertable = true, updatable = true)
-  private StackEntity desiredStack;
-
   @Column(name = "maintenance_state", nullable = false, insertable = true, updatable = true)
   @Enumerated(value = EnumType.STRING)
   private MaintenanceState maintenanceState = MaintenanceState.OFF;
@@ -78,6 +75,13 @@ public class ServiceDesiredStateEntity {
       })
   private ClusterServiceEntity clusterServiceEntity;
 
+  /**
+   * The desired repository that the service should be on.
+   */
+  @ManyToOne
+  @JoinColumn(name = "desired_repo_version_id", unique = false, nullable = false, insertable = true, updatable = true)
+  private RepositoryVersionEntity desiredRepositoryVersion;
+
   public Long getClusterId() {
     return clusterId;
   }
@@ -111,11 +115,7 @@ public class ServiceDesiredStateEntity {
   }
 
   public StackEntity getDesiredStack() {
-    return desiredStack;
-  }
-
-  public void setDesiredStack(StackEntity desiredStack) {
-    this.desiredStack = desiredStack;
+    return desiredRepositoryVersion.getStack();
   }
 
   public MaintenanceState getMaintenanceState() {
@@ -152,6 +152,9 @@ public class ServiceDesiredStateEntity {
     this.credentialStoreEnabled = (short)((credentialStoreEnabled == false) ? 0 : 1);
   }
 
+  /**
+   * {@inheritDoc}
+   */
   @Override
   public boolean equals(Object o) {
     if (this == o) {
@@ -163,37 +166,23 @@ public class ServiceDesiredStateEntity {
     }
 
     ServiceDesiredStateEntity that = (ServiceDesiredStateEntity) o;
+    EqualsBuilder equalsBuilder = new EqualsBuilder();
+    equalsBuilder.append(clusterId, that.clusterId);
+    equalsBuilder.append(desiredState, that.desiredState);
+    equalsBuilder.append(desiredHostRoleMapping, that.desiredHostRoleMapping);
+    equalsBuilder.append(serviceName, that.serviceName);
+    equalsBuilder.append(desiredRepositoryVersion, that.desiredRepositoryVersion);
 
-    if (clusterId != null ? !clusterId.equals(that.clusterId) : that.clusterId != null) {
-      return false;
-    }
-
-    if (desiredState != null ? !desiredState.equals(that.desiredState) : that.desiredState != null) {
-      return false;
-    }
-
-    if (desiredHostRoleMapping != that.desiredHostRoleMapping) {
-      return false;
-    }
-
-    if (serviceName != null ? !serviceName.equals(that.serviceName) : that.serviceName != null) {
-      return false;
-    }
-
-    if (desiredStack != null ? !desiredStack.equals(that.desiredStack) : that.desiredStack != null) {
-      return false;
-    }
-    return true;
+    return equalsBuilder.isEquals();
   }
 
+  /**
+   * {@inheritDoc}
+   */
   @Override
   public int hashCode() {
-    int result = clusterId != null ? clusterId.intValue() : 0;
-    result = 31 * result + (serviceName != null ? serviceName.hashCode() : 0);
-    result = 31 * result + (desiredState != null ? desiredState.hashCode() : 0);
-    result = 31 * result + desiredHostRoleMapping;
-    result = 31 * result + (desiredStack != null ? desiredStack.hashCode() : 0);
-    return result;
+    return Objects.hash(clusterId, serviceName, desiredState, desiredHostRoleMapping,
+        desiredRepositoryVersion);
   }
 
   public ClusterServiceEntity getClusterServiceEntity() {
@@ -203,4 +192,24 @@ public class ServiceDesiredStateEntity {
   public void setClusterServiceEntity(ClusterServiceEntity clusterServiceEntity) {
     this.clusterServiceEntity = clusterServiceEntity;
   }
+
+  /**
+   * Gets the desired repository version.
+   *
+   * @return the desired repository (never {@code null}).
+   */
+  public RepositoryVersionEntity getDesiredRepositoryVersion() {
+    return desiredRepositoryVersion;
+  }
+
+  /**
+   * Sets the desired repository for this service.
+   *
+   * @param desiredRepositoryVersion
+   *          the desired repository (not {@code null}).
+   */
+  public void setDesiredRepositoryVersion(RepositoryVersionEntity desiredRepositoryVersion) {
+    this.desiredRepositoryVersion = desiredRepositoryVersion;
+  }
+
 }
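
The service-level entity follows the same pattern, and its new desired_repo_version_id join column is declared non-nullable, so a desired repository version must be assigned before the entity is persisted. A minimal sketch (illustrative only; repositoryVersion is assumed to be an already-persisted RepositoryVersionEntity):

    ServiceDesiredStateEntity serviceDesiredState = new ServiceDesiredStateEntity();
    serviceDesiredState.setDesiredRepositoryVersion(repositoryVersion); // required before persisting
    StackEntity stack = serviceDesiredState.getDesiredStack();          // derived from the repository version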