You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by jo...@apache.org on 2017/07/18 15:03:26 UTC

[50/50] [abbrv] ambari git commit: Merge branch 'branch-2.5' into branch-feature-AMBARI-21450

Merge branch 'branch-2.5' into branch-feature-AMBARI-21450


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/5cdcd070
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/5cdcd070
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/5cdcd070

Branch: refs/heads/branch-feature-AMBARI-21450
Commit: 5cdcd0701aa0003587c1c94a68af0167ddf4df64
Parents: ae3ce90 d8a5bad
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Tue Jul 18 11:02:00 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Tue Jul 18 11:02:00 2017 -0400

----------------------------------------------------------------------
 .../controllers/groups/GroupsEditCtrl.js        |   3 +
 .../stackVersions/StackVersionsCreateCtrl.js    |   3 +-
 .../resource_management/TestPackageResource.py  |   4 +-
 .../core/providers/package/yumrpm.py            |   2 +-
 .../core/providers/package/zypper.py            |   2 +-
 .../libraries/functions/stack_features.py       |  28 +-
 .../libraries/functions/stack_tools.py          |  15 +-
 .../kafka/KafkaTimelineMetricsReporter.java     |   6 +-
 ambari-server/pom.xml                           |   2 +-
 ambari-server/sbin/ambari-server                |   6 +-
 .../ambari/server/agent/ExecutionCommand.java   |   2 +
 .../ambari/server/checks/CheckDescription.java  |  38 +-
 .../checks/ComponentsExistInRepoCheck.java      | 142 ++++++
 .../controller/ActionExecutionContext.java      |  28 ++
 .../controller/AmbariActionExecutionHelper.java |  26 +-
 .../AmbariCustomCommandExecutionHelper.java     |  22 +-
 .../AmbariManagementControllerImpl.java         | 122 +++--
 .../server/controller/ServiceRequest.java       |  26 +-
 .../AbstractControllerResourceProvider.java     |  23 +
 .../ClusterStackVersionResourceProvider.java    |   2 +
 .../internal/HostResourceProvider.java          |   1 +
 .../internal/ServiceResourceProvider.java       |  64 ++-
 .../controller/utilities/PropertyHelper.java    |   8 +
 .../apache/ambari/server/orm/DBAccessor.java    |  14 +
 .../ambari/server/orm/DBAccessorImpl.java       |  24 +
 .../LdapToPamMigrationHelper.java               |  73 +++
 .../server/security/authorization/Users.java    |   4 +
 .../upgrades/ChangeStackReferencesAction.java   |   4 +-
 .../upgrades/FinalizeUpgradeAction.java         |   1 +
 .../upgrades/UpgradeUserKerberosDescriptor.java |  41 +-
 .../org/apache/ambari/server/state/Host.java    |   4 +-
 .../ambari/server/state/PropertyInfo.java       |   2 +
 .../ambari/server/state/UpgradeContext.java     |   3 +-
 .../ambari/server/state/host/HostImpl.java      |  17 +-
 .../KerberosDescriptorUpdateHelper.java         |   9 +-
 .../ambari/server/topology/AmbariContext.java   |  17 +-
 .../server/upgrade/UpgradeCatalog252.java       | 110 ++++-
 ambari-server/src/main/python/ambari-server.py  |  10 +-
 .../main/python/ambari_server/setupActions.py   |   1 +
 .../main/python/ambari_server/setupSecurity.py  | 123 ++++-
 .../0.96.0.2.0/package/scripts/hbase_master.py  |  10 +-
 .../0.96.0.2.0/package/scripts/hbase_service.py |  40 +-
 .../0.96.0.2.0/package/scripts/params_linux.py  |   9 +
 .../HDFS/2.1.0.2.0/configuration/hdfs-site.xml  |   6 +
 .../HDFS/2.1.0.2.0/package/scripts/hdfs.py      |   8 +
 .../2.1.0.2.0/package/scripts/hdfs_namenode.py  |  26 +-
 .../2.1.0.2.0/package/scripts/params_linux.py   |  11 +-
 .../2.1.0.2.0/package/scripts/params_windows.py |   7 +
 .../package/templates/include_hosts_list.j2     |  21 +
 .../0.12.0.2.0/package/scripts/params_linux.py  |   4 +
 .../0.12.0.2.0/package/scripts/service_check.py |   3 +-
 .../common-services/JNBG/0.2.0/alerts.json      |  32 ++
 .../JNBG/0.2.0/configuration/jnbg-env.xml       | 209 +++++++++
 .../common-services/JNBG/0.2.0/kerberos.json    |  59 +++
 .../common-services/JNBG/0.2.0/metainfo.xml     | 108 +++++
 .../JNBG/0.2.0/package/files/jkg_install.sh     | 169 +++++++
 .../JNBG/0.2.0/package/files/jkg_start.sh       |  84 ++++
 .../JNBG/0.2.0/package/files/log4j_setup.sh     |  79 ++++
 .../0.2.0/package/files/pyspark_configure.sh    | 104 +++++
 .../JNBG/0.2.0/package/files/pythonenv_setup.sh | 138 ++++++
 .../JNBG/0.2.0/package/files/toree_configure.sh | 151 +++++++
 .../JNBG/0.2.0/package/files/toree_install.sh   | 176 ++++++++
 .../JNBG/0.2.0/package/scripts/jkg_toree.py     | 134 ++++++
 .../0.2.0/package/scripts/jkg_toree_params.py   | 177 ++++++++
 .../JNBG/0.2.0/package/scripts/jnbg_helpers.py  |  81 ++++
 .../JNBG/0.2.0/package/scripts/jnbg_params.py   |  66 +++
 .../JNBG/0.2.0/package/scripts/py_client.py     |  63 +++
 .../0.2.0/package/scripts/py_client_params.py   |  39 ++
 .../JNBG/0.2.0/package/scripts/service_check.py |  44 ++
 .../JNBG/0.2.0/package/scripts/status_params.py |  26 ++
 .../0.10.0/configuration/ranger-kafka-audit.xml |  58 +++
 .../common-services/KAFKA/0.10.0/kerberos.json  |  79 ++++
 .../common-services/KAFKA/0.10.0/metainfo.xml   |  28 ++
 .../KAFKA/0.8.1/package/scripts/kafka.py        |  12 +
 .../KAFKA/0.8.1/package/scripts/params.py       |   3 +
 .../KAFKA/0.9.0/configuration/kafka-broker.xml  |   2 +-
 .../0.5.0.2.2/package/scripts/params_linux.py   |   8 +
 .../KNOX/0.5.0.2.2/package/scripts/upgrade.py   |   2 +-
 .../OOZIE/4.0.0.2.0/package/scripts/oozie.py    |  26 +-
 .../4.0.0.2.0/package/scripts/oozie_server.py   |   4 +-
 .../package/scripts/oozie_server_upgrade.py     |  15 +-
 .../4.0.0.2.0/package/scripts/params_linux.py   |  15 +-
 .../R4ML/0.8.0/configuration/r4ml-env.xml       |  48 ++
 .../common-services/R4ML/0.8.0/metainfo.xml     |  92 ++++
 .../R4ML/0.8.0/package/files/Install.R          |  25 ++
 .../R4ML/0.8.0/package/files/ServiceCheck.R     |  28 ++
 .../R4ML/0.8.0/package/files/localr.repo        |  22 +
 .../R4ML/0.8.0/package/scripts/__init__.py      |  19 +
 .../R4ML/0.8.0/package/scripts/params.py        |  80 ++++
 .../R4ML/0.8.0/package/scripts/r4ml_client.py   | 201 +++++++++
 .../R4ML/0.8.0/package/scripts/service_check.py |  45 ++
 .../SPARK/1.2.1/package/scripts/params.py       |  11 +-
 .../SPARK/1.2.1/package/scripts/setup_spark.py  |   6 +-
 .../1.2.1/package/scripts/spark_service.py      |   6 +-
 .../SYSTEMML/0.10.0/metainfo.xml                |  77 ++++
 .../SYSTEMML/0.10.0/package/scripts/__init__.py |  19 +
 .../SYSTEMML/0.10.0/package/scripts/params.py   |  40 ++
 .../0.10.0/package/scripts/service_check.py     |  43 ++
 .../0.10.0/package/scripts/systemml_client.py   |  49 ++
 .../common-services/TITAN/1.0.0/alerts.json     |  33 ++
 .../1.0.0/configuration/gremlin-server.xml      |  85 ++++
 .../TITAN/1.0.0/configuration/hadoop-gryo.xml   |  94 ++++
 .../1.0.0/configuration/hadoop-hbase-read.xml   | 102 +++++
 .../TITAN/1.0.0/configuration/titan-env.xml     | 157 +++++++
 .../1.0.0/configuration/titan-hbase-solr.xml    |  69 +++
 .../TITAN/1.0.0/configuration/titan-log4j.xml   |  65 +++
 .../common-services/TITAN/1.0.0/kerberos.json   |  52 +++
 .../common-services/TITAN/1.0.0/metainfo.xml    | 124 +++++
 .../package/alerts/alert_check_titan_server.py  |  65 +++
 .../package/files/gremlin-server-script.sh      |  86 ++++
 .../package/files/tinkergraph-empty.properties  |  18 +
 .../TITAN/1.0.0/package/files/titanSmoke.groovy |  20 +
 .../TITAN/1.0.0/package/scripts/params.py       | 202 +++++++++
 .../1.0.0/package/scripts/params_server.py      |  37 ++
 .../1.0.0/package/scripts/service_check.py      |  88 ++++
 .../TITAN/1.0.0/package/scripts/titan.py        | 143 ++++++
 .../TITAN/1.0.0/package/scripts/titan_client.py |  61 +++
 .../TITAN/1.0.0/package/scripts/titan_server.py |  67 +++
 .../1.0.0/package/scripts/titan_service.py      | 150 +++++++
 .../templates/titan_solr_client_jaas.conf.j2    |  23 +
 .../package/templates/titan_solr_jaas.conf.j2   |  26 ++
 .../YARN/2.1.0.2.0/configuration/yarn-site.xml  |   6 +
 .../2.1.0.2.0/package/scripts/params_linux.py   |  12 +-
 .../2.1.0.2.0/package/scripts/params_windows.py |  10 +-
 .../package/scripts/resourcemanager.py          |  18 +-
 .../package/templates/include_hosts_list.j2     |  21 +
 .../0.6.0.2.5/configuration/zeppelin-env.xml    |   2 +-
 .../services/HDFS/configuration/hdfs-site.xml   |   6 +
 .../HDFS/package/scripts/hdfs_namenode.py       |  12 +-
 .../0.8/services/HDFS/package/scripts/params.py |  11 +-
 .../package/templates/include_hosts_list.j2     |  21 +
 .../services/YARN/configuration/yarn-site.xml   |   6 +
 .../0.8/services/YARN/package/scripts/params.py |  10 +-
 .../YARN/package/scripts/resourcemanager.py     |   9 +-
 .../package/templates/include_hosts_list.j2     |  21 +
 .../BigInsights/4.0/services/HIVE/metainfo.xml  |   2 +-
 .../configuration/spark-javaopts-properties.xml |   3 +
 .../BigInsights/4.0/services/SPARK/metainfo.xml |   2 +-
 .../package/scripts/spark_thrift_server.py      | 125 ++++++
 .../SPARK/package/scripts/thrift_server.py      | 125 ------
 .../4.0/stack-advisor/stack_advisor_25.py       |   5 +-
 .../stacks/BigInsights/4.2.5/metainfo.xml       |   2 +-
 .../HBASE/package/files/draining_servers.rb     | 164 +++++++
 .../HBASE/package/files/hbase-smoke-cleanup.sh  |  23 +
 .../HBASE/package/files/hbaseSmokeVerify.sh     |  34 ++
 .../services/HBASE/package/scripts/__init__.py  |  19 +
 .../services/HBASE/package/scripts/functions.py |  54 +++
 .../services/HBASE/package/scripts/hbase.py     | 234 ++++++++++
 .../HBASE/package/scripts/hbase_client.py       |  82 ++++
 .../HBASE/package/scripts/hbase_decommission.py |  93 ++++
 .../HBASE/package/scripts/hbase_master.py       | 163 +++++++
 .../HBASE/package/scripts/hbase_regionserver.py | 166 +++++++
 .../package/scripts/hbase_restgatewayserver.py  |  83 ++++
 .../HBASE/package/scripts/hbase_service.py      |  93 ++++
 .../HBASE/package/scripts/hbase_upgrade.py      |  41 ++
 .../services/HBASE/package/scripts/params.py    |  29 ++
 .../HBASE/package/scripts/params_linux.py       | 447 +++++++++++++++++++
 .../HBASE/package/scripts/params_windows.py     |  43 ++
 .../package/scripts/phoenix_queryserver.py      |  88 ++++
 .../HBASE/package/scripts/phoenix_service.py    |  55 +++
 .../HBASE/package/scripts/service_check.py      |  95 ++++
 .../HBASE/package/scripts/setup_ranger_hbase.py | 106 +++++
 .../HBASE/package/scripts/status_params.py      |  68 +++
 .../services/HBASE/package/scripts/upgrade.py   |  65 +++
 ...-metrics2-hbase.properties-GANGLIA-MASTER.j2 | 117 +++++
 ...doop-metrics2-hbase.properties-GANGLIA-RS.j2 | 116 +++++
 .../HBASE/package/templates/hbase-smoke.sh.j2   |  44 ++
 .../HBASE/package/templates/hbase.conf.j2       |  35 ++
 .../package/templates/hbase_client_jaas.conf.j2 |  23 +
 .../templates/hbase_grant_permissions.j2        |  40 ++
 .../package/templates/hbase_master_jaas.conf.j2 |  26 ++
 .../templates/hbase_queryserver_jaas.conf.j2    |  26 ++
 .../templates/hbase_regionserver_jaas.conf.j2   |  26 ++
 .../package/templates/hbase_rest_jaas.conf.j2   |  26 ++
 .../HBASE/package/templates/regionservers.j2    |  20 +
 .../4.2.5/services/HIVE/metainfo.xml            |   2 +-
 .../4.2.5/services/JNBG/metainfo.xml            |  26 ++
 .../4.2.5/services/R4ML/metainfo.xml            |  37 ++
 .../spark2-javaopts-properties.xml              |   5 +-
 .../4.2.5/services/SPARK2/metainfo.xml          |   2 +-
 .../4.2.5/services/SQOOP/metainfo.xml           |   2 +-
 .../4.2.5/services/SYSTEMML/metainfo.xml        |  37 ++
 .../4.2.5/services/TITAN/metainfo.xml           |  40 ++
 .../4.2.5/upgrades/config-upgrade.xml           | 101 ++++-
 .../upgrades/nonrolling-upgrade-to-hdp-2.6.xml  | 229 +++++++++-
 .../services/HBASE/package/scripts/params.py    |   4 +-
 .../BigInsights/4.2/services/HIVE/metainfo.xml  |   2 +-
 .../BigInsights/4.2/services/KNOX/kerberos.json |   6 -
 .../RANGER/configuration/ranger-admin-site.xml  |  14 +
 .../configuration/spark-javaopts-properties.xml |   3 +
 .../BigInsights/4.2/services/SPARK/metainfo.xml |   2 +-
 .../services/SPARK/package/scripts/params.py    |   5 +-
 .../package/scripts/spark_thrift_server.py      | 119 +++++
 .../SPARK/package/scripts/thrift_server.py      | 119 -----
 .../services/TITAN/configuration/titan-env.xml  |   4 +-
 .../TITAN/configuration/titan-hbase-solr.xml    |   2 +-
 .../TITAN/configuration/titan-log4j.xml         |   2 +-
 .../BigInsights/4.2/upgrades/config-upgrade.xml | 116 ++++-
 .../upgrades/nonrolling-upgrade-to-hdp-2.6.xml  | 206 ++++++++-
 .../services/YARN/configuration/yarn-site.xml   |   6 +
 .../services/YARN/package/scripts/params.py     |   9 +-
 .../YARN/package/scripts/resourcemanager.py     |   9 +-
 .../package/templates/exclude_hosts_list.j2     |  21 +
 .../package/templates/include_hosts_list.j2     |  21 +
 .../hooks/before-ANY/files/changeToSecureUid.sh |  13 +-
 .../before-ANY/scripts/shared_initialization.py |  45 +-
 .../services/YARN/configuration/yarn-site.xml   |   6 +
 .../services/YARN/package/scripts/params.py     |  10 +-
 .../YARN/package/scripts/resourcemanager.py     |   9 +-
 .../package/templates/include_hosts_list.j2     |  21 +
 .../stacks/HDP/2.5/services/KAFKA/metainfo.xml  |   1 +
 .../stacks/HDP/2.5/services/stack_advisor.py    |  11 +-
 .../stacks/HDP/2.5/upgrades/config-upgrade.xml  |   4 +
 .../HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml |   6 +-
 .../stacks/HDP/2.5/upgrades/upgrade-2.6.xml     |   1 +
 .../configuration/application-properties.xml    |  17 +
 .../services/HIVE/configuration/hive-env.xml    |  78 ++--
 .../HIVE/configuration/hive-interactive-env.xml |  62 +--
 .../stacks/HDP/2.6/upgrades/config-upgrade.xml  |   4 +
 .../HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml |   4 +
 .../stacks/HDP/2.6/upgrades/upgrade-2.6.xml     |   1 +
 .../src/main/resources/stacks/stack_advisor.py  |  18 +
 .../checks/ComponentExistsInRepoCheckTest.java  | 329 ++++++++++++++
 .../AmbariManagementControllerTest.java         |   8 +-
 .../server/orm/dao/ClusterVersionDAOTest.java   | 264 -----------
 .../ChangeStackReferencesActionTest.java        |   1 +
 .../upgrades/UpgradeActionTest.java             |   2 -
 .../UpgradeUserKerberosDescriptorTest.java      |  19 +-
 .../KerberosDescriptorUpdateHelperTest.java     |  70 +++
 .../src/test/python/TestAmbariServer.py         |  13 +-
 .../src/test/python/TestStackFeature.py         |  44 +-
 .../python/custom_actions/test_ru_set_all.py    |   6 +-
 .../python/stacks/2.0.6/HDFS/test_namenode.py   |   2 +-
 .../stacks/2.0.6/configs/altfs_plus_hdfs.json   |   2 +-
 .../python/stacks/2.0.6/configs/default.json    |   2 +-
 .../2.0.6/configs/default_ams_embedded.json     |   2 +-
 .../2.0.6/configs/default_hive_nn_ha.json       |   2 +-
 .../2.0.6/configs/default_hive_nn_ha_2.json     |   2 +-
 .../2.0.6/configs/default_hive_non_hdfs.json    |   2 +-
 .../2.0.6/configs/default_no_install.json       |   2 +-
 .../2.0.6/configs/default_oozie_mysql.json      |   2 +-
 .../default_update_exclude_file_only.json       |   2 +-
 .../2.0.6/configs/default_with_bucket.json      |   2 +-
 .../python/stacks/2.0.6/configs/flume_22.json   |   2 +-
 .../python/stacks/2.0.6/configs/flume_only.json |   2 +-
 .../stacks/2.0.6/configs/hbase_no_phx.json      |   2 +-
 .../stacks/2.0.6/configs/hbase_with_phx.json    |   2 +-
 .../test/python/stacks/2.0.6/configs/nn_eu.json |   2 +-
 .../stacks/2.0.6/configs/nn_eu_standby.json     |   2 +-
 .../2.0.6/configs/oozie_existing_sqla.json      |   2 +-
 .../python/stacks/2.0.6/configs/secured.json    |   2 +-
 .../2.0.6/hooks/before-ANY/test_before_any.py   | 294 +++++++-----
 .../2.1/configs/hive-metastore-upgrade.json     |   2 +-
 .../python/stacks/2.2/configs/knox_upgrade.json |   2 +-
 .../test/python/stacks/2.3/configs/ats_1_5.json |   2 +-
 .../stacks/2.5/common/test_stack_advisor.py     | 150 ++++---
 .../python/stacks/2.5/configs/hsi_default.json  |   2 +-
 .../2.5/configs/hsi_default_for_restart.json    |   2 +-
 .../test/python/stacks/2.5/configs/hsi_ha.json  |   2 +-
 .../main/admin/stack_and_upgrade_controller.js  |  22 +
 .../app/controllers/wizard/step7_controller.js  |  67 +++
 .../configs/stack_config_properties_mapper.js   |  14 +-
 ambari-web/app/messages.js                      |   1 +
 ambari-web/app/styles/application.less          |  15 +
 .../main/admin/stack_upgrade/versions.hbs       |   9 +-
 ...ontrols_service_config_usergroup_with_id.hbs |  27 ++
 ambari-web/app/utils/ajax/ajax.js               |   2 +-
 ambari-web/app/utils/config.js                  |   3 +
 .../configs/service_configs_by_category_view.js |   6 +
 ambari-web/app/views/common/controls_view.js    |  39 ++
 .../stack_upgrade/upgrade_version_box_view.js   |  11 +-
 .../main/admin/stack_upgrade/versions_view.js   |  16 -
 .../admin/stack_and_upgrade_controller_test.js  |  19 +
 .../admin/stack_upgrade/version_view_test.js    |  42 --
 .../services/YARN/configuration/yarn-site.xml   |   6 +
 .../YARN/package/scripts/params_linux.py        |   9 +-
 .../YARN/package/scripts/params_windows.py      |  10 +-
 .../YARN/package/scripts/resourcemanager.py     |  18 +-
 .../package/templates/include_hosts_list.j2     |  21 +
 279 files changed, 10995 insertions(+), 1193 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
index 441c047,7948d30..9b6b2f5
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
@@@ -452,6 -433,9 +452,8 @@@ public class ExecutionCommand extends A
      String GROUP_LIST = "group_list";
      String USER_GROUPS = "user_groups";
      String NOT_MANAGED_HDFS_PATH_LIST = "not_managed_hdfs_path_list";
 -    String VERSION = "version";
+     String SOURCE_STACK = "source_stack";
+     String TARGET_STACK = "target_stack";
      String REFRESH_TOPOLOGY = "refresh_topology";
      String HOST_SYS_PREPPED = "host_sys_prepped";
      String MAX_DURATION_OF_RETRIES = "max_duration_for_retries";

http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
index 42a95c0,af506f2..9939ce7
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ActionExecutionContext.java
@@@ -24,9 -23,9 +24,10 @@@ import java.util.List
  import java.util.Map;
  
  import org.apache.ambari.server.actionmanager.TargetHostType;
 +import org.apache.ambari.server.agent.ExecutionCommand;
  import org.apache.ambari.server.controller.internal.RequestOperationLevel;
  import org.apache.ambari.server.controller.internal.RequestResourceFilter;
+ import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
  
  /**
   * The context required to create tasks and stages for a custom action
@@@ -43,8 -42,7 +44,9 @@@ public class ActionExecutionContext 
    private String expectedComponentName;
    private boolean hostsInMaintenanceModeExcluded = true;
    private boolean allowRetry = false;
 +
 +  private List<ExecutionCommandVisitor> m_visitors = new ArrayList<>();
+   private RepositoryVersionEntity repositoryVersion;
  
    /**
     * {@code true} if slave/client component failures should be automatically
@@@ -173,15 -171,31 +175,41 @@@
    }
  
    /**
+    * Gets the stack/version to use for generating stack-associated values for a
+    * command. In some cases the cluster's stack is not the correct one to use,
+    * such as when distributing a repository.
+    *
+    * @return the repository for the stack/version to use when generating
+    *         stack-specific content for the command.
+    *
+    * @return
+    */
+   public RepositoryVersionEntity getRepositoryVersion() {
+     return repositoryVersion;
+   }
+ 
+   /**
+    * Sets the stack/version to use for generating stack-associated values for a
+    * command. In some cases the cluster's stack is not the correct one to use,
+    * such as when distributing a repository.
+    *
+    * @param stackId
+    *          the stackId to use for stack-based properties on the command.
+    */
+   public void setRepositoryVersion(RepositoryVersionEntity repositoryVersion) {
+     this.repositoryVersion = repositoryVersion;
+   }
+ 
++  /**
 +   * Adds a command visitor that will be invoked after a command is created.  Provides access
 +   * to the command.
 +   *
 +   * @param visitor the visitor
 +   */
 +  public void addVisitor(ExecutionCommandVisitor visitor) {
 +    m_visitors.add(visitor);
 +  }
 +
    @Override
    public String toString() {
      return "ActionExecutionContext{" +

http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
index 1b0e0e0,0638910..04f1cb3
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
@@@ -461,11 -459,7 +462,12 @@@ public class AmbariActionExecutionHelpe
          hostLevelParams.put(previousDBConnectorName.getKey(), previousDBConnectorName.getValue());
        }
  
 -      addRepoInfoToHostLevelParams(cluster, actionContext, hostLevelParams, hostName);
 +      if (StringUtils.isNotBlank(serviceName)) {
 +        Service service = cluster.getService(serviceName);
-         addRepoInfoToHostLevelParams(service.getDesiredRepositoryVersion(), hostLevelParams, hostName);
++        addRepoInfoToHostLevelParams(actionContext, service.getDesiredRepositoryVersion(),
++            hostLevelParams, hostName);
 +      }
 +
  
        Map<String, String> roleParams = execCmd.getRoleParams();
        if (roleParams == null) {
@@@ -527,10 -519,10 +529,25 @@@
    *
    * */
  
-   private void addRepoInfoToHostLevelParams(RepositoryVersionEntity repositoryVersion,
 -  private void addRepoInfoToHostLevelParams(Cluster cluster, ActionExecutionContext actionContext,
--      Map<String, String> hostLevelParams, String hostName) throws AmbariException {
 -    if (null == cluster) {
++  private void addRepoInfoToHostLevelParams(ActionExecutionContext actionContext,
++      RepositoryVersionEntity repositoryVersion, Map<String, String> hostLevelParams,
++      String hostName) throws AmbariException {
++
++    // if the repo is null, see if any values from the context should go on the
++    // host params and then return
 +    if (null == repositoryVersion) {
++      // see if the action context has a repository set to use for the command
++      if (null != actionContext.getRepositoryVersion()) {
++        StackId stackId = actionContext.getRepositoryVersion().getStackId();
++        hostLevelParams.put(STACK_NAME, stackId.getStackName());
++        hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
++      }
++
        return;
++    } else {
++      StackId stackId = repositoryVersion.getStackId();
++      hostLevelParams.put(STACK_NAME, stackId.getStackName());
++      hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
      }
  
      JsonObject rootJsonObject = new JsonObject();
@@@ -554,8 -550,18 +571,5 @@@
      }
  
      hostLevelParams.put(REPO_INFO, rootJsonObject.toString());
--
-     hostLevelParams.put(STACK_NAME, repositoryVersion.getStackName());
-     hostLevelParams.put(STACK_VERSION, repositoryVersion.getStackVersion());
 -    // set the host level params if not already set by whoever is creating this command
 -    if (!hostLevelParams.containsKey(STACK_NAME) || !hostLevelParams.containsKey(STACK_VERSION)) {
 -      // see if the action context has a repository set to use for the command, otherwise use the
 -      // cluster's current stack ID
 -      StackId stackId = cluster.getCurrentStackVersion();
 -      if (null != actionContext.getRepositoryVersion()) {
 -        stackId = actionContext.getRepositoryVersion().getStackId();
 -      }
 -
 -      hostLevelParams.put(STACK_NAME, stackId.getStackName());
 -      hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
 -    }
    }
  }

http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
index 6360a04,e321559..011ebfd
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
@@@ -1044,8 -1010,8 +1050,8 @@@ public class AmbariCustomCommandExecuti
        }
  
        if (!serviceName.equals(Service.Type.HBASE.name()) || hostName.equals(primaryCandidate)) {
-         commandParams.put(UPDATE_EXCLUDE_FILE_ONLY, "false");
+         commandParams.put(UPDATE_FILES_ONLY, "false");
 -        addCustomCommandAction(commandContext, commandFilter, stage, commandParams, commandDetail.toString());
 +        addCustomCommandAction(commandContext, commandFilter, stage, commandParams, commandDetail.toString(), null);
        }
      }
    }
@@@ -1467,14 -1318,18 +1473,18 @@@
          hostParamsStageJson);
    }
  
 -  Map<String, String> createDefaultHostParams(Cluster cluster) throws AmbariException {
 -    StackId stackId = cluster.getDesiredStackVersion();
 -    return createDefaultHostParams(cluster, stackId);
 +  Map<String, String> createDefaultHostParams(Cluster cluster, RepositoryVersionEntity repositoryVersion) throws AmbariException {
 +    return createDefaultHostParams(cluster, repositoryVersion.getStackId());
    }
  
 -  Map<String, String> createDefaultHostParams(Cluster cluster, StackId stackId) throws AmbariException{
 +  Map<String, String> createDefaultHostParams(Cluster cluster, StackId stackId) throws AmbariException {
 +
      TreeMap<String, String> hostLevelParams = new TreeMap<>();
      hostLevelParams.put(JDK_LOCATION, managementController.getJdkResourceUrl());
+     hostLevelParams.put(JAVA_HOME, managementController.getJavaHome());
+     hostLevelParams.put(JAVA_VERSION, String.valueOf(configs.getJavaVersion()));
+     hostLevelParams.put(JDK_NAME, managementController.getJDKName());
+     hostLevelParams.put(JCE_NAME, managementController.getJCEName());
      hostLevelParams.put(STACK_NAME, stackId.getStackName());
      hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
      hostLevelParams.put(DB_NAME, managementController.getServerDB());

http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceRequest.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceRequest.java
index 0fbb63f,a8e6315..1a6a040
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceRequest.java
@@@ -28,25 -28,17 +28,27 @@@ public class ServiceRequest 
    private String credentialStoreEnabled; // CREATE/UPDATE/GET
    private String credentialStoreSupported; //GET
  
-   private Long desiredRepositoryVersionId;
 -  public ServiceRequest(String clusterName, String serviceName,
 -                        String desiredState) {
 -    this(clusterName, serviceName, desiredState, null);
++  private String desiredStack;
++  private String desiredRepositoryVersion;
 +  /**
 +   * Short-lived object that gets set while validating a request
 +   */
 +  private RepositoryVersionEntity resolvedRepository;
 +
-   public ServiceRequest(String clusterName, String serviceName,
-       Long desiredRepositoryVersionId, String desiredState) {
-     this(clusterName, serviceName, desiredRepositoryVersionId, desiredState, null);
++  public ServiceRequest(String clusterName, String serviceName, String desiredStack,
++      String desiredRepositoryVersion, String desiredState) {
++    this(clusterName, serviceName, desiredStack, desiredRepositoryVersion, desiredState, null);
    }
  
--  public ServiceRequest(String clusterName, String serviceName,
-       Long desiredRepositoryVersionId, String desiredState, String credentialStoreEnabled) {
 -                        String desiredState,
 -                        String credentialStoreEnabled) {
++  public ServiceRequest(String clusterName, String serviceName, String desiredStack,
++      String desiredRepositoryVersion, String desiredState, String credentialStoreEnabled) {
      this.clusterName = clusterName;
      this.serviceName = serviceName;
      this.desiredState = desiredState;
 +
-     this.desiredRepositoryVersionId = desiredRepositoryVersionId;
++    this.desiredStack = desiredStack;
++    this.desiredRepositoryVersion = desiredRepositoryVersion;
 +
      this.credentialStoreEnabled = credentialStoreEnabled;
      // Credential store supported cannot be changed after
      // creation since it comes from the stack definition.
@@@ -81,10 -73,6 +83,14 @@@
      this.desiredState = desiredState;
    }
  
-   public Long getDesiredRepositoryVersionId() {
-     return desiredRepositoryVersionId;
++  public String getDesiredStack() {
++    return desiredStack;
++  }
++
++  public String getDesiredRepositoryVersion() {
++    return desiredRepositoryVersion;
 +  }
 +
    /**
     * @return the clusterName
     */
@@@ -142,25 -130,13 +148,25 @@@
      this.credentialStoreSupported = credentialStoreSupported;
    }
  
 +  @Override
    public String toString() {
      StringBuilder sb = new StringBuilder();
 -    sb.append("clusterName=" + clusterName
 -        + ", serviceName=" + serviceName
 -        + ", desiredState=" + desiredState
 -        + ", credentialStoreEnabled=" + credentialStoreEnabled
 -        + ", credentialStoreSupported=" + credentialStoreSupported);
 +    sb.append("clusterName=").append(clusterName)
 +      .append(", serviceName=").append(serviceName)
 +      .append(", desiredState=").append(desiredState)
 +      .append(", credentialStoreEnabled=").append(credentialStoreEnabled)
 +      .append(", credentialStoreSupported=").append(credentialStoreSupported);
      return sb.toString();
    }
 -}
 +
 +  /**
 +   * @param repositoryVersion
 +   */
 +  public void setResolvedRepository(RepositoryVersionEntity repositoryVersion) {
 +    resolvedRepository = repositoryVersion;
 +  }
 +
 +  public RepositoryVersionEntity getResolvedRepository() {
 +    return resolvedRepository;
 +  }
- }
++}

http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractControllerResourceProvider.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractControllerResourceProvider.java
index eba1816,b26814a..a9d234d
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractControllerResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractControllerResourceProvider.java
@@@ -27,6 -27,6 +27,7 @@@ import org.apache.ambari.server.control
  import org.apache.ambari.server.controller.spi.Resource;
  import org.apache.ambari.server.controller.spi.ResourceProvider;
  import org.apache.ambari.server.controller.utilities.ClusterControllerHelper;
++import org.apache.ambari.server.controller.utilities.PropertyHelper;
  import org.apache.ambari.server.state.Cluster;
  
  /**
@@@ -56,6 -56,6 +57,28 @@@ public abstract class AbstractControlle
      super(propertyIds, keyPropertyIds);
      this.managementController = managementController;
    }
++  
++  /**
++   * Create a new resource provider for the given management controller. This
++   * constructor will initialize the specified {@link Resource.Type} with the
++   * provided keys. It should be used in cases where the provider declares its
++   * own keys instead of reading them from a JSON file.
++   *
++   * @param type
++   *          the type to set the properties for (not {@code null}).
++   * @param propertyIds
++   *          the property ids
++   * @param keyPropertyIds
++   *          the key property ids
++   * @param managementController
++   *          the management controller
++   */
++  AbstractControllerResourceProvider(Resource.Type type, Set<String> propertyIds,
++      Map<Resource.Type, String> keyPropertyIds, AmbariManagementController managementController) {
++    this(propertyIds, keyPropertyIds, managementController);
++    PropertyHelper.setPropertyIds(type, propertyIds);
++    PropertyHelper.setKeyPropertyIds(type, keyPropertyIds);
++  }
  
    public static void init(ResourceProviderFactory factory) {
      resourceProviderFactory = factory;

http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
index 93c02be,633fe8c..d87d7a4
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
@@@ -600,16 -710,13 +600,18 @@@ public class ClusterStackVersionResourc
      RequestResourceFilter filter = new RequestResourceFilter(null, null,
              Collections.singletonList(host.getHostName()));
  
 -    ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
 -        INSTALL_PACKAGES_ACTION, Collections.singletonList(filter), params);
 +    ActionExecutionContext actionContext = new ActionExecutionContext(
 +            cluster.getClusterName(), INSTALL_PACKAGES_ACTION,
 +            Collections.singletonList(filter),
 +            roleParams);
+ 
+     actionContext.setRepositoryVersion(repoVersion);
      actionContext.setTimeout(Short.valueOf(configuration.getDefaultAgentTaskTimeout(true)));
  
 +    repoVersionHelper.addCommandRepository(actionContext, osFamily, repoVersion, repoInfo);
 +
      return actionContext;
 +
    }
  
  

http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostResourceProvider.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
index 387e0dc,0cc1745..dcaaad9
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
@@@ -74,6 -72,6 +74,7 @@@ import org.apache.ambari.server.state.S
  import org.apache.ambari.server.state.ServiceInfo;
  import org.apache.ambari.server.state.StackId;
  import org.apache.ambari.server.state.State;
++import org.apache.commons.collections.CollectionUtils;
  import org.apache.commons.lang.StringUtils;
  import org.apache.commons.lang.Validate;
  
@@@ -85,76 -83,37 +86,76 @@@ import com.google.inject.assistedinject
   * Resource provider for service resources.
   */
  public class ServiceResourceProvider extends AbstractControllerResourceProvider {
 +  public static final String SERVICE_CLUSTER_NAME_PROPERTY_ID = PropertyHelper.getPropertyId(
 +      "ServiceInfo", "cluster_name");
  
 +  public static final String SERVICE_SERVICE_NAME_PROPERTY_ID = PropertyHelper.getPropertyId(
 +      "ServiceInfo", "service_name");
  
 -  // ----- Property ID constants ---------------------------------------------
 +  public static final String SERVICE_SERVICE_STATE_PROPERTY_ID = PropertyHelper.getPropertyId(
 +      "ServiceInfo", "state");
  
 -  // Services
 -  public static final String SERVICE_CLUSTER_NAME_PROPERTY_ID    = PropertyHelper.getPropertyId("ServiceInfo", "cluster_name");
 -  public static final String SERVICE_SERVICE_NAME_PROPERTY_ID    = PropertyHelper.getPropertyId("ServiceInfo", "service_name");
 -  public static final String SERVICE_SERVICE_STATE_PROPERTY_ID   = PropertyHelper.getPropertyId("ServiceInfo", "state");
 -  public static final String SERVICE_MAINTENANCE_STATE_PROPERTY_ID = PropertyHelper.getPropertyId("ServiceInfo", "maintenance_state");
 -  public static final String SERVICE_CREDENTIAL_STORE_SUPPORTED_PROPERTY_ID =
 -    PropertyHelper.getPropertyId("ServiceInfo", "credential_store_supported");
 -  public static final String SERVICE_CREDENTIAL_STORE_ENABLED_PROPERTY_ID =
 -    PropertyHelper.getPropertyId("ServiceInfo", "credential_store_enabled");
 +  public static final String SERVICE_MAINTENANCE_STATE_PROPERTY_ID = PropertyHelper.getPropertyId(
 +      "ServiceInfo", "maintenance_state");
  
 -  public static final String SERVICE_ATTRIBUTES_PROPERTY_ID = PropertyHelper.getPropertyId("Services", "attributes");
 +  public static final String SERVICE_CREDENTIAL_STORE_SUPPORTED_PROPERTY_ID = PropertyHelper.getPropertyId(
 +      "ServiceInfo", "credential_store_supported");
  
 -  //Parameters from the predicate
 -  private static final String QUERY_PARAMETERS_RUN_SMOKE_TEST_ID =
 -    "params/run_smoke_test";
 +  public static final String SERVICE_CREDENTIAL_STORE_ENABLED_PROPERTY_ID = PropertyHelper.getPropertyId(
 +      "ServiceInfo", "credential_store_enabled");
 +
 +  public static final String SERVICE_ATTRIBUTES_PROPERTY_ID = PropertyHelper.getPropertyId(
 +      "Services", "attributes");
 +
 +  public static final String SERVICE_DESIRED_STACK_PROPERTY_ID = PropertyHelper.getPropertyId(
 +      "ServiceInfo", "desired_stack");
  
-   public static final String SERVICE_DESIRED_REPO_VERSION_ID_PROPERTY_ID = PropertyHelper.getPropertyId(
-       "ServiceInfo", "desired_repository_version_id");
 -  private static final String QUERY_PARAMETERS_RECONFIGURE_CLIENT =
 -    "params/reconfigure_client";
++  public static final String SERVICE_DESIRED_REPO_VERSION_PROPERTY_ID = PropertyHelper.getPropertyId(
++      "ServiceInfo", "desired_repository_version");
  
 -  private static final String QUERY_PARAMETERS_START_DEPENDENCIES =
 -    "params/start_dependencies";
 +  protected static final String SERVICE_REPOSITORY_STATE = "ServiceInfo/repository_state";
 +
 +  //Parameters from the predicate
 +  private static final String QUERY_PARAMETERS_RUN_SMOKE_TEST_ID = "params/run_smoke_test";
 +  private static final String QUERY_PARAMETERS_RECONFIGURE_CLIENT = "params/reconfigure_client";
 +  private static final String QUERY_PARAMETERS_START_DEPENDENCIES = "params/start_dependencies";
  
    private static Set<String> pkPropertyIds =
 -      new HashSet<String>(Arrays.asList(new String[]{
 -          SERVICE_CLUSTER_NAME_PROPERTY_ID,
 -          SERVICE_SERVICE_NAME_PROPERTY_ID}));
 +    new HashSet<>(Arrays.asList(new String[]{
 +      SERVICE_CLUSTER_NAME_PROPERTY_ID,
 +      SERVICE_SERVICE_NAME_PROPERTY_ID}));
  
 +  /**
 +   * The property ids for a service resource.
 +   */
 +  private static final Set<String> PROPERTY_IDS = new HashSet<>();
 +
 +  /**
 +   * The key property ids for a service resource.
 +   */
 +  private static final Map<Resource.Type, String> KEY_PROPERTY_IDS = new HashMap<>();
 +
 +  static {
 +    // properties
 +    PROPERTY_IDS.add(SERVICE_CLUSTER_NAME_PROPERTY_ID);
 +    PROPERTY_IDS.add(SERVICE_SERVICE_NAME_PROPERTY_ID);
 +    PROPERTY_IDS.add(SERVICE_SERVICE_STATE_PROPERTY_ID);
 +    PROPERTY_IDS.add(SERVICE_MAINTENANCE_STATE_PROPERTY_ID);
 +    PROPERTY_IDS.add(SERVICE_CREDENTIAL_STORE_SUPPORTED_PROPERTY_ID);
 +    PROPERTY_IDS.add(SERVICE_CREDENTIAL_STORE_ENABLED_PROPERTY_ID);
 +    PROPERTY_IDS.add(SERVICE_ATTRIBUTES_PROPERTY_ID);
 +    PROPERTY_IDS.add(SERVICE_DESIRED_STACK_PROPERTY_ID);
-     PROPERTY_IDS.add(SERVICE_DESIRED_REPO_VERSION_ID_PROPERTY_ID);
++    PROPERTY_IDS.add(SERVICE_DESIRED_REPO_VERSION_PROPERTY_ID);
 +    PROPERTY_IDS.add(SERVICE_REPOSITORY_STATE);
 +
 +    PROPERTY_IDS.add(QUERY_PARAMETERS_RUN_SMOKE_TEST_ID);
 +    PROPERTY_IDS.add(QUERY_PARAMETERS_RECONFIGURE_CLIENT);
 +    PROPERTY_IDS.add(QUERY_PARAMETERS_START_DEPENDENCIES);
 +
 +    // keys
 +    KEY_PROPERTY_IDS.put(Resource.Type.Service, SERVICE_SERVICE_NAME_PROPERTY_ID);
 +    KEY_PROPERTY_IDS.put(Resource.Type.Cluster, SERVICE_CLUSTER_NAME_PROPERTY_ID);
 +  }
  
    private MaintenanceStateHelper maintenanceStateHelper;
  
@@@ -177,12 -131,14 +178,12 @@@
     * @param managementController  the management controller
     */
    @AssistedInject
 -  public ServiceResourceProvider(@Assisted Set<String> propertyIds,
 -                          @Assisted Map<Resource.Type, String> keyPropertyIds,
 -                          @Assisted AmbariManagementController managementController,
 -                          MaintenanceStateHelper maintenanceStateHelper) {
 -    super(propertyIds, keyPropertyIds, managementController);
 +  public ServiceResourceProvider(
 +      @Assisted AmbariManagementController managementController,
 +      MaintenanceStateHelper maintenanceStateHelper, RepositoryVersionDAO repositoryVersionDAO) {
-     super(PROPERTY_IDS, KEY_PROPERTY_IDS, managementController);
++    super(Resource.Type.Service, PROPERTY_IDS, KEY_PROPERTY_IDS, managementController);
      this.maintenanceStateHelper = maintenanceStateHelper;
 +    this.repositoryVersionDAO = repositoryVersionDAO;
  
      setRequiredCreateAuthorizations(EnumSet.of(RoleAuthorization.SERVICE_ADD_DELETE_SERVICES));
      setRequiredUpdateAuthorizations(RoleAuthorization.AUTHORIZATIONS_UPDATE_SERVICE);
@@@ -251,19 -207,6 +252,15 @@@
        setResourceProperty(resource, SERVICE_CREDENTIAL_STORE_ENABLED_PROPERTY_ID,
            String.valueOf(response.isCredentialStoreEnabled()), requestedIds);
  
-       RepositoryVersionEntity repoVersion = repositoryVersionDAO.findByPK(response.getDesiredRepositoryVersionId());
++      setResourceProperty(resource, SERVICE_DESIRED_STACK_PROPERTY_ID,
++          response.getDesiredStackId(), requestedIds);
 +
-       // !!! TODO is the UI using this?
-       if (null != repoVersion) {
-         setResourceProperty(resource, SERVICE_DESIRED_STACK_PROPERTY_ID, repoVersion.getStackId(), requestedIds);
-       }
- 
-       setResourceProperty(resource, SERVICE_DESIRED_REPO_VERSION_ID_PROPERTY_ID,
-           response.getDesiredRepositoryVersionId(), requestedIds);
++      setResourceProperty(resource, SERVICE_DESIRED_REPO_VERSION_PROPERTY_ID,
++          response.getDesiredRepositoryVersion(), requestedIds);
 +
 +      setResourceProperty(resource, SERVICE_REPOSITORY_STATE,
 +          response.getRepositoryVersionState(), requestedIds);
 +
        Map<String, Object> serviceSpecificProperties = getServiceSpecificProperties(
            response.getClusterName(), response.getServiceName(), requestedIds);
  
@@@ -385,13 -328,9 +382,13 @@@
     * @return the service request object
     */
    private ServiceRequest getRequest(Map<String, Object> properties) {
- 
-     String desiredRepoId = (String) properties.get(SERVICE_DESIRED_REPO_VERSION_ID_PROPERTY_ID);
++    String desiredStack = (String)properties.get(SERVICE_DESIRED_STACK_PROPERTY_ID);
++    String desiredRepositoryVersion = (String)properties.get(SERVICE_DESIRED_REPO_VERSION_PROPERTY_ID);
 +
      ServiceRequest svcRequest = new ServiceRequest(
          (String) properties.get(SERVICE_CLUSTER_NAME_PROPERTY_ID),
          (String) properties.get(SERVICE_SERVICE_NAME_PROPERTY_ID),
-         null == desiredRepoId ? null : Long.valueOf(desiredRepoId),
++        desiredStack, desiredRepositoryVersion,
          (String) properties.get(SERVICE_SERVICE_STATE_PROPERTY_ID),
          (String) properties.get(SERVICE_CREDENTIAL_STORE_ENABLED_PROPERTY_ID));
  
@@@ -423,15 -362,10 +420,19 @@@
      for (ServiceRequest request : requests) {
        Cluster cluster = clusters.getCluster(request.getClusterName());
  
 -      // Already checked that service does not exist
 -      Service s = cluster.addService(request.getServiceName());
++      String desiredStack = request.getDesiredStack();
++
 +      RepositoryVersionEntity repositoryVersion = request.getResolvedRepository();
 +
 +      if (null == repositoryVersion) {
-         throw new AmbariException("Could not find any repository on the request.");
++        throw new AmbariException(String.format("Could not find any repositories defined by %s", desiredStack));
++      } else {
++        desiredStack = repositoryVersion.getStackId().toString();
 +      }
  
 -      /**
 +      Service s = cluster.addService(request.getServiceName(), repositoryVersion);
 +
 +      /*
         * Get the credential_store_supported field only from the stack definition.
         * Not possible to update the value through a request.
         */
@@@ -1065,21 -1023,7 +1066,36 @@@
          // Expected
        }
  
-       Long desiredRepositoryVersion = request.getDesiredRepositoryVersionId();
-       if (null == desiredRepositoryVersion) {
-         throw new IllegalArgumentException(String.format("%s is required when adding a service.", SERVICE_DESIRED_REPO_VERSION_ID_PROPERTY_ID));
-       }
 -      StackId stackId = cluster.getDesiredStackVersion();
++      String desiredStack = request.getDesiredStack();
++      StackId stackId = new StackId(desiredStack);
 +
-       RepositoryVersionEntity repositoryVersion = repositoryVersionDAO.findByPK(desiredRepositoryVersion);
++      String desiredRepositoryVersion = request.getDesiredRepositoryVersion();
++      RepositoryVersionEntity repositoryVersion = null;
++      if (StringUtils.isNotBlank(desiredRepositoryVersion)){
++        repositoryVersion = repositoryVersionDAO.findByVersion(desiredRepositoryVersion);
++      }
 +
 +      if (null == repositoryVersion) {
-         throw new IllegalArgumentException(String.format("Could not find any repositories defined by %d", desiredRepositoryVersion));
++        // !!! FIXME hack until the UI always sends the repository
++        if (null == desiredStack) {
++          desiredStack = cluster.getDesiredStackVersion().toString();
++        }
++
++        List<RepositoryVersionEntity> allVersions = repositoryVersionDAO.findByStack(new StackId(desiredStack));
++
++        if (CollectionUtils.isNotEmpty(allVersions)) {
++          repositoryVersion = allVersions.get(0);
++        }
 +      }
 +
-       StackId stackId = repositoryVersion.getStackId();
++      if (null == repositoryVersion) {
++        throw new AmbariException(String.format("Could not find any repositories defined by %s", desiredStack));
++      } else {
++        stackId = repositoryVersion.getStackId();
++      }
 +
 +      request.setResolvedRepository(repositoryVersion);
 +
        if (!ambariMetaInfo.isValidService(stackId.getStackName(),
                stackId.getStackVersion(), request.getServiceName())) {
          throw new IllegalArgumentException("Unsupported or invalid service in stack, clusterName=" + clusterName
@@@ -1114,4 -1058,4 +1130,4 @@@
      }
  
    }
--}
++}

http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/PropertyHelper.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/PropertyHelper.java
index 455d569,4ded10e..79aef9a
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/PropertyHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/PropertyHelper.java
@@@ -115,6 -118,6 +115,10 @@@ public class PropertyHelper 
      return propertyIds == null ? Collections.<String>emptySet() : propertyIds;
    }
  
++  public static void setPropertyIds(Resource.Type resourceType, Set<String> propertyIds) {
++    PROPERTY_IDS.put(resourceType.getInternalType(), propertyIds);
++  }
++
    /**
     * Extract the set of property ids from a component PropertyInfo map.
     *
@@@ -146,6 -149,6 +150,10 @@@
    public static Map<Resource.Type, String> getKeyPropertyIds(Resource.Type resourceType) {
      return KEY_PROPERTY_IDS.get(resourceType.getInternalType());
    }
++  
++  public static void setKeyPropertyIds(Resource.Type resourceType, Map<Resource.Type, String> keyPropertyKeys) {
++    KEY_PROPERTY_IDS.put(resourceType.getInternalType(), keyPropertyKeys);
++  }
  
    /**
     * Helper to get a property name from a string.

http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
index d2c0ea2,93e6393..0ab2263
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
@@@ -854,350 -560,4 +854,349 @@@ public class UpgradeContext 
      parameters.put(KeyNames.REFRESH_CONFIG_TAGS_BEFORE_EXECUTION, "true");
      return parameters;
    }
 -}
 +
 +  /**
 +   * {@inheritDoc}
 +   */
 +  @Override
 +  public String toString() {
 +    return Objects.toStringHelper(this)
 +        .add("direction", m_direction)
 +        .add("type", m_type)
 +        .add("target", m_repositoryVersion).toString();
 +  }
 +
 +  /**
 +   * Gets whether a downgrade is allowed for this upgrade. If the direction is
 +   * {@link Direction#DOWNGRADE}, then this method always returns false.
 +   * Otherwise it will consult {@link UpgradePack#isDowngradeAllowed()}.
 +   *
 +   * @return {@code true} if a downgrade is allowed for this upgrade,
 +   *         {@code false} otherwise.
 +   */
 +  public boolean isDowngradeAllowed() {
 +    if (m_direction == Direction.DOWNGRADE) {
 +      return false;
 +    }
 +
 +    return m_upgradePack.isDowngradeAllowed();
 +  }
 +
 +  /**
 +   * @return
 +   */
 +  public boolean isPatchRevert() {
 +    return m_isRevert;
 +  }
 +
 +  /**
 +   * Builds a chain of {@link UpgradeRequestValidator}s to ensure that the
 +   * incoming request to create a new upgrade is valid.
 +   *
 +   * @param upgradeType
 +   *          the type of upgrade to build the validator for.
 +   * @return the validator which can check to ensure that the properties are
 +   *         valid.
 +   */
 +  private UpgradeRequestValidator buildValidator(UpgradeType upgradeType){
 +    UpgradeRequestValidator validator = new BasicUpgradePropertiesValidator();
 +    UpgradeRequestValidator preReqValidator = new PreReqCheckValidator();
 +    validator.setNextValidator(preReqValidator);
 +
 +    final UpgradeRequestValidator upgradeTypeValidator;
 +    switch (upgradeType) {
 +      case HOST_ORDERED:
 +        upgradeTypeValidator = new HostOrderedUpgradeValidator();
 +        break;
 +      case NON_ROLLING:
 +      case ROLLING:
 +      default:
 +        upgradeTypeValidator = null;
 +        break;
 +    }
 +
 +    preReqValidator.setNextValidator(upgradeTypeValidator);
 +    return validator;
 +  }
 +
 +  /**
 +   * The {@link UpgradeRequestValidator} contains the logic to check for correct
 +   * upgrade request properties and then pass the responsibility onto the next
 +   * validator in the chain.
 +   */
 +  private abstract class UpgradeRequestValidator {
 +    /**
 +     * The next validator.
 +     */
 +    UpgradeRequestValidator m_nextValidator;
 +
 +    /**
 +     * Sets the next validator in the chain.
 +     *
 +     * @param nextValidator
 +     *          the next validator to run, or {@code null} for none.
 +     */
 +    void setNextValidator(UpgradeRequestValidator nextValidator) {
 +      m_nextValidator = nextValidator;
 +    }
 +
 +    /**
 +     * Validates the upgrade request from this point in the chain.
 +     *
 +     * @param cluster
 +     * @param direction
 +     * @param type
 +     * @param upgradePack
 +     * @param requestMap
 +     * @throws AmbariException
 +     */
 +    final void validate(Cluster cluster, Direction direction, UpgradeType type,
 +        UpgradePack upgradePack, Map<String, Object> requestMap) throws AmbariException {
 +
 +      // run this instance's check
 +      check(cluster, direction, type, upgradePack, requestMap);
 +
 +      // pass along to the next
 +      if (null != m_nextValidator) {
 +        m_nextValidator.validate(cluster, direction, type, upgradePack, requestMap);
 +      }
 +    }
 +
 +    /**
 +     * Checks to ensure that upgrade request is valid given the specific
 +     * arguments.
 +     *
 +     * @param cluster
 +     * @param direction
 +     * @param type
 +     * @param upgradePack
 +     * @param requestMap
 +     * @throws AmbariException
 +     */
 +    abstract void check(Cluster cluster, Direction direction, UpgradeType type,
 +        UpgradePack upgradePack, Map<String, Object> requestMap) throws AmbariException;
 +  }
 +
 +  /**
 +   * The {@link BasicUpgradePropertiesValidator} ensures that the basic required
 +   * properties are present on the upgrade request.
 +   */
 +  private final class BasicUpgradePropertiesValidator extends UpgradeRequestValidator {
 +
 +    /**
 +     * {@inheritDoc}
 +     */
 +    @Override
 +    public void check(Cluster cluster, Direction direction, UpgradeType type,
 +        UpgradePack upgradePack, Map<String, Object> requestMap) throws AmbariException {
 +
 +      if (direction == Direction.UPGRADE) {
 +        String repositoryVersionId = (String) requestMap.get(UPGRADE_REPO_VERSION_ID);
 +        if (StringUtils.isBlank(repositoryVersionId)) {
 +          throw new AmbariException(
 +              String.format("%s is required for upgrades", UPGRADE_REPO_VERSION_ID));
 +        }
 +      }
 +    }
 +  }
 +
 +  /**
 +   * The {@link PreReqCheckValidator} ensures that the upgrade pre-requisite
 +   * checks have passed.
 +   */
 +  private final class PreReqCheckValidator extends UpgradeRequestValidator {
 +    /**
 +     * {@inheritDoc}
 +     */
 +    @Override
 +    void check(Cluster cluster, Direction direction, UpgradeType type, UpgradePack upgradePack,
 +        Map<String, Object> requestMap) throws AmbariException {
 +
 +      String repositoryVersionId = (String) requestMap.get(UPGRADE_REPO_VERSION_ID);
 +      boolean skipPrereqChecks = Boolean.parseBoolean((String) requestMap.get(UPGRADE_SKIP_PREREQUISITE_CHECKS));
 +      boolean failOnCheckWarnings = Boolean.parseBoolean((String) requestMap.get(UPGRADE_FAIL_ON_CHECK_WARNINGS));
 +      String preferredUpgradePack = requestMap.containsKey(UPGRADE_PACK) ? (String) requestMap.get(UPGRADE_PACK) : null;
 +
 +      // verify that there is not an upgrade or downgrade that is in progress or suspended
 +      UpgradeEntity existingUpgrade = cluster.getUpgradeInProgress();
 +      if (null != existingUpgrade) {
 +        throw new AmbariException(
 +            String.format("Unable to perform %s as another %s (request ID %s) is in progress.",
 +                direction.getText(false), existingUpgrade.getDirection().getText(false),
 +                existingUpgrade.getRequestId()));
 +      }
 +
 +      // skip this check if it's a downgrade or we are instructed to skip it
 +      if (direction.isDowngrade() || skipPrereqChecks) {
 +        return;
 +      }
 +
 +      RepositoryVersionEntity repositoryVersion = m_repoVersionDAO.findByPK(
 +          Long.valueOf(repositoryVersionId));
 +
 +      // Validate pre-req checks pass
 +      PreUpgradeCheckResourceProvider provider = (PreUpgradeCheckResourceProvider) AbstractControllerResourceProvider.getResourceProvider(
 +          Resource.Type.PreUpgradeCheck);
 +
 +      Predicate preUpgradeCheckPredicate = new PredicateBuilder().property(
 +          PreUpgradeCheckResourceProvider.UPGRADE_CHECK_CLUSTER_NAME_PROPERTY_ID).equals(cluster.getClusterName()).and().property(
 +          PreUpgradeCheckResourceProvider.UPGRADE_CHECK_REPOSITORY_VERSION_PROPERTY_ID).equals(repositoryVersion.getVersion()).and().property(
 +          PreUpgradeCheckResourceProvider.UPGRADE_CHECK_FOR_REVERT_PROPERTY_ID).equals(m_isRevert).and().property(
 +          PreUpgradeCheckResourceProvider.UPGRADE_CHECK_UPGRADE_TYPE_PROPERTY_ID).equals(type).and().property(
 +          PreUpgradeCheckResourceProvider.UPGRADE_CHECK_UPGRADE_PACK_PROPERTY_ID).equals(preferredUpgradePack).toPredicate();
 +
 +      Request preUpgradeCheckRequest = PropertyHelper.getReadRequest();
 +
 +      Set<Resource> preUpgradeCheckResources;
 +      try {
 +        preUpgradeCheckResources = provider.getResources(
 +            preUpgradeCheckRequest, preUpgradeCheckPredicate);
 +      } catch (NoSuchResourceException|SystemException|UnsupportedPropertyException|NoSuchParentResourceException e) {
 +        throw new AmbariException(
 +            String.format("Unable to perform %s. Prerequisite checks could not be run",
 +                direction.getText(false), e));
 +      }
 +
 +      List<Resource> failedResources = new LinkedList<>();
 +      if (preUpgradeCheckResources != null) {
 +        for (Resource res : preUpgradeCheckResources) {
 +          PrereqCheckStatus prereqCheckStatus = (PrereqCheckStatus) res.getPropertyValue(
 +              PreUpgradeCheckResourceProvider.UPGRADE_CHECK_STATUS_PROPERTY_ID);
 +
 +          if (prereqCheckStatus == PrereqCheckStatus.FAIL
 +              || (failOnCheckWarnings && prereqCheckStatus == PrereqCheckStatus.WARNING)) {
 +            failedResources.add(res);
 +          }
 +        }
 +      }
 +
 +      if (!failedResources.isEmpty()) {
 +        throw new AmbariException(
 +            String.format("Unable to perform %s. Prerequisite checks failed %s",
 +                direction.getText(false), m_gson.toJson(failedResources)));
 +      }
 +    }
 +  }
 +
 +  /**
 +   * Ensures that for {@link UpgradeType#HOST_ORDERED}, the properties supplied
 +   * are valid.
 +   */
 +  @SuppressWarnings("unchecked")
 +  private final class HostOrderedUpgradeValidator extends UpgradeRequestValidator {
 +
 +    /**
 +     * {@inheritDoc}
 +     */
 +    @Override
 +    void check(Cluster cluster, Direction direction, UpgradeType type, UpgradePack upgradePack,
 +        Map<String, Object> requestMap) throws AmbariException {
 +
 +      String skipFailuresRequestProperty = (String) requestMap.get(UPGRADE_SKIP_FAILURES);
 +      if (Boolean.parseBoolean(skipFailuresRequestProperty)) {
 +        throw new AmbariException(
 +            String.format("The %s property is not valid when creating a %s upgrade.",
 +                UPGRADE_SKIP_FAILURES, UpgradeType.HOST_ORDERED));
 +      }
 +
 +      String skipManualVerification = (String) requestMap.get(UPGRADE_SKIP_MANUAL_VERIFICATION);
 +      if (Boolean.parseBoolean(skipManualVerification)) {
 +        throw new AmbariException(
 +            String.format("The %s property is not valid when creating a %s upgrade.",
 +                UPGRADE_SKIP_MANUAL_VERIFICATION, UpgradeType.HOST_ORDERED));
 +      }
 +
 +      if (!requestMap.containsKey(UPGRADE_HOST_ORDERED_HOSTS)) {
 +        throw new AmbariException(
 +            String.format("The %s property is required when creating a %s upgrade.",
 +                UPGRADE_HOST_ORDERED_HOSTS, UpgradeType.HOST_ORDERED));
 +      }
 +
 +      List<HostOrderItem> hostOrderItems = extractHostOrderItemsFromRequest(requestMap);
 +      List<String> hostsFromRequest = new ArrayList<>(hostOrderItems.size());
 +      for (HostOrderItem hostOrderItem : hostOrderItems) {
 +        if (hostOrderItem.getType() == HostOrderActionType.HOST_UPGRADE) {
 +          hostsFromRequest.addAll(hostOrderItem.getActionItems());
 +        }
 +      }
 +
 +      // ensure that all hosts for this cluster are accounted for
 +      Collection<Host> hosts = cluster.getHosts();
 +      Set<String> clusterHostNames = new HashSet<>(hosts.size());
 +      for (Host host : hosts) {
 +        clusterHostNames.add(host.getHostName());
 +      }
 +
 +      Collection<String> disjunction = CollectionUtils.disjunction(hostsFromRequest,
 +          clusterHostNames);
 +
 +      if (CollectionUtils.isNotEmpty(disjunction)) {
 +        throw new AmbariException(String.format(
 +            "The supplied list of hosts must match the cluster hosts in an upgrade of type %s. The following hosts are either missing or invalid: %s",
 +            UpgradeType.HOST_ORDERED, StringUtils.join(disjunction, ", ")));
 +      }
 +
 +      // verify that the upgradepack has the required grouping and set the
 +      // action items on it
 +      HostOrderGrouping hostOrderGrouping = null;
 +      List<Grouping> groupings = upgradePack.getGroups(direction);
 +      for (Grouping grouping : groupings) {
 +        if (grouping instanceof HostOrderGrouping) {
 +          hostOrderGrouping = (HostOrderGrouping) grouping;
 +          hostOrderGrouping.setHostOrderItems(hostOrderItems);
 +        }
 +      }
 +    }
 +
 +    /**
 +     * Builds the list of {@link HostOrderItem}s from the upgrade request. If
 +     * the upgrade request does not contain the hosts, an exception is thrown
 +     *
 +     * @param requestMap
 +     *          the map of properties from the request (not {@code null}).
 +     * @return the ordered list of actions to orchestrate for the
 +     *         {@link UpgradeType#HOST_ORDERED} upgrade.
 +     * @throws AmbariException
 +     *           if the request properties are not valid.
 +     */
 +    private List<HostOrderItem> extractHostOrderItemsFromRequest(Map<String, Object> requestMap)
 +        throws AmbariException {
 +      // ewwww
 +      Set<Map<String, List<String>>> hostsOrder = (Set<Map<String, List<String>>>) requestMap.get(
 +          UPGRADE_HOST_ORDERED_HOSTS);
 +
 +      if (CollectionUtils.isEmpty(hostsOrder)) {
 +        throw new AmbariException(
 +            String.format("The %s property must be specified when using a %s upgrade type.",
 +                UPGRADE_HOST_ORDERED_HOSTS, UpgradeType.HOST_ORDERED));
 +      }
 +
 +      List<HostOrderItem> hostOrderItems = new ArrayList<>();
 +
 +      // extract all of the hosts so that we can ensure they are all accounted
 +      // for
 +      Iterator<Map<String, List<String>>> iterator = hostsOrder.iterator();
 +      while (iterator.hasNext()) {
 +        Map<String, List<String>> grouping = iterator.next();
 +        List<String> hosts = grouping.get("hosts");
 +        List<String> serviceChecks = grouping.get("service_checks");
 +
 +        if (CollectionUtils.isEmpty(hosts) && CollectionUtils.isEmpty(serviceChecks)) {
 +          throw new AmbariException(String.format(
 +              "The %s property must contain at least one object with either a %s or %s key",
 +              UPGRADE_HOST_ORDERED_HOSTS, "hosts", "service_checks"));
 +        }
 +
 +        if (CollectionUtils.isNotEmpty(hosts)) {
 +          hostOrderItems.add(new HostOrderItem(HostOrderActionType.HOST_UPGRADE, hosts));
 +        }
 +
 +        if (CollectionUtils.isNotEmpty(serviceChecks)) {
 +          hostOrderItems.add(new HostOrderItem(HostOrderActionType.SERVICE_CHECK, serviceChecks));
 +        }
 +      }
 +
 +      return hostOrderItems;
 +    }
 +  }
- 
- }
++}

http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
index ebd3468,ebd3468..345f598
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
@@@ -74,6 -74,6 +74,7 @@@ import org.apache.ambari.server.state.C
  import org.apache.ambari.server.state.DesiredConfig;
  import org.apache.ambari.server.state.Host;
  import org.apache.ambari.server.state.SecurityType;
++import org.apache.ambari.server.state.StackId;
  import org.apache.ambari.server.state.configgroup.ConfigGroup;
  import org.apache.ambari.server.utils.RetryHelper;
  import org.slf4j.Logger;
@@@ -187,9 -187,9 +188,10 @@@ public class AmbariContext 
  
    public void createAmbariResources(ClusterTopology topology, String clusterName, SecurityType securityType, String repoVersion) {
      Stack stack = topology.getBlueprint().getStack();
++    StackId stackId = new StackId(stack.getName(), stack.getVersion());
  
      createAmbariClusterResource(clusterName, stack.getName(), stack.getVersion(), securityType, repoVersion);
--    createAmbariServiceAndComponentResources(topology, clusterName);
++    createAmbariServiceAndComponentResources(topology, clusterName, stackId, repoVersion);
    }
  
    public void createAmbariClusterResource(String clusterName, String stackName, String stackVersion, SecurityType securityType, String repoVersion) {
@@@ -216,7 -216,7 +218,8 @@@
      }
    }
  
--  public void createAmbariServiceAndComponentResources(ClusterTopology topology, String clusterName) {
++  public void createAmbariServiceAndComponentResources(ClusterTopology topology, String clusterName,
++      StackId stackId, String repositoryVersion) {
      Collection<String> services = topology.getBlueprint().getServices();
  
      try {
@@@ -229,7 -229,7 +232,9 @@@
      Set<ServiceComponentRequest> componentRequests = new HashSet<>();
      for (String service : services) {
        String credentialStoreEnabled = topology.getBlueprint().getCredentialStoreEnabled(service);
--      serviceRequests.add(new ServiceRequest(clusterName, service, null, credentialStoreEnabled));
++      serviceRequests.add(new ServiceRequest(clusterName, service, stackId.getStackId(),
++          repositoryVersion, null, credentialStoreEnabled));
++
        for (String component : topology.getBlueprint().getComponents(service)) {
          String recoveryEnabled = topology.getBlueprint().getRecoveryEnabled(service, component);
          componentRequests.add(new ServiceComponentRequest(clusterName, service, component, null, recoveryEnabled));
@@@ -250,7 -250,7 +255,7 @@@
      startProps.put(ServiceResourceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID, "STARTED");
      startProps.put(ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
      Predicate predicate = new EqualsPredicate<>(
--        ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
++      ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
      try {
        getServiceResourceProvider().updateResources(
            new RequestImpl(null, Collections.singleton(installProps), null, null), predicate);
@@@ -284,7 -284,7 +289,7 @@@
  
      Map<String, Object> properties = new HashMap<>();
      properties.put(HostResourceProvider.HOST_CLUSTER_NAME_PROPERTY_ID, clusterName);
--    properties.put(HostResourceProvider.HOST_NAME_PROPERTY_ID, hostName);
++    properties.put(HostResourceProvider.HOST_HOST_NAME_PROPERTY_ID, hostName);
      properties.put(HostResourceProvider.HOST_RACK_INFO_PROPERTY_ID, host.getRackInfo());
  
      try {
@@@ -740,4 -740,4 +745,4 @@@
      }
      return componentResourceProvider;
    }
--}
++}

http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
index 6d2ab84,ea1b034..0fcf779
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog252.java
@@@ -24,12 -28,15 +28,17 @@@ import java.util.Set
  
  import org.apache.ambari.server.AmbariException;
  import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
+ import org.apache.ambari.server.orm.dao.ClusterDAO;
 -import org.apache.ambari.server.orm.entities.ClusterConfigMappingEntity;
++import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
+ import org.apache.ambari.server.orm.entities.ClusterEntity;
  import org.apache.ambari.server.state.Cluster;
  import org.apache.ambari.server.state.Clusters;
  import org.apache.ambari.server.state.Config;
  import org.apache.ambari.server.state.ConfigHelper;
  import org.apache.ambari.server.state.PropertyInfo;
  import org.apache.commons.lang.StringUtils;
++import org.slf4j.Logger;
++import org.slf4j.LoggerFactory;
  
  import com.google.common.collect.Sets;
  import com.google.inject.Inject;
@@@ -54,6 -63,13 +63,13 @@@ public class UpgradeCatalog252 extends 
  
    private static final String CLUSTER_ENV = "cluster-env";
  
+   private static final List<String> configTypesToEnsureSelected = Arrays.asList("spark2-javaopts-properties");
 -  
++
+   /**
+    * Logger.
+    */
+   private static final Logger LOG = LoggerFactory.getLogger(UpgradeCatalog252.class);
+ 
    /**
     * Constructor.
     *
@@@ -196,4 -214,86 +214,86 @@@
        updateConfigurationPropertiesForCluster(cluster, CLUSTER_ENV, newStackProperties, true, false);
      }
    }
+ 
+   /**
+    * When doing a cross-stack upgrade, we found that one config type (spark2-javaopts-properties)
+    * did not have any mappings that were selected, so it caused Ambari Server start to fail on the DB Consistency Checker.
+    * To fix this, iterate over all config types and ensure that at least one is selected.
+    * If none are selected, then pick the one with the greatest time stamp; this should be safe since we are only adding
+    * more data to use as opposed to removing.
+    */
+   private void ensureConfigTypesHaveAtLeastOneVersionSelected() {
+     ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
+     List<ClusterEntity> clusters = clusterDAO.findAll();
+ 
+     if (null == clusters) {
+       return;
+     }
+ 
+     for (ClusterEntity clusterEntity : clusters) {
+       LOG.info("Ensuring all config types have at least one selected config for cluster {}", clusterEntity.getClusterName());
+ 
+       boolean atLeastOneChanged = false;
 -      Collection<ClusterConfigMappingEntity> configMappingEntities = clusterEntity.getConfigMappingEntities();
++      Collection<ClusterConfigEntity> configEntities = clusterEntity.getClusterConfigEntities();
+ 
 -      if (configMappingEntities != null) {
++      if (configEntities != null) {
+         Set<String> configTypesNotSelected = new HashSet<>();
+         Set<String> configTypesWithAtLeastOneSelected = new HashSet<>();
+ 
 -        for (ClusterConfigMappingEntity clusterConfigMappingEntity : configMappingEntities) {
 -          String typeName = clusterConfigMappingEntity.getType();
++        for (ClusterConfigEntity clusterConfigEntity : configEntities) {
++          String typeName = clusterConfigEntity.getType();
+ 
 -          if (clusterConfigMappingEntity.isSelected() == 1) {
++          if (clusterConfigEntity.isSelected()) {
+             configTypesWithAtLeastOneSelected.add(typeName);
+           } else {
+             configTypesNotSelected.add(typeName);
+           }
+         }
+ 
+         // Due to the ordering, eliminate any configs with at least one selected.
+         configTypesNotSelected.removeAll(configTypesWithAtLeastOneSelected);
+         if (!configTypesNotSelected.isEmpty()) {
 -          LOG.info("The following config types have config mappings which don't have at least one as selected. {}", StringUtils.join(configTypesNotSelected, ", "));
++          LOG.info("The following config types have entries which are not enabled: {}", StringUtils.join(configTypesNotSelected, ", "));
+ 
+           LOG.info("Filtering only config types these config types: {}", StringUtils.join(configTypesToEnsureSelected, ", "));
+           // Get the intersection with a subset of configs that are allowed to be selected during the migration.
+           configTypesNotSelected.retainAll(configTypesToEnsureSelected);
+         }
+ 
+         if (!configTypesNotSelected.isEmpty()) {
 -          LOG.info("The following config types have config mappings which don't have at least one as selected. {}", StringUtils.join(configTypesNotSelected, ", "));
++          LOG.info("The following config types have entries which don't have at least one as selected. {}", StringUtils.join(configTypesNotSelected, ", "));
+ 
+           for (String typeName : configTypesNotSelected) {
 -            ClusterConfigMappingEntity clusterConfigMappingWithGreatestTimeStamp = null;
++            ClusterConfigEntity clusterConfigMappingWithGreatestTimeStamp = null;
+ 
 -            for (ClusterConfigMappingEntity clusterConfigMappingEntity : configMappingEntities) {
 -              if (typeName.equals(clusterConfigMappingEntity.getType())) {
++            for (ClusterConfigEntity clusterConfigEntity : configEntities) {
++              if (typeName.equals(clusterConfigEntity.getType())) {
+ 
+                 if (null == clusterConfigMappingWithGreatestTimeStamp) {
 -                  clusterConfigMappingWithGreatestTimeStamp = clusterConfigMappingEntity;
++                  clusterConfigMappingWithGreatestTimeStamp = clusterConfigEntity;
+                 } else {
 -                  if (clusterConfigMappingEntity.getCreateTimestamp() >= clusterConfigMappingWithGreatestTimeStamp.getCreateTimestamp()) {
 -                    clusterConfigMappingWithGreatestTimeStamp = clusterConfigMappingEntity;
++                  if (clusterConfigEntity.getTimestamp() >= clusterConfigMappingWithGreatestTimeStamp.getTimestamp()) {
++                    clusterConfigMappingWithGreatestTimeStamp = clusterConfigEntity;
+                   }
+                 }
+               }
+             }
+ 
+             if (null != clusterConfigMappingWithGreatestTimeStamp) {
+               LOG.info("Saving. Config type {} has a mapping with tag {} and greatest timestamp {} that is not selected, so will mark it selected.",
 -                  typeName, clusterConfigMappingWithGreatestTimeStamp.getTag(), clusterConfigMappingWithGreatestTimeStamp.getCreateTimestamp());
++                  typeName, clusterConfigMappingWithGreatestTimeStamp.getTag(), clusterConfigMappingWithGreatestTimeStamp.getTimestamp());
+               atLeastOneChanged = true;
 -              clusterConfigMappingWithGreatestTimeStamp.setSelected(1);
++              clusterConfigMappingWithGreatestTimeStamp.setSelected(true);
+             }
+           }
+         } else {
+           LOG.info("All config types have at least one mapping that is selected. Nothing to do.");
+         }
+       }
+ 
+       if (atLeastOneChanged) {
 -        clusterDAO.mergeConfigMappings(configMappingEntities);
++        clusterDAO.merge(clusterEntity);
+       }
+     }
+   }
  }

http://git-wip-us.apache.org/repos/asf/ambari/blob/5cdcd070/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ClusterVersionDAOTest.java
----------------------------------------------------------------------
diff --cc ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ClusterVersionDAOTest.java
index 81fa8e1,81fa8e1..0000000
deleted file mode 100644,100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ClusterVersionDAOTest.java
+++ /dev/null
@@@ -1,264 -1,264 +1,0 @@@
--/**
-- * Licensed to the Apache Software Foundation (ASF) under one
-- * or more contributor license agreements.  See the NOTICE file
-- * distributed with this work for additional information
-- * regarding copyright ownership.  The ASF licenses this file
-- * to you under the Apache License, Version 2.0 (the
-- * "License"); you may not use this file except in compliance
-- * with the License.  You may obtain a copy of the License at
-- *
-- *     http://www.apache.org/licenses/LICENSE-2.0
-- *
-- * Unless required by applicable law or agreed to in writing, software
-- * distributed under the License is distributed on an "AS IS" BASIS,
-- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- * See the License for the specific language governing permissions and
-- * limitations under the License.
-- */
--
--package org.apache.ambari.server.orm.dao;
--
--import java.sql.SQLException;
--
--import org.apache.ambari.server.AmbariException;
--import org.apache.ambari.server.H2DatabaseCleaner;
--import org.apache.ambari.server.orm.GuiceJpaInitializer;
--import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
--import org.apache.ambari.server.orm.OrmTestHelper;
--import org.apache.ambari.server.orm.entities.ClusterEntity;
--import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
--import org.apache.ambari.server.state.RepositoryVersionState;
--import org.apache.ambari.server.state.StackId;
--import org.junit.After;
--import org.junit.Assert;
--import org.junit.Before;
--import org.junit.Test;
--
--import com.google.inject.Guice;
--import com.google.inject.Injector;
--
--/**
-- * ClusterVersionDAO unit tests.
-- */
--public class ClusterVersionDAOTest {
--
--  private static Injector injector;
--  private ClusterVersionDAO clusterVersionDAO;
--  private ClusterDAO clusterDAO;
--  private OrmTestHelper helper;
--
--  private long clusterId;
--  ClusterEntity cluster;
--  private int lastStep = -1;
--
--  ClusterVersionEntity cvA;
--  long cvAId = 0L;
--
--  ClusterVersionEntity cvB;
--  long cvBId = 0L;
--
--  ClusterVersionEntity cvC;
--  long cvCId = 0L;
--
--  private final static StackId HDP_22_STACK = new StackId("HDP", "2.2.0");
--  private final static StackId BAD_STACK = new StackId("BADSTACK", "1.0");
--
--  @Before
--  public void before() {
--    injector = Guice.createInjector(new InMemoryDefaultTestModule());
--    injector.getInstance(GuiceJpaInitializer.class);
--
--    clusterVersionDAO = injector.getInstance(ClusterVersionDAO.class);
--    clusterDAO = injector.getInstance(ClusterDAO.class);
--    helper = injector.getInstance(OrmTestHelper.class);
--  }
--
--  /**
--   * Helper function to transition the cluster through several cluster versions.
--   * @param currStep Step to go to is a value from 1 - 7, inclusive.
--   */
--  private void createRecordsUntilStep(int currStep) throws Exception {
--    // Fresh install on A
--    if (currStep >= 1 && lastStep <= 0) {
--      clusterId = helper.createCluster();
--      cluster = clusterDAO.findById(clusterId);
--
--      cvA = new ClusterVersionEntity(cluster, helper.getOrCreateRepositoryVersion(HDP_22_STACK, "2.2.0.0-995"), RepositoryVersionState.CURRENT, System.currentTimeMillis(), System.currentTimeMillis(), "admin");
--      clusterVersionDAO.create(cvA);
--      cvAId = cvA.getId();
--    } else {
--      cluster = clusterDAO.findById(clusterId);
--      cvA = clusterVersionDAO.findByPK(cvAId);
--    }
--
--    // Install B
--    if (currStep >= 2) {
--      if (lastStep <= 1) {
--        cvB = new ClusterVersionEntity(cluster, helper.getOrCreateRepositoryVersion(HDP_22_STACK, "2.2.0.1-998"), RepositoryVersionState.INSTALLED, System.currentTimeMillis(), System.currentTimeMillis(), "admin");
--        clusterVersionDAO.create(cvB);
--        cvBId = cvB.getId();
--      } else {
--        cvB = clusterVersionDAO.findByPK(cvBId);
--      }
--    }
--
--    // Switch from A to B
--    if (currStep >= 3 && lastStep <= 2) {
--      cvA.setState(RepositoryVersionState.INSTALLED);
--      cvB.setState(RepositoryVersionState.CURRENT);
--      clusterVersionDAO.merge(cvA);
--      clusterVersionDAO.merge(cvB);
--    }
--
--    // Start upgrading C
--    if (currStep >= 4) {
--      if (lastStep <= 3) {
--        cvC = new ClusterVersionEntity(cluster, helper.getOrCreateRepositoryVersion(HDP_22_STACK, "2.2.0.0-100"), RepositoryVersionState.INSTALLING, System.currentTimeMillis(), "admin");
--        clusterVersionDAO.create(cvC);
--        cvCId = cvC.getId();
--      } else {
--        cvC = clusterVersionDAO.findByPK(cvCId);
--      }
--    }
--
--    // Fail upgrade for C
--    if (currStep >= 5 && lastStep <= 4) {
--        cvC.setState(RepositoryVersionState.INSTALL_FAILED);
--        cvC.setEndTime(System.currentTimeMillis());
--        clusterVersionDAO.merge(cvC);
--    }
--
--    // Retry upgrade on C
--    if (currStep >= 6 && lastStep <= 5) {
--        cvC.setState(RepositoryVersionState.INSTALLING);
--        cvC.setEndTime(0L);
--        clusterVersionDAO.merge(cvC);
--    }
--
--    // Finalize upgrade on C to make it the current cluster version
--    if (currStep >= 7 && lastStep <= 6) {
--        cvC.setState(RepositoryVersionState.CURRENT);
--        cvC.setEndTime(System.currentTimeMillis());
--        clusterVersionDAO.merge(cvC);
--
--        cvA.setState(RepositoryVersionState.INSTALLED);
--        cvB.setState(RepositoryVersionState.INSTALLED);
--        clusterVersionDAO.merge(cvA);
--        clusterVersionDAO.merge(cvB);
--    }
--
--    lastStep = currStep;
--  }
--
--  @Test
--  public void testFindByStackAndVersion() throws Exception {
--    createRecordsUntilStep(1);
--    Assert.assertEquals(
--        0,
--        clusterVersionDAO.findByStackAndVersion("non existing", "non existing",
--            "non existing").size());
--
--    Assert.assertEquals(
--        1,
--        clusterVersionDAO.findByStackAndVersion(HDP_22_STACK.getStackName(),
--            HDP_22_STACK.getStackVersion(), "2.2.0.0-995").size());
--  }
--
--  @Test
--  public void testFindByCluster() throws Exception {
--    createRecordsUntilStep(1);
--    Assert.assertEquals(0, clusterVersionDAO.findByCluster("non existing").size());
--    Assert.assertEquals(1, clusterVersionDAO.findByCluster(cluster.getClusterName()).size());
--  }
--
--  @Test
--  public void testFindByClusterAndStackAndVersion() throws Exception {
--    createRecordsUntilStep(1);
--    Assert.assertNull(clusterVersionDAO.findByClusterAndStackAndVersion(
--        cluster.getClusterName(), BAD_STACK, "non existing"));
--
--    Assert.assertNotNull(clusterVersionDAO.findByClusterAndStackAndVersion(
--        cluster.getClusterName(), HDP_22_STACK, "2.2.0.0-995"));
--  }
--
--  /**
--   * At all times the cluster should have a cluster version whose state is {@link org.apache.ambari.server.state.RepositoryVersionState#CURRENT}
--   */
--  @Test
--  public void testFindByClusterAndStateCurrent() throws Exception {
--    createRecordsUntilStep(1);
--    Assert.assertNotNull(clusterVersionDAO.findByClusterAndStateCurrent(cluster.getClusterName()));
--
--    createRecordsUntilStep(2);
--    Assert.assertNotNull(clusterVersionDAO.findByClusterAndStateCurrent(cluster.getClusterName()));
--
--    createRecordsUntilStep(3);
--    Assert.assertNotNull(clusterVersionDAO.findByClusterAndStateCurrent(cluster.getClusterName()));
--
--    createRecordsUntilStep(4);
--    Assert.assertNotNull(clusterVersionDAO.findByClusterAndStateCurrent(cluster.getClusterName()));
--
--    createRecordsUntilStep(5);
--    Assert.assertNotNull(clusterVersionDAO.findByClusterAndStateCurrent(cluster.getClusterName()));
--
--    createRecordsUntilStep(6);
--    Assert.assertNotNull(clusterVersionDAO.findByClusterAndStateCurrent(cluster.getClusterName()));
--
--    createRecordsUntilStep(7);
--    Assert.assertNotNull(clusterVersionDAO.findByClusterAndStateCurrent(cluster.getClusterName()));
--  }
--
--  /**
--   * Test the state of certain cluster versions.
--   */
--  @Test
--  public void testFindByClusterAndState() throws Exception {
--    createRecordsUntilStep(1);
--    Assert.assertEquals(1, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.CURRENT).size());
--    Assert.assertEquals(0, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALLED).size());
--    Assert.assertEquals(0, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALLING).size());
--    Assert.assertEquals(0, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALL_FAILED).size());
--
--    createRecordsUntilStep(2);
--    Assert.assertEquals(1, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.CURRENT).size());
--    Assert.assertEquals(1, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALLED).size());
--    Assert.assertEquals(0, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALLING).size());
--    Assert.assertEquals(0, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALL_FAILED).size());
--
--    createRecordsUntilStep(3);
--    Assert.assertEquals(1, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.CURRENT).size());
--    Assert.assertEquals(1, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALLED).size());
--    Assert.assertEquals(0, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALLING).size());
--    Assert.assertEquals(0, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALL_FAILED).size());
--
--    createRecordsUntilStep(4);
--    Assert.assertEquals(1, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.CURRENT).size());
--    Assert.assertEquals(1, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALLED).size());
--    Assert.assertEquals(1, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALLING).size());
--    Assert.assertEquals(0, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALL_FAILED).size());
--
--    createRecordsUntilStep(5);
--    Assert.assertEquals(1, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.CURRENT).size());
--    Assert.assertEquals(1, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALLED).size());
--    Assert.assertEquals(0, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALLING).size());
--    Assert.assertEquals(1, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALL_FAILED).size());
--
--    createRecordsUntilStep(6);
--    Assert.assertEquals(1, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.CURRENT).size());
--    Assert.assertEquals(1, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALLED).size());
--    Assert.assertEquals(1, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALLING).size());
--    Assert.assertEquals(0, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALL_FAILED).size());
--
--    createRecordsUntilStep(7);
--    Assert.assertEquals(1, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.CURRENT).size());
--    Assert.assertEquals(2, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALLED).size());
--    Assert.assertEquals(0, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALLING).size());
--    Assert.assertEquals(0, clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.INSTALL_FAILED).size());
--  }
--
--  @After
--  public void after() throws AmbariException, SQLException {
--    H2DatabaseCleaner.clearDatabaseAndStopPersistenceService(injector);
--    injector = null;
--  }
--}