You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by ad...@apache.org on 2018/05/18 12:17:47 UTC

[ambari] branch branch-feature-AMBARI-14714 updated: AMBARI-23874. Move user-related info to stack-level params (#1307)

This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch branch-feature-AMBARI-14714
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/branch-feature-AMBARI-14714 by this push:
     new ddf2ee3  AMBARI-23874. Move user-related info to stack-level params (#1307)
ddf2ee3 is described below

commit ddf2ee3bf25139c98b90cfebb3c9c1be77c7917f
Author: Doroszlai, Attila <64...@users.noreply.github.com>
AuthorDate: Fri May 18 14:17:43 2018 +0200

    AMBARI-23874. Move user-related info to stack-level params (#1307)
---
 .../execution_command/execution_command.py         |  12 +-
 .../functions/get_not_managed_resources.py         |   6 +-
 .../libraries/functions/stack_features.py          |  12 +-
 .../libraries/functions/version_select_util.py     |   2 +-
 .../resource_management/libraries/script/script.py |  32 +-
 .../ambari/server/agent/ExecutionCommand.java      |   3 +
 .../ambari/server/agent/stomp/MetadataHolder.java  |  12 +-
 .../ambari/server/configuration/AmbariConfig.java  | 130 +++++++
 .../controller/AmbariManagementController.java     |   3 -
 .../controller/AmbariManagementControllerImpl.java | 382 ++-----------------
 .../server/metadata/ClusterMetadataGenerator.java  | 286 ++++++++++++++
 .../serveraction/upgrades/ConfigureAction.java     |   6 +-
 .../apache/ambari/server/state/ConfigHelper.java   |  18 +-
 .../apache/ambari/server/state/UpgradeHelper.java  |   6 +-
 .../ambari/server/state/cluster/ClustersImpl.java  |   6 +-
 .../server/upgrade/AbstractUpgradeCatalog.java     |   4 +-
 .../stack-hooks/before-ANY/scripts/params.py       |  28 +-
 .../before-ANY/scripts/shared_initialization.py    |  77 ----
 .../stack-hooks/before-INSTALL/scripts/params.py   |  16 +-
 .../before-START/scripts/custom_extensions.py      |  31 +-
 .../stack-hooks/before-START/scripts/params.py     |  16 +-
 .../server/configuration/AmbariConfigTest.java     |  45 +++
 .../AmbariCustomCommandExecutionHelperTest.java    | 190 ++++++++--
 .../AmbariManagementControllerImplTest.java        | 416 +++++----------------
 .../controller/AmbariManagementControllerTest.java |  18 +-
 .../internal/ComponentResourceProviderTest.java    |  19 +-
 .../server/upgrade/UpgradeCatalog252Test.java      |   8 +-
 .../server/upgrade/UpgradeCatalog260Test.java      |  13 +-
 .../server/upgrade/UpgradeCatalog270Test.java      |   9 +-
 29 files changed, 840 insertions(+), 966 deletions(-)

diff --git a/ambari-common/src/main/python/resource_management/libraries/execution_command/execution_command.py b/ambari-common/src/main/python/resource_management/libraries/execution_command/execution_command.py
index 311211d..506deef 100644
--- a/ambari-common/src/main/python/resource_management/libraries/execution_command/execution_command.py
+++ b/ambari-common/src/main/python/resource_management/libraries/execution_command/execution_command.py
@@ -20,8 +20,6 @@ limitations under the License.
 
 __all__ = ["ExecutionCommand"]
 
-import ambari_simplejson
-
 from resource_management.libraries.execution_command import module_configs
 
 
@@ -265,28 +263,28 @@ class ExecutionCommand(object):
     Retrieve mpack version from command.json, i.e "stack_version": "1.0.0-b224"
     :return: mpack version string
     """
-    return self.__get_value("clusterLevelParams/stack_version")
+    return self.__get_value("stackSettings/stack_version")
 
   def get_user_groups(self):
     """
     Retrieve ambari server user groups, i.e "user_groups": "{\"zookeeper\":[\"hadoop\"],\"ambari-qa\":[\"hadoop\"]}"
     :return: a user group dict object
     """
-    return self.__get_value("clusterLevelParams/user_groups")
+    return self.__get_value("stackSettings/user_groups")
 
   def get_group_list(self):
     """
     Retrieve a list of user groups from command.json, i.e "group_list": "[\"hadoop\"]"
     :return: a list of groups
     """
-    return self.__get_value("clusterLevelParams/group_list")
+    return self.__get_value("stackSettings/group_list")
 
   def get_user_list(self):
     """
     Retrieve a list of users from command.json, i.e "user_list": "[\"zookeeper\",\"ambari-qa\"]"
     :return: a list of users
     """
-    return self.__get_value("clusterLevelParams/user_list")
+    return self.__get_value("stackSettings/user_list")
 
   """
   Agent related variable section
@@ -433,4 +431,4 @@ class ExecutionCommand(object):
     return self.__get_value('clusterHostInfo/all_racks', [])
 
   def get_all_ipv4_ips(self):
-    return self.__get_value('clusterHostInfo/all_ipv4_ips', [])
\ No newline at end of file
+    return self.__get_value('clusterHostInfo/all_ipv4_ips', [])
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/get_not_managed_resources.py b/ambari-common/src/main/python/resource_management/libraries/functions/get_not_managed_resources.py
index 6181d5f..611e1d2 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/get_not_managed_resources.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/get_not_managed_resources.py
@@ -30,11 +30,11 @@ from resource_management.libraries.functions.cluster_settings import get_cluster
 def get_not_managed_resources():
   """
   Returns a list of not managed hdfs paths.
-  The result contains all paths from clusterLevelParams/not_managed_hdfs_path_list
+  The result contains all paths from stackSettings/not_managed_hdfs_path_list
   except config values from cluster-env/managed_hdfs_resource_property_names
   """
   config = Script.get_config()
-  not_managed_hdfs_path_list = json.loads(config['clusterLevelParams']['not_managed_hdfs_path_list'])[:]
+  not_managed_hdfs_path_list = json.loads(config['stackSettings']['not_managed_hdfs_path_list'])[:]
   if get_cluster_setting_value('managed_hdfs_resource_property_names') is not None:
     managed_hdfs_resource_property_names = get_cluster_setting_value('managed_hdfs_resource_property_names')
     managed_hdfs_resource_property_list = filter(None, [property.strip() for property in managed_hdfs_resource_property_names.split(',')])
@@ -49,4 +49,4 @@ def get_not_managed_resources():
         while property_value in not_managed_hdfs_path_list:
           not_managed_hdfs_path_list.remove(property_value)
 
-  return not_managed_hdfs_path_list
\ No newline at end of file
+  return not_managed_hdfs_path_list
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
index d1d6451..fe93ff3 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py
@@ -39,7 +39,7 @@ def check_stack_feature(stack_feature, stack_version):
   IMPORTANT, notice that the mapping of feature to version comes from cluster-env if it exists there.
   :param stack_feature: Feature name to check if it is supported by the stack. For example: "rolling_upgrade"
   :param stack_version: Version of the stack
-  :return: Will return True if successful, otherwise, False. 
+  :return: Will return True if successful, otherwise, False.
   """
 
   from resource_management.libraries.functions.default import default
@@ -84,7 +84,7 @@ def check_stack_feature(stack_feature, stack_version):
         return True
   else:
     Logger.warning("Stack features not defined by stack")
-        
+
   return False
 
 
@@ -104,11 +104,11 @@ def get_stack_feature_version(config):
   """
   from resource_management.libraries.functions.default import default
 
-  if "clusterLevelParams" not in config or "commandParams" not in config:
-    raise Fail("Unable to determine the correct version since clusterLevelParams and commandParams were not present in the configuration dictionary")
+  if "stackSettings" not in config or "commandParams" not in config:
+    raise Fail("Unable to determine the correct version since stackSettings and commandParams were not present in the configuration dictionary")
 
   # should always be there
-  stack_version = config['clusterLevelParams']['stack_version']
+  stack_version = config['stackSettings']['stack_version']
 
   # something like 2.4.0.0-1234; represents the version for the command
   # (or None if this is a cluster install and it hasn't been calculated yet)
@@ -177,4 +177,4 @@ def _is_stop_command(config):
   if role_command == _ROLE_COMMAND_CUSTOM and custom_command == _ROLE_COMMAND_STOP:
     return True
 
-  return False
\ No newline at end of file
+  return False
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py b/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py
index 5203407..538e427 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py
@@ -34,7 +34,7 @@ def get_component_version_from_symlink(stack_name, component_name):
   version which is referenced by the symlink.
 
   :param stack_name: one of HDP, HDPWIN, BIGTOP, PHD, etc. usually retrieved from
-  the command-#.json file's ["clusterLevelParams"]["stack_name"]
+  the command-#.json file's ["stackSettings"]["stack_name"]
   :param component_name: Component name as a string necessary to get the version
   :return: Returns a string if found, e.g., 2.2.1.0-2175, otherwise, returns None
   """
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index 3fd1f09..234d94a 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -66,14 +66,6 @@ from resource_management.libraries.execution_command.module_configs import Modul
 
 import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
 
-if OSCheck.is_windows_family():
-  from resource_management.libraries.functions.install_windows_msi import install_windows_msi
-  from resource_management.libraries.functions.reload_windows_env import reload_windows_env
-  from resource_management.libraries.functions.zip_archive import archive_dir
-  from resource_management.libraries.resources import Msi
-else:
-  from resource_management.libraries.functions.tar_archive import archive_dir
-
 USAGE = """Usage: {0} <COMMAND> <JSON_CONFIG> <BASEDIR> <STROUTPUT> <LOGGING_LEVEL> <TMP_DIR> [PROTOCOL]
 
 <COMMAND> command type (INSTALL/CONFIGURE/START/STOP/SERVICE_CHECK...)
@@ -251,7 +243,7 @@ class Script(object):
     :return: True or False
     """
     from resource_management.libraries.functions.default import default
-    stack_version_unformatted = str(default("/clusterLevelParams/stack_version", ""))
+    stack_version_unformatted = self.execution_command.get_mpack_version()
     stack_version_formatted = format_stack_version(stack_version_unformatted)
     if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
       if command_name.lower() == "status":
@@ -458,11 +450,11 @@ class Script(object):
 
   def get_stack_version_before_packages_installed(self):
     """
-    This works in a lazy way (calculates the version first time and stores it). 
+    This works in a lazy way (calculates the version first time and stores it).
     If you need to recalculate the version explicitly set:
-    
+
     Script.stack_version_from_distro_select = None
-    
+
     before the call. However takes a bit of time, so better to avoid.
 
     :return: stack version including the build number. e.g.: 2.3.4.0-1234.
@@ -691,10 +683,10 @@ class Script(object):
     :return: a normalized stack version or None
     """
     config = Script.get_config()
-    if 'clusterLevelParams' not in config or 'stack_version' not in config['clusterLevelParams']:
+    if 'stackSettings' not in config or 'stack_version' not in config['stackSettings']:
       return None
 
-    stack_version_unformatted = str(config['clusterLevelParams']['stack_version'])
+    stack_version_unformatted = str(config['stackSettings']['stack_version'])
 
     if stack_version_unformatted is None or stack_version_unformatted == '':
       return None
@@ -806,7 +798,7 @@ class Script(object):
     List of packages that are required by service is received from the server
     as a command parameter. The method installs all packages
     from this list
-    
+
     exclude_packages - list of regexes (possibly raw strings as well), the
     packages which match the regex won't be installed.
     NOTE: regexes don't have Python syntax, but simple package regexes which support only * and .* and ?
@@ -841,14 +833,6 @@ class Script(object):
     except KeyError:
       traceback.print_exc()
 
-    if OSCheck.is_windows_family():
-      #TODO hacky install of windows msi, remove it or move to old(2.1) stack definition when component based install will be implemented
-      hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
-      install_windows_msi(config['ambariLevelParams']['jdk_location'],
-                          config["agentLevelParams"]["agentCacheDir"], ["hdp-2.3.0.0.winpkg.msi", "hdp-2.3.0.0.cab", "hdp-2.3.0.0-01.cab"],
-                          hadoop_user, self.get_password(hadoop_user),
-                          str(config['clusterLevelParams']['stack_version']))
-      reload_windows_env()
 
   def check_package_condition(self, package):
     condition = package['condition']
@@ -872,7 +856,7 @@ class Script(object):
   @staticmethod
   def matches_any_regexp(string, regexp_list):
     for regex in regexp_list:
-      # we cannot use here Python regex, since * will create some troubles matching plaintext names. 
+      # we cannot use here Python regex, since * will create some troubles matching plaintext names.
       package_regex = '^' + re.escape(regex).replace('\\.\\*','.*').replace("\\?", ".").replace("\\*", ".*") + '$'
       if re.match(package_regex, string):
         return True
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
index f1df6b5..f04d14e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
@@ -538,6 +538,9 @@ public class ExecutionCommand extends AgentCommand {
     String AMBARI_JDK_NAME = "ambari_jdk_name";
     String AMBARI_JCE_NAME = "ambari_jce_name";
     String AMBARI_JAVA_VERSION = "ambari_java_version";
+    String AMBARI_SERVER_HOST = "ambari_server_host";
+    String AMBARI_SERVER_PORT = "ambari_server_port";
+    String AMBARI_SERVER_USE_SSL = "ambari_server_use_ssl";
     String JAVA_VERSION = "java_version";
     String JDK_NAME = "jdk_name";
     String JCE_NAME = "jce_name";
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/stomp/MetadataHolder.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/stomp/MetadataHolder.java
index 11aa512..7cd6399 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/stomp/MetadataHolder.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/stomp/MetadataHolder.java
@@ -21,12 +21,12 @@ import java.util.Map;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.agent.stomp.dto.MetadataCluster;
-import org.apache.ambari.server.controller.AmbariManagementControllerImpl;
 import org.apache.ambari.server.events.ClusterComponentsRepoChangedEvent;
 import org.apache.ambari.server.events.ClusterConfigChangedEvent;
 import org.apache.ambari.server.events.MetadataUpdateEvent;
 import org.apache.ambari.server.events.ServiceInstalledEvent;
 import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
+import org.apache.ambari.server.metadata.ClusterMetadataGenerator;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.commons.collections.MapUtils;
@@ -40,7 +40,7 @@ import com.google.inject.Singleton;
 public class MetadataHolder extends AgentClusterDataHolder<MetadataUpdateEvent> {
 
   @Inject
-  private AmbariManagementControllerImpl ambariManagementController;
+  private ClusterMetadataGenerator metadataGenerator;
 
   @Inject
   private Provider<Clusters> m_clusters;
@@ -52,7 +52,7 @@ public class MetadataHolder extends AgentClusterDataHolder<MetadataUpdateEvent>
 
   @Override
   public MetadataUpdateEvent getCurrentData() throws AmbariException {
-    return ambariManagementController.getClustersMetadata();
+    return metadataGenerator.getClustersMetadata(m_clusters.get());
   }
 
   @Override
@@ -85,19 +85,19 @@ public class MetadataHolder extends AgentClusterDataHolder<MetadataUpdateEvent>
   @Subscribe
   public void onConfigsChange(ClusterConfigChangedEvent configChangedEvent) throws AmbariException {
     Cluster cluster = m_clusters.get().getCluster(configChangedEvent.getClusterName());
-    updateData(ambariManagementController.getClusterMetadataOnConfigsUpdate(cluster));
+    updateData(metadataGenerator.getClusterMetadataOnConfigsUpdate(cluster));
 
   }
 
   @Subscribe
   public void onServiceCreate(ServiceInstalledEvent serviceInstalledEvent) throws AmbariException {
     Cluster cluster = m_clusters.get().getCluster(serviceInstalledEvent.getClusterId());
-    updateData(ambariManagementController.getClusterMetadataOnServiceInstall(cluster, serviceInstalledEvent.getServiceName()));
+    updateData(metadataGenerator.getClusterMetadataOnServiceInstall(cluster, serviceInstalledEvent.getServiceName()));
   }
 
   @Subscribe
   public void onClusterComponentsRepoUpdate(ClusterComponentsRepoChangedEvent clusterComponentsRepoChangedEvent) throws AmbariException {
     Cluster cluster = m_clusters.get().getCluster(clusterComponentsRepoChangedEvent.getClusterId());
-    updateData(ambariManagementController.getClusterMetadataOnRepoUpdate(cluster));
+    updateData(metadataGenerator.getClusterMetadataOnRepoUpdate(cluster));
   }
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/configuration/AmbariConfig.java b/ambari-server/src/main/java/org/apache/ambari/server/configuration/AmbariConfig.java
new file mode 100644
index 0000000..74bd2e9
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/configuration/AmbariConfig.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.configuration;
+
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+
+import org.apache.http.client.utils.URIBuilder;
+
+public class AmbariConfig {
+
+  private static final String JDK_RESOURCE_LOCATION = "/resources/";
+
+  private final String masterHostname;
+  private final Integer masterPort;
+  private final String masterProtocol;
+
+  private final String jdkResourceUrl;
+  private final String javaHome;
+  private final String jdkName;
+  private final String jceName;
+  private final String ojdbcUrl;
+  private final String serverDB;
+  private final String mysqljdbcUrl;
+
+  public AmbariConfig(Configuration configs) throws UnknownHostException {
+    this(configs, InetAddress.getLocalHost().getCanonicalHostName());
+  }
+
+  AmbariConfig(Configuration configs, String masterHostname) {
+    this.masterHostname = masterHostname;
+    if (configs != null) {
+      if (configs.getApiSSLAuthentication()) {
+        masterProtocol = "https";
+        masterPort = configs.getClientSSLApiPort();
+      } else {
+        masterProtocol = "http";
+        masterPort = configs.getClientApiPort();
+      }
+
+      javaHome = configs.getJavaHome();
+      jdkName = configs.getJDKName();
+      jceName = configs.getJCEName();
+      serverDB = configs.getServerDBName();
+
+      jdkResourceUrl = getAmbariServerURI(JDK_RESOURCE_LOCATION);
+      ojdbcUrl = getAmbariServerURI(JDK_RESOURCE_LOCATION + configs.getOjdbcJarName());
+      mysqljdbcUrl = getAmbariServerURI(JDK_RESOURCE_LOCATION + configs.getMySQLJarName());
+    } else {
+      masterProtocol = null;
+      masterPort = null;
+
+      jdkResourceUrl = null;
+      javaHome = null;
+      jdkName = null;
+      jceName = null;
+      ojdbcUrl = null;
+      mysqljdbcUrl = null;
+      serverDB = null;
+    }
+  }
+
+  public String getJdkResourceUrl() {
+    return jdkResourceUrl;
+  }
+
+  public String getJavaHome() {
+    return javaHome;
+  }
+
+  public String getJDKName() {
+    return jdkName;
+  }
+
+  public String getJCEName() {
+    return jceName;
+  }
+
+  public String getServerDB() {
+    return serverDB;
+  }
+
+  public String getOjdbcUrl() {
+    return ojdbcUrl;
+  }
+
+  public String getMysqljdbcUrl() {
+    return mysqljdbcUrl;
+  }
+
+  public String getAmbariServerURI(String path) {
+    return (masterProtocol == null || masterHostname == null || masterPort == null)
+      ? null
+      : getAmbariServerURI(path, masterProtocol, masterHostname, masterPort);
+  }
+
+  static String getAmbariServerURI(String path, String masterProtocol, String masterHostname, Integer masterPort) {
+    URIBuilder uriBuilder = new URIBuilder();
+    uriBuilder.setScheme(masterProtocol);
+    uriBuilder.setHost(masterHostname);
+    uriBuilder.setPort(masterPort);
+
+    String[] parts = path.split("\\?");
+
+    if (parts.length > 1) {
+      uriBuilder.setPath(parts[0]);
+      uriBuilder.setQuery(parts[1]);
+    } else {
+      uriBuilder.setPath(path);
+    }
+
+    return uriBuilder.toString();
+  }
+
+}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
index b7067db..4069e38 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
@@ -39,7 +39,6 @@ import org.apache.ambari.server.controller.metrics.MetricsCollectorHAManager;
 import org.apache.ambari.server.controller.metrics.timeline.cache.TimelineMetricCacheProvider;
 import org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException;
 import org.apache.ambari.server.events.AmbariEvent;
-import org.apache.ambari.server.events.MetadataUpdateEvent;
 import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
 import org.apache.ambari.server.metadata.RoleCommandOrder;
 import org.apache.ambari.server.orm.entities.ExtensionLinkEntity;
@@ -1011,7 +1010,5 @@ public interface AmbariManagementController {
 
   HostRepositories retrieveHostRepositories(Cluster cluster, Host host) throws AmbariException;
 
-  MetadataUpdateEvent getClusterMetadataOnConfigsUpdate(Cluster cluster) throws AmbariException;
-
 }
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 202c474..24b95e6 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -18,43 +18,23 @@
 
 package org.apache.ambari.server.controller;
 
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AGENT_STACK_RETRY_COUNT;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AGENT_STACK_RETRY_ON_UNAVAILABILITY;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.CLIENTS_TO_UPDATE_CONFIGS;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.COMMAND_RETRY_ENABLED;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.COMMAND_TIMEOUT;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.CUSTOM_FOLDER;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.DB_DRIVER_FILENAME;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.DB_NAME;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.GPL_LICENSE_ACCEPTED;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.GROUP_LIST;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.HOOKS_FOLDER;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.HOST_SYS_PREPPED;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JAVA_HOME;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JAVA_VERSION;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JCE_NAME;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JDK_LOCATION;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JDK_NAME;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.MAX_DURATION_OF_RETRIES;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.MYSQL_JDBC_URL;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.NOT_MANAGED_HDFS_PATH_LIST;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.ORACLE_JDBC_URL;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.PACKAGE_LIST;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SCRIPT;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SCRIPT_TYPE;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SERVICE_PACKAGE_FOLDER;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.STACK_NAME;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.STACK_VERSION;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.UNLIMITED_KEY_JCE_REQUIRED;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.USER_GROUPS;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.USER_LIST;
 import static org.apache.ambari.server.controller.AmbariCustomCommandExecutionHelper.masterToSlaveMappingForDecom;
 
 import java.io.File;
 import java.io.FileReader;
 import java.io.IOException;
 import java.lang.reflect.Type;
-import java.net.InetAddress;
 import java.text.MessageFormat;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -106,12 +86,11 @@ import org.apache.ambari.server.agent.stomp.HostLevelParamsHolder;
 import org.apache.ambari.server.agent.stomp.MetadataHolder;
 import org.apache.ambari.server.agent.stomp.TopologyHolder;
 import org.apache.ambari.server.agent.stomp.dto.HostRepositories;
-import org.apache.ambari.server.agent.stomp.dto.MetadataCluster;
-import org.apache.ambari.server.agent.stomp.dto.MetadataServiceInfo;
 import org.apache.ambari.server.agent.stomp.dto.TopologyCluster;
 import org.apache.ambari.server.agent.stomp.dto.TopologyComponent;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.api.services.LoggingService;
+import org.apache.ambari.server.configuration.AmbariConfig;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.configuration.Configuration.DatabaseType;
 import org.apache.ambari.server.controller.internal.DeleteHostComponentStatusMetaData;
@@ -129,10 +108,10 @@ import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException;
 import org.apache.ambari.server.controller.spi.SystemException;
 import org.apache.ambari.server.customactions.ActionDefinition;
-import org.apache.ambari.server.events.MetadataUpdateEvent;
 import org.apache.ambari.server.events.TopologyUpdateEvent;
 import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
 import org.apache.ambari.server.metadata.ActionMetadata;
+import org.apache.ambari.server.metadata.ClusterMetadataGenerator;
 import org.apache.ambari.server.metadata.RoleCommandOrder;
 import org.apache.ambari.server.metadata.RoleCommandOrderProvider;
 import org.apache.ambari.server.orm.dao.ClusterDAO;
@@ -234,11 +213,9 @@ import org.apache.ambari.server.topology.TopologyDeleteFormer;
 import org.apache.ambari.server.utils.SecretReference;
 import org.apache.ambari.server.utils.StageUtils;
 import org.apache.commons.collections.CollectionUtils;
-import org.apache.commons.collections.MapUtils;
 import org.apache.commons.lang.BooleanUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.math.NumberUtils;
-import org.apache.http.client.utils.URIBuilder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -274,9 +251,6 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
   private static final String CLUSTER_PHASE_PROPERTY = "phase";
   private static final String CLUSTER_PHASE_INITIAL_INSTALL = "INITIAL_INSTALL";
   private static final String CLUSTER_PHASE_INITIAL_START = "INITIAL_START";
-  private static final String AMBARI_SERVER_HOST = "ambari_server_host";
-  private static final String AMBARI_SERVER_PORT = "ambari_server_port";
-  private static final String AMBARI_SERVER_USE_SSL = "ambari_server_use_ssl";
 
   private static final String BASE_LOG_DIR = "/tmp/ambari";
 
@@ -292,6 +266,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
   private final Injector injector;
 
   private final Gson gson;
+  private final AmbariConfig ambariConfig;
 
 
   @Inject
@@ -390,28 +365,12 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
   @Inject
   private RepositoryVersionHelper repoVersionHelper;
 
+  private final ClusterMetadataGenerator metadataGenerator;
+
   /**
    * The KerberosHelper to help setup for enabling for disabling Kerberos
    */
-  private KerberosHelper kerberosHelper;
-
-  final private String masterHostname;
-  final private Integer masterPort;
-  final private String masterProtocol;
-
-  final private static String JDK_RESOURCE_LOCATION =
-      "/resources/";
-
-  final private static int REPO_URL_CONNECT_TIMEOUT = 3000;
-  final private static int REPO_URL_READ_TIMEOUT = 2000;
-
-  final private String jdkResourceUrl;
-  final private String javaHome;
-  final private String jdkName;
-  final private String jceName;
-  final private String ojdbcUrl;
-  final private String serverDB;
-  final private String mysqljdbcUrl;
+  private final KerberosHelper kerberosHelper;
 
   private boolean ldapSyncInProgress;
 
@@ -429,73 +388,27 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
 
   @Inject
   public AmbariManagementControllerImpl(ActionManager actionManager,
-      Clusters clusters, Injector injector) throws Exception {
+      Clusters clusters, ClusterMetadataGenerator metadataGenerator, Injector injector) throws Exception {
     this.clusters = clusters;
     this.actionManager = actionManager;
     this.injector = injector;
     injector.injectMembers(this);
     gson = injector.getInstance(Gson.class);
     LOG.info("Initializing the AmbariManagementControllerImpl");
-    masterHostname =  InetAddress.getLocalHost().getCanonicalHostName();
     maintenanceStateHelper = injector.getInstance(MaintenanceStateHelper.class);
     kerberosHelper = injector.getInstance(KerberosHelper.class);
     m_metadataHolder = injector.getProvider(MetadataHolder.class);
     m_agentConfigsHolder = injector.getProvider(AgentConfigsHolder.class);
     hostComponentStateDAO = injector.getInstance(HostComponentStateDAO.class);
     serviceComponentDesiredStateDAO = injector.getInstance(ServiceComponentDesiredStateDAO.class);
-    if(configs != null)
-    {
-      if (configs.getApiSSLAuthentication()) {
-        masterProtocol = "https";
-        masterPort = configs.getClientSSLApiPort();
-      } else {
-        masterProtocol = "http";
-        masterPort = configs.getClientApiPort();
-      }
-      jdkResourceUrl = getAmbariServerURI(JDK_RESOURCE_LOCATION);
-      javaHome = configs.getJavaHome();
-      jdkName = configs.getJDKName();
-      jceName = configs.getJCEName();
-      ojdbcUrl = getAmbariServerURI(JDK_RESOURCE_LOCATION + "/" + configs.getOjdbcJarName());
-      mysqljdbcUrl = getAmbariServerURI(JDK_RESOURCE_LOCATION + "/" + configs.getMySQLJarName());
-
-      serverDB = configs.getServerDBName();
-    } else {
-      masterProtocol = null;
-      masterPort = null;
-
-      jdkResourceUrl = null;
-      javaHome = null;
-      jdkName = null;
-      jceName = null;
-      ojdbcUrl = null;
-      mysqljdbcUrl = null;
-      serverDB = null;
-    }
+    this.metadataGenerator = metadataGenerator;
+    ambariConfig = metadataGenerator != null ? metadataGenerator.getAmbariConfig() : new AmbariConfig(configs);
     helper = new AmbariManagementHelper(stackDAO, extensionDAO, linkDAO);
   }
 
   @Override
   public String getAmbariServerURI(String path) {
-    if(masterProtocol==null || masterHostname==null || masterPort==null) {
-      return null;
-    }
-
-    URIBuilder uriBuilder = new URIBuilder();
-    uriBuilder.setScheme(masterProtocol);
-    uriBuilder.setHost(masterHostname);
-    uriBuilder.setPort(masterPort);
-
-    String[] parts = path.split("\\?");
-
-    if (parts.length > 1) {
-      uriBuilder.setPath(parts[0]);
-      uriBuilder.setQuery(parts[1]);
-    } else {
-      uriBuilder.setPath(path);
-    }
-
-    return uriBuilder.toString();
+    return ambariConfig.getAmbariServerURI(path);
   }
 
   @Override
@@ -1410,33 +1323,11 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
       }
     }
 
-    if (request.getComponentName() != null) {
-      if (StringUtils.isBlank(request.getServiceName())) {
-
-        // !!! FIXME the assumption that a component is unique across all stacks is a ticking
-        // time bomb.  Blueprints are making this assumption.
-        String serviceName = "";
-        try {
-          serviceName = findServiceName(cluster, request.getComponentName());
-        } catch (ServiceNotFoundException e) {
-          // handled below
-        }
-
-        if (StringUtils.isBlank(serviceName)) {
-          LOG.error("Unable to find service for componentName : {}", request.getComponentName());
-          throw new ServiceComponentHostNotFoundException(
-              cluster.getClusterName(), null, request.getComponentName(), request.getHostname());
-        }
-
-        request.setServiceName(serviceName);
-      }
-    }
-
     List<Service> services;
-    if (!Strings.isNullOrEmpty(request.getServiceGroupName())) {
-      services = ImmutableList.copyOf(cluster.getServicesByServiceGroup(request.getServiceGroupName()));
-    } else if (!Strings.isNullOrEmpty(request.getServiceName())) {
+    if (!Strings.isNullOrEmpty(request.getServiceName())) {
       services = ImmutableList.of(cluster.getService(request.getServiceName()));
+    } else if (!Strings.isNullOrEmpty(request.getServiceGroupName())) {
+      services = ImmutableList.copyOf(cluster.getServicesByServiceGroup(request.getServiceGroupName()));
     } else {
       services = ImmutableList.copyOf(cluster.getServices().values());
     }
@@ -1529,6 +1420,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
           try {
             if (serviceComponentHostMap == null
                 || !serviceComponentHostMap.containsKey(request.getHostname())) {
+              LOG.debug("Host {} not found in service component's map {}", request.getHostname(), serviceComponentHostMap);
               throw new ServiceComponentHostNotFoundException(cluster.getClusterName(),
                 s.getName(), sc.getName(), request.getHostname());
             }
@@ -2089,7 +1981,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
         }
       }
     }
-    m_metadataHolder.get().updateData(getClusterMetadataOnConfigsUpdate(cluster));
+    m_metadataHolder.get().updateData(metadataGenerator.getClusterMetadataOnConfigsUpdate(cluster));
     m_agentConfigsHolder.get().updateData(cluster.getClusterId(), null);
 
     StackId currentVersion = cluster.getCurrentStackVersion();
@@ -2779,35 +2671,13 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
       String packageList = gson.toJson(packages);
       commandParams.put(PACKAGE_LIST, packageList);
     }
-    Set<PropertyInfo> stackProperties = ambariMetaInfo.getStackProperties(stackInfo.getName(), stackInfo.getVersion());
-
-    Set<PropertyInfo> clusterProperties = ambariMetaInfo.getClusterProperties();
-
-    Set<String> userSet = configHelper.getPropertyValuesWithPropertyType(PropertyType.USER, cluster, clusterDesiredConfigs, servicesMap, stackProperties, clusterProperties);
-    String userList = gson.toJson(userSet);
-    hostParams.put(USER_LIST, userList);
-
-    //Create a user_group mapping and send it as part of the hostLevelParams
-    Map<String, Set<String>> userGroupsMap = configHelper.createUserGroupsMap(
-      cluster, clusterDesiredConfigs, servicesMap, stackProperties, clusterProperties);
-    String userGroups = gson.toJson(userGroupsMap);
-    hostParams.put(USER_GROUPS, userGroups);
-
 
     // Set exec command with 'ClusterSettings' map
     execCmd.setClusterSettings(cluster.getClusterSettingsNameValueMap());
 
     // Set exec command with 'StackSettings' map
-    execCmd.setStackSettings(ambariMetaInfo.getStackSettingsNameValueMap(stackId));
-
-    Set<String> groupSet = configHelper.getPropertyValuesWithPropertyType(PropertyType.GROUP, cluster, clusterDesiredConfigs, servicesMap, stackProperties, clusterProperties);
-    String groupList = gson.toJson(groupSet);
-    hostParams.put(GROUP_LIST, groupList);
-
-    Map<PropertyInfo, String> notManagedHdfsPathMap = configHelper.getPropertiesWithPropertyType(PropertyType.NOT_MANAGED_HDFS_PATH, cluster, clusterDesiredConfigs, servicesMap, stackProperties, clusterProperties);
-    Set<String> notManagedHdfsPathSet = configHelper.filterInvalidPropertyValues(notManagedHdfsPathMap, NOT_MANAGED_HDFS_PATH_LIST);
-    String notManagedHdfsPathList = gson.toJson(notManagedHdfsPathSet);
-    hostParams.put(NOT_MANAGED_HDFS_PATH_LIST, notManagedHdfsPathList);
+    // TODO: avoid sending this in every command; deliver it via "async" metadata instead
+    execCmd.setStackSettings(metadataGenerator.getMetadataStackLevelParams(cluster, stackId));
 
     if (databaseType == DatabaseType.ORACLE) {
       hostParams.put(DB_DRIVER_FILENAME, configs.getOjdbcJarName());
@@ -4958,37 +4828,37 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
 
   @Override
   public String getJdkResourceUrl() {
-    return jdkResourceUrl;
+    return ambariConfig.getJdkResourceUrl();
   }
 
   @Override
   public String getJavaHome() {
-    return javaHome;
+    return ambariConfig.getJavaHome();
   }
 
   @Override
   public String getJDKName() {
-    return jdkName;
+    return ambariConfig.getJDKName();
   }
 
   @Override
   public String getJCEName() {
-    return jceName;
+    return ambariConfig.getJCEName();
   }
 
   @Override
   public String getServerDB() {
-    return serverDB;
+    return ambariConfig.getServerDB();
   }
 
   @Override
   public String getOjdbcUrl() {
-    return ojdbcUrl;
+    return ambariConfig.getOjdbcUrl();
   }
 
   @Override
   public String getMysqljdbcUrl() {
-    return mysqljdbcUrl;
+    return ambariConfig.getMysqljdbcUrl();
   }
 
   @Override
@@ -5727,7 +5597,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
         }
       }
 
-      m_metadataHolder.get().updateData(getClusterMetadataOnConfigsUpdate(cluster));
+      m_metadataHolder.get().updateData(metadataGenerator.getClusterMetadataOnConfigsUpdate(cluster));
       m_agentConfigsHolder.get().updateData(cluster.getClusterId(), null);
 
       if (request.getVersion() != null) {
@@ -5756,87 +5626,6 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     return serviceConfigVersionResponses;
   }
 
-  /**
-   * Collects metadata info about clusters for agent.
-   * @return metadata info about clusters
-   * @throws AmbariException
-   */
-  public MetadataUpdateEvent getClustersMetadata() throws AmbariException {
-    TreeMap<String, MetadataCluster> metadataClusters = new TreeMap<>();
-
-    for (Cluster cl : clusters.getClusters().values()) {
-      StackId stackId = cl.getDesiredStackVersion();
-
-      SecurityType securityType = cl.getSecurityType();
-
-      MetadataCluster metadataCluster = new MetadataCluster(securityType,
-          getMetadataServiceLevelParams(cl),
-          getMetadataClusterLevelParams(cl, stackId));
-      metadataClusters.put(Long.toString(cl.getClusterId()), metadataCluster);
-    }
-
-    MetadataUpdateEvent metadataUpdateEvent = new MetadataUpdateEvent(metadataClusters,
-        getMetadataAmbariLevelParams());
-    return metadataUpdateEvent;
-  }
-
-  public MetadataUpdateEvent getClusterMetadata(Cluster cl) throws AmbariException {
-    TreeMap<String, MetadataCluster> metadataClusters = new TreeMap<>();
-    StackId stackId = cl.getDesiredStackVersion();
-
-    SecurityType securityType = cl.getSecurityType();
-
-    MetadataCluster metadataCluster = new MetadataCluster(securityType,
-        getMetadataServiceLevelParams(cl),
-        getMetadataClusterLevelParams(cl, stackId));
-    metadataClusters.put(Long.toString(cl.getClusterId()), metadataCluster);
-
-    MetadataUpdateEvent metadataUpdateEvent = new MetadataUpdateEvent(metadataClusters,
-        null);
-    return metadataUpdateEvent;
-  }
-
-  @Override
-  public MetadataUpdateEvent getClusterMetadataOnConfigsUpdate(Cluster cl) throws AmbariException {
-    TreeMap<String, MetadataCluster> metadataClusters = new TreeMap<>();
-    StackId stackId = cl.getDesiredStackVersion();
-
-    MetadataCluster metadataCluster = new MetadataCluster(null,
-        new TreeMap<>(),
-        getMetadataClusterLevelConfigsParams(cl, stackId));
-    metadataClusters.put(Long.toString(cl.getClusterId()), metadataCluster);
-
-    MetadataUpdateEvent metadataUpdateEvent = new MetadataUpdateEvent(metadataClusters,
-        null);
-    return metadataUpdateEvent;
-  }
-
-  public MetadataUpdateEvent getClusterMetadataOnRepoUpdate(Cluster cl) throws AmbariException {
-    TreeMap<String, MetadataCluster> metadataClusters = new TreeMap<>();
-
-    MetadataCluster metadataCluster = new MetadataCluster(null,
-        getMetadataServiceLevelParams(cl),
-        new TreeMap<>());
-    metadataClusters.put(Long.toString(cl.getClusterId()), metadataCluster);
-
-    MetadataUpdateEvent metadataUpdateEvent = new MetadataUpdateEvent(metadataClusters,
-        null);
-    return metadataUpdateEvent;
-  }
-
-  public MetadataUpdateEvent getClusterMetadataOnServiceInstall(Cluster cl, String serviceName) throws AmbariException {
-    TreeMap<String, MetadataCluster> metadataClusters = new TreeMap<>();
-
-    MetadataCluster metadataCluster = new MetadataCluster(null,
-        getMetadataServiceLevelParams(cl.getService(serviceName)),
-        new TreeMap<>());
-    metadataClusters.put(Long.toString(cl.getClusterId()), metadataCluster);
-
-    MetadataUpdateEvent metadataUpdateEvent = new MetadataUpdateEvent(metadataClusters,
-        null);
-    return metadataUpdateEvent;
-  }
-
   private String getClientsToUpdateConfigs(ComponentInfo componentInfo) {
     List<String> clientsToUpdateConfigsList = componentInfo.getClientsToUpdateConfigs();
     if (clientsToUpdateConfigsList == null) {
@@ -5919,127 +5708,6 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     return commandParams;
   }
 
-  public TreeMap<String, String> getMetadataClusterLevelParams(Cluster cluster, StackId stackId) throws AmbariException {
-    TreeMap<String, String> clusterLevelParams = new TreeMap<>();
-    clusterLevelParams.put(STACK_NAME, stackId.getStackName());
-    clusterLevelParams.put(STACK_VERSION, stackId.getStackVersion());
-
-    Map<String, DesiredConfig> desiredConfigs = cluster.getDesiredConfigs();
-    if (MapUtils.isNotEmpty(desiredConfigs)) {
-
-      Set<String> userSet = configHelper.getPropertyValuesWithPropertyType(stackId, PropertyType.USER, cluster, desiredConfigs);
-      String userList = gson.toJson(userSet);
-      clusterLevelParams.put(USER_LIST, userList);
-
-      //Create a user_group mapping and send it as part of the hostLevelParams
-      Map<String, Set<String>> userGroupsMap = configHelper.createUserGroupsMap(
-          stackId, cluster, desiredConfigs);
-      String userGroups = gson.toJson(userGroupsMap);
-      clusterLevelParams.put(USER_GROUPS, userGroups);
-
-      Set<String> groupSet = configHelper.getPropertyValuesWithPropertyType(stackId, PropertyType.GROUP, cluster, desiredConfigs);
-      String groupList = gson.toJson(groupSet);
-      clusterLevelParams.put(GROUP_LIST, groupList);
-    }
-    Set<String> notManagedHdfsPathSet = configHelper.getPropertyValuesWithPropertyType(stackId,
-        PropertyType.NOT_MANAGED_HDFS_PATH, cluster, desiredConfigs);
-    String notManagedHdfsPathList = gson.toJson(notManagedHdfsPathSet);
-    clusterLevelParams.put(NOT_MANAGED_HDFS_PATH_LIST, notManagedHdfsPathList);
-    clusterLevelParams.put(HOOKS_FOLDER, configs.getProperty(Configuration.HOOKS_FOLDER));
-
-    return clusterLevelParams;
-  }
-
-  public TreeMap<String, String> getMetadataClusterLevelConfigsParams(Cluster cluster, StackId stackId) throws AmbariException {
-    TreeMap<String, String> clusterLevelParams = new TreeMap<>();
-
-    Map<String, DesiredConfig> desiredConfigs = cluster.getDesiredConfigs();
-    if (MapUtils.isNotEmpty(desiredConfigs)) {
-
-      Set<String> userSet = configHelper.getPropertyValuesWithPropertyType(stackId, PropertyType.USER, cluster, desiredConfigs);
-      String userList = gson.toJson(userSet);
-      clusterLevelParams.put(USER_LIST, userList);
-
-      //Create a user_group mapping and send it as part of the hostLevelParams
-      Map<String, Set<String>> userGroupsMap = configHelper.createUserGroupsMap(
-          stackId, cluster, desiredConfigs);
-      String userGroups = gson.toJson(userGroupsMap);
-      clusterLevelParams.put(USER_GROUPS, userGroups);
-
-      Set<String> groupSet = configHelper.getPropertyValuesWithPropertyType(stackId, PropertyType.GROUP, cluster, desiredConfigs);
-      String groupList = gson.toJson(groupSet);
-      clusterLevelParams.put(GROUP_LIST, groupList);
-    }
-    Set<String> notManagedHdfsPathSet = configHelper.getPropertyValuesWithPropertyType(stackId,
-        PropertyType.NOT_MANAGED_HDFS_PATH, cluster, desiredConfigs);
-    String notManagedHdfsPathList = gson.toJson(notManagedHdfsPathSet);
-    clusterLevelParams.put(NOT_MANAGED_HDFS_PATH_LIST, notManagedHdfsPathList);
-
-    return clusterLevelParams;
-  }
-
-  public TreeMap<String, MetadataServiceInfo> getMetadataServiceLevelParams(Cluster cluster) throws AmbariException {
-    TreeMap<String, MetadataServiceInfo> serviceLevelParams = new TreeMap<>();
-    for (Map.Entry<String, Service> serviceEntry : cluster.getServices().entrySet()) {
-      Service service = serviceEntry.getValue();
-      serviceLevelParams.putAll(getMetadataServiceLevelParams(service));
-    }
-    return serviceLevelParams;
-  }
-
-  public TreeMap<String, MetadataServiceInfo> getMetadataServiceLevelParams(Service service) throws AmbariException {
-    TreeMap<String, MetadataServiceInfo> serviceLevelParams = new TreeMap<>();
-
-    StackId serviceStackId = service.getStackId();
-
-    ServiceInfo serviceInfo = ambariMetaInfo.getService(serviceStackId.getStackName(),
-        serviceStackId.getStackVersion(), service.getName());
-    Long statusCommandTimeout = null;
-    if (serviceInfo.getCommandScript() != null) {
-      statusCommandTimeout = new Long(
-          ambariCustomCommandExecutionHelper.getStatusCommandTimeout(serviceInfo));
-    }
-
-    String servicePackageFolder = serviceInfo.getServicePackageFolder();
-
-    serviceLevelParams.put(serviceInfo.getName(), new MetadataServiceInfo(serviceInfo.getVersion(),
-        serviceInfo.isCredentialStoreEnabled(), statusCommandTimeout, servicePackageFolder));
-
-    return serviceLevelParams;
-  }
-
-  public TreeMap<String, String> getMetadataAmbariLevelParams() throws AmbariException {
-    TreeMap<String, String> ambariLevelParams = new TreeMap<>();
-    ambariLevelParams.put(JDK_LOCATION, getJdkResourceUrl());
-    ambariLevelParams.put(JAVA_HOME, getJavaHome());
-    ambariLevelParams.put(JAVA_VERSION, String.valueOf(configs.getJavaVersion()));
-    ambariLevelParams.put(JDK_NAME, getJDKName());
-    ambariLevelParams.put(JCE_NAME, getJCEName());
-    ambariLevelParams.put(DB_NAME, getServerDB());
-    ambariLevelParams.put(MYSQL_JDBC_URL, getMysqljdbcUrl());
-    ambariLevelParams.put(ORACLE_JDBC_URL, getOjdbcUrl());
-    ambariLevelParams.put(DB_DRIVER_FILENAME, configs.getMySQLJarName());
-    ambariLevelParams.put(HOST_SYS_PREPPED, configs.areHostsSysPrepped());
-    ambariLevelParams.put(AGENT_STACK_RETRY_ON_UNAVAILABILITY, configs.isAgentStackRetryOnInstallEnabled());
-    ambariLevelParams.put(AGENT_STACK_RETRY_COUNT, configs.getAgentStackRetryOnInstallCount());
-
-    boolean serverUseSsl = configs.getApiSSLAuthentication();
-    int port = serverUseSsl ? configs.getClientSSLApiPort() : configs.getClientApiPort();
-    ambariLevelParams.put(AMBARI_SERVER_HOST, StageUtils.getHostName());
-    ambariLevelParams.put(AMBARI_SERVER_PORT, Integer.toString(port));
-    ambariLevelParams.put(AMBARI_SERVER_USE_SSL, Boolean.toString(serverUseSsl));
-
-    for (Map.Entry<String, String> dbConnectorName : configs.getDatabaseConnectorNames().entrySet()) {
-      ambariLevelParams.put(dbConnectorName.getKey(), dbConnectorName.getValue());
-    }
-    for (Map.Entry<String, String> previousDBConnectorName : configs.getPreviousDatabaseConnectorNames().entrySet()) {
-      ambariLevelParams.put(previousDBConnectorName.getKey(), previousDBConnectorName.getValue());
-    }
-    ambariLevelParams.put(GPL_LICENSE_ACCEPTED, configs.getGplLicenseAccepted().toString());
-
-    return ambariLevelParams;
-  }
-
   @Override
   //TODO : Revisit, trunk merge 03/20
   public HostRepositories retrieveHostRepositories(Cluster cluster, Host host) throws AmbariException {
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/metadata/ClusterMetadataGenerator.java b/ambari-server/src/main/java/org/apache/ambari/server/metadata/ClusterMetadataGenerator.java
new file mode 100644
index 0000000..4f2ba4e
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/metadata/ClusterMetadataGenerator.java
@@ -0,0 +1,286 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.metadata;
+
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AGENT_STACK_RETRY_COUNT;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AGENT_STACK_RETRY_ON_UNAVAILABILITY;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AMBARI_SERVER_HOST;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AMBARI_SERVER_PORT;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AMBARI_SERVER_USE_SSL;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.DB_DRIVER_FILENAME;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.DB_NAME;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.GPL_LICENSE_ACCEPTED;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.GROUP_LIST;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.HOOKS_FOLDER;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.HOST_SYS_PREPPED;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JAVA_HOME;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JAVA_VERSION;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JCE_NAME;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JDK_LOCATION;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JDK_NAME;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.MYSQL_JDBC_URL;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.NOT_MANAGED_HDFS_PATH_LIST;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.ORACLE_JDBC_URL;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.STACK_VERSION;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.USER_GROUPS;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.USER_LIST;
+
+import java.net.UnknownHostException;
+import java.util.Map;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
+import javax.inject.Inject;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.agent.stomp.dto.MetadataCluster;
+import org.apache.ambari.server.agent.stomp.dto.MetadataServiceInfo;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.configuration.AmbariConfig;
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.events.MetadataUpdateEvent;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.CommandScriptDefinition;
+import org.apache.ambari.server.state.ConfigHelper;
+import org.apache.ambari.server.state.DesiredConfig;
+import org.apache.ambari.server.state.PropertyInfo;
+import org.apache.ambari.server.state.PropertyInfo.PropertyType;
+import org.apache.ambari.server.state.SecurityType;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceInfo;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.utils.StageUtils;
+
+import com.google.gson.Gson;
+
+public class ClusterMetadataGenerator {
+
+  private final Configuration configs;
+  private final ConfigHelper configHelper;
+  private final AmbariMetaInfo ambariMetaInfo;
+  private final Gson gson;
+  private final AmbariConfig ambariConfig;
+
+  @Inject
+  public ClusterMetadataGenerator(AmbariMetaInfo metaInfo, Configuration configs, ConfigHelper configHelper, Gson gson) throws UnknownHostException {
+    this.ambariMetaInfo = metaInfo;
+    this.configs = configs;
+    this.configHelper = configHelper;
+    this.gson = gson;
+
+    ambariConfig = new AmbariConfig(configs);
+  }
+
+  public AmbariConfig getAmbariConfig() {
+    return ambariConfig;
+  }
+
+  public SortedMap<String, String> getMetadataStackLevelParams(Cluster cluster, StackId stackId) throws AmbariException {
+    SortedMap<String, String> stackLevelParams = new TreeMap<>(ambariMetaInfo.getStackSettingsNameValueMap(stackId));
+
+    // STACK_NAME is part of stack settings, but STACK_VERSION is not
+    stackLevelParams.put(STACK_VERSION, stackId.getStackVersion());
+
+    Map<String, DesiredConfig> clusterDesiredConfigs = cluster.getDesiredConfigs();
+    Set<PropertyInfo> stackProperties = ambariMetaInfo.getStackProperties(stackId.getStackName(), stackId.getStackVersion());
+    Map<String, ServiceInfo> servicesMap = ambariMetaInfo.getServices(stackId.getStackName(), stackId.getStackVersion());
+    Set<PropertyInfo> clusterProperties = ambariMetaInfo.getClusterProperties();
+
+    Map<PropertyInfo, String> users = configHelper.getPropertiesWithPropertyType(PropertyType.USER, cluster, clusterDesiredConfigs, servicesMap, stackProperties, clusterProperties);
+    Set<String> userSet = new TreeSet<>(users.values());
+    String userList = gson.toJson(userSet);
+    stackLevelParams.put(USER_LIST, userList);
+
+    Map<PropertyInfo, String> groups = configHelper.getPropertiesWithPropertyType(PropertyType.GROUP, cluster, clusterDesiredConfigs, servicesMap, stackProperties, clusterProperties);
+    Set<String> groupSet = new TreeSet<>(groups.values());
+    String groupList = gson.toJson(groupSet);
+    stackLevelParams.put(GROUP_LIST, groupList);
+
+    Map<String, Set<String>> userGroupsMap = configHelper.createUserGroupsMap(users, groups);
+    String userGroups = gson.toJson(userGroupsMap);
+    stackLevelParams.put(USER_GROUPS, userGroups);
+
+    Map<PropertyInfo, String> notManagedHdfsPathMap = configHelper.getPropertiesWithPropertyType(PropertyType.NOT_MANAGED_HDFS_PATH, cluster, clusterDesiredConfigs, servicesMap, stackProperties, clusterProperties);
+    Set<String> notManagedHdfsPathSet = configHelper.filterInvalidPropertyValues(notManagedHdfsPathMap, NOT_MANAGED_HDFS_PATH_LIST);
+    String notManagedHdfsPathList = gson.toJson(notManagedHdfsPathSet);
+    stackLevelParams.put(NOT_MANAGED_HDFS_PATH_LIST, notManagedHdfsPathList);
+
+    return stackLevelParams;
+  }
+
+  /**
+   * Collects metadata info about all clusters, for sending to the agent.
+   */
+  public MetadataUpdateEvent getClustersMetadata(Clusters clusters) throws AmbariException {
+    SortedMap<String, MetadataCluster> metadataClusters = new TreeMap<>();
+
+    for (Cluster cl : clusters.getClusters().values()) {
+      SecurityType securityType = cl.getSecurityType();
+
+      MetadataCluster metadataCluster = new MetadataCluster(securityType,
+        getMetadataServiceLevelParams(cl),
+        getMetadataClusterLevelParams(cl));
+      metadataClusters.put(Long.toString(cl.getClusterId()), metadataCluster);
+    }
+
+    return new MetadataUpdateEvent(metadataClusters, getMetadataAmbariLevelParams());
+  }
+
+  public MetadataUpdateEvent getClusterMetadata(Cluster cl) throws AmbariException {
+    SortedMap<String, MetadataCluster> metadataClusters = new TreeMap<>();
+    StackId stackId = cl.getDesiredStackVersion();
+
+    SecurityType securityType = cl.getSecurityType();
+
+    MetadataCluster metadataCluster = new MetadataCluster(securityType,
+      getMetadataServiceLevelParams(cl),
+      getMetadataClusterLevelParams(cl));
+    metadataClusters.put(Long.toString(cl.getClusterId()), metadataCluster);
+
+    return new MetadataUpdateEvent(metadataClusters, null);
+  }
+
+  public MetadataUpdateEvent getClusterMetadataOnConfigsUpdate(Cluster cl) {
+    SortedMap<String, MetadataCluster> metadataClusters = new TreeMap<>();
+
+    MetadataCluster metadataCluster = new MetadataCluster(null,
+      new TreeMap<>(),
+      getMetadataClusterLevelParams(cl));
+    metadataClusters.put(Long.toString(cl.getClusterId()), metadataCluster);
+
+    return new MetadataUpdateEvent(metadataClusters, null);
+  }
+
+  public MetadataUpdateEvent getClusterMetadataOnRepoUpdate(Cluster cl) throws AmbariException {
+    SortedMap<String, MetadataCluster> metadataClusters = new TreeMap<>();
+
+    MetadataCluster metadataCluster = new MetadataCluster(null,
+      getMetadataServiceLevelParams(cl),
+      new TreeMap<>());
+    metadataClusters.put(Long.toString(cl.getClusterId()), metadataCluster);
+
+    return new MetadataUpdateEvent(metadataClusters, null);
+  }
+
+  public MetadataUpdateEvent getClusterMetadataOnServiceInstall(Cluster cl, String serviceName) throws AmbariException {
+    SortedMap<String, MetadataCluster> metadataClusters = new TreeMap<>();
+
+    MetadataCluster metadataCluster = new MetadataCluster(null,
+      getMetadataServiceLevelParams(cl.getService(serviceName)),
+      new TreeMap<>());
+    metadataClusters.put(Long.toString(cl.getClusterId()), metadataCluster);
+
+    return new MetadataUpdateEvent(metadataClusters, null);
+  }
+
+  private SortedMap<String, String> getMetadataClusterLevelParams(Cluster cluster) {
+    TreeMap<String, String> clusterLevelParams = new TreeMap<>();
+    clusterLevelParams.put(HOOKS_FOLDER, configs.getProperty(Configuration.HOOKS_FOLDER));
+    return clusterLevelParams;
+  }
+
+  public SortedMap<String, MetadataServiceInfo> getMetadataServiceLevelParams(Cluster cluster) throws AmbariException {
+    SortedMap<String, MetadataServiceInfo> serviceLevelParams = new TreeMap<>();
+    for (Map.Entry<String, Service> serviceEntry : cluster.getServices().entrySet()) {
+      Service service = serviceEntry.getValue();
+      serviceLevelParams.putAll(getMetadataServiceLevelParams(service));
+    }
+    return serviceLevelParams;
+  }
+
+  public SortedMap<String, MetadataServiceInfo> getMetadataServiceLevelParams(Service service) throws AmbariException {
+    SortedMap<String, MetadataServiceInfo> serviceLevelParams = new TreeMap<>();
+
+    StackId serviceStackId = service.getStackId();
+
+    ServiceInfo serviceInfo = ambariMetaInfo.getService(serviceStackId.getStackName(),
+      serviceStackId.getStackVersion(), service.getName());
+    Long statusCommandTimeout = null;
+    if (serviceInfo.getCommandScript() != null) {
+      statusCommandTimeout = new Long(getStatusCommandTimeout(serviceInfo));
+    }
+
+    String servicePackageFolder = serviceInfo.getServicePackageFolder();
+
+    serviceLevelParams.put(serviceInfo.getName(), new MetadataServiceInfo(serviceInfo.getVersion(),
+      serviceInfo.isCredentialStoreEnabled(), statusCommandTimeout, servicePackageFolder));
+
+    return serviceLevelParams;
+  }
+
+  public TreeMap<String, String> getMetadataAmbariLevelParams() throws AmbariException {
+    TreeMap<String, String> ambariLevelParams = new TreeMap<>();
+    ambariLevelParams.put(JDK_LOCATION, ambariConfig.getJdkResourceUrl());
+    ambariLevelParams.put(JAVA_HOME, ambariConfig.getJavaHome());
+    ambariLevelParams.put(JAVA_VERSION, String.valueOf(configs.getJavaVersion()));
+    ambariLevelParams.put(JDK_NAME, ambariConfig.getJDKName());
+    ambariLevelParams.put(JCE_NAME, ambariConfig.getJCEName());
+    ambariLevelParams.put(DB_NAME, ambariConfig.getServerDB());
+    ambariLevelParams.put(MYSQL_JDBC_URL, ambariConfig.getMysqljdbcUrl());
+    ambariLevelParams.put(ORACLE_JDBC_URL, ambariConfig.getOjdbcUrl());
+    ambariLevelParams.put(DB_DRIVER_FILENAME, configs.getMySQLJarName());
+    ambariLevelParams.put(HOST_SYS_PREPPED, configs.areHostsSysPrepped());
+    ambariLevelParams.put(AGENT_STACK_RETRY_ON_UNAVAILABILITY, configs.isAgentStackRetryOnInstallEnabled());
+    ambariLevelParams.put(AGENT_STACK_RETRY_COUNT, configs.getAgentStackRetryOnInstallCount());
+
+    boolean serverUseSsl = configs.getApiSSLAuthentication();
+    int port = serverUseSsl ? configs.getClientSSLApiPort() : configs.getClientApiPort();
+    ambariLevelParams.put(AMBARI_SERVER_HOST, StageUtils.getHostName());
+    ambariLevelParams.put(AMBARI_SERVER_PORT, Integer.toString(port));
+    ambariLevelParams.put(AMBARI_SERVER_USE_SSL, Boolean.toString(serverUseSsl));
+
+    for (Map.Entry<String, String> dbConnectorName : configs.getDatabaseConnectorNames().entrySet()) {
+      ambariLevelParams.put(dbConnectorName.getKey(), dbConnectorName.getValue());
+    }
+    for (Map.Entry<String, String> previousDBConnectorName : configs.getPreviousDatabaseConnectorNames().entrySet()) {
+      ambariLevelParams.put(previousDBConnectorName.getKey(), previousDBConnectorName.getValue());
+    }
+    ambariLevelParams.put(GPL_LICENSE_ACCEPTED, configs.getGplLicenseAccepted().toString());
+
+    return ambariLevelParams;
+  }
+
+  public String getStatusCommandTimeout(ServiceInfo serviceInfo) throws AmbariException {
+    String commandTimeout = configs.getDefaultAgentTaskTimeout(false);
+
+    if (serviceInfo.getSchemaVersion().equals(AmbariMetaInfo.SCHEMA_VERSION_2)) {
+      // The service check command is not a custom command
+      CommandScriptDefinition script = serviceInfo.getCommandScript();
+      if (script != null) {
+        if (script.getTimeout() > 0) {
+          commandTimeout = String.valueOf(script.getTimeout());
+        }
+      } else {
+        String message = String.format("Service %s has no command script " +
+          "defined. It is not possible to run service check" +
+          " for this service", serviceInfo.getName());
+        throw new AmbariException(message);
+      }
+    }
+
+    // Try to apply overridden service check timeout value if available
+    Long overriddenTimeout = configs.getAgentServiceCheckTaskTimeout();
+    if (!overriddenTimeout.equals(Configuration.AGENT_SERVICE_CHECK_TASK_TIMEOUT.getDefaultValue())) {
+      commandTimeout = String.valueOf(overriddenTimeout);
+    }
+    return commandTimeout;
+  }
+}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java
index 77a1e20..4070eac 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java
@@ -35,8 +35,8 @@ import org.apache.ambari.server.agent.stomp.MetadataHolder;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.AmbariManagementControllerImpl;
 import org.apache.ambari.server.controller.ConfigurationRequest;
+import org.apache.ambari.server.metadata.ClusterMetadataGenerator;
 import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.serveraction.ServerAction;
 import org.apache.ambari.server.state.Cluster;
@@ -127,7 +127,7 @@ public class ConfigureAction extends AbstractUpgradeServerAction {
   private Gson m_gson;
 
   @Inject
-  private Provider<AmbariManagementControllerImpl> m_ambariManagementController;
+  private Provider<ClusterMetadataGenerator> metadataGenerator;
 
   @Inject
   private Provider<MetadataHolder> m_metadataHolder;
@@ -542,7 +542,7 @@ public class ConfigureAction extends AbstractUpgradeServerAction {
       config.setProperties(newValues);
       config.save();
 
-      m_metadataHolder.get().updateData(m_ambariManagementController.get().getClusterMetadataOnConfigsUpdate(cluster));
+      m_metadataHolder.get().updateData(metadataGenerator.get().getClusterMetadataOnConfigsUpdate(cluster));
       m_agentConfigsHolder.get().updateData(cluster.getClusterId(), null);
 
       return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", outputBuffer.toString(), "");
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
index 3cd15ae..b69243d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
@@ -40,11 +40,11 @@ import org.apache.ambari.server.agent.stomp.dto.ClusterConfigs;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.AmbariManagementControllerImpl;
 import org.apache.ambari.server.events.AgentConfigsUpdateEvent;
 import org.apache.ambari.server.events.HostComponentUpdate;
 import org.apache.ambari.server.events.HostComponentsUpdateEvent;
 import org.apache.ambari.server.events.publishers.StateUpdateEventPublisher;
+import org.apache.ambari.server.metadata.ClusterMetadataGenerator;
 import org.apache.ambari.server.orm.dao.ClusterDAO;
 import org.apache.ambari.server.orm.dao.ServiceConfigDAO;
 import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
@@ -137,7 +137,7 @@ public class ConfigHelper {
   private Provider<AgentConfigsHolder> m_agentConfigsHolder;
 
   @Inject
-  private Provider<AmbariManagementControllerImpl> m_ambariManagementController;
+  private Provider<ClusterMetadataGenerator> metadataGenerator;
 
   @Inject
   private StateUpdateEventPublisher stateUpdateEventPublisher;
@@ -709,14 +709,18 @@ public class ConfigHelper {
     Cluster cluster, Map<String, DesiredConfig> desiredConfigs,
     Map<String, ServiceInfo> servicesMap, Set<PropertyInfo> stackProperties, Set<PropertyInfo> clusterProperties) throws AmbariException {
 
-    Map<String, Set<String>> userGroupsMap = new HashMap<>();
     Map<PropertyInfo, String> userProperties = getPropertiesWithPropertyType(
       PropertyType.USER, cluster, desiredConfigs, servicesMap, stackProperties, clusterProperties);
     Map<PropertyInfo, String> groupProperties = getPropertiesWithPropertyType(
       PropertyType.GROUP, cluster, desiredConfigs, servicesMap, stackProperties, clusterProperties);
 
+    return createUserGroupsMap(userProperties, groupProperties);
+  }
+
+  public Map<String, Set<String>> createUserGroupsMap(Map<PropertyInfo, String> userProperties, Map<PropertyInfo, String> groupProperties) {
+    Map<String, Set<String>> userGroupsMap = new HashMap<>();
     if(userProperties != null && groupProperties != null) {
-      for(Map.Entry<PropertyInfo, String> userProperty : userProperties.entrySet()) {
+      for(Entry<PropertyInfo, String> userProperty : userProperties.entrySet()) {
         PropertyInfo userPropertyInfo = userProperty.getKey();
          String userPropertyValue = userProperty.getValue();
         if(userPropertyInfo.getPropertyValueAttributes() != null
@@ -725,7 +729,7 @@ public class ConfigHelper {
           Collection<UserGroupInfo> userGroupEntries = userPropertyInfo.getPropertyValueAttributes().getUserGroupEntries();
           for (UserGroupInfo userGroupInfo : userGroupEntries) {
             boolean found = false;
-            for(Map.Entry<PropertyInfo, String> groupProperty : groupProperties.entrySet()) {
+            for(Entry<PropertyInfo, String> groupProperty : groupProperties.entrySet()) {
               PropertyInfo groupPropertyInfo = groupProperty.getKey();
               String groupPropertyValue = groupProperty.getValue();
               if(StringUtils.equals(userGroupInfo.getType(),
@@ -1196,7 +1200,7 @@ public class ConfigHelper {
       || !Maps.difference(oldConfigProperties, properties).areEqual()) {
       if (createConfigType(cluster, stackId, controller, configType, properties,
         propertiesAttributes, authenticatedUserName, serviceVersionNote)) {
-        m_metadataHolder.get().updateData(m_ambariManagementController.get().getClusterMetadataOnConfigsUpdate(cluster));
+        m_metadataHolder.get().updateData(metadataGenerator.get().getClusterMetadataOnConfigsUpdate(cluster));
         m_agentConfigsHolder.get().updateData(cluster.getClusterId(), null);
       }
     }
@@ -1208,7 +1212,7 @@ public class ConfigHelper {
 
     if (createConfigType(cluster, stackId, controller, configType, properties,
       new HashMap<>(), authenticatedUserName, serviceVersionNote)) {
-      m_metadataHolder.get().updateData(m_ambariManagementController.get().getClusterMetadataOnConfigsUpdate(cluster));
+      m_metadataHolder.get().updateData(metadataGenerator.get().getClusterMetadataOnConfigsUpdate(cluster));
       m_agentConfigsHolder.get().updateData(cluster.getClusterId(), null);
     }
   }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
index edff942..6f82357 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
@@ -55,6 +55,7 @@ import org.apache.ambari.server.controller.utilities.PredicateBuilder;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
 import org.apache.ambari.server.events.ClusterComponentsRepoChangedEvent;
 import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
+import org.apache.ambari.server.metadata.ClusterMetadataGenerator;
 import org.apache.ambari.server.orm.dao.ServiceConfigDAO;
 import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
 import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
@@ -205,6 +206,9 @@ public class UpgradeHelper {
   private Provider<AmbariManagementControllerImpl> m_controllerProvider;
 
   @Inject
+  private Provider<ClusterMetadataGenerator> metadataGenerator;
+
+  @Inject
   private Provider<MetadataHolder> m_metadataHolder;
 
   @Inject
@@ -1166,7 +1170,7 @@ public class UpgradeHelper {
       }
     }
     if (configsChanged) {
-      m_metadataHolder.get().updateData(m_controllerProvider.get().getClusterMetadataOnConfigsUpdate(cluster));
+      m_metadataHolder.get().updateData(metadataGenerator.get().getClusterMetadataOnConfigsUpdate(cluster));
       m_agentConfigsHolder.get().updateData(cluster.getClusterId(), null);
     }
   }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
index bf995be..0a591b2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
@@ -41,12 +41,12 @@ import org.apache.ambari.server.agent.DiskInfo;
 import org.apache.ambari.server.agent.stomp.MetadataHolder;
 import org.apache.ambari.server.agent.stomp.TopologyHolder;
 import org.apache.ambari.server.agent.stomp.dto.TopologyCluster;
-import org.apache.ambari.server.controller.AmbariManagementControllerImpl;
 import org.apache.ambari.server.events.HostRegisteredEvent;
 import org.apache.ambari.server.events.HostsAddedEvent;
 import org.apache.ambari.server.events.HostsRemovedEvent;
 import org.apache.ambari.server.events.TopologyUpdateEvent;
 import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
+import org.apache.ambari.server.metadata.ClusterMetadataGenerator;
 import org.apache.ambari.server.orm.dao.ClusterDAO;
 import org.apache.ambari.server.orm.dao.HostConfigMappingDAO;
 import org.apache.ambari.server.orm.dao.HostDAO;
@@ -153,7 +153,7 @@ public class ClustersImpl implements Clusters {
   private Provider<MetadataHolder> m_metadataHolder;
 
   @Inject
-  private Provider<AmbariManagementControllerImpl> m_ambariManagementController;
+  private Provider<ClusterMetadataGenerator> metadataGenerator;
 
   @Inject
   public ClustersImpl(ClusterDAO clusterDAO, ClusterFactory clusterFactory, HostDAO hostDAO,
@@ -367,7 +367,7 @@ public class ClustersImpl implements Clusters {
     TopologyUpdateEvent topologyUpdateEvent = new TopologyUpdateEvent(addedClusters,
         TopologyUpdateEvent.EventType.UPDATE);
     m_topologyHolder.get().updateData(topologyUpdateEvent);
-    m_metadataHolder.get().updateData(m_ambariManagementController.get().getClusterMetadata(cluster));
+    m_metadataHolder.get().updateData(metadataGenerator.get().getClusterMetadata(cluster));
   }
 
   @Override
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
index 198087d..f2fe789 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
@@ -52,6 +52,7 @@ import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.configuration.Configuration.DatabaseType;
 import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.controller.AmbariManagementControllerImpl;
+import org.apache.ambari.server.metadata.ClusterMetadataGenerator;
 import org.apache.ambari.server.orm.DBAccessor;
 import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
 import org.apache.ambari.server.orm.dao.ArtifactDAO;
@@ -642,7 +643,8 @@ public abstract class AbstractUpgradeCatalog implements UpgradeCatalog {
             }
             MetadataHolder metadataHolder = injector.getInstance(MetadataHolder.class);
             AgentConfigsHolder agentConfigsHolder = injector.getInstance(AgentConfigsHolder.class);
-            metadataHolder.updateData(controller.getClusterMetadataOnConfigsUpdate(cluster));
+            ClusterMetadataGenerator metadataGenerator = injector.getInstance(ClusterMetadataGenerator.class);
+            metadataHolder.updateData(metadataGenerator.getClusterMetadataOnConfigsUpdate(cluster));
             agentConfigsHolder.updateData(cluster.getClusterId(), null);
           }
         } else {
diff --git a/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/params.py
index ddd8c45..546a230 100644
--- a/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/params.py
+++ b/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/params.py
@@ -30,16 +30,9 @@ from resource_management.libraries.functions import format
 from resource_management.libraries.functions.format_jvm_option import format_jvm_option_value
 from resource_management.libraries.functions.is_empty import is_empty
 from resource_management.libraries.functions.version import format_stack_version
-from resource_management.libraries.functions.expect import expect
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.stack_features import get_stack_feature_version
 from resource_management.libraries.functions.get_architecture import get_architecture
 from resource_management.libraries.functions.cluster_settings import get_cluster_setting_value
-from ambari_commons.constants import AMBARI_SUDO_BINARY, HADOOP_CLIENTS_MODULE_NAME, HADOOP_CLIENT_COMPONENT_TYPE
-from resource_management.libraries.functions.mpack_manager_helper import get_component_conf_path, get_component_home_path
-from resource_management.libraries.execution_command import execution_command
-from resource_management.libraries.execution_command import module_configs
+from ambari_commons.constants import AMBARI_SUDO_BINARY
 
 config = Script.get_config()
 execution_command = Script.get_execution_command()
@@ -172,7 +165,6 @@ mapred_log_dir_prefix = module_configs.get_property_value(module_name, 'mapred-e
 hadoop_env_sh_template = module_configs.get_property_value(module_name, 'hadoop-env', 'content')
 
 #users and groups
-hbase_user = module_configs.get_property_value(module_name, 'hbase-env', 'hbase_user')
 smoke_user =  get_cluster_setting_value('smokeuser')
 gmetad_user = module_configs.get_property_value(module_name, 'ganglia-env', 'gmetad_user')
 gmond_user = module_configs.get_property_value(module_name, 'ganglia-env', 'gmond_user')
@@ -187,25 +179,21 @@ user_group = get_cluster_setting_value('user_group')
 
 ganglia_server_hosts = execution_command.get_component_hosts('ganglia_server')
 namenode_host = execution_command.get_component_hosts('namenode')
-hbase_master_hosts = execution_command.get_component_hosts('hbase_master')
 oozie_servers = execution_command.get_component_hosts('oozie_server')
 falcon_server_hosts = execution_command.get_component_hosts('falcon_server')
 ranger_admin_hosts = execution_command.get_component_hosts('ranger_admin')
 zeppelin_master_hosts = execution_command.get_component_hosts('zeppelin_master')
 
 # get the correct version to use for checking stack features
-version_for_stack_feature_checks = get_stack_feature_version(config)
 
 
 has_namenode = not len(namenode_host) == 0
 has_ganglia_server = not len(ganglia_server_hosts) == 0
 has_tez = bool(module_configs.get_all_properties(module_name, 'tez-site'))
-has_hbase_masters = not len(hbase_master_hosts) == 0
 has_oozie_server = not len(oozie_servers) == 0
 has_falcon_server_hosts = not len(falcon_server_hosts) == 0
 has_ranger_admin = not len(ranger_admin_hosts) == 0
 has_zeppelin_master = not len(zeppelin_master_hosts) == 0
-stack_supports_zk_security = check_stack_feature(StackFeature.SECURE_ZOOKEEPER, version_for_stack_feature_checks)
 
 # HDFS High Availability properties
 dfs_ha_enabled = False
@@ -219,14 +207,6 @@ if dfs_ha_namenode_ids:
   if dfs_ha_namenode_ids_array_len > 1:
     dfs_ha_enabled = True
 
-# if has_namenode or dfs_type == 'HCFS':
-#     hadoop_conf_dir = get_component_conf_path(mpack_name=mpack_name, instance_name=mpack_instance_name,
-#                                               module_name=HADOOP_CLIENTS_MODULE_NAME,
-#                                               components_instance_type=HADOOP_CLIENT_COMPONENT_TYPE)
-#     hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure")
-
-hbase_tmp_dir = "/tmp/hbase-hbase"
-
 proxyuser_group = module_configs.get_property_value(module_name, 'hadoop-env', 'proxyuser_group', 'users')
 ranger_group = module_configs.get_property_value(module_name, 'ranger-env', 'ranger_group')
 dfs_cluster_administrators_group = module_configs.get_property_value(module_name, 'hdfs-site', 'dfs.cluster.administrators')
@@ -236,8 +216,6 @@ ignore_groupsusers_create = get_cluster_setting_value('ignore_groupsusers_create
 fetch_nonlocal_groups = get_cluster_setting_value('fetch_nonlocal_groups')
 
 smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
-if has_hbase_masters:
-  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
 #repo params
 repo_info = execution_command.get_repo_info()
 service_repo_info = execution_command.get_service_repo_info()
@@ -260,7 +238,3 @@ host_sys_prepped = execution_command.is_host_system_prepared()
 
 tez_am_view_acls = module_configs.get_property_value(module_name, 'tez-site', 'tez.am.view-acls')
 override_uid = get_cluster_setting_value('override_uid')
-
-# if NN HA on secure clutser, access Zookeper securely
-# if stack_supports_zk_security and dfs_ha_enabled and security_enabled:
-#     hadoop_zkfc_opts=format("-Dzookeeper.sasl.client=true -Dzookeeper.sasl.client.username=zookeeper -Djava.security.auth.login.config={hadoop_conf_secure_dir}/hdfs_jaas.conf -Dzookeeper.sasl.clientconfig=Client")
diff --git a/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/shared_initialization.py
index 373632c..d32dd8f 100644
--- a/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/shared_initialization.py
@@ -19,14 +19,10 @@ limitations under the License.
 
 import os
 import re
-import getpass
 import tempfile
 from copy import copy
-from resource_management.libraries.functions.version import compare_versions
 from resource_management import *
 from resource_management.core import shell
-from resource_management.libraries.execution_command import execution_command
-from resource_management.libraries.execution_command import module_configs
 
 def setup_users():
   """
@@ -62,50 +58,6 @@ def setup_users():
     pass
 
 
-  if params.has_hbase_masters:
-    Directory (params.hbase_tmp_dir,
-               owner = params.hbase_user,
-               mode=0775,
-               create_parents = True,
-               cd_access="a",
-    )
-
-    if params.override_uid == "true":
-      set_uid(params.hbase_user, params.hbase_user_dirs)
-    else:
-      Logger.info('Skipping setting uid for hbase user as host is sys prepped')
-
-  if should_create_users_and_groups:
-    if params.has_namenode:
-      create_dfs_cluster_admins()
-    if params.has_tez and params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.3') >= 0:
-      create_tez_am_view_acls()
-  else:
-    Logger.info('Skipping setting dfs cluster admin and tez view acls as host is sys prepped')
-
-def create_dfs_cluster_admins():
-  """
-  dfs.cluster.administrators support format <comma-delimited list of usernames><space><comma-delimited list of group names>
-  """
-  import params
-
-  groups_list = create_users_and_groups(params.dfs_cluster_administrators_group)
-
-  User(params.hdfs_user,
-    groups = params.user_to_groups_dict[params.hdfs_user] + groups_list,
-    fetch_nonlocal_groups = params.fetch_nonlocal_groups
-  )
-
-def create_tez_am_view_acls():
-
-  """
-  tez.am.view-acls support format <comma-delimited list of usernames><space><comma-delimited list of group names>
-  """
-  import params
-
-  if not params.tez_am_view_acls.startswith("*"):
-    create_users_and_groups(params.tez_am_view_acls)
-
 def create_users_and_groups(user_and_groups):
 
   import params
@@ -178,35 +130,6 @@ def get_uid(user, return_existing=False):
       # do not return UID for existing user, used in User resource call to let OS to choose UID for us
       return None
 
-def setup_hadoop_env():
-  import params
-  stackversion = params.stack_version_unformatted
-  Logger.info("FS Type: {0}".format(params.dfs_type))
-  if params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS':
-    if params.security_enabled:
-      tc_owner = "root"
-    else:
-      tc_owner = params.hdfs_user
-
-    # create /etc/hadoop
-    Directory(params.hadoop_dir, mode=0755)
-
-    #Write out the conf directory
-    #TODO: Change with instance manager
-    Directory(params.hadoop_conf_dir, mode=0755)
-    # write out hadoop-env.sh, but only if the directory exists
-    if os.path.exists(params.hadoop_conf_dir):
-      File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'), owner=tc_owner,
-        group=params.user_group,
-        content=InlineTemplate(params.hadoop_env_sh_template))
-
-    # Create tmp dir for java.io.tmpdir
-    # Handle a situation when /tmp is set to noexec
-    Directory(params.hadoop_java_io_tmpdir,
-              owner=params.hdfs_user,
-              group=params.user_group,
-              mode=01777
-    )
 
 def setup_java():
   """
diff --git a/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/params.py
index c68fb60..20c0459 100644
--- a/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/params.py
@@ -18,14 +18,10 @@ limitations under the License.
 """
 
 from ambari_commons.constants import AMBARI_SUDO_BINARY
-from resource_management.libraries.functions.version import format_stack_version, compare_versions
-from resource_management.core.system import System
+from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import default, format
-from resource_management.libraries.functions.expect import expect
+from resource_management.libraries.functions import format
 from resource_management.libraries.functions.cluster_settings import get_cluster_setting_value
-from resource_management.libraries.execution_command import execution_command
-from resource_management.libraries.execution_command import module_configs
 
 config = Script.get_config()
 execution_command = Script.get_execution_command()
@@ -40,7 +36,6 @@ agent_stack_retry_count = execution_command.get_agent_stack_retry_count()
 stack_version_formatted = format_stack_version(stack_version_unformatted)
 
 #users and groups
-hbase_user = module_configs.get_property_value(module_name, 'hbase-env', 'hbase_user')
 smoke_user = get_cluster_setting_value('smokeuser')
 gmetad_user = module_configs.get_property_value(module_name, 'ganglia-env', 'gmetad_user')
 gmond_user = module_configs.get_property_value(module_name, 'ganglia-env', 'gmond_user')
@@ -63,7 +58,6 @@ slave_hosts = execution_command.get_component_hosts('datanode')
 oozie_servers = execution_command.get_component_hosts('oozie_server')
 hcat_server_hosts = execution_command.get_component_hosts('webhcat_server')
 hive_server_host =  execution_command.get_component_hosts('hive_server')
-hbase_master_hosts = execution_command.get_component_hosts('hbase_master')
 hs_host = execution_command.get_component_hosts('historyserver')
 namenode_host = execution_command.get_component_hosts('namenode')
 zk_hosts = execution_command.get_component_hosts('zookeeper_server')
@@ -79,7 +73,6 @@ has_slaves = not len(slave_hosts) == 0
 has_oozie_server = not len(oozie_servers)  == 0
 has_hcat_server_host = not len(hcat_server_hosts) == 0
 has_hive_server_host = not len(hive_server_host) == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
 has_zk_host = not len(zk_hosts) == 0
 has_ganglia_server = not len(ganglia_server_hosts) == 0
 has_storm_server = not len(storm_server_hosts) == 0
@@ -89,13 +82,10 @@ has_tez = bool(module_configs.get_all_properties(module_name, 'tez-site'))
 is_namenode_master = hostname in namenode_host
 is_rmnode_master = hostname in rm_host
 is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
 is_slave = hostname in slave_hosts
 if has_ganglia_server:
   ganglia_server_host = ganglia_server_hosts[0]
 
-hbase_tmp_dir = "/tmp/hbase-hbase"
-
 #security params
 security_enabled = get_cluster_setting_value('security_enabled')
 
@@ -110,8 +100,6 @@ ignore_groupsusers_create = get_cluster_setting_value('ignore_groupsusers_create
 host_sys_prepped = execution_command.is_host_system_prepared()
 
 smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
-if has_hbase_masters:
-  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
 #repo params
 repo_info = execution_command.get_repo_info()
 service_repo_info = execution_command.get_service_repo_info()
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/custom_extensions.py b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/custom_extensions.py
index ab9646b..e108cf8 100644
--- a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/custom_extensions.py
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/custom_extensions.py
@@ -23,19 +23,16 @@ from resource_management.core.resources import Directory
 from resource_management.core.resources import Execute
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions import format
-from resource_management.libraries.execution_command import execution_command
-from resource_management.libraries.execution_command import module_configs
 
 
 DEFAULT_HADOOP_HDFS_EXTENSION_DIR = "/hdp/ext/{0}/hadoop"
 DEFAULT_HADOOP_HIVE_EXTENSION_DIR = "/hdp/ext/{0}/hive"
-DEFAULT_HADOOP_HBASE_EXTENSION_DIR = "/hdp/ext/{0}/hbase"
 
 def setup_extensions():
   """
   The goal of this method is to distribute extensions (for example jar files) from
   HDFS (/hdp/ext/{major_stack_version}/{service_name}) to all nodes which contain related
-  components of service (YARN, HIVE or HBASE). Extensions should be added to HDFS by
+  components of service (YARN, HIVE). Extensions should be added to HDFS by
   user manually.
   """
 
@@ -61,32 +58,6 @@ def setup_extensions():
 
   setup_extensions_hive()
 
-  hbase_custom_extensions_services = []
-  hbase_custom_extensions_services.append("HBASE")
-  if params.current_service in hbase_custom_extensions_services:
-    setup_hbase_extensions()
-
-
-def setup_hbase_extensions():
-  import params
-
-  # HBase Custom extensions
-  hbase_custom_extensions_enabled = params.module_configs.get_property_value(params.module_name, 'hbase-site', 'hbase.custom-extensions.enabled', False)
-  hbase_custom_extensions_owner = params.module_configs.get_property_value(params.module_name, 'hbase-site', 'hbase.custom-extensions.owner', params.hdfs_user)
-  hbase_custom_extensions_hdfs_dir = get_config_formatted_value(params.module_configs.get_property_value(params.module_name, 'hbase-site', 'hbase.custom-extensions.root',
-                                                DEFAULT_HADOOP_HBASE_EXTENSION_DIR.format(params.major_stack_version)))
-  hbase_custom_extensions_local_dir = "{0}/current/ext/hbase".format(Script.get_stack_root())
-
-  impacted_components = ['HBASE_MASTER', 'HBASE_REGIONSERVER', 'PHOENIX_QUERY_SERVER'];
-  role = params.execution_command.get_component_type()
-
-  if role in impacted_components:
-    clean_extensions(hbase_custom_extensions_local_dir)
-    if hbase_custom_extensions_enabled:
-      download_extensions(hbase_custom_extensions_owner, params.user_group,
-                          hbase_custom_extensions_hdfs_dir,
-                          hbase_custom_extensions_local_dir)
-
 
 def setup_extensions_hive():
   import params
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py
index 94ef4e6..0569615 100644
--- a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py
@@ -21,11 +21,9 @@ import os
 
 from resource_management.libraries.functions import conf_select
 from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import default
 from resource_management.libraries.functions.format_jvm_option import format_jvm_option_value
 from resource_management.libraries.functions import format
-from resource_management.libraries.functions.version import format_stack_version, compare_versions, get_major_version
-from ambari_commons.os_check import OSCheck
+from resource_management.libraries.functions.version import format_stack_version, get_major_version
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
@@ -33,8 +31,6 @@ from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions.stack_features import check_stack_feature
 from resource_management.libraries.functions.stack_features import get_stack_feature_version
 from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.execution_command import execution_command
-from resource_management.libraries.execution_command import module_configs
 from ambari_commons.constants import AMBARI_SUDO_BINARY
 from resource_management.libraries.functions.cluster_settings import get_cluster_setting_value
 
@@ -79,7 +75,7 @@ hadoop_bin = stack_select.get_hadoop_dir("sbin")
 mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
 hadoop_home = stack_select.get_hadoop_dir("home")
 create_lib_snappy_symlinks = False
-  
+
 current_service = module_name
 
 #security params
@@ -112,7 +108,6 @@ slave_hosts = execution_command.get_component_hosts('datanode')
 oozie_servers = execution_command.get_component_hosts('oozie_server')
 hcat_server_hosts = execution_command.get_component_hosts('webhcat_server')
 hive_server_host =  execution_command.get_component_hosts('hive_server')
-hbase_master_hosts = execution_command.get_component_hosts('hbase_master')
 hs_host = execution_command.get_component_hosts('historyserver')
 namenode_host = execution_command.get_component_hosts('namenode')
 zk_hosts = execution_command.get_component_hosts('zookeeper_server')
@@ -131,7 +126,6 @@ has_slaves = not len(slave_hosts) == 0
 has_oozie_server = not len(oozie_servers) == 0
 has_hcat_server_host = not len(hcat_server_hosts) == 0
 has_hive_server_host = not len(hive_server_host) == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
 has_zk_host = not len(zk_hosts) == 0
 has_ganglia_server = not len(ganglia_server_hosts) == 0
 has_metric_collector = not len(ams_collector_hosts) == 0
@@ -139,7 +133,6 @@ has_metric_collector = not len(ams_collector_hosts) == 0
 is_namenode_master = hostname in namenode_host
 is_rmnode_master = hostname in rm_host
 is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
 is_slave = hostname in slave_hosts
 
 if has_ganglia_server:
@@ -186,7 +179,6 @@ if has_namenode or dfs_type == 'HCFS':
 
 hadoop_pid_dir_prefix = module_configs.get_property_value(module_name, 'hadoop-env', 'hadoop_pid_dir_prefix')
 hdfs_log_dir_prefix = module_configs.get_property_value(module_name, 'hadoop-env', 'hdfs_log_dir_prefix')
-hbase_tmp_dir = "/tmp/hbase-hbase"
 #db params
 oracle_driver_symlink_url = format("{ambari_server_resources_url}/oracle-jdbc-driver.jar")
 mysql_driver_symlink_url = format("{ambari_server_resources_url}/mysql-jdbc-driver.jar")
@@ -244,7 +236,7 @@ refresh_topology = execution_command.need_refresh_topology()
 ambari_java_home = execution_command.get_ambari_java_home()
 ambari_jdk_name = execution_command.get_ambari_jdk_name()
 ambari_jce_name = execution_command.get_ambari_jce_name()
-  
+
 ambari_libs_dir = "/var/lib/ambari-agent/lib"
 is_webhdfs_enabled = module_configs.get_property_value(module_name, 'hdfs-site', 'dfs.webhdfs.enabled')
 default_fs = module_configs.get_property_value(module_name, 'core-site', 'fs.defaultFS')
@@ -261,7 +253,7 @@ net_topology_script_dir = os.path.dirname(net_topology_script_file_path)
 net_topology_mapping_data_file_name = 'topology_mappings.data'
 net_topology_mapping_data_file_path = os.path.join(net_topology_script_dir, net_topology_mapping_data_file_name)
 
-#Added logic to create /tmp and /user directory for HCFS stack.  
+#Added logic to create /tmp and /user directory for HCFS stack.
 has_core_site = bool(module_configs.get_all_properties(module_name, "core-site"))
 hdfs_user_keytab = module_configs.get_property_value(module_name, 'hadoop-env', 'hdfs_user_keytab')
 kinit_path_local = get_kinit_path()
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/configuration/AmbariConfigTest.java b/ambari-server/src/test/java/org/apache/ambari/server/configuration/AmbariConfigTest.java
new file mode 100644
index 0000000..4371c0e
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/configuration/AmbariConfigTest.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.configuration;
+
+import static org.junit.Assert.assertEquals;
+
+import org.junit.Test;
+
+public class AmbariConfigTest {
+
+  @Test
+  public void http() {
+    assertEquals("http://hostname:8080/jdk_path", AmbariConfig.getAmbariServerURI("/jdk_path", "http", "hostname", 8080));
+  }
+
+  @Test
+  public void https() {
+    assertEquals("https://somesecuredhost:8443/mysql_path", AmbariConfig.getAmbariServerURI("/mysql_path", "https", "somesecuredhost", 8443));
+  }
+
+  @Test
+  public void longerPath() {
+    assertEquals("https://othersecuredhost:8443/oracle/ojdbc/", AmbariConfig.getAmbariServerURI("/oracle/ojdbc/", "https", "othersecuredhost", 8443));
+  }
+
+  @Test
+  public void withQueryString() {
+    assertEquals("http://hostname:8080/jdk_path?query", AmbariConfig.getAmbariServerURI("/jdk_path?query", "http", "hostname", 8080));
+  }
+}
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperTest.java
index 9179d71..d1e0d21 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperTest.java
@@ -17,8 +17,21 @@
  */
 package org.apache.ambari.server.controller;
 
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.DB_DRIVER_FILENAME;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.HOST_SYS_PREPPED;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JAVA_VERSION;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.NOT_MANAGED_HDFS_PATH_LIST;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.STACK_NAME;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.STACK_VERSION;
+import static org.easymock.EasyMock.createMockBuilder;
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
 
+import java.lang.reflect.Field;
 import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -41,6 +54,7 @@ import org.apache.ambari.server.actionmanager.Request;
 import org.apache.ambari.server.actionmanager.Stage;
 import org.apache.ambari.server.agent.CommandRepository;
 import org.apache.ambari.server.agent.ExecutionCommand;
+import org.apache.ambari.server.configuration.AmbariConfig;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.internal.ComponentResourceProviderTest;
 import org.apache.ambari.server.controller.internal.RequestOperationLevel;
@@ -48,6 +62,7 @@ import org.apache.ambari.server.controller.internal.RequestResourceFilter;
 import org.apache.ambari.server.controller.internal.ServiceResourceProviderTest;
 import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.metadata.ActionMetadata;
+import org.apache.ambari.server.metadata.ClusterMetadataGenerator;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.orm.OrmTestHelper;
@@ -88,6 +103,7 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.springframework.security.core.context.SecurityContextHolder;
 
+import com.google.gson.Gson;
 import com.google.inject.Guice;
 import com.google.inject.Injector;
 
@@ -136,12 +152,12 @@ public class AmbariCustomCommandExecutionHelperTest {
     ambariManagementController = injector.getInstance(AmbariManagementController.class);
     clusters = injector.getInstance(Clusters.class);
 
-    EasyMock.expect(configHelper.getPropertyValuesWithPropertyType(EasyMock.anyObject(StackId.class),
+    expect(configHelper.getPropertyValuesWithPropertyType(EasyMock.anyObject(StackId.class),
         EasyMock.anyObject(PropertyInfo.PropertyType.class),
         EasyMock.anyObject(Cluster.class),
         EasyMock.anyObject(Map.class))).andReturn(Collections.EMPTY_SET);
 
-    EasyMock.replay(configHelper);
+    replay(configHelper);
 
     StageUtils.setTopologyManager(injector.getInstance(TopologyManager.class));
     StageUtils.setConfiguration(injector.getInstance(Configuration.class));
@@ -152,14 +168,14 @@ public class AmbariCustomCommandExecutionHelperTest {
     EasyMock.verify(configHelper);
     EasyMock.reset(configHelper);
 
-    EasyMock.expect(hostRoleCommand.getTaskId()).andReturn(1L);
-    EasyMock.expect(hostRoleCommand.getStageId()).andReturn(1L);
-    EasyMock.expect(hostRoleCommand.getRoleCommand()).andReturn(RoleCommand.CUSTOM_COMMAND);
-    EasyMock.expect(hostRoleCommand.getRole()).andReturn(Role.AMBARI_SERVER_ACTION);
-    EasyMock.expect(hostRoleCommand.getStatus()).andReturn(HostRoleStatus.PENDING);
+    expect(hostRoleCommand.getTaskId()).andReturn(1L);
+    expect(hostRoleCommand.getStageId()).andReturn(1L);
+    expect(hostRoleCommand.getRoleCommand()).andReturn(RoleCommand.CUSTOM_COMMAND);
+    expect(hostRoleCommand.getRole()).andReturn(Role.AMBARI_SERVER_ACTION);
+    expect(hostRoleCommand.getStatus()).andReturn(HostRoleStatus.PENDING);
 
-    EasyMock.expect(actionManager.getNextRequestId()).andReturn(1L).anyTimes();
-    EasyMock.expect(actionManager.getRequestTasks(1L)).andReturn(Collections.singletonList(hostRoleCommand));
+    expect(actionManager.getNextRequestId()).andReturn(1L).anyTimes();
+    expect(actionManager.getRequestTasks(1L)).andReturn(Collections.singletonList(hostRoleCommand));
 
     StackInfo stackInfo = new StackInfo();
     stackInfo.setName("HDP");
@@ -190,11 +206,11 @@ public class AmbariCustomCommandExecutionHelperTest {
     Map<String, Set<String>> userGroupsMap = new HashMap<>();
     userGroupsMap.put("zookeeperUser", new HashSet<>(Arrays.asList("zookeeperGroup")));
     Cluster cluster = clusters.getCluster("c1");
-    EasyMock.expect(configHelper.getPropertiesWithPropertyType(
+    expect(configHelper.getPropertiesWithPropertyType(
       stackId, PropertyInfo.PropertyType.USER, cluster, desiredConfigMap)).andReturn(userProperties).anyTimes();
-    EasyMock.expect(configHelper.getPropertiesWithPropertyType(
+    expect(configHelper.getPropertiesWithPropertyType(
       stackId, PropertyInfo.PropertyType.GROUP, cluster, desiredConfigMap)).andReturn(groupProperties).anyTimes();
-    EasyMock.expect(configHelper.createUserGroupsMap(stackId, cluster, desiredConfigMap)).andReturn(userGroupsMap).anyTimes();
+    expect(configHelper.createUserGroupsMap(stackId, cluster, desiredConfigMap)).andReturn(userGroupsMap).anyTimes();
 
     actionManager.sendActions(EasyMock.capture(requestCapture), EasyMock.anyObject(ExecuteActionRequest.class));
     EasyMock.expectLastCall();
@@ -224,7 +240,7 @@ public class AmbariCustomCommandExecutionHelperTest {
         }, false);
     actionRequest.getResourceFilters().add(new RequestResourceFilter("CORE", "YARN", "RESOURCEMANAGER", Collections.singletonList("c1-c6401")));
 
-    EasyMock.replay(hostRoleCommand, actionManager, configHelper);
+    replay(hostRoleCommand, actionManager, configHelper);
 
     createServiceComponentHosts("c1", "CORE", "c1");
 
@@ -271,7 +287,7 @@ public class AmbariCustomCommandExecutionHelperTest {
        new RequestOperationLevel(Resource.Type.Service, "c1", "CORE", "GANGLIA", null, null),
       new HashMap<>(), false);
 
-    EasyMock.replay(hostRoleCommand, actionManager, configHelper);
+    replay(hostRoleCommand, actionManager, configHelper);
 
     createServiceComponentHosts("c1", "CORE", "c1");
 
@@ -309,7 +325,7 @@ public class AmbariCustomCommandExecutionHelperTest {
         new RequestOperationLevel(Resource.Type.Service, "c1", "CORE", "GANGLIA", null, null),
       new HashMap<>(), false);
 
-    EasyMock.replay(hostRoleCommand, actionManager, configHelper);
+    replay(hostRoleCommand, actionManager, configHelper);
 
     createServiceComponentHosts("c1", "CORE", "c1");
 
@@ -345,7 +361,7 @@ public class AmbariCustomCommandExecutionHelperTest {
         new RequestOperationLevel(Resource.Type.Host, "c1", "CORE", "GANGLIA", null, null),
       new HashMap<>(), false);
 
-    EasyMock.replay(hostRoleCommand, actionManager, configHelper);
+    replay(hostRoleCommand, actionManager, configHelper);
 
     createServiceComponentHosts("c1", "CORE", "c1");
 
@@ -398,7 +414,7 @@ public class AmbariCustomCommandExecutionHelperTest {
         new RequestOperationLevel(Resource.Type.Service, "c1", "CORE", "ZOOKEEPER", null, null),
       new HashMap<>(), false);
 
-    EasyMock.replay(hostRoleCommand, actionManager, configHelper);
+    replay(hostRoleCommand, actionManager, configHelper);
     ambariManagementController.createAction(actionRequest, requestProperties);
     Assert.fail(
         "Expected an exception since there are no hosts which can run the ZK service check");
@@ -438,7 +454,7 @@ public class AmbariCustomCommandExecutionHelperTest {
         new RequestOperationLevel(Resource.Type.Service, "c1", "CORE", "ZOOKEEPER", null, null),
       new HashMap<>(), false);
 
-    EasyMock.replay(hostRoleCommand, actionManager, configHelper);
+    replay(hostRoleCommand, actionManager, configHelper);
     ambariManagementController.createAction(actionRequest, requestProperties);
     Assert.fail("Expected an exception since there are no hosts which can run the ZK service check");
   }
@@ -473,16 +489,16 @@ public class AmbariCustomCommandExecutionHelperTest {
     ExecutionCommand execCmd = EasyMock.niceMock(ExecutionCommand.class);
     Capture<Map<String,String>> timeOutCapture = EasyMock.newCapture();
 
-    EasyMock.expect(stage.getClusterName()).andReturn("c1");
+    expect(stage.getClusterName()).andReturn("c1");
 
-    EasyMock.expect(stage.getExecutionCommandWrapper(EasyMock.eq("c1-c6401"), EasyMock.anyString())).andReturn(execCmdWrapper);
-    EasyMock.expect(execCmdWrapper.getExecutionCommand()).andReturn(execCmd);
+    expect(stage.getExecutionCommandWrapper(EasyMock.eq("c1-c6401"), EasyMock.anyString())).andReturn(execCmdWrapper);
+    expect(execCmdWrapper.getExecutionCommand()).andReturn(execCmd);
     execCmd.setCommandParams(EasyMock.capture(timeOutCapture));
     EasyMock.expectLastCall();
 
     HashSet<String> localComponents = new HashSet<>();
-    EasyMock.expect(execCmd.getLocalComponents()).andReturn(localComponents).anyTimes();
-    EasyMock.replay(configHelper, stage, execCmdWrapper, execCmd);
+    expect(execCmd.getLocalComponents()).andReturn(localComponents).anyTimes();
+    replay(configHelper, stage, execCmdWrapper, execCmd);
 
     createServiceComponentHosts("c1", "CORE", "c1");
 
@@ -525,16 +541,16 @@ public class AmbariCustomCommandExecutionHelperTest {
     ExecutionCommandWrapper execCmdWrapper = EasyMock.niceMock(ExecutionCommandWrapper.class);
     ExecutionCommand execCmd = EasyMock.niceMock(ExecutionCommand.class);
 
-    EasyMock.expect(stage.getClusterName()).andReturn("c1");
+    expect(stage.getClusterName()).andReturn("c1");
     //
-    EasyMock.expect(stage.getExecutionCommandWrapper(EasyMock.eq("c1-c6401"), EasyMock.anyString())).andReturn(execCmdWrapper);
-    EasyMock.expect(stage.getExecutionCommandWrapper(EasyMock.eq("c1-c6402"), EasyMock.anyString())).andReturn(execCmdWrapper);
-    EasyMock.expect(execCmdWrapper.getExecutionCommand()).andReturn(execCmd);
-    EasyMock.expect(execCmd.getForceRefreshConfigTagsBeforeExecution()).andReturn(true);
+    expect(stage.getExecutionCommandWrapper(EasyMock.eq("c1-c6401"), EasyMock.anyString())).andReturn(execCmdWrapper);
+    expect(stage.getExecutionCommandWrapper(EasyMock.eq("c1-c6402"), EasyMock.anyString())).andReturn(execCmdWrapper);
+    expect(execCmdWrapper.getExecutionCommand()).andReturn(execCmd);
+    expect(execCmd.getForceRefreshConfigTagsBeforeExecution()).andReturn(true);
 
     HashSet<String> localComponents = new HashSet<>();
-    EasyMock.expect(execCmd.getLocalComponents()).andReturn(localComponents).anyTimes();
-    EasyMock.replay(configHelper, stage, execCmdWrapper, execCmd);
+    expect(execCmd.getLocalComponents()).andReturn(localComponents).anyTimes();
+    replay(configHelper, stage, execCmdWrapper, execCmd);
 
     createServiceComponentHosts("c1", "CORE", "c1");
 
@@ -569,15 +585,15 @@ public class AmbariCustomCommandExecutionHelperTest {
     ExecutionCommandWrapper execCmdWrapper = EasyMock.niceMock(ExecutionCommandWrapper.class);
     ExecutionCommand execCmd = EasyMock.niceMock(ExecutionCommand.class);
 
-    EasyMock.expect(stage.getClusterName()).andReturn("c1");
+    expect(stage.getClusterName()).andReturn("c1");
     //
-    EasyMock.expect(stage.getExecutionCommandWrapper(EasyMock.eq("c1-c6403"), EasyMock.anyString())).andReturn(execCmdWrapper);
-    EasyMock.expect(execCmdWrapper.getExecutionCommand()).andReturn(execCmd);
-    EasyMock.expect(execCmd.getForceRefreshConfigTagsBeforeExecution()).andReturn(true);
+    expect(stage.getExecutionCommandWrapper(EasyMock.eq("c1-c6403"), EasyMock.anyString())).andReturn(execCmdWrapper);
+    expect(execCmdWrapper.getExecutionCommand()).andReturn(execCmd);
+    expect(execCmd.getForceRefreshConfigTagsBeforeExecution()).andReturn(true);
 
     HashSet<String> localComponents = new HashSet<>();
-    EasyMock.expect(execCmd.getLocalComponents()).andReturn(localComponents).anyTimes();
-    EasyMock.replay(configHelper, stage, execCmdWrapper, execCmd);
+    expect(execCmd.getLocalComponents()).andReturn(localComponents).anyTimes();
+    replay(configHelper, stage, execCmdWrapper, execCmd);
 
     createServiceComponentHosts("c1", "CORE", "c1");
 
@@ -644,7 +660,7 @@ public class AmbariCustomCommandExecutionHelperTest {
               }
             }, false);
     actionRequest.getResourceFilters().add(new RequestResourceFilter("CORE", "YARN", "RESOURCEMANAGER", Collections.singletonList("c1-c6401")));
-    EasyMock.replay(hostRoleCommand, actionManager, configHelper);
+    replay(hostRoleCommand, actionManager, configHelper);
 
     createServiceComponentHosts("c1", "CORE", "c1");
 
@@ -701,6 +717,106 @@ public class AmbariCustomCommandExecutionHelperTest {
     Assert.assertEquals("http://foo", repo.getBaseUrl());
   }
 
+  @Test
+  public void testCreateDefaultHostParams() throws Exception {
+    String clusterName = "c1";
+    String SOME_STACK_NAME = "SomeStackName";
+    String SOME_STACK_VERSION = "1.0";
+    String MYSQL_JAR = "MYSQL_JAR";
+    String JAVA_HOME = "javaHome";
+    String JDK_NAME = "jdkName";
+    String JCE_NAME = "jceName";
+    String OJDBC_JAR_NAME = "OjdbcJarName";
+    String SERVER_DB_NAME = "ServerDBName";
+    Map<PropertyInfo, String> notManagedHdfsPathMap = new HashMap<>();
+    PropertyInfo propertyInfo1 = new PropertyInfo();
+    propertyInfo1.setName("1");
+    PropertyInfo propertyInfo2 = new PropertyInfo();
+    propertyInfo2.setName("2");
+    notManagedHdfsPathMap.put(propertyInfo1, "/tmp");
+    notManagedHdfsPathMap.put(propertyInfo2, "/apps/falcon");
+
+    Set<String> notManagedHdfsPathSet = new HashSet<>(Arrays.asList("/tmp", "/apps/falcon"));
+    Gson gson = new Gson();
+
+    ActionManager manager = createNiceMock(ActionManager.class);
+    StackId stackId = createNiceMock(StackId.class);
+    Cluster cluster = createNiceMock(Cluster.class);
+    Injector injector = createNiceMock(Injector.class);
+    Configuration configuration = createNiceMock(Configuration.class);
+    ConfigHelper configHelper = createNiceMock(ConfigHelper.class);
+    ClusterMetadataGenerator metadataGenerator = createNiceMock(ClusterMetadataGenerator.class);
+
+    Map<String, DesiredConfig> desiredConfigs = new HashMap<>();
+
+    expect(cluster.getClusterName()).andReturn(clusterName);
+    expect(cluster.getDesiredStackVersion()).andReturn(stackId);
+    expect(cluster.getDesiredConfigs()).andReturn(desiredConfigs);
+    expect(stackId.getStackName()).andReturn(SOME_STACK_NAME).anyTimes();
+    expect(stackId.getStackVersion()).andReturn(SOME_STACK_VERSION).anyTimes();
+    expect(configuration.getMySQLJarName()).andReturn(MYSQL_JAR);
+    expect(configuration.getJavaHome()).andReturn(JAVA_HOME);
+    expect(configuration.getJDKName()).andReturn(JDK_NAME);
+    expect(configuration.getJCEName()).andReturn(JCE_NAME);
+    expect(configuration.getOjdbcJarName()).andReturn(OJDBC_JAR_NAME);
+    expect(configuration.getServerDBName()).andReturn(SERVER_DB_NAME);
+    expect(configuration.getJavaVersion()).andReturn(8);
+    expect(configuration.areHostsSysPrepped()).andReturn("true");
+    expect(configuration.getGplLicenseAccepted()).andReturn(false);
+    expect(configuration.getDatabaseConnectorNames()).andReturn(new HashMap<>()).anyTimes();
+    expect(configuration.getPreviousDatabaseConnectorNames()).andReturn(new HashMap<>()).anyTimes();
+    expect(configHelper.getPropertiesWithPropertyType(stackId, PropertyInfo.PropertyType.NOT_MANAGED_HDFS_PATH, cluster, desiredConfigs))
+      .andReturn(notManagedHdfsPathMap);
+    expect(configHelper.filterInvalidPropertyValues(notManagedHdfsPathMap, NOT_MANAGED_HDFS_PATH_LIST))
+      .andReturn(notManagedHdfsPathSet);
+
+    AmbariConfig ambariConfig = new AmbariConfig(configuration);
+    expect(metadataGenerator.getAmbariConfig()).andReturn(ambariConfig);
+
+    replay(manager, clusters, cluster, injector, stackId, configuration, configHelper, metadataGenerator);
+
+    AmbariManagementControllerImpl ambariManagementControllerImpl = createMockBuilder(AmbariManagementControllerImpl.class)
+      .withConstructor(manager, clusters, metadataGenerator, injector)
+      .createNiceMock();
+
+    replay(ambariManagementControllerImpl);
+
+    // Inject configuration manually
+    Class<?> amciClass = AmbariManagementControllerImpl.class;
+    Field f = amciClass.getDeclaredField("configs");
+    f.setAccessible(true);
+    f.set(ambariManagementControllerImpl, configuration);
+
+    AmbariCustomCommandExecutionHelper helper = new AmbariCustomCommandExecutionHelper();
+    Class<?> helperClass = AmbariCustomCommandExecutionHelper.class;
+    f = helperClass.getDeclaredField("managementController");
+    f.setAccessible(true);
+    f.set(helper, ambariManagementControllerImpl);
+
+    f = helperClass.getDeclaredField("configs");
+    f.setAccessible(true);
+    f.set(helper, configuration);
+
+    f = helperClass.getDeclaredField("configHelper");
+    f.setAccessible(true);
+    f.set(helper, configHelper);
+
+    f = helperClass.getDeclaredField("gson");
+    f.setAccessible(true);
+    f.set(helper, gson);
+
+    Map<String, String> defaultHostParams = helper.createDefaultHostParams(cluster, stackId);
+
+    assertEquals(16, defaultHostParams.size());
+    assertEquals(MYSQL_JAR, defaultHostParams.get(DB_DRIVER_FILENAME));
+    assertEquals(SOME_STACK_NAME, defaultHostParams.get(STACK_NAME));
+    assertEquals(SOME_STACK_VERSION, defaultHostParams.get(STACK_VERSION));
+    assertEquals("true", defaultHostParams.get(HOST_SYS_PREPPED));
+    assertEquals("8", defaultHostParams.get(JAVA_VERSION));
+    assertNotNull(defaultHostParams.get(NOT_MANAGED_HDFS_PATH_LIST));
+    assertTrue(defaultHostParams.get(NOT_MANAGED_HDFS_PATH_LIST).contains("/tmp"));
+  }
+
   private void createClusterFixture(String clusterName, StackId stackId,
     String respositoryVersion, String hostPrefix) throws AmbariException, AuthorizationException, NoSuchFieldException, IllegalAccessException {
 
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
index 3b69e7b..efeb508 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
@@ -18,12 +18,6 @@
 
 package org.apache.ambari.server.controller;
 
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.DB_DRIVER_FILENAME;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.HOST_SYS_PREPPED;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JAVA_VERSION;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.NOT_MANAGED_HDFS_PATH_LIST;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.STACK_NAME;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.STACK_VERSION;
 import static org.easymock.EasyMock.anyBoolean;
 import static org.easymock.EasyMock.anyLong;
 import static org.easymock.EasyMock.anyObject;
@@ -36,20 +30,16 @@ import static org.easymock.EasyMock.createStrictMock;
 import static org.easymock.EasyMock.eq;
 import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.expectLastCall;
-import static org.easymock.EasyMock.partialMockBuilder;
 import static org.easymock.EasyMock.replay;
 import static org.easymock.EasyMock.reset;
 import static org.easymock.EasyMock.verify;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertSame;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.lang.reflect.Field;
-import java.lang.reflect.Modifier;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -73,8 +63,8 @@ import org.apache.ambari.server.agent.rest.AgentResource;
 import org.apache.ambari.server.agent.stomp.AgentConfigsHolder;
 import org.apache.ambari.server.agent.stomp.MetadataHolder;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.internal.RequestStageContainer;
+import org.apache.ambari.server.metadata.ClusterMetadataGenerator;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
 import org.apache.ambari.server.orm.dao.ServiceComponentDesiredStateDAO;
@@ -91,14 +81,11 @@ import org.apache.ambari.server.security.ldap.LdapBatchDto;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.MaintenanceState;
 import org.apache.ambari.server.state.Module;
 import org.apache.ambari.server.state.Mpack;
 import org.apache.ambari.server.state.OsSpecific;
-import org.apache.ambari.server.state.PropertyInfo;
 import org.apache.ambari.server.state.RepositoryInfo;
 import org.apache.ambari.server.state.SecurityType;
 import org.apache.ambari.server.state.Service;
@@ -124,7 +111,6 @@ import com.google.gson.Gson;
 import com.google.inject.Binder;
 import com.google.inject.Guice;
 import com.google.inject.Injector;
-import com.google.inject.Provider;
 import com.google.inject.util.Modules;
 
 import junit.framework.Assert;
@@ -146,6 +132,7 @@ public class AmbariManagementControllerImplTest {
   private static final HostComponentStateDAO hostComponentStateDAO = createMock(HostComponentStateDAO.class);
   private static final ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity = createMock(ServiceComponentDesiredStateEntity.class);
   private static final ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO = createMock(ServiceComponentDesiredStateDAO.class);
+  private static final ClusterMetadataGenerator metadataGenerator = createNiceMock(ClusterMetadataGenerator.class);
 
   @BeforeClass
   public static void setupAuthentication() {
@@ -156,79 +143,10 @@ public class AmbariManagementControllerImplTest {
   }
 
   @Before
-  public void before() throws Exception {
-    reset(ldapDataPopulator, clusters, actionDBAccessor, ambariMetaInfo, users, sessionManager,
+  public void before() {
+    reset(ldapDataPopulator, clusters, actionDBAccessor, ambariMetaInfo, users, sessionManager, metadataGenerator,
             hostComponentStateEntity, hostComponentStateDAO, serviceComponentDesiredStateEntity, serviceComponentDesiredStateDAO);
-  }
-
-  @Test
-  public void testgetAmbariServerURI() throws Exception {
-    // create mocks
-    Injector injector = createStrictMock(Injector.class);
-    Capture<AmbariManagementController> controllerCapture = EasyMock.newCapture();
-
-    // set expectations
-    constructorInit(injector, controllerCapture, createNiceMock(KerberosHelper.class));
-
-    expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes();
-    expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes();
-
-    //replay
-    replay(injector);
-
-
-    AmbariManagementControllerImpl controller = new AmbariManagementControllerImpl(null, null, injector);
-
-    class AmbariConfigsSetter {
-      public void setConfigs(AmbariManagementController controller, String masterProtocol, String masterHostname, Integer masterPort) throws Exception {
-        // masterProtocol
-        Class<?> c = controller.getClass();
-        Field f = c.getDeclaredField("masterProtocol");
-        f.setAccessible(true);
-
-        Field modifiersField = Field.class.getDeclaredField("modifiers");
-        modifiersField.setAccessible(true);
-        modifiersField.setInt(f, f.getModifiers() & ~Modifier.FINAL);
-
-        f.set(controller, masterProtocol);
-
-        // masterHostname
-        f = c.getDeclaredField("masterHostname");
-        f.setAccessible(true);
-
-        modifiersField = Field.class.getDeclaredField("modifiers");
-        modifiersField.setAccessible(true);
-        modifiersField.setInt(f, f.getModifiers() & ~Modifier.FINAL);
-
-        f.set(controller, masterHostname);
-
-        // masterPort
-        f = c.getDeclaredField("masterPort");
-        f.setAccessible(true);
-
-        modifiersField = Field.class.getDeclaredField("modifiers");
-        modifiersField.setAccessible(true);
-        modifiersField.setInt(f, f.getModifiers() & ~Modifier.FINAL);
-
-        f.set(controller, masterPort);
-      }
-    }
-
-    AmbariConfigsSetter ambariConfigsSetter = new AmbariConfigsSetter();
-
-    ambariConfigsSetter.setConfigs(controller, "http", "hostname", 8080);
-    assertEquals("http://hostname:8080/jdk_path", controller.getAmbariServerURI("/jdk_path"));
-
-    ambariConfigsSetter.setConfigs(controller, "https", "somesecuredhost", 8443);
-    assertEquals("https://somesecuredhost:8443/mysql_path", controller.getAmbariServerURI("/mysql_path"));
-
-    ambariConfigsSetter.setConfigs(controller, "https", "othersecuredhost", 8443);
-    assertEquals("https://othersecuredhost:8443/oracle/ojdbc/", controller.getAmbariServerURI("/oracle/ojdbc/"));
-
-    ambariConfigsSetter.setConfigs(controller, "http", "hostname", 8080);
-    assertEquals("http://hostname:8080/jdk_path?query", controller.getAmbariServerURI("/jdk_path?query"));
-
-    verify(injector);
+    replay(metadataGenerator);
   }
 
   @Test
@@ -262,7 +180,7 @@ public class AmbariManagementControllerImplTest {
     replay(injector, clusters, cluster, response, credentialStoreService, hostComponentStateDAO, serviceComponentDesiredStateDAO);
 
     // test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
+    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, metadataGenerator, injector);
 
     Field f = controller.getClass().getDeclaredField("credentialStoreService");
     f.setAccessible(true);
@@ -306,7 +224,7 @@ public class AmbariManagementControllerImplTest {
     replay(injector, clusters, hostComponentStateDAO, serviceComponentDesiredStateDAO);
 
     //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
+    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, metadataGenerator, injector);
 
     // assert that exception is thrown in case where there is a single request
     try {
@@ -370,7 +288,7 @@ public class AmbariManagementControllerImplTest {
             hostComponentStateDAO, serviceComponentDesiredStateDAO);
 
     //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
+    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, metadataGenerator, injector);
 
     Field f = controller.getClass().getDeclaredField("credentialStoreService");
     f.setAccessible(true);
@@ -410,19 +328,16 @@ public class AmbariManagementControllerImplTest {
     configRequests.add(configurationRequest);
 
     KerberosHelper kerberosHelper = createStrictMock(KerberosHelper.class);
-    Provider<MetadataHolder> m_metadataHolder = createMock(Provider.class);
     MetadataHolder metadataHolder = createMock(MetadataHolder.class);
-    Provider<AgentConfigsHolder> m_agentConfigsHolder = createMock(Provider.class);
     AgentConfigsHolder agentConfigsHolder = createMockBuilder(AgentConfigsHolder.class)
         .addMockedMethod("updateData").createMock();
     // expectations
     constructorInit(injector, controllerCapture, null, null,
-        kerberosHelper, m_metadataHolder, m_agentConfigsHolder);
+        kerberosHelper, metadataHolder, agentConfigsHolder
+    );
 
-    expect(m_metadataHolder.get()).andReturn(metadataHolder).anyTimes();
     expect(metadataHolder.updateData(anyObject())).andReturn(true).anyTimes();
 
-    expect(m_agentConfigsHolder.get()).andReturn(agentConfigsHolder).anyTimes();
     agentConfigsHolder.updateData(anyLong(), anyObject(List.class));
     expectLastCall().anyTimes();
 
@@ -444,21 +359,18 @@ public class AmbariManagementControllerImplTest {
 
     // replay mocks
     replay(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, configurationRequest,
-            hostComponentStateDAO, serviceComponentDesiredStateDAO, m_metadataHolder, metadataHolder, m_agentConfigsHolder, agentConfigsHolder);
+            hostComponentStateDAO, serviceComponentDesiredStateDAO, metadataHolder, agentConfigsHolder);
 
 
     // test
-    AmbariManagementController controller = partialMockBuilder(AmbariManagementControllerImpl.class)
-        .withConstructor(actionManager, clusters, injector)
-        .addMockedMethod("getClusterMetadataOnConfigsUpdate")
-        .createMock();
+    AmbariManagementController controller = new AmbariManagementControllerImpl(actionManager, clusters, metadataGenerator, injector);
     controller.updateClusters(setRequests, null);
 
     // assert and verify
     assertSame(controller, controllerCapture.getValue());
     verify(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, configurationRequest,
             hostComponentStateDAO, serviceComponentDesiredStateDAO,
-        m_metadataHolder, metadataHolder, m_agentConfigsHolder, agentConfigsHolder);
+        metadataHolder, agentConfigsHolder);
   }
 
   /**
@@ -518,7 +430,7 @@ public class AmbariManagementControllerImplTest {
             hostComponentStateDAO, serviceComponentDesiredStateDAO);
 
     // test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(actionManager, clusters, injector);
+    AmbariManagementController controller = new AmbariManagementControllerImpl(actionManager, clusters, metadataGenerator, injector);
     controller.updateClusters(setRequests, null);
 
     // assert and verify
@@ -544,19 +456,16 @@ public class AmbariManagementControllerImplTest {
     Set<ClusterRequest> setRequests = Collections.singleton(clusterRequest);
 
     KerberosHelper kerberosHelper = createStrictMock(KerberosHelper.class);
-    Provider<MetadataHolder> m_metadataHolder = createMock(Provider.class);
     MetadataHolder metadataHolder = createMock(MetadataHolder.class);
-    Provider<AgentConfigsHolder> m_agentConfigsHolder = createMock(Provider.class);
     AgentConfigsHolder agentConfigsHolder = createMockBuilder(AgentConfigsHolder.class)
         .addMockedMethod("updateData").createMock();
     // expectations
     constructorInit(injector, controllerCapture, null, null,
-        kerberosHelper, m_metadataHolder, m_agentConfigsHolder);
+        kerberosHelper, metadataHolder, agentConfigsHolder
+    );
 
-    expect(m_metadataHolder.get()).andReturn(metadataHolder).anyTimes();
     expect(metadataHolder.updateData(anyObject())).andReturn(true).anyTimes();
 
-    expect(m_agentConfigsHolder.get()).andReturn(agentConfigsHolder).anyTimes();
     agentConfigsHolder.updateData(anyLong(), anyObject(List.class));
     expectLastCall().anyTimes();
 
@@ -570,20 +479,17 @@ public class AmbariManagementControllerImplTest {
     // replay mocks
     replay(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, kerberosHelper,
             hostComponentStateDAO, serviceComponentDesiredStateDAO,
-        m_metadataHolder, metadataHolder, m_agentConfigsHolder, agentConfigsHolder);
+        metadataHolder, agentConfigsHolder);
 
     // test
-    AmbariManagementController controller = partialMockBuilder(AmbariManagementControllerImpl.class)
-        .withConstructor(actionManager, clusters, injector)
-        .addMockedMethod("getClusterMetadataOnConfigsUpdate")
-        .createMock();
+    AmbariManagementController controller = new AmbariManagementControllerImpl(actionManager, clusters, metadataGenerator, injector);
     controller.updateClusters(setRequests, null);
 
     // assert and verify
     assertSame(controller, controllerCapture.getValue());
     verify(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, kerberosHelper,
             hostComponentStateDAO, serviceComponentDesiredStateDAO,
-        m_metadataHolder, metadataHolder, m_agentConfigsHolder, agentConfigsHolder);
+        metadataHolder, agentConfigsHolder);
   }
 
   /**
@@ -604,19 +510,16 @@ public class AmbariManagementControllerImplTest {
     Set<ClusterRequest> setRequests = Collections.singleton(clusterRequest);
 
     KerberosHelper kerberosHelper = createStrictMock(KerberosHelper.class);
-    Provider<MetadataHolder> m_metadataHolder = createMock(Provider.class);
     MetadataHolder metadataHolder = createMock(MetadataHolder.class);
-    Provider<AgentConfigsHolder> m_agentConfigsHolder = createMock(Provider.class);
     AgentConfigsHolder agentConfigsHolder = createMockBuilder(AgentConfigsHolder.class)
         .addMockedMethod("updateData").createMock();
     // expectations
     constructorInit(injector, controllerCapture, null, null,
-        kerberosHelper, m_metadataHolder, m_agentConfigsHolder);
+        kerberosHelper, metadataHolder, agentConfigsHolder
+    );
 
-    expect(m_metadataHolder.get()).andReturn(metadataHolder).anyTimes();
     expect(metadataHolder.updateData(anyObject())).andReturn(true).anyTimes();
 
-    expect(m_agentConfigsHolder.get()).andReturn(agentConfigsHolder).anyTimes();
     agentConfigsHolder.updateData(anyLong(), anyObject(List.class));
     expectLastCall().anyTimes();
 
@@ -640,20 +543,17 @@ public class AmbariManagementControllerImplTest {
     // replay mocks
     replay(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, kerberosHelper,
             hostComponentStateDAO, serviceComponentDesiredStateDAO,
-        m_metadataHolder, metadataHolder, m_agentConfigsHolder, agentConfigsHolder);
+        metadataHolder, agentConfigsHolder);
 
     // test
-    AmbariManagementController controller = partialMockBuilder(AmbariManagementControllerImpl.class)
-        .withConstructor(actionManager, clusters, injector)
-        .addMockedMethod("getClusterMetadataOnConfigsUpdate")
-        .createMock();
+    AmbariManagementController controller = new AmbariManagementControllerImpl(actionManager, clusters, metadataGenerator, injector);
     controller.updateClusters(setRequests, null);
 
     // assert and verify
     assertSame(controller, controllerCapture.getValue());
     verify(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, kerberosHelper,
             hostComponentStateDAO, serviceComponentDesiredStateDAO,
-        m_metadataHolder, metadataHolder, m_agentConfigsHolder, agentConfigsHolder);
+        metadataHolder, agentConfigsHolder);
   }
   /**
    * Ensure that when the cluster security type updated from NONE to KERBEROS, KerberosHandler.toggleKerberos
@@ -672,19 +572,16 @@ public class AmbariManagementControllerImplTest {
     Set<ClusterRequest> setRequests = Collections.singleton(clusterRequest);
 
     KerberosHelper kerberosHelper = createStrictMock(KerberosHelper.class);
-    Provider<MetadataHolder> m_metadataHolder = createMock(Provider.class);
     MetadataHolder metadataHolder = createMock(MetadataHolder.class);
-    Provider<AgentConfigsHolder> m_agentConfigsHolder = createMock(Provider.class);
     AgentConfigsHolder agentConfigsHolder = createMockBuilder(AgentConfigsHolder.class)
         .addMockedMethod("updateData").createMock();
     // expectations
     constructorInit(injector, controllerCapture, null, null,
-        kerberosHelper, m_metadataHolder, m_agentConfigsHolder);
+        kerberosHelper, metadataHolder, agentConfigsHolder
+    );
 
-    expect(m_metadataHolder.get()).andReturn(metadataHolder).anyTimes();
     expect(metadataHolder.updateData(anyObject())).andReturn(true).anyTimes();
 
-    expect(m_agentConfigsHolder.get()).andReturn(agentConfigsHolder).anyTimes();
     agentConfigsHolder.updateData(anyLong(), anyObject(List.class));
     expectLastCall().anyTimes();
 
@@ -716,20 +613,17 @@ public class AmbariManagementControllerImplTest {
     // replay mocks
     replay(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, kerberosHelper,
             hostComponentStateDAO, serviceComponentDesiredStateDAO,
-        m_metadataHolder, metadataHolder, m_agentConfigsHolder, agentConfigsHolder);
+        metadataHolder, agentConfigsHolder);
 
     // test
-    AmbariManagementController controller = partialMockBuilder(AmbariManagementControllerImpl.class)
-        .withConstructor(actionManager, clusters, injector)
-        .addMockedMethod("getClusterMetadataOnConfigsUpdate")
-        .createMock();
+    AmbariManagementController controller = new AmbariManagementControllerImpl(actionManager, clusters, metadataGenerator, injector);
     controller.updateClusters(setRequests, null);
 
     // assert and verify
     assertSame(controller, controllerCapture.getValue());
     verify(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, kerberosHelper,
             hostComponentStateDAO, serviceComponentDesiredStateDAO,
-        m_metadataHolder, metadataHolder, m_agentConfigsHolder, agentConfigsHolder);
+        metadataHolder, agentConfigsHolder);
   }
 
   /**
@@ -777,19 +671,16 @@ public class AmbariManagementControllerImplTest {
     Capture<Boolean> manageIdentitiesCapture = EasyMock.newCapture();
 
     KerberosHelper kerberosHelper = createStrictMock(KerberosHelper.class);
-    Provider<MetadataHolder> m_metadataHolder = createMock(Provider.class);
     MetadataHolder metadataHolder = createMock(MetadataHolder.class);
-    Provider<AgentConfigsHolder> m_agentConfigsHolder = createMock(Provider.class);
     AgentConfigsHolder agentConfigsHolder = createMockBuilder(AgentConfigsHolder.class)
         .addMockedMethod("updateData").createMock();
     // expectations
     constructorInit(injector, controllerCapture, null, null,
-        kerberosHelper, m_metadataHolder, m_agentConfigsHolder);
+        kerberosHelper, metadataHolder, agentConfigsHolder
+    );
 
-    expect(m_metadataHolder.get()).andReturn(metadataHolder).anyTimes();
     expect(metadataHolder.updateData(anyObject())).andReturn(true).anyTimes();
 
-    expect(m_agentConfigsHolder.get()).andReturn(agentConfigsHolder).anyTimes();
     agentConfigsHolder.updateData(anyLong(), anyObject(List.class));
     expectLastCall().anyTimes();
 
@@ -817,20 +708,17 @@ public class AmbariManagementControllerImplTest {
 
     // replay mocks
     replay(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, kerberosHelper,
-        m_metadataHolder, metadataHolder, m_agentConfigsHolder, agentConfigsHolder);
+        metadataHolder, agentConfigsHolder);
 
     // test
-    AmbariManagementController controller = partialMockBuilder(AmbariManagementControllerImpl.class)
-        .withConstructor(actionManager, clusters, injector)
-        .addMockedMethod("getClusterMetadataOnConfigsUpdate")
-        .createMock();
+    AmbariManagementController controller = new AmbariManagementControllerImpl(actionManager, clusters, metadataGenerator, injector);
     controller.updateClusters(setRequests, null);
 
     // assert and verify
     assertSame(controller, controllerCapture.getValue());
     assertEquals(manageIdentities, manageIdentitiesCapture.getValue());
     verify(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, kerberosHelper,
-        m_metadataHolder, metadataHolder, m_agentConfigsHolder, agentConfigsHolder);
+        metadataHolder, agentConfigsHolder);
   }
 
   /**
@@ -850,19 +738,16 @@ public class AmbariManagementControllerImplTest {
     Set<ClusterRequest> setRequests = Collections.singleton(clusterRequest);
 
     KerberosHelper kerberosHelper = createStrictMock(KerberosHelper.class);
-    Provider<MetadataHolder> m_metadataHolder = createMock(Provider.class);
     MetadataHolder metadataHolder = createMock(MetadataHolder.class);
-    Provider<AgentConfigsHolder> m_agentConfigsHolder = createMock(Provider.class);
     AgentConfigsHolder agentConfigsHolder = createMockBuilder(AgentConfigsHolder.class)
         .addMockedMethod("updateData").createMock();
     // expectations
     constructorInit(injector, controllerCapture, null, null,
-        kerberosHelper, m_metadataHolder, m_agentConfigsHolder);
+        kerberosHelper, metadataHolder, agentConfigsHolder
+    );
 
-    expect(m_metadataHolder.get()).andReturn(metadataHolder);
     expect(metadataHolder.updateData(anyObject())).andReturn(true);
 
-    expect(m_agentConfigsHolder.get()).andReturn(agentConfigsHolder);
     agentConfigsHolder.updateData(anyLong(), anyObject(List.class));
     expectLastCall();
 
@@ -901,13 +786,10 @@ public class AmbariManagementControllerImplTest {
     // replay mocks
     replay(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, kerberosHelper,
             hostComponentStateDAO, serviceComponentDesiredStateDAO,
-        m_agentConfigsHolder, agentConfigsHolder, m_metadataHolder, metadataHolder);
+      agentConfigsHolder, metadataHolder);
 
     // test
-    AmbariManagementController controller = partialMockBuilder(AmbariManagementControllerImpl.class)
-        .withConstructor(actionManager, clusters, injector)
-        .addMockedMethod("getClusterMetadataOnConfigsUpdate")
-        .createMock();
+    AmbariManagementController controller = new AmbariManagementControllerImpl(actionManager, clusters, metadataGenerator, injector);
 
     try {
       controller.updateClusters(setRequests, null);
@@ -920,7 +802,7 @@ public class AmbariManagementControllerImplTest {
     assertSame(controller, controllerCapture.getValue());
     verify(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, kerberosHelper,
             hostComponentStateDAO, serviceComponentDesiredStateDAO,
-        m_agentConfigsHolder, agentConfigsHolder, m_metadataHolder, metadataHolder);
+      agentConfigsHolder, metadataHolder);
   }
 
   /**
@@ -957,7 +839,7 @@ public class AmbariManagementControllerImplTest {
             hostComponentStateDAO, serviceComponentDesiredStateDAO);
 
     // test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(actionManager, clusters, injector);
+    AmbariManagementController controller = new AmbariManagementControllerImpl(actionManager, clusters, metadataGenerator, injector);
     try {
       controller.updateClusters(setRequests, null);
       fail("Expected RollbackException");
@@ -994,11 +876,10 @@ public class AmbariManagementControllerImplTest {
     Set<ServiceComponentHostRequest> setRequests = new HashSet<>();
     setRequests.add(request1);
 
-    Provider<MetadataHolder> m_metadataHolder = createMock(Provider.class);
-    Provider<AgentConfigsHolder> m_agentConfigsHolder = createMock(Provider.class);
     // expectations
     constructorInit(injector, controllerCapture, null, maintHelper,
-        createNiceMock(KerberosHelper.class), m_metadataHolder, m_agentConfigsHolder);
+        createNiceMock(KerberosHelper.class), null, null
+    );
 
     expect(injector.getInstance(HostComponentStateDAO.class)).andReturn(hostComponentStateDAO).anyTimes();
     expect(hostComponentStateDAO.findById(1L)).andReturn(hostComponentStateEntity).anyTimes();
@@ -1021,7 +902,8 @@ public class AmbariManagementControllerImplTest {
           put("host1", host);
         }}).anyTimes();
 
-    expect(cluster.getService("service1")).andReturn(service);
+    expect(cluster.getServicesByServiceGroup("CORE")).andReturn(ImmutableList.of(service)).anyTimes();
+    expect(cluster.getService("service1")).andReturn(service).anyTimes();
     expect(cluster.getServiceByComponentName("component1")).andReturn(service).anyTimes();
     expect(service.getName()).andReturn("service1").anyTimes();
     expect(service.getServiceComponent("component1")).andReturn(component);
@@ -1037,10 +919,11 @@ public class AmbariManagementControllerImplTest {
     // replay mocks
     replay(maintHelper, injector, clusters, cluster, host, response, stack,
         ambariMetaInfo, service, component, componentHost, hostComponentStateDAO, hostComponentStateEntity,
-        serviceComponentDesiredStateDAO, serviceComponentDesiredStateEntity, m_agentConfigsHolder, m_metadataHolder);
+        serviceComponentDesiredStateDAO, serviceComponentDesiredStateEntity
+    );
 
     //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
+    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, metadataGenerator, injector);
     setAmbariMetaInfo(ambariMetaInfo, controller);
 
     Set<ServiceComponentHostResponse> setResponses = controller.getHostComponents(setRequests);
@@ -1051,9 +934,9 @@ public class AmbariManagementControllerImplTest {
     assertTrue(setResponses.contains(response));
 
     verify(injector, clusters, cluster, host, response, stack, ambariMetaInfo, service, component, componentHost,
-            hostComponentStateDAO, hostComponentStateEntity, serviceComponentDesiredStateDAO,
-            serviceComponentDesiredStateEntity,
-        m_agentConfigsHolder, m_metadataHolder);
+      hostComponentStateDAO, hostComponentStateEntity, serviceComponentDesiredStateDAO,
+      serviceComponentDesiredStateEntity
+    );
   }
 
   @Test
@@ -1090,7 +973,8 @@ public class AmbariManagementControllerImplTest {
 //    expect(stack.getStackVersion()).andReturn("stackVersion");
 //
 //    expect(ambariMetaInfo.getComponentToService("stackName", "stackVersion", "component1")).andReturn("service1");
-    expect(cluster.getService("service1")).andReturn(service);
+    expect(cluster.getServicesByServiceGroup("CORE")).andReturn(ImmutableList.of(service)).anyTimes();
+    expect(cluster.getService("service1")).andReturn(service).anyTimes();
     expect(cluster.getServiceByComponentName("component1")).andReturn(service).anyTimes();
     expect(service.getServiceComponent("component1")).andReturn(component);
     expect(component.getId()).andReturn(1L).anyTimes();
@@ -1115,7 +999,7 @@ public class AmbariManagementControllerImplTest {
             serviceComponentDesiredStateEntity);
 
     //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
+    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, metadataGenerator, injector);
     setAmbariMetaInfo(ambariMetaInfo, controller);
 
     try {
@@ -1178,7 +1062,8 @@ public class AmbariManagementControllerImplTest {
 //
 //    expect(ambariMetaInfo.getComponentToService("stackName", "stackVersion", "component1")).andReturn("service1");
     expect(cluster.getClusterName()).andReturn("cl1");
-    expect(cluster.getService("service1")).andReturn(service);
+    expect(cluster.getServicesByServiceGroup("CORE")).andReturn(ImmutableList.of(service)).anyTimes();
+    expect(cluster.getService("service1")).andReturn(service).anyTimes();
     expect(cluster.getServiceByComponentName("component1")).andReturn(service).anyTimes();
     expect(service.getServiceComponent("component1")).andReturn(component);
     expect(service.getName()).andReturn("service1").anyTimes();
@@ -1199,7 +1084,7 @@ public class AmbariManagementControllerImplTest {
         service, component, componentHost1, response1, hostComponentStateDAO, serviceComponentDesiredStateDAO);
 
     //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
+    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, metadataGenerator, injector);
     setAmbariMetaInfo(ambariMetaInfo, controller);
 
     Set<ServiceComponentHostResponse> responses = controller.getHostComponents(setRequests);
@@ -1253,7 +1138,8 @@ public class AmbariManagementControllerImplTest {
         }}).anyTimes();
 
     expect(cluster.getClusterName()).andReturn("cl1");
-    expect(cluster.getService("service1")).andReturn(service);
+    expect(cluster.getServicesByServiceGroup("CORE")).andReturn(ImmutableList.of(service)).anyTimes();
+    expect(cluster.getService("service1")).andReturn(service).anyTimes();
     expect(cluster.getServiceByComponentName("component1")).andReturn(service).anyTimes();
     expect(service.getServiceComponent("component1")).andReturn(component);
     expect(service.getName()).andReturn("service1").anyTimes();
@@ -1273,7 +1159,7 @@ public class AmbariManagementControllerImplTest {
         service, component, componentHost1, response1, hostComponentStateDAO, serviceComponentDesiredStateDAO);
 
     //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
+    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, metadataGenerator, injector);
     setAmbariMetaInfo(ambariMetaInfo, controller);
 
     Set<ServiceComponentHostResponse> responses = controller.getHostComponents(setRequests);
@@ -1338,7 +1224,8 @@ public class AmbariManagementControllerImplTest {
     // getHostComponent
     expect(clusters.getCluster("cluster1")).andReturn(cluster).times(3);
     expect(clusters.getClustersForHost("host1")).andReturn(Collections.singleton(cluster)).anyTimes();
-    expect(cluster.getService("service1")).andReturn(service).times(3);
+    expect(cluster.getServicesByServiceGroup("CORE")).andReturn(ImmutableList.of(service)).anyTimes();
+    expect(cluster.getService("service1")).andReturn(service).anyTimes();
 
     expect(cluster.getServiceByComponentName("component1")).andReturn(service).anyTimes();
     expect(service.getServiceComponent("component1")).andReturn(component1);
@@ -1408,7 +1295,7 @@ public class AmbariManagementControllerImplTest {
             hostComponentStateEntity, hostComponentStateEntity2, hostComponentStateEntity3);
 
     //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
+    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, metadataGenerator, injector);
     setAmbariMetaInfo(ambariMetaInfo, controller);
 
     Set<ServiceComponentHostResponse> setResponses = controller.getHostComponents(setRequests);
@@ -1480,26 +1367,20 @@ public class AmbariManagementControllerImplTest {
         }}).anyTimes();
 
 //    expect(ambariMetaInfo.getComponentToService("stackName", "stackVersion", "component1")).andReturn("service1");
-    expect(cluster.getService("service1")).andReturn(service);
+    expect(cluster.getServicesByServiceGroup("CORE")).andReturn(ImmutableList.of(service)).anyTimes();
+    expect(cluster.getService("service1")).andReturn(service).anyTimes();
     expect(cluster.getServiceByComponentName("component1")).andReturn(service).anyTimes();
     expect(service.getServiceComponent("component1")).andReturn(component1);
-    expect(component1.getServiceComponentHosts()).andReturn(new
-                                                               HashMap<String, ServiceComponentHost>() {{
-                                                                 put("host1", componentHost1);
-                                                               }});
+    expect(component1.getServiceComponentHosts()).andReturn(ImmutableMap.of("host1", componentHost1));
     expect(componentHost1.convertToResponse(null)).andReturn(response1);
     expect(componentHost1.getHostName()).andReturn("host1");
 
     expect(cluster.getService("service2")).andThrow(new ServiceNotFoundException("cluster1", "service2"));
 
-    expect(cluster.getService("service1")).andReturn(service);
     expect(service.getName()).andReturn("service1").anyTimes();
     expect(cluster.getServiceByComponentName("component3")).andReturn(service).anyTimes();
     expect(service.getServiceComponent("component3")).andReturn(component3);
-    expect(component3.getServiceComponentHosts()).andReturn(new
-                                                                HashMap<String, ServiceComponentHost>() {{
-                                                                  put("host1", componentHost2);
-                                                                }});
+    expect(component3.getServiceComponentHosts()).andReturn(ImmutableMap.of("host1", componentHost2));
     expect(componentHost2.convertToResponse(null)).andReturn(response2);
     expect(componentHost2.getHostName()).andReturn("host1");
 
@@ -1519,7 +1400,7 @@ public class AmbariManagementControllerImplTest {
 
     expect(hostComponentStateEntity2.getClusterId()).andReturn(1L).anyTimes();
     expect(hostComponentStateEntity2.getServiceGroupId()).andReturn(1L).anyTimes();
-    expect(hostComponentStateEntity2.getServiceId()).andReturn(1L).anyTimes();
+    expect(hostComponentStateEntity2.getServiceId()).andReturn(2L).anyTimes();
     expect(hostComponentStateEntity2.getComponentName()).andReturn("component2").anyTimes();
     expect(hostComponentStateEntity2.getComponentType()).andReturn("component2").anyTimes();
 
@@ -1534,7 +1415,7 @@ public class AmbariManagementControllerImplTest {
             "component1", "component1")).andReturn(serviceComponentDesiredStateEntity).anyTimes();
     ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity2 = createMock(ServiceComponentDesiredStateEntity.class);
 
-    expect(serviceComponentDesiredStateDAO.findByName(1L, 1L, 1L,
+    expect(serviceComponentDesiredStateDAO.findByName(1L, 1L, 2L,
             "component2", "component2")).andReturn(serviceComponentDesiredStateEntity2).anyTimes();
     ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity3 = createMock(ServiceComponentDesiredStateEntity.class);
     expect(serviceComponentDesiredStateDAO.findByName(1L, 1L, 1L,
@@ -1548,7 +1429,7 @@ public class AmbariManagementControllerImplTest {
             hostComponentStateEntity, hostComponentStateEntity2, hostComponentStateEntity3);
 
     //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
+    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, metadataGenerator, injector);
     setAmbariMetaInfo(ambariMetaInfo, controller);
 
     Set<ServiceComponentHostResponse> setResponses = controller.getHostComponents(setRequests);
@@ -1622,7 +1503,8 @@ public class AmbariManagementControllerImplTest {
 
 
 //    expect(ambariMetaInfo.getComponentToService("stackName", "stackVersion", "component1")).andReturn("service1");
-    expect(cluster.getService("service1")).andReturn(service);
+    expect(cluster.getServicesByServiceGroup("CORE")).andReturn(ImmutableList.of(service, service2)).anyTimes();
+    expect(cluster.getService("service1")).andReturn(service).anyTimes();
     expect(cluster.getServiceByComponentName("component1")).andReturn(service).anyTimes();
     expect(service.getServiceComponent("component1")).andReturn(component);
     expect(component.getServiceComponentHosts()).andReturn(ImmutableMap.<String, ServiceComponentHost>builder()
@@ -1638,7 +1520,6 @@ public class AmbariManagementControllerImplTest {
         andThrow(new ServiceComponentNotFoundException("cluster1", "service2", "service2", "CORE", "component2"));
 
 //    expect(ambariMetaInfo.getComponentToService("stackName", "stackVersion", "component3")).andReturn("service1");
-    expect(cluster.getService("service1")).andReturn(service);
     expect(cluster.getServiceByComponentName("component3")).andReturn(service).anyTimes();
     expect(service.getServiceComponent("component3")).andReturn(component3);
 
@@ -1693,7 +1574,7 @@ public class AmbariManagementControllerImplTest {
             hostComponentStateEntity, hostComponentStateEntity2, hostComponentStateEntity3);
 
     //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
+    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, metadataGenerator, injector);
     setAmbariMetaInfo(ambariMetaInfo, controller);
 
     Set<ServiceComponentHostResponse> setResponses = controller.getHostComponents(setRequests);
@@ -1719,7 +1600,6 @@ public class AmbariManagementControllerImplTest {
     Cluster cluster = createNiceMock(Cluster.class);
     final Host host = createNiceMock(Host.class);
     Service service = createNiceMock(Service.class);
-    Service service2 = createNiceMock(Service.class);
     ServiceComponent component = createNiceMock(ServiceComponent.class);
     ServiceComponent component2 = createNiceMock(ServiceComponent.class);
     ServiceComponent component3 = createNiceMock(ServiceComponent.class);
@@ -1765,7 +1645,8 @@ public class AmbariManagementControllerImplTest {
     expect(stack.getStackName()).andReturn("stackName").anyTimes();
     expect(stack.getStackVersion()).andReturn("stackVersion").anyTimes();
 
-    expect(cluster.getService("service1")).andReturn(service);
+    expect(cluster.getServicesByServiceGroup("CORE")).andReturn(ImmutableList.of(service)).anyTimes();
+    expect(cluster.getService("service1")).andReturn(service).anyTimes();
     expect(cluster.getServiceByComponentName("component1")).andReturn(service).anyTimes();
     expect(service.getServiceComponent("component1")).andReturn(component);
     expect(service.getName()).andReturn("service1").anyTimes();
@@ -1775,7 +1656,6 @@ public class AmbariManagementControllerImplTest {
 
     expect(clusters.getClustersForHost("host2")).andThrow(new HostNotFoundException("host2"));
 
-    expect(cluster.getService("service1")).andReturn(service);
     expect(cluster.getServiceByComponentName("component3")).andReturn(service).anyTimes();
     expect(service.getServiceComponent("component3")).andReturn(component3);
     expect(component3.getServiceComponentHosts()).andReturn(Collections.singletonMap("foo", componentHost2));
@@ -1822,12 +1702,12 @@ public class AmbariManagementControllerImplTest {
 
     // replay mocks
     replay(maintHelper, injector, clusters, cluster, host, stack, ambariMetaInfo,
-        service, service2, component, component2, component3, componentHost1,
+        service, component, component2, component3, componentHost1,
         componentHost2, response1, response2, hostComponentStateDAO, serviceComponentDesiredStateDAO,
             hostComponentStateEntity, hostComponentStateEntity2, hostComponentStateEntity3);
 
     //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
+    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, metadataGenerator, injector);
     setAmbariMetaInfo(ambariMetaInfo, controller);
 
     Set<ServiceComponentHostResponse> setResponses = controller.getHostComponents(setRequests);
@@ -1839,7 +1719,7 @@ public class AmbariManagementControllerImplTest {
     assertTrue(setResponses.contains(response1));
     assertTrue(setResponses.contains(response2));
 
-    verify(injector, clusters, cluster, host, stack, ambariMetaInfo, service, service2, component, component2, component3,
+    verify(injector, clusters, cluster, host, stack, ambariMetaInfo, service, component, component2, component3,
         componentHost1, componentHost2, response1, response2, hostComponentStateDAO, serviceComponentDesiredStateDAO,
             hostComponentStateEntity, hostComponentStateEntity2, hostComponentStateEntity3);
   }
@@ -1886,7 +1766,7 @@ public class AmbariManagementControllerImplTest {
     replay(maintHelper, injector, clusters, cluster, stack, ambariMetaInfo, hostComponentStateDAO, serviceComponentDesiredStateDAO);
 
     //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
+    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, metadataGenerator, injector);
     setAmbariMetaInfo(ambariMetaInfo, controller);
 
     try {
@@ -1941,7 +1821,7 @@ public class AmbariManagementControllerImplTest {
     replay(maintHelper, injector, clusters, stack, ambariMetaInfo);
 
     //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
+    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, metadataGenerator, injector);
     setAmbariMetaInfo(ambariMetaInfo, controller);
 
     try {
@@ -2001,7 +1881,8 @@ public class AmbariManagementControllerImplTest {
           put("host1", createNiceMock(Host.class));
         }}).anyTimes();
 
-    expect(cluster.getService("service1")).andReturn(service);
+    expect(cluster.getServicesByServiceGroup("CORE")).andReturn(ImmutableList.of(service)).anyTimes();
+    expect(cluster.getService("service1")).andReturn(service).anyTimes();
     expect(service.getName()).andReturn("service1").anyTimes();
     expect(component.getName()).andReturn("component1").anyTimes();
     expect(cluster.getServiceByComponentName("component1")).andReturn(service).anyTimes();
@@ -2021,7 +1902,7 @@ public class AmbariManagementControllerImplTest {
             hostComponentStateDAO, serviceComponentDesiredStateDAO);
 
     //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
+    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, metadataGenerator, injector);
     setAmbariMetaInfo(ambariMetaInfo, controller);
 
     Set<ServiceComponentHostResponse> setResponses = controller.getHostComponents(setRequests);
@@ -2083,12 +1964,11 @@ public class AmbariManagementControllerImplTest {
 
     // getHostComponent
     expect(clusters.getCluster("cluster1")).andReturn(cluster);
-    expect(clusters.getHostsForCluster((String) anyObject())).andReturn(
-        new HashMap<String, Host>() {{
-          put("host1", createNiceMock(Host.class));
-        }}).anyTimes();
+    expect(clusters.getHostsForCluster("cluster1")).andReturn(ImmutableMap.of("host1", createNiceMock(Host.class))).anyTimes();
 
-    expect(cluster.getServices()).andReturn(mapServices);
+    expect(cluster.getClusterName()).andReturn("cluster1").anyTimes();
+    expect(cluster.getServices()).andReturn(mapServices).anyTimes();
+    expect(cluster.getServicesByServiceGroup("CORE")).andReturn(ImmutableList.of(service1, service2)).anyTimes();
     expect(service1.getServiceComponents()).andReturn(Collections.singletonMap("foo", component1));
     expect(service2.getServiceComponents()).andReturn(Collections.singletonMap("bar", component2));
 
@@ -2114,7 +1994,7 @@ public class AmbariManagementControllerImplTest {
         componentHost1, componentHost2, componentHost3, hostComponentStateDAO, serviceComponentDesiredStateDAO);
 
     //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
+    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, metadataGenerator, injector);
     setAmbariMetaInfo(ambariMetaInfo, controller);
 
     Set<ServiceComponentHostResponse> setResponses = controller.getHostComponents(setRequests);
@@ -2175,10 +2055,6 @@ public class AmbariManagementControllerImplTest {
     expect(serviceInfo.getOsSpecifics()).andReturn(osSpecificsService);
     expect(stackInfo.getOsSpecifics()).andReturn(osSpecificsStack);
 
-    injector.injectMembers(capture(controllerCapture));
-    expect(injector.getInstance(Gson.class)).andReturn(null);
-    expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(maintHelper).anyTimes();
-    expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class));
     constructorInit(injector, controllerCapture, null, maintHelper, createNiceMock(KerberosHelper.class),
         null, null);
 
@@ -2203,103 +2079,6 @@ public class AmbariManagementControllerImplTest {
   }
 
   @Test
-  public void testCreateDefaultHostParams() throws Exception {
-    String clusterName = "c1";
-    String SOME_STACK_NAME = "SomeStackName";
-    String SOME_STACK_VERSION = "1.0";
-    String MYSQL_JAR = "MYSQL_JAR";
-    String JAVA_HOME = "javaHome";
-    String JDK_NAME = "jdkName";
-    String JCE_NAME = "jceName";
-    String OJDBC_JAR_NAME = "OjdbcJarName";
-    String SERVER_DB_NAME = "ServerDBName";
-    Map<PropertyInfo, String> notManagedHdfsPathMap = new HashMap<>();
-    PropertyInfo propertyInfo1 = new PropertyInfo();
-    propertyInfo1.setName("1");
-    PropertyInfo propertyInfo2 = new PropertyInfo();
-    propertyInfo2.setName("2");
-    notManagedHdfsPathMap.put(propertyInfo1, "/tmp");
-    notManagedHdfsPathMap.put(propertyInfo2, "/apps/falcon");
-
-    Set<String> notManagedHdfsPathSet = new HashSet<>(Arrays.asList("/tmp", "/apps/falcon"));
-    Gson gson = new Gson();
-
-    ActionManager manager = createNiceMock(ActionManager.class);
-    StackId stackId = createNiceMock(StackId.class);
-    Cluster cluster = createNiceMock(Cluster.class);
-    Injector injector = createNiceMock(Injector.class);
-    Configuration configuration = createNiceMock(Configuration.class);
-    ConfigHelper configHelper = createNiceMock(ConfigHelper.class);
-
-    Map<String, DesiredConfig> desiredConfigs = new HashMap<>();
-
-    expect(cluster.getClusterName()).andReturn(clusterName);
-    expect(cluster.getDesiredStackVersion()).andReturn(stackId);
-    expect(cluster.getDesiredConfigs()).andReturn(desiredConfigs);
-    expect(stackId.getStackName()).andReturn(SOME_STACK_NAME).anyTimes();
-    expect(stackId.getStackVersion()).andReturn(SOME_STACK_VERSION).anyTimes();
-    expect(configuration.getMySQLJarName()).andReturn(MYSQL_JAR);
-    expect(configuration.getJavaHome()).andReturn(JAVA_HOME);
-    expect(configuration.getJDKName()).andReturn(JDK_NAME);
-    expect(configuration.getJCEName()).andReturn(JCE_NAME);
-    expect(configuration.getOjdbcJarName()).andReturn(OJDBC_JAR_NAME);
-    expect(configuration.getServerDBName()).andReturn(SERVER_DB_NAME);
-    expect(configuration.getJavaVersion()).andReturn(8);
-    expect(configuration.areHostsSysPrepped()).andReturn("true");
-    expect(configuration.getGplLicenseAccepted()).andReturn(false);
-    expect(configuration.getDatabaseConnectorNames()).andReturn(new HashMap<>()).anyTimes();
-    expect(configuration.getPreviousDatabaseConnectorNames()).andReturn(new HashMap<>()).anyTimes();
-    expect(configHelper.getPropertiesWithPropertyType(stackId,
-        PropertyInfo.PropertyType.NOT_MANAGED_HDFS_PATH, cluster, desiredConfigs)).andReturn(
-            notManagedHdfsPathMap);
-    expect(configHelper.filterInvalidPropertyValues(notManagedHdfsPathMap, NOT_MANAGED_HDFS_PATH_LIST)).andReturn(
-            notManagedHdfsPathSet);
-
-    replay(manager, clusters, cluster, injector, stackId, configuration, configHelper);
-
-    AmbariManagementControllerImpl ambariManagementControllerImpl = createMockBuilder(
-        AmbariManagementControllerImpl.class).withConstructor(manager, clusters,
-            injector).createNiceMock();
-
-    replay(ambariManagementControllerImpl);
-
-    // Inject configuration manually
-    Class<?> amciClass = AmbariManagementControllerImpl.class;
-    Field f = amciClass.getDeclaredField("configs");
-    f.setAccessible(true);
-    f.set(ambariManagementControllerImpl, configuration);
-
-    AmbariCustomCommandExecutionHelper helper = new AmbariCustomCommandExecutionHelper();
-    Class<?> helperClass = AmbariCustomCommandExecutionHelper.class;
-    f = helperClass.getDeclaredField("managementController");
-    f.setAccessible(true);
-    f.set(helper, ambariManagementControllerImpl);
-
-    f = helperClass.getDeclaredField("configs");
-    f.setAccessible(true);
-    f.set(helper, configuration);
-
-    f = helperClass.getDeclaredField("configHelper");
-    f.setAccessible(true);
-    f.set(helper, configHelper);
-
-    f = helperClass.getDeclaredField("gson");
-    f.setAccessible(true);
-    f.set(helper, gson);
-
-    Map<String, String> defaultHostParams = helper.createDefaultHostParams(cluster, stackId);
-
-    assertEquals(16, defaultHostParams.size());
-    assertEquals(MYSQL_JAR, defaultHostParams.get(DB_DRIVER_FILENAME));
-    assertEquals(SOME_STACK_NAME, defaultHostParams.get(STACK_NAME));
-    assertEquals(SOME_STACK_VERSION, defaultHostParams.get(STACK_VERSION));
-    assertEquals("true", defaultHostParams.get(HOST_SYS_PREPPED));
-    assertEquals("8", defaultHostParams.get(JAVA_VERSION));
-    assertNotNull(defaultHostParams.get(NOT_MANAGED_HDFS_PATH_LIST));
-    assertTrue(defaultHostParams.get(NOT_MANAGED_HDFS_PATH_LIST).contains("/tmp"));
-  }
-
-  @Test
   public void testSynchronizeLdapUsersAndGroups() throws Exception {
 
     Set<String> userSet = new HashSet<>();
@@ -2383,7 +2162,7 @@ public class AmbariManagementControllerImplTest {
   private class NestedTestClass extends AmbariManagementControllerImpl {
 
     public NestedTestClass(ActionManager actionManager, Clusters clusters, Injector injector, OsFamily osFamilyMock) throws Exception {
-      super(actionManager, clusters, injector);
+      super(actionManager, clusters, metadataGenerator, injector);
       osFamily = osFamilyMock;
     }
 
@@ -2445,7 +2224,7 @@ public class AmbariManagementControllerImplTest {
     replay(injector, cluster, clusters, ambariMetaInfo, service, serviceComponent, serviceComponentHost, stackId);
 
     // test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
+    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, metadataGenerator, injector);
     setAmbariMetaInfo(ambariMetaInfo, controller);
 
     controller.registerRackChange("c1");
@@ -2475,7 +2254,7 @@ public class AmbariManagementControllerImplTest {
     ambariMetaInfo.init();
     expectLastCall();
     replay(ambariMetaInfo, injector);
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
+    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, metadataGenerator, injector);
     setAmbariMetaInfo(ambariMetaInfo, controller);
     Assert.assertEquals(mpackResponse, controller.registerMpack(mpackRequest));
   }
@@ -2496,21 +2275,22 @@ public class AmbariManagementControllerImplTest {
     expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes();
     expect(ambariMetaInfo.getModules(mpackId)).andReturn(packletArrayList).atLeastOnce();
     replay(ambariMetaInfo, injector);
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
+    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, metadataGenerator, injector);
     setAmbariMetaInfo(ambariMetaInfo, controller);
 
     Assert.assertEquals(packletArrayList, controller.getModules(mpackId));
   }
 
   public static void constructorInit(Injector injector, Capture<AmbariManagementController> controllerCapture, Gson gson,
-                               MaintenanceStateHelper maintenanceStateHelper, KerberosHelper kerberosHelper,
-                               Provider<MetadataHolder> m_metadataHolder, Provider<AgentConfigsHolder> m_agentConfigsHolder) {
+    MaintenanceStateHelper maintenanceStateHelper, KerberosHelper kerberosHelper,
+    MetadataHolder metadataHolder, AgentConfigsHolder agentConfigsHolder
+  ) {
     injector.injectMembers(capture(controllerCapture));
     expect(injector.getInstance(Gson.class)).andReturn(gson);
     expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(maintenanceStateHelper);
     expect(injector.getInstance(KerberosHelper.class)).andReturn(kerberosHelper);
-    expect(injector.getProvider(MetadataHolder.class)).andReturn(m_metadataHolder);
-    expect(injector.getProvider(AgentConfigsHolder.class)).andReturn(m_agentConfigsHolder);
+    expect(injector.getProvider(MetadataHolder.class)).andReturn(() -> metadataHolder);
+    expect(injector.getProvider(AgentConfigsHolder.class)).andReturn(() -> agentConfigsHolder);
   }
 
   public static void constructorInit(Injector injector, Capture<AmbariManagementController> controllerCapture,
@@ -2519,7 +2299,7 @@ public class AmbariManagementControllerImplTest {
     expect(injector.getInstance(Gson.class)).andReturn(null);
     expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null);
     expect(injector.getInstance(KerberosHelper.class)).andReturn(kerberosHelper);
-    expect(injector.getProvider(MetadataHolder.class)).andReturn(null);
-    expect(injector.getProvider(AgentConfigsHolder.class)).andReturn(null);
+    expect(injector.getProvider(MetadataHolder.class)).andReturn(() -> null);
+    expect(injector.getProvider(AgentConfigsHolder.class)).andReturn(() -> null);
   }
 }
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index b67185b..afaf395 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -19,7 +19,6 @@
 package org.apache.ambari.server.controller;
 
 
-
 import static java.util.stream.Collectors.toSet;
 import static org.easymock.EasyMock.createMock;
 import static org.easymock.EasyMock.createNiceMock;
@@ -96,6 +95,7 @@ import org.apache.ambari.server.controller.utilities.PredicateBuilder;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
 import org.apache.ambari.server.customactions.ActionDefinition;
 import org.apache.ambari.server.metadata.ActionMetadata;
+import org.apache.ambari.server.metadata.ClusterMetadataGenerator;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.orm.OrmTestHelper;
@@ -8552,6 +8552,7 @@ public class AmbariManagementControllerTest {
     Capture<AmbariManagementController> controllerCapture = EasyMock.newCapture();
     Clusters clusters = createNiceMock(Clusters.class);
     MaintenanceStateHelper maintHelper = createNiceMock(MaintenanceStateHelper.class);
+    ClusterMetadataGenerator metadataGenerator = createNiceMock(ClusterMetadataGenerator.class);
 
     Cluster cluster = createNiceMock(Cluster.class);
     Service service = createNiceMock(Service.class);
@@ -8580,12 +8581,11 @@ public class AmbariManagementControllerTest {
 
     expect(service.convertToResponse()).andReturn(response);
 
-
     // replay mocks
-    replay(maintHelper, injector, clusters, cluster, service, response, hostComponentStateDAO, serviceComponentDesiredStateDAO);
+    replay(maintHelper, injector, clusters, cluster, service, response, hostComponentStateDAO, serviceComponentDesiredStateDAO, metadataGenerator);
 
     //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
+    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, metadataGenerator, injector);
     Set<ServiceResponse> setResponses = ServiceResourceProviderTest.getServices(controller, setRequests);
 
     // assert and verify
@@ -8606,6 +8606,7 @@ public class AmbariManagementControllerTest {
     Capture<AmbariManagementController> controllerCapture = EasyMock.newCapture();
     Clusters clusters = createNiceMock(Clusters.class);
     MaintenanceStateHelper maintHelper = createNiceMock(MaintenanceStateHelper.class);
+    ClusterMetadataGenerator metadataGenerator = createNiceMock(ClusterMetadataGenerator.class);
     Cluster cluster = createNiceMock(Cluster.class);
 
     // requests
@@ -8629,10 +8630,10 @@ public class AmbariManagementControllerTest {
     expect(injector.getInstance(ServiceComponentDesiredStateDAO.class)).andReturn(serviceComponentDesiredStateDAO).anyTimes();
 
     // replay mocks
-    replay(maintHelper, injector, clusters, cluster, hostComponentStateDAO, serviceComponentDesiredStateDAO);
+    replay(maintHelper, injector, clusters, cluster, hostComponentStateDAO, serviceComponentDesiredStateDAO, metadataGenerator);
 
     //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
+    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, metadataGenerator, injector);
 
     // assert that exception is thrown in case where there is a single request
     try {
@@ -8657,6 +8658,7 @@ public class AmbariManagementControllerTest {
     Capture<AmbariManagementController> controllerCapture = EasyMock.newCapture();
     Clusters clusters = createNiceMock(Clusters.class);
     MaintenanceStateHelper maintHelper = createNiceMock(MaintenanceStateHelper.class);
+    ClusterMetadataGenerator metadataGenerator = createNiceMock(ClusterMetadataGenerator.class);
 
     Cluster cluster = createNiceMock(Cluster.class);
     Service service1 = createNiceMock(Service.class);
@@ -8700,10 +8702,10 @@ public class AmbariManagementControllerTest {
 
     // replay mocks
     replay(maintHelper, injector, clusters, cluster, service1, service2,
-      response, response2, hostComponentStateDAO, serviceComponentDesiredStateDAO);
+      response, response2, hostComponentStateDAO, serviceComponentDesiredStateDAO, metadataGenerator);
 
     //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
+    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, metadataGenerator, injector);
     Set<ServiceResponse> setResponses = ServiceResourceProviderTest.getServices(controller, setRequests);
 
     // assert and verify
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ComponentResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ComponentResourceProviderTest.java
index 4ec5d45..b49ae01 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ComponentResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ComponentResourceProviderTest.java
@@ -65,6 +65,7 @@ import org.apache.ambari.server.controller.spi.ResourceProvider;
 import org.apache.ambari.server.controller.spi.SystemException;
 import org.apache.ambari.server.controller.utilities.PredicateBuilder;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
+import org.apache.ambari.server.metadata.ClusterMetadataGenerator;
 import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
 import org.apache.ambari.server.orm.dao.ServiceComponentDesiredStateDAO;
 import org.apache.ambari.server.security.TestAuthenticationFactory;
@@ -140,7 +141,7 @@ public class ComponentResourceProviderTest {
     expect(managementController.getServiceComponentFactory()).andReturn(serviceComponentFactory);
 
     expect(clusters.getCluster("Cluster100")).andReturn(cluster).anyTimes();
-    expect(cluster.getService("Service100")).andReturn(service).anyTimes();
+    expect(cluster.getService("CORE", "Service100")).andReturn(service).anyTimes();
     expect(cluster.getClusterId()).andReturn(2L).anyTimes();
 
     expect(service.getStackId()).andReturn(stackId).anyTimes();
@@ -180,7 +181,6 @@ public class ComponentResourceProviderTest {
     properties.put(ComponentResourceProvider.COMPONENT_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
     properties.put(ComponentResourceProvider.COMPONENT_SERVICE_GROUP_NAME_PROPERTY_ID, "CORE");
     properties.put(ComponentResourceProvider.COMPONENT_SERVICE_NAME_PROPERTY_ID, "Service100");
-    properties.put(ComponentResourceProvider.COMPONENT_SERVICE_GROUP_NAME_PROPERTY_ID, "CORE");
     properties.put(ComponentResourceProvider.COMPONENT_COMPONENT_NAME_PROPERTY_ID, "Component100");
 
     propertySet.add(properties);
@@ -534,7 +534,7 @@ public class ComponentResourceProviderTest {
     expect(managementController.getAmbariMetaInfo()).andReturn(ambariMetaInfo);
 
     expect(clusters.getCluster(CLUSTER_NAME)).andReturn(cluster);
-    expect(cluster.getService(SERVICE_NAME)).andReturn(service);
+    expect(cluster.getService(null, SERVICE_NAME)).andReturn(service);
     expect(cluster.getClusterId()).andReturn(CLUSTER_ID).anyTimes();
 
     expect(service.getServiceComponent("Component100")).andReturn(serviceComponent);
@@ -837,7 +837,7 @@ public class ComponentResourceProviderTest {
 
     // getComponents
     expect(clusters.getCluster("cluster1")).andReturn(cluster);
-    expect(cluster.getService("service1")).andReturn(service);
+    expect(cluster.getService("CORE", "service1")).andReturn(service);
     expect(service.getName()).andReturn("service1").anyTimes();
     expect(service.getServiceType()).andReturn("service1").anyTimes();
     expect(service.getServiceComponent("component1")).andReturn(component);
@@ -901,8 +901,8 @@ public class ComponentResourceProviderTest {
     // getComponents
     expect(clusters.getCluster("cluster1")).andReturn(cluster).anyTimes();
     expect(cluster.getDesiredStackVersion()).andReturn(stackId).anyTimes();
-    expect(cluster.getService("service1")).andReturn(service).anyTimes();
-    expect(cluster.getService("service2")).andThrow(new ObjectNotFoundException("service2"));
+    expect(cluster.getService("CORE", "service1")).andReturn(service).anyTimes();
+    expect(cluster.getService("CORE", "service2")).andThrow(new ObjectNotFoundException("service2"));
 
     expect(ambariMetaInfo.getComponent("stackName", "1", "service1", "component3")).andReturn(component3Info);
     expect(ambariMetaInfo.getComponent("stackName", "1", "service1", "component4")).andReturn(component4Info);
@@ -961,6 +961,7 @@ public class ComponentResourceProviderTest {
     MaintenanceStateHelper maintHelper = createNiceMock(MaintenanceStateHelper.class);
     Cluster cluster = createNiceMock(Cluster.class);
     Service service = createNiceMock(Service.class);
+    ClusterMetadataGenerator metadataGenerator = createNiceMock(ClusterMetadataGenerator.class);
     HostComponentStateDAO hostComponentStateDAO = createMock(HostComponentStateDAO.class);
     ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO = createMock(ServiceComponentDesiredStateDAO.class);
 
@@ -981,14 +982,14 @@ public class ComponentResourceProviderTest {
 
     // getComponents
     expect(clusters.getCluster("cluster1")).andReturn(cluster);
-    expect(cluster.getService("service1")).andReturn(service);
+    expect(cluster.getService("CORE", "service1")).andReturn(service);
     expect(service.getServiceComponent("component1")).andThrow(
         new ServiceComponentNotFoundException("cluster1", "service1", "service1", "CORE", "component1"));
     // replay mocks
-    replay(maintHelper, injector, clusters, cluster, service);
+    replay(maintHelper, injector, clusters, cluster, service, metadataGenerator);
 
     //test
-    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, injector);
+    AmbariManagementController controller = new AmbariManagementControllerImpl(null, clusters, metadataGenerator, injector);
 
     // assert that exception is thrown in case where there is a single request
     try {
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog252Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog252Test.java
index 2c5c442..8f9616d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog252Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog252Test.java
@@ -67,6 +67,7 @@ import org.apache.ambari.server.events.MetadataUpdateEvent;
 import org.apache.ambari.server.hooks.HookService;
 import org.apache.ambari.server.hooks.users.UserHookService;
 import org.apache.ambari.server.metadata.CachedRoleCommandOrderProvider;
+import org.apache.ambari.server.metadata.ClusterMetadataGenerator;
 import org.apache.ambari.server.metadata.RoleCommandOrderProvider;
 import org.apache.ambari.server.orm.DBAccessor;
 import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
@@ -218,6 +219,7 @@ public class UpgradeCatalog252Test {
     final Service sparkMock = createNiceMock(Service.class);
     final Service spark2Mock = createNiceMock(Service.class);
     final AmbariManagementController controller = createMock(AmbariManagementController.class);
+    final ClusterMetadataGenerator metadataGenerator = createMock(ClusterMetadataGenerator.class);
 
     StackId stackId = new StackId("HDP", "2.2");
 
@@ -257,18 +259,18 @@ public class UpgradeCatalog252Test {
     expect(controller.createConfig(eq(cluster), eq(stackId), eq("livy2-conf"), capture(captureLivy2ConfProperties), anyString(), anyObject(Map.class), anyLong()))
         .andReturn(livy2ConfNew)
         .once();
-    expect(controller.getClusterMetadataOnConfigsUpdate(eq(cluster)))
+    expect(metadataGenerator.getClusterMetadataOnConfigsUpdate(eq(cluster)))
         .andReturn(createNiceMock(MetadataUpdateEvent.class))
         .times(2);
 
-    replay(clusters, cluster, zeppelinEnv, livy2Conf, livyConf, controller, sparkMock, spark2Mock);
+    replay(clusters, cluster, zeppelinEnv, livy2Conf, livyConf, controller, sparkMock, spark2Mock, metadataGenerator);
 
 
     Injector injector = getInjector(clusters, controller);
     UpgradeCatalog252 upgradeCatalog252 = injector.getInstance(UpgradeCatalog252.class);
     upgradeCatalog252.fixLivySuperusers();
 
-    verify(clusters, cluster, zeppelinEnv, livy2Conf, livyConf, controller, sparkMock, spark2Mock);
+    verify(clusters, cluster, zeppelinEnv, livy2Conf, livyConf, controller, sparkMock, spark2Mock, metadataGenerator);
 
     Assert.assertTrue(captureLivyConfProperties.hasCaptured());
     Assert.assertEquals("some_user,zeppelin_user", captureLivyConfProperties.getValue().get("livy.superusers"));
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog260Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog260Test.java
index b68164e..647a748 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog260Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog260Test.java
@@ -85,6 +85,7 @@ import org.apache.ambari.server.hooks.users.PostUserCreationHookContext;
 import org.apache.ambari.server.hooks.users.UserCreatedEvent;
 import org.apache.ambari.server.hooks.users.UserHookService;
 import org.apache.ambari.server.metadata.CachedRoleCommandOrderProvider;
+import org.apache.ambari.server.metadata.ClusterMetadataGenerator;
 import org.apache.ambari.server.metadata.RoleCommandOrderProvider;
 import org.apache.ambari.server.orm.DBAccessor;
 import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
@@ -654,6 +655,7 @@ public class UpgradeCatalog260Test {
     final Config coreSiteConfNew = createMock(Config.class);
     final Service hdfsMock = createNiceMock(Service.class);
     final AmbariManagementController controller = injector.getInstance(AmbariManagementController.class);
+    final ClusterMetadataGenerator metadataGenerator = createMock(ClusterMetadataGenerator.class);
 
     Capture<? extends Map<String, String>> captureCoreSiteConfProperties = newCapture();
 
@@ -679,11 +681,11 @@ public class UpgradeCatalog260Test {
     expect(controller.createConfig(eq(cluster), anyObject(StackId.class), eq("core-site"), capture(captureCoreSiteConfProperties), anyString(), anyObject(Map.class), anyLong()))
         .andReturn(coreSiteConfNew)
         .once();
-    expect(controller.getClusterMetadataOnConfigsUpdate(eq(cluster)))
+    expect(metadataGenerator.getClusterMetadataOnConfigsUpdate(eq(cluster)))
         .andReturn(createNiceMock(MetadataUpdateEvent.class))
         .once();
 
-    replay(clusters, cluster, zeppelinEnvConf, coreSiteConf, coreSiteConfNew, controller);
+    replay(clusters, cluster, zeppelinEnvConf, coreSiteConf, coreSiteConfNew, controller, metadataGenerator);
 
     UpgradeCatalog260 upgradeCatalog260 = injector.getInstance(UpgradeCatalog260.class);
     upgradeCatalog260.ensureZeppelinProxyUserConfigs();
@@ -804,15 +806,16 @@ public class UpgradeCatalog260Test {
     expect(controller.createConfig(eq(cluster), eq(stackId), eq("hive-interactive-site"), capture(captureHsiProperties), anyString(), anyObject(Map.class), eq(1L)))
             .andReturn(null)
             .anyTimes();
-    expect(controller.getClusterMetadataOnConfigsUpdate(eq(cluster)))
+    final ClusterMetadataGenerator metadataGenerator = createMock(ClusterMetadataGenerator.class);
+    expect(metadataGenerator.getClusterMetadataOnConfigsUpdate(eq(cluster)))
         .andReturn(createNiceMock(MetadataUpdateEvent.class))
         .once();
 
-    replay(artifactDAO, artifactEntity, cluster, clusters, config, newConfig, hsiConfig, newHsiConfig, response, response1, controller, stackId);
+    replay(artifactDAO, artifactEntity, cluster, clusters, config, newConfig, hsiConfig, newHsiConfig, response, response1, controller, stackId, metadataGenerator);
 
     UpgradeCatalog260 upgradeCatalog260 = injector.getInstance(UpgradeCatalog260.class);
     upgradeCatalog260.updateKerberosDescriptorArtifact(artifactDAO, artifactEntity);
-    verify(artifactDAO, artifactEntity, cluster, clusters, config, newConfig, response, controller, stackId);
+    verify(artifactDAO, artifactEntity, cluster, clusters, config, newConfig, response, controller, stackId, metadataGenerator);
     KerberosDescriptor kerberosDescriptorUpdated = new KerberosDescriptorFactory().createInstance(captureMap.getValue());
     Assert.assertNotNull(kerberosDescriptorUpdated);
 
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog270Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog270Test.java
index 047d8cf..6601816 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog270Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog270Test.java
@@ -181,6 +181,7 @@ import org.apache.ambari.server.events.MetadataUpdateEvent;
 import org.apache.ambari.server.hooks.HookService;
 import org.apache.ambari.server.hooks.users.UserHookService;
 import org.apache.ambari.server.metadata.CachedRoleCommandOrderProvider;
+import org.apache.ambari.server.metadata.ClusterMetadataGenerator;
 import org.apache.ambari.server.metadata.RoleCommandOrderProvider;
 import org.apache.ambari.server.orm.DBAccessor;
 import org.apache.ambari.server.orm.dao.AmbariConfigurationDAO;
@@ -1128,11 +1129,11 @@ public class UpgradeCatalog270Test {
         .addMockedMethod("createConfiguration")
         .addMockedMethod("getClusters", new Class[]{})
         .addMockedMethod("createConfig")
-        .addMockedMethod("getClusterMetadataOnConfigsUpdate", Cluster.class)
         .createMock();
     expect(controller.getClusters()).andReturn(clusters).anyTimes();
     expect(controller.createConfig(eq(cluster1), eq(stackId), eq("kerberos-env"), capture(capturedProperties), anyString(), anyObject(Map.class), 1L)).andReturn(newConfig).once();
-    expect(controller.getClusterMetadataOnConfigsUpdate(eq(cluster1))).andReturn(createNiceMock(MetadataUpdateEvent.class)).once();
+    final ClusterMetadataGenerator metadataGenerator = createMock(ClusterMetadataGenerator.class);
+    expect(metadataGenerator.getClusterMetadataOnConfigsUpdate(eq(cluster1))).andReturn(createNiceMock(MetadataUpdateEvent.class)).once();
 
 
     Injector injector = createNiceMock(Injector.class);
@@ -1144,7 +1145,7 @@ public class UpgradeCatalog270Test {
     expect(kerberosHelperMock.createTemporaryDirectory()).andReturn(new File("/invalid/file/path")).times(2);
     expect(injector.getInstance(KerberosHelper.class)).andReturn(kerberosHelperMock).anyTimes();
 
-    replay(controller, clusters, cluster1, cluster2, configWithGroup, configWithoutGroup, newConfig, response, injector, kerberosHelperMock);
+    replay(controller, clusters, cluster1, cluster2, configWithGroup, configWithoutGroup, newConfig, response, injector, kerberosHelperMock, metadataGenerator);
 
     Field field = AbstractUpgradeCatalog.class.getDeclaredField("configuration");
 
@@ -1160,7 +1161,7 @@ public class UpgradeCatalog270Test {
     field.set(upgradeCatalog270, createNiceMock(Configuration.class));
     upgradeCatalog270.updateKerberosConfigurations();
 
-    verify(controller, clusters, cluster1, cluster2, configWithGroup, configWithoutGroup, newConfig, response, injector, upgradeCatalog270);
+    verify(controller, clusters, cluster1, cluster2, configWithGroup, configWithoutGroup, newConfig, response, injector, upgradeCatalog270, metadataGenerator);
 
 
     Assert.assertEquals(1, capturedProperties.getValues().size());

-- 
To stop receiving notification emails like this one, please contact
adoroszlai@apache.org.