Posted to commits@ambari.apache.org by ad...@apache.org on 2018/07/13 15:55:26 UTC

[ambari] branch branch-feature-AMBARI-14714 updated: AMBARI-14714. Fix few more unit tests (#1761)

This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch branch-feature-AMBARI-14714
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/branch-feature-AMBARI-14714 by this push:
     new 1d4447e  AMBARI-14714. Fix few more unit tests (#1761)
1d4447e is described below

commit 1d4447ee2f2652f84fa17a9814aa1ebf30664b0f
Author: Doroszlai, Attila <64...@users.noreply.github.com>
AuthorDate: Fri Jul 13 17:55:23 2018 +0200

    AMBARI-14714. Fix few more unit tests (#1761)
---
 .../internal/HostComponentResourceProvider.java    |  4 +-
 .../server/state/ServiceComponentSupport.java      |  2 +-
 .../server/agent/DummyHeartbeatConstants.java      |  2 +-
 .../ambari/server/agent/HeartbeatTestHelper.java   |  7 ++-
 .../configuration/RecoveryConfigHelperTest.java    | 23 +++----
 .../controller/AmbariManagementControllerTest.java | 73 ++++++++++------------
 .../server/state/ServiceComponentSupportTest.java  |  3 +-
 .../server/upgrade/UpgradeCatalog252Test.java      | 11 +---
 8 files changed, 56 insertions(+), 69 deletions(-)

diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
index 3dab4c8..7272303 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
@@ -578,6 +578,8 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
     }
 
     for (ServiceComponentHostRequest request : requests) {
+      logRequestInfo("Received a updateHostComponent request", request);
+
       validateServiceComponentHostRequest(request);
 
       Cluster cluster = clusters.getCluster(request.getClusterName());
@@ -595,8 +597,6 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
       ServiceComponent sc = getServiceComponent(
           request.getClusterName(), request.getServiceGroupName(), request.getServiceName(), request.getComponentName());
 
-      logRequestInfo("Received a updateHostComponent request", request);
-
       if((clusterName == null || clusterName.isEmpty())
               && (request.getClusterName() != null
               && !request.getClusterName().isEmpty())) {
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentSupport.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentSupport.java
index 6332705..dc93faa 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentSupport.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentSupport.java
@@ -51,7 +51,7 @@ public class ServiceComponentSupport {
    * Collects the service names from the cluster which are not supported (service doesn't exist or was deleted) in the given stack.
    */
   public Set<String> unsupportedServices(Cluster cluster, String stackName, String stackVersion) {
-    Set<String> serviceNames = cluster.getServices().stream().map(Service::getName).collect(toSet());
+    Set<String> serviceNames = cluster.getServices().stream().map(Service::getServiceType).collect(toSet());
     return serviceNames.stream()
       .filter(serviceName -> !isServiceSupported(serviceName, stackName, stackVersion))
       .collect(toSet());
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/DummyHeartbeatConstants.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/DummyHeartbeatConstants.java
index 45ec647..f869c2f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/DummyHeartbeatConstants.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/DummyHeartbeatConstants.java
@@ -41,7 +41,7 @@ public interface DummyHeartbeatConstants {
 
   String HDFS = "HDFS";
   String HBASE = "HBASE";
-  String CORE_SERVICE_GROUP = "core";
+  String CORE_SERVICE_GROUP = "CORE";
 
   String DATANODE = Role.DATANODE.name();
   String NAMENODE = Role.NAMENODE.name();
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
index 4fa806f..f552288 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
@@ -26,6 +26,7 @@ import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyOs;
 import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyRepositoryVersion;
 import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyStackId;
 import static org.apache.ambari.server.agent.DummyHeartbeatConstants.HBASE;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.HDFS;
 
 import java.lang.reflect.Method;
 import java.util.ArrayList;
@@ -211,7 +212,7 @@ public class HeartbeatTestHelper {
     serviceGroupEntity.setClusterId(clusterId);
     serviceGroupEntity.setClusterEntity(clusterEntity);
     serviceGroupEntity.setStack(stackEntity);
-    serviceGroupEntity.setServiceGroupName("CORE");
+    serviceGroupEntity.setServiceGroupName(CORE_SERVICE_GROUP);
     serviceGroupEntity.setServiceGroupId(1L);
 
     ServiceDesiredStateEntity serviceDesiredStateEntity = new ServiceDesiredStateEntity();
@@ -220,8 +221,8 @@ public class HeartbeatTestHelper {
     serviceDesiredStateEntity.setServiceId(100L);
 
     ClusterServiceEntity clusterServiceEntity = new ClusterServiceEntity();
-    clusterServiceEntity.setServiceType("HDFS");
-    clusterServiceEntity.setServiceName("HDFS");
+    clusterServiceEntity.setServiceType(HDFS);
+    clusterServiceEntity.setServiceName(HDFS);
     clusterServiceEntity.setServiceGroupEntity(serviceGroupEntity);
     clusterServiceEntity.setClusterId(clusterId);
     clusterServiceEntity.setServiceId(100L);
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
index 09469a2..5ff64ce 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/configuration/RecoveryConfigHelperTest.java
@@ -18,6 +18,8 @@
 
 package org.apache.ambari.server.configuration;
 
+import static java.util.stream.Collectors.toSet;
+import static org.apache.ambari.server.agent.DummyHeartbeatConstants.CORE_SERVICE_GROUP;
 import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DATANODE;
 import static org.apache.ambari.server.agent.DummyHeartbeatConstants.DummyHostname1;
 import static org.apache.ambari.server.agent.DummyHeartbeatConstants.HDFS;
@@ -52,6 +54,7 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
+import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Lists;
 import com.google.common.eventbus.EventBus;
 import com.google.inject.Guice;
@@ -107,8 +110,7 @@ public class RecoveryConfigHelperTest {
   public void testServiceComponentInstalled() throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
 
-    ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", cluster.getDesiredStackVersion());
-    Service hdfs = cluster.addService(serviceGroup, HDFS, HDFS);
+    Service hdfs = cluster.getService(CORE_SERVICE_GROUP, HDFS);
 
     hdfs.addServiceComponent(DATANODE, DATANODE).setRecoveryEnabled(true);
     hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
@@ -140,8 +142,7 @@ public class RecoveryConfigHelperTest {
   public void testServiceComponentUninstalled()
       throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
-    ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", cluster.getDesiredStackVersion().getStackId());
-    Service hdfs = cluster.addService(serviceGroup, HDFS, HDFS);
+    Service hdfs = cluster.getService(CORE_SERVICE_GROUP, HDFS);
 
     hdfs.addServiceComponent(DATANODE, DATANODE).setRecoveryEnabled(true);
     hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
@@ -175,8 +176,7 @@ public class RecoveryConfigHelperTest {
   public void testClusterEnvConfigChanged()
       throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
-    ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", cluster.getDesiredStackVersion().getStackId());
-    Service hdfs = cluster.addService(serviceGroup, HDFS, HDFS);
+    Service hdfs = cluster.getService(CORE_SERVICE_GROUP, HDFS);
 
     hdfs.addServiceComponent(DATANODE, DATANODE).setRecoveryEnabled(true);
     hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
@@ -184,7 +184,7 @@ public class RecoveryConfigHelperTest {
 
     // Get the recovery configuration
     RecoveryConfig recoveryConfig = recoveryConfigHelper.getRecoveryConfig(cluster.getClusterName(), DummyHostname1);
-    assertEquals(recoveryConfig.getEnabledComponents(), "DATANODE");
+    assertEquals(ImmutableSet.of("DATANODE"), recoveryConfig.getEnabledComponents().stream().map(RecoveryConfigComponent::getComponentName).collect(toSet()));
 
     // Get cluster-env config and turn off recovery for the cluster
     Config config = cluster.getDesiredConfigByType("cluster-env");
@@ -208,8 +208,7 @@ public class RecoveryConfigHelperTest {
   @Test
   public void testMaintenanceModeChanged() throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
-    ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", cluster.getDesiredStackVersion().getStackId());
-    Service hdfs = cluster.addService(serviceGroup, HDFS, HDFS);
+    Service hdfs = cluster.getService(CORE_SERVICE_GROUP, HDFS);
 
     hdfs.addServiceComponent(DATANODE, DATANODE).setRecoveryEnabled(true);
     hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
@@ -241,8 +240,7 @@ public class RecoveryConfigHelperTest {
   @Test
   public void testServiceComponentRecoveryChanged() throws Exception {
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
-    ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", cluster.getDesiredStackVersion().getStackId());
-    Service hdfs = cluster.addService(serviceGroup, HDFS, HDFS);
+    Service hdfs = cluster.getService(CORE_SERVICE_GROUP, HDFS);
 
     hdfs.addServiceComponent(DATANODE, DATANODE).setRecoveryEnabled(true);
     hdfs.getServiceComponent(DATANODE).addServiceComponentHost(DummyHostname1);
@@ -279,8 +277,7 @@ public class RecoveryConfigHelperTest {
     Cluster cluster = getDummyCluster(hostNames);
 
     // Add HDFS service with DATANODE component to the cluster
-    ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", cluster.getDesiredStackVersion().getStackId());
-    Service hdfs = cluster.addService(serviceGroup, HDFS, HDFS);
+    Service hdfs = cluster.getService(CORE_SERVICE_GROUP, HDFS);
 
     hdfs.addServiceComponent(DATANODE, DATANODE).setRecoveryEnabled(true);
 
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index 302b445..9342132 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -19,6 +19,7 @@
 package org.apache.ambari.server.controller;
 
 
+import static java.util.stream.Collectors.toMap;
 import static java.util.stream.Collectors.toSet;
 import static org.easymock.EasyMock.createMock;
 import static org.easymock.EasyMock.createNiceMock;
@@ -129,6 +130,7 @@ import org.apache.ambari.server.state.HostComponentAdminState;
 import org.apache.ambari.server.state.HostState;
 import org.apache.ambari.server.state.MaintenanceState;
 import org.apache.ambari.server.state.OsSpecific;
+import org.apache.ambari.server.state.PropertyInfo;
 import org.apache.ambari.server.state.RepositoryInfo;
 import org.apache.ambari.server.state.SecurityType;
 import org.apache.ambari.server.state.Service;
@@ -359,6 +361,18 @@ public class AmbariManagementControllerTest {
   private void createCluster(String clusterName) throws Exception{
     ClusterRequest r = new ClusterRequest(null, clusterName, State.INSTALLED.name(), SecurityType.NONE, HDP_0_1, null);
     controller.createCluster(r);
+    applyDefaultClusterSettings(controller.getClusters().getCluster(clusterName));
+  }
+
+  private void applyDefaultClusterSettings(Cluster cluster) throws AmbariException {
+    // init default cluster settings
+    Map<String, String> defaultClusterSettings =
+      controller.getAmbariMetaInfo().getClusterProperties().stream()
+        .collect(toMap(PropertyInfo::getName, PropertyInfo::getValue));
+
+    for (Map.Entry<String, String> setting : defaultClusterSettings.entrySet()) {
+      cluster.addClusterSetting(setting.getKey(), setting.getValue());
+    }
   }
 
   private Service createService(String clusterName, String serviceGroupName, String serviceName, State desiredState) throws Exception {
@@ -1095,7 +1109,7 @@ public class AmbariManagementControllerTest {
     String serviceGroupName = "CORE";
     ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, HDP_0_1);
     String serviceName = "HDFS";
-    createService(cluster1, serviceGroupName, serviceName, null);
+    Service service = createService(cluster1, serviceGroupName, serviceName, null);
     String componentName1 = "NAMENODE";
     String componentName2 = "DATANODE";
     String componentName3 = "HDFS_CLIENT";
@@ -1117,7 +1131,7 @@ public class AmbariManagementControllerTest {
 
     ConfigurationRequest cr1;
     cr1 = new ConfigurationRequest(cluster1, "cluster-env","version1",
-                                   configs, null, 1L, 1L);
+                                   configs, null, service.getServiceId(), 1L);
 
     ClusterRequest crReq = new ClusterRequest(cluster.getClusterId(), cluster1, null, null);
     crReq.setDesiredConfig(Collections.singletonList(cr1));
@@ -1184,7 +1198,7 @@ public class AmbariManagementControllerTest {
     configs.put("commands_to_retry", "START");
 
     cr1 = new ConfigurationRequest(cluster1, "cluster-env","version2",
-                                   configs, null, 1L, 1L);
+                                   configs, null, service.getServiceId(), 1L);
     crReq = new ClusterRequest(cluster.getClusterId(), cluster1, null, null);
     crReq.setDesiredConfig(Collections.singletonList(cr1));
     controller.updateClusters(Collections.singleton(crReq), null);
@@ -1215,7 +1229,7 @@ public class AmbariManagementControllerTest {
     configs.put("commands_to_retry2", "START");
 
     cr1 = new ConfigurationRequest(cluster1, "cluster-env","version3",
-                                   configs, null, 1L, 1L);
+                                   configs, null, service.getServiceId(), 1L);
     crReq = new ClusterRequest(cluster.getClusterId(), cluster1, null, null);
     crReq.setDesiredConfig(Collections.singletonList(cr1));
     controller.updateClusters(Collections.singleton(crReq), null);
@@ -2801,6 +2815,7 @@ public class AmbariManagementControllerTest {
     ClusterRequest r = new ClusterRequest(null, clusterName, stackId, null);
     controller.createCluster(r);
     Cluster c1 = clusters.getCluster(clusterName);
+    applyDefaultClusterSettings(c1);
     for (String host : hosts) {
       addHostToCluster(host, clusterName);
     }
@@ -3721,27 +3736,7 @@ public class AmbariManagementControllerTest {
     reqs.add(req3);
     reqs.add(req4);
     reqs.add(req5);
-    updateHostComponents(reqs, Collections.emptyMap(), true);
-    // Expected, now client components with STARTED status will be ignored
-
-    reqs.clear();
-    req1 = new ServiceComponentHostRequest(cluster1, serviceGroupName, null,
-        componentName1, componentName1, host1, State.INSTALLED.toString());
-    req2 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName1,
-        componentName1, componentName1, host2, State.INSTALLED.toString());
-    req3 = new ServiceComponentHostRequest(cluster1, serviceGroupName, null,
-        componentName2, componentName2, host1, State.INSTALLED.toString());
-    req4 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName1,
-        componentName2, componentName2, host2, State.INSTALLED.toString());
-    req5 = new ServiceComponentHostRequest(cluster1, serviceGroupName, serviceName1,
-        componentName3, componentName3, host1, State.INSTALLED.toString());
-    reqs.add(req1);
-    reqs.add(req2);
-    reqs.add(req3);
-    reqs.add(req4);
-    reqs.add(req5);
-    RequestStatusResponse trackAction = updateHostComponents(reqs,
-        Collections.emptyMap(), true);
+    RequestStatusResponse trackAction = updateHostComponents(reqs, Collections.emptyMap(), true);
     Assert.assertNotNull(trackAction);
 
     long requestId = trackAction.getRequestId();
@@ -8453,6 +8448,8 @@ public class AmbariManagementControllerTest {
 
     ClusterRequest clusterRequest = new ClusterRequest(null, CLUSTER_NAME, STACK_ID, null);
     amc.createCluster(clusterRequest);
+    Cluster cluster = clusters.getCluster(CLUSTER_NAME);
+    applyDefaultClusterSettings(cluster);
 
     String serviceGroupName = "CORE";
     ServiceGroupResourceProviderTest.createServiceGroup(controller, CLUSTER_NAME, serviceGroupName, STACK_ID);
@@ -8505,8 +8502,6 @@ public class AmbariManagementControllerTest {
 
     ServiceResourceProviderTest.updateServices(amc, serviceRequests, mapRequestProps, true, false);
 
-    Cluster cluster = clusters.getCluster(CLUSTER_NAME);
-
     for (Service service : cluster.getServices()) {
 
       for(String componentName : service.getServiceComponents().keySet()) {
@@ -9651,7 +9646,7 @@ public class AmbariManagementControllerTest {
     String serviceGroupName = "CORE";
     ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, "HDP-2.0.5");
     String serviceName = "HDFS";
-    createService(cluster1, serviceGroupName, serviceName, null);
+    Service service = createService(cluster1, serviceGroupName, serviceName, null);
     String componentName1 = "NAMENODE";
     String componentName2 = "DATANODE";
     String componentName3 = "HDFS_CLIENT";
@@ -9679,7 +9674,7 @@ public class AmbariManagementControllerTest {
       }
     };
 
-    ConfigurationRequest cr1 = new ConfigurationRequest(cluster1, "hdfs-site", "version1", hdfsConfigs, hdfsConfigAttributes, 1L, 1L);
+    ConfigurationRequest cr1 = new ConfigurationRequest(cluster1, "hdfs-site", "version1", hdfsConfigs, hdfsConfigAttributes, service.getServiceId(), 1L);
     ClusterRequest crReq1 = new ClusterRequest(clusterId, cluster1, null, null);
     crReq1.setDesiredConfig(Collections.singletonList(cr1));
 
@@ -9750,7 +9745,7 @@ public class AmbariManagementControllerTest {
     String serviceGroupName = "CORE";
     ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, "HDP-2.0.5");
     String serviceName = "HDFS";
-    createService(cluster1, serviceGroupName, serviceName, null);
+    Service service = createService(cluster1, serviceGroupName, serviceName, null);
     String componentName1 = "NAMENODE";
     String componentName2 = "DATANODE";
     String componentName3 = "HDFS_CLIENT";
@@ -9778,7 +9773,7 @@ public class AmbariManagementControllerTest {
           put("test.password", "first");
           put("test.password.empty", "");
         }},
-      new HashMap<>(), 1L, 1L
+      new HashMap<>(), service.getServiceId(), 1L
     );
     crReq = new ClusterRequest(clusterId, cluster1, null, null);
     crReq.setDesiredConfig(Collections.singletonList(cr));
@@ -9792,7 +9787,7 @@ public class AmbariManagementControllerTest {
           put("test.password", "SECRET:hdfs-site:1:test.password");
           put("new", "new");//need this to mark config as "changed"
         }},
-      new HashMap<>(), 1L, 1L
+      new HashMap<>(), service.getServiceId(), 1L
     );
     crReq = new ClusterRequest(clusterId, cluster1, null, null);
     crReq.setDesiredConfig(Collections.singletonList(cr));
@@ -9804,7 +9799,7 @@ public class AmbariManagementControllerTest {
         new HashMap<String, String>(){{
           put("test.password", "brandNewPassword");
         }},
-      new HashMap<>(), 1L, 1L
+      new HashMap<>(), service.getServiceId(), 1L
     );
     crReq = new ClusterRequest(clusterId, cluster1, null, null);
     crReq.setDesiredConfig(Collections.singletonList(cr));
@@ -9816,7 +9811,7 @@ public class AmbariManagementControllerTest {
         new HashMap<String, String>(){{
           put("test.password", "SECRET:hdfs-site:666:test.password");
         }},
-      new HashMap<>(), 1L, 1L
+      new HashMap<>(), service.getServiceId(), 1L
     );
     crReq = new ClusterRequest(clusterId, cluster1, null, null);
     crReq.setDesiredConfig(Collections.singletonList(cr));
@@ -9833,7 +9828,7 @@ public class AmbariManagementControllerTest {
         new HashMap<String, String>(){{
           put("foo", "bar");
         }},
-      new HashMap<>(), 1L, 1L
+      new HashMap<>(), service.getServiceId(), 1L
     );
     crReq = new ClusterRequest(clusterId, cluster1, null, null);
     crReq.setDesiredConfig(Collections.singletonList(cr));
@@ -9845,7 +9840,7 @@ public class AmbariManagementControllerTest {
           put("test.password", "SECRET:hdfs-site:4:test.password");
           put("new", "new");
         }},
-      new HashMap<>(), 1L, 1L
+      new HashMap<>(), service.getServiceId(), 1L
     );
     crReq = new ClusterRequest(clusterId, cluster1, null, null);
     crReq.setDesiredConfig(Collections.singletonList(cr));
@@ -9870,7 +9865,7 @@ public class AmbariManagementControllerTest {
     assertFalse(v4.getProperties().containsKey("test.password"));
 
     // check if we have masked secret in response
-    final ConfigurationRequest configRequest = new ConfigurationRequest(cluster1, "hdfs-site", null, null, null, 1L, 1L);
+    final ConfigurationRequest configRequest = new ConfigurationRequest(cluster1, "hdfs-site", null, null, null, service.getServiceId(), 1L);
     configRequest.setIncludeProperties(true);
     Set<ConfigurationResponse> requestedConfigs = controller.getConfigurations(new HashSet<ConfigurationRequest>() {{
       add(configRequest);
@@ -9895,7 +9890,7 @@ public class AmbariManagementControllerTest {
     String serviceGroupName = "CORE";
     ServiceGroupResourceProviderTest.createServiceGroup(controller, cluster1, serviceGroupName, "HDP-2.0.5");
     String serviceName = "HDFS";
-    createService(cluster1, serviceGroupName, serviceName, null);
+    Service service = createService(cluster1, serviceGroupName, serviceName, null);
     String componentName1 = "NAMENODE";
 
     createServiceComponent(cluster1, serviceGroupName, serviceName, componentName1, State.INIT);
@@ -9915,7 +9910,7 @@ public class AmbariManagementControllerTest {
       }
     };
 
-    ConfigurationRequest cr1 = new ConfigurationRequest(cluster1, "hdfs-site", "version1", hdfsConfigs, hdfsConfigAttributes, 1L, 1L);
+    ConfigurationRequest cr1 = new ConfigurationRequest(cluster1, "hdfs-site", "version1", hdfsConfigs, hdfsConfigAttributes, service.getServiceId(), 1L);
     ClusterRequest crReq1 = new ClusterRequest(cluster.getClusterId(), cluster1, null, null);
     crReq1.setDesiredConfig(Collections.singletonList(cr1));
 
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentSupportTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentSupportTest.java
index 2e74302..b839830 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentSupportTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentSupportTest.java
@@ -104,8 +104,9 @@ public class ServiceComponentSupportTest extends EasyMockSupport {
     Collection<Service> services = new LinkedList<Service>();
     for (String serviceName : installedServiceNames) {
       Service service = createNiceMock(Service.class);
-      expect(service.getName()).andReturn(serviceName);
+      expect(service.getServiceType()).andReturn(serviceName);
       services.add(service);
+      replay(service);
     }
     expect(cluster.getServices()).andReturn(services);
     replay(cluster);
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog252Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog252Test.java
index e0b0b33..481faaf 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog252Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog252Test.java
@@ -172,9 +172,6 @@ public class UpgradeCatalog252Test {
   private MetadataHolder metadataHolder;
 
   @Mock(type = MockType.NICE)
-  private ClusterMetadataGenerator metadataGenerator;
-
-  @Mock(type = MockType.NICE)
   private Injector injector;
 
   @Before
@@ -271,11 +268,8 @@ public class UpgradeCatalog252Test {
     expect(controller.createConfig(eq(cluster), eq(stackId), eq("livy2-conf"), capture(captureLivy2ConfProperties), anyString(), anyObject(Map.class), anyLong()))
         .andReturn(livy2ConfNew)
         .once();
-    expect(metadataGenerator.getClusterMetadataOnConfigsUpdate(eq(cluster)))
-        .andReturn(createNiceMock(MetadataUpdateEvent.class))
-        .times(2);
 
-    replay(clusters, cluster, zeppelinEnv, livy2Conf, livyConf, controller, sparkMock, spark2Mock, metadataGenerator);
+    replay(clusters, cluster, zeppelinEnv, livy2Conf, livyConf, controller, sparkMock, spark2Mock);
 
 
     Injector injector = getInjector(clusters, controller);
@@ -289,7 +283,7 @@ public class UpgradeCatalog252Test {
     UpgradeCatalog252 upgradeCatalog252 = injector.getInstance(UpgradeCatalog252.class);
     upgradeCatalog252.fixLivySuperusers();
 
-    verify(clusters, cluster, zeppelinEnv, livy2Conf, livyConf, controller, sparkMock, spark2Mock, metadataGenerator);
+    verify(clusters, cluster, zeppelinEnv, livy2Conf, livyConf, controller, sparkMock, spark2Mock);
 
     Assert.assertTrue(captureLivyConfProperties.hasCaptured());
     Assert.assertEquals("some_user,zeppelin_user", captureLivyConfProperties.getValue().get("livy.superusers"));
@@ -485,7 +479,6 @@ public class UpgradeCatalog252Test {
         binder.bind(AmbariMetaInfo.class).toInstance(createNiceMock(AmbariMetaInfo.class));
         binder.bind(KerberosHelper.class).toInstance(createNiceMock(KerberosHelperImpl.class));
         binder.bind(MetadataHolder.class).toInstance(metadataHolder);
-        binder.bind(ClusterMetadataGenerator.class).toInstance(metadataGenerator);
         binder.bind(AgentConfigsHolder.class).toInstance(createNiceMock(AgentConfigsHolder.class));
         binder.bind(StackManagerFactory.class).toInstance(createNiceMock(StackManagerFactory.class));
         binder.bind(ComponentResolver.class).toInstance(createNiceMock(ComponentResolver.class));