Posted to commits@ambari.apache.org by jo...@apache.org on 2018/07/25 18:41:48 UTC

[ambari] branch branch-feature-AMBARI-14714 updated: AMBARI-24194: Fix broken Java UTs in ambari-server code -- Part 4 (#1836)

This is an automated email from the ASF dual-hosted git repository.

jonathanhurley pushed a commit to branch branch-feature-AMBARI-14714
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/branch-feature-AMBARI-14714 by this push:
     new ca45b0c  AMBARI-24194: Fix broken Java UTs in ambari-server code -- Part 4 (#1836)
ca45b0c is described below

commit ca45b0c769fed337b8eda6c2c181b6f66d9a8790
Author: sduan <sd...@hortonworks.com>
AuthorDate: Wed Jul 25 11:41:45 2018 -0700

    AMBARI-24194: Fix broken Java UTs in ambari-server code -- Part 4 (#1836)
---
 .../ambari/server/stack/StackManagerTest.java      | 12 ++--
 .../server/stack/StackServiceDirectoryTest.java    |  3 +-
 .../ambari/server/state/cluster/ClusterTest.java   | 69 +++++++++++-----------
 .../ambari/server/state/cluster/ClustersTest.java  |  4 +-
 .../ConcurrentServiceConfigVersionTest.java        | 15 +++--
 .../server/upgrade/UpgradeCatalog260Test.java      |  3 +
 6 files changed, 60 insertions(+), 46 deletions(-)

diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
index cef9666..b68fb63 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
@@ -26,6 +26,7 @@ import static org.easymock.EasyMock.verify;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNotSame;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
@@ -386,7 +387,8 @@ public class StackManagerTest {
     // values from base service
     assertEquals(baseSqoopService.isDeleted(), sqoopService.isDeleted());
     assertEquals(baseSqoopService.getAlertsFile(),sqoopService.getAlertsFile());
-    assertEquals(baseSqoopService.getClientComponent(), sqoopService.getClientComponent());
+    // they are different because the versions are not the same now
+    assertNotSame(baseSqoopService.getClientComponent(), sqoopService.getClientComponent());
     assertEquals(baseSqoopService.getCommandScript(), sqoopService.getCommandScript());
     assertEquals(baseSqoopService.getConfigDependencies(), sqoopService.getConfigDependencies());
     assertEquals(baseSqoopService.getConfigDir(), sqoopService.getConfigDir());
@@ -419,7 +421,8 @@ public class StackManagerTest {
     // compare components
     List<ComponentInfo> stormServiceComponents = stormService.getComponents();
     List<ComponentInfo> baseStormServiceComponents = baseStormService.getComponents();
-    assertEquals(new HashSet<>(stormServiceComponents), new HashSet<>(baseStormServiceComponents));
+    // The versions are not the same now
+    assertNotSame(new HashSet<>(stormServiceComponents), new HashSet<>(baseStormServiceComponents));
     // values from base service
     assertEquals(baseStormService.isDeleted(), stormService.isDeleted());
     //todo: specify alerts file in stack
@@ -460,11 +463,12 @@ public class StackManagerTest {
     // compare components
     List<ComponentInfo> serviceComponents = service.getComponents();
     List<ComponentInfo> baseStormServiceCompoents = baseSqoopService.getComponents();
-    assertEquals(serviceComponents, baseStormServiceCompoents);
+    // Versions are different now
+    assertNotSame(serviceComponents, baseStormServiceCompoents);
     // values from base service
     assertEquals(baseSqoopService.isDeleted(), service.isDeleted());
     assertEquals(baseSqoopService.getAlertsFile(),service.getAlertsFile());
-    assertEquals(baseSqoopService.getClientComponent(), service.getClientComponent());
+    assertNotSame(baseSqoopService.getClientComponent(), service.getClientComponent());
     assertEquals(baseSqoopService.getCommandScript(), service.getCommandScript());
     assertEquals(baseSqoopService.getConfigDependencies(), service.getConfigDependencies());
     assertEquals(baseSqoopService.getConfigDir(), service.getConfigDir());
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackServiceDirectoryTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackServiceDirectoryTest.java
index 5983dce..85add08 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackServiceDirectoryTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackServiceDirectoryTest.java
@@ -44,7 +44,8 @@ public class StackServiceDirectoryTest {
     String pathWithValidChars = "/FakeStackName/1.0/services/FAKESERVICE/";
     String serviceNameInvalidChars = "Fake-Serv.ice";
 
-    String desiredServiceAdvisorName = "FakeStackName10FakeServiceServiceAdvisor";
+    // in Ambari 3.0 and beyond, the stack name is not used in the service advisor name
+    String desiredServiceAdvisorName = "FakeServiceServiceAdvisor";
 
     MockStackServiceDirectory ssd1 = createStackServiceDirectory(pathWithInvalidChars);
     assertEquals(desiredServiceAdvisorName, ssd1.getAdvisorName(serviceNameValidChars));
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
index 8539675..c33f183 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
@@ -959,7 +959,7 @@ public class ClusterTest {
   @Test
   public void testServiceConfigVersions() throws Exception {
     createDefaultCluster();
-    c1.addService(serviceGroup, "HDFS", "HDFS");
+    Service service = c1.addService(serviceGroup, "HDFS", "HDFS");
 
     Config config1 = configFactory.createNew(c1, "hdfs-site", "version1",
       new HashMap<String, String>() {{ put("a", "b"); }}, new HashMap<>());
@@ -1000,7 +1000,7 @@ public class ClusterTest {
     assertEquals(Long.valueOf(2), hdfsResponse.getVersion());
 
     // Rollback, cloning version1 config, created new ServiceConfigVersion
-    c1.setServiceConfigVersion(1L, 1L, "admin", "test_note");
+    c1.createServiceConfigVersion(service.getServiceId(), "admin", "test_note", null);
     serviceConfigVersions = c1.getServiceConfigVersions();
     Assert.assertNotNull(serviceConfigVersions);
     // created new ServiceConfigVersion
@@ -1050,7 +1050,7 @@ public class ClusterTest {
   public void testServiceConfigVersionsForGroups() throws Exception {
     createDefaultCluster();
 
-    c1.addService(serviceGroup, "HDFS", "HDFS");
+    Service service = c1.addService(serviceGroup, "HDFS", "HDFS");
 
     Config config1 = configFactory.createNew(c1, "hdfs-site", "version1",
       new HashMap<String, String>() {{ put("a", "b"); }}, new HashMap<>());
@@ -1069,12 +1069,12 @@ public class ClusterTest {
       new HashMap<String, String>() {{ put("a", "c"); }}, new HashMap<>());
 
     ConfigGroup configGroup =
-      configGroupFactory.createNew(c1, 1L, 1L, "HDFS", "", "descr", Collections.singletonMap("hdfs-site", config2),
+      configGroupFactory.createNew(c1, serviceGroup.getServiceGroupId(), service.getServiceId(), "HDFS1", "", "descr", Collections.singletonMap("hdfs-site", config2),
         new HashMap<>());
 
     c1.addConfigGroup(configGroup);
 
-    scvResponse = c1.createServiceConfigVersion(1L, "admin", "test note", configGroup);
+    scvResponse = c1.createServiceConfigVersion(service.getServiceId(), "admin", "test note", configGroup);
     assertEquals("SCV 2 should be created", Long.valueOf(2), scvResponse.getVersion());
 
     //two scv active
@@ -1087,7 +1087,7 @@ public class ClusterTest {
 
     configGroup.setConfigurations(Collections.singletonMap("hdfs-site", config3));
 
-    scvResponse = c1.createServiceConfigVersion(1L, "admin", "test note", configGroup);
+    scvResponse = c1.createServiceConfigVersion(service.getServiceId(), "admin", "test note", configGroup);
     assertEquals("SCV 3 should be created", Long.valueOf(3), scvResponse.getVersion());
 
     //still two scv active, 3 total
@@ -1099,7 +1099,7 @@ public class ClusterTest {
 
     //rollback group
 
-    scvResponse = c1.setServiceConfigVersion(1L, 2L, "admin", "group rollback");
+    scvResponse = c1.setServiceConfigVersion(service.getServiceId(), 2L, "admin", "group rollback");
     assertEquals("SCV 4 should be created", Long.valueOf(4), scvResponse.getVersion());
 
     configGroup = c1.getConfigGroups().get(configGroup.getId()); //refresh?
@@ -1121,13 +1121,13 @@ public class ClusterTest {
         Collections.singletonMap("a", "b"), null);
 
     ConfigGroup configGroup2 =
-        configGroupFactory.createNew(c1, 1L, 1L, "HDFS", "HDFS", "descr",
+        configGroupFactory.createNew(c1, serviceGroup.getServiceGroupId(), service.getServiceId(), "HDFS2", "HDFS", "descr",
             new HashMap<>(Collections.singletonMap("hdfs-site", config4)),
             Collections.emptyMap());
 
     c1.addConfigGroup(configGroup2);
 
-    scvResponse = c1.createServiceConfigVersion(1L, "admin", "test note", configGroup2);
+    scvResponse = c1.createServiceConfigVersion(service.getServiceId(), "admin", "test note", configGroup2);
     assertEquals("SCV 5 should be created", Long.valueOf(5), scvResponse.getVersion());
 
     activeServiceConfigVersions = c1.getActiveServiceConfigVersions();
@@ -1140,7 +1140,7 @@ public class ClusterTest {
   public void testAllServiceConfigVersionsWithConfigGroups() throws Exception {
     // Given
     createDefaultCluster();
-    c1.addService(serviceGroup, "HDFS", "HDFS");
+    Service service = c1.addService(serviceGroup, "HDFS", "HDFS");
 
     Config hdfsSiteConfigV1 = configFactory.createNew(c1, "hdfs-site", "version1",
         ImmutableMap.of("p1", "v1"), new HashMap<>());
@@ -1157,10 +1157,10 @@ public class ClusterTest {
     Config hdfsSiteConfigV2 = configFactory.createNew(c1, "hdfs-site", "version2",
         ImmutableMap.of("p1", "v2"), new HashMap<>());
 
-    ConfigGroup configGroup = configGroupFactory.createNew(c1, 1L, 1L,"HDFS", "configGroup1", "test description", ImmutableMap.of(hdfsSiteConfigV2.getType(), hdfsSiteConfigV2), new HashMap<>());
+    ConfigGroup configGroup = configGroupFactory.createNew(c1, serviceGroup.getServiceGroupId(), service.getServiceId(),"HDFS3", "configGroup1", "test description", ImmutableMap.of(hdfsSiteConfigV2.getType(), hdfsSiteConfigV2), new HashMap<>());
 
     c1.addConfigGroup(configGroup);
-    ServiceConfigVersionResponse hdfsSiteConfigResponseV2 = c1.createServiceConfigVersion(1L, "admin", "test note", configGroup);
+    ServiceConfigVersionResponse hdfsSiteConfigResponseV2 = c1.createServiceConfigVersion(service.getServiceId(), "admin", "test note", configGroup);
     hdfsSiteConfigResponseV2.setConfigurations(Collections.singletonList(
       new ConfigurationResponse(c1.getClusterName(), hdfsSiteConfigV2.getStackId(),
         hdfsSiteConfigV2.getType(), hdfsSiteConfigV2.getTag(), hdfsSiteConfigV2.getVersion(),
@@ -1169,7 +1169,7 @@ public class ClusterTest {
     hdfsSiteConfigResponseV2.setIsCurrent(true); // this is the active config in the 'configGroup1' config group as it's the sole service config
 
     // hdfs config v3
-    ServiceConfigVersionResponse hdfsSiteConfigResponseV3 = c1.createServiceConfigVersion(1L, "admin", "new config in default group", null);
+    ServiceConfigVersionResponse hdfsSiteConfigResponseV3 = c1.createServiceConfigVersion(service.getServiceId(), "admin", "new config in default group", null);
     hdfsSiteConfigResponseV3.setConfigurations(configResponsesDefaultGroup);
     hdfsSiteConfigResponseV3.setIsCurrent(true); // this is the active config in default config group as it's more recent than V1
 
@@ -1190,7 +1190,7 @@ public class ClusterTest {
   public void testAllServiceConfigVersionsWithDeletedConfigGroups() throws Exception {
     // Given
     createDefaultCluster();
-    c1.addService(serviceGroup, "HDFS", "HDFS");
+    Service service = c1.addService(serviceGroup, "HDFS", "HDFS");
 
     Config hdfsSiteConfigV1 = configFactory.createNew(c1, "hdfs-site", "version1",
         ImmutableMap.of("p1", "v1"), new HashMap<>());
@@ -1207,10 +1207,10 @@ public class ClusterTest {
     Config hdfsSiteConfigV2 = configFactory.createNew(c1, "hdfs-site", "version2",
         ImmutableMap.of("p1", "v2"), new HashMap<>());
 
-    ConfigGroup configGroup = configGroupFactory.createNew(c1, 1L, 1L, "HDFS", "version1", "test description", ImmutableMap.of(hdfsSiteConfigV2.getType(), hdfsSiteConfigV2), new HashMap<>());
+    ConfigGroup configGroup = configGroupFactory.createNew(c1, serviceGroup.getServiceGroupId(), service.getServiceId(), "HDFS4", "version1", "test description", ImmutableMap.of(hdfsSiteConfigV2.getType(), hdfsSiteConfigV2), new HashMap<>());
 
     c1.addConfigGroup(configGroup);
-    ServiceConfigVersionResponse hdfsSiteConfigResponseV2 = c1.createServiceConfigVersion(1L, "admin", "test note", configGroup);
+    ServiceConfigVersionResponse hdfsSiteConfigResponseV2 = c1.createServiceConfigVersion(service.getServiceId(), "admin", "test note", configGroup);
     hdfsSiteConfigResponseV2.setConfigurations(Collections.singletonList(
       new ConfigurationResponse(c1.getClusterName(), hdfsSiteConfigV2.getStackId(),
         hdfsSiteConfigV2.getType(), hdfsSiteConfigV2.getTag(), hdfsSiteConfigV2.getVersion(),
@@ -1222,7 +1222,7 @@ public class ClusterTest {
 
 
     // hdfs config v3
-    ServiceConfigVersionResponse hdfsSiteConfigResponseV3 = c1.createServiceConfigVersion(1L, "admin", "new config in default group", null);
+    ServiceConfigVersionResponse hdfsSiteConfigResponseV3 = c1.createServiceConfigVersion(service.getServiceId(), "admin", "new config in default group", null);
     hdfsSiteConfigResponseV3.setConfigurations(configResponsesDefaultGroup);
     hdfsSiteConfigResponseV3.setIsCurrent(true); // this is the active config in default config group as it's more recent than V1
 
@@ -1274,7 +1274,7 @@ public class ClusterTest {
         }, new HashMap<>());
 
     Service service = cluster.addService(serviceGroup, "HDFS", "HDFS");
-    ConfigGroup configGroup = configGroupFactory.createNew(cluster, serviceGroup.getServiceGroupId(), service.getServiceId(), "HDFS", "t1", "",
+    ConfigGroup configGroup = configGroupFactory.createNew(cluster, serviceGroup.getServiceGroupId(), service.getServiceId(), "HDFS5", "t1", "",
         new HashMap<String, Config>() {
           {
             put("foo-site", originalConfig);
@@ -1338,7 +1338,7 @@ public class ClusterTest {
     // add a service
     String serviceName = "ZOOKEEPER";
     ServiceGroup serviceGroup = cluster.getServiceGroup("CORE");
-    cluster.addService(serviceGroup, serviceName, serviceName);
+    Service service = cluster.addService(serviceGroup, serviceName, serviceName);
     String configType = "zoo.cfg";
 
     ClusterConfigEntity clusterConfig1 = new ClusterConfigEntity();
@@ -1358,7 +1358,7 @@ public class ClusterTest {
     Config config = configFactory.createExisting(cluster, clusterConfig1);
     cluster.addConfig(config);
 
-    cluster.createServiceConfigVersion(1L, "", "version-1", null);
+    cluster.createServiceConfigVersion(service.getServiceId(), "", "version-1", null);
 
     ClusterConfigEntity clusterConfig2 = new ClusterConfigEntity();
     clusterConfig2.setClusterEntity(clusterEntity);
@@ -1384,7 +1384,7 @@ public class ClusterTest {
 
     serviceGroupEntity.setStack(newStack);
     serviceGroupEntity = serviceGroupDAO.merge(serviceGroupEntity);
-    cluster.createServiceConfigVersion(1L, "", "version-2", null);
+    cluster.createServiceConfigVersion(service.getServiceId(), "", "version-2", null);
 
     // check that the original config is enabled
     Collection<ClusterConfigEntity> clusterConfigs = clusterEntity.getClusterConfigEntities();
@@ -1397,7 +1397,7 @@ public class ClusterTest {
       }
     }
 
-    cluster.applyLatestConfigurations(newStackId, 1L);
+    cluster.applyLatestConfigurations(newStackId, service.getServiceId());
     clusterEntity = clusterDAO.findByName("c1");
 
     // now check that the new config is enabled
@@ -1438,7 +1438,7 @@ public class ClusterTest {
     // add a service
     String serviceName = "ZOOKEEPER";
     ServiceGroup serviceGroup = cluster.getServiceGroup("CORE");
-    cluster.addService(serviceGroup, serviceName, serviceName);
+    Service service = cluster.addService(serviceGroup, serviceName, serviceName);
     String configType = "zoo.cfg";
 
     // create 5 configurations in the current stack
@@ -1470,7 +1470,7 @@ public class ClusterTest {
     clusterEntity = clusterDAO.merge(clusterEntity);
 
     // create a service configuration for them
-    cluster.createServiceConfigVersion(1L, "", "version-1", null);
+    cluster.createServiceConfigVersion(service.getServiceId(), "", "version-1", null);
 
     // create a new configuration in the new stack and enable it
     ClusterConfigEntity clusterConfigNewStack = new ClusterConfigEntity();
@@ -1498,7 +1498,7 @@ public class ClusterTest {
     serviceGroupEntity.setStack(newStack);
     serviceGroupEntity = serviceGroupDAO.merge(serviceGroupEntity);
 
-    cluster.createServiceConfigVersion(1L, "", "version-2", null);
+    cluster.createServiceConfigVersion(service.getServiceId(), "", "version-2", null);
 
     // check that only the newest configuration is enabled
     ClusterConfigEntity clusterConfig = clusterDAO.findEnabledConfigByType(
@@ -1507,7 +1507,7 @@ public class ClusterTest {
     Assert.assertEquals(clusterConfigNewStack.getTag(), clusterConfig.getTag());
 
     // move back to the original stack
-    cluster.applyLatestConfigurations(stackId, 1L);
+    cluster.applyLatestConfigurations(stackId, service.getServiceId());
     clusterEntity = clusterDAO.findByName("c1");
 
     // now check that latest config from the original stack is enabled
@@ -1546,7 +1546,7 @@ public class ClusterTest {
     // add a service
     String serviceName = "ZOOKEEPER";
     ServiceGroup serviceGroup = cluster.getServiceGroup("CORE");
-    cluster.addService(serviceGroup, serviceName, serviceName);
+    Service service = cluster.addService(serviceGroup, serviceName, serviceName);
     String configType = "zoo.cfg";
 
     Map<String, String> properties = new HashMap<>();
@@ -1573,7 +1573,9 @@ public class ClusterTest {
 
     // make v2 "current"
     cluster.addDesiredConfig("admin", Sets.newHashSet(c2), "note-2");
-
+    // Whenever we add a desired config, it should presumably also be added to allConfigs;
+    // not sure whether this is a bug in ClusterImpl.java or whether addConfig must be called explicitly.
+    cluster.addConfig(c2);
     // check desired config
     Map<String, DesiredConfig> desiredConfigs = cluster.getDesiredConfigs();
     DesiredConfig desiredConfig = desiredConfigs.get(configType);
@@ -1597,7 +1599,7 @@ public class ClusterTest {
     serviceGroupEntity = serviceGroupDAO.merge(serviceGroupEntity);
 
     // apply the configs for the old stack
-    cluster.applyLatestConfigurations(stackId, 1L);
+    cluster.applyLatestConfigurations(stackId, service.getServiceId());
 
     // {config-type={tag=version-1}}
     effectiveDesiredTags = configHelper.getEffectiveDesiredTags(cluster, hostName);
@@ -1634,7 +1636,7 @@ public class ClusterTest {
     // add a service
     String serviceName = "ZOOKEEPER";
     ServiceGroup serviceGroup = cluster.getServiceGroup("CORE");
-    cluster.addService(serviceGroup, serviceName, serviceName);
+    Service service = cluster.addService(serviceGroup, serviceName, serviceName);
     String configType = "zoo.cfg";
 
     ClusterConfigEntity clusterConfig = new ClusterConfigEntity();
@@ -1655,7 +1657,7 @@ public class ClusterTest {
     cluster.addConfig(config);
 
     // create the service version association
-    cluster.createServiceConfigVersion(1L, "", "version-1", null);
+    cluster.createServiceConfigVersion(service.getServiceId(), "", "version-1", null);
 
     // now un-select it and create a new config
     clusterConfig.setSelected(false);
@@ -1685,8 +1687,9 @@ public class ClusterTest {
 
     serviceGroupEntity.setStack(newStack);
     serviceGroupEntity = serviceGroupDAO.merge(serviceGroupEntity);
+    serviceGroup.setStack(newStack);
 
-    cluster.createServiceConfigVersion(1L, "", "version-2", null);
+    cluster.createServiceConfigVersion(service.getServiceId(), "", "version-2", null);
 
     cluster.applyLatestConfigurations(newStackId, 1L);
 
@@ -1697,7 +1700,7 @@ public class ClusterTest {
     Assert.assertEquals(1, clusterConfigs.size());
 
     // remove the configs
-    cluster.removeConfigurations(newStackId, 1L);
+    cluster.removeConfigurations(newStackId, service.getServiceId());
 
     clusterConfigs = clusterDAO.getAllConfigurations(cluster.getClusterId(), newStackId);
     Assert.assertEquals(0, clusterConfigs.size());
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
index 715350b..b137953 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
@@ -445,7 +445,7 @@ public class ClustersTest {
     serviceCheckNodeHost.setState(State.UNKNOWN);
 
     Assert.assertNotNull(injector.getInstance(HostComponentStateDAO.class).findByIndex(
-      nameNodeHost.getClusterId(), 1L, 1L,
+      nameNodeHost.getClusterId(), serviceGroup.getServiceGroupId(), hdfs.getServiceId(),
       nameNodeHost.getServiceComponentId(),  nameNodeHostEntity.getHostId()));
 
     Assert.assertNotNull(injector.getInstance(HostComponentDesiredStateDAO.class).findByIndex(nameNodeHost.getServiceComponentId()));
@@ -476,7 +476,7 @@ public class ClustersTest {
 
     Assert.assertEquals(2, hostDAO.findAll().size());
     Assert.assertNull(injector.getInstance(HostComponentStateDAO.class).findByIndex(
-      nameNodeHost.getClusterId(), 1L, 1L,
+      nameNodeHost.getClusterId(), serviceGroup.getServiceGroupId(), hdfs.getServiceId(),
       nameNodeHost.getServiceComponentId(), nameNodeHostEntity.getHostId()));
 
     Assert.assertNull(injector.getInstance(HostComponentDesiredStateDAO.class).findByIndex(nameNodeHost.getServiceComponentId()));
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java
index ea31969..db06f69 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java
@@ -102,6 +102,7 @@ public class ConcurrentServiceConfigVersionTest {
    */
   private Cluster cluster;
   private ServiceGroup serviceGroup;
+  private Service service;
 
   /**
    * Creates a cluster and installs HDFS with NN and DN.
@@ -125,7 +126,7 @@ public class ConcurrentServiceConfigVersionTest {
     setOsFamily(clusters.getHost(hostName), "redhat", "6.4");
     clusters.mapHostToCluster(hostName, "c1");
 
-    Service service = installService("HDFS");
+    service = installService("HDFS");
     addServiceComponent(service, "NAMENODE");
     addServiceComponent(service, "DATANODE");
 
@@ -147,13 +148,13 @@ public class ConcurrentServiceConfigVersionTest {
   @Test
   public void testConcurrentServiceConfigVersions() throws Exception {
     long nextVersion = serviceConfigDAO.findNextServiceConfigVersion(
-        cluster.getClusterId(), 1L);
+        cluster.getClusterId(), service.getServiceId());
 
     Assert.assertEquals(nextVersion, 1);
 
     List<Thread> threads = new ArrayList<>();
     for (int i = 0; i < NUMBER_OF_THREADS; i++) {
-      Thread thread = new ConcurrentServiceConfigThread(cluster);
+      Thread thread = new ConcurrentServiceConfigThread(cluster, service);
       threads.add(thread);
 
       thread.start();
@@ -165,7 +166,7 @@ public class ConcurrentServiceConfigVersionTest {
 
     long maxVersion = NUMBER_OF_THREADS * NUMBER_OF_SERVICE_CONFIG_VERSIONS;
     nextVersion = serviceConfigDAO.findNextServiceConfigVersion(
-        cluster.getClusterId(), 1L);
+        cluster.getClusterId(), service.getServiceId());
 
     Assert.assertEquals(maxVersion + 1, nextVersion);
   }
@@ -173,9 +174,11 @@ public class ConcurrentServiceConfigVersionTest {
   private final static class ConcurrentServiceConfigThread extends Thread {
 
     private Cluster cluster = null;
+    private Service service = null;
 
-    private ConcurrentServiceConfigThread(Cluster cluster) {
+    private ConcurrentServiceConfigThread(Cluster cluster, Service service) {
       this.cluster = cluster;
+      this.service = service;
     }
 
     /**
@@ -186,7 +189,7 @@ public class ConcurrentServiceConfigVersionTest {
       try {
         for (int i = 0; i < NUMBER_OF_SERVICE_CONFIG_VERSIONS; i++) {
           ServiceConfigVersionResponse response = cluster.createServiceConfigVersion(
-              1L, null, getName() + "-serviceConfig" + i, null);
+              service.getServiceId(), null, getName() + "-serviceConfig" + i, null);
 
           Thread.sleep(100);
         }
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog260Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog260Test.java
index 5b4dc49..aa32edd 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog260Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog260Test.java
@@ -1011,6 +1011,9 @@ public class UpgradeCatalog260Test {
     expect(injector2.getInstance(ClusterMetadataGenerator.class)).andReturn(metadataGenerator).anyTimes();
     expect(injector2.getInstance(MetadataHolder.class)).andReturn(metadataHolder).anyTimes();
     expect(injector2.getInstance(AgentConfigsHolder.class)).andReturn(agentConfigsHolder).anyTimes();
+    ConfigHelper configHelper = getInjector().getInstance(ConfigHelper.class);
+    configHelper.updateAgentConfigs(anyObject(Set.class));
+    expect(injector2.getInstance(ConfigHelper.class)).andReturn(configHelper).anyTimes();
     replay(controller, injector2, config, cluster);
 
     // This tests the update of HSI config 'hive.llap.daemon.keytab.file'.