You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by nc...@apache.org on 2017/07/13 19:14:29 UTC
[11/37] ambari git commit: AMBARI-21450. Initial cherry-picking for
feature branch (ncole)
http://git-wip-us.apache.org/repos/asf/ambari/blob/48f7fb22/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostComponentStateDAOTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostComponentStateDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostComponentStateDAOTest.java
index 5ea0b1e..7896c34 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostComponentStateDAOTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostComponentStateDAOTest.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -60,4 +60,4 @@ public class HostComponentStateDAOTest {
verify(entityManagerProvider, entityManager, hostDAO, hostEntity, hostComponentStateEntity);
}
-}
\ No newline at end of file
+}
http://git-wip-us.apache.org/repos/asf/ambari/blob/48f7fb22/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostVersionDAOTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostVersionDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostVersionDAOTest.java
index 6219a69..bb077d6 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostVersionDAOTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/HostVersionDAOTest.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -30,7 +30,6 @@ import org.apache.ambari.server.orm.GuiceJpaInitializer;
import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
import org.apache.ambari.server.orm.OrmTestHelper;
import org.apache.ambari.server.orm.entities.ClusterEntity;
-import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
import org.apache.ambari.server.orm.entities.HostEntity;
import org.apache.ambari.server.orm.entities.HostVersionEntity;
import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
@@ -58,7 +57,6 @@ public class HostVersionDAOTest {
private ResourceTypeDAO resourceTypeDAO;
private ClusterDAO clusterDAO;
private StackDAO stackDAO;
- private ClusterVersionDAO clusterVersionDAO;
private HostDAO hostDAO;
private HostVersionDAO hostVersionDAO;
private OrmTestHelper helper;
@@ -79,7 +77,6 @@ public class HostVersionDAOTest {
resourceTypeDAO = injector.getInstance(ResourceTypeDAO.class);
clusterDAO = injector.getInstance(ClusterDAO.class);
stackDAO = injector.getInstance(StackDAO.class);
- clusterVersionDAO = injector.getInstance(ClusterVersionDAO.class);
hostDAO = injector.getInstance(HostDAO.class);
hostVersionDAO = injector.getInstance(HostVersionDAO.class);
helper = injector.getInstance(OrmTestHelper.class);
@@ -116,17 +113,6 @@ public class HostVersionDAOTest {
RepositoryVersionEntity repoVersionEntity = helper.getOrCreateRepositoryVersion(HDP_22_STACK, repoVersion_2200);
- // Create the Cluster Version and link it to the cluster
- ClusterVersionEntity clusterVersionEntity = new ClusterVersionEntity(
- clusterEntity, repoVersionEntity, RepositoryVersionState.CURRENT,
- System.currentTimeMillis(), System.currentTimeMillis(), "admin");
- List<ClusterVersionEntity> clusterVersionEntities = new ArrayList<ClusterVersionEntity>();
- clusterVersionEntities.add(clusterVersionEntity);
- clusterEntity.setClusterVersionEntities(clusterVersionEntities);
-
- clusterVersionDAO.create(clusterVersionEntity);
- clusterDAO.merge(clusterEntity);
-
// Create the hosts
HostEntity host1 = new HostEntity();
HostEntity host2 = new HostEntity();
@@ -139,7 +125,7 @@ public class HostVersionDAOTest {
host2.setIpv4("192.168.0.2");
host3.setIpv4("192.168.0.3");
- List<HostEntity> hostEntities = new ArrayList<HostEntity>();
+ List<HostEntity> hostEntities = new ArrayList<>();
hostEntities.add(host1);
hostEntities.add(host2);
hostEntities.add(host3);
@@ -157,9 +143,9 @@ public class HostVersionDAOTest {
clusterDAO.merge(clusterEntity);
// Create the Host Versions
- HostVersionEntity hostVersionEntity1 = new HostVersionEntity(host1, clusterVersionEntity.getRepositoryVersion(), RepositoryVersionState.CURRENT);
- HostVersionEntity hostVersionEntity2 = new HostVersionEntity(host2, clusterVersionEntity.getRepositoryVersion(), RepositoryVersionState.INSTALLED);
- HostVersionEntity hostVersionEntity3 = new HostVersionEntity(host3, clusterVersionEntity.getRepositoryVersion(), RepositoryVersionState.INSTALLED);
+ HostVersionEntity hostVersionEntity1 = new HostVersionEntity(host1, repoVersionEntity, RepositoryVersionState.CURRENT);
+ HostVersionEntity hostVersionEntity2 = new HostVersionEntity(host2, repoVersionEntity, RepositoryVersionState.INSTALLED);
+ HostVersionEntity hostVersionEntity3 = new HostVersionEntity(host3, repoVersionEntity, RepositoryVersionState.INSTALLED);
hostVersionDAO.create(hostVersionEntity1);
hostVersionDAO.create(hostVersionEntity2);
@@ -172,20 +158,8 @@ public class HostVersionDAOTest {
private void addMoreVersions() {
ClusterEntity clusterEntity = clusterDAO.findByName("test_cluster1");
- // Create another Cluster Version and mark the old one as INSTALLED
- if (clusterEntity.getClusterVersionEntities() != null && clusterEntity.getClusterVersionEntities().size() > 0) {
- ClusterVersionEntity installedClusterVersion = clusterVersionDAO.findByClusterAndStateCurrent(clusterEntity.getClusterName());
- installedClusterVersion.setState(RepositoryVersionState.INSTALLED);
- clusterVersionDAO.merge(installedClusterVersion);
- } else {
- Assert.fail("Cluster is expected to have at least one cluster version");
- }
-
RepositoryVersionEntity repositoryVersionEnt_2_2_0_1 = helper.getOrCreateRepositoryVersion(HDP_22_STACK, repoVersion_2201);
- ClusterVersionEntity newClusterVersionEntity = new ClusterVersionEntity(clusterEntity, repositoryVersionEnt_2_2_0_1, RepositoryVersionState.CURRENT, System.currentTimeMillis(), System.currentTimeMillis(), "admin");
- clusterEntity.addClusterVersionEntity(newClusterVersionEntity);
- clusterVersionDAO.create(newClusterVersionEntity);
HostEntity[] hostEntities = clusterEntity.getHostEntities().toArray(new HostEntity[clusterEntity.getHostEntities().size()]);
// Must sort by host name in ascending order to ensure that state is accurately set later on.
@@ -193,7 +167,7 @@ public class HostVersionDAOTest {
// For each of the hosts, add a host version
for (HostEntity host : hostEntities) {
- HostVersionEntity hostVersionEntity = new HostVersionEntity(host, helper.getOrCreateRepositoryVersion(HDP_22_STACK, repoVersion_2201), RepositoryVersionState.INSTALLED);
+ HostVersionEntity hostVersionEntity = new HostVersionEntity(host, repositoryVersionEnt_2_2_0_1, RepositoryVersionState.INSTALLED);
hostVersionDAO.create(hostVersionEntity);
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/48f7fb22/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java
index 9760214..406349a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -29,7 +29,6 @@ import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.orm.GuiceJpaInitializer;
import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
-import org.apache.ambari.server.orm.entities.ClusterConfigMappingEntity;
import org.apache.ambari.server.orm.entities.ClusterEntity;
import org.apache.ambari.server.orm.entities.ConfigGroupConfigMappingEntity;
import org.apache.ambari.server.orm.entities.ConfigGroupEntity;
@@ -41,7 +40,6 @@ import org.apache.ambari.server.security.authorization.ResourceType;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.state.cluster.ClusterImpl;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@@ -271,23 +269,43 @@ public class ServiceConfigDAOTest {
@Test
public void testGetLastServiceConfigsForService() throws Exception {
String serviceName = "HDFS";
+ Clusters clusters = injector.getInstance(Clusters.class);
+ clusters.addCluster("c1", HDP_01);
+ ConfigGroupEntity configGroupEntity1 = new ConfigGroupEntity();
+ ClusterEntity clusterEntity = clusterDAO.findByName("c1");
+ configGroupEntity1.setClusterEntity(clusterEntity);
+ configGroupEntity1.setClusterId(clusterEntity.getClusterId());
+ configGroupEntity1.setGroupName("group1");
+ configGroupEntity1.setDescription("group1_desc");
+ configGroupEntity1.setTag("HDFS");
+ configGroupEntity1.setServiceName("HDFS");
+ configGroupDAO.create(configGroupEntity1);
+ ConfigGroupEntity group1 = configGroupDAO.findByName("group1");
+ ConfigGroupEntity configGroupEntity2 = new ConfigGroupEntity();
+ configGroupEntity2.setClusterEntity(clusterEntity);
+ configGroupEntity2.setClusterId(clusterEntity.getClusterId());
+ configGroupEntity2.setGroupName("group2");
+ configGroupEntity2.setDescription("group2_desc");
+ configGroupEntity2.setTag("HDFS");
+ configGroupEntity2.setServiceName("HDFS");
+ configGroupDAO.create(configGroupEntity2);
+ ConfigGroupEntity group2 = configGroupDAO.findByName("group2");
createServiceConfig(serviceName, "admin", 1L, 1L, 1111L, null);
createServiceConfig(serviceName, "admin", 2L, 2L, 1010L, null);
- createServiceConfigWithGroup(serviceName, "admin", 3L, 3L, 2222L, null, 1L);
- createServiceConfigWithGroup(serviceName, "admin", 5L, 5L, 3333L, null, 2L);
- createServiceConfigWithGroup(serviceName, "admin", 4L, 4L, 3330L, null, 2L);
-
- List<ServiceConfigEntity> serviceConfigEntities =
- serviceConfigDAO.getLastServiceConfigsForService(clusterDAO.findByName("c1").getClusterId(), serviceName);
+ createServiceConfigWithGroup(serviceName, "admin", 3L, 3L, 2222L, null, group1.getGroupId());
+ createServiceConfigWithGroup(serviceName, "admin", 5L, 5L, 3333L, null, group2.getGroupId());
+ createServiceConfigWithGroup(serviceName, "admin", 4L, 4L, 3330L, null, group2.getGroupId());
+ List<ServiceConfigEntity> serviceConfigEntities = serviceConfigDAO
+ .getLastServiceConfigsForService(clusterDAO.findByName("c1").getClusterId(), serviceName);
Assert.assertNotNull(serviceConfigEntities);
Assert.assertEquals(3, serviceConfigEntities.size());
- for (ServiceConfigEntity sce: serviceConfigEntities) {
- if (sce.getGroupId() != null && sce.getGroupId().equals(2L)) {
- // Group ID with the highest version should be selected
- Assert.assertEquals(sce.getVersion(), Long.valueOf(5L));
- }
+ for (ServiceConfigEntity sce : serviceConfigEntities) {
+ if (sce.getGroupId() != null && sce.getGroupId().equals(group2.getGroupId())) {
+ // Group ID with the highest version should be selected
+ Assert.assertEquals(sce.getVersion(), Long.valueOf(5L));
+ }
}
}
@@ -370,10 +388,15 @@ public class ServiceConfigDAOTest {
long clusterId = serviceConfigEntity.getClusterId();
- List<ServiceConfigEntity> serviceConfigs = serviceConfigDAO.getAllServiceConfigsForClusterAndStack(clusterId, HDP_01);
- Assert.assertEquals(4, serviceConfigs.size());
+ List<ServiceConfigEntity> serviceConfigs = serviceConfigDAO.getServiceConfigsForServiceAndStack(
+ clusterId, HDP_01, "HDFS");
+
+ Assert.assertEquals(3, serviceConfigs.size());
- serviceConfigs = serviceConfigDAO.getAllServiceConfigsForClusterAndStack(clusterId, HDP_02);
+ serviceConfigs = serviceConfigDAO.getServiceConfigsForServiceAndStack(clusterId, HDP_01, "YARN");
+ Assert.assertEquals(1, serviceConfigs.size());
+
+ serviceConfigs = serviceConfigDAO.getServiceConfigsForServiceAndStack(clusterId, HDP_02, "HDFS");
Assert.assertEquals(0, serviceConfigs.size());
}
@@ -392,6 +415,16 @@ public class ServiceConfigDAOTest {
clusterEntity.setDesiredStack(stackEntity);
clusterDAO.merge(clusterEntity);
+ ConfigGroupEntity configGroupEntity1 = new ConfigGroupEntity();
+ configGroupEntity1.setClusterEntity(clusterEntity);
+ configGroupEntity1.setClusterId(clusterEntity.getClusterId());
+ configGroupEntity1.setGroupName("group1");
+ configGroupEntity1.setDescription("group1_desc");
+ configGroupEntity1.setTag("HDFS");
+ configGroupEntity1.setServiceName("HDFS");
+ configGroupDAO.create(configGroupEntity1);
+ ConfigGroupEntity group1 = configGroupDAO.findByName("group1");
+ createServiceConfigWithGroup("HDFS", "admin", 3L, 8L, 2222L, null, group1.getGroupId());
// create some for HDP 0.2
serviceConfigEntity = createServiceConfig("HDFS", "admin", 4L, 5L, 50L, null);
serviceConfigEntity = createServiceConfig("HDFS", "admin", 5L, 6L, 60L, null);
@@ -400,7 +433,8 @@ public class ServiceConfigDAOTest {
long clusterId = serviceConfigEntity.getClusterId();
List<ServiceConfigEntity> serviceConfigs = serviceConfigDAO.getLatestServiceConfigs(clusterId, HDP_01);
- Assert.assertEquals(2, serviceConfigs.size());
+ Assert.assertEquals(3, serviceConfigs.size());
+ configGroupDAO.remove(configGroupEntity1);
serviceConfigs = serviceConfigDAO.getLatestServiceConfigs(clusterId, HDP_02);
Assert.assertEquals(2, serviceConfigs.size());
@@ -412,85 +446,99 @@ public class ServiceConfigDAOTest {
ClusterEntity clusterEntity = clusterDAO.findByName("c1");
Assert.assertTrue(!clusterEntity.getClusterConfigEntities().isEmpty());
- Assert.assertTrue(!clusterEntity.getConfigMappingEntities().isEmpty());
Assert.assertEquals(5, clusterEntity.getClusterConfigEntities().size());
- Assert.assertEquals(3, clusterEntity.getConfigMappingEntities().size());
}
+ /**
+ * Tests the ability to find the latest configuration by stack, regardless of
+ * whether that configuration is enabled.
+ *
+ * @throws Exception
+ */
@Test
- public void testGetClusterConfigMappingByStack() throws Exception{
+ public void testGetLatestClusterConfigsByStack() throws Exception {
initClusterEntities();
ClusterEntity clusterEntity = clusterDAO.findByName("c1");
- List<ClusterConfigMappingEntity> clusterConfigMappingEntities = clusterDAO.getClusterConfigMappingsByStack(clusterEntity.getClusterId(), HDP_01);
- Assert.assertEquals(2, clusterConfigMappingEntities .size());
+ // there should be 3 configs in HDP-0.1 for this cluster, none selected
+ List<ClusterConfigEntity> clusterConfigEntities = clusterDAO.getLatestConfigurations(clusterEntity.getClusterId(), HDP_01);
+ Assert.assertEquals(1, clusterConfigEntities.size());
+
+ ClusterConfigEntity entity = clusterConfigEntities.get(0);
+ Assert.assertEquals("version3", entity.getTag());
+ Assert.assertEquals("oozie-site", entity.getType());
+ Assert.assertFalse(entity.isSelected());
- ClusterConfigMappingEntity e1 = clusterConfigMappingEntities.get(0);
- String tag1 = e1.getTag();
- Assert.assertEquals("version1", tag1);
- String type1 = e1.getType();
- Assert.assertEquals("oozie-site", type1);
+ // there should be 2 configs in HDP-0.2 for this cluster, the latest being
+ // selected
+ clusterConfigEntities = clusterDAO.getLatestConfigurations(clusterEntity.getClusterId(), HDP_02);
+ Assert.assertEquals(1, clusterConfigEntities.size());
- ClusterConfigMappingEntity e2 = clusterConfigMappingEntities.get(1);
- String tag2 = e2.getTag();
- Assert.assertEquals("version2", tag2);
- String type2 = e2.getType();
- Assert.assertEquals("oozie-site", type2);
+ entity = clusterConfigEntities.get(0);
+ Assert.assertEquals("version5", entity.getTag());
+ Assert.assertEquals("oozie-site", entity.getType());
+ Assert.assertTrue(entity.isSelected());
}
/**
- * Test the get latest configuration query against clusterconfig table with configuration groups inserted
- * */
+ * Tests getting latest and enabled configurations when there is a
+ * configuration group. Configurations for configuration groups are not
+ * "selected" as they are merged in with the selected configuration. This can
+ * cause problems if searching simply for the "latest" since it will pickup
+ * the wrong configuration.
+ *
+ */
@Test
- public void testGetClusterConfigMappingByStackCG() throws Exception{
+ public void testGetClusterConfigsByStackCG() throws Exception {
initClusterEntitiesWithConfigGroups();
ClusterEntity clusterEntity = clusterDAO.findByName("c1");
- Long clusterId = clusterEntity.getClusterId();
List<ConfigGroupEntity> configGroupEntities = configGroupDAO.findAllByTag("OOZIE");
+ Long clusterId = clusterDAO.findByName("c1").getClusterId();
+
Assert.assertNotNull(configGroupEntities);
ConfigGroupEntity configGroupEntity = configGroupEntities.get(0);
Assert.assertNotNull(configGroupEntity);
Assert.assertEquals("c1", configGroupEntity.getClusterEntity().getClusterName());
- Assert.assertEquals(clusterId, configGroupEntity.getClusterEntity()
- .getClusterId());
+ Assert.assertEquals(clusterId, configGroupEntity.getClusterEntity().getClusterId());
Assert.assertEquals("oozie_server", configGroupEntity.getGroupName());
Assert.assertEquals("OOZIE", configGroupEntity.getTag());
Assert.assertEquals("oozie server", configGroupEntity.getDescription());
- List<ClusterConfigMappingEntity> clusterConfigMappingEntities = clusterDAO.getClusterConfigMappingsByStack(clusterEntity.getClusterId(), HDP_01);
- Assert.assertEquals(2, clusterConfigMappingEntities .size());
+ // all 3 are HDP-0.1, but only the 2nd one is enabled
+ List<ClusterConfigEntity> clusterConfigEntities = clusterDAO.getEnabledConfigsByStack(
+ clusterEntity.getClusterId(), HDP_01);
+
+ Assert.assertEquals(1, clusterConfigEntities.size());
+
+ ClusterConfigEntity configEntity = clusterConfigEntities.get(0);
+ Assert.assertEquals("version2", configEntity.getTag());
+ Assert.assertEquals("oozie-site", configEntity.getType());
+ Assert.assertTrue(configEntity.isSelected());
- ClusterConfigMappingEntity e1 = clusterConfigMappingEntities.get(0);
- String tag1 = e1.getTag();
- Assert.assertEquals("version1", tag1);
- String type1 = e1.getType();
- Assert.assertEquals("oozie-site", type1);
+ // this should still return the 2nd one since the 3rd one has never been
+ // selected as its only for configuration groups
+ clusterConfigEntities = clusterDAO.getLatestConfigurations(clusterEntity.getClusterId(),
+ HDP_01);
- ClusterConfigMappingEntity e2 = clusterConfigMappingEntities.get(1);
- String tag2 = e2.getTag();
- Assert.assertEquals("version2", tag2);
- String type2 = e2.getType();
- Assert.assertEquals("oozie-site", type2);
+ configEntity = clusterConfigEntities.get(0);
+ Assert.assertEquals("version2", configEntity.getTag());
+ Assert.assertEquals("oozie-site", configEntity.getType());
+ Assert.assertTrue(configEntity.isSelected());
}
+
/**
- * Test
- *
- * When the last configuration of a given configuration type to be stored into the clusterconfig table is
- * for a configuration group, there is no corresponding entry generated in the clusterconfigmapping.
- *
- * Therefore, the getlatestconfiguration query should skip configuration groups stored in the clusterconfig table.
+ * Tests that when there are multiple configurations for a stack, only the
+ * selected ones get returned.
*
- * Test to determine the latest configuration of a given type whose version_tag
- * exists in the clusterconfigmapping table.
- *
- * */
+ * @throws Exception
+ */
@Test
- public void testGetLatestClusterConfigMappingByStack() throws Exception {
+ public void testGetEnabledClusterConfigByStack() throws Exception {
Clusters clusters = injector.getInstance(Clusters.class);
clusters.addCluster("c1", HDP_01);
@@ -498,30 +546,24 @@ public class ServiceConfigDAOTest {
initClusterEntities();
- Collection<ClusterConfigMappingEntity> latestMapingEntities = ((ClusterImpl) cluster).getLatestConfigMappingsForStack(
- cluster.getClusterId(), HDP_01);
+ Collection<ClusterConfigEntity> latestConfigs = clusterDAO.getEnabledConfigsByStack(
+ cluster.getClusterId(), HDP_02);
- Assert.assertEquals(1, latestMapingEntities.size());
- for(ClusterConfigMappingEntity e: latestMapingEntities){
- Assert.assertEquals("version2", e.getTag());
+ Assert.assertEquals(1, latestConfigs.size());
+ for (ClusterConfigEntity e : latestConfigs) {
+ Assert.assertEquals("version5", e.getTag());
Assert.assertEquals("oozie-site", e.getType());
}
}
/**
- * Test
- *
- * When the last configuration of a given configuration type to be stored into the clusterconfig table is
- * for a configuration group, there is no corresponding entry generated in the clusterconfigmapping.
- *
- * Therefore, the getlatestconfiguration query should skip configuration groups stored in the clusterconfig table.
- *
- * Test to determine the latest configuration of a given type whose version_tag
- * exists in the clusterconfigmapping table.
- *
- * */
+ * When the last configuration of a given configuration type to be stored into
+ * the clusterconfig table is for a configuration group, that configuration is
+ * not enabled. Therefore, it should be skipped when getting the enabled
+ * configurations for a stack.
+ */
@Test
- public void testGetLatestClusterConfigMappingByStackCG() throws Exception{
+ public void testGetLatestClusterConfigByStackCG() throws Exception {
Clusters clusters = injector.getInstance(Clusters.class);
clusters.addCluster("c1", HDP_01);
@@ -529,16 +571,50 @@ public class ServiceConfigDAOTest {
initClusterEntitiesWithConfigGroups();
- Collection<ClusterConfigMappingEntity> latestMapingEntities = ((ClusterImpl) cluster).getLatestConfigMappingsForStack(
+ Collection<ClusterConfigEntity> latestConfigs = clusterDAO.getEnabledConfigsByStack(
cluster.getClusterId(), HDP_01);
- Assert.assertEquals(1, latestMapingEntities.size());
- for(ClusterConfigMappingEntity e: latestMapingEntities){
+ Assert.assertEquals(1, latestConfigs.size());
+ for (ClusterConfigEntity e : latestConfigs) {
Assert.assertEquals("version2", e.getTag());
Assert.assertEquals("oozie-site", e.getType());
}
}
+ @Test
+ public void testGetLastServiceConfigsForServiceWhenAConfigGroupIsDeleted() throws Exception {
+ Clusters clusters = injector.getInstance(Clusters.class);
+ clusters.addCluster("c1", HDP_01);
+ initClusterEntitiesWithConfigGroups();
+ ConfigGroupEntity configGroupEntity1 = new ConfigGroupEntity();
+ ClusterEntity clusterEntity = clusterDAO.findByName("c1");
+ Long clusterId = clusterEntity.getClusterId();
+ configGroupEntity1.setClusterEntity(clusterEntity);
+ configGroupEntity1.setClusterId(clusterEntity.getClusterId());
+ configGroupEntity1.setGroupName("toTestDeleteGroup_OOZIE");
+ configGroupEntity1.setDescription("toTestDeleteGroup_OOZIE_DESC");
+ configGroupEntity1.setTag("OOZIE");
+ configGroupEntity1.setServiceName("OOZIE");
+ configGroupDAO.create(configGroupEntity1);
+ ConfigGroupEntity testDeleteGroup_OOZIE = configGroupDAO.findByName("toTestDeleteGroup_OOZIE");
+ createServiceConfigWithGroup("OOZIE", "", 2L, 2L, System.currentTimeMillis(), null,
+ testDeleteGroup_OOZIE.getGroupId());
+ Collection<ServiceConfigEntity> serviceConfigEntityList = serviceConfigDAO.getLastServiceConfigsForService(clusterId,
+ "OOZIE");
+ Assert.assertEquals(2, serviceConfigEntityList.size());
+ configGroupDAO.remove(configGroupEntity1);
+ serviceConfigEntityList = serviceConfigDAO.getLastServiceConfigsForService(clusterId, "OOZIE");
+ Assert.assertEquals(1, serviceConfigEntityList.size());
+ }
+
+ /**
+ * Creates a cluster with 5 configurations for Oozie. Each configuration will
+ * have a tag of "version" plus a count. 3 configs will be for
+ * {@link #HDP_01}, and 2 will be for {@link #HDP_02}. Only the most recent
+ * configuration, {@code version5}, will be enabled.
+ *
+ * @throws Exception
+ */
private void initClusterEntities() throws Exception{
String userName = "admin";
@@ -558,7 +634,9 @@ public class ServiceConfigDAOTest {
String oozieSite = "oozie-site";
- for (int i = 1; i < 6; i++){
+ // create 5 Oozie Configs, with only the latest from HDP-0.2 being enabled
+ int configsToCreate = 5;
+ for (int i = 1; i <= configsToCreate; i++) {
Thread.sleep(1);
ClusterConfigEntity entity = new ClusterConfigEntity();
entity.setClusterEntity(clusterEntity);
@@ -567,58 +645,35 @@ public class ServiceConfigDAOTest {
entity.setVersion(Long.valueOf(i));
entity.setTag("version"+i);
entity.setTimestamp(new Date().getTime());
- if(i < 4) {
- entity.setStack(stackEntityHDP01);
- } else {
+
+ // set selected to true to get the last selected timestamp populated
+ entity.setSelected(true);
+
+ // now set it to false
+ entity.setSelected(false);
+
+ entity.setStack(stackEntityHDP01);
+ if (i >= 4) {
entity.setStack(stackEntityHDP02);
+ if (i == configsToCreate) {
+ entity.setSelected(true);
+ }
}
+
entity.setData("");
clusterDAO.createConfig(entity);
clusterEntity.getClusterConfigEntities().add(entity);
clusterDAO.merge(clusterEntity);
}
-
- Collection<ClusterConfigMappingEntity> entities = clusterEntity.getConfigMappingEntities();
- if(null == entities){
- entities = new ArrayList<ClusterConfigMappingEntity>();
- clusterEntity.setConfigMappingEntities(entities);
- }
-
- ClusterConfigMappingEntity e1 = new ClusterConfigMappingEntity();
- e1.setClusterEntity(clusterEntity);
- e1.setClusterId(clusterEntity.getClusterId());
- e1.setCreateTimestamp(System.currentTimeMillis());
- e1.setSelected(0);
- e1.setUser(userName);
- e1.setType(oozieSite);
- e1.setTag("version1");
- entities.add(e1);
- clusterDAO.merge(clusterEntity);
-
- ClusterConfigMappingEntity e2 = new ClusterConfigMappingEntity();
- e2.setClusterEntity(clusterEntity);
- e2.setClusterId(clusterEntity.getClusterId());
- e2.setCreateTimestamp(System.currentTimeMillis());
- e2.setSelected(0);
- e2.setUser(userName);
- e2.setType(oozieSite);
- e2.setTag("version2");
- entities.add(e2);
- clusterDAO.merge(clusterEntity);
-
- Thread.sleep(1);
- ClusterConfigMappingEntity e3 = new ClusterConfigMappingEntity();
- e3.setClusterEntity(clusterEntity);
- e3.setClusterId(clusterEntity.getClusterId());
- e3.setCreateTimestamp(System.currentTimeMillis());
- e3.setSelected(1);
- e3.setUser(userName);
- e3.setType(oozieSite);
- e3.setTag("version4");
- entities.add(e3);
- clusterDAO.merge(clusterEntity);
}
+ /**
+ * Creates a cluster with 3 configurations for Oozie in the {@link #HDP_01}
+ * stack. Only {@code version2}, will be enabled. {@code version3} will be for
+ * a new configuration group.
+ *
+ * @throws Exception
+ */
private void initClusterEntitiesWithConfigGroups() throws Exception{
String userName = "admin";
@@ -636,8 +691,9 @@ public class ServiceConfigDAOTest {
StackEntity stackEntityHDP01 = stackDAO.find(HDP_01.getStackName(),HDP_01.getStackVersion());
String oozieSite = "oozie-site";
- int count = 3;
- for (int i = 1; i < count; i++){
+ // create 2 configurations for HDP-0.1
+ int count = 2;
+ for (int i = 1; i <= count; i++) {
Thread.sleep(1);
ClusterConfigEntity entity = new ClusterConfigEntity();
entity.setClusterEntity(clusterEntity);
@@ -648,43 +704,17 @@ public class ServiceConfigDAOTest {
entity.setTimestamp(new Date().getTime());
entity.setStack(stackEntityHDP01);
entity.setData("");
+ entity.setSelected(false);
+
+ if (i == count) {
+ entity.setSelected(true);
+ }
+
clusterDAO.createConfig(entity);
clusterEntity.getClusterConfigEntities().add(entity);
clusterDAO.merge(clusterEntity);
}
- Collection<ClusterConfigMappingEntity> entities = clusterEntity.getConfigMappingEntities();
- if(null == entities){
- entities = new ArrayList<ClusterConfigMappingEntity>();
- clusterEntity.setConfigMappingEntities(entities);
- }
-
- Thread.sleep(1);
- ClusterConfigMappingEntity e1 = new ClusterConfigMappingEntity();
- e1.setClusterEntity(clusterEntity);
- e1.setClusterId(clusterEntity.getClusterId());
- e1.setCreateTimestamp(System.currentTimeMillis());
- e1.setSelected(0);
- e1.setUser(userName);
- e1.setType(oozieSite);
- e1.setTag("version1");
- entities.add(e1);
- clusterDAO.merge(clusterEntity);
-
- Thread.sleep(1);
- ClusterConfigMappingEntity e2 = new ClusterConfigMappingEntity();
- e2.setClusterEntity(clusterEntity);
- e2.setClusterId(clusterEntity.getClusterId());
- e2.setCreateTimestamp(System.currentTimeMillis());
- e2.setSelected(1);
- e2.setUser(userName);
- e2.setType(oozieSite);
- e2.setTag("version2");
- entities.add(e2);
- clusterDAO.merge(clusterEntity);
-
- ConfigGroupEntity configGroupEntity = new ConfigGroupEntity();
-
ResourceTypeEntity resourceTypeEntity = resourceTypeDAO.findById(ResourceType.CLUSTER.getId());
if (resourceTypeEntity == null) {
resourceTypeEntity = new ResourceTypeEntity();
@@ -696,51 +726,48 @@ public class ServiceConfigDAOTest {
ResourceEntity resourceEntity = new ResourceEntity();
resourceEntity.setResourceType(resourceTypeEntity);
+ // create a configuration group for oozie
+ ConfigGroupEntity configGroupEntity = new ConfigGroupEntity();
configGroupEntity.setClusterEntity(clusterEntity);
configGroupEntity.setClusterId(clusterEntity.getClusterId());
configGroupEntity.setGroupName("oozie_server");
configGroupEntity.setDescription("oozie server");
configGroupEntity.setTag("OOZIE");
+ configGroupDAO.create(configGroupEntity);
- ClusterConfigEntity configEntity = new ClusterConfigEntity();
- configEntity.setType("oozie-site");
- configEntity.setTag("version3");
- configEntity.setData("someData");
- configEntity.setAttributes("someAttributes");
- configEntity.setStack(stackEntityHDP01);
+ // create a new configuration for oozie, for the config group
+ ClusterConfigEntity configEntityForGroup = new ClusterConfigEntity();
+ configEntityForGroup.setSelected(false);
+ configEntityForGroup.setType("oozie-site");
+ configEntityForGroup.setTag("version3");
+ configEntityForGroup.setData("someData");
+ configEntityForGroup.setAttributes("someAttributes");
+ configEntityForGroup.setStack(stackEntityHDP01);
- Thread.sleep(1);
- List<ClusterConfigEntity> configEntities = new
- ArrayList<ClusterConfigEntity>();
- configEntities.add(configEntity);
+ List<ClusterConfigEntity> configEntitiesForGroup = new ArrayList<>();
+ configEntitiesForGroup.add(configEntityForGroup);
+ List<ConfigGroupConfigMappingEntity> configMappingEntities = new ArrayList<>();
- configGroupDAO.create(configGroupEntity);
+ for (ClusterConfigEntity config : configEntitiesForGroup) {
+ config.setClusterEntity(clusterEntity);
+ config.setClusterId(clusterEntity.getClusterId());
+ clusterDAO.createConfig(config);
- Thread.sleep(1);
- if (configEntities != null && !configEntities.isEmpty()) {
- List<ConfigGroupConfigMappingEntity> configMappingEntities = new
- ArrayList<ConfigGroupConfigMappingEntity>();
-
- for (ClusterConfigEntity config : configEntities) {
- config.setClusterEntity(clusterEntity);
- config.setClusterId(clusterEntity.getClusterId());
- clusterDAO.createConfig(config);
-
- ConfigGroupConfigMappingEntity configMappingEntity = new
- ConfigGroupConfigMappingEntity();
- configMappingEntity.setClusterId(clusterEntity.getClusterId());
- configMappingEntity.setClusterConfigEntity(config);
- configMappingEntity.setConfigGroupEntity(configGroupEntity);
- configMappingEntity.setConfigGroupId(configGroupEntity.getGroupId());
- configMappingEntity.setVersionTag(config.getTag());
- configMappingEntity.setConfigType(config.getType());
- configMappingEntity.setTimestamp(System.currentTimeMillis());
- configMappingEntities.add(configMappingEntity);
- configGroupConfigMappingDAO.create(configMappingEntity);
- }
-
- configGroupEntity.setConfigGroupConfigMappingEntities(configMappingEntities);
- configGroupDAO.merge(configGroupEntity);
+ Thread.sleep(1);
+ ConfigGroupConfigMappingEntity configMappingEntity = new
+ ConfigGroupConfigMappingEntity();
+ configMappingEntity.setClusterId(clusterEntity.getClusterId());
+ configMappingEntity.setClusterConfigEntity(config);
+ configMappingEntity.setConfigGroupEntity(configGroupEntity);
+ configMappingEntity.setConfigGroupId(configGroupEntity.getGroupId());
+ configMappingEntity.setVersionTag(config.getTag());
+ configMappingEntity.setConfigType(config.getType());
+ configMappingEntity.setTimestamp(System.currentTimeMillis());
+ configMappingEntities.add(configMappingEntity);
+ configGroupConfigMappingDAO.create(configMappingEntity);
}
+
+ configGroupEntity.setConfigGroupConfigMappingEntities(configMappingEntities);
+ configGroupDAO.merge(configGroupEntity);
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/48f7fb22/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
index 47d2a81..d07ac15 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -21,7 +21,6 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
-import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -31,7 +30,6 @@ import javax.persistence.EntityManager;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.H2DatabaseCleaner;
import org.apache.ambari.server.ServiceComponentNotFoundException;
-import org.apache.ambari.server.ServiceNotFoundException;
import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper;
import org.apache.ambari.server.actionmanager.HostRoleCommand;
import org.apache.ambari.server.actionmanager.HostRoleCommandFactory;
@@ -42,19 +40,18 @@ import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.orm.GuiceJpaInitializer;
import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
import org.apache.ambari.server.orm.OrmTestHelper;
-import org.apache.ambari.server.orm.dao.ClusterVersionDAO;
import org.apache.ambari.server.orm.dao.HostDAO;
import org.apache.ambari.server.orm.dao.HostVersionDAO;
import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
import org.apache.ambari.server.orm.dao.RequestDAO;
import org.apache.ambari.server.orm.dao.StackDAO;
import org.apache.ambari.server.orm.dao.UpgradeDAO;
-import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
import org.apache.ambari.server.orm.entities.HostVersionEntity;
import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
import org.apache.ambari.server.orm.entities.RequestEntity;
import org.apache.ambari.server.orm.entities.StackEntity;
import org.apache.ambari.server.orm.entities.UpgradeEntity;
+import org.apache.ambari.server.orm.entities.UpgradeHistoryEntity;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.ConfigFactory;
@@ -75,8 +72,6 @@ import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import com.google.inject.Guice;
import com.google.inject.Inject;
@@ -87,18 +82,16 @@ import com.google.inject.persist.UnitOfWork;
* Tests upgrade-related server side actions
*/
public class ComponentVersionCheckActionTest {
- private static final Logger LOG = LoggerFactory.getLogger(ComponentVersionCheckActionTest.class);
-
private static final String HDP_2_1_1_0 = "2.1.1.0-1";
private static final String HDP_2_1_1_1 = "2.1.1.1-2";
- private static final String HDP_2_2_1_0 = "2.2.0.1-3";
+ private static final String HDP_2_2_1_0 = "2.2.1.0-1";
private static final StackId HDP_21_STACK = new StackId("HDP-2.1.1");
private static final StackId HDP_22_STACK = new StackId("HDP-2.2.0");
- private static final String HDP_211_CENTOS6_REPO_URL = "http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.1.1.0-118";
+ private static final String HDP_211_CENTOS6_REPO_URL = "http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0";
private Injector m_injector;
@@ -109,9 +102,6 @@ public class ComponentVersionCheckActionTest {
private RepositoryVersionDAO repoVersionDAO;
@Inject
- private ClusterVersionDAO clusterVersionDAO;
-
- @Inject
private HostVersionDAO hostVersionDAO;
@Inject
@@ -148,6 +138,8 @@ public class ComponentVersionCheckActionTest {
String clusterName = "c1";
String hostName = "h1";
+ m_helper.createStack(sourceStack);
+
Clusters clusters = m_injector.getInstance(Clusters.class);
clusters.addCluster(clusterName, sourceStack);
@@ -174,9 +166,7 @@ public class ComponentVersionCheckActionTest {
host.setHostAttributes(hostAttributes);
// Create the starting repo version
- RepositoryVersionEntity sourceRepositoryVersion = m_helper.getOrCreateRepositoryVersion(sourceStack, sourceRepo);
- c.createClusterVersion(sourceStack, sourceRepo, "admin", RepositoryVersionState.INSTALLING);
- c.transitionClusterVersion(sourceStack, sourceRepo, RepositoryVersionState.CURRENT);
+ m_helper.getOrCreateRepositoryVersion(sourceStack, sourceRepo);
// Create the new repo version
String urlInfo = "[{'repositories':["
@@ -187,13 +177,8 @@ public class ComponentVersionCheckActionTest {
targetRepo, String.valueOf(System.currentTimeMillis()), urlInfo);
// Start upgrading the newer repo
- c.createClusterVersion(targetStack, targetRepo, "admin", RepositoryVersionState.INSTALLING);
- c.transitionClusterVersion(targetStack, targetRepo, RepositoryVersionState.INSTALLED);
c.setCurrentStackVersion(targetStack);
- c.mapHostVersions(Collections.singleton(hostName), c.getCurrentClusterVersion(),
- RepositoryVersionState.CURRENT);
-
HostDAO hostDAO = m_injector.getInstance(HostDAO.class);
HostVersionEntity entity = new HostVersionEntity();
@@ -214,16 +199,31 @@ public class ComponentVersionCheckActionTest {
upgradeEntity.setClusterId(c.getClusterId());
upgradeEntity.setRequestEntity(requestEntity);
upgradeEntity.setUpgradePackage("");
- upgradeEntity.setFromRepositoryVersion(sourceRepositoryVersion);
- upgradeEntity.setToRepositoryVersion(toRepositoryVersion);
+ upgradeEntity.setRepositoryVersion(toRepositoryVersion);
upgradeEntity.setUpgradeType(UpgradeType.NON_ROLLING);
upgradeDAO.create(upgradeEntity);
c.setUpgradeEntity(upgradeEntity);
}
- private void makeCrossStackUpgradeCluster(StackId sourceStack, String sourceRepo, StackId targetStack,
- String targetRepo, String clusterName, String hostName) throws Exception {
+ /**
+ * Creates a cluster with a running upgrade. The upgrade will have no services
+ * attached to it, so those will need to be set after this is called.
+ *
+ * @param sourceStack
+ * @param sourceRepo
+ * @param targetStack
+ * @param targetRepo
+ * @param clusterName
+ * @param hostName
+ * @throws Exception
+ */
+ private void makeCrossStackUpgradeCluster(StackId sourceStack, String sourceRepo,
+ StackId targetStack, String targetRepo, String clusterName, String hostName)
+ throws Exception {
+
+ m_helper.createStack(sourceStack);
+ m_helper.createStack(targetStack);
Clusters clusters = m_injector.getInstance(Clusters.class);
clusters.addCluster(clusterName, sourceStack);
@@ -254,11 +254,11 @@ public class ComponentVersionCheckActionTest {
clusters.mapHostToCluster(hostName, clusterName);
// Create the starting repo version
- RepositoryVersionEntity sourceRepositoryVersion = m_helper.getOrCreateRepositoryVersion(sourceStack, sourceRepo);
- c.createClusterVersion(sourceStack, sourceRepo, "admin", RepositoryVersionState.INSTALLING);
- c.transitionClusterVersion(sourceStack, sourceRepo, RepositoryVersionState.CURRENT);
+ m_helper.getOrCreateRepositoryVersion(sourceStack, sourceRepo);
- RepositoryVersionEntity toRepositoryVersion = m_helper.getOrCreateRepositoryVersion(targetStack,targetRepo);
+ // create the new repo version
+ RepositoryVersionEntity toRepositoryVersion = m_helper.getOrCreateRepositoryVersion(targetStack,
+ targetRepo);
RequestEntity requestEntity = new RequestEntity();
requestEntity.setClusterId(c.getClusterId());
@@ -272,15 +272,13 @@ public class ComponentVersionCheckActionTest {
upgradeEntity.setClusterId(c.getClusterId());
upgradeEntity.setRequestEntity(requestEntity);
upgradeEntity.setUpgradePackage("");
- upgradeEntity.setFromRepositoryVersion(sourceRepositoryVersion);
- upgradeEntity.setToRepositoryVersion(toRepositoryVersion);
+ upgradeEntity.setRepositoryVersion(toRepositoryVersion);
upgradeEntity.setUpgradeType(UpgradeType.NON_ROLLING);
upgradeDAO.create(upgradeEntity);
c.setUpgradeEntity(upgradeEntity);
}
-
/**
* Creates a new {@link HostVersionEntity} instance in the
* {@link RepositoryVersionState#INSTALLED} for the specified host.
@@ -318,9 +316,6 @@ public class ComponentVersionCheckActionTest {
// Finalize the upgrade
Map<String, String> commandParams = new HashMap<>();
- commandParams.put(FinalizeUpgradeAction.UPGRADE_DIRECTION_KEY, "upgrade");
- commandParams.put(FinalizeUpgradeAction.VERSION_KEY, targetRepo);
-
ExecutionCommand executionCommand = new ExecutionCommand();
executionCommand.setCommandParams(commandParams);
executionCommand.setClusterName("c1");
@@ -336,19 +331,19 @@ public class ComponentVersionCheckActionTest {
assertNotNull(report);
assertEquals(HostRoleStatus.COMPLETED.name(), report.getStatus());
assertEquals(0, report.getExitCode());
-
}
@Test
public void testMixedComponentVersions() throws Exception {
StackId sourceStack = HDP_21_STACK;
StackId targetStack = HDP_22_STACK;
- String sourceRepo = HDP_2_1_1_0;
- String targetRepo = HDP_2_2_1_0;
+ String sourceVersion = HDP_2_1_1_0;
+ String targetVersion = HDP_2_2_1_0;
String clusterName = "c1";
String hostName = "h1";
- makeCrossStackUpgradeCluster(sourceStack, sourceRepo, targetStack, targetRepo, clusterName, hostName);
+ makeCrossStackUpgradeCluster(sourceStack, sourceVersion, targetStack, targetVersion,
+ clusterName, hostName);
Clusters clusters = m_injector.getInstance(Clusters.class);
Cluster cluster = clusters.getCluster("c1");
@@ -356,20 +351,11 @@ public class ComponentVersionCheckActionTest {
RepositoryVersionEntity sourceRepoVersion = m_helper.getOrCreateRepositoryVersion(HDP_21_STACK, HDP_2_1_1_0);
RepositoryVersionEntity targetRepoVersion = m_helper.getOrCreateRepositoryVersion(HDP_22_STACK, HDP_2_2_1_0);
- // Start upgrading the newer repo
- cluster.createClusterVersion(targetStack, targetRepo, "admin",
- RepositoryVersionState.INSTALLING);
- cluster.transitionClusterVersion(targetStack, targetRepo, RepositoryVersionState.INSTALLED);
-
- cluster.mapHostVersions(Collections.singleton(hostName), cluster.getCurrentClusterVersion(),
- RepositoryVersionState.CURRENT);
-
-
- Service service = installService(cluster, "HDFS");
+ Service service = installService(cluster, "HDFS", sourceRepoVersion);
addServiceComponent(cluster, service, "NAMENODE");
addServiceComponent(cluster, service, "DATANODE");
- createNewServiceComponentHost(cluster, "HDFS", "NAMENODE", "h1");
- createNewServiceComponentHost(cluster, "HDFS", "DATANODE", "h1");
+ createNewServiceComponentHost(cluster, "HDFS", "NAMENODE", hostName);
+ createNewServiceComponentHost(cluster, "HDFS", "DATANODE", hostName);
// create some configs
createConfigs(cluster);
@@ -381,24 +367,36 @@ public class ComponentVersionCheckActionTest {
cluster.setCurrentStackVersion(sourceStack);
cluster.setDesiredStackVersion(targetStack);
- // set the SCH versions to the new stack so that the finalize action is
- // happy
- cluster.getServiceComponentHosts("HDFS", "NAMENODE").get(0).setVersion(targetRepo);
- // don't update DATANODE - we want to make the action complain
+ // tell the upgrade that HDFS is upgrading - without this, no services will
+ // be participating in the upgrade
+ UpgradeEntity upgrade = cluster.getUpgradeInProgress();
+ UpgradeHistoryEntity history = new UpgradeHistoryEntity();
+ history.setUpgrade(upgrade);
+ history.setServiceName("HDFS");
+ history.setComponentName("NAMENODE");
+ history.setFromRepositoryVersion(sourceRepoVersion);
+ history.setTargetRepositoryVersion(targetRepoVersion);
+ upgrade.addHistory(history);
+
+ history = new UpgradeHistoryEntity();
+ history.setUpgrade(upgrade);
+ history.setServiceName("HDFS");
+ history.setComponentName("DATANODE");
+ history.setFromRepositoryVersion(sourceRepoVersion);
+ history.setTargetRepositoryVersion(targetRepoVersion);
+ upgrade.addHistory(history);
- // inject an unhappy path where the cluster repo version is still UPGRADING
- // even though all of the hosts are UPGRADED
- ClusterVersionEntity upgradingClusterVersion = clusterVersionDAO.findByClusterAndStackAndVersion(
- "c1", HDP_22_STACK, targetRepo);
+ UpgradeDAO upgradeDAO = m_injector.getInstance(UpgradeDAO.class);
+ upgrade = upgradeDAO.merge(upgrade);
- upgradingClusterVersion.setState(RepositoryVersionState.INSTALLING);
- upgradingClusterVersion = clusterVersionDAO.merge(upgradingClusterVersion);
+ // set the SCH versions to the new stack so that the finalize action is
+ // happy - don't update DATANODE - we want to make the action complain
+ cluster.getServiceComponentHosts("HDFS", "NAMENODE").get(0).setVersion(targetVersion);
// verify the conditions for the test are met properly
- upgradingClusterVersion = clusterVersionDAO.findByClusterAndStackAndVersion("c1", HDP_22_STACK, targetRepo);
- List<HostVersionEntity> hostVersions = hostVersionDAO.findByClusterStackAndVersion("c1", HDP_22_STACK, targetRepo);
+ List<HostVersionEntity> hostVersions = hostVersionDAO.findByClusterStackAndVersion("c1",
+ HDP_22_STACK, targetVersion);
- assertEquals(RepositoryVersionState.INSTALLING, upgradingClusterVersion.getState());
assertTrue(hostVersions.size() > 0);
for (HostVersionEntity hostVersion : hostVersions) {
assertEquals(RepositoryVersionState.INSTALLED, hostVersion.getState());
@@ -407,11 +405,6 @@ public class ComponentVersionCheckActionTest {
// now finalize and ensure we can transition from UPGRADING to UPGRADED
// automatically before CURRENT
Map<String, String> commandParams = new HashMap<>();
- commandParams.put(FinalizeUpgradeAction.UPGRADE_DIRECTION_KEY, "upgrade");
- commandParams.put(FinalizeUpgradeAction.VERSION_KEY, targetRepo);
- commandParams.put(FinalizeUpgradeAction.ORIGINAL_STACK_KEY, sourceStack.getStackId());
- commandParams.put(FinalizeUpgradeAction.TARGET_STACK_KEY, targetStack.getStackId());
-
ExecutionCommand executionCommand = new ExecutionCommand();
executionCommand.setCommandParams(commandParams);
executionCommand.setClusterName("c1");
@@ -428,12 +421,85 @@ public class ComponentVersionCheckActionTest {
assertNotNull(report);
assertEquals(HostRoleStatus.FAILED.name(), report.getStatus());
assertEquals(-1, report.getExitCode());
+
+ // OK, now set the datanode so it completes
+ cluster.getServiceComponentHosts("HDFS", "DATANODE").get(0).setVersion(targetVersion);
+
+ report = action.execute(null);
+ assertNotNull(report);
+ assertEquals(HostRoleStatus.COMPLETED.name(), report.getStatus());
+ assertEquals(0, report.getExitCode());
+ }
+
+ @Test
+ public void testMatchingPartialVersions() throws Exception {
+ StackId sourceStack = HDP_21_STACK;
+ StackId targetStack = HDP_21_STACK;
+ String sourceRepo = HDP_2_1_1_0;
+ String targetRepo = HDP_2_1_1_1;
+
+ makeUpgradeCluster(sourceStack, sourceRepo, targetStack, targetRepo);
+
+ Clusters clusters = m_injector.getInstance(Clusters.class);
+
+ Host host = clusters.getHost("h1");
+ Assert.assertNotNull(host);
+ host.setOsInfo("redhat6");
+
+ Cluster cluster = clusters.getCluster("c1");
+ clusters.mapHostToCluster("h1", "c1");
+
+ RepositoryVersionEntity repositoryVersion2110 = m_helper.getOrCreateRepositoryVersion(
+ HDP_21_STACK, HDP_2_1_1_0);
+
+ RepositoryVersionEntity repositoryVersion2111 = m_helper.getOrCreateRepositoryVersion(
+ HDP_21_STACK, HDP_2_1_1_1);
+
+ Service service = installService(cluster, "HDFS", repositoryVersion2110);
+ addServiceComponent(cluster, service, "NAMENODE");
+ addServiceComponent(cluster, service, "DATANODE");
+
+ ServiceComponentHost sch = createNewServiceComponentHost(cluster, "HDFS", "NAMENODE", "h1");
+ sch.setVersion(HDP_2_1_1_0);
+ sch = createNewServiceComponentHost(cluster, "HDFS", "DATANODE", "h1");
+ sch.setVersion(HDP_2_1_1_0);
+
+ service = installService(cluster, "ZOOKEEPER", repositoryVersion2111);
+ addServiceComponent(cluster, service, "ZOOKEEPER_SERVER");
+
+ sch = createNewServiceComponentHost(cluster, "ZOOKEEPER", "ZOOKEEPER_SERVER", "h1");
+ sch.setVersion(HDP_2_1_1_1);
+
+ // Verify the repo before calling Finalize
+ AmbariMetaInfo metaInfo = m_injector.getInstance(AmbariMetaInfo.class);
+
+ RepositoryInfo repo = metaInfo.getRepository(sourceStack.getStackName(), sourceStack.getStackVersion(), "redhat6", sourceStack.getStackId());
+ assertEquals(HDP_211_CENTOS6_REPO_URL, repo.getBaseUrl());
+
+ // Finalize the upgrade
+ Map<String, String> commandParams = new HashMap<>();
+ ExecutionCommand executionCommand = new ExecutionCommand();
+ executionCommand.setCommandParams(commandParams);
+ executionCommand.setClusterName("c1");
+
+ HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null);
+ hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand));
+
+ ComponentVersionCheckAction action = m_injector.getInstance(ComponentVersionCheckAction.class);
+ action.setExecutionCommand(executionCommand);
+ action.setHostRoleCommand(hostRoleCommand);
+
+ CommandReport report = action.execute(null);
+ assertNotNull(report);
+ assertEquals(HostRoleStatus.COMPLETED.name(), report.getStatus());
+ assertEquals(0, report.getExitCode());
+
}
private ServiceComponentHost createNewServiceComponentHost(Cluster cluster, String svc,
String svcComponent, String hostName) throws AmbariException {
Assert.assertNotNull(cluster.getConfigGroups());
- Service s = installService(cluster, svc);
+ Service s = cluster.getService(svc);
ServiceComponent sc = addServiceComponent(cluster, s, svcComponent);
ServiceComponentHost sch = serviceComponentHostFactory.createNew(sc, hostName);
@@ -441,22 +507,13 @@ public class ComponentVersionCheckActionTest {
sc.addServiceComponentHost(sch);
sch.setDesiredState(State.INSTALLED);
sch.setState(State.INSTALLED);
- sch.setDesiredStackVersion(cluster.getDesiredStackVersion());
- sch.setStackVersion(cluster.getCurrentStackVersion());
return sch;
}
- private Service installService(Cluster cluster, String serviceName) throws AmbariException {
- Service service = null;
-
- try {
- service = cluster.getService(serviceName);
- } catch (ServiceNotFoundException e) {
- service = serviceFactory.createNew(cluster, serviceName);
- cluster.addService(service);
- }
-
+ private Service installService(Cluster cluster, String serviceName, RepositoryVersionEntity repositoryVersion) throws AmbariException {
+ Service service = serviceFactory.createNew(cluster, serviceName, repositoryVersion);
+ cluster.addService(service);
return service;
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/48f7fb22/ambari-server/src/test/java/org/apache/ambari/server/stack/RepoUtilTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/RepoUtilTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/RepoUtilTest.java
index 99a34f4..9216499 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/RepoUtilTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/RepoUtilTest.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -21,22 +21,20 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
-import com.google.common.collect.ImmutableListMultimap;
-import com.google.common.collect.ImmutableMultimap;
-import com.google.common.collect.Multimaps;
import org.apache.ambari.server.controller.RepositoryResponse;
import org.apache.ambari.server.orm.entities.OperatingSystemEntity;
import org.apache.ambari.server.orm.entities.RepositoryEntity;
import org.apache.ambari.server.state.RepositoryInfo;
-import org.apache.ambari.server.state.stack.RepositoryXml;
import org.junit.Assert;
import org.junit.Test;
import com.google.common.base.Optional;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableListMultimap;
import com.google.common.collect.ListMultimap;
import com.google.common.collect.Lists;
+import com.google.common.collect.Multimaps;
public class RepoUtilTest {
@@ -152,7 +150,7 @@ public class RepoUtilTest {
private static ListMultimap<String, RepositoryInfo> serviceRepos(List<String> operatingSystems,
String repoName, String repoId, String baseUrl) {
- ArrayListMultimap multimap = ArrayListMultimap.create();
+ ListMultimap<String, RepositoryInfo> multimap = ArrayListMultimap.create();
for (String os: operatingSystems) {
RepositoryInfo repoInfo = new RepositoryInfo();
repoInfo.setOsType(os);
http://git-wip-us.apache.org/repos/asf/ambari/blob/48f7fb22/ambari-server/src/test/java/org/apache/ambari/server/stack/UpdateActiveRepoVersionOnStartupTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/UpdateActiveRepoVersionOnStartupTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/UpdateActiveRepoVersionOnStartupTest.java
index 24ab0e8..89b2a0e 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/UpdateActiveRepoVersionOnStartupTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/UpdateActiveRepoVersionOnStartupTest.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -18,32 +18,35 @@
package org.apache.ambari.server.stack;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.atLeast;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
-import java.io.*;
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.util.Collections;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
import org.apache.ambari.server.orm.dao.ClusterDAO;
-import org.apache.ambari.server.orm.dao.ClusterVersionDAO;
import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
import org.apache.ambari.server.orm.entities.ClusterEntity;
-import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
-import org.apache.ambari.server.orm.entities.OperatingSystemEntity;
-import org.apache.ambari.server.orm.entities.RepositoryEntity;
+import org.apache.ambari.server.orm.entities.ClusterServiceEntity;
import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
+import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntity;
import org.apache.ambari.server.orm.entities.StackEntity;
import org.apache.ambari.server.state.RepositoryInfo;
import org.apache.ambari.server.state.StackInfo;
import org.apache.ambari.server.state.stack.upgrade.RepositoryVersionHelper;
-import org.junit.Assert;
import org.junit.Test;
+import org.mockito.Mockito;
import com.google.common.base.Charsets;
import com.google.common.collect.ImmutableList;
import com.google.common.io.Resources;
+import com.google.gson.Gson;
import com.google.inject.Guice;
-import com.google.inject.Injector;
import com.google.inject.Provider;
/**
@@ -55,7 +58,6 @@ public class UpdateActiveRepoVersionOnStartupTest {
private static String ADD_ON_REPO_ID = "MSFT_R-8.0";
private RepositoryVersionDAO repositoryVersionDao;
- private RepositoryVersionEntity repoVersion;
private UpdateActiveRepoVersionOnStartup activeRepoUpdater;
@Test
@@ -77,25 +79,20 @@ public class UpdateActiveRepoVersionOnStartupTest {
* @throws Exception
*/
private void verifyRepoIsAdded() throws Exception {
- verify(repositoryVersionDao, times(1)).merge(repoVersion);
-
- boolean serviceRepoAddedToJson = false;
- outer:
- for (OperatingSystemEntity os: repoVersion.getOperatingSystems()) if (os.getOsType().equals("redhat6")) {
- for (RepositoryEntity repo: os.getRepositories()) if (repo.getRepositoryId().equals(ADD_ON_REPO_ID)) {
- serviceRepoAddedToJson = true;
- break outer;
- }
- }
- Assert.assertTrue(ADD_ON_REPO_ID + " is add-on repo was not added to JSON representation", serviceRepoAddedToJson);
+ verify(repositoryVersionDao, atLeast(1)).merge(Mockito.any(RepositoryVersionEntity.class));
}
public void init(boolean addClusterVersion) throws Exception {
ClusterDAO clusterDao = mock(ClusterDAO.class);
- ClusterVersionDAO clusterVersionDAO = mock(ClusterVersionDAO.class);
+
repositoryVersionDao = mock(RepositoryVersionDAO.class);
+
final RepositoryVersionHelper repositoryVersionHelper = new RepositoryVersionHelper();
- AmbariMetaInfo metaInfo = mock(AmbariMetaInfo.class);
+ Field field = RepositoryVersionHelper.class.getDeclaredField("gson");
+ field.setAccessible(true);
+ field.set(repositoryVersionHelper, new Gson());
+
+ final AmbariMetaInfo metaInfo = mock(AmbariMetaInfo.class);
StackManager stackManager = mock(StackManager.class);
when(metaInfo.getStackManager()).thenReturn(stackManager);
@@ -109,43 +106,64 @@ public class UpdateActiveRepoVersionOnStartupTest {
stackEntity.setStackVersion("2.3");
cluster.setDesiredStack(stackEntity);
+ RepositoryVersionEntity desiredRepositoryVersion = new RepositoryVersionEntity();
+ desiredRepositoryVersion.setStack(stackEntity);
+ desiredRepositoryVersion.setOperatingSystems(resourceAsString("org/apache/ambari/server/stack/UpdateActiveRepoVersionOnStartupTest_initialRepos.json"));
+
+ ServiceDesiredStateEntity serviceDesiredStateEntity = new ServiceDesiredStateEntity();
+ serviceDesiredStateEntity.setDesiredRepositoryVersion(desiredRepositoryVersion);
+
+ ClusterServiceEntity clusterServiceEntity = new ClusterServiceEntity();
+ clusterServiceEntity.setServiceDesiredStateEntity(serviceDesiredStateEntity);
+ cluster.setClusterServiceEntities(Collections.singletonList(clusterServiceEntity));
+
StackInfo stackInfo = new StackInfo();
stackInfo.setName("HDP");
stackInfo.setVersion("2.3");
+
RepositoryInfo repositoryInfo = new RepositoryInfo();
repositoryInfo.setBaseUrl("http://msft.r");
repositoryInfo.setRepoId(ADD_ON_REPO_ID);
repositoryInfo.setRepoName("MSFT_R");
repositoryInfo.setOsType("redhat6");
stackInfo.getRepositories().add(repositoryInfo);
+
when(stackManager.getStack("HDP", "2.3")).thenReturn(stackInfo);
- Provider<RepositoryVersionHelper> repositoryVersionHelperProvider = mock(Provider.class);
+ final Provider<RepositoryVersionHelper> repositoryVersionHelperProvider = mock(Provider.class);
when(repositoryVersionHelperProvider.get()).thenReturn(repositoryVersionHelper);
+
+
InMemoryDefaultTestModule testModule = new InMemoryDefaultTestModule() {
@Override
protected void configure() {
- bind(RepositoryVersionHelper.class).toInstance(repositoryVersionHelper);
+ bind(RepositoryVersionHelper.class).toProvider(repositoryVersionHelperProvider);
+ bind(AmbariMetaInfo.class).toProvider(new Provider<AmbariMetaInfo>() {
+ @Override
+ public AmbariMetaInfo get() {
+ return metaInfo;
+ }
+ });
+
requestStaticInjection(RepositoryVersionEntity.class);
}
};
- Injector injector = Guice.createInjector(testModule);
+
+ Guice.createInjector(testModule);
if (addClusterVersion) {
- repoVersion = new RepositoryVersionEntity();
- repoVersion.setStack(stackEntity);
- repoVersion.setOperatingSystems(resourceAsString("org/apache/ambari/server/stack/UpdateActiveRepoVersionOnStartupTest_initialRepos.json"));
- ClusterVersionEntity clusterVersion = new ClusterVersionEntity();
- clusterVersion.setRepositoryVersion(repoVersion);
- when(clusterVersionDAO.findByClusterAndStateCurrent(CLUSTER_NAME)).thenReturn(clusterVersion);
+ RepositoryInfo info = new RepositoryInfo();
+ info.setBaseUrl("http://msft.r");
+ info.setRepoId(ADD_ON_REPO_ID);
+ info.setRepoName("MSFT_R1");
+ info.setOsType("redhat6");
+ stackInfo.getRepositories().add(info);
}
activeRepoUpdater = new UpdateActiveRepoVersionOnStartup(clusterDao,
- clusterVersionDAO, repositoryVersionDao, repositoryVersionHelper, metaInfo);
+ repositoryVersionDao, repositoryVersionHelper, metaInfo);
}
-
-
private static String resourceAsString(String resourceName) throws IOException {
return Resources.toString(Resources.getResource(resourceName), Charsets.UTF_8);
}