You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by jo...@apache.org on 2017/05/23 21:35:21 UTC
[2/3] ambari git commit: AMBARI-21078 - Merging Configurations On
Service/Patch Upgrades Should Create New Configurations Only For Included
Services (jonathanhurley)
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
index 23b6db1..2c786b5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
@@ -1530,12 +1530,20 @@ public class ClusterImpl implements Cluster {
long nextServiceConfigVersion = serviceConfigDAO.findNextServiceConfigVersion(clusterId,
serviceName);
+ // get the correct stack ID to use when creating the service config
+ StackEntity stackEntity = clusterEntity.getDesiredStack();
+ Service service = services.get(serviceName);
+ if (null != service) {
+ StackId serviceStackId = service.getDesiredStackId();
+ stackEntity = stackDAO.find(serviceStackId);
+ }
+
serviceConfigEntity.setServiceName(serviceName);
serviceConfigEntity.setClusterEntity(clusterEntity);
serviceConfigEntity.setVersion(nextServiceConfigVersion);
serviceConfigEntity.setUser(user);
serviceConfigEntity.setNote(note);
- serviceConfigEntity.setStack(clusterEntity.getDesiredStack());
+ serviceConfigEntity.setStack(stackEntity);
serviceConfigDAO.create(serviceConfigEntity);
if (configGroup != null) {
@@ -2320,30 +2328,50 @@ public class ClusterImpl implements Cluster {
*/
@Override
@Transactional
- public void applyLatestConfigurations(StackId stackId) {
+ public void applyLatestConfigurations(StackId stackId, String serviceName) {
clusterGlobalLock.writeLock().lock();
try {
+ // grab all of the configurations and hash them so we can easily update them when picking and choosing only those from the service
ClusterEntity clusterEntity = getClusterEntity();
Collection<ClusterConfigEntity> configEntities = clusterEntity.getClusterConfigEntities();
-
- // hash them for easier retrieval later
ImmutableMap<Object, ClusterConfigEntity> clusterConfigEntityMap = Maps.uniqueIndex(
configEntities, Functions.identity());
- // disable all configs
- for (ClusterConfigEntity e : configEntities) {
- LOG.debug("Disabling configuration {} with tag {}", e.getType(), e.getTag());
- e.setSelected(false);
+ // find the latest configurations for the service
+ Set<String> configTypesForService = new HashSet<>();
+ List<ServiceConfigEntity> latestServiceConfigs = serviceConfigDAO.getLastServiceConfigsForService(
+ getClusterId(), serviceName);
+
+ // process the current service configurations
+ for (ServiceConfigEntity serviceConfig : latestServiceConfigs) {
+ List<ClusterConfigEntity> latestConfigs = serviceConfig.getClusterConfigEntities();
+ for( ClusterConfigEntity latestConfig : latestConfigs ){
+ // grab the hash'd entity from the map so we're working with the right one
+ latestConfig = clusterConfigEntityMap.get(latestConfig);
+
+ // add the config type to our list for tracking later on
+ configTypesForService.add(latestConfig.getType());
+
+ // un-select the latest configuration for the service
+ LOG.debug("Disabling configuration {} with tag {}", latestConfig.getType(), latestConfig.getTag());
+ latestConfig.setSelected(false);
+ }
}
- // work through the in-memory list, finding only the most recent mapping per type
+ // get the latest configurations for the given stack which we're going to make active
Collection<ClusterConfigEntity> latestConfigsByStack = clusterDAO.getLatestConfigurations(
clusterId, stackId);
- // pull the correct latest mapping for the stack out of the cached map
- // from the cluster entity
+ // set the service configuration for the specified stack to the latest
for (ClusterConfigEntity latestConfigByStack : latestConfigsByStack) {
+ // since we're iterating over all configuration types, only work with those that are for our service
+ if (!configTypesForService.contains(latestConfigByStack.getType())) {
+ continue;
+ }
+
+ // pull the correct latest mapping for the stack out of the cached map
+ // from the cluster entity
ClusterConfigEntity entity = clusterConfigEntityMap.get(latestConfigByStack);
entity.setSelected(true);
@@ -2358,14 +2386,15 @@ public class ClusterImpl implements Cluster {
clusterEntity = clusterDAO.merge(clusterEntity);
cacheConfigurations();
+
+ LOG.info(
+ "Applied latest configurations for {} on stack {}. The following types were modified: {}",
+ serviceName, stackId, StringUtils.join(configTypesForService, ','));
+
} finally {
clusterGlobalLock.writeLock().unlock();
}
- LOG.info(
- "Applied latest configurations for {} on stack {}. The desired configurations are now {}",
- getClusterName(), stackId, getDesiredConfigs());
-
// publish an event to instruct entity managers to clear cached instances of
// ClusterEntity immediately - it takes EclipseLink about 1000ms to update
// the L1 caches of other threads and the action scheduler could act upon
@@ -2389,14 +2418,18 @@ public class ClusterImpl implements Cluster {
}
/**
- * Removes all configurations associated with the specified stack. The caller
- * should make sure the cluster global write lock is acquired.
+ * Removes all configurations associated with the specified stack for the
+ * specified service. The caller should make sure the cluster global write
+ * lock is acquired.
*
* @param stackId
+ * the stack to remove configurations for (not {@code null}).
+ * @param serviceName
+ * the service name (not {@code null}).
* @see #clusterGlobalLock
*/
@Transactional
- void removeAllConfigsForStack(StackId stackId) {
+ void removeAllConfigsForStack(StackId stackId, String serviceName) {
ClusterEntity clusterEntity = getClusterEntity();
// make sure the entity isn't stale in the current unit of work.
@@ -2404,53 +2437,50 @@ public class ClusterImpl implements Cluster {
long clusterId = clusterEntity.getClusterId();
+ // keep track of any types removed for logging purposes
+ Set<String> removedConfigurationTypes = new HashSet<>();
+
// this will keep track of cluster config mappings that need removal
// since there is no relationship between configs and their mappings, we
// have to do it manually
List<ClusterConfigEntity> removedClusterConfigs = new ArrayList<>(50);
- Collection<ClusterConfigEntity> clusterConfigEntities = clusterEntity.getClusterConfigEntities();
+ Collection<ClusterConfigEntity> allClusterConfigEntities = clusterEntity.getClusterConfigEntities();
+ Collection<ServiceConfigEntity> allServiceConfigEntities = clusterEntity.getServiceConfigEntities();
- List<ServiceConfigEntity> serviceConfigs = serviceConfigDAO.getAllServiceConfigsForClusterAndStack(
- clusterId, stackId);
+ // get the service configs only for the service
+ List<ServiceConfigEntity> serviceConfigs = serviceConfigDAO.getServiceConfigsForServiceAndStack(
+ clusterId, stackId, serviceName);
// remove all service configurations and associated configs
- Collection<ServiceConfigEntity> serviceConfigEntities = clusterEntity.getServiceConfigEntities();
-
for (ServiceConfigEntity serviceConfig : serviceConfigs) {
for (ClusterConfigEntity configEntity : serviceConfig.getClusterConfigEntities()) {
- clusterConfigEntities.remove(configEntity);
+ removedConfigurationTypes.add(configEntity.getType());
+
+ allClusterConfigEntities.remove(configEntity);
clusterDAO.removeConfig(configEntity);
removedClusterConfigs.add(configEntity);
}
serviceConfig.getClusterConfigEntities().clear();
serviceConfigDAO.remove(serviceConfig);
- serviceConfigEntities.remove(serviceConfig);
+ allServiceConfigEntities.remove(serviceConfig);
}
- // remove any leftover cluster configurations that don't have a service
- // configuration (like cluster-env)
- List<ClusterConfigEntity> clusterConfigs = clusterDAO.getAllConfigurations(
- clusterId, stackId);
-
- for (ClusterConfigEntity clusterConfig : clusterConfigs) {
- clusterConfigEntities.remove(clusterConfig);
- clusterDAO.removeConfig(clusterConfig);
- removedClusterConfigs.add(clusterConfig);
- }
-
- clusterEntity.setClusterConfigEntities(clusterConfigEntities);
+ clusterEntity.setClusterConfigEntities(allClusterConfigEntities);
clusterEntity = clusterDAO.merge(clusterEntity);
+
+ LOG.info("Removed the following configuration types for {} on stack {}: {}", serviceName,
+ stackId, StringUtils.join(removedConfigurationTypes, ','));
}
/**
* {@inheritDoc}
*/
@Override
- public void removeConfigurations(StackId stackId) {
+ public void removeConfigurations(StackId stackId, String serviceName) {
clusterGlobalLock.writeLock().lock();
try {
- removeAllConfigsForStack(stackId);
+ removeAllConfigsForStack(stackId, serviceName);
cacheConfigurations();
} finally {
clusterGlobalLock.writeLock().unlock();
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroup.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroup.java b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroup.java
index 60780dd..a4be480 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroup.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroup.java
@@ -124,7 +124,7 @@ public interface ConfigGroup {
* Reassign the set of configs associated with this config group
* @param configs
*/
- void setConfigurations(Map<String, Config> configs);
+ void setConfigurations(Map<String, Config> configs) throws AmbariException;
/**
* Remove host mapping
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java
index 2209dc1..ae6cde9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java
@@ -19,6 +19,8 @@ package org.apache.ambari.server.state.configgroup;
import java.util.Map;
+import javax.annotation.Nullable;
+
import org.apache.ambari.server.orm.entities.ConfigGroupEntity;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Config;
@@ -30,7 +32,8 @@ public interface ConfigGroupFactory {
/**
* Creates and saves a new {@link ConfigGroup}.
*/
- ConfigGroup createNew(@Assisted("cluster") Cluster cluster, @Assisted("name") String name,
+ ConfigGroup createNew(@Assisted("cluster") Cluster cluster,
+ @Assisted("serviceName") @Nullable String serviceName, @Assisted("name") String name,
@Assisted("tag") String tag, @Assisted("description") String description,
@Assisted("configs") Map<String, Config> configs, @Assisted("hosts") Map<Long, Host> hosts);
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
index a04df3c..cb0d200 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
@@ -28,6 +28,8 @@ import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.ReadWriteLock;
+import javax.annotation.Nullable;
+
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.DuplicateResourceException;
import org.apache.ambari.server.controller.ConfigGroupResponse;
@@ -50,6 +52,7 @@ import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.Config;
import org.apache.ambari.server.state.ConfigFactory;
import org.apache.ambari.server.state.Host;
+import org.apache.ambari.server.state.Service;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -64,6 +67,7 @@ public class ConfigGroupImpl implements ConfigGroup {
private ConcurrentMap<Long, Host> m_hosts;
private ConcurrentMap<String, Config> m_configurations;
private String configGroupName;
+ private String serviceName;
private long configGroupId;
/**
@@ -90,13 +94,15 @@ public class ConfigGroupImpl implements ConfigGroup {
private final ConfigFactory configFactory;
@AssistedInject
- public ConfigGroupImpl(@Assisted("cluster") Cluster cluster, @Assisted("name") String name,
+ public ConfigGroupImpl(@Assisted("cluster") Cluster cluster,
+ @Assisted("serviceName") @Nullable String serviceName, @Assisted("name") String name,
@Assisted("tag") String tag, @Assisted("description") String description,
@Assisted("configs") Map<String, Config> configurations,
@Assisted("hosts") Map<Long, Host> hosts, Clusters clusters, ConfigFactory configFactory,
ClusterDAO clusterDAO, HostDAO hostDAO, ConfigGroupDAO configGroupDAO,
ConfigGroupConfigMappingDAO configGroupConfigMappingDAO,
- ConfigGroupHostMappingDAO configGroupHostMappingDAO, LockFactory lockFactory) {
+ ConfigGroupHostMappingDAO configGroupHostMappingDAO, LockFactory lockFactory)
+ throws AmbariException {
this.configFactory = configFactory;
this.clusterDAO = clusterDAO;
@@ -108,6 +114,7 @@ public class ConfigGroupImpl implements ConfigGroup {
hostLock = lockFactory.newReadWriteLock(hostLockLabel);
this.cluster = cluster;
+ this.serviceName = serviceName;
configGroupName = name;
ConfigGroupEntity configGroupEntity = new ConfigGroupEntity();
@@ -115,6 +122,7 @@ public class ConfigGroupImpl implements ConfigGroup {
configGroupEntity.setGroupName(name);
configGroupEntity.setTag(tag);
configGroupEntity.setDescription(description);
+ configGroupEntity.setServiceName(serviceName);
m_hosts = hosts == null ? new ConcurrentHashMap<Long, Host>()
: new ConcurrentHashMap<>(hosts);
@@ -146,6 +154,7 @@ public class ConfigGroupImpl implements ConfigGroup {
this.cluster = cluster;
configGroupId = configGroupEntity.getGroupId();
configGroupName = configGroupEntity.getGroupName();
+ serviceName = configGroupEntity.getServiceName();
m_configurations = new ConcurrentHashMap<>();
m_hosts = new ConcurrentHashMap<>();
@@ -260,7 +269,7 @@ public class ConfigGroupImpl implements ConfigGroup {
* Helper method to recreate configs mapping
*/
@Override
- public void setConfigurations(Map<String, Config> configurations) {
+ public void setConfigurations(Map<String, Config> configurations) throws AmbariException {
ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
ClusterEntity clusterEntity = configGroupEntity.getClusterEntity();
@@ -323,7 +332,7 @@ public class ConfigGroupImpl implements ConfigGroup {
/**
* @param configGroupEntity
*/
- private void persist(ConfigGroupEntity configGroupEntity) {
+ private void persist(ConfigGroupEntity configGroupEntity) throws AmbariException {
persistEntities(configGroupEntity);
cluster.refresh();
}
@@ -334,7 +343,7 @@ public class ConfigGroupImpl implements ConfigGroup {
* @throws Exception
*/
@Transactional
- void persistEntities(ConfigGroupEntity configGroupEntity) {
+ void persistEntities(ConfigGroupEntity configGroupEntity) throws AmbariException {
ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());
configGroupEntity.setClusterEntity(clusterEntity);
configGroupEntity.setTimestamp(System.currentTimeMillis());
@@ -396,8 +405,8 @@ public class ConfigGroupImpl implements ConfigGroup {
* @throws Exception
*/
@Transactional
- void persistConfigMapping(ClusterEntity clusterEntity,
- ConfigGroupEntity configGroupEntity, Map<String, Config> configurations) {
+ void persistConfigMapping(ClusterEntity clusterEntity, ConfigGroupEntity configGroupEntity,
+ Map<String, Config> configurations) throws AmbariException {
configGroupConfigMappingDAO.removeAllByGroup(configGroupEntity.getGroupId());
configGroupEntity.setConfigGroupConfigMappingEntities(
new HashSet<ConfigGroupConfigMappingEntity>());
@@ -409,8 +418,11 @@ public class ConfigGroupImpl implements ConfigGroup {
(cluster.getClusterId(), config.getType(), config.getTag());
if (clusterConfigEntity == null) {
- config = configFactory.createNew(null, cluster, config.getType(), config.getTag(),
- config.getProperties(), config.getPropertiesAttributes());
+ String serviceName = getServiceName();
+ Service service = cluster.getService(serviceName);
+
+ config = configFactory.createNew(service.getDesiredStackId(), cluster, config.getType(),
+ config.getTag(), config.getProperties(), config.getPropertiesAttributes());
entry.setValue(config);
@@ -498,8 +510,7 @@ public class ConfigGroupImpl implements ConfigGroup {
@Override
public String getServiceName() {
- ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
- return configGroupEntity.getServiceName();
+ return serviceName;
}
@Override
@@ -507,6 +518,8 @@ public class ConfigGroupImpl implements ConfigGroup {
ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
configGroupEntity.setServiceName(serviceName);
configGroupDAO.merge(configGroupEntity);
+
+ this.serviceName = serviceName;
}
/**
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/UpgradeScope.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/UpgradeScope.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/UpgradeScope.java
index f35bd68..9a436b6 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/UpgradeScope.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/UpgradeScope.java
@@ -48,13 +48,4 @@ public enum UpgradeScope {
@XmlEnumValue("ANY")
@SerializedName("any")
ANY;
-
- public boolean isScoped(UpgradeScope scope) {
- if (ANY == this || ANY == scope) {
- return true;
- }
-
- return this == scope;
- }
-
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredConfigPropertiesValidator.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredConfigPropertiesValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredConfigPropertiesValidator.java
index 759d9e9..c707df3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredConfigPropertiesValidator.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredConfigPropertiesValidator.java
@@ -84,7 +84,8 @@ public class RequiredConfigPropertiesValidator implements TopologyValidator {
for (String configType : requiredPropertiesByType.keySet()) {
// We need a copy not to modify the original
- Collection<String> requiredPropertiesForType = new HashSet(requiredPropertiesByType.get(configType));
+ Collection<String> requiredPropertiesForType = new HashSet(
+ requiredPropertiesByType.get(configType));
if (!operationalConfigurations.containsKey(configType)) {
// all required configuration is missing for the config type
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
index 5939fca..3f15400 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
@@ -587,7 +587,8 @@ public abstract class AbstractUpgradeCatalog implements UpgradeCatalog {
propertiesAttributes = Collections.emptyMap();
}
- controller.createConfig(cluster.getDesiredStackVersion(), cluster, configType, mergedProperties, newTag, propertiesAttributes);
+ controller.createConfig(cluster, cluster.getDesiredStackVersion(), configType,
+ mergedProperties, newTag, propertiesAttributes);
Config baseConfig = cluster.getConfig(configType, newTag);
if (baseConfig != null) {
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionSchedulerThreading.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionSchedulerThreading.java b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionSchedulerThreading.java
index bc24246..7d6f066 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionSchedulerThreading.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionSchedulerThreading.java
@@ -34,12 +34,14 @@ import org.apache.ambari.server.events.publishers.JPAEventPublisher;
import org.apache.ambari.server.orm.GuiceJpaInitializer;
import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
import org.apache.ambari.server.orm.OrmTestHelper;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.Config;
import org.apache.ambari.server.state.ConfigFactory;
import org.apache.ambari.server.state.ConfigHelper;
import org.apache.ambari.server.state.DesiredConfig;
+import org.apache.ambari.server.state.Service;
import org.apache.ambari.server.state.StackId;
import org.junit.After;
import org.junit.Before;
@@ -98,37 +100,44 @@ public class TestActionSchedulerThreading {
StackId stackId = cluster.getCurrentStackVersion();
StackId newStackId = new StackId("HDP-2.2.0");
+ RepositoryVersionEntity repoVersion220 = ormTestHelper.getOrCreateRepositoryVersion(newStackId, "2.2.0-1234");
// make sure the stacks are different
Assert.assertFalse(stackId.equals(newStackId));
+ // add a service
+ String serviceName = "ZOOKEEPER";
+ RepositoryVersionEntity repositoryVersion = ormTestHelper.getOrCreateRepositoryVersion(cluster);
+ Service service = cluster.addService(serviceName, repositoryVersion);
+ String configType = "zoo.cfg";
+
Map<String, String> properties = new HashMap<>();
Map<String, Map<String, String>> propertiesAttributes = new HashMap<>();
ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
- // foo-type for v1 on current stack
+ // zoo-cfg for v1 on current stack
properties.put("foo-property-1", "foo-value-1");
- Config c1 = configFactory.createNew(cluster, "foo-type", "version-1", properties, propertiesAttributes);
+ Config c1 = configFactory.createNew(stackId, cluster, configType, "version-1", properties, propertiesAttributes);
// make v1 "current"
cluster.addDesiredConfig("admin", Sets.newHashSet(c1), "note-1");
// bump the stack
- cluster.setDesiredStackVersion(newStackId);
+ service.setDesiredRepositoryVersion(repoVersion220);
// save v2
- // foo-type for v2 on new stack
+ // zoo-cfg for v2 on new stack
properties.put("foo-property-2", "foo-value-2");
- Config c2 = configFactory.createNew(cluster, "foo-type", "version-2", properties, propertiesAttributes);
+ Config c2 = configFactory.createNew(newStackId, cluster, configType, "version-2", properties, propertiesAttributes);
// make v2 "current"
cluster.addDesiredConfig("admin", Sets.newHashSet(c2), "note-2");
// check desired config
Map<String, DesiredConfig> desiredConfigs = cluster.getDesiredConfigs();
- DesiredConfig desiredConfig = desiredConfigs.get("foo-type");
- desiredConfig = desiredConfigs.get("foo-type");
+ DesiredConfig desiredConfig = desiredConfigs.get(configType);
+ desiredConfig = desiredConfigs.get(configType);
assertNotNull(desiredConfig);
assertEquals(Long.valueOf(2), desiredConfig.getVersion());
assertEquals("version-2", desiredConfig.getTag());
@@ -136,7 +145,7 @@ public class TestActionSchedulerThreading {
final String hostName = cluster.getHosts().iterator().next().getHostName();
// move the stack back to the old stack
- cluster.setDesiredStackVersion(stackId);
+ service.setDesiredRepositoryVersion(repositoryVersion);
// create the semaphores, taking 1 from each to make them blocking from the
// start
@@ -158,7 +167,7 @@ public class TestActionSchedulerThreading {
threadInitialCachingSemaphore.acquire();
// apply the configs for the old stack
- cluster.applyLatestConfigurations(stackId);
+ cluster.applyLatestConfigurations(stackId, serviceName);
// wake the thread up and have it verify that it can see the updated configs
applyLatestConfigsSemaphore.release();
@@ -226,11 +235,11 @@ public class TestActionSchedulerThreading {
// L1 cache
Cluster cluster = clusters.getCluster(clusterId);
- // {foo-type={tag=version-2}}
+ // {zoo.cfg={tag=version-2}}
Map<String, Map<String, String>> effectiveDesiredTags = configHelper.getEffectiveDesiredTags(
cluster, hostName);
- assertEquals("version-2", effectiveDesiredTags.get("foo-type").get("tag"));
+ assertEquals("version-2", effectiveDesiredTags.get("zoo.cfg").get("tag"));
// signal the caller that we're done making our initial call to populate
// the EntityManager
@@ -239,9 +248,9 @@ public class TestActionSchedulerThreading {
// wait for the method to switch configs
applyLatestConfigsSemaphore.acquire();
- // {foo-type={tag=version-1}}
+ // {zoo.cfg={tag=version-1}}
effectiveDesiredTags = configHelper.getEffectiveDesiredTags(cluster, hostName);
- assertEquals("version-1", effectiveDesiredTags.get("foo-type").get("tag"));
+ assertEquals("version-1", effectiveDesiredTags.get("zoo.cfg").get("tag"));
} catch (Throwable throwable) {
this.throwable = throwable;
} finally {
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java
index 5feb3cc..560d8a1 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java
@@ -35,6 +35,7 @@ import org.apache.ambari.server.actionmanager.HostRoleCommandFactoryImpl;
import org.apache.ambari.server.actionmanager.StageFactory;
import org.apache.ambari.server.agent.rest.AgentResource;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.controller.AmbariManagementController;
import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
import org.apache.ambari.server.metadata.CachedRoleCommandOrderProvider;
import org.apache.ambari.server.metadata.RoleCommandOrderProvider;
@@ -323,6 +324,7 @@ public class AgentResourceTest extends RandomPortJerseyTest {
bind(Clusters.class).toInstance(createNiceMock(Clusters.class));
bind(PersistedState.class).toInstance(createNiceMock(PersistedState.class));
bind(RoleCommandOrderProvider.class).to(CachedRoleCommandOrderProvider.class);
+ bind(AmbariManagementController.class).toInstance(createNiceMock(AmbariManagementController.class));
}
private void installDependencies() {
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index 9c723c1..a12e834 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -435,7 +435,7 @@ public class AmbariManagementControllerTest {
controller.deleteHostComponents(requests);
}
- private Long createConfigGroup(Cluster cluster, String name, String tag,
+ private Long createConfigGroup(Cluster cluster, String serviceName, String name, String tag,
List<String> hosts, List<Config> configs)
throws AmbariException {
@@ -452,9 +452,11 @@ public class AmbariManagementControllerTest {
configMap.put(config.getType(), config);
}
- ConfigGroup configGroup = configGroupFactory.createNew(cluster, name,
+ ConfigGroup configGroup = configGroupFactory.createNew(cluster, serviceName, name,
tag, "", configMap, hostMap);
+ configGroup.setServiceName(serviceName);
+
cluster.addConfigGroup(configGroup);
return configGroup.getId();
@@ -6662,8 +6664,8 @@ public class AmbariManagementControllerTest {
configs = new HashMap<>();
configs.put("a", "c");
cluster = clusters.getCluster(cluster1);
- final Config config = configFactory.createReadOnly("core-site", "version122", configs, null);
- Long groupId = createConfigGroup(cluster, group1, tag1,
+ final Config config = configFactory.createReadOnly("core-site", "version122", configs, null);
+ Long groupId = createConfigGroup(cluster, serviceName1, group1, tag1,
new ArrayList<String>() {{ add(host1); }},
new ArrayList<Config>() {{ add(config); }});
@@ -6674,7 +6676,7 @@ public class AmbariManagementControllerTest {
configs.put("a", "c");
final Config config2 = configFactory.createReadOnly("mapred-site", "version122", configs, null);
- groupId = createConfigGroup(cluster, group2, tag2,
+ groupId = createConfigGroup(cluster, serviceName2, group2, tag2,
new ArrayList<String>() {{ add(host1); }},
new ArrayList<Config>() {{ add(config2); }});
@@ -6817,7 +6819,7 @@ public class AmbariManagementControllerTest {
ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
final Config config = configFactory.createReadOnly("hdfs-site", "version122", configs, null);
- Long groupId = createConfigGroup(clusters.getCluster(cluster1), group1, tag1,
+ Long groupId = createConfigGroup(clusters.getCluster(cluster1), serviceName, group1, tag1,
new ArrayList<String>() {{
add(host1);
add(host2);
@@ -6926,7 +6928,7 @@ public class AmbariManagementControllerTest {
ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
final Config config = configFactory.createReadOnly("hdfs-site", "version122", configs, null);
- Long groupId = createConfigGroup(clusters.getCluster(cluster1), group1, tag1,
+ Long groupId = createConfigGroup(clusters.getCluster(cluster1), serviceName, group1, tag1,
new ArrayList<String>() {{ add(host1); add(host2); }},
new ArrayList<Config>() {{ add(config); }});
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProviderTest.java
index 5b69270..12cbadf 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProviderTest.java
@@ -171,13 +171,14 @@ public class ConfigGroupResourceProviderTest {
expect(hostEntity2.getHostId()).andReturn(2L).atLeastOnce();
Capture<Cluster> clusterCapture = newCapture();
+ Capture<String> serviceName = newCapture();
Capture<String> captureName = newCapture();
Capture<String> captureDesc = newCapture();
Capture<String> captureTag = newCapture();
Capture<Map<String, Config>> captureConfigs = newCapture();
Capture<Map<Long, Host>> captureHosts = newCapture();
- expect(configGroupFactory.createNew(capture(clusterCapture),
+ expect(configGroupFactory.createNew(capture(clusterCapture), capture(serviceName),
capture(captureName), capture(captureTag), capture(captureDesc),
capture(captureConfigs), capture(captureHosts))).andReturn(configGroup);
@@ -282,7 +283,7 @@ public class ConfigGroupResourceProviderTest {
expect(managementController.getAuthName()).andReturn("admin").anyTimes();
expect(cluster.getConfigGroups()).andReturn(configGroupMap);
- expect(configGroupFactory.createNew((Cluster) anyObject(), (String) anyObject(),
+ expect(configGroupFactory.createNew((Cluster) anyObject(), (String) anyObject(), (String) anyObject(),
(String) anyObject(), (String) anyObject(), EasyMock.<Map<String, Config>>anyObject(),
EasyMock.<Map<Long, Host>>anyObject())).andReturn(configGroup).anyTimes();
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackUpgradeConfigurationMergeTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackUpgradeConfigurationMergeTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackUpgradeConfigurationMergeTest.java
index 4408492..f79b1c2 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackUpgradeConfigurationMergeTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackUpgradeConfigurationMergeTest.java
@@ -57,6 +57,7 @@ import org.apache.ambari.server.state.ServiceInfo;
import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.UpgradeContext;
import org.apache.ambari.server.state.UpgradeContextFactory;
+import org.apache.ambari.server.state.UpgradeHelper;
import org.apache.ambari.server.state.configgroup.ConfigGroupFactory;
import org.apache.ambari.server.state.scheduler.RequestExecutionFactory;
import org.apache.ambari.server.state.stack.OsFamily;
@@ -205,11 +206,11 @@ public class StackUpgradeConfigurationMergeTest extends EasyMockSupport {
// HDP 2.4 configs
EasyMock.expect(configHelper.getDefaultProperties(EasyMock.eq(s_currentStackId),
- EasyMock.anyObject(Cluster.class), EasyMock.anyBoolean())).andReturn(oldStackDefaultConfigurationsByType);
+ EasyMock.anyString())).andReturn(oldStackDefaultConfigurationsByType);
// HDP 2.5 configs
EasyMock.expect(configHelper.getDefaultProperties(EasyMock.eq(s_targetStackId),
- EasyMock.anyObject(Cluster.class), EasyMock.anyBoolean())).andReturn(newConfigurationsByType);
+ EasyMock.anyString())).andReturn(newConfigurationsByType);
// CURRENT HDP 2.4 configs
Config currentClusterConfigFoo = createNiceMock(Config.class);
@@ -238,6 +239,7 @@ public class StackUpgradeConfigurationMergeTest extends EasyMockSupport {
Capture<Map<String, Map<String, String>>> capturedArgument = EasyMock.newCapture();
configHelper.createConfigTypes(EasyMock.anyObject(Cluster.class),
EasyMock.anyObject(AmbariManagementController.class),
+ EasyMock.anyObject(StackId.class),
EasyMock.capture(capturedArgument),
EasyMock.anyString(), EasyMock.anyString());
@@ -252,10 +254,8 @@ public class StackUpgradeConfigurationMergeTest extends EasyMockSupport {
EasyMock.expect(upgradeContext.getTargetRepositoryVersion(EasyMock.anyString())).andReturn(repositoryVersionEntity).anyTimes();
replayAll();
- UpgradeResourceProvider upgradeResourceProvider = new UpgradeResourceProvider(amc);
- m_injector.injectMembers(upgradeResourceProvider);
-
- upgradeResourceProvider.applyStackAndProcessConfigurations(upgradeContext);
+ UpgradeHelper upgradeHelper = m_injector.getInstance(UpgradeHelper.class);
+ upgradeHelper.updateDesiredRepositoriesAndConfigs(upgradeContext);
// assertion time!
Map<String, Map<String, String>> mergedConfigurations = capturedArgument.getValue();
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
index 3780ea5..04773bc 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
@@ -65,7 +65,6 @@ import org.apache.ambari.server.controller.utilities.PropertyHelper;
import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
import org.apache.ambari.server.orm.GuiceJpaInitializer;
import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.orm.OrmTestHelper;
import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
import org.apache.ambari.server.orm.dao.RequestDAO;
@@ -79,6 +78,7 @@ import org.apache.ambari.server.orm.entities.StackEntity;
import org.apache.ambari.server.orm.entities.StageEntity;
import org.apache.ambari.server.orm.entities.UpgradeEntity;
import org.apache.ambari.server.orm.entities.UpgradeGroupEntity;
+import org.apache.ambari.server.orm.entities.UpgradeHistoryEntity;
import org.apache.ambari.server.orm.entities.UpgradeItemEntity;
import org.apache.ambari.server.security.TestAuthenticationFactory;
import org.apache.ambari.server.serveraction.upgrades.AutoSkipFailedSummaryAction;
@@ -95,7 +95,6 @@ import org.apache.ambari.server.state.ServiceComponent;
import org.apache.ambari.server.state.ServiceComponentHost;
import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.UpgradeContext;
-import org.apache.ambari.server.state.UpgradeContextFactory;
import org.apache.ambari.server.state.UpgradeHelper;
import org.apache.ambari.server.state.UpgradeState;
import org.apache.ambari.server.state.stack.UpgradePack;
@@ -133,7 +132,6 @@ public class UpgradeResourceProviderTest {
private RepositoryVersionDAO repoVersionDao = null;
private Injector injector;
private Clusters clusters;
- private OrmTestHelper helper;
private AmbariManagementController amc;
private ConfigHelper configHelper;
private StackDAO stackDAO;
@@ -141,7 +139,6 @@ public class UpgradeResourceProviderTest {
private TopologyManager topologyManager;
private ConfigFactory configFactory;
private HostRoleCommandDAO hrcDAO;
- private UpgradeContextFactory upgradeContextFactory;
RepositoryVersionEntity repoVersionEntity2110;
RepositoryVersionEntity repoVersionEntity2111;
@@ -162,7 +159,7 @@ public class UpgradeResourceProviderTest {
expect(
configHelper.getDefaultProperties(EasyMock.anyObject(StackId.class),
- EasyMock.anyObject(Cluster.class), EasyMock.anyBoolean())).andReturn(
+ EasyMock.anyString())).andReturn(
new HashMap<String, Map<String, String>>()).anyTimes();
@@ -176,13 +173,9 @@ public class UpgradeResourceProviderTest {
H2DatabaseCleaner.resetSequences(injector);
injector.getInstance(GuiceJpaInitializer.class);
-
- helper = injector.getInstance(OrmTestHelper.class);
-
amc = injector.getInstance(AmbariManagementController.class);
ambariMetaInfo = injector.getInstance(AmbariMetaInfo.class);
configFactory = injector.getInstance(ConfigFactory.class);
- upgradeContextFactory = injector.getInstance(UpgradeContextFactory.class);
Field field = AmbariServer.class.getDeclaredField("clusterController");
field.setAccessible(true);
@@ -203,8 +196,7 @@ public class UpgradeResourceProviderTest {
// For now, Ignore the tests that fail.
StackEntity stackEntity211 = stackDAO.find("HDP", "2.1.1");
StackEntity stackEntity220 = stackDAO.find("HDP", "2.2.0");
- StackId stack211 = new StackId("HDP-2.1.1");
- StackId stack220 = new StackId("HDP-2.2.0");
+ StackId stack211 = new StackId(stackEntity211);
repoVersionEntity2110 = new RepositoryVersionEntity();
repoVersionEntity2110.setDisplayName("My New Version 1");
@@ -232,9 +224,6 @@ public class UpgradeResourceProviderTest {
clusters.addCluster("c1", stack211);
Cluster cluster = clusters.getCluster("c1");
- helper.getOrCreateRepositoryVersion(stack211, stack211.getStackVersion());
- helper.getOrCreateRepositoryVersion(stack220, stack220.getStackVersion());
-
clusters.addHost("h1");
Host host = clusters.getHost("h1");
Map<String, String> hostAttributes = new HashMap<>();
@@ -245,9 +234,8 @@ public class UpgradeResourceProviderTest {
clusters.mapHostToCluster("h1", "c1");
- // add a single ZK server
+ // add a single ZK server and client on 2.1.1.0
Service service = cluster.addService("ZOOKEEPER", repoVersionEntity2110);
-
ServiceComponent component = service.addServiceComponent("ZOOKEEPER_SERVER");
ServiceComponentHost sch = component.addServiceComponentHost("h1");
sch.setVersion("2.1.1.0");
@@ -1113,9 +1101,21 @@ public class UpgradeResourceProviderTest {
*/
@Test
public void testMergeConfigurations() throws Exception {
+ RepositoryVersionEntity repoVersion211 = createNiceMock(RepositoryVersionEntity.class);
+ RepositoryVersionEntity repoVersion220 = createNiceMock(RepositoryVersionEntity.class);
+
StackId stack211 = new StackId("HDP-2.1.1");
StackId stack220 = new StackId("HDP-2.2.0");
+ String version211 = "2.1.1.0-1234";
+ String version220 = "2.2.0.0-1234";
+
+ EasyMock.expect(repoVersion211.getStackId()).andReturn(stack211).atLeastOnce();
+ EasyMock.expect(repoVersion211.getVersion()).andReturn(version211).atLeastOnce();
+
+ EasyMock.expect(repoVersion220.getStackId()).andReturn(stack220).atLeastOnce();
+ EasyMock.expect(repoVersion220.getVersion()).andReturn(version220).atLeastOnce();
+
Map<String, Map<String, String>> stack211Configs = new HashMap<>();
Map<String, String> stack211FooType = new HashMap<>();
Map<String, String> stack211BarType = new HashMap<>();
@@ -1174,17 +1174,18 @@ public class UpgradeResourceProviderTest {
EasyMock.reset(configHelper);
expect(
- configHelper.getDefaultProperties(EasyMock.eq(stack211), EasyMock.anyObject(Cluster.class), EasyMock.anyBoolean())).andReturn(
+ configHelper.getDefaultProperties(EasyMock.eq(stack211), EasyMock.anyString())).andReturn(
stack211Configs).anyTimes();
expect(
- configHelper.getDefaultProperties(EasyMock.eq(stack220), EasyMock.anyObject(Cluster.class), EasyMock.anyBoolean())).andReturn(
- stack220Configs).anyTimes();
+ configHelper.getDefaultProperties(EasyMock.eq(stack220), EasyMock.anyString())).andReturn(
+ stack220Configs).anyTimes();
Capture<Map<String, Map<String, String>>> expectedConfigurationsCapture = EasyMock.newCapture();
configHelper.createConfigTypes(EasyMock.anyObject(Cluster.class),
EasyMock.anyObject(AmbariManagementController.class),
+ EasyMock.anyObject(StackId.class),
EasyMock.capture(expectedConfigurationsCapture),
EasyMock.anyObject(String.class), EasyMock.anyObject(String.class));
@@ -1192,13 +1193,16 @@ public class UpgradeResourceProviderTest {
EasyMock.replay(configHelper, cluster, fooConfig, barConfig, bazConfig);
- UpgradeResourceProvider upgradeResourceProvider = createProvider(amc);
-
Map<String, UpgradePack> upgradePacks = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1");
- UpgradePack upgrade = upgradePacks.get("upgrade_to_new_stack");
+ UpgradePack upgradePack = upgradePacks.get("upgrade_to_new_stack");
UpgradeContext upgradeContext = EasyMock.createNiceMock(UpgradeContext.class);
- upgradeResourceProvider.applyStackAndProcessConfigurations(upgradeContext);
+ EasyMock.expect(upgradeContext.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
+ EasyMock.expect(upgradeContext.getCluster()).andReturn(cluster).anyTimes();
+ EasyMock.expect(upgradeContext.getDirection()).andReturn(Direction.UPGRADE).anyTimes();
+ EasyMock.expect(upgradeContext.getUpgradePack()).andReturn(upgradePack).anyTimes();
+ EasyMock.expect(upgradeContext.getRepositoryVersion()).andReturn(repoVersion211).anyTimes();
+ EasyMock.expect(upgradeContext.getTargetRepositoryVersion(EasyMock.anyString())).andReturn(repoVersion220).anyTimes();
Map<String, Map<String, String>> expectedConfigurations = expectedConfigurationsCapture.getValue();
Map<String, String> expectedFooType = expectedConfigurations.get("foo-site");
@@ -1502,8 +1506,9 @@ public class UpgradeResourceProviderTest {
/**
* Exercises that a component that goes from upgrade->downgrade that switches
- * {@code versionAdvertised} between will go to UKNOWN. This exercises
- * {@link UpgradeHelper#putComponentsToUpgradingState(String, Map, StackId)}
+ * {@code versionAdvertised} between will go to UNKNOWN. This exercises
+ * {@link UpgradeHelper#updateDesiredRepositoriesAndConfigs(UpgradeContext)}
+ *
* @throws Exception
*/
@Test
@@ -1617,9 +1622,67 @@ public class UpgradeResourceProviderTest {
}
}
+ /**
+ * Tests that from/to repository version history is created correctly on the
+ * upgrade.
+ *
+ * @throws Exception
+ */
@Test
public void testUpgradeHistory() throws Exception {
- Assert.fail("Implement me!");
+ Cluster cluster = clusters.getCluster("c1");
+
+ Map<String, Object> requestProps = new HashMap<>();
+ requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1");
+ requestProps.put(UpgradeResourceProvider.UPGRADE_REPO_VERSION_ID, String.valueOf(repoVersionEntity2200.getId()));
+ requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_test");
+ requestProps.put(UpgradeResourceProvider.UPGRADE_TYPE, UpgradeType.ROLLING.toString());
+ requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_MANUAL_VERIFICATION, Boolean.TRUE.toString());
+ requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, Boolean.TRUE.toString());
+ requestProps.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.UPGRADE.name());
+
+ ResourceProvider upgradeResourceProvider = createProvider(amc);
+ Request request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), null);
+ upgradeResourceProvider.createResources(request);
+
+ List<UpgradeEntity> upgrades = upgradeDao.findUpgrades(cluster.getClusterId());
+ assertEquals(1, upgrades.size());
+
+ UpgradeEntity upgrade = cluster.getUpgradeInProgress();
+ List<UpgradeHistoryEntity> histories = upgrade.getHistory();
+ assertEquals(2, histories.size());
+
+ for( UpgradeHistoryEntity history : histories){
+ assertEquals( "ZOOKEEPER", history.getServiceName() );
+ assertEquals(repoVersionEntity2110, history.getFromReposistoryVersion());
+ assertEquals(repoVersionEntity2200, history.getTargetRepositoryVersion());
+ }
+
+ // abort the upgrade and create the downgrade
+ abortUpgrade(upgrade.getRequestId());
+
+ requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1");
+ requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_nonrolling_new_stack");
+ requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, "true");
+ requestProps.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.DOWNGRADE.name());
+
+ Map<String, String> requestInfoProperties = new HashMap<>();
+
+ request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps),
+ requestInfoProperties);
+ RequestStatus status = upgradeResourceProvider.createResources(request);
+ UpgradeEntity downgrade = upgradeDao.findUpgradeByRequestId(getRequestId(status));
+ assertEquals(Direction.DOWNGRADE, downgrade.getDirection());
+
+ // check from/to history
+ histories = downgrade.getHistory();
+ assertEquals(2, histories.size());
+
+ for (UpgradeHistoryEntity history : histories) {
+ assertEquals("ZOOKEEPER", history.getServiceName());
+ assertEquals(repoVersionEntity2200, history.getFromReposistoryVersion());
+ assertEquals(repoVersionEntity2110, history.getTargetRepositoryVersion());
+ }
}
private String parseSingleMessage(String msgStr){
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java
index 77593a7..7b9ff52 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java
@@ -388,10 +388,15 @@ public class ServiceConfigDAOTest {
long clusterId = serviceConfigEntity.getClusterId();
- List<ServiceConfigEntity> serviceConfigs = serviceConfigDAO.getAllServiceConfigsForClusterAndStack(clusterId, HDP_01);
- Assert.assertEquals(4, serviceConfigs.size());
+ List<ServiceConfigEntity> serviceConfigs = serviceConfigDAO.getServiceConfigsForServiceAndStack(
+ clusterId, HDP_01, "HDFS");
- serviceConfigs = serviceConfigDAO.getAllServiceConfigsForClusterAndStack(clusterId, HDP_02);
+ Assert.assertEquals(3, serviceConfigs.size());
+
+ serviceConfigs = serviceConfigDAO.getServiceConfigsForServiceAndStack(clusterId, HDP_01, "YARN");
+ Assert.assertEquals(1, serviceConfigs.size());
+
+ serviceConfigs = serviceConfigDAO.getServiceConfigsForServiceAndStack(clusterId, HDP_02, "HDFS");
Assert.assertEquals(0, serviceConfigs.size());
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
index 941c424..2f2771d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
@@ -51,6 +51,7 @@ import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
import org.apache.ambari.server.orm.entities.RequestEntity;
import org.apache.ambari.server.orm.entities.StackEntity;
import org.apache.ambari.server.orm.entities.UpgradeEntity;
+import org.apache.ambari.server.orm.entities.UpgradeHistoryEntity;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.ConfigFactory;
@@ -85,7 +86,7 @@ public class ComponentVersionCheckActionTest {
private static final String HDP_2_1_1_0 = "2.1.1.0-1";
private static final String HDP_2_1_1_1 = "2.1.1.1-2";
- private static final String HDP_2_2_1_0 = "2.2.0.1-3";
+ private static final String HDP_2_2_1_0 = "2.2.1.0-1";
private static final StackId HDP_21_STACK = new StackId("HDP-2.1.1");
private static final StackId HDP_22_STACK = new StackId("HDP-2.2.0");
@@ -205,8 +206,21 @@ public class ComponentVersionCheckActionTest {
c.setUpgradeEntity(upgradeEntity);
}
- private void makeCrossStackUpgradeCluster(StackId sourceStack, String sourceRepo, StackId targetStack,
- String targetRepo, String clusterName, String hostName) throws Exception {
+ /**
+ * Creates a cluster with a running upgrade. The upgrade will have no services
+ * attached to it, so those will need to be set after this is called.
+ *
+ * @param sourceStack
+ * @param sourceRepo
+ * @param targetStack
+ * @param targetRepo
+ * @param clusterName
+ * @param hostName
+ * @throws Exception
+ */
+ private void makeCrossStackUpgradeCluster(StackId sourceStack, String sourceRepo,
+ StackId targetStack, String targetRepo, String clusterName, String hostName)
+ throws Exception {
m_helper.createStack(sourceStack);
m_helper.createStack(targetStack);
@@ -265,24 +279,22 @@ public class ComponentVersionCheckActionTest {
c.setUpgradeEntity(upgradeEntity);
}
- private void createNewRepoVersion(StackId targetStack, String targetRepo, String clusterName,
- String hostName) throws AmbariException {
- StackDAO stackDAO = m_injector.getInstance(StackDAO.class);
-
- StackEntity stackEntityTarget = stackDAO.find(targetStack.getStackName(), targetStack.getStackVersion());
-
- // Create the new repo version
- String urlInfo = "[{'repositories':["
- + "{'Repositories/base_url':'http://foo1','Repositories/repo_name':'HDP','Repositories/repo_id':'" + targetRepo + "'}"
- + "], 'OperatingSystems/os_type':'redhat6'}]";
- repoVersionDAO.create(stackEntityTarget, targetRepo, String.valueOf(System.currentTimeMillis()), urlInfo);
-
+ /**
+ * Creates a new {@link HostVersionEntity} instance in the
+ * {@link RepositoryVersionState#INSTALLED} state for the specified host.
+ *
+ * @param hostName
+ * @param repositoryVersion
+ * @throws AmbariException
+ */
+ private void installRepositoryOnHost(String hostName, RepositoryVersionEntity repositoryVersion)
+ throws AmbariException {
// Start upgrading the newer repo
HostDAO hostDAO = m_injector.getInstance(HostDAO.class);
HostVersionEntity entity = new HostVersionEntity();
entity.setHostEntity(hostDAO.findByName(hostName));
- entity.setRepositoryVersion(repoVersionDAO.findByStackAndVersion(targetStack, targetRepo));
+ entity.setRepositoryVersion(repositoryVersion);
entity.setState(RepositoryVersionState.INSTALLED);
hostVersionDAO.create(entity);
}
@@ -325,42 +337,65 @@ public class ComponentVersionCheckActionTest {
public void testMixedComponentVersions() throws Exception {
StackId sourceStack = HDP_21_STACK;
StackId targetStack = HDP_22_STACK;
- String sourceRepo = HDP_2_1_1_0;
- String targetRepo = HDP_2_2_1_0;
+ String sourceVersion = HDP_2_1_1_0;
+ String targetVersion = HDP_2_2_1_0;
String clusterName = "c1";
String hostName = "h1";
- makeCrossStackUpgradeCluster(sourceStack, sourceRepo, targetStack, targetRepo, clusterName, hostName);
+ makeCrossStackUpgradeCluster(sourceStack, sourceVersion, targetStack, targetVersion,
+ clusterName, hostName);
Clusters clusters = m_injector.getInstance(Clusters.class);
Cluster cluster = clusters.getCluster("c1");
- RepositoryVersionEntity repositoryVersion = m_helper.getOrCreateRepositoryVersion(HDP_21_STACK, HDP_2_1_1_0);
+ RepositoryVersionEntity sourceRepoVersion = m_helper.getOrCreateRepositoryVersion(HDP_21_STACK, HDP_2_1_1_0);
+ RepositoryVersionEntity targetRepoVersion = m_helper.getOrCreateRepositoryVersion(HDP_22_STACK, HDP_2_2_1_0);
- Service service = installService(cluster, "HDFS", repositoryVersion);
+ Service service = installService(cluster, "HDFS", sourceRepoVersion);
addServiceComponent(cluster, service, "NAMENODE");
addServiceComponent(cluster, service, "DATANODE");
- createNewServiceComponentHost(cluster, "HDFS", "NAMENODE", "h1");
- createNewServiceComponentHost(cluster, "HDFS", "DATANODE", "h1");
-
- createNewRepoVersion(targetStack, targetRepo, clusterName, hostName);
+ createNewServiceComponentHost(cluster, "HDFS", "NAMENODE", hostName);
+ createNewServiceComponentHost(cluster, "HDFS", "DATANODE", hostName);
// create some configs
createConfigs(cluster);
+
+ // install the target repo
+ installRepositoryOnHost(hostName, targetRepoVersion);
+
// setup the cluster for the upgrade across stacks
cluster.setCurrentStackVersion(sourceStack);
cluster.setDesiredStackVersion(targetStack);
- // set the SCH versions to the new stack so that the finalize action is
- // happy
- cluster.getServiceComponentHosts("HDFS", "NAMENODE").get(0).setVersion(targetRepo);
- // don't update DATANODE - we want to make the action complain
+ // tell the upgrade that HDFS is upgrading - without this, no services will
+ // be participating in the upgrade
+ UpgradeEntity upgrade = cluster.getUpgradeInProgress();
+ UpgradeHistoryEntity history = new UpgradeHistoryEntity();
+ history.setUpgrade(upgrade);
+ history.setServiceName("HDFS");
+ history.setComponentName("NAMENODE");
+ history.setFromRepositoryVersion(sourceRepoVersion);
+ history.setTargetRepositoryVersion(targetRepoVersion);
+ upgrade.addHistory(history);
+
+ history = new UpgradeHistoryEntity();
+ history.setUpgrade(upgrade);
+ history.setServiceName("HDFS");
+ history.setComponentName("DATANODE");
+ history.setFromRepositoryVersion(sourceRepoVersion);
+ history.setTargetRepositoryVersion(targetRepoVersion);
+ upgrade.addHistory(history);
- // inject an unhappy path where the cluster repo version is still UPGRADING
- // even though all of the hosts are UPGRADED
+ UpgradeDAO upgradeDAO = m_injector.getInstance(UpgradeDAO.class);
+ upgrade = upgradeDAO.merge(upgrade);
+
+ // set the SCH versions to the new stack so that the finalize action is
+ // happy - don't update DATANODE - we want to make the action complain
+ cluster.getServiceComponentHosts("HDFS", "NAMENODE").get(0).setVersion(targetVersion);
// verify the conditions for the test are met properly
- List<HostVersionEntity> hostVersions = hostVersionDAO.findByClusterStackAndVersion("c1", HDP_22_STACK, targetRepo);
+ List<HostVersionEntity> hostVersions = hostVersionDAO.findByClusterStackAndVersion("c1",
+ HDP_22_STACK, targetVersion);
assertTrue(hostVersions.size() > 0);
for (HostVersionEntity hostVersion : hostVersions) {
@@ -386,6 +421,14 @@ public class ComponentVersionCheckActionTest {
assertNotNull(report);
assertEquals(HostRoleStatus.FAILED.name(), report.getStatus());
assertEquals(-1, report.getExitCode());
+
+ // OK, now set the datanode so it completes
+ cluster.getServiceComponentHosts("HDFS", "DATANODE").get(0).setVersion(targetVersion);
+
+ report = action.execute(null);
+ assertNotNull(report);
+ assertEquals(HostRoleStatus.COMPLETED.name(), report.getStatus());
+ assertEquals(0, report.getExitCode());
}
@Test
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
index 0aea8b3..f306d69 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
@@ -58,13 +58,13 @@ import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
import org.apache.ambari.server.orm.entities.RequestEntity;
import org.apache.ambari.server.orm.entities.StackEntity;
import org.apache.ambari.server.orm.entities.UpgradeEntity;
+import org.apache.ambari.server.orm.entities.UpgradeHistoryEntity;
import org.apache.ambari.server.serveraction.ServerAction;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.Config;
import org.apache.ambari.server.state.ConfigFactory;
import org.apache.ambari.server.state.Host;
-import org.apache.ambari.server.state.RepositoryInfo;
import org.apache.ambari.server.state.RepositoryVersionState;
import org.apache.ambari.server.state.Service;
import org.apache.ambari.server.state.ServiceComponent;
@@ -77,6 +77,7 @@ import org.apache.ambari.server.state.State;
import org.apache.ambari.server.state.stack.UpgradePack;
import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
import org.apache.ambari.server.utils.EventBusSynchronizer;
+import org.apache.hadoop.metrics2.sink.relocated.commons.lang.StringUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@@ -102,8 +103,6 @@ public class UpgradeActionTest {
private static final StackId HDP_21_STACK = new StackId("HDP-2.1.1");
private static final StackId HDP_22_STACK = new StackId("HDP-2.2.0");
- private static final String HDP_211_CENTOS6_REPO_URL = "http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.1.1.0-118";
-
private RepositoryVersionEntity sourceRepositoryVersion;
private Injector m_injector;
@@ -172,10 +171,11 @@ public class UpgradeActionTest {
H2DatabaseCleaner.clearDatabase(m_injector.getProvider(EntityManager.class).get());
}
- private void makeDowngradeCluster(StackId sourceStack, String sourceRepo, StackId targetStack, String targetRepo) throws Exception {
+ private void makeDowngradeCluster(RepositoryVersionEntity sourceRepoVersion,
+ RepositoryVersionEntity targetRepoVersion) throws Exception {
String hostName = "h1";
- clusters.addCluster(clusterName, sourceStack);
+ clusters.addCluster(clusterName, sourceRepoVersion.getStackId());
// add a host component
clusters.addHost(hostName);
@@ -187,24 +187,17 @@ public class UpgradeActionTest {
hostAttributes.put("os_release_version", "6");
host.setHostAttributes(hostAttributes);
- // Create the starting repo version
- m_helper.getOrCreateRepositoryVersion(sourceStack, sourceRepo);
-
- // Start upgrading the newer repo
- m_helper.getOrCreateRepositoryVersion(targetStack, targetRepo);
-
HostVersionEntity entity = new HostVersionEntity();
entity.setHostEntity(hostDAO.findByName(hostName));
- entity.setRepositoryVersion(repoVersionDAO.findByStackAndVersion(targetStack, targetRepo));
+ entity.setRepositoryVersion(targetRepoVersion);
entity.setState(RepositoryVersionState.INSTALLING);
hostVersionDAO.create(entity);
}
- private void makeTwoUpgradesWhereLastDidNotComplete(StackId sourceStack, String sourceRepo, StackId midStack, String midRepo, StackId targetStack, String targetRepo) throws Exception {
- String hostName = "h1";
-
- clusters.addCluster(clusterName, sourceStack);
+ private void createUpgradeCluster(
+ RepositoryVersionEntity sourceRepoVersion, String hostName) throws Exception {
+ clusters.addCluster(clusterName, sourceRepoVersion.getStackId());
Cluster c = clusters.getCluster(clusterName);
// add a host component
@@ -217,113 +210,33 @@ public class UpgradeActionTest {
hostAttributes.put("os_release_version", "6");
host.setHostAttributes(hostAttributes);
- // Create the starting repo version
- m_helper.getOrCreateRepositoryVersion(sourceStack, sourceRepo);
-
- // Start upgrading the mid repo
- m_helper.getOrCreateRepositoryVersion(midStack, midRepo);
- c.setDesiredStackVersion(midStack);
-
- // Notice that we have not yet changed the cluster current stack to the mid stack to simulate
- // the user skipping this step.
-
- m_helper.getOrCreateRepositoryVersion(targetStack, targetRepo);
- c.setDesiredStackVersion(targetStack);
-
- // Create a host version for the starting repo in INSTALLED
- HostVersionEntity entitySource = new HostVersionEntity();
- entitySource.setHostEntity(hostDAO.findByName(hostName));
- entitySource.setRepositoryVersion(repoVersionDAO.findByStackAndVersion(sourceStack, sourceRepo));
- entitySource.setState(RepositoryVersionState.INSTALL_FAILED);
- hostVersionDAO.create(entitySource);
-
- // Create a host version for the target repo in UPGRADED
- HostVersionEntity entityTarget = new HostVersionEntity();
- entityTarget.setHostEntity(hostDAO.findByName(hostName));
- entityTarget.setRepositoryVersion(repoVersionDAO.findByStackAndVersion(targetStack, targetRepo));
- entityTarget.setState(RepositoryVersionState.INSTALLED);
- hostVersionDAO.create(entityTarget);
- }
-
- private RepositoryVersionEntity createUpgradeClusterAndSourceRepo(StackId sourceStack,
- String sourceRepo,
- String hostName) throws Exception {
-
- clusters.addCluster(clusterName, sourceStack);
-
- StackEntity stackEntitySource = stackDAO.find(sourceStack.getStackName(), sourceStack.getStackVersion());
- assertNotNull(stackEntitySource);
-
- Cluster c = clusters.getCluster(clusterName);
- c.setDesiredStackVersion(sourceStack);
-
- // add a host component
- clusters.addHost(hostName);
-
- Host host = clusters.getHost(hostName);
-
- Map<String, String> hostAttributes = new HashMap<>();
- hostAttributes.put("os_family", "redhat");
- hostAttributes.put("os_release_version", "6");
- host.setHostAttributes(hostAttributes);
-
// without this, HostEntity will not have a relation to ClusterEntity
clusters.mapHostToCluster(hostName, clusterName);
- // Create the starting repo version
- sourceRepositoryVersion = m_helper.getOrCreateRepositoryVersion(sourceStack, sourceRepo);
- sourceRepositoryVersion.setOperatingSystems("[\n" +
- " {\n" +
- " \"repositories\":[\n" +
- " {\n" +
- " \"Repositories/base_url\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos5/2.x/updates/2.2.0.0\",\n" +
- " \"Repositories/repo_name\":\"HDP\",\n" +
- " \"Repositories/repo_id\":\"HDP-2.2\"\n" +
- " }\n" +
- " ],\n" +
- " \"OperatingSystems/os_type\":\"redhat6\"\n" +
- " }\n" +
- "]");
- repoVersionDAO.merge(sourceRepositoryVersion);
-
- return sourceRepositoryVersion;
+ HostVersionEntity entity = new HostVersionEntity(hostDAO.findByName(hostName),
+ sourceRepoVersion, RepositoryVersionState.INSTALLED);
+
+ hostVersionDAO.create(entity);
}
- private RepositoryVersionEntity createUpgradeClusterTargetRepo(StackId targetStack, String targetRepo,
- String hostName) throws AmbariException {
+ private void createHostVersions(RepositoryVersionEntity targetRepoVersion,
+ String hostName) throws AmbariException {
Cluster c = clusters.getCluster(clusterName);
- StackEntity stackEntityTarget = stackDAO.find(targetStack.getStackName(), targetStack.getStackVersion());
- assertNotNull(stackEntityTarget);
-
- // Create the new repo version
- String urlInfo = "[{'repositories':["
- + "{'Repositories/base_url':'http://foo1','Repositories/repo_name':'HDP','Repositories/repo_id':'" + targetStack.getStackId() + "-1'}"
- + "], 'OperatingSystems/os_type':'redhat6'}]";
-
- repoVersionDAO.create(stackEntityTarget, targetRepo, String.valueOf(System.currentTimeMillis()), urlInfo);
-
- // Start upgrading the newer repo
- c.setCurrentStackVersion(targetStack);
// create a single host with the UPGRADED HostVersionEntity
HostDAO hostDAO = m_injector.getInstance(HostDAO.class);
- RepositoryVersionEntity repositoryVersionEntity = repoVersionDAO.findByStackAndVersion(
- targetStack, targetRepo);
-
HostVersionEntity entity = new HostVersionEntity(hostDAO.findByName(hostName),
- repositoryVersionEntity, RepositoryVersionState.INSTALLED);
+ targetRepoVersion, RepositoryVersionState.INSTALLED);
hostVersionDAO.create(entity);
// verify the UPGRADED host versions were created successfully
- List<HostVersionEntity> hostVersions = hostVersionDAO.findByClusterStackAndVersion(clusterName,
- targetStack, targetRepo);
+ List<HostVersionEntity> hostVersions = hostVersionDAO.findHostVersionByClusterAndRepository(
+ c.getClusterId(), targetRepoVersion);
assertEquals(1, hostVersions.size());
assertEquals(RepositoryVersionState.INSTALLED, hostVersions.get(0).getState());
-
- return repositoryVersionEntity;
}
private void makeCrossStackUpgradeClusterAndSourceRepo(StackId sourceStack, String sourceRepo,
@@ -358,11 +271,6 @@ public class UpgradeActionTest {
StackEntity stackEntityTarget = stackDAO.find(targetStack.getStackName(), targetStack.getStackVersion());
assertNotNull(stackEntityTarget);
- // Create the new repo version
- String urlInfo = "[{'repositories':["
- + "{'Repositories/base_url':'http://foo1','Repositories/repo_name':'HDP','Repositories/repo_id':'" + targetRepo + "'}"
- + "], 'OperatingSystems/os_type':'redhat6'}]";
-
m_helper.getOrCreateRepositoryVersion(new StackId(stackEntityTarget), targetRepo);
// Start upgrading the newer repo
@@ -386,7 +294,6 @@ public class UpgradeActionTest {
StackId sourceStack = HDP_21_STACK;
StackId targetStack = HDP_22_STACK;
String sourceRepo = HDP_2_1_1_0;
- String targetRepo = HDP_2_2_0_1;
String hostName = "h1";
// Must be a NON_ROLLING upgrade that jumps stacks in order for it to apply config changes.
@@ -400,8 +307,6 @@ public class UpgradeActionTest {
Cluster cluster = clusters.getCluster(clusterName);
- createUpgrade(cluster, repositoryVersion2201);
-
// Install ZK and HDFS with some components
Service zk = installService(cluster, "ZOOKEEPER");
addServiceComponent(cluster, zk, "ZOOKEEPER_SERVER");
@@ -415,10 +320,10 @@ public class UpgradeActionTest {
createNewServiceComponentHost(cluster, "HDFS", "NAMENODE", "h1");
createNewServiceComponentHost(cluster, "HDFS", "DATANODE", "h1");
- makeCrossStackUpgradeTargetRepo(targetStack, targetRepo, hostName);
+ makeCrossStackUpgradeTargetRepo(targetStack, repositoryVersion2201.getVersion(), hostName);
+ createUpgrade(cluster, repositoryVersion2201);
- RepositoryVersionEntity targetRve = repoVersionDAO.findByStackNameAndVersion("HDP", targetRepo);
- Assert.assertNotNull(targetRve);
+ Assert.assertNotNull(repositoryVersion2201);
// Create some configs
createConfigs(cluster);
@@ -459,12 +364,7 @@ public class UpgradeActionTest {
@Test
public void testFinalizeDowngrade() throws Exception {
- StackId sourceStack = HDP_21_STACK;
- StackId targetStack = HDP_21_STACK;
- String sourceRepo = HDP_2_1_1_0;
- String targetRepo = HDP_2_1_1_1;
-
- makeDowngradeCluster(sourceStack, sourceRepo, targetStack, targetRepo);
+ makeDowngradeCluster(repositoryVersion2110, repositoryVersion2111);
Cluster cluster = clusters.getCluster(clusterName);
@@ -486,74 +386,25 @@ public class UpgradeActionTest {
assertEquals(HostRoleStatus.COMPLETED.name(), report.getStatus());
for (HostVersionEntity entity : hostVersionDAO.findByClusterAndHost(clusterName, "h1")) {
- if (entity.getRepositoryVersion().getVersion().equals(sourceRepo)) {
+ if (StringUtils.equals(entity.getRepositoryVersion().getVersion(), repositoryVersion2110.getVersion())) {
assertEquals(RepositoryVersionState.CURRENT, entity.getState());
- } else if (entity.getRepositoryVersion().getVersion().equals(targetRepo)) {
+ } else if (StringUtils.equals(entity.getRepositoryVersion().getVersion(), repositoryVersion2111.getVersion())) {
assertEquals(RepositoryVersionState.INSTALLED, entity.getState());
}
}
}
- /**
- * Test a case in which a customer performs an upgrade from HDP 2.1 to 2.2 (e.g., 2.2.0.0), but skips the step to
- * finalize, which calls "Save DB State". Therefore, the cluster's current stack is still on HDP 2.1.
- * They can still modify the database manually to mark HDP 2.2 as CURRENT in the cluster_version and then begin
- * another upgrade to 2.2.0.2 and then downgrade.
- * In the downgrade, the original stack is still 2.1 but the stack for the version marked as CURRENT is 2.2; this
- * mismatch means that the downgrade should not delete configs and will report a warning.
- * @throws Exception
- */
- @Test
- public void testFinalizeDowngradeWhenDidNotFinalizePreviousUpgrade() throws Exception {
- StackId sourceStack = HDP_21_STACK;
- StackId midStack = HDP_22_STACK;
- StackId targetStack = HDP_22_STACK;
-
- String sourceRepo = HDP_2_1_1_0;
- String midRepo = HDP_2_2_0_1;
- String targetRepo = HDP_2_2_0_2;
-
- makeTwoUpgradesWhereLastDidNotComplete(sourceStack, sourceRepo, midStack, midRepo, targetStack, targetRepo);
-
- Cluster cluster = clusters.getCluster(clusterName);
-
- createUpgrade(cluster, repositoryVersion2202);
-
- Map<String, String> commandParams = new HashMap<>();
- ExecutionCommand executionCommand = new ExecutionCommand();
- executionCommand.setCommandParams(commandParams);
- executionCommand.setClusterName(clusterName);
-
- HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null);
- hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand));
-
- finalizeUpgradeAction.setExecutionCommand(executionCommand);
- finalizeUpgradeAction.setHostRoleCommand(hostRoleCommand);
-
- CommandReport report = finalizeUpgradeAction.execute(null);
- assertNotNull(report);
- assertEquals(HostRoleStatus.FAILED.name(), report.getStatus());
- assertTrue(report.getStdErr().contains(FinalizeUpgradeAction.PREVIOUS_UPGRADE_NOT_COMPLETED_MSG));
- }
-
@Test
public void testFinalizeUpgrade() throws Exception {
- StackId sourceStack = HDP_21_STACK;
- StackId targetStack = HDP_21_STACK;
- String sourceRepo = HDP_2_1_1_0;
- String targetRepo = HDP_2_1_1_1;
String hostName = "h1";
- createUpgradeClusterAndSourceRepo(sourceStack, sourceRepo, hostName);
- createUpgradeClusterTargetRepo(targetStack, targetRepo, hostName);
+ createUpgradeCluster(repositoryVersion2110, hostName);
+ createHostVersions(repositoryVersion2111, hostName);
Cluster cluster = clusters.getCluster(clusterName);
createUpgrade(cluster, repositoryVersion2111);
- RepositoryInfo repo = ambariMetaInfo.getRepository(sourceStack.getStackName(), sourceStack.getStackVersion(), "redhat6", sourceStack.getStackId());
- assertEquals(HDP_211_CENTOS6_REPO_URL, repo.getBaseUrl());
-
// Finalize the upgrade
Map<String, String> commandParams = new HashMap<>();
ExecutionCommand executionCommand = new ExecutionCommand();
@@ -579,14 +430,10 @@ public class UpgradeActionTest {
*/
@Test
public void testFinalizeWithHostsAlreadyCurrent() throws Exception {
- StackId sourceStack = HDP_21_STACK;
- StackId targetStack = HDP_21_STACK;
- String sourceRepo = HDP_2_1_1_0;
- String targetRepo = HDP_2_1_1_1;
String hostName = "h1";
- createUpgradeClusterAndSourceRepo(sourceStack, sourceRepo, hostName);
- createUpgradeClusterTargetRepo(targetStack, targetRepo, hostName);
+ createUpgradeCluster(repositoryVersion2110, hostName);
+ createHostVersions(repositoryVersion2111, hostName);
// move the old version from CURRENT to INSTALLED and the new version from
// UPGRADED to CURRENT - this will simulate what happens when a host is
@@ -607,10 +454,6 @@ public class UpgradeActionTest {
createUpgrade(cluster, repositoryVersion2111);
- RepositoryInfo repo = ambariMetaInfo.getRepository(sourceStack.getStackName(),
- sourceStack.getStackVersion(), "redhat6", sourceStack.getStackId());
- assertEquals(HDP_211_CENTOS6_REPO_URL, repo.getBaseUrl());
-
// Finalize the upgrade
Map<String, String> commandParams = new HashMap<>();
@@ -935,10 +778,23 @@ public class UpgradeActionTest {
upgradeEntity.setRepositoryVersion(repositoryVersion);
upgradeEntity.setUpgradeType(UpgradeType.NON_ROLLING);
- upgradeDAO.create(upgradeEntity);
+ Map<String, Service> services = cluster.getServices();
+ for (String serviceName : services.keySet()) {
+ Service service = services.get(serviceName);
+ Map<String, ServiceComponent> components = service.getServiceComponents();
+ for (String componentName : components.keySet()) {
+ UpgradeHistoryEntity history = new UpgradeHistoryEntity();
+ history.setUpgrade(upgradeEntity);
+ history.setServiceName(serviceName);
+ history.setComponentName(componentName);
+ history.setFromRepositoryVersion(service.getDesiredRepositoryVersion());
+ history.setTargetRepositoryVersion(repositoryVersion);
+ upgradeEntity.addHistory(history);
+ }
+ }
+ upgradeDAO.create(upgradeEntity);
cluster.setUpgradeEntity(upgradeEntity);
-
return upgradeEntity;
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java
index f43dbd8..c6f3276 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java
@@ -105,7 +105,7 @@ public class ConfigGroupTest {
configs.put(config.getType(), config);
hosts.put(host.getHostId(), host);
- ConfigGroup configGroup = configGroupFactory.createNew(cluster, "cg-test",
+ ConfigGroup configGroup = configGroupFactory.createNew(cluster, "HDFS", "cg-test",
"HDFS", "New HDFS configs for h1", configs, hosts);
cluster.addConfigGroup(configGroup);
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
index dd0a840..e9e5399 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
@@ -257,7 +257,7 @@ public class ConfigHelperTest {
configMap.put(config.getType(), config);
}
- ConfigGroup configGroup = configGroupFactory.createNew(cluster, name,
+ ConfigGroup configGroup = configGroupFactory.createNew(cluster, null, name,
tag, "", configMap, hostMap);
LOG.info("Config group created with tag " + tag);
configGroup.setTag(tag);