You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by jo...@apache.org on 2017/05/23 21:35:20 UTC
[1/3] ambari git commit: AMBARI-21078 - Merging Configurations On
Service/Patch Upgrades Should Create New Configurations Only For Included
Services (jonathanhurley)
Repository: ambari
Updated Branches:
refs/heads/branch-feature-AMBARI-12556 a45f5427b -> c4148d805
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
index 98f5228..24c529d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
@@ -42,6 +42,7 @@ import org.apache.ambari.annotations.Experimental;
import org.apache.ambari.annotations.ExperimentalFeature;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.H2DatabaseCleaner;
+import org.apache.ambari.server.actionmanager.HostRoleCommandFactory;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.controller.AmbariManagementController;
import org.apache.ambari.server.controller.ClusterRequest;
@@ -55,6 +56,7 @@ import org.apache.ambari.server.security.authorization.AuthorizationException;
import org.apache.ambari.server.stack.HostsType;
import org.apache.ambari.server.stack.MasterHostResolver;
import org.apache.ambari.server.stack.StackManagerMock;
+import org.apache.ambari.server.stageplanner.RoleGraphFactory;
import org.apache.ambari.server.state.UpgradeHelper.UpgradeGroupHolder;
import org.apache.ambari.server.state.stack.ConfigUpgradePack;
import org.apache.ambari.server.state.stack.UpgradePack;
@@ -100,6 +102,7 @@ import com.google.inject.util.Modules;
*/
public class UpgradeHelperTest {
+ private static final StackId STACK_ID_HDP_211 = new StackId("HDP-2.1.1");
private static final StackId STACK_ID_HDP_220 = new StackId("HDP-2.2.0");
private static final String UPGRADE_VERSION = "2.2.1.0-1234";
private static final String DOWNGRADE_VERSION = "2.2.0.0-1234";
@@ -113,8 +116,8 @@ public class UpgradeHelperTest {
private ConfigHelper m_configHelper;
private AmbariManagementController m_managementController;
private Gson m_gson = new Gson();
- private UpgradeContextFactory m_upgradeContextFactory;
+ private RepositoryVersionEntity repositoryVersion2110;
private RepositoryVersionEntity repositoryVersion2200;
private RepositoryVersionEntity repositoryVersion2210;
@@ -160,8 +163,8 @@ public class UpgradeHelperTest {
m_upgradeHelper = injector.getInstance(UpgradeHelper.class);
m_masterHostResolver = EasyMock.createMock(MasterHostResolver.class);
m_managementController = injector.getInstance(AmbariManagementController.class);
- m_upgradeContextFactory = injector.getInstance(UpgradeContextFactory.class);
+ repositoryVersion2110 = helper.getOrCreateRepositoryVersion(STACK_ID_HDP_211, "2.1.1.0-1234");
repositoryVersion2200 = helper.getOrCreateRepositoryVersion(STACK_ID_HDP_220, DOWNGRADE_VERSION);
repositoryVersion2210 = helper.getOrCreateRepositoryVersion(STACK_ID_HDP_220, UPGRADE_VERSION);
@@ -294,14 +297,9 @@ public class UpgradeHelperTest {
Cluster cluster = makeCluster();
Set<String> services = Collections.singleton("ZOOKEEPER");
- UpgradeContext context = EasyMock.createNiceMock(UpgradeContext.class);
- EasyMock.expect(context.getCluster()).andReturn(cluster).anyTimes();
- EasyMock.expect(context.getType()).andReturn(UpgradeType.ROLLING).anyTimes();
- EasyMock.expect(context.getDirection()).andReturn(Direction.UPGRADE).anyTimes();
- EasyMock.expect(context.getRepositoryVersion()).andReturn(repositoryVersion2210).anyTimes();
- EasyMock.expect(context.getSupportedServices()).andReturn(services).anyTimes();
- EasyMock.expect(context.getRepositoryType()).andReturn(RepositoryType.PATCH).anyTimes();
- EasyMock.replay(context);
+
+ UpgradeContext context = getMockUpgradeContext(cluster, Direction.UPGRADE, UpgradeType.ROLLING,
+ repositoryVersion2210, RepositoryType.PATCH, services);
List<Grouping> groupings = upgrade.getGroups(Direction.UPGRADE);
assertEquals(8, groupings.size());
@@ -460,7 +458,7 @@ public class UpgradeHelperTest {
UpgradeType.ROLLING, repositoryVersion2210);
// use a "real" master host resolver here so that we can actually test MM
- MasterHostResolver masterHostResolver = new MasterHostResolver(null, context);
+ MasterHostResolver masterHostResolver = new MasterHostResolver(cluster, null, context);
EasyMock.expect(context.getResolver()).andReturn(masterHostResolver).anyTimes();
replay(context);
@@ -1525,14 +1523,10 @@ public class UpgradeHelperTest {
String clusterName = "c1";
- String version = "2.1.1.0-1234";
StackId stackId = new StackId("HDP-2.1.1");
clusters.addCluster(clusterName, stackId);
Cluster c = clusters.getCluster(clusterName);
- RepositoryVersionEntity repositoryVersion211 = helper.getOrCreateRepositoryVersion(stackId,
- version);
-
for (int i = 0; i < 2; i++) {
String hostName = "h" + (i+1);
clusters.addHost(hostName);
@@ -1548,24 +1542,24 @@ public class UpgradeHelperTest {
}
// !!! add services
- c.addService(serviceFactory.createNew(c, "ZOOKEEPER", repositoryVersion211));
+ c.addService(serviceFactory.createNew(c, "ZOOKEEPER", repositoryVersion2110));
Service s = c.getService("ZOOKEEPER");
ServiceComponent sc = s.addServiceComponent("ZOOKEEPER_SERVER");
ServiceComponentHost sch1 = sc.addServiceComponentHost("h1");
- sch1.setVersion(repositoryVersion211.getVersion());
+ sch1.setVersion(repositoryVersion2110.getVersion());
ServiceComponentHost sch2 = sc.addServiceComponentHost("h2");
- sch2.setVersion(repositoryVersion211.getVersion());
+ sch2.setVersion(repositoryVersion2110.getVersion());
List<ServiceComponentHost> schs = c.getServiceComponentHosts("ZOOKEEPER", "ZOOKEEPER_SERVER");
assertEquals(2, schs.size());
UpgradeContext context = getMockUpgradeContextNoReplay(c, Direction.UPGRADE,
- UpgradeType.HOST_ORDERED, repositoryVersion211);
+ UpgradeType.HOST_ORDERED, repositoryVersion2110);
- MasterHostResolver resolver = new MasterHostResolver(m_configHelper, context);
+ MasterHostResolver resolver = new MasterHostResolver(c, m_configHelper, context);
EasyMock.expect(context.getResolver()).andReturn(resolver).anyTimes();
replay(context);
@@ -1639,7 +1633,7 @@ public class UpgradeHelperTest {
UpgradeType.NON_ROLLING, repositoryVersion211);
// use a "real" master host resolver here so that we can actually test MM
- MasterHostResolver mhr = new MockMasterHostResolver(m_configHelper, context);
+ MasterHostResolver mhr = new MockMasterHostResolver(c, m_configHelper, context);
EasyMock.expect(context.getResolver()).andReturn(mhr).anyTimes();
replay(context);
@@ -1708,7 +1702,7 @@ public class UpgradeHelperTest {
UpgradeType.NON_ROLLING, repositoryVersion211);
// use a "real" master host resolver here so that we can actually test MM
- MasterHostResolver mhr = new BadMasterHostResolver(m_configHelper, context);
+ MasterHostResolver mhr = new BadMasterHostResolver(c, m_configHelper, context);
EasyMock.expect(context.getResolver()).andReturn(mhr).anyTimes();
replay(context);
@@ -1846,7 +1840,7 @@ public class UpgradeHelperTest {
UpgradeType.NON_ROLLING, repoVersion220);
// use a "real" master host resolver here so that we can actually test MM
- MasterHostResolver masterHostResolver = new MasterHostResolver(m_configHelper, context);
+ MasterHostResolver masterHostResolver = new MasterHostResolver(c, m_configHelper, context);
EasyMock.expect(context.getResolver()).andReturn(masterHostResolver).anyTimes();
replay(context);
@@ -1862,7 +1856,7 @@ public class UpgradeHelperTest {
repoVersion211);
// use a "real" master host resolver here so that we can actually test MM
- masterHostResolver = new MasterHostResolver(m_configHelper, context);
+ masterHostResolver = new MasterHostResolver(c, m_configHelper, context);
EasyMock.expect(context.getResolver()).andReturn(masterHostResolver).anyTimes();
replay(context);
@@ -2129,7 +2123,7 @@ public class UpgradeHelperTest {
UpgradeContext context = getMockUpgradeContextNoReplay(c, Direction.UPGRADE,
UpgradeType.HOST_ORDERED, repoVersion220);
- MasterHostResolver resolver = new MasterHostResolver(m_configHelper, context);
+ MasterHostResolver resolver = new MasterHostResolver(c, m_configHelper, context);
EasyMock.expect(context.getResolver()).andReturn(resolver).anyTimes();
replay(context);
@@ -2173,7 +2167,7 @@ public class UpgradeHelperTest {
context = getMockUpgradeContextNoReplay(c, Direction.DOWNGRADE, UpgradeType.HOST_ORDERED,
repoVersion211);
- resolver = new MasterHostResolver(m_configHelper, context);
+ resolver = new MasterHostResolver(c, m_configHelper, context);
EasyMock.expect(context.getResolver()).andReturn(resolver).anyTimes();
replay(context);
@@ -2190,7 +2184,7 @@ public class UpgradeHelperTest {
context = getMockUpgradeContextNoReplay(c, Direction.DOWNGRADE, UpgradeType.HOST_ORDERED,
repoVersion211);
- resolver = new MasterHostResolver(m_configHelper, context);
+ resolver = new MasterHostResolver(c, m_configHelper, context);
EasyMock.expect(context.getResolver()).andReturn(resolver).anyTimes();
replay(context);
@@ -2281,7 +2275,7 @@ public class UpgradeHelperTest {
UpgradeType type, RepositoryVersionEntity repositoryVersion, RepositoryType repositoryType,
Set<String> services) {
return getMockUpgradeContext(cluster, direction, type, repositoryVersion,
- repositoryType, services, m_masterHostResolver);
+ repositoryType, services, m_masterHostResolver, true);
}
/**
@@ -2294,15 +2288,8 @@ public class UpgradeHelperTest {
UpgradeType type, RepositoryVersionEntity repositoryVersion) {
Set<String> allServices = cluster.getServices().keySet();
- UpgradeContext context = EasyMock.createNiceMock(UpgradeContext.class);
- EasyMock.expect(context.getCluster()).andReturn(cluster).anyTimes();
- EasyMock.expect(context.getType()).andReturn(type).anyTimes();
- EasyMock.expect(context.getDirection()).andReturn(direction).anyTimes();
- EasyMock.expect(context.getRepositoryVersion()).andReturn(repositoryVersion).anyTimes();
- EasyMock.expect(context.getSupportedServices()).andReturn(allServices).anyTimes();
- EasyMock.expect(context.getRepositoryType()).andReturn(RepositoryType.STANDARD).anyTimes();
- EasyMock.expect(context.isScoped(EasyMock.anyObject(UpgradeScope.class))).andReturn(true).anyTimes();
- return context;
+ return getMockUpgradeContext(cluster, direction, type, repositoryVersion,
+ RepositoryType.STANDARD, allServices, null, false);
}
/**
@@ -2314,8 +2301,8 @@ public class UpgradeHelperTest {
* @return
*/
private UpgradeContext getMockUpgradeContext(Cluster cluster, Direction direction,
- UpgradeType type, RepositoryVersionEntity repositoryVersion, RepositoryType repositoryType,
- Set<String> services, MasterHostResolver resolver) {
+ UpgradeType type, RepositoryVersionEntity repositoryVersion, final RepositoryType repositoryType,
+ Set<String> services, MasterHostResolver resolver, boolean replay) {
UpgradeContext context = EasyMock.createNiceMock(UpgradeContext.class);
EasyMock.expect(context.getCluster()).andReturn(cluster).anyTimes();
EasyMock.expect(context.getType()).andReturn(type).anyTimes();
@@ -2323,9 +2310,14 @@ public class UpgradeHelperTest {
EasyMock.expect(context.getRepositoryVersion()).andReturn(repositoryVersion).anyTimes();
EasyMock.expect(context.getSupportedServices()).andReturn(services).anyTimes();
EasyMock.expect(context.getRepositoryType()).andReturn(repositoryType).anyTimes();
- EasyMock.expect(context.getResolver()).andReturn(resolver).anyTimes();
- EasyMock.expect(context.isScoped(EasyMock.anyObject(UpgradeScope.class))).andReturn(true).anyTimes();
EasyMock.expect(context.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
+ EasyMock.expect(context.getHostRoleCommandFactory()).andStubReturn(injector.getInstance(HostRoleCommandFactory.class));
+ EasyMock.expect(context.getRoleGraphFactory()).andStubReturn(injector.getInstance(RoleGraphFactory.class));
+
+ // only set this if supplied
+ if (null != resolver) {
+ EasyMock.expect(context.getResolver()).andReturn(resolver).anyTimes();
+ }
final Map<String, RepositoryVersionEntity> targetRepositoryVersions = new HashMap<>();
for( String serviceName : services ){
@@ -2353,8 +2345,6 @@ public class UpgradeHelperTest {
final Map<String, String> serviceNames = new HashMap<>();
-
-
final Capture<String> serviceDisplayNameArg1 = EasyMock.newCapture();
final Capture<String> serviceDisplayNameArg2 = EasyMock.newCapture();
@@ -2408,7 +2398,28 @@ public class UpgradeHelperTest {
}
}).anyTimes();
- replay(context);
+ final Capture<UpgradeScope> isScopedCapture = EasyMock.newCapture();
+ EasyMock.expect(context.isScoped(EasyMock.capture(isScopedCapture))).andStubAnswer(
+ new IAnswer<Boolean>() {
+ @Override
+ public Boolean answer() throws Throwable {
+ UpgradeScope scope = isScopedCapture.getValue();
+ if (scope == UpgradeScope.ANY) {
+ return true;
+ }
+
+ if (scope == UpgradeScope.PARTIAL) {
+ return repositoryType != RepositoryType.STANDARD;
+ }
+
+ return repositoryType == RepositoryType.STANDARD;
+ }
+ });
+
+ if (replay) {
+ replay(context);
+ }
+
return context;
}
@@ -2418,8 +2429,8 @@ public class UpgradeHelperTest {
*/
private class MockMasterHostResolver extends MasterHostResolver {
- public MockMasterHostResolver(ConfigHelper configHelper, UpgradeContext context) {
- super(configHelper, context);
+ public MockMasterHostResolver(Cluster cluster, ConfigHelper configHelper, UpgradeContext context) {
+ super(cluster, configHelper, context);
}
/**
@@ -2461,8 +2472,8 @@ public class UpgradeHelperTest {
private static class BadMasterHostResolver extends MasterHostResolver {
- public BadMasterHostResolver(ConfigHelper configHelper, UpgradeContext context) {
- super(configHelper, context);
+ public BadMasterHostResolver(Cluster cluster, ConfigHelper configHelper, UpgradeContext context) {
+ super(cluster, configHelper, context);
}
@Override
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
index 38c9d1c..8c4cb93 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
@@ -1293,6 +1293,9 @@ public class ClusterTest {
public void testServiceConfigVersionsForGroups() throws Exception {
createDefaultCluster();
+ RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(c1);
+ c1.addService("HDFS", repositoryVersion);
+
Config config1 = configFactory.createNew(c1, "hdfs-site", "version1",
new HashMap<String, String>() {{ put("a", "b"); }}, new HashMap<String, Map<String,String>>());
@@ -1310,7 +1313,7 @@ public class ClusterTest {
new HashMap<String, String>() {{ put("a", "c"); }}, new HashMap<String, Map<String,String>>());
ConfigGroup configGroup =
- configGroupFactory.createNew(c1, "test group", "HDFS", "descr", Collections.singletonMap("hdfs-site", config2),
+ configGroupFactory.createNew(c1, "HDFS", "test group", "HDFS", "descr", Collections.singletonMap("hdfs-site", config2),
Collections.<Long, Host>emptyMap());
c1.addConfigGroup(configGroup);
@@ -1362,7 +1365,7 @@ public class ClusterTest {
Collections.singletonMap("a", "b"), null);
ConfigGroup configGroup2 =
- configGroupFactory.createNew(c1, "test group 2", "HDFS", "descr",
+ configGroupFactory.createNew(c1, "HDFS", "test group 2", "HDFS", "descr",
new HashMap<>(Collections.singletonMap("hdfs-site", config4)),
Collections.<Long, Host>emptyMap());
@@ -1397,7 +1400,7 @@ public class ClusterTest {
Config hdfsSiteConfigV2 = configFactory.createNew(c1, "hdfs-site", "version2",
ImmutableMap.of("p1", "v2"), ImmutableMap.<String, Map<String,String>>of());
- ConfigGroup configGroup = configGroupFactory.createNew(c1, "configGroup1", "version1", "test description", ImmutableMap.of(hdfsSiteConfigV2.getType(), hdfsSiteConfigV2), ImmutableMap.<Long, Host>of());
+ ConfigGroup configGroup = configGroupFactory.createNew(c1, "HDFS", "configGroup1", "version1", "test description", ImmutableMap.of(hdfsSiteConfigV2.getType(), hdfsSiteConfigV2), ImmutableMap.<Long, Host>of());
c1.addConfigGroup(configGroup);
ServiceConfigVersionResponse hdfsSiteConfigResponseV2 = c1.createServiceConfigVersion("HDFS", "admin", "test note", configGroup);
@@ -1456,7 +1459,7 @@ public class ClusterTest {
Config hdfsSiteConfigV2 = configFactory.createNew(c1, "hdfs-site", "version2",
ImmutableMap.of("p1", "v2"), ImmutableMap.<String, Map<String,String>>of());
- ConfigGroup configGroup = configGroupFactory.createNew(c1, "configGroup1", "version1", "test description", ImmutableMap.of(hdfsSiteConfigV2.getType(), hdfsSiteConfigV2), ImmutableMap.<Long, Host>of());
+ ConfigGroup configGroup = configGroupFactory.createNew(c1, "HDFS", "configGroup1", "version1", "test description", ImmutableMap.of(hdfsSiteConfigV2.getType(), hdfsSiteConfigV2), ImmutableMap.<Long, Host>of());
c1.addConfigGroup(configGroup);
ServiceConfigVersionResponse hdfsSiteConfigResponseV2 = c1.createServiceConfigVersion("HDFS", "admin", "test note", configGroup);
@@ -1934,7 +1937,7 @@ public class ClusterTest {
}
}, new HashMap<String, Map<String, String>>());
- ConfigGroup configGroup = configGroupFactory.createNew(cluster, "g1", "t1", "",
+ ConfigGroup configGroup = configGroupFactory.createNew(cluster, "HDFS", "g1", "t1", "",
new HashMap<String, Config>() {
{
put("foo-site", originalConfig);
@@ -1974,25 +1977,31 @@ public class ClusterTest {
}
/**
- * Tests that {@link Cluster#applyLatestConfigurations(StackId)} sets the
+ * Tests that {@link Cluster#applyLatestConfigurations(StackId, String)} sets the
* right configs to enabled.
*
* @throws Exception
*/
@Test
public void testApplyLatestConfigurations() throws Exception {
- createDefaultCluster();
+ StackId stackId = new StackId("HDP-2.0.6");
+ StackId newStackId = new StackId("HDP-2.2.0");
+ createDefaultCluster(Sets.newHashSet("host-1"), stackId);
+
Cluster cluster = clusters.getCluster("c1");
ClusterEntity clusterEntity = clusterDAO.findByName("c1");
- StackId stackId = cluster.getCurrentStackVersion();
- StackId newStackId = new StackId("HDP-2.0.6");
+ RepositoryVersionEntity repoVersion220 = helper.getOrCreateRepositoryVersion(newStackId, "2.2.0-1234");
StackEntity currentStack = stackDAO.find(stackId.getStackName(), stackId.getStackVersion());
StackEntity newStack = stackDAO.find(newStackId.getStackName(), newStackId.getStackVersion());
- Assert.assertFalse( stackId.equals(newStackId) );
+ Assert.assertFalse(stackId.equals(newStackId));
- String configType = "foo-type";
+ // add a service
+ String serviceName = "ZOOKEEPER";
+ RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(c1);
+ Service service = cluster.addService(serviceName, repositoryVersion);
+ String configType = "zoo.cfg";
ClusterConfigEntity clusterConfig1 = new ClusterConfigEntity();
clusterConfig1.setClusterEntity(clusterEntity);
@@ -2009,6 +2018,8 @@ public class ClusterTest {
clusterEntity.getClusterConfigEntities().add(clusterConfig1);
clusterEntity = clusterDAO.merge(clusterEntity);
+ cluster.createServiceConfigVersion(serviceName, "", "version-1", null);
+
ClusterConfigEntity clusterConfig2 = new ClusterConfigEntity();
clusterConfig2.setClusterEntity(clusterEntity);
clusterConfig2.setConfigId(2L);
@@ -2024,6 +2035,11 @@ public class ClusterTest {
clusterEntity.getClusterConfigEntities().add(clusterConfig2);
clusterEntity = clusterDAO.merge(clusterEntity);
+ // before creating the new service config version, we need to push the
+ // service's desired repository forward
+ service.setDesiredRepositoryVersion(repoVersion220);
+ cluster.createServiceConfigVersion(serviceName, "", "version-2", null);
+
// check that the original config is enabled
Collection<ClusterConfigEntity> clusterConfigs = clusterEntity.getClusterConfigEntities();
Assert.assertEquals(2, clusterConfigs.size());
@@ -2035,7 +2051,7 @@ public class ClusterTest {
}
}
- cluster.applyLatestConfigurations(newStackId);
+ cluster.applyLatestConfigurations(newStackId, serviceName);
clusterEntity = clusterDAO.findByName("c1");
// now check that the new config is enabled
@@ -2059,18 +2075,24 @@ public class ClusterTest {
*/
@Test
public void testApplyLatestConfigurationsToPreviousStack() throws Exception {
- createDefaultCluster();
+ StackId stackId = new StackId("HDP-2.0.6");
+ StackId newStackId = new StackId("HDP-2.2.0");
+ createDefaultCluster(Sets.newHashSet("host-1"), stackId);
+
Cluster cluster = clusters.getCluster("c1");
ClusterEntity clusterEntity = clusterDAO.findByName("c1");
- StackId stackId = cluster.getCurrentStackVersion();
- StackId newStackId = new StackId("HDP-2.0.6");
+ RepositoryVersionEntity repoVersion220 = helper.getOrCreateRepositoryVersion(newStackId, "2.2.0-1234");
StackEntity currentStack = stackDAO.find(stackId.getStackName(), stackId.getStackVersion());
StackEntity newStack = stackDAO.find(newStackId.getStackName(), newStackId.getStackVersion());
Assert.assertFalse(stackId.equals(newStackId));
- String configType = "foo-type";
+ // add a service
+ String serviceName = "ZOOKEEPER";
+ RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(c1);
+ Service service = cluster.addService(serviceName, repositoryVersion);
+ String configType = "zoo.cfg";
// create 5 configurations in the current stack
for (int i = 1; i <= 5; i++) {
@@ -2100,6 +2122,9 @@ public class ClusterTest {
// save them all
clusterEntity = clusterDAO.merge(clusterEntity);
+ // create a service configuration for them
+ cluster.createServiceConfigVersion(serviceName, "", "version-1", null);
+
// create a new configuration in the new stack and enable it
ClusterConfigEntity clusterConfigNewStack = new ClusterConfigEntity();
clusterConfigNewStack.setClusterEntity(clusterEntity);
@@ -2116,6 +2141,11 @@ public class ClusterTest {
clusterEntity.getClusterConfigEntities().add(clusterConfigNewStack);
clusterEntity = clusterDAO.merge(clusterEntity);
+ // before creating the new service config version, we need to push the
+ // service's desired repository forward
+ service.setDesiredRepositoryVersion(repoVersion220);
+ cluster.createServiceConfigVersion(serviceName, "", "version-2", null);
+
// check that only the newest configuration is enabled
ClusterConfigEntity clusterConfig = clusterDAO.findEnabledConfigByType(
clusterEntity.getClusterId(), configType);
@@ -2123,7 +2153,7 @@ public class ClusterTest {
Assert.assertEquals(clusterConfigNewStack.getTag(), clusterConfig.getTag());
// move back to the original stack
- cluster.applyLatestConfigurations(stackId);
+ cluster.applyLatestConfigurations(stackId, serviceName);
clusterEntity = clusterDAO.findByName("c1");
// now check that latest config from the original stack is enabled
@@ -2138,65 +2168,73 @@ public class ClusterTest {
*/
@Test
public void testDesiredConfigurationsAfterApplyingLatestForStack() throws Exception {
- createDefaultCluster();
- Cluster cluster = clusters.getCluster("c1");
- StackId stackId = cluster.getCurrentStackVersion();
+ StackId stackId = new StackId("HDP-2.0.6");
StackId newStackId = new StackId("HDP-2.2.0");
+ createDefaultCluster(Sets.newHashSet("host-1"), stackId);
+
+ Cluster cluster = clusters.getCluster("c1");
+ RepositoryVersionEntity repoVersion220 = helper.getOrCreateRepositoryVersion(newStackId, "2.2.0-1234");
ConfigHelper configHelper = injector.getInstance(ConfigHelper.class);
// make sure the stacks are different
Assert.assertFalse(stackId.equals(newStackId));
+ // add a service
+ String serviceName = "ZOOKEEPER";
+ RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(c1);
+ Service service = cluster.addService(serviceName, repositoryVersion);
+ String configType = "zoo.cfg";
+
Map<String, String> properties = new HashMap<>();
Map<String, Map<String, String>> propertiesAttributes = new HashMap<>();
- // foo-type for v1 on current stack
+ // config for v1 on current stack
properties.put("foo-property-1", "foo-value-1");
- Config c1 = configFactory.createNew(cluster, "foo-type", "version-1", properties, propertiesAttributes);
+ Config c1 = configFactory.createNew(stackId, cluster, configType, "version-1", properties, propertiesAttributes);
// make v1 "current"
cluster.addDesiredConfig("admin", Sets.newHashSet(c1), "note-1");
- // bump the stack
- cluster.setDesiredStackVersion(newStackId);
+ // bump the repo version
+ service.setDesiredRepositoryVersion(repoVersion220);
// save v2
- // foo-type for v2 on new stack
+ // config for v2 on new stack
properties.put("foo-property-2", "foo-value-2");
- Config c2 = configFactory.createNew(cluster, "foo-type", "version-2", properties, propertiesAttributes);
+ Config c2 = configFactory.createNew(newStackId, cluster, configType, "version-2", properties, propertiesAttributes);
// make v2 "current"
cluster.addDesiredConfig("admin", Sets.newHashSet(c2), "note-2");
// check desired config
Map<String, DesiredConfig> desiredConfigs = cluster.getDesiredConfigs();
- DesiredConfig desiredConfig = desiredConfigs.get("foo-type");
- desiredConfig = desiredConfigs.get("foo-type");
+ DesiredConfig desiredConfig = desiredConfigs.get(configType);
+ desiredConfig = desiredConfigs.get(configType);
assertNotNull(desiredConfig);
assertEquals(Long.valueOf(2), desiredConfig.getVersion());
assertEquals("version-2", desiredConfig.getTag());
String hostName = cluster.getHosts().iterator().next().getHostName();
- // {foo-type={tag=version-2}}
+ // {config-type={tag=version-2}}
Map<String, Map<String, String>> effectiveDesiredTags = configHelper.getEffectiveDesiredTags(
cluster, hostName);
- assertEquals("version-2", effectiveDesiredTags.get("foo-type").get("tag"));
+ assertEquals("version-2", effectiveDesiredTags.get(configType).get("tag"));
- // move the stack back to the old stack
- cluster.setDesiredStackVersion(stackId);
+ // move the service back to the old repo version / stack
+ service.setDesiredRepositoryVersion(repositoryVersion);
// apply the configs for the old stack
- cluster.applyLatestConfigurations(stackId);
+ cluster.applyLatestConfigurations(stackId, serviceName);
- // {foo-type={tag=version-1}}
+ // {config-type={tag=version-1}}
effectiveDesiredTags = configHelper.getEffectiveDesiredTags(cluster, hostName);
- assertEquals("version-1", effectiveDesiredTags.get("foo-type").get("tag"));
+ assertEquals("version-1", effectiveDesiredTags.get(configType).get("tag"));
desiredConfigs = cluster.getDesiredConfigs();
- desiredConfig = desiredConfigs.get("foo-type");
+ desiredConfig = desiredConfigs.get(configType);
assertNotNull(desiredConfig);
assertEquals(Long.valueOf(1), desiredConfig.getVersion());
assertEquals("version-1", desiredConfig.getTag());
@@ -2209,18 +2247,24 @@ public class ClusterTest {
*/
@Test
public void testRemoveConfigurations() throws Exception {
- createDefaultCluster();
+ StackId stackId = new StackId("HDP-2.0.6");
+ StackId newStackId = new StackId("HDP-2.2.0");
+ createDefaultCluster(Sets.newHashSet("host-1"), stackId);
+
Cluster cluster = clusters.getCluster("c1");
ClusterEntity clusterEntity = clusterDAO.findByName("c1");
- StackId stackId = cluster.getCurrentStackVersion();
- StackId newStackId = new StackId("HDP-2.0.6");
+ RepositoryVersionEntity repoVersion220 = helper.getOrCreateRepositoryVersion(newStackId, "2.2.0-1234");
StackEntity currentStack = stackDAO.find(stackId.getStackName(), stackId.getStackVersion());
StackEntity newStack = stackDAO.find(newStackId.getStackName(), newStackId.getStackVersion());
Assert.assertFalse(stackId.equals(newStackId));
- String configType = "foo-type";
+ // add a service
+ String serviceName = "ZOOKEEPER";
+ RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(c1);
+ Service service = cluster.addService(serviceName, repositoryVersion);
+ String configType = "zoo.cfg";
ClusterConfigEntity clusterConfig = new ClusterConfigEntity();
clusterConfig.setClusterEntity(clusterEntity);
@@ -2237,6 +2281,13 @@ public class ClusterTest {
clusterEntity.getClusterConfigEntities().add(clusterConfig);
clusterEntity = clusterDAO.merge(clusterEntity);
+ // create the service version association
+ cluster.createServiceConfigVersion(serviceName, "", "version-1", null);
+
+ // now un-select it and create a new config
+ clusterConfig.setSelected(false);
+ clusterConfig = clusterDAO.merge(clusterConfig);
+
ClusterConfigEntity newClusterConfig = new ClusterConfigEntity();
newClusterConfig.setClusterEntity(clusterEntity);
newClusterConfig.setConfigId(2L);
@@ -2246,12 +2297,19 @@ public class ClusterTest {
newClusterConfig.setType(configType);
newClusterConfig.setTimestamp(2L);
newClusterConfig.setVersion(2L);
- newClusterConfig.setSelected(false);
+ newClusterConfig.setSelected(true);
clusterDAO.createConfig(newClusterConfig);
clusterEntity.getClusterConfigEntities().add(newClusterConfig);
clusterEntity = clusterDAO.merge(clusterEntity);
+ // before creating the new service config version, we need to push the
+ // service's desired repository forward
+ service.setDesiredRepositoryVersion(repoVersion220);
+ cluster.createServiceConfigVersion(serviceName, "", "version-2", null);
+
+ cluster.applyLatestConfigurations(newStackId, serviceName);
+
// get back the cluster configs for the new stack
List<ClusterConfigEntity> clusterConfigs = clusterDAO.getAllConfigurations(
cluster.getClusterId(), newStackId);
@@ -2259,7 +2317,7 @@ public class ClusterTest {
Assert.assertEquals(1, clusterConfigs.size());
// remove the configs
- cluster.removeConfigurations(newStackId);
+ cluster.removeConfigurations(newStackId, serviceName);
clusterConfigs = clusterDAO.getAllConfigurations(cluster.getClusterId(), newStackId);
Assert.assertEquals(0, clusterConfigs.size());
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
index b8c0e7c..c851419 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
@@ -545,7 +545,7 @@ public class ServiceComponentHostTest {
Cluster cluster = clusters.getCluster(clusterName);
- final ConfigGroup configGroup = configGroupFactory.createNew(cluster,
+ final ConfigGroup configGroup = configGroupFactory.createNew(cluster, "HDFS",
"cg1", "t1", "", new HashMap<String, Config>(), new HashMap<Long, Host>());
cluster.addConfigGroup(configGroup);
@@ -799,7 +799,7 @@ public class ServiceComponentHostTest {
new HashMap<String, Map<String,String>>());
host.addDesiredConfig(cluster.getClusterId(), true, "user", c);
- ConfigGroup configGroup = configGroupFactory.createNew(cluster, "g1",
+ ConfigGroup configGroup = configGroupFactory.createNew(cluster, "HDFS", "g1",
"t1", "", new HashMap<String, Config>() {{ put("hdfs-site", c); }},
new HashMap<Long, Host>() {{ put(hostEntity.getHostId(), host); }});
cluster.addConfigGroup(configGroup);
@@ -855,7 +855,7 @@ public class ServiceComponentHostTest {
final Config c1 = configFactory.createNew(cluster, "core-site", "version2",
new HashMap<String, String>() {{ put("fs.trash.interval", "400"); }},
new HashMap<String, Map<String,String>>());
- configGroup = configGroupFactory.createNew(cluster, "g2",
+ configGroup = configGroupFactory.createNew(cluster, "HDFS", "g2",
"t2", "", new HashMap<String, Config>() {{ put("core-site", c1); }},
new HashMap<Long, Host>() {{ put(hostEntity.getHostId(), host); }});
cluster.addConfigGroup(configGroup);
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalogTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalogTest.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalogTest.java
index 26df0d2..066ec34 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalogTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalogTest.java
@@ -131,7 +131,7 @@ public class AbstractUpgradeCatalogTest {
mergedProperties.put("prop1", "v1-old");
mergedProperties.put("prop4", "v4");
- expect(amc.createConfig(anyObject(StackId.class), eq(cluster), eq("hdfs-site"), eq(mergedProperties), anyString(), eq(tags))).andReturn(null);
+ expect(amc.createConfig(eq(cluster), anyObject(StackId.class), eq("hdfs-site"), eq(mergedProperties), anyString(), eq(tags))).andReturn(null);
replay(injector, configHelper, amc, cluster, clusters, serviceInfo, oldConfig);
@@ -153,7 +153,7 @@ public class AbstractUpgradeCatalogTest {
mergedProperties.put("prop2", "v2");
mergedProperties.put("prop3", "v3-old");
- expect(amc.createConfig(anyObject(StackId.class), eq(cluster), eq("hdfs-site"), eq(mergedProperties), anyString(), eq(tags))).andReturn(null);
+ expect(amc.createConfig(eq(cluster), anyObject(StackId.class), eq("hdfs-site"), eq(mergedProperties), anyString(), eq(tags))).andReturn(null);
replay(injector, configHelper, amc, cluster, clusters, serviceInfo, oldConfig);
@@ -172,7 +172,7 @@ public class AbstractUpgradeCatalogTest {
Map<String, String> mergedProperties = new HashMap<>();
mergedProperties.put("prop1", "v1-old");
- expect(amc.createConfig(anyObject(StackId.class), eq(cluster), eq("hdfs-site"), eq(mergedProperties), anyString(), eq(tags))).andReturn(null);
+ expect(amc.createConfig(eq(cluster), anyObject(StackId.class), eq("hdfs-site"), eq(mergedProperties), anyString(), eq(tags))).andReturn(null);
replay(injector, configHelper, amc, cluster, clusters, serviceInfo, oldConfig);
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java
index a8f5f62..7218578 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java
@@ -517,7 +517,7 @@ public class UpgradeCatalog210Test {
expect(mockClusterExpected.getDesiredConfigByType("hive-site")).andReturn(mockHiveSite).atLeastOnce();
expect(mockHiveSite.getProperties()).andReturn(propertiesExpectedHiveSite).anyTimes();
expect(mockClusterExpected.getServices()).andReturn(servicesExpected).atLeastOnce();
- expect(mockAmbariManagementController.createConfig(anyObject(StackId.class), (Cluster)anyObject(),
+ expect(mockAmbariManagementController.createConfig((Cluster)anyObject(), anyObject(StackId.class),
anyString(),
capture(configCreation),
anyString(),
@@ -601,7 +601,7 @@ public class UpgradeCatalog210Test {
expect(mockHiveSite.getProperties()).andReturn(propertiesExpectedHiveSite).anyTimes();
expect(mockHivePluginProperies.getProperties()).andReturn(propertiesExpectedPluginProperies).anyTimes();
expect(mockClusterExpected.getServices()).andReturn(servicesExpected).atLeastOnce();
- expect(mockAmbariManagementController.createConfig(anyObject(StackId.class), (Cluster) anyObject(),
+ expect(mockAmbariManagementController.createConfig((Cluster) anyObject(), anyObject(StackId.class),
anyString(),
capture(configCreation),
anyString(),
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog211Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog211Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog211Test.java
index f2e9974..14fb598 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog211Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog211Test.java
@@ -269,7 +269,7 @@ public class UpgradeCatalog211Test extends EasyMockSupport {
Capture<Map<String, Map<String, String>>> attributesCapture = newCapture();
- expect(controller.createConfig(anyObject(StackId.class), capture(clusterCapture), capture(typeCapture),
+ expect(controller.createConfig(capture(clusterCapture), anyObject(StackId.class),capture(typeCapture),
capture(propertiesCapture), capture(tagCapture), capture(attributesCapture) ))
.andReturn(createNiceMock(Config.class))
.once();
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog220Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog220Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog220Test.java
index 4c9f661..1c3d34b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog220Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog220Test.java
@@ -605,7 +605,7 @@ public class UpgradeCatalog220Test {
expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
expect(controller.getClusters()).andReturn(clusters).anyTimes();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), anyString(), capture(propertiesCapture), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(createNiceMock(Config.class)).once();
replay(controller, injector2);
@@ -666,7 +666,7 @@ public class UpgradeCatalog220Test {
expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
expect(controller.getClusters()).andReturn(clusters).anyTimes();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), anyString(), capture(propertiesCapture), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(createNiceMock(Config.class)).once();
replay(controller, injector2);
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog221Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog221Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog221Test.java
index 102c629..ff859f0 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog221Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog221Test.java
@@ -455,7 +455,7 @@ public class UpgradeCatalog221Test {
expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
expect(controller.getClusters()).andReturn(clusters).anyTimes();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), anyString(), capture(propertiesCapture), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(createNiceMock(Config.class)).anyTimes();
replay(controller, injector2);
@@ -511,7 +511,7 @@ public class UpgradeCatalog221Test {
expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
expect(controller.getClusters()).andReturn(clusters).anyTimes();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), anyString(), capture(propertiesCapture), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(createNiceMock(Config.class)).once();
replay(controller, injector2);
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog222Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog222Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog222Test.java
index ba2cf79..9611334 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog222Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog222Test.java
@@ -553,7 +553,7 @@ public class UpgradeCatalog222Test {
expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
expect(controller.getClusters()).andReturn(clusters).anyTimes();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), anyString(), capture(propertiesCapture), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(createNiceMock(Config.class)).once();
replay(controller, injector2);
@@ -612,7 +612,7 @@ public class UpgradeCatalog222Test {
expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
expect(controller.getClusters()).andReturn(clusters).anyTimes();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), anyString(), capture(propertiesCapture), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(createNiceMock(Config.class)).once();
replay(controller, injector2);
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java
index f4903fe..46ce2d5 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java
@@ -765,9 +765,9 @@ public class UpgradeCatalog240Test {
Capture<Map<String, String>> oozieCapture = newCapture();
Capture<Map<String, String>> hiveCapture = newCapture();
- expect(mockAmbariManagementController.createConfig(anyObject(StackId.class), eq(mockClusterExpected), eq("oozie-env"),
+ expect(mockAmbariManagementController.createConfig(eq(mockClusterExpected), anyObject(StackId.class), eq("oozie-env"),
capture(oozieCapture), anyString(), EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(null).once();
- expect(mockAmbariManagementController.createConfig(anyObject(StackId.class), eq(mockClusterExpected), eq("hive-env"),
+ expect(mockAmbariManagementController.createConfig(eq(mockClusterExpected), anyObject(StackId.class), eq("hive-env"),
capture(hiveCapture), anyString(), EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(null).once();
easyMockSupport.replayAll();
@@ -849,15 +849,15 @@ public class UpgradeCatalog240Test {
expect(falconStartupConfig.getProperties()).andReturn(falconStartupConfigProperties).anyTimes();
Capture<Map<String, String>> falconCapture = newCapture();
- expect(mockAmbariManagementController.createConfig(anyObject(StackId.class), eq(mockClusterExpected), eq("falcon-env"),
+ expect(mockAmbariManagementController.createConfig(eq(mockClusterExpected), anyObject(StackId.class), eq("falcon-env"),
capture(falconCapture), anyString(), EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(null).once();
Capture<Map<String, String>> falconCapture2 = newCapture();
- expect(mockAmbariManagementController.createConfig(anyObject(StackId.class), eq(mockClusterExpected), eq("falcon-env"),
+ expect(mockAmbariManagementController.createConfig(eq(mockClusterExpected), anyObject(StackId.class), eq("falcon-env"),
capture(falconCapture2), anyString(), EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(null).once();
Capture<Map<String, String>> falconStartupCapture = newCapture();
- expect(mockAmbariManagementController.createConfig(anyObject(StackId.class), eq(mockClusterExpected), eq("falcon-startup.properties"),
+ expect(mockAmbariManagementController.createConfig(eq(mockClusterExpected), anyObject(StackId.class), eq("falcon-startup.properties"),
capture(falconStartupCapture), anyString(), EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(null).once();
easyMockSupport.replayAll();
@@ -939,7 +939,7 @@ public class UpgradeCatalog240Test {
Capture<Map<String, String>> hbaseCapture = newCapture();
- expect(mockAmbariManagementController.createConfig(anyObject(StackId.class), eq(mockCluster), eq("hbase-site"),
+ expect(mockAmbariManagementController.createConfig(eq(mockCluster), anyObject(StackId.class), eq("hbase-site"),
capture(hbaseCapture), anyString(), EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(null).once();
easyMockSupport.replayAll();
@@ -1025,7 +1025,7 @@ public class UpgradeCatalog240Test {
expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
expect(controller.getClusters()).andReturn(clusters).anyTimes();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), anyString(), capture(propertiesCapture), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(createNiceMock(Config.class)).once();
replay(controller, injector2);
@@ -1101,9 +1101,9 @@ public class UpgradeCatalog240Test {
expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
expect(controller.getClusters()).andReturn(clusters).anyTimes();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), eq("hdfs-site"), capture(propertiesCaptureHdfsSite), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), eq("hdfs-site"), capture(propertiesCaptureHdfsSite), anyString(),
EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(createNiceMock(Config.class)).once();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), eq("hadoop-env"), capture(propertiesCaptureHadoopEnv), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), eq("hadoop-env"), capture(propertiesCaptureHadoopEnv), anyString(),
EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(createNiceMock(Config.class)).once();
replay(controller, injector2);
@@ -1169,7 +1169,7 @@ public class UpgradeCatalog240Test {
expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
expect(controller.getClusters()).andReturn(clusters).anyTimes();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), anyString(), capture(propertiesCapture), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(createNiceMock(Config.class)).once();
replay(controller, injector2);
@@ -1301,9 +1301,9 @@ public class UpgradeCatalog240Test {
expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
expect(controller.getClusters()).andReturn(clusters).anyTimes();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), eq("spark-defaults"), capture(propertiesSparkDefaultsCapture), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), eq("spark-defaults"), capture(propertiesSparkDefaultsCapture), anyString(),
EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(createNiceMock(Config.class)).once();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), eq("spark-javaopts-properties"), capture(propertiesSparkJavaOptsCapture), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), eq("spark-javaopts-properties"), capture(propertiesSparkJavaOptsCapture), anyString(),
EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(createNiceMock(Config.class)).once();
replay(controller, injector2);
@@ -1362,7 +1362,7 @@ public class UpgradeCatalog240Test {
expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
expect(controller.getClusters()).andReturn(clusters).anyTimes();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), anyString(), capture(propertiesCapture), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(createNiceMock(Config.class)).once();
replay(controller, injector2);
@@ -1421,7 +1421,7 @@ public class UpgradeCatalog240Test {
expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
expect(controller.getClusters()).andReturn(clusters).anyTimes();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), anyString(), capture(propertiesCapture), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(createNiceMock(Config.class)).once();
replay(controller, injector2);
@@ -1478,7 +1478,7 @@ public class UpgradeCatalog240Test {
expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
expect(controller.getClusters()).andReturn(clusters).anyTimes();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), anyString(), capture(propertiesCapture), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(createNiceMock(Config.class)).once();
replay(controller, injector2);
@@ -1583,7 +1583,7 @@ public class UpgradeCatalog240Test {
Capture<String> tagCapture = newCapture(CaptureType.ALL);
Capture<Map<String, Map<String, String>>> attributesCapture = newCapture(CaptureType.ALL);
- expect(controller.createConfig(anyObject(StackId.class), capture(clusterCapture), capture(typeCapture),
+ expect(controller.createConfig(capture(clusterCapture), anyObject(StackId.class), capture(typeCapture),
capture(propertiesCapture), capture(tagCapture), capture(attributesCapture) ))
.andReturn(createNiceMock(Config.class))
.anyTimes();
@@ -1739,7 +1739,7 @@ public class UpgradeCatalog240Test {
Capture<String> tagCapture = newCapture(CaptureType.ALL);
Capture<Map<String, Map<String, String>>> attributesCapture = newCapture(CaptureType.ALL);
- expect(controller.createConfig(anyObject(StackId.class), capture(clusterCapture), capture(typeCapture),
+ expect(controller.createConfig(capture(clusterCapture), anyObject(StackId.class), capture(typeCapture),
capture(propertiesCapture), capture(tagCapture), capture(attributesCapture)))
.andReturn(createNiceMock(Config.class))
.anyTimes();
@@ -2586,7 +2586,7 @@ public class UpgradeCatalog240Test {
expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
expect(controller.getClusters()).andReturn(clusters).anyTimes();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), anyString(), capture(propertiesCapture), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(createNiceMock(Config.class)).once();
replay(controller, injector2);
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
index 118d5f1..0663049 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
@@ -737,7 +737,7 @@ public class UpgradeCatalog250Test {
expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
expect(controller.getClusters()).andReturn(clusters).anyTimes();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), anyString(), capture(propertiesCapture), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
replay(controller, injector2);
@@ -824,7 +824,7 @@ public class UpgradeCatalog250Test {
expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
expect(controller.getClusters()).andReturn(clusters).anyTimes();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), anyString(), capture(propertiesCapture), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
replay(controller, injector2);
@@ -905,7 +905,7 @@ public class UpgradeCatalog250Test {
expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
expect(controller.getClusters()).andReturn(clusters).anyTimes();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), anyString(), capture(propertiesCapture), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).times(2);
replay(controller, injector2);
@@ -959,7 +959,7 @@ public class UpgradeCatalog250Test {
expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
expect(controller.getClusters()).andReturn(clusters).anyTimes();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), anyString(), capture(propertiesCapture), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
replay(controller, injector2);
@@ -1064,7 +1064,7 @@ public class UpgradeCatalog250Test {
expect(cluster.getDesiredConfigByType("ams-log4j")).andReturn(mockAmsLog4j).atLeastOnce();
expect(mockAmsLog4j.getProperties()).andReturn(oldAmsLog4j).anyTimes();
Capture<Map<String, String>> AmsLog4jCapture = EasyMock.newCapture();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), anyString(), capture(AmsLog4jCapture), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(AmsLog4jCapture), anyString(),
anyObject(Map.class))).andReturn(config).once();
Map<String, String> oldAmsHbaseLog4j = ImmutableMap.of(
@@ -1299,7 +1299,7 @@ public class UpgradeCatalog250Test {
expect(cluster.getDesiredConfigByType("ams-hbase-log4j")).andReturn(mockAmsHbaseLog4j).atLeastOnce();
expect(mockAmsHbaseLog4j.getProperties()).andReturn(oldAmsHbaseLog4j).anyTimes();
Capture<Map<String, String>> AmsHbaseLog4jCapture = EasyMock.newCapture();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), anyString(), capture(AmsHbaseLog4jCapture), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(AmsHbaseLog4jCapture), anyString(),
anyObject(Map.class))).andReturn(config).once();
replay(clusters, cluster);
@@ -1348,7 +1348,7 @@ public class UpgradeCatalog250Test {
expect(cluster.getDesiredConfigByType("logsearch-properties")).andReturn(mockLogSearchProperties).atLeastOnce();
expect(mockLogSearchProperties.getProperties()).andReturn(oldLogSearchProperties).anyTimes();
Capture<Map<String, String>> logSearchPropertiesCapture = EasyMock.newCapture();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), anyString(), capture(logSearchPropertiesCapture), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(logSearchPropertiesCapture), anyString(),
EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
Map<String, String> oldLogFeederEnv = ImmutableMap.of(
@@ -1361,7 +1361,7 @@ public class UpgradeCatalog250Test {
expect(cluster.getDesiredConfigByType("logfeeder-env")).andReturn(mockLogFeederEnv).atLeastOnce();
expect(mockLogFeederEnv.getProperties()).andReturn(oldLogFeederEnv).anyTimes();
Capture<Map<String, String>> logFeederEnvCapture = EasyMock.newCapture();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), anyString(), capture(logFeederEnvCapture), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(logFeederEnvCapture), anyString(),
EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
Map<String, String> oldLogSearchEnv = new HashMap<>();
@@ -1383,7 +1383,7 @@ public class UpgradeCatalog250Test {
expect(cluster.getDesiredConfigByType("logsearch-env")).andReturn(mockLogSearchEnv).atLeastOnce();
expect(mockLogSearchEnv.getProperties()).andReturn(oldLogSearchEnv).anyTimes();
Capture<Map<String, String>> logSearchEnvCapture = EasyMock.newCapture();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), anyString(), capture(logSearchEnvCapture), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(logSearchEnvCapture), anyString(),
EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
Map<String, String> oldLogFeederLog4j = ImmutableMap.of(
@@ -1436,7 +1436,7 @@ public class UpgradeCatalog250Test {
expect(cluster.getDesiredConfigByType("logfeeder-log4j")).andReturn(mockLogFeederLog4j).atLeastOnce();
expect(mockLogFeederLog4j.getProperties()).andReturn(oldLogFeederLog4j).anyTimes();
Capture<Map<String, String>> logFeederLog4jCapture = EasyMock.newCapture();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), anyString(), capture(logFeederLog4jCapture), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(logFeederLog4jCapture), anyString(),
EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
Map<String, String> oldLogSearchLog4j = ImmutableMap.of(
@@ -1554,7 +1554,7 @@ public class UpgradeCatalog250Test {
expect(cluster.getDesiredConfigByType("logsearch-log4j")).andReturn(mockLogSearchLog4j).atLeastOnce();
expect(mockLogSearchLog4j.getProperties()).andReturn(oldLogSearchLog4j).anyTimes();
Capture<Map<String, String>> logSearchLog4jCapture = EasyMock.newCapture();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), anyString(), capture(logSearchLog4jCapture), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(logSearchLog4jCapture), anyString(),
EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
replay(clusters, cluster);
@@ -1613,7 +1613,7 @@ public class UpgradeCatalog250Test {
expect(cluster.getDesiredConfigByType("infra-solr-env")).andReturn(mockInfraSolrEnv).atLeastOnce();
expect(mockInfraSolrEnv.getProperties()).andReturn(oldInfraSolrEnv).anyTimes();
Capture<Map<String, String>> infraSolrEnvCapture = EasyMock.newCapture();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), anyString(), capture(infraSolrEnvCapture), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(infraSolrEnvCapture), anyString(),
EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
Map<String, String> oldInfraSolrLog4j = ImmutableMap.of(
@@ -1630,7 +1630,7 @@ public class UpgradeCatalog250Test {
expect(cluster.getDesiredConfigByType("infra-solr-log4j")).andReturn(mockInfraSolrLog4j).atLeastOnce();
expect(mockInfraSolrLog4j.getProperties()).andReturn(oldInfraSolrLog4j).anyTimes();
Capture<Map<String, String>> infraSolrLog4jCapture = EasyMock.newCapture();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), anyString(), capture(infraSolrLog4jCapture), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(infraSolrLog4jCapture), anyString(),
EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
Map<String, String> oldInfraSolrClientLog4j = ImmutableMap.of(
@@ -1649,7 +1649,7 @@ public class UpgradeCatalog250Test {
expect(cluster.getDesiredConfigByType("infra-solr-client-log4j")).andReturn(mockInfraSolrClientLog4j).atLeastOnce();
expect(mockInfraSolrClientLog4j.getProperties()).andReturn(oldInfraSolrClientLog4j).anyTimes();
Capture<Map<String, String>> infraSolrClientLog4jCapture = EasyMock.newCapture();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), anyString(), capture(infraSolrClientLog4jCapture), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(infraSolrClientLog4jCapture), anyString(),
EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
replay(clusters, cluster);
@@ -1708,7 +1708,7 @@ public class UpgradeCatalog250Test {
expect(cluster.getDesiredConfigByType("hive-interactive-env")).andReturn(mockHsiEnv).atLeastOnce();
expect(mockHsiEnv.getProperties()).andReturn(oldHsiEnv).anyTimes();
Capture<Map<String, String>> hsiEnvCapture = EasyMock.newCapture();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), anyString(), capture(hsiEnvCapture), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(hsiEnvCapture), anyString(),
EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
replay(clusters, cluster);
@@ -1789,7 +1789,7 @@ public class UpgradeCatalog250Test {
expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
expect(controller.getClusters()).andReturn(clusters).anyTimes();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), anyString(), capture(propertiesCapture), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
replay(controller, injector2);
@@ -2076,7 +2076,7 @@ public class UpgradeCatalog250Test {
expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
expect(controller.getClusters()).andReturn(clusters).anyTimes();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), anyString(), capture(propertiesCapture), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
replay(controller, injector2);
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java
index b5f0e09..43707dd 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java
@@ -300,7 +300,7 @@ public class UpgradeCatalog300Test {
expect(confLogSearchConf1.getProperties()).andReturn(oldLogSearchConf).once();
expect(confLogSearchConf2.getProperties()).andReturn(oldLogSearchConf).once();
Capture<Map<String, String>> logSearchConfCapture = EasyMock.newCapture(CaptureType.ALL);
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), anyString(), capture(logSearchConfCapture), anyString(),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(logSearchConfCapture), anyString(),
EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).times(2);
Map<String, String> oldLogSearchProperties = ImmutableMap.of(
@@ -315,14 +315,14 @@ public class UpgradeCatalog300Test {
expect(cluster.getDesiredConfigByType("logfeeder-properties")).andReturn(logFeederPropertiesConf).times(2);
expect(logFeederPropertiesConf.getProperties()).andReturn(Collections.<String, String> emptyMap()).once();
Capture<Map<String, String>> logFeederPropertiesCapture = EasyMock.newCapture();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), eq("logfeeder-properties"), capture(logFeederPropertiesCapture),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), eq("logfeeder-properties"), capture(logFeederPropertiesCapture),
anyString(), EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
Config logSearchPropertiesConf = easyMockSupport.createNiceMock(Config.class);
expect(cluster.getDesiredConfigByType("logsearch-properties")).andReturn(logSearchPropertiesConf).times(2);
expect(logSearchPropertiesConf.getProperties()).andReturn(oldLogSearchProperties).times(2);
Capture<Map<String, String>> logSearchPropertiesCapture = EasyMock.newCapture();
- expect(controller.createConfig(anyObject(StackId.class), anyObject(Cluster.class), eq("logsearch-properties"), capture(logSearchPropertiesCapture),
+ expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), eq("logsearch-properties"), capture(logSearchPropertiesCapture),
anyString(), EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
replay(clusters, cluster);
[2/3] ambari git commit: AMBARI-21078 - Merging Configurations On
Service/Patch Upgrades Should Create New Configurations Only For Included
Services (jonathanhurley)
Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
index 23b6db1..2c786b5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
@@ -1530,12 +1530,20 @@ public class ClusterImpl implements Cluster {
long nextServiceConfigVersion = serviceConfigDAO.findNextServiceConfigVersion(clusterId,
serviceName);
+ // get the correct stack ID to use when creating the service config
+ StackEntity stackEntity = clusterEntity.getDesiredStack();
+ Service service = services.get(serviceName);
+ if (null != service) {
+ StackId serviceStackId = service.getDesiredStackId();
+ stackEntity = stackDAO.find(serviceStackId);
+ }
+
serviceConfigEntity.setServiceName(serviceName);
serviceConfigEntity.setClusterEntity(clusterEntity);
serviceConfigEntity.setVersion(nextServiceConfigVersion);
serviceConfigEntity.setUser(user);
serviceConfigEntity.setNote(note);
- serviceConfigEntity.setStack(clusterEntity.getDesiredStack());
+ serviceConfigEntity.setStack(stackEntity);
serviceConfigDAO.create(serviceConfigEntity);
if (configGroup != null) {
@@ -2320,30 +2328,50 @@ public class ClusterImpl implements Cluster {
*/
@Override
@Transactional
- public void applyLatestConfigurations(StackId stackId) {
+ public void applyLatestConfigurations(StackId stackId, String serviceName) {
clusterGlobalLock.writeLock().lock();
try {
+ // grab all of the configurations and hash them so we can easily update them when picking and choosing only those from the service
ClusterEntity clusterEntity = getClusterEntity();
Collection<ClusterConfigEntity> configEntities = clusterEntity.getClusterConfigEntities();
-
- // hash them for easier retrieval later
ImmutableMap<Object, ClusterConfigEntity> clusterConfigEntityMap = Maps.uniqueIndex(
configEntities, Functions.identity());
- // disable all configs
- for (ClusterConfigEntity e : configEntities) {
- LOG.debug("Disabling configuration {} with tag {}", e.getType(), e.getTag());
- e.setSelected(false);
+ // find the latest configurations for the service
+ Set<String> configTypesForService = new HashSet<>();
+ List<ServiceConfigEntity> latestServiceConfigs = serviceConfigDAO.getLastServiceConfigsForService(
+ getClusterId(), serviceName);
+
+ // process the current service configurations
+ for (ServiceConfigEntity serviceConfig : latestServiceConfigs) {
+ List<ClusterConfigEntity> latestConfigs = serviceConfig.getClusterConfigEntities();
+ for( ClusterConfigEntity latestConfig : latestConfigs ){
+ // grab the hash'd entity from the map so we're working with the right one
+ latestConfig = clusterConfigEntityMap.get(latestConfig);
+
+ // add the config type to our list for tracking later on
+ configTypesForService.add(latestConfig.getType());
+
+ // un-select the latest configuration for the service
+ LOG.debug("Disabling configuration {} with tag {}", latestConfig.getType(), latestConfig.getTag());
+ latestConfig.setSelected(false);
+ }
}
- // work through the in-memory list, finding only the most recent mapping per type
+ // get the latest configurations for the given stack which we're going to make active
Collection<ClusterConfigEntity> latestConfigsByStack = clusterDAO.getLatestConfigurations(
clusterId, stackId);
- // pull the correct latest mapping for the stack out of the cached map
- // from the cluster entity
+ // set the service configuration for the specified stack to the latest
for (ClusterConfigEntity latestConfigByStack : latestConfigsByStack) {
+ // since we're iterating over all configuration types, only work with those that are for our service
+ if (!configTypesForService.contains(latestConfigByStack.getType())) {
+ continue;
+ }
+
+ // pull the correct latest mapping for the stack out of the cached map
+ // from the cluster entity
ClusterConfigEntity entity = clusterConfigEntityMap.get(latestConfigByStack);
entity.setSelected(true);
@@ -2358,14 +2386,15 @@ public class ClusterImpl implements Cluster {
clusterEntity = clusterDAO.merge(clusterEntity);
cacheConfigurations();
+
+ LOG.info(
+ "Applied latest configurations for {} on stack {}. The following types were modified: {}",
+ serviceName, stackId, StringUtils.join(configTypesForService, ','));
+
} finally {
clusterGlobalLock.writeLock().unlock();
}
- LOG.info(
- "Applied latest configurations for {} on stack {}. The desired configurations are now {}",
- getClusterName(), stackId, getDesiredConfigs());
-
// publish an event to instruct entity managers to clear cached instances of
// ClusterEntity immediately - it takes EclipseLink about 1000ms to update
// the L1 caches of other threads and the action scheduler could act upon
@@ -2389,14 +2418,18 @@ public class ClusterImpl implements Cluster {
}
/**
- * Removes all configurations associated with the specified stack. The caller
- * should make sure the cluster global write lock is acquired.
+ * Removes all configurations associated with the specified stack for the
+ * specified service. The caller should make sure the cluster global write
+ * lock is acquired.
*
* @param stackId
+ * the stack to remove configurations for (not {@code null}).
+ * @param serviceName
+ * the service name (not {@code null}).
* @see #clusterGlobalLock
*/
@Transactional
- void removeAllConfigsForStack(StackId stackId) {
+ void removeAllConfigsForStack(StackId stackId, String serviceName) {
ClusterEntity clusterEntity = getClusterEntity();
// make sure the entity isn't stale in the current unit of work.
@@ -2404,53 +2437,50 @@ public class ClusterImpl implements Cluster {
long clusterId = clusterEntity.getClusterId();
+ // keep track of any types removed for logging purposes
+ Set<String> removedConfigurationTypes = new HashSet<>();
+
// this will keep track of cluster config mappings that need removal
// since there is no relationship between configs and their mappings, we
// have to do it manually
List<ClusterConfigEntity> removedClusterConfigs = new ArrayList<>(50);
- Collection<ClusterConfigEntity> clusterConfigEntities = clusterEntity.getClusterConfigEntities();
+ Collection<ClusterConfigEntity> allClusterConfigEntities = clusterEntity.getClusterConfigEntities();
+ Collection<ServiceConfigEntity> allServiceConfigEntities = clusterEntity.getServiceConfigEntities();
- List<ServiceConfigEntity> serviceConfigs = serviceConfigDAO.getAllServiceConfigsForClusterAndStack(
- clusterId, stackId);
+ // get the service configs only for the service
+ List<ServiceConfigEntity> serviceConfigs = serviceConfigDAO.getServiceConfigsForServiceAndStack(
+ clusterId, stackId, serviceName);
// remove all service configurations and associated configs
- Collection<ServiceConfigEntity> serviceConfigEntities = clusterEntity.getServiceConfigEntities();
-
for (ServiceConfigEntity serviceConfig : serviceConfigs) {
for (ClusterConfigEntity configEntity : serviceConfig.getClusterConfigEntities()) {
- clusterConfigEntities.remove(configEntity);
+ removedConfigurationTypes.add(configEntity.getType());
+
+ allClusterConfigEntities.remove(configEntity);
clusterDAO.removeConfig(configEntity);
removedClusterConfigs.add(configEntity);
}
serviceConfig.getClusterConfigEntities().clear();
serviceConfigDAO.remove(serviceConfig);
- serviceConfigEntities.remove(serviceConfig);
+ allServiceConfigEntities.remove(serviceConfig);
}
- // remove any leftover cluster configurations that don't have a service
- // configuration (like cluster-env)
- List<ClusterConfigEntity> clusterConfigs = clusterDAO.getAllConfigurations(
- clusterId, stackId);
-
- for (ClusterConfigEntity clusterConfig : clusterConfigs) {
- clusterConfigEntities.remove(clusterConfig);
- clusterDAO.removeConfig(clusterConfig);
- removedClusterConfigs.add(clusterConfig);
- }
-
- clusterEntity.setClusterConfigEntities(clusterConfigEntities);
+ clusterEntity.setClusterConfigEntities(allClusterConfigEntities);
clusterEntity = clusterDAO.merge(clusterEntity);
+
+ LOG.info("Removed the following configuration types for {} on stack {}: {}", serviceName,
+ stackId, StringUtils.join(removedConfigurationTypes, ','));
}
/**
* {@inheritDoc}
*/
@Override
- public void removeConfigurations(StackId stackId) {
+ public void removeConfigurations(StackId stackId, String serviceName) {
clusterGlobalLock.writeLock().lock();
try {
- removeAllConfigsForStack(stackId);
+ removeAllConfigsForStack(stackId, serviceName);
cacheConfigurations();
} finally {
clusterGlobalLock.writeLock().unlock();
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroup.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroup.java b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroup.java
index 60780dd..a4be480 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroup.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroup.java
@@ -124,7 +124,7 @@ public interface ConfigGroup {
* Reassign the set of configs associated with this config group
* @param configs
*/
- void setConfigurations(Map<String, Config> configs);
+ void setConfigurations(Map<String, Config> configs) throws AmbariException;
/**
* Remove host mapping
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java
index 2209dc1..ae6cde9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupFactory.java
@@ -19,6 +19,8 @@ package org.apache.ambari.server.state.configgroup;
import java.util.Map;
+import javax.annotation.Nullable;
+
import org.apache.ambari.server.orm.entities.ConfigGroupEntity;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Config;
@@ -30,7 +32,8 @@ public interface ConfigGroupFactory {
/**
* Creates and saves a new {@link ConfigGroup}.
*/
- ConfigGroup createNew(@Assisted("cluster") Cluster cluster, @Assisted("name") String name,
+ ConfigGroup createNew(@Assisted("cluster") Cluster cluster,
+ @Assisted("serviceName") @Nullable String serviceName, @Assisted("name") String name,
@Assisted("tag") String tag, @Assisted("description") String description,
@Assisted("configs") Map<String, Config> configs, @Assisted("hosts") Map<Long, Host> hosts);
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
index a04df3c..cb0d200 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/configgroup/ConfigGroupImpl.java
@@ -28,6 +28,8 @@ import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.ReadWriteLock;
+import javax.annotation.Nullable;
+
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.DuplicateResourceException;
import org.apache.ambari.server.controller.ConfigGroupResponse;
@@ -50,6 +52,7 @@ import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.Config;
import org.apache.ambari.server.state.ConfigFactory;
import org.apache.ambari.server.state.Host;
+import org.apache.ambari.server.state.Service;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -64,6 +67,7 @@ public class ConfigGroupImpl implements ConfigGroup {
private ConcurrentMap<Long, Host> m_hosts;
private ConcurrentMap<String, Config> m_configurations;
private String configGroupName;
+ private String serviceName;
private long configGroupId;
/**
@@ -90,13 +94,15 @@ public class ConfigGroupImpl implements ConfigGroup {
private final ConfigFactory configFactory;
@AssistedInject
- public ConfigGroupImpl(@Assisted("cluster") Cluster cluster, @Assisted("name") String name,
+ public ConfigGroupImpl(@Assisted("cluster") Cluster cluster,
+ @Assisted("serviceName") @Nullable String serviceName, @Assisted("name") String name,
@Assisted("tag") String tag, @Assisted("description") String description,
@Assisted("configs") Map<String, Config> configurations,
@Assisted("hosts") Map<Long, Host> hosts, Clusters clusters, ConfigFactory configFactory,
ClusterDAO clusterDAO, HostDAO hostDAO, ConfigGroupDAO configGroupDAO,
ConfigGroupConfigMappingDAO configGroupConfigMappingDAO,
- ConfigGroupHostMappingDAO configGroupHostMappingDAO, LockFactory lockFactory) {
+ ConfigGroupHostMappingDAO configGroupHostMappingDAO, LockFactory lockFactory)
+ throws AmbariException {
this.configFactory = configFactory;
this.clusterDAO = clusterDAO;
@@ -108,6 +114,7 @@ public class ConfigGroupImpl implements ConfigGroup {
hostLock = lockFactory.newReadWriteLock(hostLockLabel);
this.cluster = cluster;
+ this.serviceName = serviceName;
configGroupName = name;
ConfigGroupEntity configGroupEntity = new ConfigGroupEntity();
@@ -115,6 +122,7 @@ public class ConfigGroupImpl implements ConfigGroup {
configGroupEntity.setGroupName(name);
configGroupEntity.setTag(tag);
configGroupEntity.setDescription(description);
+ configGroupEntity.setServiceName(serviceName);
m_hosts = hosts == null ? new ConcurrentHashMap<Long, Host>()
: new ConcurrentHashMap<>(hosts);
@@ -146,6 +154,7 @@ public class ConfigGroupImpl implements ConfigGroup {
this.cluster = cluster;
configGroupId = configGroupEntity.getGroupId();
configGroupName = configGroupEntity.getGroupName();
+ serviceName = configGroupEntity.getServiceName();
m_configurations = new ConcurrentHashMap<>();
m_hosts = new ConcurrentHashMap<>();
@@ -260,7 +269,7 @@ public class ConfigGroupImpl implements ConfigGroup {
* Helper method to recreate configs mapping
*/
@Override
- public void setConfigurations(Map<String, Config> configurations) {
+ public void setConfigurations(Map<String, Config> configurations) throws AmbariException {
ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
ClusterEntity clusterEntity = configGroupEntity.getClusterEntity();
@@ -323,7 +332,7 @@ public class ConfigGroupImpl implements ConfigGroup {
/**
* @param configGroupEntity
*/
- private void persist(ConfigGroupEntity configGroupEntity) {
+ private void persist(ConfigGroupEntity configGroupEntity) throws AmbariException {
persistEntities(configGroupEntity);
cluster.refresh();
}
@@ -334,7 +343,7 @@ public class ConfigGroupImpl implements ConfigGroup {
* @throws Exception
*/
@Transactional
- void persistEntities(ConfigGroupEntity configGroupEntity) {
+ void persistEntities(ConfigGroupEntity configGroupEntity) throws AmbariException {
ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());
configGroupEntity.setClusterEntity(clusterEntity);
configGroupEntity.setTimestamp(System.currentTimeMillis());
@@ -396,8 +405,8 @@ public class ConfigGroupImpl implements ConfigGroup {
* @throws Exception
*/
@Transactional
- void persistConfigMapping(ClusterEntity clusterEntity,
- ConfigGroupEntity configGroupEntity, Map<String, Config> configurations) {
+ void persistConfigMapping(ClusterEntity clusterEntity, ConfigGroupEntity configGroupEntity,
+ Map<String, Config> configurations) throws AmbariException {
configGroupConfigMappingDAO.removeAllByGroup(configGroupEntity.getGroupId());
configGroupEntity.setConfigGroupConfigMappingEntities(
new HashSet<ConfigGroupConfigMappingEntity>());
@@ -409,8 +418,11 @@ public class ConfigGroupImpl implements ConfigGroup {
(cluster.getClusterId(), config.getType(), config.getTag());
if (clusterConfigEntity == null) {
- config = configFactory.createNew(null, cluster, config.getType(), config.getTag(),
- config.getProperties(), config.getPropertiesAttributes());
+ String serviceName = getServiceName();
+ Service service = cluster.getService(serviceName);
+
+ config = configFactory.createNew(service.getDesiredStackId(), cluster, config.getType(),
+ config.getTag(), config.getProperties(), config.getPropertiesAttributes());
entry.setValue(config);
@@ -498,8 +510,7 @@ public class ConfigGroupImpl implements ConfigGroup {
@Override
public String getServiceName() {
- ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
- return configGroupEntity.getServiceName();
+ return serviceName;
}
@Override
@@ -507,6 +518,8 @@ public class ConfigGroupImpl implements ConfigGroup {
ConfigGroupEntity configGroupEntity = getConfigGroupEntity();
configGroupEntity.setServiceName(serviceName);
configGroupDAO.merge(configGroupEntity);
+
+ this.serviceName = serviceName;
}
/**
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/UpgradeScope.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/UpgradeScope.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/UpgradeScope.java
index f35bd68..9a436b6 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/UpgradeScope.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/UpgradeScope.java
@@ -48,13 +48,4 @@ public enum UpgradeScope {
@XmlEnumValue("ANY")
@SerializedName("any")
ANY;
-
- public boolean isScoped(UpgradeScope scope) {
- if (ANY == this || ANY == scope) {
- return true;
- }
-
- return this == scope;
- }
-
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredConfigPropertiesValidator.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredConfigPropertiesValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredConfigPropertiesValidator.java
index 759d9e9..c707df3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredConfigPropertiesValidator.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredConfigPropertiesValidator.java
@@ -84,7 +84,8 @@ public class RequiredConfigPropertiesValidator implements TopologyValidator {
for (String configType : requiredPropertiesByType.keySet()) {
// We need a copy not to modify the original
- Collection<String> requiredPropertiesForType = new HashSet(requiredPropertiesByType.get(configType));
+ Collection<String> requiredPropertiesForType = new HashSet(
+ requiredPropertiesByType.get(configType));
if (!operationalConfigurations.containsKey(configType)) {
// all required configuration is missing for the config type
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
index 5939fca..3f15400 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
@@ -587,7 +587,8 @@ public abstract class AbstractUpgradeCatalog implements UpgradeCatalog {
propertiesAttributes = Collections.emptyMap();
}
- controller.createConfig(cluster.getDesiredStackVersion(), cluster, configType, mergedProperties, newTag, propertiesAttributes);
+ controller.createConfig(cluster, cluster.getDesiredStackVersion(), configType,
+ mergedProperties, newTag, propertiesAttributes);
Config baseConfig = cluster.getConfig(configType, newTag);
if (baseConfig != null) {
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionSchedulerThreading.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionSchedulerThreading.java b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionSchedulerThreading.java
index bc24246..7d6f066 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionSchedulerThreading.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionSchedulerThreading.java
@@ -34,12 +34,14 @@ import org.apache.ambari.server.events.publishers.JPAEventPublisher;
import org.apache.ambari.server.orm.GuiceJpaInitializer;
import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
import org.apache.ambari.server.orm.OrmTestHelper;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.Config;
import org.apache.ambari.server.state.ConfigFactory;
import org.apache.ambari.server.state.ConfigHelper;
import org.apache.ambari.server.state.DesiredConfig;
+import org.apache.ambari.server.state.Service;
import org.apache.ambari.server.state.StackId;
import org.junit.After;
import org.junit.Before;
@@ -98,37 +100,44 @@ public class TestActionSchedulerThreading {
StackId stackId = cluster.getCurrentStackVersion();
StackId newStackId = new StackId("HDP-2.2.0");
+ RepositoryVersionEntity repoVersion220 = ormTestHelper.getOrCreateRepositoryVersion(newStackId, "2.2.0-1234");
// make sure the stacks are different
Assert.assertFalse(stackId.equals(newStackId));
+ // add a service
+ String serviceName = "ZOOKEEPER";
+ RepositoryVersionEntity repositoryVersion = ormTestHelper.getOrCreateRepositoryVersion(cluster);
+ Service service = cluster.addService(serviceName, repositoryVersion);
+ String configType = "zoo.cfg";
+
Map<String, String> properties = new HashMap<>();
Map<String, Map<String, String>> propertiesAttributes = new HashMap<>();
ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
- // foo-type for v1 on current stack
+ // zoo-cfg for v1 on current stack
properties.put("foo-property-1", "foo-value-1");
- Config c1 = configFactory.createNew(cluster, "foo-type", "version-1", properties, propertiesAttributes);
+ Config c1 = configFactory.createNew(stackId, cluster, configType, "version-1", properties, propertiesAttributes);
// make v1 "current"
cluster.addDesiredConfig("admin", Sets.newHashSet(c1), "note-1");
// bump the stack
- cluster.setDesiredStackVersion(newStackId);
+ service.setDesiredRepositoryVersion(repoVersion220);
// save v2
- // foo-type for v2 on new stack
+ // zoo-cfg for v2 on new stack
properties.put("foo-property-2", "foo-value-2");
- Config c2 = configFactory.createNew(cluster, "foo-type", "version-2", properties, propertiesAttributes);
+ Config c2 = configFactory.createNew(newStackId, cluster, configType, "version-2", properties, propertiesAttributes);
// make v2 "current"
cluster.addDesiredConfig("admin", Sets.newHashSet(c2), "note-2");
// check desired config
Map<String, DesiredConfig> desiredConfigs = cluster.getDesiredConfigs();
- DesiredConfig desiredConfig = desiredConfigs.get("foo-type");
- desiredConfig = desiredConfigs.get("foo-type");
+ DesiredConfig desiredConfig = desiredConfigs.get(configType);
+ desiredConfig = desiredConfigs.get(configType);
assertNotNull(desiredConfig);
assertEquals(Long.valueOf(2), desiredConfig.getVersion());
assertEquals("version-2", desiredConfig.getTag());
@@ -136,7 +145,7 @@ public class TestActionSchedulerThreading {
final String hostName = cluster.getHosts().iterator().next().getHostName();
// move the stack back to the old stack
- cluster.setDesiredStackVersion(stackId);
+ service.setDesiredRepositoryVersion(repositoryVersion);
// create the semaphores, taking 1 from each to make them blocking from the
// start
@@ -158,7 +167,7 @@ public class TestActionSchedulerThreading {
threadInitialCachingSemaphore.acquire();
// apply the configs for the old stack
- cluster.applyLatestConfigurations(stackId);
+ cluster.applyLatestConfigurations(stackId, serviceName);
// wake the thread up and have it verify that it can see the updated configs
applyLatestConfigsSemaphore.release();
@@ -226,11 +235,11 @@ public class TestActionSchedulerThreading {
// L1 cache
Cluster cluster = clusters.getCluster(clusterId);
- // {foo-type={tag=version-2}}
+ // {zoo.cfg={tag=version-2}}
Map<String, Map<String, String>> effectiveDesiredTags = configHelper.getEffectiveDesiredTags(
cluster, hostName);
- assertEquals("version-2", effectiveDesiredTags.get("foo-type").get("tag"));
+ assertEquals("version-2", effectiveDesiredTags.get("zoo.cfg").get("tag"));
// signal the caller that we're done making our initial call to populate
// the EntityManager
@@ -239,9 +248,9 @@ public class TestActionSchedulerThreading {
// wait for the method to switch configs
applyLatestConfigsSemaphore.acquire();
- // {foo-type={tag=version-1}}
+ // {zoo.cfg={tag=version-1}}
effectiveDesiredTags = configHelper.getEffectiveDesiredTags(cluster, hostName);
- assertEquals("version-1", effectiveDesiredTags.get("foo-type").get("tag"));
+ assertEquals("version-1", effectiveDesiredTags.get("zoo.cfg").get("tag"));
} catch (Throwable throwable) {
this.throwable = throwable;
} finally {
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java
index 5feb3cc..560d8a1 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/AgentResourceTest.java
@@ -35,6 +35,7 @@ import org.apache.ambari.server.actionmanager.HostRoleCommandFactoryImpl;
import org.apache.ambari.server.actionmanager.StageFactory;
import org.apache.ambari.server.agent.rest.AgentResource;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.controller.AmbariManagementController;
import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
import org.apache.ambari.server.metadata.CachedRoleCommandOrderProvider;
import org.apache.ambari.server.metadata.RoleCommandOrderProvider;
@@ -323,6 +324,7 @@ public class AgentResourceTest extends RandomPortJerseyTest {
bind(Clusters.class).toInstance(createNiceMock(Clusters.class));
bind(PersistedState.class).toInstance(createNiceMock(PersistedState.class));
bind(RoleCommandOrderProvider.class).to(CachedRoleCommandOrderProvider.class);
+ bind(AmbariManagementController.class).toInstance(createNiceMock(AmbariManagementController.class));
}
private void installDependencies() {
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index 9c723c1..a12e834 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -435,7 +435,7 @@ public class AmbariManagementControllerTest {
controller.deleteHostComponents(requests);
}
- private Long createConfigGroup(Cluster cluster, String name, String tag,
+ private Long createConfigGroup(Cluster cluster, String serviceName, String name, String tag,
List<String> hosts, List<Config> configs)
throws AmbariException {
@@ -452,9 +452,11 @@ public class AmbariManagementControllerTest {
configMap.put(config.getType(), config);
}
- ConfigGroup configGroup = configGroupFactory.createNew(cluster, name,
+ ConfigGroup configGroup = configGroupFactory.createNew(cluster, serviceName, name,
tag, "", configMap, hostMap);
+ configGroup.setServiceName(serviceName);
+
cluster.addConfigGroup(configGroup);
return configGroup.getId();
@@ -6662,8 +6664,8 @@ public class AmbariManagementControllerTest {
configs = new HashMap<>();
configs.put("a", "c");
cluster = clusters.getCluster(cluster1);
- final Config config = configFactory.createReadOnly("core-site", "version122", configs, null);
- Long groupId = createConfigGroup(cluster, group1, tag1,
+ final Config config = configFactory.createReadOnly("core-site", "version122", configs, null);
+ Long groupId = createConfigGroup(cluster, serviceName1, group1, tag1,
new ArrayList<String>() {{ add(host1); }},
new ArrayList<Config>() {{ add(config); }});
@@ -6674,7 +6676,7 @@ public class AmbariManagementControllerTest {
configs.put("a", "c");
final Config config2 = configFactory.createReadOnly("mapred-site", "version122", configs, null);
- groupId = createConfigGroup(cluster, group2, tag2,
+ groupId = createConfigGroup(cluster, serviceName2, group2, tag2,
new ArrayList<String>() {{ add(host1); }},
new ArrayList<Config>() {{ add(config2); }});
@@ -6817,7 +6819,7 @@ public class AmbariManagementControllerTest {
ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
final Config config = configFactory.createReadOnly("hdfs-site", "version122", configs, null);
- Long groupId = createConfigGroup(clusters.getCluster(cluster1), group1, tag1,
+ Long groupId = createConfigGroup(clusters.getCluster(cluster1), serviceName, group1, tag1,
new ArrayList<String>() {{
add(host1);
add(host2);
@@ -6926,7 +6928,7 @@ public class AmbariManagementControllerTest {
ConfigFactory configFactory = injector.getInstance(ConfigFactory.class);
final Config config = configFactory.createReadOnly("hdfs-site", "version122", configs, null);
- Long groupId = createConfigGroup(clusters.getCluster(cluster1), group1, tag1,
+ Long groupId = createConfigGroup(clusters.getCluster(cluster1), serviceName, group1, tag1,
new ArrayList<String>() {{ add(host1); add(host2); }},
new ArrayList<Config>() {{ add(config); }});
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProviderTest.java
index 5b69270..12cbadf 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProviderTest.java
@@ -171,13 +171,14 @@ public class ConfigGroupResourceProviderTest {
expect(hostEntity2.getHostId()).andReturn(2L).atLeastOnce();
Capture<Cluster> clusterCapture = newCapture();
+ Capture<String> serviceName = newCapture();
Capture<String> captureName = newCapture();
Capture<String> captureDesc = newCapture();
Capture<String> captureTag = newCapture();
Capture<Map<String, Config>> captureConfigs = newCapture();
Capture<Map<Long, Host>> captureHosts = newCapture();
- expect(configGroupFactory.createNew(capture(clusterCapture),
+ expect(configGroupFactory.createNew(capture(clusterCapture), capture(serviceName),
capture(captureName), capture(captureTag), capture(captureDesc),
capture(captureConfigs), capture(captureHosts))).andReturn(configGroup);
@@ -282,7 +283,7 @@ public class ConfigGroupResourceProviderTest {
expect(managementController.getAuthName()).andReturn("admin").anyTimes();
expect(cluster.getConfigGroups()).andReturn(configGroupMap);
- expect(configGroupFactory.createNew((Cluster) anyObject(), (String) anyObject(),
+ expect(configGroupFactory.createNew((Cluster) anyObject(), (String) anyObject(), (String) anyObject(),
(String) anyObject(), (String) anyObject(), EasyMock.<Map<String, Config>>anyObject(),
EasyMock.<Map<Long, Host>>anyObject())).andReturn(configGroup).anyTimes();
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackUpgradeConfigurationMergeTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackUpgradeConfigurationMergeTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackUpgradeConfigurationMergeTest.java
index 4408492..f79b1c2 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackUpgradeConfigurationMergeTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackUpgradeConfigurationMergeTest.java
@@ -57,6 +57,7 @@ import org.apache.ambari.server.state.ServiceInfo;
import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.UpgradeContext;
import org.apache.ambari.server.state.UpgradeContextFactory;
+import org.apache.ambari.server.state.UpgradeHelper;
import org.apache.ambari.server.state.configgroup.ConfigGroupFactory;
import org.apache.ambari.server.state.scheduler.RequestExecutionFactory;
import org.apache.ambari.server.state.stack.OsFamily;
@@ -205,11 +206,11 @@ public class StackUpgradeConfigurationMergeTest extends EasyMockSupport {
// HDP 2.4 configs
EasyMock.expect(configHelper.getDefaultProperties(EasyMock.eq(s_currentStackId),
- EasyMock.anyObject(Cluster.class), EasyMock.anyBoolean())).andReturn(oldStackDefaultConfigurationsByType);
+ EasyMock.anyString())).andReturn(oldStackDefaultConfigurationsByType);
// HDP 2.5 configs
EasyMock.expect(configHelper.getDefaultProperties(EasyMock.eq(s_targetStackId),
- EasyMock.anyObject(Cluster.class), EasyMock.anyBoolean())).andReturn(newConfigurationsByType);
+ EasyMock.anyString())).andReturn(newConfigurationsByType);
// CURRENT HDP 2.4 configs
Config currentClusterConfigFoo = createNiceMock(Config.class);
@@ -238,6 +239,7 @@ public class StackUpgradeConfigurationMergeTest extends EasyMockSupport {
Capture<Map<String, Map<String, String>>> capturedArgument = EasyMock.newCapture();
configHelper.createConfigTypes(EasyMock.anyObject(Cluster.class),
EasyMock.anyObject(AmbariManagementController.class),
+ EasyMock.anyObject(StackId.class),
EasyMock.capture(capturedArgument),
EasyMock.anyString(), EasyMock.anyString());
@@ -252,10 +254,8 @@ public class StackUpgradeConfigurationMergeTest extends EasyMockSupport {
EasyMock.expect(upgradeContext.getTargetRepositoryVersion(EasyMock.anyString())).andReturn(repositoryVersionEntity).anyTimes();
replayAll();
- UpgradeResourceProvider upgradeResourceProvider = new UpgradeResourceProvider(amc);
- m_injector.injectMembers(upgradeResourceProvider);
-
- upgradeResourceProvider.applyStackAndProcessConfigurations(upgradeContext);
+ UpgradeHelper upgradeHelper = m_injector.getInstance(UpgradeHelper.class);
+ upgradeHelper.updateDesiredRepositoriesAndConfigs(upgradeContext);
// assertion time!
Map<String, Map<String, String>> mergedConfigurations = capturedArgument.getValue();
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
index 3780ea5..04773bc 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
@@ -65,7 +65,6 @@ import org.apache.ambari.server.controller.utilities.PropertyHelper;
import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
import org.apache.ambari.server.orm.GuiceJpaInitializer;
import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.orm.OrmTestHelper;
import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
import org.apache.ambari.server.orm.dao.RequestDAO;
@@ -79,6 +78,7 @@ import org.apache.ambari.server.orm.entities.StackEntity;
import org.apache.ambari.server.orm.entities.StageEntity;
import org.apache.ambari.server.orm.entities.UpgradeEntity;
import org.apache.ambari.server.orm.entities.UpgradeGroupEntity;
+import org.apache.ambari.server.orm.entities.UpgradeHistoryEntity;
import org.apache.ambari.server.orm.entities.UpgradeItemEntity;
import org.apache.ambari.server.security.TestAuthenticationFactory;
import org.apache.ambari.server.serveraction.upgrades.AutoSkipFailedSummaryAction;
@@ -95,7 +95,6 @@ import org.apache.ambari.server.state.ServiceComponent;
import org.apache.ambari.server.state.ServiceComponentHost;
import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.UpgradeContext;
-import org.apache.ambari.server.state.UpgradeContextFactory;
import org.apache.ambari.server.state.UpgradeHelper;
import org.apache.ambari.server.state.UpgradeState;
import org.apache.ambari.server.state.stack.UpgradePack;
@@ -133,7 +132,6 @@ public class UpgradeResourceProviderTest {
private RepositoryVersionDAO repoVersionDao = null;
private Injector injector;
private Clusters clusters;
- private OrmTestHelper helper;
private AmbariManagementController amc;
private ConfigHelper configHelper;
private StackDAO stackDAO;
@@ -141,7 +139,6 @@ public class UpgradeResourceProviderTest {
private TopologyManager topologyManager;
private ConfigFactory configFactory;
private HostRoleCommandDAO hrcDAO;
- private UpgradeContextFactory upgradeContextFactory;
RepositoryVersionEntity repoVersionEntity2110;
RepositoryVersionEntity repoVersionEntity2111;
@@ -162,7 +159,7 @@ public class UpgradeResourceProviderTest {
expect(
configHelper.getDefaultProperties(EasyMock.anyObject(StackId.class),
- EasyMock.anyObject(Cluster.class), EasyMock.anyBoolean())).andReturn(
+ EasyMock.anyString())).andReturn(
new HashMap<String, Map<String, String>>()).anyTimes();
@@ -176,13 +173,9 @@ public class UpgradeResourceProviderTest {
H2DatabaseCleaner.resetSequences(injector);
injector.getInstance(GuiceJpaInitializer.class);
-
- helper = injector.getInstance(OrmTestHelper.class);
-
amc = injector.getInstance(AmbariManagementController.class);
ambariMetaInfo = injector.getInstance(AmbariMetaInfo.class);
configFactory = injector.getInstance(ConfigFactory.class);
- upgradeContextFactory = injector.getInstance(UpgradeContextFactory.class);
Field field = AmbariServer.class.getDeclaredField("clusterController");
field.setAccessible(true);
@@ -203,8 +196,7 @@ public class UpgradeResourceProviderTest {
// For now, Ignore the tests that fail.
StackEntity stackEntity211 = stackDAO.find("HDP", "2.1.1");
StackEntity stackEntity220 = stackDAO.find("HDP", "2.2.0");
- StackId stack211 = new StackId("HDP-2.1.1");
- StackId stack220 = new StackId("HDP-2.2.0");
+ StackId stack211 = new StackId(stackEntity211);
repoVersionEntity2110 = new RepositoryVersionEntity();
repoVersionEntity2110.setDisplayName("My New Version 1");
@@ -232,9 +224,6 @@ public class UpgradeResourceProviderTest {
clusters.addCluster("c1", stack211);
Cluster cluster = clusters.getCluster("c1");
- helper.getOrCreateRepositoryVersion(stack211, stack211.getStackVersion());
- helper.getOrCreateRepositoryVersion(stack220, stack220.getStackVersion());
-
clusters.addHost("h1");
Host host = clusters.getHost("h1");
Map<String, String> hostAttributes = new HashMap<>();
@@ -245,9 +234,8 @@ public class UpgradeResourceProviderTest {
clusters.mapHostToCluster("h1", "c1");
- // add a single ZK server
+ // add a single ZK server and client on 2.1.1.0
Service service = cluster.addService("ZOOKEEPER", repoVersionEntity2110);
-
ServiceComponent component = service.addServiceComponent("ZOOKEEPER_SERVER");
ServiceComponentHost sch = component.addServiceComponentHost("h1");
sch.setVersion("2.1.1.0");
@@ -1113,9 +1101,21 @@ public class UpgradeResourceProviderTest {
*/
@Test
public void testMergeConfigurations() throws Exception {
+ RepositoryVersionEntity repoVersion211 = createNiceMock(RepositoryVersionEntity.class);
+ RepositoryVersionEntity repoVersion220 = createNiceMock(RepositoryVersionEntity.class);
+
StackId stack211 = new StackId("HDP-2.1.1");
StackId stack220 = new StackId("HDP-2.2.0");
+ String version211 = "2.1.1.0-1234";
+ String version220 = "2.2.0.0-1234";
+
+ EasyMock.expect(repoVersion211.getStackId()).andReturn(stack211).atLeastOnce();
+ EasyMock.expect(repoVersion211.getVersion()).andReturn(version211).atLeastOnce();
+
+ EasyMock.expect(repoVersion220.getStackId()).andReturn(stack220).atLeastOnce();
+ EasyMock.expect(repoVersion220.getVersion()).andReturn(version220).atLeastOnce();
+
Map<String, Map<String, String>> stack211Configs = new HashMap<>();
Map<String, String> stack211FooType = new HashMap<>();
Map<String, String> stack211BarType = new HashMap<>();
@@ -1174,17 +1174,18 @@ public class UpgradeResourceProviderTest {
EasyMock.reset(configHelper);
expect(
- configHelper.getDefaultProperties(EasyMock.eq(stack211), EasyMock.anyObject(Cluster.class), EasyMock.anyBoolean())).andReturn(
+ configHelper.getDefaultProperties(EasyMock.eq(stack211), EasyMock.anyString())).andReturn(
stack211Configs).anyTimes();
expect(
- configHelper.getDefaultProperties(EasyMock.eq(stack220), EasyMock.anyObject(Cluster.class), EasyMock.anyBoolean())).andReturn(
- stack220Configs).anyTimes();
+ configHelper.getDefaultProperties(EasyMock.eq(stack220), EasyMock.anyString())).andReturn(
+ stack220Configs).anyTimes();
Capture<Map<String, Map<String, String>>> expectedConfigurationsCapture = EasyMock.newCapture();
configHelper.createConfigTypes(EasyMock.anyObject(Cluster.class),
EasyMock.anyObject(AmbariManagementController.class),
+ EasyMock.anyObject(StackId.class),
EasyMock.capture(expectedConfigurationsCapture),
EasyMock.anyObject(String.class), EasyMock.anyObject(String.class));
@@ -1192,13 +1193,16 @@ public class UpgradeResourceProviderTest {
EasyMock.replay(configHelper, cluster, fooConfig, barConfig, bazConfig);
- UpgradeResourceProvider upgradeResourceProvider = createProvider(amc);
-
Map<String, UpgradePack> upgradePacks = ambariMetaInfo.getUpgradePacks("HDP", "2.1.1");
- UpgradePack upgrade = upgradePacks.get("upgrade_to_new_stack");
+ UpgradePack upgradePack = upgradePacks.get("upgrade_to_new_stack");
UpgradeContext upgradeContext = EasyMock.createNiceMock(UpgradeContext.class);
- upgradeResourceProvider.applyStackAndProcessConfigurations(upgradeContext);
+ EasyMock.expect(upgradeContext.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
+ EasyMock.expect(upgradeContext.getCluster()).andReturn(cluster).anyTimes();
+ EasyMock.expect(upgradeContext.getDirection()).andReturn(Direction.UPGRADE).anyTimes();
+ EasyMock.expect(upgradeContext.getUpgradePack()).andReturn(upgradePack).anyTimes();
+ EasyMock.expect(upgradeContext.getRepositoryVersion()).andReturn(repoVersion211).anyTimes();
+ EasyMock.expect(upgradeContext.getTargetRepositoryVersion(EasyMock.anyString())).andReturn(repoVersion220).anyTimes();
Map<String, Map<String, String>> expectedConfigurations = expectedConfigurationsCapture.getValue();
Map<String, String> expectedFooType = expectedConfigurations.get("foo-site");
@@ -1502,8 +1506,9 @@ public class UpgradeResourceProviderTest {
/**
* Exercises that a component that goes from upgrade->downgrade that switches
- * {@code versionAdvertised} between will go to UKNOWN. This exercises
- * {@link UpgradeHelper#putComponentsToUpgradingState(String, Map, StackId)}
+ * {@code versionAdvertised} in between will go to UNKNOWN. This exercises
+ * {@link UpgradeHelper#updateDesiredRepositoriesAndConfigs(UpgradeContext)}
+ *
* @throws Exception
*/
@Test
@@ -1617,9 +1622,67 @@ public class UpgradeResourceProviderTest {
}
}
+ /**
+ * Tests that from/to repository version history is created correctly on the
+ * upgrade.
+ *
+ * @throws Exception
+ */
@Test
public void testUpgradeHistory() throws Exception {
- Assert.fail("Implement me!");
+ Cluster cluster = clusters.getCluster("c1");
+
+ Map<String, Object> requestProps = new HashMap<>();
+ requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1");
+ requestProps.put(UpgradeResourceProvider.UPGRADE_REPO_VERSION_ID, String.valueOf(repoVersionEntity2200.getId()));
+ requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_test");
+ requestProps.put(UpgradeResourceProvider.UPGRADE_TYPE, UpgradeType.ROLLING.toString());
+ requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_MANUAL_VERIFICATION, Boolean.TRUE.toString());
+ requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, Boolean.TRUE.toString());
+ requestProps.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.UPGRADE.name());
+
+ ResourceProvider upgradeResourceProvider = createProvider(amc);
+ Request request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps), null);
+ upgradeResourceProvider.createResources(request);
+
+ List<UpgradeEntity> upgrades = upgradeDao.findUpgrades(cluster.getClusterId());
+ assertEquals(1, upgrades.size());
+
+ UpgradeEntity upgrade = cluster.getUpgradeInProgress();
+ List<UpgradeHistoryEntity> histories = upgrade.getHistory();
+ assertEquals(2, histories.size());
+
+ for( UpgradeHistoryEntity history : histories){
+ assertEquals( "ZOOKEEPER", history.getServiceName() );
+ assertEquals(repoVersionEntity2110, history.getFromReposistoryVersion());
+ assertEquals(repoVersionEntity2200, history.getTargetRepositoryVersion());
+ }
+
+ // abort the upgrade and create the downgrade
+ abortUpgrade(upgrade.getRequestId());
+
+ requestProps.put(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME, "c1");
+ requestProps.put(UpgradeResourceProvider.UPGRADE_PACK, "upgrade_nonrolling_new_stack");
+ requestProps.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, "true");
+ requestProps.put(UpgradeResourceProvider.UPGRADE_DIRECTION, Direction.DOWNGRADE.name());
+
+ Map<String, String> requestInfoProperties = new HashMap<>();
+
+ request = PropertyHelper.getCreateRequest(Collections.singleton(requestProps),
+ requestInfoProperties);
+ RequestStatus status = upgradeResourceProvider.createResources(request);
+ UpgradeEntity downgrade = upgradeDao.findUpgradeByRequestId(getRequestId(status));
+ assertEquals(Direction.DOWNGRADE, downgrade.getDirection());
+
+ // check from/to history
+ histories = downgrade.getHistory();
+ assertEquals(2, histories.size());
+
+ for (UpgradeHistoryEntity history : histories) {
+ assertEquals("ZOOKEEPER", history.getServiceName());
+ assertEquals(repoVersionEntity2200, history.getFromReposistoryVersion());
+ assertEquals(repoVersionEntity2110, history.getTargetRepositoryVersion());
+ }
}
private String parseSingleMessage(String msgStr){
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java
index 77593a7..7b9ff52 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java
@@ -388,10 +388,15 @@ public class ServiceConfigDAOTest {
long clusterId = serviceConfigEntity.getClusterId();
- List<ServiceConfigEntity> serviceConfigs = serviceConfigDAO.getAllServiceConfigsForClusterAndStack(clusterId, HDP_01);
- Assert.assertEquals(4, serviceConfigs.size());
+ List<ServiceConfigEntity> serviceConfigs = serviceConfigDAO.getServiceConfigsForServiceAndStack(
+ clusterId, HDP_01, "HDFS");
- serviceConfigs = serviceConfigDAO.getAllServiceConfigsForClusterAndStack(clusterId, HDP_02);
+ Assert.assertEquals(3, serviceConfigs.size());
+
+ serviceConfigs = serviceConfigDAO.getServiceConfigsForServiceAndStack(clusterId, HDP_01, "YARN");
+ Assert.assertEquals(1, serviceConfigs.size());
+
+ serviceConfigs = serviceConfigDAO.getServiceConfigsForServiceAndStack(clusterId, HDP_02, "HDFS");
Assert.assertEquals(0, serviceConfigs.size());
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
index 941c424..2f2771d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckActionTest.java
@@ -51,6 +51,7 @@ import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
import org.apache.ambari.server.orm.entities.RequestEntity;
import org.apache.ambari.server.orm.entities.StackEntity;
import org.apache.ambari.server.orm.entities.UpgradeEntity;
+import org.apache.ambari.server.orm.entities.UpgradeHistoryEntity;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.ConfigFactory;
@@ -85,7 +86,7 @@ public class ComponentVersionCheckActionTest {
private static final String HDP_2_1_1_0 = "2.1.1.0-1";
private static final String HDP_2_1_1_1 = "2.1.1.1-2";
- private static final String HDP_2_2_1_0 = "2.2.0.1-3";
+ private static final String HDP_2_2_1_0 = "2.2.1.0-1";
private static final StackId HDP_21_STACK = new StackId("HDP-2.1.1");
private static final StackId HDP_22_STACK = new StackId("HDP-2.2.0");
@@ -205,8 +206,21 @@ public class ComponentVersionCheckActionTest {
c.setUpgradeEntity(upgradeEntity);
}
- private void makeCrossStackUpgradeCluster(StackId sourceStack, String sourceRepo, StackId targetStack,
- String targetRepo, String clusterName, String hostName) throws Exception {
+ /**
+ * Creates a cluster with a running upgrade. The upgrade will have no services
+ * attached to it, so those will need to be set after this is called.
+ *
+ * @param sourceStack
+ * @param sourceRepo
+ * @param targetStack
+ * @param targetRepo
+ * @param clusterName
+ * @param hostName
+ * @throws Exception
+ */
+ private void makeCrossStackUpgradeCluster(StackId sourceStack, String sourceRepo,
+ StackId targetStack, String targetRepo, String clusterName, String hostName)
+ throws Exception {
m_helper.createStack(sourceStack);
m_helper.createStack(targetStack);
@@ -265,24 +279,22 @@ public class ComponentVersionCheckActionTest {
c.setUpgradeEntity(upgradeEntity);
}
- private void createNewRepoVersion(StackId targetStack, String targetRepo, String clusterName,
- String hostName) throws AmbariException {
- StackDAO stackDAO = m_injector.getInstance(StackDAO.class);
-
- StackEntity stackEntityTarget = stackDAO.find(targetStack.getStackName(), targetStack.getStackVersion());
-
- // Create the new repo version
- String urlInfo = "[{'repositories':["
- + "{'Repositories/base_url':'http://foo1','Repositories/repo_name':'HDP','Repositories/repo_id':'" + targetRepo + "'}"
- + "], 'OperatingSystems/os_type':'redhat6'}]";
- repoVersionDAO.create(stackEntityTarget, targetRepo, String.valueOf(System.currentTimeMillis()), urlInfo);
-
+ /**
+ * Creates a new {@link HostVersionEntity} instance in the
+ * {@link RepositoryVersionState#INSTALLED} for the specified host.
+ *
+ * @param hostName
+ * @param repositoryVersion
+ * @throws AmbariException
+ */
+ private void installRepositoryOnHost(String hostName, RepositoryVersionEntity repositoryVersion)
+ throws AmbariException {
// Start upgrading the newer repo
HostDAO hostDAO = m_injector.getInstance(HostDAO.class);
HostVersionEntity entity = new HostVersionEntity();
entity.setHostEntity(hostDAO.findByName(hostName));
- entity.setRepositoryVersion(repoVersionDAO.findByStackAndVersion(targetStack, targetRepo));
+ entity.setRepositoryVersion(repositoryVersion);
entity.setState(RepositoryVersionState.INSTALLED);
hostVersionDAO.create(entity);
}
@@ -325,42 +337,65 @@ public class ComponentVersionCheckActionTest {
public void testMixedComponentVersions() throws Exception {
StackId sourceStack = HDP_21_STACK;
StackId targetStack = HDP_22_STACK;
- String sourceRepo = HDP_2_1_1_0;
- String targetRepo = HDP_2_2_1_0;
+ String sourceVersion = HDP_2_1_1_0;
+ String targetVersion = HDP_2_2_1_0;
String clusterName = "c1";
String hostName = "h1";
- makeCrossStackUpgradeCluster(sourceStack, sourceRepo, targetStack, targetRepo, clusterName, hostName);
+ makeCrossStackUpgradeCluster(sourceStack, sourceVersion, targetStack, targetVersion,
+ clusterName, hostName);
Clusters clusters = m_injector.getInstance(Clusters.class);
Cluster cluster = clusters.getCluster("c1");
- RepositoryVersionEntity repositoryVersion = m_helper.getOrCreateRepositoryVersion(HDP_21_STACK, HDP_2_1_1_0);
+ RepositoryVersionEntity sourceRepoVersion = m_helper.getOrCreateRepositoryVersion(HDP_21_STACK, HDP_2_1_1_0);
+ RepositoryVersionEntity targetRepoVersion = m_helper.getOrCreateRepositoryVersion(HDP_22_STACK, HDP_2_2_1_0);
- Service service = installService(cluster, "HDFS", repositoryVersion);
+ Service service = installService(cluster, "HDFS", sourceRepoVersion);
addServiceComponent(cluster, service, "NAMENODE");
addServiceComponent(cluster, service, "DATANODE");
- createNewServiceComponentHost(cluster, "HDFS", "NAMENODE", "h1");
- createNewServiceComponentHost(cluster, "HDFS", "DATANODE", "h1");
-
- createNewRepoVersion(targetStack, targetRepo, clusterName, hostName);
+ createNewServiceComponentHost(cluster, "HDFS", "NAMENODE", hostName);
+ createNewServiceComponentHost(cluster, "HDFS", "DATANODE", hostName);
// create some configs
createConfigs(cluster);
+
+ // install the target repo
+ installRepositoryOnHost(hostName, targetRepoVersion);
+
// setup the cluster for the upgrade across stacks
cluster.setCurrentStackVersion(sourceStack);
cluster.setDesiredStackVersion(targetStack);
- // set the SCH versions to the new stack so that the finalize action is
- // happy
- cluster.getServiceComponentHosts("HDFS", "NAMENODE").get(0).setVersion(targetRepo);
- // don't update DATANODE - we want to make the action complain
+ // tell the upgrade that HDFS is upgrading - without this, no services will
+ // be participating in the upgrade
+ UpgradeEntity upgrade = cluster.getUpgradeInProgress();
+ UpgradeHistoryEntity history = new UpgradeHistoryEntity();
+ history.setUpgrade(upgrade);
+ history.setServiceName("HDFS");
+ history.setComponentName("NAMENODE");
+ history.setFromRepositoryVersion(sourceRepoVersion);
+ history.setTargetRepositoryVersion(targetRepoVersion);
+ upgrade.addHistory(history);
+
+ history = new UpgradeHistoryEntity();
+ history.setUpgrade(upgrade);
+ history.setServiceName("HDFS");
+ history.setComponentName("DATANODE");
+ history.setFromRepositoryVersion(sourceRepoVersion);
+ history.setTargetRepositoryVersion(targetRepoVersion);
+ upgrade.addHistory(history);
- // inject an unhappy path where the cluster repo version is still UPGRADING
- // even though all of the hosts are UPGRADED
+ UpgradeDAO upgradeDAO = m_injector.getInstance(UpgradeDAO.class);
+ upgrade = upgradeDAO.merge(upgrade);
+
+ // set the SCH versions to the new stack so that the finalize action is
+ // happy - don't update DATANODE - we want to make the action complain
+ cluster.getServiceComponentHosts("HDFS", "NAMENODE").get(0).setVersion(targetVersion);
// verify the conditions for the test are met properly
- List<HostVersionEntity> hostVersions = hostVersionDAO.findByClusterStackAndVersion("c1", HDP_22_STACK, targetRepo);
+ List<HostVersionEntity> hostVersions = hostVersionDAO.findByClusterStackAndVersion("c1",
+ HDP_22_STACK, targetVersion);
assertTrue(hostVersions.size() > 0);
for (HostVersionEntity hostVersion : hostVersions) {
@@ -386,6 +421,14 @@ public class ComponentVersionCheckActionTest {
assertNotNull(report);
assertEquals(HostRoleStatus.FAILED.name(), report.getStatus());
assertEquals(-1, report.getExitCode());
+
+ // OK, now set the datanode so it completes
+ cluster.getServiceComponentHosts("HDFS", "DATANODE").get(0).setVersion(targetVersion);
+
+ report = action.execute(null);
+ assertNotNull(report);
+ assertEquals(HostRoleStatus.COMPLETED.name(), report.getStatus());
+ assertEquals(0, report.getExitCode());
}
@Test
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
index 0aea8b3..f306d69 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeActionTest.java
@@ -58,13 +58,13 @@ import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
import org.apache.ambari.server.orm.entities.RequestEntity;
import org.apache.ambari.server.orm.entities.StackEntity;
import org.apache.ambari.server.orm.entities.UpgradeEntity;
+import org.apache.ambari.server.orm.entities.UpgradeHistoryEntity;
import org.apache.ambari.server.serveraction.ServerAction;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.Config;
import org.apache.ambari.server.state.ConfigFactory;
import org.apache.ambari.server.state.Host;
-import org.apache.ambari.server.state.RepositoryInfo;
import org.apache.ambari.server.state.RepositoryVersionState;
import org.apache.ambari.server.state.Service;
import org.apache.ambari.server.state.ServiceComponent;
@@ -77,6 +77,7 @@ import org.apache.ambari.server.state.State;
import org.apache.ambari.server.state.stack.UpgradePack;
import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
import org.apache.ambari.server.utils.EventBusSynchronizer;
+import org.apache.hadoop.metrics2.sink.relocated.commons.lang.StringUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@@ -102,8 +103,6 @@ public class UpgradeActionTest {
private static final StackId HDP_21_STACK = new StackId("HDP-2.1.1");
private static final StackId HDP_22_STACK = new StackId("HDP-2.2.0");
- private static final String HDP_211_CENTOS6_REPO_URL = "http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.1.1.0-118";
-
private RepositoryVersionEntity sourceRepositoryVersion;
private Injector m_injector;
@@ -172,10 +171,11 @@ public class UpgradeActionTest {
H2DatabaseCleaner.clearDatabase(m_injector.getProvider(EntityManager.class).get());
}
- private void makeDowngradeCluster(StackId sourceStack, String sourceRepo, StackId targetStack, String targetRepo) throws Exception {
+ private void makeDowngradeCluster(RepositoryVersionEntity sourceRepoVersion,
+ RepositoryVersionEntity targetRepoVersion) throws Exception {
String hostName = "h1";
- clusters.addCluster(clusterName, sourceStack);
+ clusters.addCluster(clusterName, sourceRepoVersion.getStackId());
// add a host component
clusters.addHost(hostName);
@@ -187,24 +187,17 @@ public class UpgradeActionTest {
hostAttributes.put("os_release_version", "6");
host.setHostAttributes(hostAttributes);
- // Create the starting repo version
- m_helper.getOrCreateRepositoryVersion(sourceStack, sourceRepo);
-
- // Start upgrading the newer repo
- m_helper.getOrCreateRepositoryVersion(targetStack, targetRepo);
-
HostVersionEntity entity = new HostVersionEntity();
entity.setHostEntity(hostDAO.findByName(hostName));
- entity.setRepositoryVersion(repoVersionDAO.findByStackAndVersion(targetStack, targetRepo));
+ entity.setRepositoryVersion(targetRepoVersion);
entity.setState(RepositoryVersionState.INSTALLING);
hostVersionDAO.create(entity);
}
- private void makeTwoUpgradesWhereLastDidNotComplete(StackId sourceStack, String sourceRepo, StackId midStack, String midRepo, StackId targetStack, String targetRepo) throws Exception {
- String hostName = "h1";
-
- clusters.addCluster(clusterName, sourceStack);
+ private void createUpgradeCluster(
+ RepositoryVersionEntity sourceRepoVersion, String hostName) throws Exception {
+ clusters.addCluster(clusterName, sourceRepoVersion.getStackId());
Cluster c = clusters.getCluster(clusterName);
// add a host component
@@ -217,113 +210,33 @@ public class UpgradeActionTest {
hostAttributes.put("os_release_version", "6");
host.setHostAttributes(hostAttributes);
- // Create the starting repo version
- m_helper.getOrCreateRepositoryVersion(sourceStack, sourceRepo);
-
- // Start upgrading the mid repo
- m_helper.getOrCreateRepositoryVersion(midStack, midRepo);
- c.setDesiredStackVersion(midStack);
-
- // Notice that we have not yet changed the cluster current stack to the mid stack to simulate
- // the user skipping this step.
-
- m_helper.getOrCreateRepositoryVersion(targetStack, targetRepo);
- c.setDesiredStackVersion(targetStack);
-
- // Create a host version for the starting repo in INSTALLED
- HostVersionEntity entitySource = new HostVersionEntity();
- entitySource.setHostEntity(hostDAO.findByName(hostName));
- entitySource.setRepositoryVersion(repoVersionDAO.findByStackAndVersion(sourceStack, sourceRepo));
- entitySource.setState(RepositoryVersionState.INSTALL_FAILED);
- hostVersionDAO.create(entitySource);
-
- // Create a host version for the target repo in UPGRADED
- HostVersionEntity entityTarget = new HostVersionEntity();
- entityTarget.setHostEntity(hostDAO.findByName(hostName));
- entityTarget.setRepositoryVersion(repoVersionDAO.findByStackAndVersion(targetStack, targetRepo));
- entityTarget.setState(RepositoryVersionState.INSTALLED);
- hostVersionDAO.create(entityTarget);
- }
-
- private RepositoryVersionEntity createUpgradeClusterAndSourceRepo(StackId sourceStack,
- String sourceRepo,
- String hostName) throws Exception {
-
- clusters.addCluster(clusterName, sourceStack);
-
- StackEntity stackEntitySource = stackDAO.find(sourceStack.getStackName(), sourceStack.getStackVersion());
- assertNotNull(stackEntitySource);
-
- Cluster c = clusters.getCluster(clusterName);
- c.setDesiredStackVersion(sourceStack);
-
- // add a host component
- clusters.addHost(hostName);
-
- Host host = clusters.getHost(hostName);
-
- Map<String, String> hostAttributes = new HashMap<>();
- hostAttributes.put("os_family", "redhat");
- hostAttributes.put("os_release_version", "6");
- host.setHostAttributes(hostAttributes);
-
// without this, HostEntity will not have a relation to ClusterEntity
clusters.mapHostToCluster(hostName, clusterName);
- // Create the starting repo version
- sourceRepositoryVersion = m_helper.getOrCreateRepositoryVersion(sourceStack, sourceRepo);
- sourceRepositoryVersion.setOperatingSystems("[\n" +
- " {\n" +
- " \"repositories\":[\n" +
- " {\n" +
- " \"Repositories/base_url\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos5/2.x/updates/2.2.0.0\",\n" +
- " \"Repositories/repo_name\":\"HDP\",\n" +
- " \"Repositories/repo_id\":\"HDP-2.2\"\n" +
- " }\n" +
- " ],\n" +
- " \"OperatingSystems/os_type\":\"redhat6\"\n" +
- " }\n" +
- "]");
- repoVersionDAO.merge(sourceRepositoryVersion);
-
- return sourceRepositoryVersion;
+ HostVersionEntity entity = new HostVersionEntity(hostDAO.findByName(hostName),
+ sourceRepoVersion, RepositoryVersionState.INSTALLED);
+
+ hostVersionDAO.create(entity);
}
- private RepositoryVersionEntity createUpgradeClusterTargetRepo(StackId targetStack, String targetRepo,
- String hostName) throws AmbariException {
+ private void createHostVersions(RepositoryVersionEntity targetRepoVersion,
+ String hostName) throws AmbariException {
Cluster c = clusters.getCluster(clusterName);
- StackEntity stackEntityTarget = stackDAO.find(targetStack.getStackName(), targetStack.getStackVersion());
- assertNotNull(stackEntityTarget);
-
- // Create the new repo version
- String urlInfo = "[{'repositories':["
- + "{'Repositories/base_url':'http://foo1','Repositories/repo_name':'HDP','Repositories/repo_id':'" + targetStack.getStackId() + "-1'}"
- + "], 'OperatingSystems/os_type':'redhat6'}]";
-
- repoVersionDAO.create(stackEntityTarget, targetRepo, String.valueOf(System.currentTimeMillis()), urlInfo);
-
- // Start upgrading the newer repo
- c.setCurrentStackVersion(targetStack);
// create a single host with the UPGRADED HostVersionEntity
HostDAO hostDAO = m_injector.getInstance(HostDAO.class);
- RepositoryVersionEntity repositoryVersionEntity = repoVersionDAO.findByStackAndVersion(
- targetStack, targetRepo);
-
HostVersionEntity entity = new HostVersionEntity(hostDAO.findByName(hostName),
- repositoryVersionEntity, RepositoryVersionState.INSTALLED);
+ targetRepoVersion, RepositoryVersionState.INSTALLED);
hostVersionDAO.create(entity);
// verify the UPGRADED host versions were created successfully
- List<HostVersionEntity> hostVersions = hostVersionDAO.findByClusterStackAndVersion(clusterName,
- targetStack, targetRepo);
+ List<HostVersionEntity> hostVersions = hostVersionDAO.findHostVersionByClusterAndRepository(
+ c.getClusterId(), targetRepoVersion);
assertEquals(1, hostVersions.size());
assertEquals(RepositoryVersionState.INSTALLED, hostVersions.get(0).getState());
-
- return repositoryVersionEntity;
}
private void makeCrossStackUpgradeClusterAndSourceRepo(StackId sourceStack, String sourceRepo,
@@ -358,11 +271,6 @@ public class UpgradeActionTest {
StackEntity stackEntityTarget = stackDAO.find(targetStack.getStackName(), targetStack.getStackVersion());
assertNotNull(stackEntityTarget);
- // Create the new repo version
- String urlInfo = "[{'repositories':["
- + "{'Repositories/base_url':'http://foo1','Repositories/repo_name':'HDP','Repositories/repo_id':'" + targetRepo + "'}"
- + "], 'OperatingSystems/os_type':'redhat6'}]";
-
m_helper.getOrCreateRepositoryVersion(new StackId(stackEntityTarget), targetRepo);
// Start upgrading the newer repo
@@ -386,7 +294,6 @@ public class UpgradeActionTest {
StackId sourceStack = HDP_21_STACK;
StackId targetStack = HDP_22_STACK;
String sourceRepo = HDP_2_1_1_0;
- String targetRepo = HDP_2_2_0_1;
String hostName = "h1";
// Must be a NON_ROLLING upgrade that jumps stacks in order for it to apply config changes.
@@ -400,8 +307,6 @@ public class UpgradeActionTest {
Cluster cluster = clusters.getCluster(clusterName);
- createUpgrade(cluster, repositoryVersion2201);
-
// Install ZK and HDFS with some components
Service zk = installService(cluster, "ZOOKEEPER");
addServiceComponent(cluster, zk, "ZOOKEEPER_SERVER");
@@ -415,10 +320,10 @@ public class UpgradeActionTest {
createNewServiceComponentHost(cluster, "HDFS", "NAMENODE", "h1");
createNewServiceComponentHost(cluster, "HDFS", "DATANODE", "h1");
- makeCrossStackUpgradeTargetRepo(targetStack, targetRepo, hostName);
+ makeCrossStackUpgradeTargetRepo(targetStack, repositoryVersion2201.getVersion(), hostName);
+ createUpgrade(cluster, repositoryVersion2201);
- RepositoryVersionEntity targetRve = repoVersionDAO.findByStackNameAndVersion("HDP", targetRepo);
- Assert.assertNotNull(targetRve);
+ Assert.assertNotNull(repositoryVersion2201);
// Create some configs
createConfigs(cluster);
@@ -459,12 +364,7 @@ public class UpgradeActionTest {
@Test
public void testFinalizeDowngrade() throws Exception {
- StackId sourceStack = HDP_21_STACK;
- StackId targetStack = HDP_21_STACK;
- String sourceRepo = HDP_2_1_1_0;
- String targetRepo = HDP_2_1_1_1;
-
- makeDowngradeCluster(sourceStack, sourceRepo, targetStack, targetRepo);
+ makeDowngradeCluster(repositoryVersion2110, repositoryVersion2111);
Cluster cluster = clusters.getCluster(clusterName);
@@ -486,74 +386,25 @@ public class UpgradeActionTest {
assertEquals(HostRoleStatus.COMPLETED.name(), report.getStatus());
for (HostVersionEntity entity : hostVersionDAO.findByClusterAndHost(clusterName, "h1")) {
- if (entity.getRepositoryVersion().getVersion().equals(sourceRepo)) {
+ if (StringUtils.equals(entity.getRepositoryVersion().getVersion(), repositoryVersion2110.getVersion())) {
assertEquals(RepositoryVersionState.CURRENT, entity.getState());
- } else if (entity.getRepositoryVersion().getVersion().equals(targetRepo)) {
+ } else if (StringUtils.equals(entity.getRepositoryVersion().getVersion(), repositoryVersion2111.getVersion())) {
assertEquals(RepositoryVersionState.INSTALLED, entity.getState());
}
}
}
- /**
- * Test a case in which a customer performs an upgrade from HDP 2.1 to 2.2 (e.g., 2.2.0.0), but skips the step to
- * finalize, which calls "Save DB State". Therefore, the cluster's current stack is still on HDP 2.1.
- * They can still modify the database manually to mark HDP 2.2 as CURRENT in the cluster_version and then begin
- * another upgrade to 2.2.0.2 and then downgrade.
- * In the downgrade, the original stack is still 2.1 but the stack for the version marked as CURRENT is 2.2; this
- * mismatch means that the downgrade should not delete configs and will report a warning.
- * @throws Exception
- */
- @Test
- public void testFinalizeDowngradeWhenDidNotFinalizePreviousUpgrade() throws Exception {
- StackId sourceStack = HDP_21_STACK;
- StackId midStack = HDP_22_STACK;
- StackId targetStack = HDP_22_STACK;
-
- String sourceRepo = HDP_2_1_1_0;
- String midRepo = HDP_2_2_0_1;
- String targetRepo = HDP_2_2_0_2;
-
- makeTwoUpgradesWhereLastDidNotComplete(sourceStack, sourceRepo, midStack, midRepo, targetStack, targetRepo);
-
- Cluster cluster = clusters.getCluster(clusterName);
-
- createUpgrade(cluster, repositoryVersion2202);
-
- Map<String, String> commandParams = new HashMap<>();
- ExecutionCommand executionCommand = new ExecutionCommand();
- executionCommand.setCommandParams(commandParams);
- executionCommand.setClusterName(clusterName);
-
- HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null);
- hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand));
-
- finalizeUpgradeAction.setExecutionCommand(executionCommand);
- finalizeUpgradeAction.setHostRoleCommand(hostRoleCommand);
-
- CommandReport report = finalizeUpgradeAction.execute(null);
- assertNotNull(report);
- assertEquals(HostRoleStatus.FAILED.name(), report.getStatus());
- assertTrue(report.getStdErr().contains(FinalizeUpgradeAction.PREVIOUS_UPGRADE_NOT_COMPLETED_MSG));
- }
-
@Test
public void testFinalizeUpgrade() throws Exception {
- StackId sourceStack = HDP_21_STACK;
- StackId targetStack = HDP_21_STACK;
- String sourceRepo = HDP_2_1_1_0;
- String targetRepo = HDP_2_1_1_1;
String hostName = "h1";
- createUpgradeClusterAndSourceRepo(sourceStack, sourceRepo, hostName);
- createUpgradeClusterTargetRepo(targetStack, targetRepo, hostName);
+ createUpgradeCluster(repositoryVersion2110, hostName);
+ createHostVersions(repositoryVersion2111, hostName);
Cluster cluster = clusters.getCluster(clusterName);
createUpgrade(cluster, repositoryVersion2111);
- RepositoryInfo repo = ambariMetaInfo.getRepository(sourceStack.getStackName(), sourceStack.getStackVersion(), "redhat6", sourceStack.getStackId());
- assertEquals(HDP_211_CENTOS6_REPO_URL, repo.getBaseUrl());
-
// Finalize the upgrade
Map<String, String> commandParams = new HashMap<>();
ExecutionCommand executionCommand = new ExecutionCommand();
@@ -579,14 +430,10 @@ public class UpgradeActionTest {
*/
@Test
public void testFinalizeWithHostsAlreadyCurrent() throws Exception {
- StackId sourceStack = HDP_21_STACK;
- StackId targetStack = HDP_21_STACK;
- String sourceRepo = HDP_2_1_1_0;
- String targetRepo = HDP_2_1_1_1;
String hostName = "h1";
- createUpgradeClusterAndSourceRepo(sourceStack, sourceRepo, hostName);
- createUpgradeClusterTargetRepo(targetStack, targetRepo, hostName);
+ createUpgradeCluster(repositoryVersion2110, hostName);
+ createHostVersions(repositoryVersion2111, hostName);
// move the old version from CURRENT to INSTALLED and the new version from
// UPGRADED to CURRENT - this will simulate what happens when a host is
@@ -607,10 +454,6 @@ public class UpgradeActionTest {
createUpgrade(cluster, repositoryVersion2111);
- RepositoryInfo repo = ambariMetaInfo.getRepository(sourceStack.getStackName(),
- sourceStack.getStackVersion(), "redhat6", sourceStack.getStackId());
- assertEquals(HDP_211_CENTOS6_REPO_URL, repo.getBaseUrl());
-
// Finalize the upgrade
Map<String, String> commandParams = new HashMap<>();
@@ -935,10 +778,23 @@ public class UpgradeActionTest {
upgradeEntity.setRepositoryVersion(repositoryVersion);
upgradeEntity.setUpgradeType(UpgradeType.NON_ROLLING);
- upgradeDAO.create(upgradeEntity);
+ Map<String, Service> services = cluster.getServices();
+ for (String serviceName : services.keySet()) {
+ Service service = services.get(serviceName);
+ Map<String, ServiceComponent> components = service.getServiceComponents();
+ for (String componentName : components.keySet()) {
+ UpgradeHistoryEntity history = new UpgradeHistoryEntity();
+ history.setUpgrade(upgradeEntity);
+ history.setServiceName(serviceName);
+ history.setComponentName(componentName);
+ history.setFromRepositoryVersion(service.getDesiredRepositoryVersion());
+ history.setTargetRepositoryVersion(repositoryVersion);
+ upgradeEntity.addHistory(history);
+ }
+ }
+ upgradeDAO.create(upgradeEntity);
cluster.setUpgradeEntity(upgradeEntity);
-
return upgradeEntity;
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java
index f43dbd8..c6f3276 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigGroupTest.java
@@ -105,7 +105,7 @@ public class ConfigGroupTest {
configs.put(config.getType(), config);
hosts.put(host.getHostId(), host);
- ConfigGroup configGroup = configGroupFactory.createNew(cluster, "cg-test",
+ ConfigGroup configGroup = configGroupFactory.createNew(cluster, "HDFS", "cg-test",
"HDFS", "New HDFS configs for h1", configs, hosts);
cluster.addConfigGroup(configGroup);
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
index dd0a840..e9e5399 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ConfigHelperTest.java
@@ -257,7 +257,7 @@ public class ConfigHelperTest {
configMap.put(config.getType(), config);
}
- ConfigGroup configGroup = configGroupFactory.createNew(cluster, name,
+ ConfigGroup configGroup = configGroupFactory.createNew(cluster, null, name,
tag, "", configMap, hostMap);
LOG.info("Config group created with tag " + tag);
configGroup.setTag(tag);
[3/3] ambari git commit: AMBARI-21078 - Merging Configurations On Service/Patch Upgrades Should Create New Configurations Only For Included Services (jonathanhurley)
Posted by jo...@apache.org.
AMBARI-21078 - Merging Configurations On Service/Patch Upgrades Should Create New Configurations Only For Included Services (jonathanhurley)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c4148d80
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c4148d80
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c4148d80
Branch: refs/heads/branch-feature-AMBARI-12556
Commit: c4148d805c4145d545712bbce6127e7518a7b7ce
Parents: a45f542
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Fri May 19 15:14:15 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Tue May 23 17:35:11 2017 -0400
----------------------------------------------------------------------
.../controller/AmbariManagementController.java | 2 +-
.../AmbariManagementControllerImpl.java | 4 +-
.../internal/ConfigGroupResourceProvider.java | 4 +-
.../internal/UpgradeResourceProvider.java | 287 +------------------
.../ambari/server/orm/dao/ServiceConfigDAO.java | 18 +-
.../apache/ambari/server/orm/dao/StackDAO.java | 14 +
.../orm/entities/ServiceConfigEntity.java | 24 +-
.../upgrades/ComponentVersionCheckAction.java | 5 +-
.../upgrades/FinalizeUpgradeAction.java | 18 +-
.../upgrades/UpdateDesiredStackAction.java | 7 +-
.../ambari/server/stack/MasterHostResolver.java | 16 +-
.../org/apache/ambari/server/state/Cluster.java | 16 +-
.../ambari/server/state/ConfigFactory.java | 1 +
.../ambari/server/state/ConfigHelper.java | 56 ++--
.../apache/ambari/server/state/ConfigImpl.java | 13 +-
.../ambari/server/state/UpgradeContext.java | 68 ++++-
.../ambari/server/state/UpgradeHelper.java | 286 ++++++++++++++++--
.../server/state/cluster/ClusterImpl.java | 108 ++++---
.../server/state/configgroup/ConfigGroup.java | 2 +-
.../state/configgroup/ConfigGroupFactory.java | 5 +-
.../state/configgroup/ConfigGroupImpl.java | 35 ++-
.../state/stack/upgrade/UpgradeScope.java | 9 -
.../RequiredConfigPropertiesValidator.java | 3 +-
.../server/upgrade/AbstractUpgradeCatalog.java | 3 +-
.../TestActionSchedulerThreading.java | 35 ++-
.../ambari/server/agent/AgentResourceTest.java | 2 +
.../AmbariManagementControllerTest.java | 16 +-
.../ConfigGroupResourceProviderTest.java | 5 +-
.../StackUpgradeConfigurationMergeTest.java | 12 +-
.../internal/UpgradeResourceProviderTest.java | 115 ++++++--
.../server/orm/dao/ServiceConfigDAOTest.java | 11 +-
.../ComponentVersionCheckActionTest.java | 107 ++++---
.../upgrades/UpgradeActionTest.java | 230 +++------------
.../ambari/server/state/ConfigGroupTest.java | 2 +-
.../ambari/server/state/ConfigHelperTest.java | 2 +-
.../ambari/server/state/UpgradeHelperTest.java | 107 +++----
.../server/state/cluster/ClusterTest.java | 142 ++++++---
.../svccomphost/ServiceComponentHostTest.java | 6 +-
.../upgrade/AbstractUpgradeCatalogTest.java | 6 +-
.../server/upgrade/UpgradeCatalog210Test.java | 4 +-
.../server/upgrade/UpgradeCatalog211Test.java | 2 +-
.../server/upgrade/UpgradeCatalog220Test.java | 4 +-
.../server/upgrade/UpgradeCatalog221Test.java | 4 +-
.../server/upgrade/UpgradeCatalog222Test.java | 4 +-
.../server/upgrade/UpgradeCatalog240Test.java | 36 +--
.../server/upgrade/UpgradeCatalog250Test.java | 34 +--
.../server/upgrade/UpgradeCatalog300Test.java | 6 +-
47 files changed, 1030 insertions(+), 866 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
index fe01a0d..807bded 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
@@ -115,7 +115,7 @@ public interface AmbariManagementController {
* TODO move this method to Cluster? doesn't seem to be on its place
* @return config created
*/
- Config createConfig(StackId stackId, Cluster cluster, String type, Map<String, String> properties,
+ Config createConfig(Cluster cluster, StackId stackId, String type, Map<String, String> properties,
String versionTag, Map<String, Map<String, String>> propertiesAttributes);
/**
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index faa9c54..3a5a4e6 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -930,7 +930,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
stackId = cluster.getDesiredStackVersion();
}
- Config config = createConfig(stackId, cluster, request.getType(), requestProperties,
+ Config config = createConfig(cluster, stackId, request.getType(), requestProperties,
request.getVersionTag(), propertiesAttributes);
LOG.info(MessageFormat.format("Creating configuration with tag ''{0}'' to cluster ''{1}'' for configuration type {2}",
@@ -942,7 +942,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
}
@Override
- public Config createConfig(StackId stackId, Cluster cluster, String type, Map<String, String> properties,
+ public Config createConfig(Cluster cluster, StackId stackId, String type, Map<String, String> properties,
String versionTag, Map<String, Map<String, String>> propertiesAttributes) {
Config config = configFactory.createNew(stackId, cluster, type, versionTag, properties,
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java
index cf6b717..71f2be4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java
@@ -579,13 +579,11 @@ public class ConfigGroupResourceProvider extends
verifyConfigs(request.getConfigs(), cluster.getClusterName());
- ConfigGroup configGroup = configGroupFactory.createNew(cluster,
+ ConfigGroup configGroup = configGroupFactory.createNew(cluster, serviceName,
request.getGroupName(),
request.getTag(), request.getDescription(),
request.getConfigs(), hosts);
- configGroup.setServiceName(serviceName);
-
cluster.addConfigGroup(configGroup);
if (serviceName != null) {
cluster.createServiceConfigVersion(serviceName, getManagementController().getAuthName(),
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
index 115a043..de2386a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
@@ -27,7 +27,6 @@ import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
-import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -65,13 +64,11 @@ import org.apache.ambari.server.controller.spi.SystemException;
import org.apache.ambari.server.controller.spi.UnsupportedPropertyException;
import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
import org.apache.ambari.server.orm.dao.HostRoleCommandStatusSummaryDTO;
-import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
import org.apache.ambari.server.orm.dao.RequestDAO;
import org.apache.ambari.server.orm.dao.UpgradeDAO;
import org.apache.ambari.server.orm.entities.HostRoleCommandEntity;
import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
import org.apache.ambari.server.orm.entities.RequestEntity;
-import org.apache.ambari.server.orm.entities.StackEntity;
import org.apache.ambari.server.orm.entities.UpgradeEntity;
import org.apache.ambari.server.orm.entities.UpgradeGroupEntity;
import org.apache.ambari.server.orm.entities.UpgradeHistoryEntity;
@@ -82,9 +79,7 @@ import org.apache.ambari.server.security.authorization.ResourceType;
import org.apache.ambari.server.security.authorization.RoleAuthorization;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.state.DesiredConfig;
import org.apache.ambari.server.state.Service;
import org.apache.ambari.server.state.ServiceComponent;
import org.apache.ambari.server.state.ServiceInfo;
@@ -98,7 +93,6 @@ import org.apache.ambari.server.state.stack.ConfigUpgradePack;
import org.apache.ambari.server.state.stack.UpgradePack;
import org.apache.ambari.server.state.stack.upgrade.ConfigureTask;
import org.apache.ambari.server.state.stack.upgrade.Direction;
-import org.apache.ambari.server.state.stack.upgrade.Grouping;
import org.apache.ambari.server.state.stack.upgrade.ManualTask;
import org.apache.ambari.server.state.stack.upgrade.ServerSideActionTask;
import org.apache.ambari.server.state.stack.upgrade.StageWrapper;
@@ -208,9 +202,6 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
private static Provider<AmbariMetaInfo> s_metaProvider = null;
@Inject
- private static RepositoryVersionDAO s_repoVersionDAO = null;
-
- @Inject
private static Provider<RequestFactory> s_requestFactory;
@Inject
@@ -275,9 +266,6 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
PROPERTY_IDS.add(REQUEST_STATUS_PROPERTY_ID);
PROPERTY_IDS.add(REQUEST_TYPE_ID);
- PROPERTY_IDS.add("Upgrade/from_version");
- PROPERTY_IDS.add("Upgrade/to_version");
-
// keys
KEY_PROPERTY_IDS.put(Resource.Type.Upgrade, UPGRADE_REQUEST_ID);
KEY_PROPERTY_IDS.put(Resource.Type.Cluster, UPGRADE_CLUSTER_NAME);
@@ -688,16 +676,11 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
at the appropriate moment during the orchestration.
**/
if (pack.getType() == UpgradeType.ROLLING) {
- // Desired configs must be set before creating stages because the config tag
- // names are read and set on the command for filling in later
- applyStackAndProcessConfigurations(upgradeContext);
-
- // move component desired version and upgrade state
- s_upgradeHelper.putComponentsToUpgradingState(upgradeContext);
+ s_upgradeHelper.updateDesiredRepositoriesAndConfigs(upgradeContext);
}
@Experimental(feature = ExperimentalFeature.PATCH_UPGRADES, comment = "This is wrong")
- StackId configurationPackSourceStackId = upgradeContext.getRepositoryVersion().getStackId();
+ StackId configurationPackSourceStackId = upgradeContext.getSourceVersions().values().iterator().next().getStackId();
// resolve or build a proper config upgrade pack - always start out with the config pack
// for the current stack and merge into that
@@ -801,272 +784,6 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
return upgradeEntity;
}
- /**
- * Handles the creation or resetting of configurations based on whether an
- * upgrade or downgrade is occurring. This method will not do anything when
- * the target stack version is the same as the cluster's current stack version
- * since, by definition, no new configurations are automatically created when
- * upgrading with the same stack (ie HDP 2.2.0.0 -> HDP 2.2.1.0).
- * <p/>
- * When upgrading or downgrade between stacks (HDP 2.2.0.0 -> HDP 2.3.0.0)
- * then this will perform the following:
- * <ul>
- * <li>Upgrade: Create new configurations that are a merge between the current
- * stack and the desired stack. If a value has changed between stacks, then
- * the target stack value should be taken unless the cluster's value differs
- * from the old stack. This can occur if a property has been customized after
- * installation.</li>
- * <li>Downgrade: Reset the latest configurations from the cluster's original
- * stack. The new configurations that were created on upgrade must be left
- * intact until all components have been reverted, otherwise heartbeats will
- * fail due to missing configurations.</li>
- * </ul>
- *
- * @param upgradeContext the upgrade context (not {@code null}).
- * @throws AmbariException
- */
- public void applyStackAndProcessConfigurations(UpgradeContext upgradeContext)
- throws AmbariException {
-
- Cluster cluster = upgradeContext.getCluster();
- Direction direction = upgradeContext.getDirection();
- UpgradePack upgradePack = upgradeContext.getUpgradePack();
- String stackName = upgradeContext.getRepositoryVersion().getStackId().getStackName();
- String version = upgradeContext.getRepositoryVersion().getStackId().getStackVersion();
- String userName = getManagementController().getAuthName();
-
- RepositoryVersionEntity targetRve = s_repoVersionDAO.findByStackNameAndVersion(stackName, version);
- if (null == targetRve) {
- LOG.info("Could not find version entity for {}; not setting new configs", version);
- return;
- }
-
- if (null == userName) {
- userName = getManagementController().getAuthName();
- }
-
- // if the current and target stacks are the same (ie HDP 2.2.0.0 -> 2.2.1.0)
- // then we should never do anything with configs on either upgrade or
- // downgrade; however if we are going across stacks, we have to do the stack
- // checks differently depending on whether this is an upgrade or downgrade
- StackEntity targetStack = targetRve.getStack();
- StackId currentStackId = cluster.getCurrentStackVersion();
- StackId desiredStackId = cluster.getDesiredStackVersion();
- StackId targetStackId = new StackId(targetStack);
- // Only change configs if moving to a different stack.
- switch (direction) {
- case UPGRADE:
- if (currentStackId.equals(targetStackId)) {
- return;
- }
- break;
- case DOWNGRADE:
- if (desiredStackId.equals(targetStackId)) {
- return;
- }
- break;
- }
-
- Map<String, Map<String, String>> newConfigurationsByType = null;
- ConfigHelper configHelper = getManagementController().getConfigHelper();
-
- if (direction == Direction.UPGRADE) {
- // populate a map of default configurations for the old stack (this is
- // used when determining if a property has been customized and should be
- // overriden with the new stack value)
- Map<String, Map<String, String>> oldStackDefaultConfigurationsByType = configHelper.getDefaultProperties(
- currentStackId, cluster, true);
-
- // populate a map with default configurations from the new stack
- newConfigurationsByType = configHelper.getDefaultProperties(targetStackId, cluster, true);
-
- // We want to skip updating config-types of services that are not in the upgrade pack.
- // Care should be taken as some config-types could be in services that are in and out
- // of the upgrade pack. We should never ignore config-types of services in upgrade pack.
- Set<String> skipConfigTypes = new HashSet<>();
- Set<String> upgradePackServices = new HashSet<>();
- Set<String> upgradePackConfigTypes = new HashSet<>();
- AmbariMetaInfo ambariMetaInfo = s_metaProvider.get();
-
- // ensure that we get the service info from the target stack
- // (since it could include new configuration types for a service)
- Map<String, ServiceInfo> stackServicesMap = ambariMetaInfo.getServices(
- targetStack.getStackName(), targetStack.getStackVersion());
-
- for (Grouping group : upgradePack.getGroups(direction)) {
- for (UpgradePack.OrderService service : group.services) {
- if (service.serviceName == null || upgradePackServices.contains(service.serviceName)) {
- // No need to re-process service that has already been looked at
- continue;
- }
-
- upgradePackServices.add(service.serviceName);
- ServiceInfo serviceInfo = stackServicesMap.get(service.serviceName);
- if (serviceInfo == null) {
- continue;
- }
-
- // add every configuration type for all services defined in the
- // upgrade pack
- Set<String> serviceConfigTypes = serviceInfo.getConfigTypeAttributes().keySet();
- for (String serviceConfigType : serviceConfigTypes) {
- if (!upgradePackConfigTypes.contains(serviceConfigType)) {
- upgradePackConfigTypes.add(serviceConfigType);
- }
- }
- }
- }
-
- // build a set of configurations that should not be merged since their
- // services are not installed
- Set<String> servicesNotInUpgradePack = new HashSet<>(stackServicesMap.keySet());
- servicesNotInUpgradePack.removeAll(upgradePackServices);
- for (String serviceNotInUpgradePack : servicesNotInUpgradePack) {
- ServiceInfo serviceInfo = stackServicesMap.get(serviceNotInUpgradePack);
- Set<String> configTypesOfServiceNotInUpgradePack = serviceInfo.getConfigTypeAttributes().keySet();
- for (String configType : configTypesOfServiceNotInUpgradePack) {
- if (!upgradePackConfigTypes.contains(configType) && !skipConfigTypes.contains(configType)) {
- skipConfigTypes.add(configType);
- }
- }
- }
-
- // remove any configurations from the target stack that are not used
- // because the services are not installed
- Iterator<String> iterator = newConfigurationsByType.keySet().iterator();
- while (iterator.hasNext()) {
- String configType = iterator.next();
- if (skipConfigTypes.contains(configType)) {
- LOG.info("Stack Upgrade: Removing configs for config-type {}", configType);
- iterator.remove();
- }
- }
-
- // now that the map has been populated with the default configurations
- // from the stack/service, overlay the existing configurations on top
- Map<String, DesiredConfig> existingDesiredConfigurationsByType = cluster.getDesiredConfigs();
- for (Map.Entry<String, DesiredConfig> existingEntry : existingDesiredConfigurationsByType.entrySet()) {
- String configurationType = existingEntry.getKey();
- if(skipConfigTypes.contains(configurationType)) {
- LOG.info("Stack Upgrade: Skipping config-type {} as upgrade-pack contains no updates to its service", configurationType);
- continue;
- }
-
- // NPE sanity, although shouldn't even happen since we are iterating
- // over the desired configs to start with
- Config currentClusterConfig = cluster.getDesiredConfigByType(configurationType);
- if (null == currentClusterConfig) {
- continue;
- }
-
- // get current stack default configurations on install
- Map<String, String> configurationTypeDefaultConfigurations = oldStackDefaultConfigurationsByType.get(
- configurationType);
-
- // NPE sanity for current stack defaults
- if (null == configurationTypeDefaultConfigurations) {
- configurationTypeDefaultConfigurations = Collections.emptyMap();
- }
-
- // get the existing configurations
- Map<String, String> existingConfigurations = currentClusterConfig.getProperties();
-
- // if the new stack configurations don't have the type, then simply add
- // all of the existing in
- Map<String, String> newDefaultConfigurations = newConfigurationsByType.get(
- configurationType);
-
- if (null == newDefaultConfigurations) {
- newConfigurationsByType.put(configurationType, existingConfigurations);
- continue;
- } else {
- // TODO, should we remove existing configs whose value is NULL even though they don't have a value in the new stack?
-
- // Remove any configs in the new stack whose value is NULL, unless they currently exist and the value is not NULL.
- Iterator<Map.Entry<String, String>> iter = newDefaultConfigurations.entrySet().iterator();
- while (iter.hasNext()) {
- Map.Entry<String, String> entry = iter.next();
- if (entry.getValue() == null) {
- iter.remove();
- }
- }
- }
-
- // for every existing configuration, see if an entry exists; if it does
- // not exist, then put it in the map, otherwise we'll have to compare
- // the existing value to the original stack value to see if its been
- // customized
- for (Map.Entry<String, String> existingConfigurationEntry : existingConfigurations.entrySet()) {
- String existingConfigurationKey = existingConfigurationEntry.getKey();
- String existingConfigurationValue = existingConfigurationEntry.getValue();
-
- // if there is already an entry, we now have to try to determine if
- // the value was customized after stack installation
- if (newDefaultConfigurations.containsKey(existingConfigurationKey)) {
- String newDefaultConfigurationValue = newDefaultConfigurations.get(
- existingConfigurationKey);
-
- if (!StringUtils.equals(existingConfigurationValue, newDefaultConfigurationValue)) {
- // the new default is different from the existing cluster value;
- // only override the default value if the existing value differs
- // from the original stack
- String oldDefaultValue = configurationTypeDefaultConfigurations.get(
- existingConfigurationKey);
-
- if (!StringUtils.equals(existingConfigurationValue, oldDefaultValue)) {
- // at this point, we've determined that there is a difference
- // between default values between stacks, but the value was
- // also customized, so keep the customized value
- newDefaultConfigurations.put(existingConfigurationKey, existingConfigurationValue);
- }
- }
- } else {
- // there is no entry in the map, so add the existing key/value pair
- newDefaultConfigurations.put(existingConfigurationKey, existingConfigurationValue);
- }
- }
-
- /*
- for every new configuration which does not exist in the existing
- configurations, see if it was present in the current stack
-
- stack 2.x has foo-site/property (on-ambari-upgrade is false)
- stack 2.y has foo-site/property
- the current cluster (on 2.x) does not have it
-
- In this case, we should NOT add it back as clearly stack advisor has removed it
- */
- Iterator<Map.Entry<String, String>> newDefaultConfigurationsIterator = newDefaultConfigurations.entrySet().iterator();
- while( newDefaultConfigurationsIterator.hasNext() ){
- Map.Entry<String, String> newConfigurationEntry = newDefaultConfigurationsIterator.next();
- String newConfigurationPropertyName = newConfigurationEntry.getKey();
- if (configurationTypeDefaultConfigurations.containsKey(newConfigurationPropertyName)
- && !existingConfigurations.containsKey(newConfigurationPropertyName)) {
- LOG.info(
- "The property {}/{} exists in both {} and {} but is not part of the current set of configurations and will therefore not be included in the configuration merge",
- configurationType, newConfigurationPropertyName, currentStackId, targetStackId);
-
- // remove the property so it doesn't get merged in
- newDefaultConfigurationsIterator.remove();
- }
- }
- }
- } else {
- // downgrade
- cluster.applyLatestConfigurations(cluster.getCurrentStackVersion());
- }
-
- // !!! update the stack
- cluster.setDesiredStackVersion(
- new StackId(targetStack.getStackName(), targetStack.getStackVersion()));
-
- // !!! configs must be created after setting the stack version
- if (null != newConfigurationsByType) {
- configHelper.createConfigTypes(cluster, getManagementController(), newConfigurationsByType,
- userName, "Configuration created for Upgrade");
- }
- }
-
private RequestStageContainer createRequest(UpgradeContext upgradeContext) {
ActionManager actionManager = getManagementController().getActionManager();
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceConfigDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceConfigDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceConfigDAO.java
index 49ad682..72666e5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceConfigDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceConfigDAO.java
@@ -65,7 +65,7 @@ public class ServiceConfigDAO {
"WHERE scv.serviceName=?1 AND scv.version=?2", ServiceConfigEntity.class);
return daoUtils.selectOne(query, serviceName, version);
}
-
+
@RequiresSession
public List<ServiceConfigEntity> findByService(Long clusterId, String serviceName) {
TypedQuery<ServiceConfigEntity> query = entityManagerProvider.get().
@@ -145,29 +145,31 @@ public class ServiceConfigDAO {
}
/**
- * Get all service configurations for the specified cluster and stack. This
- * will return different versions of the same configuration (HDFS v1 and v2)
- * if they exist.
+ * Get service configurations for the specified cluster and stack. This will
+ * return different versions of the same configuration (HDFS v1 and v2) if
+ * they exist.
*
* @param clusterId
* the cluster (not {@code null}).
* @param stackId
* the stack (not {@code null}).
+ * @param service
* @return all service configurations for the cluster and stack.
*/
@RequiresSession
- public List<ServiceConfigEntity> getAllServiceConfigsForClusterAndStack(Long clusterId,
- StackId stackId) {
+ public List<ServiceConfigEntity> getServiceConfigsForServiceAndStack(Long clusterId,
+ StackId stackId, String serviceName) {
StackEntity stackEntity = stackDAO.find(stackId.getStackName(),
stackId.getStackVersion());
TypedQuery<ServiceConfigEntity> query = entityManagerProvider.get().createNamedQuery(
- "ServiceConfigEntity.findAllServiceConfigsByStack",
+ "ServiceConfigEntity.findServiceConfigsByStack",
ServiceConfigEntity.class);
query.setParameter("clusterId", clusterId);
query.setParameter("stack", stackEntity);
+ query.setParameter("serviceName", serviceName);
return daoUtils.selectList(query);
}
@@ -266,7 +268,7 @@ public class ServiceConfigDAO {
@Transactional
public void removeHostFromServiceConfigs(final Long hostId) {
- List<ServiceConfigEntity> allServiceConfigs = this.findAll();
+ List<ServiceConfigEntity> allServiceConfigs = findAll();
for (ServiceConfigEntity serviceConfigEntity : allServiceConfigs) {
List<Long> hostIds = serviceConfigEntity.getHostIds();
if (hostIds != null && hostIds.contains(hostId)) {
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/StackDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/StackDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/StackDAO.java
index 1385990..c0c7792 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/StackDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/StackDAO.java
@@ -25,6 +25,7 @@ import javax.persistence.TypedQuery;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.orm.RequiresSession;
import org.apache.ambari.server.orm.entities.StackEntity;
+import org.apache.ambari.server.state.StackId;
import com.google.inject.Inject;
import com.google.inject.Provider;
@@ -94,6 +95,19 @@ public class StackDAO {
}
/**
+ * Gets the stack that matches the specified stack ID by name and version.
+ *
+ * @param stackId
+ * the stack ID to find (not {@code null}).
+ * @return the stack matching the specified name and version or {@code null}
+ * if none.
+ */
+ @RequiresSession
+ public StackEntity find(StackId stackId) {
+ return find(stackId.getStackName(), stackId.getStackVersion());
+ }
+
+ /**
* Persists a new stack instance.
*
* @param stack
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceConfigEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceConfigEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceConfigEntity.java
index a7ee0f6..b1409ed 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceConfigEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ServiceConfigEntity.java
@@ -46,12 +46,24 @@ import javax.persistence.TableGenerator;
, initialValue = 1
)
@NamedQueries({
- @NamedQuery(name = "ServiceConfigEntity.findAll", query = "SELECT serviceConfig FROM ServiceConfigEntity serviceConfig WHERE serviceConfig.clusterId=:clusterId ORDER BY serviceConfig.version DESC"),
- @NamedQuery(name = "ServiceConfigEntity.findNextServiceConfigVersion", query = "SELECT COALESCE(MAX(serviceConfig.version), 0) + 1 AS nextVersion FROM ServiceConfigEntity serviceConfig WHERE serviceConfig.serviceName=:serviceName AND serviceConfig.clusterId=:clusterId"),
- @NamedQuery(name = "ServiceConfigEntity.findAllServiceConfigsByStack", query = "SELECT serviceConfig FROM ServiceConfigEntity serviceConfig WHERE serviceConfig.clusterId=:clusterId AND serviceConfig.stack=:stack"),
- @NamedQuery(name = "ServiceConfigEntity.findLatestServiceConfigsByStack", query = "SELECT serviceConfig FROM ServiceConfigEntity serviceConfig WHERE serviceConfig.clusterId = :clusterId AND (serviceConfig.groupId = null OR serviceConfig.groupId IN (SELECT cg.groupId from ConfigGroupEntity cg)) AND serviceConfig.version = (SELECT MAX(serviceConfig2.version) FROM ServiceConfigEntity serviceConfig2 WHERE serviceConfig2.clusterId= :clusterId AND serviceConfig2.stack = :stack AND serviceConfig2.serviceName = serviceConfig.serviceName)"),
- @NamedQuery(name = "ServiceConfigEntity.findLatestServiceConfigsByService", query = "SELECT scv FROM ServiceConfigEntity scv WHERE scv.clusterId = :clusterId AND scv.serviceName = :serviceName AND (scv.groupId = null OR scv.groupId IN (SELECT cg.groupId from ConfigGroupEntity cg)) AND scv.version = (SELECT MAX(scv2.version) FROM ServiceConfigEntity scv2 WHERE (scv2.serviceName = :serviceName AND scv2.clusterId = :clusterId) AND (scv2.groupId = scv.groupId OR (scv2.groupId IS NULL AND scv.groupId IS NULL)))"),
- @NamedQuery(name = "ServiceConfigEntity.findLatestServiceConfigsByCluster", query = "SELECT scv FROM ServiceConfigEntity scv WHERE scv.clusterId = :clusterId AND scv.serviceConfigId IN (SELECT MAX(scv1.serviceConfigId) FROM ServiceConfigEntity scv1 WHERE (scv1.clusterId = :clusterId) AND (scv1.groupId IS NULL) GROUP BY scv1.serviceName)")})
+ @NamedQuery(
+ name = "ServiceConfigEntity.findAll",
+ query = "SELECT serviceConfig FROM ServiceConfigEntity serviceConfig WHERE serviceConfig.clusterId=:clusterId ORDER BY serviceConfig.version DESC"),
+ @NamedQuery(
+ name = "ServiceConfigEntity.findNextServiceConfigVersion",
+ query = "SELECT COALESCE(MAX(serviceConfig.version), 0) + 1 AS nextVersion FROM ServiceConfigEntity serviceConfig WHERE serviceConfig.serviceName=:serviceName AND serviceConfig.clusterId=:clusterId"),
+ @NamedQuery(
+ name = "ServiceConfigEntity.findServiceConfigsByStack",
+ query = "SELECT serviceConfig FROM ServiceConfigEntity serviceConfig WHERE serviceConfig.clusterId=:clusterId AND serviceConfig.stack=:stack AND serviceConfig.serviceName=:serviceName"),
+ @NamedQuery(
+ name = "ServiceConfigEntity.findLatestServiceConfigsByStack",
+ query = "SELECT serviceConfig FROM ServiceConfigEntity serviceConfig WHERE serviceConfig.clusterId = :clusterId AND (serviceConfig.groupId = null OR serviceConfig.groupId IN (SELECT cg.groupId from ConfigGroupEntity cg)) AND serviceConfig.version = (SELECT MAX(serviceConfig2.version) FROM ServiceConfigEntity serviceConfig2 WHERE serviceConfig2.clusterId= :clusterId AND serviceConfig2.stack = :stack AND serviceConfig2.serviceName = serviceConfig.serviceName)"),
+ @NamedQuery(
+ name = "ServiceConfigEntity.findLatestServiceConfigsByService",
+ query = "SELECT scv FROM ServiceConfigEntity scv WHERE scv.clusterId = :clusterId AND scv.serviceName = :serviceName AND (scv.groupId = null OR scv.groupId IN (SELECT cg.groupId from ConfigGroupEntity cg)) AND scv.version = (SELECT MAX(scv2.version) FROM ServiceConfigEntity scv2 WHERE (scv2.serviceName = :serviceName AND scv2.clusterId = :clusterId) AND (scv2.groupId = scv.groupId OR (scv2.groupId IS NULL AND scv.groupId IS NULL)))"),
+ @NamedQuery(
+ name = "ServiceConfigEntity.findLatestServiceConfigsByCluster",
+ query = "SELECT scv FROM ServiceConfigEntity scv WHERE scv.clusterId = :clusterId AND scv.serviceConfigId IN (SELECT MAX(scv1.serviceConfigId) FROM ServiceConfigEntity scv1 WHERE (scv1.clusterId = :clusterId) AND (scv1.groupId IS NULL) GROUP BY scv1.serviceName)") })
public class ServiceConfigEntity {
@Id
@Column(name = "service_config_id")
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckAction.java
index dc7bc10..1d0cc76 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckAction.java
@@ -46,8 +46,6 @@ public class ComponentVersionCheckAction extends FinalizeUpgradeAction {
public CommandReport execute(ConcurrentMap<String, Object> requestSharedDataContext)
throws AmbariException, InterruptedException {
- Map<String, String> commandParams = getExecutionCommand().getCommandParams();
-
String clusterName = getExecutionCommand().getClusterName();
Cluster cluster = m_clusters.getCluster(clusterName);
@@ -59,8 +57,7 @@ public class ComponentVersionCheckAction extends FinalizeUpgradeAction {
StringBuilder errSB = new StringBuilder();
if (errors.isEmpty()) {
- outSB.append("No version mismatches found for components");
- errSB.append("No errors found for components");
+ outSB.append("All service components are reporting the correct version.");
return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", outSB.toString(), errSB.toString());
} else {
String structuredOut = getErrors(outSB, errSB, errors);
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java
index 55ec84b..6e79e84 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java
@@ -226,6 +226,7 @@ public class FinalizeUpgradeAction extends AbstractUpgradeServerAction {
Cluster cluster = upgradeContext.getCluster();
RepositoryVersionEntity downgradeFromRepositoryVersion = upgradeContext.getRepositoryVersion();
String downgradeFromVersion = downgradeFromRepositoryVersion.getVersion();
+ Set<String> servicesInUpgrade = upgradeContext.getSupportedServices();
String message;
@@ -234,7 +235,6 @@ public class FinalizeUpgradeAction extends AbstractUpgradeServerAction {
"Finalizing the downgrade from {0} for all cluster services.",
downgradeFromVersion);
} else {
- Set<String> servicesInUpgrade = upgradeContext.getSupportedServices();
message = MessageFormat.format(
"Finalizing the downgrade from {0} for the following services: {1}",
downgradeFromVersion, StringUtils.join(servicesInUpgrade, ','));
@@ -291,6 +291,22 @@ public class FinalizeUpgradeAction extends AbstractUpgradeServerAction {
}
}
+ // remove any configurations for services which crossed a stack boundary
+ for( String serviceName : servicesInUpgrade ){
+ RepositoryVersionEntity sourceRepositoryVersion = upgradeContext.getSourceRepositoryVersion(serviceName);
+ RepositoryVersionEntity targetRepositoryVersion = upgradeContext.getTargetRepositoryVersion(serviceName);
+ StackId sourceStackId = sourceRepositoryVersion.getStackId();
+ StackId targetStackId = targetRepositoryVersion.getStackId();
+ // only work with configurations when crossing stacks
+ if (!sourceStackId.equals(targetStackId)) {
+ outSB.append(
+ String.format("Removing %s configurations for %s", sourceStackId,
+ serviceName)).append(System.lineSeparator());
+
+ cluster.removeConfigurations(sourceStackId, serviceName);
+ }
+ }
+
// ensure that when downgrading, we set the desired back to the
// original value
versionEventPublisher.publish(new StackUpgradeFinishEvent(cluster));
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpdateDesiredStackAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpdateDesiredStackAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpdateDesiredStackAction.java
index 2eec581..84ca326 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpdateDesiredStackAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpdateDesiredStackAction.java
@@ -29,8 +29,6 @@ import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.actionmanager.HostRoleStatus;
import org.apache.ambari.server.agent.CommandReport;
import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.controller.AmbariServer;
-import org.apache.ambari.server.controller.internal.UpgradeResourceProvider;
import org.apache.ambari.server.orm.dao.HostVersionDAO;
import org.apache.ambari.server.orm.entities.HostVersionEntity;
import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
@@ -159,9 +157,8 @@ public class UpdateDesiredStackAction extends AbstractUpgradeServerAction {
}
}
- UpgradeResourceProvider upgradeResourceProvider = new UpgradeResourceProvider(AmbariServer.getController());
- upgradeResourceProvider.applyStackAndProcessConfigurations(upgradeContext);
- m_upgradeHelper.putComponentsToUpgradingState(upgradeContext);
+ // move repositories to the right version and create/revert configs
+ m_upgradeHelper.updateDesiredRepositoriesAndConfigs(upgradeContext);
// a downgrade must force host versions back to INSTALLED for the
// repository which failed to be upgraded.
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/stack/MasterHostResolver.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/MasterHostResolver.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/MasterHostResolver.java
index 3f1d859..466b695 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/MasterHostResolver.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/MasterHostResolver.java
@@ -38,6 +38,7 @@ import org.apache.ambari.server.state.MaintenanceState;
import org.apache.ambari.server.state.ServiceComponent;
import org.apache.ambari.server.state.ServiceComponentHost;
import org.apache.ambari.server.state.UpgradeContext;
+import org.apache.ambari.server.state.UpgradeState;
import org.apache.ambari.server.state.stack.upgrade.Direction;
import org.apache.ambari.server.utils.HTTPUtils;
import org.apache.ambari.server.utils.HostAndPort;
@@ -80,10 +81,10 @@ public class MasterHostResolver {
* @param upgradeContext
* the upgrade context
*/
- public MasterHostResolver(ConfigHelper configHelper, UpgradeContext upgradeContext) {
+ public MasterHostResolver(Cluster cluster, ConfigHelper configHelper, UpgradeContext upgradeContext) {
m_configHelper = configHelper;
m_upgradeContext = upgradeContext;
- m_cluster = upgradeContext.getCluster();
+ m_cluster = cluster;
}
/**
@@ -209,11 +210,20 @@ public class MasterHostResolver {
continue;
}
- if(m_upgradeContext.getDirection() == Direction.UPGRADE){
+ if (sch.getUpgradeState() == UpgradeState.FAILED) {
upgradeHosts.add(hostName);
continue;
}
+ if(m_upgradeContext.getDirection() == Direction.UPGRADE){
+ RepositoryVersionEntity targetRepositoryVersion = m_upgradeContext.getRepositoryVersion();
+ if (!StringUtils.equals(targetRepositoryVersion.getVersion(), sch.getVersion())) {
+ upgradeHosts.add(hostName);
+ }
+
+ continue;
+ }
+
// it's a downgrade ...
RepositoryVersionEntity downgradeToRepositoryVersion = m_upgradeContext.getTargetRepositoryVersion(service);
String downgradeToVersion = downgradeToRepositoryVersion.getVersion();
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
index 4d943f4..f72ab4f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
@@ -544,9 +544,8 @@ public interface Cluster {
Map<String, Object> getSessionAttributes();
/**
- * Makes the most recent configurations in the specified stack the current set
- * of configurations. This method will first ensure that the cluster's current
- * stack matches that of the configuration stack specified.
+ * Makes the most recent configurations for the specified stack current. This
+ * will only modify configurations for the given service.
* <p/>
* When completed, all other configurations for any other stack will remain,
* but will not be marked as selected.
@@ -554,18 +553,21 @@ public interface Cluster {
* @param stackId
* the stack to use when finding the latest configurations (not
* {@code null}).
+ * @param serviceName
+ * the service to modify configurations for (not {@code null}).
*/
- void applyLatestConfigurations(StackId stackId);
+ void applyLatestConfigurations(StackId stackId, String serviceName);
/**
- * Removes all cluster configurations and service configurations that belong
- * to the specified stack.
+ * Removes all configurations for the specified service and stack.
*
* @param stackId
* the stack to use when finding the configurations to remove (not
* {@code null}).
+ * @param serviceName
+ * the service to remove configurations for (not {@code null}).
*/
- void removeConfigurations(StackId stackId);
+ void removeConfigurations(StackId stackId, String serviceName);
/**
* Returns whether this cluster was provisioned by a Blueprint or not.
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigFactory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigFactory.java
index 78f10cd..475c274 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigFactory.java
@@ -34,6 +34,7 @@ public interface ConfigFactory {
* Creates a new {@link Config} object using provided values.
*
* @param cluster
+ * @param stackId
* @param type
* @param tag
* @param map
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
index 96c2dd0..66c9e21 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java
@@ -1029,7 +1029,8 @@ public class ConfigHelper {
String serviceVersionNote) throws AmbariException {
// create the configuration history entry
- Config baseConfig = createConfig(cluster, controller, configType, FIRST_VERSION_TAG, properties,
+ Config baseConfig = createConfig(cluster, controller, cluster.getDesiredStackVersion(),
+ configType, FIRST_VERSION_TAG, properties,
propertyAttributes);
if (baseConfig != null) {
@@ -1070,13 +1071,14 @@ public class ConfigHelper {
* Create configurations and assign them for services.
* @param cluster the cluster
* @param controller the controller
+ * @param stackId the stack to create the new properties for
* @param batchProperties the type->config map batch of properties
* @param authenticatedUserName the user that initiated the change
* @param serviceVersionNote the service version note
* @throws AmbariException
*/
public void createConfigTypes(Cluster cluster,
- AmbariManagementController controller,
+ AmbariManagementController controller, StackId stackId,
Map<String, Map<String, String>> batchProperties, String authenticatedUserName,
String serviceVersionNote) throws AmbariException {
@@ -1086,8 +1088,8 @@ public class ConfigHelper {
String type = entry.getKey();
Map<String, String> properties = entry.getValue();
- Config baseConfig = createConfig(cluster, controller, type, FIRST_VERSION_TAG, properties,
- Collections.<String, Map<String,String>>emptyMap());
+ Config baseConfig = createConfig(cluster, controller, stackId, type, FIRST_VERSION_TAG,
+ properties, Collections.<String, Map<String, String>> emptyMap());
if (null != baseConfig) {
try {
@@ -1122,6 +1124,8 @@ public class ConfigHelper {
* @param controller
* the controller which actually creates the configuration (not
* {@code null}).
+ * @param stackId
+ * the stack to create the new properties for
* @param type
* the new configuration type (not {@code null}).
* @param tag
@@ -1134,8 +1138,8 @@ public class ConfigHelper {
* @return
* @throws AmbariException
*/
- Config createConfig(Cluster cluster, AmbariManagementController controller, String type,
- String tag, Map<String, String> properties,
+ Config createConfig(Cluster cluster, AmbariManagementController controller, StackId stackId,
+ String type, String tag, Map<String, String> properties,
Map<String, Map<String, String>> propertyAttributes) throws AmbariException {
// if the configuration is not new, then create a timestamp tag
@@ -1158,24 +1162,22 @@ public class ConfigHelper {
}
}
- return controller.createConfig(cluster.getDesiredStackVersion(), cluster, type, properties, tag, propertyAttributes);
+ return controller.createConfig(cluster, stackId, type, properties, tag, propertyAttributes);
}
/**
- * Gets the default properties from the specified stack and services when a
- * cluster is first installed.
+ * Gets the default properties for the specified service. These properties
+ * represent those which would be used when a service is first installed.
*
* @param stack
* the stack to pull stack-values from (not {@code null})
- * @param cluster
- * the cluster to use when determining which services default
- * configurations to include (not {@code null}).
- * @param onStackUpgradeFilter if true skip {@code <on-stack-upgrade merge="false"/>} properties
+ * @param serviceName
+ * the service name (not {@code null}).
* @return a mapping of configuration type to map of key/value pairs for the
* default configurations.
* @throws AmbariException
*/
- public Map<String, Map<String, String>> getDefaultProperties(StackId stack, Cluster cluster, boolean onStackUpgradeFilter)
+ public Map<String, Map<String, String>> getDefaultProperties(StackId stack, String serviceName)
throws AmbariException {
Map<String, Map<String, String>> defaultPropertiesByType = new HashMap<>();
@@ -1189,28 +1191,26 @@ public class ConfigHelper {
if (!defaultPropertiesByType.containsKey(type)) {
defaultPropertiesByType.put(type, new HashMap<String, String>());
}
- if (!onStackUpgradeFilter || stackDefaultProperty.getPropertyStackUpgradeBehavior().isMerge()) {
+ if (stackDefaultProperty.getPropertyStackUpgradeBehavior().isMerge()) {
defaultPropertiesByType.get(type).put(stackDefaultProperty.getName(),
stackDefaultProperty.getValue());
}
}
// for every installed service, populate the default service properties
- for (String serviceName : cluster.getServices().keySet()) {
- Set<org.apache.ambari.server.state.PropertyInfo> serviceConfigurationProperties = ambariMetaInfo.getServiceProperties(
- stack.getStackName(), stack.getStackVersion(), serviceName);
+ Set<org.apache.ambari.server.state.PropertyInfo> serviceConfigurationProperties = ambariMetaInfo.getServiceProperties(
+ stack.getStackName(), stack.getStackVersion(), serviceName);
- // !!! use new stack as the basis
- for (PropertyInfo serviceDefaultProperty : serviceConfigurationProperties) {
- String type = ConfigHelper.fileNameToConfigType(serviceDefaultProperty.getFilename());
+ // !!! use new stack as the basis
+ for (PropertyInfo serviceDefaultProperty : serviceConfigurationProperties) {
+ String type = ConfigHelper.fileNameToConfigType(serviceDefaultProperty.getFilename());
- if (!defaultPropertiesByType.containsKey(type)) {
- defaultPropertiesByType.put(type, new HashMap<String, String>());
- }
- if (!onStackUpgradeFilter || serviceDefaultProperty.getPropertyStackUpgradeBehavior().isMerge()) {
- defaultPropertiesByType.get(type).put(serviceDefaultProperty.getName(),
- serviceDefaultProperty.getValue());
- }
+ if (!defaultPropertiesByType.containsKey(type)) {
+ defaultPropertiesByType.put(type, new HashMap<String, String>());
+ }
+ if (serviceDefaultProperty.getPropertyStackUpgradeBehavior().isMerge()) {
+ defaultPropertiesByType.get(type).put(serviceDefaultProperty.getName(),
+ serviceDefaultProperty.getValue());
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
index 0adf1bd..2ee1b26 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
@@ -32,8 +32,10 @@ import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
import org.apache.ambari.server.logging.LockFactory;
import org.apache.ambari.server.orm.dao.ClusterDAO;
import org.apache.ambari.server.orm.dao.ServiceConfigDAO;
+import org.apache.ambari.server.orm.dao.StackDAO;
import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
import org.apache.ambari.server.orm.entities.ClusterEntity;
+import org.apache.ambari.server.orm.entities.StackEntity;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -99,10 +101,11 @@ public class ConfigImpl implements Config {
ConfigImpl(@Assisted Cluster cluster, @Assisted("type") String type,
@Assisted("tag") @Nullable String tag,
@Assisted Map<String, String> properties,
- @Assisted @Nullable Map<String, Map<String, String>> propertiesAttributes, ClusterDAO clusterDAO,
+ @Assisted @Nullable Map<String, Map<String, String>> propertiesAttributes,
+ ClusterDAO clusterDAO, StackDAO stackDAO,
Gson gson, AmbariEventPublisher eventPublisher, LockFactory lockFactory) {
this(cluster.getDesiredStackVersion(), cluster, type, tag, properties, propertiesAttributes,
- clusterDAO, gson, eventPublisher, lockFactory);
+ clusterDAO, stackDAO, gson, eventPublisher, lockFactory);
}
@@ -110,7 +113,8 @@ public class ConfigImpl implements Config {
ConfigImpl(@Assisted @Nullable StackId stackId, @Assisted Cluster cluster, @Assisted("type") String type,
@Assisted("tag") @Nullable String tag,
@Assisted Map<String, String> properties,
- @Assisted @Nullable Map<String, Map<String, String>> propertiesAttributes, ClusterDAO clusterDAO,
+ @Assisted @Nullable Map<String, Map<String, String>> propertiesAttributes,
+ ClusterDAO clusterDAO, StackDAO stackDAO,
Gson gson, AmbariEventPublisher eventPublisher, LockFactory lockFactory) {
propertyLock = lockFactory.newReadWriteLock(PROPERTY_LOCK_LABEL);
@@ -133,6 +137,7 @@ public class ConfigImpl implements Config {
this.tag = tag;
ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());
+ StackEntity stackEntity = stackDAO.find(stackId.getStackName(), stackId.getStackVersion());
ClusterConfigEntity entity = new ClusterConfigEntity();
entity.setClusterEntity(clusterEntity);
@@ -141,7 +146,7 @@ public class ConfigImpl implements Config {
entity.setVersion(version);
entity.setTag(this.tag);
entity.setTimestamp(System.currentTimeMillis());
- entity.setStack(clusterEntity.getDesiredStack());
+ entity.setStack(stackEntity);
entity.setData(gson.toJson(properties));
if (null != propertiesAttributes) {
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
index 5c29fb5..f07bd37 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
@@ -186,6 +186,14 @@ public class UpgradeContext {
private final Map<String, RepositoryVersionEntity> m_targetRepositoryMap = new HashMap<>();
/**
+ * A mapping of service to source (from) repository. On an upgrade, this will
+ * be the current desired repository of every service. When downgrading, this
+ * will be the same for all components and will represent the value returned
+ * from {@link #getRepositoryVersion()}.
+ */
+ private final Map<String, RepositoryVersionEntity> m_sourceRepositoryMap = new HashMap<>();
+
+ /**
* Used by some {@link Grouping}s to generate commands. It is exposed here
* mainly for injection purposes since the XML is not created by Guice.
*/
@@ -303,8 +311,10 @@ public class UpgradeContext {
}
// populate the target repository map for all services in the upgrade
- for (String service : m_services) {
- m_targetRepositoryMap.put(service, m_repositoryVersion);
+ for (String serviceName : m_services) {
+ Service service = cluster.getService(serviceName);
+ m_sourceRepositoryMap.put(serviceName, service.getDesiredRepositoryVersion());
+ m_targetRepositoryMap.put(serviceName, m_repositoryVersion);
}
break;
@@ -315,9 +325,10 @@ public class UpgradeContext {
m_repositoryVersion = upgrade.getRepositoryVersion();
- // populate the target repository map for all services in the upgrade
+ // populate the repository maps for all services in the upgrade
for (UpgradeHistoryEntity history : upgrade.getHistory()) {
m_services.add(history.getServiceName());
+ m_sourceRepositoryMap.put(history.getServiceName(), m_repositoryVersion);
m_targetRepositoryMap.put(history.getServiceName(), history.getFromReposistoryVersion());
}
@@ -376,7 +387,7 @@ public class UpgradeContext {
m_autoSkipServiceCheckFailures = skipServiceCheckFailures;
m_autoSkipManualVerification = skipManualVerification;
- m_resolver = new MasterHostResolver(configHelper, this);
+ m_resolver = new MasterHostResolver(m_cluster, configHelper, this);
}
/**
@@ -405,7 +416,9 @@ public class UpgradeContext {
List<UpgradeHistoryEntity> allHistory = upgradeEntity.getHistory();
for (UpgradeHistoryEntity history : allHistory) {
String serviceName = history.getServiceName();
+ RepositoryVersionEntity sourceRepositoryVersion = history.getFromReposistoryVersion();
RepositoryVersionEntity targetRepositoryVersion = history.getTargetRepositoryVersion();
+ m_sourceRepositoryMap.put(serviceName, sourceRepositoryVersion);
m_targetRepositoryMap.put(serviceName, targetRepositoryVersion);
m_services.add(serviceName);
}
@@ -416,7 +429,7 @@ public class UpgradeContext {
Map<String, UpgradePack> packs = m_metaInfo.getUpgradePacks(stackId.getStackName(), stackId.getStackVersion());
m_upgradePack = packs.get(upgradePackage);
- m_resolver = new MasterHostResolver(configHelper, this);
+ m_resolver = new MasterHostResolver(m_cluster, configHelper, this);
}
/**
@@ -448,6 +461,50 @@ public class UpgradeContext {
}
/**
+ * Gets the version that components are being considered to be "coming from".
+ * <p/>
+ * With a {@link Direction#UPGRADE}, this value represents the services'
+ * desired repository. However, {@link Direction#DOWNGRADE} will use the same
+ * value for all services which is the version that the downgrade is coming
+ * from.
+ *
+ * @return the source version for the upgrade
+ */
+ public Map<String, RepositoryVersionEntity> getSourceVersions() {
+ return new HashMap<>(m_sourceRepositoryMap);
+ }
+
+ /**
+ * Gets the version that service is being considered to be "coming from".
+ * <p/>
+ * With a {@link Direction#UPGRADE}, this value represents the services'
+ * desired repository. However, {@link Direction#DOWNGRADE} will use the same
+ * value for all services which is the version that the downgrade is coming
+ * from.
+ *
+ * @return the source repository for the upgrade
+ */
+ public RepositoryVersionEntity getSourceRepositoryVersion(String serviceName) {
+ return m_sourceRepositoryMap.get(serviceName);
+ }
+
+ /**
+ * Gets the version that service is being considered to be "coming from".
+ * <p/>
+ * With a {@link Direction#UPGRADE}, this value represents the services'
+ * desired repository. However, {@link Direction#DOWNGRADE} will use the same
+ * value for all services which is the version that the downgrade is coming
+ * from.
+ *
+ * @return the source repository for the upgrade
+ * @see #getSourceRepositoryVersion(String)
+ */
+ public String getSourceVersion(String serviceName) {
+ RepositoryVersionEntity serviceSourceVersion = m_sourceRepositoryMap.get(serviceName);
+ return serviceSourceVersion.getVersion();
+ }
+
+ /**
* Gets the version being upgraded to or downgraded to for all services
* participating. This is the version that the service will be on if the
* upgrade or downgrade succeeds.
@@ -487,6 +544,7 @@ public class UpgradeContext {
* the original repository that the service was on.
*
* @return the target version for the upgrade
+ * @see #getTargetRepositoryVersion(String)
*/
public String getTargetVersion(String serviceName) {
RepositoryVersionEntity serviceTargetVersion = m_targetRepositoryMap.get(serviceName);
http://git-wip-us.apache.org/repos/asf/ambari/blob/c4148d80/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
index 0f39e60..b228988 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
@@ -22,6 +22,7 @@ import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
+import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
@@ -33,6 +34,7 @@ import org.apache.ambari.annotations.Experimental;
import org.apache.ambari.annotations.ExperimentalFeature;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.controller.AmbariManagementController;
import org.apache.ambari.server.controller.internal.TaskResourceProvider;
import org.apache.ambari.server.controller.predicate.AndPredicate;
import org.apache.ambari.server.controller.spi.ClusterController;
@@ -49,7 +51,10 @@ import org.apache.ambari.server.controller.utilities.PredicateBuilder;
import org.apache.ambari.server.controller.utilities.PropertyHelper;
import org.apache.ambari.server.events.listeners.upgrade.StackVersionListener;
import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
+import org.apache.ambari.server.orm.dao.ServiceConfigDAO;
+import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
+import org.apache.ambari.server.orm.entities.ServiceConfigEntity;
import org.apache.ambari.server.stack.HostsType;
import org.apache.ambari.server.stack.MasterHostResolver;
import org.apache.ambari.server.state.stack.UpgradePack;
@@ -177,25 +182,46 @@ public class UpgradeHelper {
* {@link StageWrapperBuilder} has finished building out all of the stages.
*/
@Inject
- private Provider<ConfigHelper> m_configHelper;
+ private Provider<ConfigHelper> m_configHelperProvider;
@Inject
- private Provider<AmbariMetaInfo> m_ambariMetaInfo;
+ private Provider<AmbariMetaInfo> m_ambariMetaInfoProvider;
@Inject
- private Provider<Clusters> clusters;
+ private Provider<Clusters> m_clusters;
@Inject
- private Provider<RepositoryVersionDAO> s_repoVersionDAO;
+ private Provider<RepositoryVersionDAO> m_repoVersionProvider;
/**
- * Get right Upgrade Pack, depends on stack, direction and upgrade type information
- * @param clusterName The name of the cluster
- * @param upgradeFromVersion Current stack version
- * @param upgradeToVersion Target stack version
- * @param direction {@code Direction} of the upgrade
- * @param upgradeType The {@code UpgradeType}
- * @param preferredUpgradePackName For unit test, need to prefer an upgrade pack since multiple matches can be found.
+ * Used to update the configuration properties.
+ */
+ @Inject
+ private Provider<AmbariManagementController> m_controllerProvider;
+
+ /**
+ * Used to get configurations by service name.
+ */
+ @Inject
+ private ServiceConfigDAO m_serviceConfigDAO;
+
+ /**
+ * Get right Upgrade Pack, depends on stack, direction and upgrade type
+ * information
+ *
+ * @param clusterName
+ * The name of the cluster
+ * @param upgradeFromVersion
+ * Current stack version
+ * @param upgradeToVersion
+ * Target stack version
+ * @param direction
+ * {@code Direction} of the upgrade
+ * @param upgradeType
+ * The {@code UpgradeType}
+ * @param preferredUpgradePackName
+ * For unit test, need to prefer an upgrade pack since multiple
+ * matches can be found.
* @return {@code UpgradeType} object
* @throws AmbariException
*/
@@ -203,7 +229,7 @@ public class UpgradeHelper {
Direction direction, UpgradeType upgradeType, String preferredUpgradePackName) throws AmbariException {
// Find upgrade packs based on current stack. This is where to upgrade from
- Cluster cluster = clusters.get().getCluster(clusterName);
+ Cluster cluster = m_clusters.get().getCluster(clusterName);
StackId stack = cluster.getCurrentStackVersion();
String repoVersion = upgradeToVersion;
@@ -213,13 +239,14 @@ public class UpgradeHelper {
repoVersion = upgradeFromVersion;
}
- RepositoryVersionEntity versionEntity = s_repoVersionDAO.get().findByStackNameAndVersion(stack.getStackName(), repoVersion);
+ RepositoryVersionEntity versionEntity = m_repoVersionProvider.get().findByStackNameAndVersion(
+ stack.getStackName(), repoVersion);
if (versionEntity == null) {
throw new AmbariException(String.format("Repository version %s was not found", repoVersion));
}
- Map<String, UpgradePack> packs = m_ambariMetaInfo.get().getUpgradePacks(stack.getStackName(), stack.getStackVersion());
+ Map<String, UpgradePack> packs = m_ambariMetaInfoProvider.get().getUpgradePacks(stack.getStackName(), stack.getStackVersion());
UpgradePack pack = null;
if (StringUtils.isNotEmpty(preferredUpgradePackName) && packs.containsKey(preferredUpgradePackName)) {
@@ -595,7 +622,7 @@ public class UpgradeHelper {
value = ctx.getDirection().getText(p == Placeholder.DIRECTION_TEXT_PROPER);
break;
default:
- value = m_configHelper.get().getPlaceholderValueFromDesiredConfigurations(
+ value = m_configHelperProvider.get().getPlaceholderValueFromDesiredConfigurations(
cluster, token);
break;
}
@@ -701,7 +728,7 @@ public class UpgradeHelper {
private void setDisplayNames(UpgradeContext context, String service, String component) {
StackId stackId = context.getCluster().getDesiredStackVersion();
try {
- ServiceInfo serviceInfo = m_ambariMetaInfo.get().getService(stackId.getStackName(),
+ ServiceInfo serviceInfo = m_ambariMetaInfoProvider.get().getService(stackId.getStackName(),
stackId.getStackVersion(), service);
context.setServiceDisplay(service, serviceInfo.getDisplayName());
@@ -714,6 +741,32 @@ public class UpgradeHelper {
}
/**
+ * Updates the various repositories and configurations for services
+ * participating in the upgrade or downgrade. The following actions are
+ * performed in order:
+ * <ul>
+ * <li>The desired repository for every service and component is changed
+ * <li>The {@link UpgradeState} of every component host is moved to either
+ * {@link UpgradeState#IN_PROGRESS} or {@link UpgradeState#NONE}.
+ * <li>In the case of an upgrade, new configurations and service
+ * configurations are created if necessary. In the case of a downgrade, any
+ * configurations created by the upgrade are reverted.
+ * </ul>
+ *
+ * @param upgradeContext
+ * the upgrade context holding all relevent upgrade information (not
+ * {@code null}).
+ * @throws AmbariException
+ */
+ @Transactional
+ @Experimental(feature = ExperimentalFeature.PATCH_UPGRADES)
+ public void updateDesiredRepositoriesAndConfigs(UpgradeContext upgradeContext)
+ throws AmbariException {
+ setDesiredRepositories(upgradeContext);
+ processConfigurationsIfRequired(upgradeContext);
+ }
+
+ /**
* Transitions all affected components to {@link UpgradeState#IN_PROGRESS}.
* Transition is performed only for components that advertise their version.
* Additionally sets the service component desired version to the specified
@@ -726,10 +779,8 @@ public class UpgradeHelper {
* @param upgradeContext
* the upgrade context (not {@code null}).
*/
- @Transactional
@Experimental(feature = ExperimentalFeature.PATCH_UPGRADES)
- public void putComponentsToUpgradingState(UpgradeContext upgradeContext) throws AmbariException {
-
+ private void setDesiredRepositories(UpgradeContext upgradeContext) throws AmbariException {
Cluster cluster = upgradeContext.getCluster();
Set<String> services = upgradeContext.getSupportedServices();
@@ -743,7 +794,7 @@ public class UpgradeHelper {
for (ServiceComponent serviceComponent : components) {
boolean versionAdvertised = false;
try {
- ComponentInfo ci = m_ambariMetaInfo.get().getComponent(targetStack.getStackName(),
+ ComponentInfo ci = m_ambariMetaInfoProvider.get().getComponent(targetStack.getStackName(),
targetStack.getStackVersion(), serviceComponent.getServiceName(),
serviceComponent.getName());
@@ -777,4 +828,199 @@ public class UpgradeHelper {
}
}
}
+
+ /**
+ * Handles the creation or resetting of configurations based on whether an
+ * upgrade or downgrade is occurring. This method will not do anything when
+ * the service is not crossing major stack versions, since, by definition, no
+ * new configurations are automatically created when upgrading with the same
+ * stack (ie HDP 2.2.0.0 -> HDP 2.2.1.0).
+ * <p/>
+ * When upgrading or downgrade between stacks (HDP 2.2.0.0 -> HDP 2.3.0.0)
+ * then this will perform the following:
+ * <ul>
+ * <li>Upgrade: Create new configurations that are a merge between the source
+ * stack and the target stack. If a value has changed between stacks, then the
+ * target stack value should be taken unless the cluster's value differs from
+ * the old stack. This can occur if a property has been customized after
+ * installation.</li>
+ * <li>Downgrade: Reset the latest configurations from the service's original
+ * stack. The new configurations that were created on upgrade must be left
+ * intact until all components have been reverted, otherwise heartbeats will
+ * fail due to missing configurations.</li>
+ * </ul>
+ *
+ * @param upgradeContext
+ * the upgrade context (not {@code null}).
+ * @throws AmbariException
+ */
+ private void processConfigurationsIfRequired(UpgradeContext upgradeContext)
+ throws AmbariException {
+
+ AmbariManagementController controller = m_controllerProvider.get();
+
+ Cluster cluster = upgradeContext.getCluster();
+ Direction direction = upgradeContext.getDirection();
+ String userName = controller.getAuthName();
+ Set<String> servicesInUpgrade = upgradeContext.getSupportedServices();
+
+ // merge or revert configurations for any service that needs it
+ for( String serviceName : servicesInUpgrade ){
+ RepositoryVersionEntity sourceRepositoryVersion = upgradeContext.getSourceRepositoryVersion(serviceName);
+ RepositoryVersionEntity targetRepositoryVersion = upgradeContext.getTargetRepositoryVersion(serviceName);
+ StackId sourceStackId = sourceRepositoryVersion.getStackId();
+ StackId targetStackId = targetRepositoryVersion.getStackId();
+
+ // only work with configurations when crossing stacks
+ if (sourceStackId.equals(targetStackId)) {
+ RepositoryVersionEntity associatedRepositoryVersion = upgradeContext.getRepositoryVersion();
+ LOG.info(
+ "The {} {} {} will not change stack configurations for {} since the source and target are both {}",
+ direction.getText(false), direction.getPreposition(),
+ associatedRepositoryVersion.getVersion(), serviceName, targetStackId);
+
+ continue;
+ }
+
+ ConfigHelper configHelper = m_configHelperProvider.get();
+
+ // downgrade is easy - just remove the new and make the old current
+ if (direction == Direction.DOWNGRADE) {
+ cluster.applyLatestConfigurations(targetStackId, serviceName);
+ return;
+ }
+
+ // upgrade is a bit harder - we have to merge new stack configurations in
+
+ // populate a map of default configurations for the service on the old
+ // stack (this is used when determining if a property has been
+ // customized and should be overridden with the new stack value)
+ Map<String, Map<String, String>> oldServiceDefaultConfigsByType = configHelper.getDefaultProperties(
+ sourceStackId, serviceName);
+
+ // populate a map with default configurations from the new stack
+ Map<String, Map<String, String>> newServiceDefaultConfigsByType = configHelper.getDefaultProperties(
+ targetStackId, serviceName);
+
+ // find the current, existing configurations for the service
+ List<Config> existingServiceConfigs = new ArrayList<>();
+ List<ServiceConfigEntity> latestServiceConfigs = m_serviceConfigDAO.getLastServiceConfigsForService(
+ cluster.getClusterId(), serviceName);
+
+ for (ServiceConfigEntity serviceConfig : latestServiceConfigs) {
+ List<ClusterConfigEntity> existingConfigurations = serviceConfig.getClusterConfigEntities();
+ for (ClusterConfigEntity currentServiceConfig : existingConfigurations) {
+ String configurationType = currentServiceConfig.getType();
+ Config currentClusterConfigForService = cluster.getDesiredConfigByType(configurationType);
+ existingServiceConfigs.add(currentClusterConfigForService);
+ }
+ }
+
+ // now that we have found old, new, and existing configs, overlay the
+ // existing on top of the new
+ for (Config existingServiceConfig : existingServiceConfigs) {
+ String configurationType = existingServiceConfig.getType();
+
+ // get current stack default configurations on install
+ Map<String, String> oldServiceDefaultConfigs = oldServiceDefaultConfigsByType.get(
+ configurationType);
+
+ // NPE sanity for current stack defaults
+ if (null == oldServiceDefaultConfigs) {
+ oldServiceDefaultConfigs = Collections.emptyMap();
+ }
+
+ // get the existing configurations
+ Map<String, String> existingConfigurations = existingServiceConfig.getProperties();
+
+ // get the new configurations
+ Map<String, String> newDefaultConfigurations = newServiceDefaultConfigsByType.get(
+ configurationType);
+
+ // if the new stack configurations don't have the type, then simply add
+ // all of the existing in
+ if (null == newDefaultConfigurations) {
+ newServiceDefaultConfigsByType.put(configurationType, existingConfigurations);
+ continue;
+ } else {
+ // Remove any configs in the new stack whose value is NULL, unless
+ // they currently exist and the value is not NULL.
+ Iterator<Map.Entry<String, String>> iter = newDefaultConfigurations.entrySet().iterator();
+ while (iter.hasNext()) {
+ Map.Entry<String, String> entry = iter.next();
+ if (entry.getValue() == null) {
+ iter.remove();
+ }
+ }
+ }
+
+ // process every existing configuration property for this configuration
+ // type
+ for (Map.Entry<String, String> existingConfigurationEntry : existingConfigurations.entrySet()) {
+ String existingConfigurationKey = existingConfigurationEntry.getKey();
+ String existingConfigurationValue = existingConfigurationEntry.getValue();
+
+ // if there is already an entry, we now have to try to determine if
+ // the value was customized after stack installation
+ if (newDefaultConfigurations.containsKey(existingConfigurationKey)) {
+ String newDefaultConfigurationValue = newDefaultConfigurations.get(
+ existingConfigurationKey);
+
+ if (!StringUtils.equals(existingConfigurationValue, newDefaultConfigurationValue)) {
+ // the new default is different from the existing cluster value;
+ // only override the default value if the existing value differs
+ // from the original stack
+ String oldDefaultValue = oldServiceDefaultConfigs.get(existingConfigurationKey);
+
+ if (!StringUtils.equals(existingConfigurationValue, oldDefaultValue)) {
+ // at this point, we've determined that there is a
+ // difference
+ // between default values between stacks, but the value was
+ // also customized, so keep the customized value
+ newDefaultConfigurations.put(existingConfigurationKey, existingConfigurationValue);
+ }
+ }
+ } else {
+ // there is no entry in the map, so add the existing key/value
+ // pair
+ newDefaultConfigurations.put(existingConfigurationKey, existingConfigurationValue);
+ }
+ }
+
+ /*
+ for every new configuration which does not exist in the existing
+ configurations, see if it was present in the current stack
+
+ stack 2.x has foo-site/property (on-ambari-upgrade is false)
+ stack 2.y has foo-site/property
+ the current cluster (on 2.x) does not have it
+
+ In this case, we should NOT add it back as clearly stack advisor has removed it
+ */
+ Iterator<Map.Entry<String, String>> newDefaultConfigurationsIterator = newDefaultConfigurations.entrySet().iterator();
+ while (newDefaultConfigurationsIterator.hasNext()) {
+ Map.Entry<String, String> newConfigurationEntry = newDefaultConfigurationsIterator.next();
+ String newConfigurationPropertyName = newConfigurationEntry.getKey();
+ if (oldServiceDefaultConfigs.containsKey(newConfigurationPropertyName)
+ && !existingConfigurations.containsKey(newConfigurationPropertyName)) {
+ LOG.info(
+ "The property {}/{} exists in both {} and {} but is not part of the current set of configurations and will therefore not be included in the configuration merge",
+ configurationType, newConfigurationPropertyName, sourceStackId, targetStackId);
+
+ // remove the property so it doesn't get merged in
+ newDefaultConfigurationsIterator.remove();
+ }
+ }
+ }
+
+ if (null != newServiceDefaultConfigsByType) {
+ Set<String> configTypes = newServiceDefaultConfigsByType.keySet();
+ LOG.info("The upgrade will create the following configurations for stack {}: {}",
+ targetStackId, StringUtils.join(configTypes, ','));
+
+ configHelper.createConfigTypes(cluster, controller, targetStackId,
+ newServiceDefaultConfigsByType, userName, "Configuration created for Upgrade");
+ }
+ }
+ }
}